We use CLASS, since the variation of fundamental constants has been implemented there from version 3.1 onwards (see the git
diff for implementation details). This parametrisation was initially used in this paper and implemented in CLASS
for this purpose (strangely, they do not investigate the variation of $\alpha_\text{EM}$).
import classy
classy.__version__
'v3.2.0'
Show code cell
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Seaborn "ticks" theme with the top and right spines hidden on every axis.
sns.set_theme(style="ticks", rc={f"axes.spines.{spine}": False for spine in ["top", "right"]})
We first compute the power spectra from $\Lambda$CDM parameters to use them later as reference curves.
# Planck-like ΛCDM baseline parameters shared by every CLASS run below.
common_settings = dict(
    # h=0.67810,
    omega_b=0.02238280,
    omega_cdm=0.1201075,
    A_s=2.100549e-09,
    n_s=0.9660499,
    tau_reio=0.05430842,
    output="tCl,pCl,lCl",
    lensing="yes",
    l_max_scalars=5000,
)
# "100*theta_s" is not a valid Python identifier, so it cannot be passed to
# dict(...) above; the angular scale replaces the commented-out h value.
common_settings["100*theta_s"] = 1.041783
# Reference ΛCDM spectra.
lcdm = classy.Class()
lcdm.set(common_settings)
lcdm.compute()
lmin, lmax = 2, 3000
cl_lcdm = lcdm.lensed_cl(lmax)
ell = cl_lcdm["ell"][lmin:]
# ell(ell+1)/2π conversion from C_ell to D_ell; the 10**12 presumably
# converts K^2 to μK^2 — TODO confirm against CLASS output units.
factor = (ell * (ell + 1)) / 2 / np.pi * 10**12
The variation of fundamental constants is defined as
$$
\delta m_e\equiv\frac{m_{e,\text{early}}}{m_{e,\text{late}}} - 1,
$$
and same for $\alpha_\text{EM}$. For each value, we compute the $C_\ell$ with CLASS
as follows
from tqdm.auto import tqdm

varconst = classy.Class()
# Sampled values of the early/late ratio: 0.9 .. 1.1 in steps of 0.025.
# np.linspace is preferred to np.arange with a float step, whose endpoint
# inclusion is unreliable due to floating-point accumulation.
step = 0.025  # spacing between consecutive sampled ratios
varying_range = np.linspace(0.9, 1.1, 9)


def run_class(**kwargs):
    """Run CLASS with the common settings plus the given varying-constants
    options (e.g. varying_me=..., varying_alpha=...) and return the lensed
    Cl's up to lmax."""
    settings = common_settings.copy()
    settings.update(dict(varying_fundamental_constants="instantaneous", **kwargs))
    varconst.set(settings)
    varconst.compute()
    return varconst.lensed_cl(lmax)


cl_me, cl_alpha = {}, {}
for r in (pbar := tqdm(varying_range)):
    pbar.set_description(f"Processing r = {r:.2f}")
    cl_me[r] = run_class(varying_me=r)
    # Reset the CLASS instance between runs so settings do not leak over.
    varconst.empty()
    cl_alpha[r] = run_class(varying_alpha=r)
    varconst.empty()
Let's plot the power spectra
# Diverging palette: one color per sampled value of the varying constant.
palette = "vlag"
color_list = sns.color_palette(palette, n_colors=len(varying_range)).as_hex()
def add_colorbar(fig, label, ax=None):
    """Draw a horizontal discrete colorbar mapping colors to the sampled ratios.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure the colorbar is added to.
    label : str
        Label written under the colorbar.
    ax : matplotlib.axes.Axes, optional
        If given, the colorbar is inset just below this axes; otherwise it is
        placed in figure coordinates at the bottom of ``fig``.
    """
    if ax is not None:
        cbar_ax = ax.inset_axes([0.125, -0.15, 0.75, 0.025])
    else:
        cbar_ax = fig.add_axes([0.25, 0.0, 0.5, 0.025])
    cmap = sns.color_palette(palette, n_colors=len(varying_range), as_cmap=True)
    # BoundaryNorm needs cmap.N + 1 edges for cmap.N colors; passing the N
    # sampled values directly (as before) left the last palette color unused
    # and misaligned the colorbar with the plotted curves. Build one bin
    # centered on each sampled value instead.
    half = np.diff(varying_range).mean() / 2
    edges = np.concatenate([varying_range - half, [varying_range[-1] + half]])
    norm = mpl.colors.BoundaryNorm(edges, cmap.N)
    # mpl.colorbar.ColorbarBase is deprecated since matplotlib 3.6; use
    # fig.colorbar with an explicit ScalarMappable instead.
    fig.colorbar(
        mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
        cax=cbar_ax,
        orientation="horizontal",
        label=label,
    )
def plot_cl(cl, label=None):
    """Plot the lensed CMB spectra of every varying-constant run on a 2x3 grid.

    Parameters
    ----------
    cl : dict
        Maps each sampled ratio to its CLASS ``lensed_cl`` dict.
    label : str, optional
        Colorbar label describing the varied constant.
    """
    fig, axes = plt.subplots(2, 3, sharex=True, figsize=(12, 8))
    # Pair axes with the spectra explicitly so the layout does not depend on
    # the position of the "ell" key in the lensed_cl dict (indexing axes with
    # enumerate(cl_lcdm) would break if "ell" were not the last key).
    modes = [mode for mode in cl_lcdm if mode != "ell"]
    for ax, mode in zip(axes.flatten(), modes):
        for j, (k, v) in enumerate(cl.items()):
            ax.plot(ell, v[mode][ell] * factor, color=color_list[j])
        # ΛCDM reference curve in black on top of the colored family.
        ax.plot(ell, cl_lcdm[mode][ell] * factor, "-k")
        if mode == "tt":
            ax.plot([], [], "-k", label=r"$\Lambda$CDM")
            ax.legend()
        # Lensing-potential spectra (pp, tp) span decades: use a log scale.
        ax.set(title=mode.upper(), yscale="log" if "p" in mode else "linear")
    add_colorbar(fig, label)
# Colorbar labels for the two varied constants.
label_me = r"$\delta m_e\equiv m_{e,\mathrm{early}}/m_{e,\mathrm{late}} - 1$"
label_alpha = (
    r"$\delta \alpha_\mathrm{EM}\equiv \alpha_\mathrm{EM, early}/\alpha_\mathrm{EM, late} - 1$"
)
# One spectra figure per varied constant.
for cl, label in ((cl_me, label_me), (cl_alpha, label_alpha)):
    plot_cl(cl, label)
$\mathcal{D}_\ell^\text{TT}/\mathcal{D}_\ell^\text{EE}$¶
Show code cell
def plot_misc(cl, cl_lcdm, yscale="log", ylabel=None, label=None, ax=None):
    """Plot a derived spectrum (one curve per run) against its ΛCDM reference.

    Parameters
    ----------
    cl : dict
        Maps each sampled ratio to a derived 1-D array (indexed by ell).
    cl_lcdm : array-like
        Same derived quantity computed for the ΛCDM reference.
    yscale, ylabel : str, optional
        Axis scale and label.
    label : str, optional
        Colorbar label passed on to ``add_colorbar``.
    ax : matplotlib.axes.Axes, optional
        Target axes; a fresh figure is created when omitted.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 6))
    else:
        fig = ax.get_figure()
    # One colored curve per sampled ratio, in the palette order.
    for idx, curve in enumerate(cl.values()):
        ax.plot(ell, curve[ell], color=color_list[idx])
    # Black ΛCDM reference, plus an empty artist to feed the legend.
    ax.plot(ell, cl_lcdm[ell], "-k")
    ax.plot([], [], "-k", label=r"$\Lambda$CDM")
    ax.set(ylabel=ylabel, yscale=yscale)
    ax.legend()
    add_colorbar(fig, label, ax=ax)
# TT/EE ratio for every run; divisions by ~0 in EE at the lowest multipoles
# are expected, hence the suppressed divide/invalid warnings.
with np.errstate(divide="ignore", invalid="ignore"):
    fig, axes = plt.subplots(1, 2, figsize=(18, 6))
    for cl, label, ax in zip([cl_me, cl_alpha], [label_me, label_alpha], axes):
        cl_tt_over_cl_ee = {k: v["tt"] / v["ee"] for k, v in cl.items()}
        plot_misc(
            cl_tt_over_cl_ee,
            cl_lcdm["tt"] / cl_lcdm["ee"],
            ylabel=r"$\mathcal{D}_\ell^\mathrm{TT}/\mathcal{D}_\ell^\mathrm{EE}$",
            label=label,
            ax=ax,
        )
$\mathcal{R}_\ell^\text{TE}=\mathcal{D}_\ell^\text{TE}/\sqrt{\mathcal{D}_\ell^\text{TT}\times\mathcal{D}_\ell^\text{EE}}$¶
# TE correlation coefficient R_ell^TE = D^TE / sqrt(D^TT * D^EE); warnings
# from near-zero denominators are suppressed on purpose.
with np.errstate(divide="ignore", invalid="ignore"):
    fig, axes = plt.subplots(1, 2, figsize=(18, 6))
    for cl, label, ax in zip([cl_me, cl_alpha], [label_me, label_alpha], axes):
        Rl_te = {k: v["te"] / np.sqrt(v["tt"] * v["ee"]) for k, v in cl.items()}
        plot_misc(
            Rl_te,
            cl_lcdm["te"] / np.sqrt(cl_lcdm["tt"] * cl_lcdm["ee"]),
            ylabel=r"$\mathcal{R}_\ell^\mathrm{TE}$",
            label=label,
            yscale="linear",
            ax=ax,
        )
$\sqrt{\mathcal{D}_\ell^\text{EE}(\delta)/\mathcal{D}_\ell^\text{EE}(\Lambda\text{CDM})}$¶
# Note: the *square root* of the EE ratio is plotted (see the ylabel below).
with np.errstate(divide="ignore", invalid="ignore"):
    fig, axes = plt.subplots(1, 2, figsize=(18, 6))
    for cl, label, ax in zip([cl_me, cl_alpha], [label_me, label_alpha], axes):
        cl_ee_ratio = {k: np.sqrt(v["ee"] / cl_lcdm["ee"]) for k, v in cl.items()}
        plot_misc(
            cl_ee_ratio,
            # Reference curve: the ratio for ΛCDM itself, identically 1.
            cl_lcdm["ee"] / cl_lcdm["ee"],
            ylabel=r"$\sqrt{\mathcal{D}_\ell^\mathrm{EE}(\delta)/\mathcal{D}_\ell^\mathrm{EE}(\Lambda\mathrm{CDM})}$",
            label=label,
            yscale="linear",
            ax=ax,
        )
Running MCMC chains¶
This notebook makes use of the GetDist and cobaya_utilities Python packages to plot and analyse the MCMC samples.
from cobaya_utilities import plots, tools
# Use the seaborn-based style for every cobaya_utilities figure.
plots.set_style(use_seaborn=True)
Definitions¶
Define CMB & nuisance parameter names
# Sampled cosmological parameters.
cosmo_params = ["theta_s_100", "logA", "n_s", "omega_b", "omega_cdm", "H0", "tau_reio"]
# Varying-fundamental-constants parameters.
varying_params = ["varying_me", "varying_alpha"]
# Polarisation-efficiency calibration: one A/B split per frequency channel.
calibration_params = [
    f"pe{freq}{split}" for freq in (100, 143, 217) for split in ("A", "B")
]
# No foreground parameters are varied here.
fg_params = []
Set a dictionary holding the paths to the MCMC chains and their plotting options (label, color, …)
# Chain name -> plotting options (legend label, color, chains to exclude).
_polar_label = r"$\Lambda$CDM + polar. efficiencies"
mcmc_samples = {
    "planck_class_lcdm": dict(label=r"$\Lambda$CDM", color="gray"),
    "planck_class_lcdm_polar_eff": dict(label=_polar_label, color="blue"),
    "planck_camb_lcdm_polar_eff": dict(
        label=_polar_label + " (CAMB)", color="cyan", exclude=[4]
    ),
    "planck_class_varying_me": dict(label=r"$\delta m_e$", color="red"),
    # "planck_class_varying_alpha": dict(label=r"$\delta\alpha_\text{EM}$"),
}
MCMC chains¶
Let's plot the chains size
tools.print_chains_size(mcmc_samples)
mcmc 1 | mcmc 2 | mcmc 3 | mcmc 4 | all mcmc | |||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
accept. | total | rate | R-1 | accept. | total | rate | R-1 | accept. | total | rate | R-1 | accept. | total | rate | R-1 | accept. | total | rate | |
$\Lambda$CDM | 31631 | 76590 | 41.3% | 0.08 | 36955 | 90151 | 41.0% | 0.06 | 30796 | 78821 | 39.1% | 0.09 | 19107 | 38711 | 49.4% | 0.11 | 118489 | 284273 | 41.7% |
$\Lambda$CDM + polar. efficiencies | 3844 | 18761 | 20.5% | 18632 | 115409 | 16.1% | 8624 | 68115 | 12.7% | 14059 | 122684 | 11.5% | 45159 | 324969 | 13.9% | ||||
$\Lambda$CDM + polar. efficiencies (CAMB) | 204935 | 540537 | 37.9% | 0.02 | 147094 | 366451 | 40.1% | 0.02 | 106009 | 258246 | 41.0% | 0.03 | 52407 | 490676 | 10.7% | 510445 | 1655910 | 30.8% | |
$\delta m_e$ | 10133 | 93906 | 10.8% | 15618 | 107859 | 14.5% | 9100 | 219 | 4155.3% | 15459 | 100678 | 15.4% | 50310 | 302662 | 16.6% |
and let's have a look at how chains evolve with time and the resulting convergence criteria
# Trace plots of every sampled parameter, five columns per row.
tools.plot_chains(mcmc_samples, params=cosmo_params + varying_params + calibration_params, ncol=5);
# Evolution of the acceptance rate and convergence criterion over time.
tools.plot_progress(mcmc_samples);
Posterior distributions¶
# Load the chains, putting the varying-m_e run first in the plotting order.
samples, kwargs = plots.get_mc_samples(
    mcmc_samples, select_first="planck_class_varying_me", no_progress_bar=False
)
# Triangle plot over the cosmological parameters plus delta m_e.
plots.triangle_plot(samples, params := cosmo_params + ["varying_me"], filled=True, **kwargs);
# Marginalised constraints table for the same parameters.
tools.print_results(samples, params, labels=kwargs["legend_labels"])
$100\theta_\mathrm{s}$ | $\log(10^{10} A_\mathrm{s})$ | $n_\mathrm{s}$ | $\Omega_\mathrm{b} h^2$ | $\Omega_\mathrm{c} h^2$ | $H_0$ | $\tau_\mathrm{reio}$ | $\delta m_e$ | |
---|---|---|---|---|---|---|---|---|
$\delta m_e$ | $ 1.04182\pm 0.00025$ | $ 3.031\pm 0.011$ | $ 0.9659\pm 0.0036$ | $ 0.02239^{+0.00065}_{-0.00040}$ | $ 0.1202^{+0.0036}_{-0.0026}$ | $ 69.4^{+6.1}_{-3.9}$ | $ 0.0530\pm 0.0056$ | $ 1.008^{+0.028}_{-0.018}$ |
$\Lambda$CDM | $ 1.04181\pm 0.00024$ | $ 3.028\pm 0.016$ | $ 0.9669\pm 0.0040$ | $ 0.02222\pm 0.00013$ | $ 0.1190\pm 0.0012$ | $ 67.58\pm 0.52$ | $ 0.0516\pm 0.0075$ | $ $ |
$\Lambda$CDM + polar. efficiencies | $ 1.04183\pm 0.00025$ | $ 3.031\pm 0.011$ | $ 0.9666\pm 0.0038$ | $ 0.02222\pm 0.00013$ | $ 0.1189\pm 0.0012$ | $ 67.61\pm 0.52$ | $ 0.0565^{+0.0045}_{-0.0058}$ | $ $ |
$\Lambda$CDM + polar. efficiencies (CAMB) | $ $ | $ 3.027\pm 0.017$ | $ 0.9673\pm 0.0040$ | $ 0.02224\pm 0.00013$ | $ 0.1190\pm 0.0012$ | $ 67.57\pm 0.53$ | $ 0.0520\pm 0.0078$ | $ $ |
Polarisation efficiencies¶
# Compare the polarisation efficiencies between the CLASS and CAMB chains.
samples, kwargs = plots.get_mc_samples(
    mcmc_samples, selected=[f"planck_{c}_lcdm_polar_eff" for c in ["class", "camb"]]
)
# Overlay the input (default) values: 0.975 for the 217 GHz splits, 1 otherwise.
plots.triangle_plot(
    samples,
    params := calibration_params,
    filled=True,
    inputs=(defaults := {p: 0.975 if "217" in p else 1.0 for p in params}),
    **kwargs,
);
df = tools.print_results(samples, params, labels=kwargs["legend_labels"])
# Append the default values as an extra row of the results table.
df.loc["default parameter values"] = [f"${v}$" for v in defaults.values()]
df
$\eta_\mathrm{100A}$ | $\eta_\mathrm{100B}$ | $\eta_\mathrm{143A}$ | $\eta_\mathrm{143B}$ | $\eta_\mathrm{217A}$ | $\eta_\mathrm{217B}$ | |
---|---|---|---|---|---|---|
$\Lambda$CDM + polar. efficiencies | $ 1.025\pm 0.032$ | $ 0.961\pm 0.028$ | $ 1.046\pm 0.023$ | $ 0.971\pm 0.021$ | $ 0.992^{+0.025}_{-0.031}$ | $ 0.951^{+0.031}_{-0.026}$ |
$\Lambda$CDM + polar. efficiencies (CAMB) | $ 1.021^{+0.030}_{-0.037}$ | $ 0.963^{+0.032}_{-0.027}$ | $ 1.039\pm 0.022$ | $ 0.978\pm 0.021$ | $ 0.995\pm 0.029$ | $ 0.949^{+0.027}_{-0.031}$ |
default parameter values | $1.0$ | $1.0$ | $1.0$ | $1.0$ | $0.975$ | $0.975$ |
Correlation $\delta m_e$ vs. polarisation efficiencies $\eta$¶
# Correlation between delta m_e and each polarisation efficiency.
samples, kwargs = plots.get_mc_samples(mcmc_samples, selected="planck_class_varying_me")
corr = samples[0].corr(["varying_me"] + calibration_params)
# 2D marginals of varying_me vs. each efficiency; each panel is titled with
# the corresponding coefficient from the first row of the correlation matrix.
g = plots.plots_2d(
    samples,
    param_pairs=[["varying_me", c] for c in calibration_params],
    nx=6,
    filled=True,
    colors=["yellow"],
    width_inch=20,
    titles=[f"$r = {r:.3f}$" for r in corr[0, 1:]],
)