Perhaps this is a pathalogical example but I'm trying to integrate a function over a large domain.
I want to integrate many thousands of functions (with different parameters) and sometimes the important area of the function is very small or very large. Therefore, I use the adapt_to_samples
to give the integrator a reasonable shot at finding that area.
Traceback (most recent call last):
File "/home/ppzscr/star-forming/integrator.py", line 100, in <module>
r = integrator.integrate(nadapt=1e5, alpha=0.5)
File "/home/ppzscr/star-forming/integrator.py", line 84, in integrate
self.adaptive_map.adapt_to_samples(self._samples, func(self._samples), nitn=nadapt) # precondition map
File "_vegas.pyx", line 736, in vegas._vegas.AdaptiveMap.adapt_to_samples
File "_vegas.pyx", line 555, in vegas._vegas.AdaptiveMap.adapt
ZeroDivisionError: float division
Process finished with exit code 1
from sklearn.mixture import GaussianMixture
import vegas
from astropy.cosmology import Planck15
import numpy as np
from scipy import stats
class Integrator:
def __init__(self, logm_observed, logm_error, f_observed, f_error, z_observed, z_error,
mu, V,
logl_range, logm_range,
z_range=(0., 1.), zpoints=1000, n_adapt_samples=1000,
zdeg=2, cosmo=Planck15, alpha=-0.71):
self.f_observed = f_observed
self.f_error = f_error
self.z_observed = z_observed
self.z_error = z_error
self.logm_observed = logm_observed
self.logm_error = logm_error
self.mu = mu
self.V = V
self.gaussian_component = stats.multivariate_normal(mu, V)
self.z_range = z_range
self.alpha = alpha
_zs = np.linspace(*z_range, num=zpoints)
_dls = cosmo.luminosity_distance(_zs)
self.dl_params = np.polyfit(_zs, Planck15.luminosity_distance(_zs), zdeg)
self.logm_range = logm_range
self.logl_range = logl_range
self.adaptive_map = vegas.AdaptiveMap([logm_range, logl_range, z_range]) # uniform map
self.logm_dist = stats.norm(self.logm_observed, self.logm_error)
self.f_dist = stats.norm(self.f_observed, self.f_error)
self.z_dist = stats.norm(self.z_observed, self.z_error)
self._samples = self.generate_map_samples(n_adapt_samples)
def dl(self, z):
return np.polyval(self.dl_params, z)
def logl_to_f(self, logl, z):
return np.exp(logl - np.log(4 * np.pi) - (2 * np.log(self.dl(z))) + ((1 + self.alpha) * np.log(1 + z)))
def f_to_logl(self, f, z):
return np.log(f * 4 * np.pi) + (np.log(self.dl(z))*2) - ((1 + self.alpha) * np.log(1 + z))
def invdetjacobian(self, f, z):
"""
The jacobian for the transform `P(f,z | vL, vz) -> P(log[L], z | vL, vz)` is
[[1/f, dL/dz],
[0, 1]]
1 / abs(determinant) = f
"""
return f
def logprob_observed_given_true(self, true_m, true_logl, true_z):
true_f = self.logl_to_f(true_logl, true_z)
logl_part = self.f_dist.logpdf(true_f) - np.log(self.invdetjacobian(true_f, true_z))
z_part = self.z_dist.logpdf(true_z)
m_part = self.logm_dist.logpdf(true_m)
return logl_part + z_part + m_part
def logprob_gaussian_component(self, true_mlogl):
return self.gaussian_component.logpdf(true_mlogl)
def integrand(self, x):
return np.exp(self.logprob_observed_given_true(x[:, 0], x[:, 1], x[:, 2]) +
self.logprob_gaussian_component(x[:, :2]))
def sample_true_given_observed(self, n):
zs = self.z_dist.rvs(n)
fs = self.f_dist.rvs(n)
logms = self.logm_dist.rvs(n)
logls = self.f_to_logl(fs, zs)
r = np.stack([logms, logls, zs]).T
return r[np.isnan(r).all(axis=1)]
def generate_map_samples(self, n):
comp_samples = np.concatenate([self.gaussian_component.rvs(n), self.z_dist.rvs((n, 1))], axis=1)
samples = np.concatenate([self.sample_true_given_observed(n), comp_samples])
gmm = GaussianMixture(1).fit(samples)
return np.concatenate([samples, gmm.sample(n)[0]])
def integrate(self, neval=1e5, nitn=10, nadapt=10, alpha=0.1):
func = vegas.batchintegrand(self.integrand)
self.adaptive_map.adapt_to_samples(self._samples, func(self._samples), nitn=nadapt) # precondition map
integ = vegas.Integrator(self.adaptive_map, alpha=alpha)
return integ(func, neval=neval, nitn=nitn)
def plot_result(self, r):
import matplotlib.pyplot as plt
plt.errorbar(np.arange(len(r.itn_results)), *zip(*[(i.mean, i.sdev) for i in r.itn_results]))
plt.axhspan(r.mean - r.sdev, r.mean + r.sdev, alpha=0.2)
if __name__ == '__main__':
mu = [11, 20.8]
cov = np.eye(2) * 0.1
f, ferr, z, zerr = 0, 1e-4, 0.085, 0.0001
m, merr = 11, 0.1
integrator = Integrator(m, merr, f, ferr, z, zerr, mu, cov, [0, 50], [0, 50])
r = integrator.integrate()
print('result = %s Q = %.2f' % (r, r.Q))
print(r.summary())
There are many adaption samples which have a value of 0 but vegas should integrate anyway, right?