Commit 4be2cb31 authored by lesgourg's avatar lesgourg
Browse files

Main new features are P_cb, P_k_eq, RealSpaceInterface, new notebooks and few bugfixes

* devel: (54 commits)
  updated doc
  added a few comments in spectra related to P_cb
  updated version  number to 2.7.0
  fixed minor issue with dcdm in input
  restored the empty directory RealSpaceInterface/cache/
  added 11 notebooks and equivalent scripts (cambridge version)
  pushing the Real Space Interface
  further cosmetic changes in P_cb
  few extra comments to Pcb part
  Fixed a bug leading to double free when combining the new quadrature strategies with shooting
  Fixed typo in ncdm number density, cf. issue #225
  final polishing of pk_eq method
  taken care of the case that an x-array with 2 entries only needs to be splined by using the natural_spline and not the estimated derivative spline method by default in this particular case
  sigma8 and sigma8_cb now computed in two separate functions to avoid confusion
  mem leak fix
  Pcb changes: M and CB share the same tau_min_nl
  Nils pk_type trick
  transfer.c dNdz according to Euclid IST specification
  removed the enum halofit_found_k_max
parents de7d9d7e 55a99c61
import os
import logging
import cv2
import numpy as np
from classy import Class
from Calc2D.TransferFunction import ComputeTransferFunctionList
from Calc2D.DataGeneration import GenerateGaussianData, GenerateSIData
from Calc2D.DataPropagation import PropagateDatawithList
from Calc2D.rFourier import *
from Calc2D.Database import Database
from collections import namedtuple
import config
# Light-weight result containers returned by Calculation.calculate_spectra():
ClSpectrum = namedtuple("Cls", ["l", "tCl"])  # multipoles and raw temperature C_l values
PkSpectrum = namedtuple("Pkh", ["kh", "Pkh"])  # wavenumber samples and matter power values
def normalize(real):
    """
    Given the `real` data, i.e. either a 2d array or a flattened 1d array
    of the relative density perturbations, normalize its values as follows:
    The client expects the values to be in the interval of [-1, 1].
    Take a symmetric interval around a `real` value of 0 and linearly
    map it to the required interval [-1, 1].
    """
    minimum, maximum = real.min(), real.max()
    bound = max(abs(maximum), abs(minimum))
    if bound == 0:
        # Bug fix: an all-zero field used to produce 0/0 -> NaN; a zero
        # field is already inside [-1, 1], so return it unchanged.
        return real
    return real / bound
class Calculation(object):
    """Holds the state of one real-space visualisation session: cosmological
    parameters, the initial Fourier-space field, transfer functions and the
    derived spectra.

    NOTE(review): this scrape of the file is incomplete — several original
    lines (decorators, parameter lists, dict literals, return values) are
    missing.  The surviving code is left untouched below; each suspected gap
    is flagged with a NOTE(review) comment.
    """

    def __init__(self,
                 resolution = 200,
                 # NOTE(review): the remaining keyword parameters (gauge,
                 # evolver, P_k_max, kperdecade, kbins, ...) and the end of
                 # this signature are missing from the scrape.
        # also sets `endshape` through setter
        self.resolution = resolution
        self.gauge = gauge
        self.evolver = evolver
        self.P_k_max = P_k_max
        self.kperdecade = kperdecade
        self.redshift = None # to be set later
        self.size = None # to be set later
        # logarithmic k sampling used when evaluating transfer functions
        self.krange = np.logspace(-4, 1, kbins)

    # NOTE(review): presumably decorated with @property in the original.
    def resolution(self):
        return self._resolution

    # NOTE(review): presumably decorated with @resolution.setter — assigning
    # `resolution` also fixes the (square) output image shape.
    def resolution(self, resolution):
        self._resolution = resolution
        self.endshape = (resolution, resolution)

    def getData(self, redshiftindex):
        """Propagate the stored Fourier field to the given redshift index and
        return (real-space fields, Fourier fields, (min, max) of last field)."""
        FValuenew = PropagateDatawithList(
            # NOTE(review): the argument list is missing here;
            # PropagateDatawithList expects (k, FValue, zredindex,
            # transferFunctionlist).
        Valuenew = dict()
        FValue_abs = np.abs(self.FValue)
        _min, _max = FValue_abs.min(), FValue_abs.max()
        # NOTE(review): on Python 3, `/ 2` yields a float; cv2.resize expects
        # an integer size — confirm intended Python version.
        dimensions = (self.endshape[0] / 2, self.endshape[1])
        for quantity, FT in FValuenew.items():
            FT_abs = np.abs(FT)
            FT_normalized = cv2.resize(FT_abs, dimensions).ravel()
            # min-max scale the Fourier amplitudes with the initial extrema
            FT_normalized = (FT_normalized - _min) / (_max - _min)
            # NOTE(review): FT_normalized is computed but never stored — the
            # line that consumed it appears to be missing from the scrape.
            real = realInverseFourier(FT.reshape(self.FValue.shape))
            # real = cv2.resize(real, self.endshape).ravel()
            real = real.ravel()
            minimum, maximum = real.min(), real.max()
            Valuenew[quantity] = normalize(real)
        # NOTE(review): (minimum, maximum) refer to the LAST field iterated.
        return Valuenew, FValuenew, (minimum, maximum)

    def getInitialData(self):
        """Return the initial real-space field (normalized), the min-max
        scaled Fourier amplitudes, and the field's (min, max)."""
        # for odd values of self._resolution, this is necessary
        Value = cv2.resize(realInverseFourier(self.FValue), self.endshape)
        minimum, maximum = Value.min(), Value.max()
        Value = normalize(Value)
        assert Value.size == self.resolution ** 2
        return Value.ravel(), cv2.resize(
            (np.abs(self.FValue) - np.abs(self.FValue).min()) /
            (np.abs(self.FValue).max() - np.abs(self.FValue).min()),
            (self.endshape[0] / 2, self.endshape[1])).ravel(), (minimum,
            # NOTE(review): the return statement is truncated here; it
            # presumably ended with `maximum)`.

    def getTransferData(self, redshiftindex):
        """Sample every stored transfer function at self.krange for the given
        redshift index; returns (dict of samples, krange)."""
        return {field: transfer_function[redshiftindex](self.krange) for field, transfer_function in self.TransferFunctionList.items()}, self.krange

    def setCosmologialParameters(self, cosmologicalParameters):
        # (sic) the typo in this method name is part of the public interface.
        self.cosmologicalParameters = cosmologicalParameters
        # Calculate transfer functions
        self.TransferFunctionList = ComputeTransferFunctionList(self.cosmologicalParameters, self.redshift)
        # Calculate Cl's
        self.tCl, self.mPk = self.calculate_spectra(self.cosmologicalParameters)

    def calculate_spectra(self, cosmo_params, force_recalc=False):
        """Compute (or load from cache) the CMB temperature spectrum and the
        matter power spectrum for `cosmo_params`."""
        settings = cosmo_params.copy()
        # NOTE(review): these entries have lost their enclosing
        # `settings.update({ ... })` call in the scrape.
        "output": "tCl,mPk",
        "evolver": "1",
        "gauge": "newtonian",
        "P_k_max_1/Mpc": 10,
        database = Database(config.DATABASE_DIR, "spectra.dat")
        if settings in database and not force_recalc:
            # cache hit: reuse previously computed spectra
            data = database[settings]
            ell = data["ell"]
            tt = data["tt"]
            kh = data["kh"]
            Pkh = data["Pkh"]
            self.z_rec = data["z_rec"]
            # NOTE(review): an early return for the cache branch appears to
            # be missing; as written, execution would fall through.
        cosmo = Class()
        # NOTE(review): `cosmo.set(settings)` and `cosmo.compute()` appear to
        # be missing here — raw_cl() below cannot work without them.
        # Cl's
        data = cosmo.raw_cl()
        ell = data["ell"]
        tt = data["tt"]
        # Matter spectrum
        k = np.logspace(-3, 1, config.MATTER_SPECTRUM_CLIENT_SAMPLES_PER_DECADE * 4)
        # NOTE(review): the vectorized call target was lost in the scrape —
        # presumably something like `np.vectorize(cosmo.pk)(k, 0)`.
        Pk = np.vectorize(, 0)
        kh = k * cosmo.h()
        Pkh = Pk / cosmo.h()**3
        # Get redshift of decoupling
        z_rec = cosmo.get_current_derived_parameters(['z_rec'])['z_rec']
        self.z_rec = z_rec
        # Store to database
        database[settings] = {
            "ell": data["ell"],
            "tt": data["tt"],
            "kh": k,
            "Pkh": Pk,
            "z_rec": z_rec,
        # NOTE(review): the closing `}` of this dict literal is missing.
        # Drop the l = 0, 1 entries before returning
        return ClSpectrum(ell[2:], tt[2:]), PkSpectrum(kh, Pkh)

    # NOTE(review): presumably decorated with @property in the original.
    def z_dec(self):
        """Redshift of decoupling; valid only after calculate_spectra() ran."""
        if self.z_rec is None:
            raise ValueError("z_rec hasn't been computed yet")
        return self.z_rec

    def setInitialConditions(self,
                             # NOTE(review): most of this parameter list
                             # (sigma, initialDataType, SI amplitude, ...) is
                             # missing, and a logger reference was lost before
                             # the "Generating Initial Condition" call below.
                             SI_ns=0.96):"Generating Initial Condition")
        if initialDataType == "Gaussian":
            self.ValueE, self.FValue, self.k, self.kxE, self.kyE = GenerateGaussianData(
                sigma, self.size, self.resolution)
        elif initialDataType == "SI":
            self.ValueE, self.FValue, self.k, self.kxE, self.kyE = GenerateSIData(
                # NOTE(review): argument list truncated in the scrape; an
                # `else:` presumably preceded the warning below.
        logging.warn("initialDataType " + str(initialDataType) + " not found")
import logging
import numpy as np
import cv2
from Calc2D.rFourier import realFourier, realInverseFourier
def GenerateGaussianData(sigma, size, points, A=1):
    """Build a Gaussian bump of amplitude `A` and width `sigma` on a
    `points` x `points` grid spanning [-size/2, size/2], and return its
    real-space extrema together with its (x-shifted) real FFT.

    Returns (ValueE, FValue, k, kx, ky) where ValueE, kx and ky are
    (min, max) tuples — only the extremes are kept to save memory — and
    k is the grid of |k| magnitudes matching FValue.
    """
    axis = np.linspace(-size / 2.0, size / 2.0, points)
    step = axis[1] - axis[0]
    # sparse 'ij' meshgrid keeps memory low; the indexing order matters
    x, y = np.meshgrid(axis, axis, indexing='ij', sparse=True)
    Value = A * np.exp(-(x**2 + y**2) / (2 * sigma**2))
    kx, ky, FValue = realFourier(step, Value)
    kx_grid, ky_grid = np.meshgrid(kx, ky, indexing='ij', sparse=True)
    k = np.sqrt(kx_grid**2 + ky_grid**2)
    del kx_grid, ky_grid
    # collapse full arrays to their extremal values before returning
    kx = (min(kx), max(kx))
    ky = (min(ky), max(ky))
    ValueE = (Value.min(), Value.max())
    return ValueE, FValue, k, kx, ky
def GenerateSIData(A, size, points, limit=None, ns=0.96):
    """Generate a random initial field with a (near) scale-invariant
    power-law spectrum of amplitude `A` and spectral index `ns`.

    If `limit` is None, every mode is drawn; if it is a (kmin, kmax)
    list/tuple, only modes with kmin < k < kmax are filled in.  Returns
    (ValueE, FValue, k, kx, ky) like GenerateGaussianData.

    Fixes: `limit == None` -> `is None`, `type(...) ==` -> isinstance, and
    the final `raise` is restored to an explicit `else:` branch (it had
    lost its `else:` line, leaving it orphaned).
    """
    xr = np.linspace(-size / 2.0, size / 2.0, points)
    yr = np.linspace(-size / 2.0, size / 2.0, points)
    step = xr[1] - xr[0]
    x, y = np.meshgrid(
        xr, yr, indexing='ij', sparse=True)  # indexing is important
    del xr, yr
    # zero field: only needed to obtain the FFT grid/shape
    Value = 0 * x + 0 * y
    kx, ky, FValue = realFourier(step, Value)  # FValue == 0
    kxr, kyr = np.meshgrid(kx, ky, indexing='ij', sparse=True)
    k = np.sqrt(kxr**2 + kyr**2)
    del kxr, kyr
    # exponent of the power law applied to k
    exponent = 2 - (ns - 1) * 2. / 3.
    if limit is None:
        ktilde = k.flatten()
        ktilde[np.argmin(k)] = 10**9  # just let the background be arbitrary low
        ktilde = ktilde.reshape(k.shape)
        # independent Gaussian draws for the real and imaginary parts
        FValue = np.random.normal(
            scale=np.sqrt(A / ktilde**exponent) / np.sqrt(2)) + np.random.normal(
            scale=np.sqrt(A / ktilde**exponent) / np.sqrt(2)) * 1j
    elif isinstance(limit, (list, tuple)):
        iunder, junder = np.where(k < limit[1])
        for t in range(len(iunder)):
            if k[iunder[t]][junder[t]] > limit[0] and k[iunder[t]][junder[t]] > 0:
                sigma_k = np.sqrt(A / k[iunder[t]][junder[t]]**exponent) / np.sqrt(2)
                FValue[iunder[t]][junder[t]] = np.random.normal(
                    scale=sigma_k) + np.random.normal(scale=sigma_k) * 1j
    else:
        raise ValueError("limit must be None or tuple or list")
    Value = realInverseFourier(FValue)
    kx = (min(kx), max(kx))
    ky = (min(ky), max(ky))
    ValueE = (Value.min(), Value.max())
    return ValueE, FValue, k, kx, ky
import numpy as np
#uses one dimensional interpolation
def PropagateDatawithListOld(k,FValue,zredindex,transferFunctionlist):
    """Single-field variant: multiply the Fourier field by the transfer
    function at index `zredindex`, evaluated at each |k|."""
    transfer = transferFunctionlist[zredindex]
    propagated = transfer(k.ravel()) * FValue.ravel()
    return propagated.reshape(FValue.shape)
def PropagateDatawithList(k, FValue, zredindex, transferFunctionlist):
    """Propagate the Fourier field to the redshift at `zredindex` for every
    field in `transferFunctionlist` (a dict of lists of 1-D interpolators);
    returns a dict mapping field name to the propagated Fourier array."""
    flat_k = k.ravel()
    flat_F = FValue.ravel()
    return {
        name: (functions[zredindex](flat_k) * flat_F).reshape(FValue.shape)
        for name, functions in transferFunctionlist.items()
    }
#module with uses two dimensional interpolation and propagates all data at once (fastest but high memory consumption)
def PropagateAllData(k,FValue,allzred,transferFunction):
    """Propagate the Fourier field to every redshift at once using a 2-D
    interpolated transfer function (fastest, but memory-hungry: the result
    is one complex array per redshift)."""
    n_rows, n_cols = FValue.shape
    allFValue = np.ones((len(allzred), n_rows, n_cols), dtype=complex)
    for row in range(n_rows):
        # one call evaluates T(z, k) for all redshifts along this k-row
        allFValue[:, row, :] = transferFunction(allzred, k[row]) * FValue[row]
    return allFValue
#module with uses 2 dimensional interpolation (slowest but can be useful if the set of redshift changes very often)
def PropagateData(k,FValue,zred,transferFunction):
    """Propagate the Fourier field to a single redshift `zred` using a 2-D
    interpolated transfer function (slowest; useful when the redshift set
    changes very often).

    Bug fix: the loop wrote to (and returned) the undefined name
    `allFValue` — a NameError on first call; the buffer actually allocated
    is `FValuenew`.
    """
    FValuenew = np.ones(FValue.shape,dtype=complex)
    for kxindex in range(FValue.shape[0]):
        FValuenew[kxindex,:] = transferFunction(zred,k[kxindex])*FValue[kxindex]
    return FValuenew
import pickle
import os
import logging
import uuid
class Database:
def __init__(self, directory, db_file="database.dat"): = directory
self.db_file = db_file
if not os.path.isdir(directory):
raise ValueError("'{}' is not a directory!".format(directory))
self.db_path = os.path.join(directory, db_file)
if not os.path.exists(self.db_path):"No database found; Creating one at {}.".format(self.db_path))
with open(self.db_path, "w") as f:
pickle.dump(dict(), f)
self.db = self.__read_database()
def __read_database(self):
with open(self.db_path) as f:
return pickle.load(f)
def __write_database(self):
with open(self.db_path, "w") as f:
pickle.dump(self.db, f)
def __create_file(self, data):
filename = str(uuid.uuid4())
with open(os.path.join(, filename), "w") as f:
pickle.dump(data, f)
return filename
def __get_frozen_key(self, key):
return frozenset(key.items())
def __getitem__(self, key):
frozen_key = self.__get_frozen_key(key)
if frozen_key in self.db:
filename = self.db[frozen_key]
with open(os.path.join(, filename)) as f:
return pickle.load(f)
raise KeyError("No data for key: {}".format(key))
def __setitem__(self, key, data):
frozen_key = self.__get_frozen_key(key)
self.db[frozen_key] = self.__create_file(data)
def __contains__(self, key):
Return whether `self` contains a record
for the given `key`.
return self.__get_frozen_key(key) in self.db
\ No newline at end of file
import os.path
import pickle
import uuid
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline, RectBivariateSpline
import sys
import logging
from classy import Class
import Calc2D.Database as Database
import config
TRANSFER_QUANTITIES = ["d_g", "d_ur", "d_cdm", "d_b", "d_g/4 + psi"]
def ComputeTransferData(settings, redshift):
    """Return CLASS transfer functions for each requested redshift,
    serving from the on-disk Database cache when possible.

    Returns (list of per-redshift field dicts, redshift).
    """
    # cache key = CLASS settings + the tuple of redshifts
    database_key = settings.copy()
    database_key.update({'redshift': tuple(redshift)})
    database = Database.Database(config.DATABASE_DIR)
    if database_key in database:
        return database[database_key], redshift
    cosmo = Class()
    # NOTE(review): `cosmo.set(settings)` and `cosmo.compute()` appear to be
    # missing here (lost in extraction) — get_transfer() cannot work without
    # them.
    outputData = [cosmo.get_transfer(z) for z in redshift]
    # Calculate d_g/4+psi
    for transfer_function_dict in outputData:
        transfer_function_dict["d_g/4 + psi"] = transfer_function_dict["d_g"]/4 + transfer_function_dict["psi"]
    # Now filter the relevant fields
    fields = TRANSFER_QUANTITIES + ["k (h/Mpc)"]
    outputData = [{field: outputData[i][field] for field in fields} for i in range(len(redshift))]
    # store in the cache for next time
    database[database_key] = outputData
    return outputData, redshift
def ComputeTransferFunctionList(cosmologicalParameters, redshift, kperdecade=200, P_k_max=100):
    """Build interpolated transfer functions T(k), per field and per
    redshift, normalized to 1 at the smallest tabulated k and anchored
    at k = 0."""
    class_settings = cosmologicalParameters.copy()
    # NOTE(review): the following entries have lost their enclosing
    # `class_settings.update({ ... })` call in the scrape.
    "output": "mTk",
    "gauge": "newtonian",
    "evolver": "1",
    "P_k_max_h/Mpc": P_k_max,
    "k_per_decade_for_pk": kperdecade,
    "z_max_pk": str(max(redshift)),
    data_dict, redshift = ComputeTransferData(class_settings, redshift)
    transfer_functions = {field: [] for field in TRANSFER_QUANTITIES}
    for i in range(len(redshift)):
        k_data = data_dict[0]["k (h/Mpc)"] * cosmologicalParameters["h"] #in order to get k [1/Mpc]
        # prepend k = 0 so the spline is anchored at the large-scale limit
        k_data_zero = np.concatenate(([0.0], k_data))
        # NOTE(review): an inner `for field in TRANSFER_QUANTITIES:` header
        # and the append of `interpolated_func` to transfer_functions[field]
        # appear to be missing from this scrape.
        data = data_dict[i][field] / data_dict[i][field][0]
        data_zero = np.concatenate(([1.0], data))
        interpolated_func = InterpolatedUnivariateSpline(k_data_zero, data_zero)
    return transfer_functions
import numpy as np
import numpy.fft as fft
def realFourier(step, Value):
    """Real 2-D FFT of `Value`, with the x axis (axis 0) shifted so that
    k = 0 sits in the middle.

    Returns (kx, ky, FValue): angular wavenumbers for each axis and the
    shifted half-spectrum.  Bug fix: `ky` is now derived from
    Value.shape[1] — the axis rfft2 half-transforms — instead of
    Value.shape[0], which was only correct for square inputs.
    """
    # shift only axis 0; rfft2 keeps just the non-negative half of axis 1
    FValue = np.fft.fftshift(
        np.fft.rfft2(Value), axes=(0))
    kx = np.fft.fftshift(np.fft.fftfreq(Value.shape[0], d=step)) * 2 * np.pi
    ky = np.fft.rfftfreq(Value.shape[1], d=step) * 2 * np.pi
    return kx, ky, FValue
def realInverseFourier(FValue):
    """Invert realFourier: undo the x-axis (axis 0) shift, then apply the
    inverse real 2-D FFT to get the real-space field back."""
    unshifted = np.fft.ifftshift(FValue, axes=(0))
    return np.fft.irfft2(unshifted)
def realInverseAllFourier(allFValue):
    """Batched inverse of realFourier: axis 0 indexes the batch, so undo
    the shift on axis 1 and invert the real FFT over axes (1, 2)."""
    unshifted = np.fft.ifftshift(allFValue, axes=(1))
    return np.fft.irfftn(unshifted, axes=(1, 2))
For installation of python packages, run
pip install -r requirements.txt
Launch the application with
Then in any browser open the URL
Cache files are located in cache/, so to clear the cache, run
rm cache/*
import as cm
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
OUTPUT_DIR = os.path.join("static", "images", "colormaps")
WIDTH = 512
def create_image(cmap, width):
    """Render `cmap` as a 1 x `width` RGBA strip and return it as a PIL
    image (values sampled uniformly over [0, 1])."""
    samples = np.linspace(0, 1, width)
    rgba = cmap(samples).reshape((1, width, 4))
    return Image.fromarray(np.uint8(255 * rgba))
# Colormaps offered to the client, grouped by category.
cmap_names = {}
cmap_names['Uniform'] = [
    'viridis', 'plasma', 'inferno', 'magma']
cmap_names['Diverging'] = [
    'seismic', 'RdYlBu', 'Spectral'
]  # NOTE(review): closing bracket restored; it was lost in extraction
cmap_names['Miscellaneous'] = ['jet']

if __name__ == "__main__":
    # Render every colormap to a PNG strip under OUTPUT_DIR/<category>/.
    for category in cmap_names:
        category_dir = os.path.join(OUTPUT_DIR, category)
        if not os.path.exists(category_dir):
            # NOTE(review): body line restored — the original's directory
            # creation was lost in extraction.
            os.makedirs(category_dir)
        for name in cmap_names[category]:
            result = create_image(plt.get_cmap(name), width=WIDTH)
            output_path = os.path.join(category_dir, "{}.png".format(name))
            # NOTE(review): the save call was lost in extraction; this is
            # the natural reconstruction given `result` and `output_path`.
import os
# Default port number to listen on. Can be overridden by passing a port number
# as the first command line argument, e.g. `python 1234`
PORT = 7777
# Directory to store previously computed transfer functions, spectra etc. in
DATABASE_DIR = "cache"
# Maximum number of thread pool workers (only required for multi-user usage)
# NOTE(review): the constant this comment documents appears to be missing
# from this scrape.
# Path of colormap directory relative to the static directory from which
# tornado serves static files
COLORMAP_PATH = os.path.join("images", "colormaps")
# number of sample points for the transfer function that is displayed
# in the client
# NOTE(review): the constant for this comment appears to be missing.
# number of sample points for the matter spectrum that is displayed
# in the client per decade
# NOTE(review): constant missing; other code references
# MATTER_SPECTRUM_CLIENT_SAMPLES_PER_DECADE.
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment