Skip to content

Commit

Permalink
Merge branch 'master' into ndes
Browse files Browse the repository at this point in the history
  • Loading branch information
lukashergt authored Mar 6, 2024
2 parents 57af94b + 54170d8 commit 2a949c8
Show file tree
Hide file tree
Showing 23 changed files with 391 additions and 150 deletions.
28 changes: 14 additions & 14 deletions .github/workflows/CI.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- name: Upgrade pip and install linters
run: |
python -m pip install --upgrade pip
Expand Down Expand Up @@ -67,9 +67,9 @@ jobs:
extras: true

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

Expand Down Expand Up @@ -102,7 +102,7 @@ jobs:
python-version: '3.11'

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: conda-incubator/setup-miniconda@v2
with:
Expand All @@ -113,7 +113,7 @@ jobs:
run: |
conda config --append channels conda-forge
conda install pytest pytest-cov
conda install scipy numpy 'matplotlib>=3.6.1' 'pandas>=2.0.0'
conda install scipy numpy 'matplotlib>=3.6.1,<3.9.0' 'pandas>=2.0.0,<2.2.0'
- name: Test with pytest
shell: bash -l {0}
Expand All @@ -125,9 +125,9 @@ jobs:
test-build-n-publish:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Install pypa/build
Expand All @@ -145,9 +145,9 @@ jobs:
minimum-dependencies:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python 3.9
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: 3.9

Expand All @@ -166,9 +166,9 @@ jobs:
latest-dependencies:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up latest stable Python 3
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: 3.x

Expand All @@ -189,9 +189,9 @@ jobs:
check-for-new-versions:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up latest stable Python 3
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: 3.x

Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ jobs:
git-tag-and-release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Get version number
run: |
VERSION="$(grep ':Version:' README.rst | awk '{print $2}')"
Expand All @@ -34,9 +34,9 @@ jobs:
pypi-release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Install pypa/build
Expand All @@ -52,9 +52,9 @@ jobs:
needs: pypi-release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Install dependencies
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/version.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ jobs:
version-is-unit-incremented:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- name: Upgrade pip and install linters
run: |
python -m pip install --upgrade pip
Expand Down
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,5 @@ plikHM_TTTEEE_lowl_lowE_lensing_NS/
*~
.pytest_cache/*
.coverage

.DS_Store
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
anesthetic: nested sampling post-processing
===========================================
:Authors: Will Handley and Lukas Hergt
:Version: 2.7.0
:Version: 2.9.0
:Homepage: https://github.com/handley-lab/anesthetic
:Documentation: http://anesthetic.readthedocs.io/

Expand Down
2 changes: 1 addition & 1 deletion anesthetic/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
import pandas.plotting._misc
from anesthetic._format import _DataFrameFormatter
from anesthetic._version import __version__ # noqa: F401

# TODO: remove this when conda pandas version catches up
from packaging.version import parse
assert parse(pandas.__version__) >= parse('2.0.0')
Expand Down Expand Up @@ -50,3 +49,4 @@ def wrapper(backend=None):

read_hdf = anesthetic.read.hdf.read_hdf
read_chains = anesthetic.read.chain.read_chains
read_csv = anesthetic.read.csv.read_csv
2 changes: 1 addition & 1 deletion anesthetic/_version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = '2.7.0'
__version__ = '2.9.0'
71 changes: 42 additions & 29 deletions anesthetic/examples/perfect_ns.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@
from anesthetic.samples import merge_nested_samples


def gaussian(nlive, ndims, sigma=0.1, R=1, logLmin=-1e-2):
def gaussian(nlive, ndims, sigma=0.1, R=1, logLmin=-1e-2, logLmax=0,
*args, **kwargs):
"""Perfect nested sampling run for a spherical Gaussian & prior.
Up to normalisation this is identical to the example in John Skilling's
Expand All @@ -29,14 +30,20 @@ def gaussian(nlive, ndims, sigma=0.1, R=1, logLmin=-1e-2):
logLmin : float
loglikelihood at which to terminate
logLmax : float
maximum loglikelihood
The remaining arguments are passed to the
:class:`anesthetic.samples.NestedSamples` constructor.
Returns
-------
samples : :class:`anesthetic.samples.NestedSamples`
Nested sampling run
"""

def logLike(x):
return -(x**2).sum(axis=-1)/2/sigma**2
return logLmax - (x**2).sum(axis=-1)/2/sigma**2

def random_sphere(n):
return random_ellipsoid(np.zeros(ndims), np.eye(ndims), n)
Expand All @@ -48,7 +55,8 @@ def random_sphere(n):
while logL.min() < logLmin:
points = r * random_sphere(nlive)
logL = logLike(points)
samples.append(NestedSamples(points, logL=logL, logL_birth=logL_birth))
samples.append(NestedSamples(points, logL=logL, logL_birth=logL_birth,
*args, **kwargs))
logL_birth = logL.copy()
r = (points**2).sum(axis=-1, keepdims=True)**0.5

Expand All @@ -57,14 +65,14 @@ def random_sphere(n):
return samples.loc[samples.logL_birth < logLend].recompute()


def correlated_gaussian(nlive, mean, cov, bounds=None):
def correlated_gaussian(nlive, mean, cov, bounds=None, logLmax=0,
*args, **kwargs):
"""Perfect nested sampling run for a correlated gaussian likelihood.
This produces a perfect nested sampling run with a uniform prior over the
unit hypercube, with a likelihood gaussian in the parameters normalised so
that the evidence is unity. The algorithm proceeds by simultaneously
rejection sampling from the prior and sampling exactly and uniformly from
the known ellipsoidal contours.
This produces a perfect nested sampling run with a uniform prior over
the unit hypercube. The algorithm proceeds by simultaneously rejection
sampling from the prior and sampling exactly and uniformly from the
known ellipsoidal contours.
This can produce perfect runs in up to around d~15 dimensions. Beyond
this rejection sampling from a truncated gaussian in the early stage
Expand All @@ -85,6 +93,12 @@ def correlated_gaussian(nlive, mean, cov, bounds=None):
bounds : 2d array-like, shape (ndims, 2)
bounds of a gaussian, default ``[[0, 1]]*ndims``
logLmax : float
maximum loglikelihood
The remaining arguments are passed to the
:class:`anesthetic.samples.NestedSamples` constructor.
Returns
-------
samples : :class:`anesthetic.samples.NestedSamples`
Expand All @@ -95,7 +109,7 @@ def correlated_gaussian(nlive, mean, cov, bounds=None):
invcov = np.linalg.inv(cov)

def logLike(x):
return -0.5 * ((x-mean) @ invcov * (x-mean)).sum(axis=-1)
return logLmax - 0.5 * ((x - mean) @ invcov * (x - mean)).sum(axis=-1)

ndims = len(mean)

Expand All @@ -104,10 +118,9 @@ def logLike(x):

bounds = np.array(bounds, dtype=float)

logLmax = logLike(mean)

points = np.random.uniform(*bounds.T, (2*nlive, ndims))
samples = NestedSamples(points, logL=logLike(points), logL_birth=-np.inf)
samples = NestedSamples(points, logL=logLike(points), logL_birth=-np.inf,
*args, **kwargs)

while (1/samples.nlive.iloc[:-nlive]).sum() < samples.D_KL()*2:
logLs = samples.logL.iloc[-nlive]
Expand All @@ -120,19 +133,21 @@ def logLike(x):
points = np.random.uniform(*bounds.T, (nlive, ndims))
logL = logLike(points)
i = logL > logLs
samps_1 = NestedSamples(points[i], logL=logL[i], logL_birth=logLs)
samps_1 = NestedSamples(points[i], logL=logL[i], logL_birth=logLs,
*args, **kwargs)

# Ellipsoidal round
points = random_ellipsoid(mean, cov*2*(logLmax - logLs), nlive)
logL = logLike(points)
i = ((points > bounds.T[0]) & (points < bounds.T[1])).all(axis=1)
samps_2 = NestedSamples(points[i], logL=logL[i], logL_birth=logLs)
samps_2 = NestedSamples(points[i], logL=logL[i], logL_birth=logLs,
*args, **kwargs)
samples = merge_nested_samples([samples, samps_1, samps_2])

return samples


def wedding_cake(nlive, ndims, sigma=0.01, alpha=0.5):
def wedding_cake(nlive, ndims, sigma=0.01, alpha=0.5, *args, **kwargs):
"""Perfect nested sampling run for a wedding cake likelihood.
This is a likelihood with nested hypercuboidal plateau regions of constant
Expand All @@ -157,6 +172,9 @@ def wedding_cake(nlive, ndims, sigma=0.01, alpha=0.5):
alpha : float
volume compression between plateau regions
The remaining arguments are passed to the
:class:`anesthetic.samples.NestedSamples` constructor.
"""

def i(x):
Expand Down Expand Up @@ -192,7 +210,8 @@ def logL(x):
live_points[j] = x_
live_likes[j] = logL(x_)

samps = NestedSamples(points, logL=death_likes, logL_birth=birth_likes)
samps = NestedSamples(points, logL=death_likes, logL_birth=birth_likes,
*args, **kwargs)
weights = samps.get_weights()
if weights[-nlive:].sum() < 0.001 * weights.sum():
break
Expand All @@ -201,7 +220,8 @@ def logL(x):
birth_likes = np.concatenate([birth_likes, live_birth_likes])
points = np.concatenate([points, live_points])

return NestedSamples(points, logL=death_likes, logL_birth=birth_likes)
return NestedSamples(points, logL=death_likes, logL_birth=birth_likes,
*args, **kwargs)


def planck_gaussian(nlive=500):
Expand Down Expand Up @@ -244,14 +264,7 @@ def planck_gaussian(nlive=500):
[8.00e-01, 1.20e+00]])

logL_mean = -1400.35

samples = correlated_gaussian(nlive, mean, cov, bounds)

data = samples.iloc[:, :len(columns)].to_numpy()
logL = samples['logL'].to_numpy()
logL_birth = samples['logL_birth'].to_numpy()
logL_birth += logL_mean - samples.logL.mean()
logL += logL_mean - samples.logL.mean()
samples = NestedSamples(data=data, columns=columns, labels=labels,
logL=logL, logL_birth=logL_birth)
return samples
d = len(mean)
logLmax = logL_mean + d/2
return correlated_gaussian(nlive, mean, cov, bounds, logLmax,
columns=columns, labels=labels)
20 changes: 20 additions & 0 deletions anesthetic/labelled_pandas.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,26 @@
import numpy as np
from functools import cmp_to_key
from pandas.errors import IndexingError
import pandas as pd


def read_csv(filename, *args, **kwargs):
    """Read a CSV file into a ``LabelledDataFrame``.

    The file may or may not carry a label level on its index and/or its
    columns.  We probe the richest layout first (labelled index *and*
    labelled columns) and progressively fall back to plainer layouts,
    returning the first parse whose requested axes come back labelled.
    """
    # (read_csv keyword overrides, axes that must turn out labelled)
    candidate_layouts = [
        ({'index_col': [0, 1], 'header': [0, 1]}, (0, 1)),
        ({'index_col': [0, 1]}, (0,)),
        ({'index_col': 0, 'header': [0, 1]}, (1,)),
    ]
    for layout, required_axes in candidate_layouts:
        frame = LabelledDataFrame(pd.read_csv(filename, *args,
                                              **layout, **kwargs))
        if all(frame.islabelled(axis) for axis in required_axes):
            return frame
    # Plain, unlabelled CSV: single index column, single header row.
    return LabelledDataFrame(pd.read_csv(filename, index_col=0,
                                         *args, **kwargs))


def ac(funcs, *args):
Expand Down
Loading

0 comments on commit 2a949c8

Please sign in to comment.