Commit
[doc] docstring syntax
JohannesBuchner committed Sep 9, 2022
1 parent a9883ba commit 3131a2f
Showing 2 changed files with 45 additions and 45 deletions.
55 changes: 28 additions & 27 deletions ultranest/hotstart.py
@@ -47,7 +47,7 @@ def get_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, d
The default is recommended. For truly gaussian posteriors,
the student-t can be made more gaussian (by df>=30) for acceleration.
Returns:
Returns
---------
aux_loglike: function
auxiliary loglikelihood function.
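The df>=30 remark above can be checked numerically. The following stand-alone sketch (not part of this commit; it only assumes numpy and scipy are available) compares the Student-t density against a standard normal::

    import numpy as np
    from scipy.stats import norm, t

    x = np.linspace(-5, 5, 401)
    for df in (1, 5, 30, 100):
        # largest pointwise difference between the two densities
        gap = np.max(np.abs(t.pdf(x, df) - norm.pdf(x)))
        print(f"df={df:4d}  max |t - normal| = {gap:.4f}")
    # the gap falls below about 1% of the normal peak (~0.4) once df reaches 30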
@@ -122,7 +122,7 @@ def get_extended_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_
The default is recommended. For truly gaussian posteriors,
the student-t can be made more gaussian (by df>=30) for acceleration.
Returns:
Returns
---------
aux_loglike: function
auxiliary loglikelihood function. Takes d + 1 parameters (see below).
@@ -204,7 +204,7 @@ def get_extended_auxiliary_independent_problem(loglike, transform, ctr, err, df=
The default is recommended. For truly gaussian posteriors,
the student-t can be made more gaussian (by df>=30) for acceleration.
Returns:
Returns
---------
aux_loglike: function
auxiliary loglikelihood function.
@@ -254,7 +254,7 @@ def compute_quantile_intervals(steps, upoints, uweights):
uweights: array
sample weights
Returns:
Returns
---------
ulo: array
list of lower quantiles (at q), one entry for each dimension d.
@@ -263,8 +263,8 @@ def compute_quantile_intervals(steps, upoints, uweights):
"""
ndim = upoints.shape[1]
nboxes = len(steps)
ulos = np.empty((nboxes+1,ndim))
uhis = np.empty((nboxes+1,ndim))
ulos = np.empty((nboxes + 1, ndim))
uhis = np.empty((nboxes + 1, ndim))
for j, pthresh in enumerate(steps):
for i, ui in enumerate(upoints.transpose()):
order = np.argsort(ui)
@@ -276,19 +276,20 @@ def compute_quantile_intervals(steps, upoints, uweights):
uhis[-1] = 1
return ulos, uhis


def compute_quantile_intervals_refined(steps, upoints, uweights, logsteps_max=20):
"""Compute lower and upper axis quantiles.
Parameters
------------
steps: array
list of quantiles q to compute, with dimensions
list of quantiles q to compute, with dimensions
upoints: array
samples, with dimensions (N, d)
uweights: array
sample weights. N entries.
Returns:
Returns
---------
ulo: array
list of lower quantiles (at q), of shape (M, d), one entry per quantile and dimension d.
@@ -297,32 +298,32 @@ def compute_quantile_intervals_refined(steps, upoints, uweights, logsteps_max=20
"""
nboxes = len(steps)
ulos_orig, uhis_orig = compute_quantile_intervals(steps, upoints, uweights)
assert len(ulos_orig) == nboxes+1
assert len(uhis_orig) == nboxes+1
assert len(ulos_orig) == nboxes + 1
assert len(uhis_orig) == nboxes + 1

smallest_axis_width = np.min(uhis_orig[-2,:] - ulos_orig[-2,:])
logsteps = min(logsteps_max, int(np.ceil(-np.log10(max(1e-100, smallest_axis_width)))))

weights = np.logspace(-logsteps, 0, logsteps+1).reshape((-1, 1))
weights = np.logspace(-logsteps, 0, logsteps + 1).reshape((-1, 1))
# print("logspace:", weights, logsteps)
assert len(weights) == logsteps+1, (weights.shape, logsteps)
assert len(weights) == logsteps + 1, (weights.shape, logsteps)
# print("quantiles:", ulos_orig, uhis_orig)
ulos_new = ulos_orig[nboxes-1, :].reshape((1, -1)) * (1 - weights) + 0 * weights
uhis_new = uhis_orig[nboxes-1, :].reshape((1, -1)) * (1 - weights) + 1 * weights
ulos_new = ulos_orig[nboxes - 1, :].reshape((1, -1)) * (1 - weights) + 0 * weights
uhis_new = uhis_orig[nboxes - 1, :].reshape((1, -1)) * (1 - weights) + 1 * weights

# print("additional quantiles:", ulos_new, uhis_new)

ulos = np.vstack((ulos_orig[:-1,:], ulos_new))
uhis = np.vstack((uhis_orig[:-1,:], uhis_new))
# print("combined quantiles:", ulos, uhis)
assert (ulos[-1,:] == 0).all()
assert (uhis[-1,:] == 1).all()
uinterpspace = np.ones(nboxes+logsteps+1)
uinterpspace[:nboxes+1] = np.linspace(0, 1, nboxes+1)
assert 0 < uinterpspace[nboxes-1] < 1, uinterpspace[nboxes]
uinterpspace[nboxes:] = np.linspace(uinterpspace[nboxes-1], 1, logsteps+2)[1:]

uinterpspace = np.ones(nboxes + logsteps + 1)
uinterpspace[:nboxes + 1] = np.linspace(0, 1, nboxes + 1)
assert 0 < uinterpspace[nboxes - 1] < 1, uinterpspace[nboxes]
uinterpspace[nboxes:] = np.linspace(uinterpspace[nboxes - 1], 1, logsteps + 2)[1:]

return ulos, uhis, uinterpspace
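
For concreteness, the shapes returned here can be inspected with a small toy example (an illustrative sketch, not part of the commit; the seed and 2-d Gaussian samples are made up, the steps follow the example quoted in the get_auxiliary_contbox_parameterization docstring below, and ultranest must be installed)::

    import numpy as np
    from ultranest.hotstart import compute_quantile_intervals_refined

    rng = np.random.default_rng(42)
    # toy weighted "posterior" samples in the 2-d unit cube, concentrated near 0.5
    upoints = np.clip(rng.normal(0.5, 0.05, size=(4000, 2)), 0, 1)
    uweights = np.full(len(upoints), 1.0 / len(upoints))

    steps = 10.0**-(1.0 * np.arange(1, 8, 2))   # quantiles 0.1, 1e-3, 1e-5, 1e-7
    ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)

    print(ulos.shape, uhis.shape, uinterpspace.shape)
    # the final row always spans the full unit interval on every axis
    print(ulos[-1], uhis[-1])   # -> [0. 0.] [1. 1.]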


@@ -354,7 +355,7 @@ def get_auxiliary_contbox_parameterization(
and segments it into quantile segments. Within each segment,
the parameter edges in u-space are linearly interpolated.
To see the interpolation quantiles for each axis, use::
steps = 10**-(1.0 * np.arange(1, 8, 2))
ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)
@@ -367,7 +368,7 @@ def get_auxiliary_contbox_parameterization(
auxiliary_usamples: array
Posterior samples (in u-space).
Returns:
Returns
---------
aux_loglike: function
auxiliary loglikelihood function.
@@ -381,7 +382,7 @@ def get_auxiliary_contbox_parameterization(
nsamples, ndim = upoints.shape
assert nsamples > 10
ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)

aux_param_names = param_names + ['aux_logweight']

def aux_transform(u):
@@ -462,7 +463,7 @@ def reuse_samples(
log_weight_threshold: float
Lowest log-weight to consider
Returns:
Returns
---------
results: dict
All information of the run. Important keys:
35 changes: 17 additions & 18 deletions ultranest/integrator.py
@@ -40,7 +40,7 @@ def _get_cumsum_range(pi, dp):
dp: float
Quantile (between 0 and 0.5).
Returns:
Returns
---------
index_lo: int
Index of the item corresponding to quantile ``dp``.
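The helper's body is not shown in this hunk; as a rough, hypothetical illustration of the cumulative-sum idea the docstring describes (not the actual implementation), one could picture::

    import numpy as np

    # hypothetical sketch only: for normalized probabilities pi, find indices
    # bracketing the central 1 - 2*dp probability mass.
    pi = np.array([0.05, 0.10, 0.20, 0.30, 0.20, 0.10, 0.05])
    dp = 0.1
    c = np.cumsum(pi)
    index_lo = int(np.searchsorted(c, dp))
    index_hi = int(np.searchsorted(c, 1 - dp))
    # the tail below index_lo and the tail above index_hi each hold at most dp of the mass
    print(index_lo, index_hi)   # -> 1 5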
@@ -65,7 +65,7 @@ def _sequentialize_width_sequence(minimal_widths, min_width):
min_width: int
Minimum width everywhere.
Returns:
Returns
---------
Lsequence: list of (L, width)
A sequence of L points and the expected tree width at and above it.
@@ -925,6 +925,18 @@ def warmstart_from_similar_file(
):
"""Warmstart from a previous run.
Usage::
aux_paramnames, aux_log_likelihood, aux_prior_transform, vectorized = warmstart_from_similar_file(
'model1/chains/weighted_post_untransformed.txt', parameters, log_likelihood_with_background, prior_transform)
aux_sampler = ReactiveNestedSampler(aux_paramnames, aux_log_likelihood, transform=aux_prior_transform, vectorized=vectorized)
aux_results = aux_sampler.run()
posterior_samples = aux_results['samples'][:,:-1]
See :py:func:`ultranest.hotstart.get_auxiliary_contbox_parameterization`
for more information.
Parameters
------------
usample_filename: str
@@ -934,10 +946,10 @@ def warmstart_from_similar_file(
min_num_samples: int
minimum number of samples in the usample_filename file required.
Too few samples will give a poor approximation.
otherparameters: ...
The remaining parameters have the same meaning as in :class:`ReactiveNestedSampler`.
The remaining parameters have the same meaning as in :class:`ReactiveNestedSampler`.
Returns:
Returns
---------
aux_param_names: list
new parameter list
Expand All @@ -947,19 +959,6 @@ def warmstart_from_similar_file(
new prior transform function
vectorized: bool
whether the new functions are vectorized
Usage::
aux_paramnames, aux_log_likelihood, aux_prior_transform, vectorized = warmstart_from_similar_file(
'model1/chains/weighted_post_untransformed.txt', parameters, log_likelihood_with_background, prior_transform)
aux_sampler = ReactiveNestedSampler(aux_paramnames, aux_log_likelihood, transform=aux_prior_transform,vectorized=vectorized)
aux_sampler.run()
posterior_samples = aux_results['samples'][:,-1]
See :py:func:`ultranest.hotstart.get_auxiliary_contbox_parameterization`
for more information.
"""
# load samples
try:
