mh.py (forked from ageorgou/ProPPA)
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 16:58:47 2015

@author: ageorgou
"""
import numpy as np


class MetropolisSampler(object):
    """A class implementing a Metropolis-Hastings sampler.

    Attributes:
        required_conf      Required configuration parameters
        supports_enhanced  Whether the sampler supports enhanced features
                           (multiple observation files and general observables)
        supports_partial   Whether the sampler supports partial observations
                           (only some of the species)
    """

    required_conf = ['proposals']
    supports_enhanced = False
    supports_partial = False
    PRINT_EVERY = 500  # How often to report on progress
    def __init__(self, model, conf=None):
        if conf is not None:
            self.apply_configuration(conf)
        self._set_model(model)
        self.n_pars = len(self.priors)
        self.state = [d.rvs() for d in self.priors]
        self.samples = []
        self.current_prior = np.prod(
            [p.pdf(v) for (p, v) in zip(self.priors, self.state)])
        self.current_L = self._calculate_likelihood(self.state)
    @staticmethod
    def prepare_conf(model):
        """Return a dictionary containing a basic configuration for the sampler.

        Create a basic configuration containing all necessary parameters
        for the sampler and return it in a dictionary. Subclasses should extend
        this as needed to account for their own required parameters.
        """
        conf = {'parameters': []}
        for p in model.uncertain:
            par_conf = {'name': p.lhs}
            prior = p.rhs.to_distribution()
            par_conf['prior'] = prior
            # NB: the below line does not work:
            #   par_conf['limits'] = (max(prior.a, 0), min(prior.b, np.inf))
            # because prior.a (prior.b) gives the lower (upper) bound
            # of the support of p ONLY IF p has the default parametrisation
            # (loc = 0, scale = 1).
            # Instead, use the inverse cdf to find the support:
            par_conf['limits'] = (max(prior.ppf(0), 0), min(prior.ppf(1), np.inf))
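            # Illustration of the note above (assuming scipy.stats frozen
            # distributions, e.g. spst.uniform(loc=2, scale=3)): prior.a and
            # prior.b refer to the standardised support, whereas
            # prior.ppf(0) == 2 and prior.ppf(1) == 5 are the actual bounds.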
            conf['parameters'].append(par_conf)
        conf['rate_funcs'] = model.reaction_functions()
        return conf
    def _set_model(self, model):
        """Initialise the sampler according to a model.

        Extract information from a model which the sampler will require in the
        future. Samplers must override this to copy the appropriate model
        contents.
        """
        pass
    def apply_configuration(self, conf):
        """Alter the default behaviour of the sampler.

        Apply the given configuration to specify the parameters of the sampler
        and tune its behaviour. The conf argument should hold information about
        each parameter's prior, proposal and limits, as well as the model's
        rate functions (for example, as generated by the proppa module).
        """
        self.priors = [p['prior'] for p in conf['parameters']]
        self.proposals = [p['proposal'] for p in conf['parameters']]
        self.limits = [p['limits'] for p in conf['parameters']]
        self.rate_funcs = conf['rate_funcs']
    def _calculate_accept_prob(self, proposed):
        """Return the probability that the given state will be accepted.

        The default implementation uses standard Metropolis-Hastings dynamics,
        but subclasses can override this.
        """
        self.proposed_prior = np.prod([p.pdf(v)
                                       for (p, v) in zip(self.priors, proposed)])
        self.proposed_L = self._calculate_likelihood(proposed)
        ratio = ((self.proposed_prior * self.proposed_L) /
                 (self.current_prior * self.current_L))
        return ratio
    def take_sample(self, append=True):
        """Take a single sample from the chain, optionally retaining it.

        This can be used either to add another sample to the list that will be
        returned, or to just advance the state of the chain by one iteration.
        In the latter case, use append=False so that the new state is not
        registered in the list; note that this may affect the validity of the
        returned result, but it can be useful, e.g. for thinning the samples.
        """
        proposed = self._propose_state()
        acceptance_prob = self._calculate_accept_prob(proposed)
        if np.random.rand() <= acceptance_prob:
            # With probability acceptance_prob, move to the proposed state
            self.current_L = self.proposed_L
            self.current_prior = self.proposed_prior
            self.state = proposed
        if append:
            self.samples.append(self.state)
    def gather_samples(self, n_samples):
        """Sample from the target distribution, and return all samples so far."""
        n = 0
        while n < n_samples:
            self.take_sample()
            n = n + 1
            if n % self.PRINT_EVERY == 0:
                print("Taken", n, "samples")
        return self.samples
    def _propose_state(self):
        """Choose and return the state of the chain to be examined next."""
        proposed_all = [0] * self.n_pars
        i = 0
        while i < self.n_pars:
            lower, upper = self.limits[i]
            within_limits = False
            while not within_limits:
                proposed = self.proposals[i](self.state[i]).rvs()
                within_limits = lower < proposed < upper
            proposed_all[i] = proposed
            i = i + 1
        return tuple(proposed_all)
    def _calculate_likelihood(self, proposed):
        """Compute the likelihood of a given state.

        Compute the likelihood of the given state for the model's observations,
        under the sampler's assumptions.
        Subclasses must override this to fully define the sampler, unless they
        have changed the acceptance probability computation to not use the
        likelihood value.
        """
        raise NotImplementedError
    def reset(self):
        """Reset the sampler by erasing all samples."""
        self.samples = []
        self.state = tuple(d.rvs() for d in self.priors)
        # Recompute the cached prior and likelihood so they match the new state
        self.current_prior = np.prod(
            [p.pdf(v) for (p, v) in zip(self.priors, self.state)])
        self.current_L = self._calculate_likelihood(self.state)
if __name__ == "__main__":
    from numpy import inf
    import scipy.stats as spst
    import matplotlib.pylab

    # make default model
    species = ['S', 'I', 'R']
    reactions = ['infect', 'cure']
    params = ['r1', 'r2']
    stoich = []
    # make default configuration
    default_conf = {'obs': [], 'parameters': []}
    parameter_conf = {}
    parameter_conf['prior'] = spst.uniform(loc=0, scale=10)
    parameter_conf['proposal'] = lambda x: spst.norm(loc=x, scale=1)
    parameter_conf['limits'] = (-inf, inf)
    default_conf['parameters'].append(parameter_conf)
    # apply_configuration expects a 'rate_funcs' entry; none is needed here
    default_conf['rate_funcs'] = []

    class GaussianSampler(MetropolisSampler):
        target_dist = spst.norm(loc=5, scale=1)

        # Override the base-class hook (note the leading underscore)
        def _calculate_likelihood(self, proposed):
            return self.target_dist.pdf(proposed)

    # The first argument is the model, which this toy sampler does not use
    s = GaussianSampler(None, default_conf)
    n_samples = 1000
    samples = s.gather_samples(n_samples)
    matplotlib.pylab.hist(np.array(samples))
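
    # Rough sanity check (illustrative): the chain targets a N(5, 1) density
    # truncated to the uniform prior's support, so the sample mean should be
    # close to 5 once the chain has mixed (no burn-in removal is done here).
    print("Sample mean:", np.array(samples).mean())
    matplotlib.pylab.show()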