optimize.py
# coding=utf-8
# Utility methods for optimizing a GP
# -----------------------------------------------------------------------------
from functools import partial

import scipy.optimize as op
import numpy as np
import george


# Define the objective function (negative log-likelihood in this case).
def nll(p, gp=None, y_obs=None):
    """
    gp: Gaussian process object
    y_obs: observations the model is fit to
    p: parameters over which the model is optimized
    """
    # Update the kernel parameters and compute the likelihood.
    gp.kernel[:] = p
    ll = gp.lnlikelihood(y_obs, quiet=True)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25
# Gradient of the objective function. Used by some optimization methods.
def grad_nll(p, gp=None, y_obs=None):
    # Update the kernel parameters and compute the gradient of the likelihood.
    gp.kernel[:] = p
    return -gp.grad_lnlikelihood(y_obs, quiet=True)
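

# Note: find_best_fit below uses Nelder-Mead, which never calls this gradient.
# As a minimal sketch (not part of the original workflow), a gradient-based
# optimizer such as L-BFGS-B could use grad_nll through scipy's `jac` argument,
# assuming a `gp` that has already been computed on the data and a `y_obs`
# array in scope:
#
#     results = op.minimize(partial(nll, gp=gp, y_obs=y_obs),
#                           gp.kernel.vector,
#                           jac=partial(grad_nll, gp=gp, y_obs=y_obs),
#                           method='L-BFGS-B')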
def rms_score(p, gp=None, y_obs=None, x=None):
    """RMS of the GP prediction residuals at x, normalized by the
    standard deviation of the observations."""
    n = len(y_obs)
    gp.kernel[:] = p
    mu, cov = gp.predict(y_obs, x)
    rms = np.sqrt(np.sum((y_obs - mu) ** 2) / n)
    return rms / np.std(y_obs)
def find_best_fit(kernel, t_obs, y_obs, err_obs):
    """Optimize the kernel hyperparameters of a GP fit to (t_obs, y_obs)."""
    gp = george.GP(kernel, mean=np.mean(y_obs))
    gp.compute(t_obs, yerr=err_obs)
    partial_op = partial(nll, gp=gp, y_obs=y_obs)
    # partial_op = partial(rms_score, gp=gp, y_obs=y_obs, x=t_obs)
    p0 = gp.kernel.vector
    results = op.minimize(partial_op, p0, method='Nelder-Mead')
    gp.kernel[:] = results.x
    return gp.kernel
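

# A hedged usage sketch (not part of the original file): fit a squared-
# exponential kernel to synthetic observations. The kernel choice and the toy
# data below are illustrative assumptions, not taken from the original code.
if __name__ == '__main__':
    from george import kernels

    # Toy observations: a noisy sine curve (purely illustrative).
    t_obs = np.linspace(0, 10, 50)
    err_obs = 0.1 * np.ones_like(t_obs)
    y_obs = np.sin(t_obs) + err_obs * np.random.randn(len(t_obs))

    # ExpSquaredKernel is one of george's built-in kernels; the amplitude
    # and length scale here are arbitrary starting values.
    kernel = 1.0 * kernels.ExpSquaredKernel(1.0)
    best_kernel = find_best_fit(kernel, t_obs, y_obs, err_obs)
    print(best_kernel.vector)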