diff --git a/INSTALL.rst b/INSTALL.rst
index daf05c0..d383ef3 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -57,10 +57,10 @@ Required python packages
 =============== ===================== ============================================================
  package          version              url
 =============== ===================== ============================================================
  MDAnalysis      >= 0.7.5              http://www.mdanalysis.org
  numpy           >=1.0.3               http://numpy.scipy.org
- NetworkX        >=1.0                 https://networkx.lanl.gov
+ NetworkX        >=1.9.1               https://networkx.lanl.gov
  GridDataFormats >= 0.1.1              https://github.com/MDAnalysis/GridDataFormats
 =============== ===================== ============================================================
 
 MDAnalysis_ requires additional modules; see the instructions at
diff --git a/hop/MCMC_old.py b/hop/MCMC_old.py
new file mode 100644
index 0000000..f41ff23
--- /dev/null
+++ b/hop/MCMC_old.py
@@ -0,0 +1,718 @@
+# $Id$
+# Hop --- a framework to analyze solvation dynamics from MD simulations
+# Copyright (c) 2009 Oliver Beckstein
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Markov Chain Monte Carlo on hopping graph --- :mod:`hop.MCMC`
+=============================================================
+
+The :mod:`hop.MCMC` module uses the information encoded in a hopping
+graph to set up a Markov Chain Monte Carlo sampling procedure that
+allows one to rapidly generate site occupancy distributions that are
+distributed in the same way as the one sampled from MD.
+
+The most convenient entry point is the :func:`hop.MCMC.run` function ::
+
+  M = MCMC.run(filename='hopgraph.pickle',Ntotal=<Ntotal>)
+
+It takes as input a stored hopgraph and immediately runs an MCMC run of *Ntotal*
+steps. The output is a :class:`MCMCsampler` object. It contains the 'trajectory' and
+useful analysis functions. (Use interactive introspection in ipython to explore
+the possibilities of the object.)
+
+Notes on the algorithm:
+
+* some sort of dynamic lattice Monte Carlo with very simple acceptance
+  probabilities (0 if there's no space on the site, and 1 if
+  there is)
+
+  ... is 'MCMC' actually the proper description?
+
+* Extension to multiply occupied sites: use the site occupancy
+  distributions from siteanalysis, and replace the unconditional move
+  by an acceptance probability == s_i(n)
+
+* I am currently using time-forward (out-of) and time-backward (into)
+  site moves (the latter inspired by coupling from the past).
+
+Classes and functions
+---------------------
+
+"""
+
+import hop.graph
+from hop.constants import SITELABEL
+import hop.utilities
+from hop.utilities import msg,set_verbosity
+import numpy
+
+Nequil_default = 10000
+
+class MCMCsampler(hop.utilities.Saveable):
+    """Generate an equilibrium distribution of states from a hop graph."""
+
+    _saved_attributes = 'all'
+
+    def __init__(self,h=None,min_hops_observed=1,filename=None):
+        """Initialize the Markov Chain Monte Carlo sampler with a HoppingGraph.
+
+        M = MCMCsampler(HoppingGraph)
+
+        Build a Markov Chain model from a <HoppingGraph> (with edges deleted
+        that have less than <min_hops_observed> hops).
+        """
+        if h is not None:
+            # standard init: setup everything from hopgraph
+            import copy
+            # 1) compute transition probabilities between all nodes.
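+            #    Hypothetical counts for illustration: if node 5 has out-edges
+            #    with N(5->2)=6, N(5->7)=2, N(5->9)=2, then Nout=10 and
+            #    T[2,5]=0.6, T[7,5]=T[9,5]=0.2, so exit_cdf[5]=[0.6, 0.8, 1.0]
+            #    and a uniform x in (0,1] selects the first j with x <= cdf.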
+ # represent T_ji (j-->i) as a sparse matrix in dict of dict format + h.filter(exclude={'Nmin':min_hops_observed}) + self.graph = h.select_graph(use_filtered_graph=True) + self.site_properties = h.site_properties + A = self.graph.adj # adjacency matrix as dict-of-dicts; A[i][j] == j --> i !! + Nsites = numpy.max(self.graph.nodes())+1 + self.T = numpy.zeros((Nsites,Nsites)) # exit probabilities T[j,i] = P(i -> j|i) + self.R = numpy.zeros((Nsites,Nsites)) # entry probabilities R[i,j] = P(j -> i|i?) (?) + # may look into sparse matrices... + + # fast lookup of neighbours + self.out_neighbors = dict( [ (i,numpy.sort([e[1] for e in self.graph.out_edges(i)])) + for i in self.graph.nodes()] ) + self.in_neighbors = dict( [ (i,numpy.sort([e[0] for e in self.graph.in_edges(i)])) + for i in self.graph.nodes()] ) + + # probably slow but simple ... actually, fast enough so that I don't care + # format of a edge in the graph: + # i : { j1: {'k':k, 'N':N, 'fit':fit_instance}, j2: {...}, ...} + # i : { + # {1: {'k':0.100953823261, 'N':32, 'fit':}}, + # {12: {'k':0.00272870618459, 'N':1, 'fit':}} + # } + + # probability to exit i to j = k(i->j)/Z with Z = Sum_l k(i-->l) + # or N_ji/Z' with Z' = Sum_l N_li + # probability to enter i from j: + # N_ij/Y with Y = Sum_l N_il + for i in self.graph.nodes(): + Nout = numpy.sum( [A[i][j]['N'] for j in self.out_neighbors[i]] ) # total exiting from i + Nin = numpy.sum( [A[j][i]['N'] for j in self.in_neighbors[i]] ) # total entering into i + for j in self.out_neighbors[i]: + self.T[j,i] = A[i][j]['N'] * 1.0 / Nout + for j in self.in_neighbors[i]: + self.R[i,j] = A[j][i]['N'] * 1.0 / Nin + + # cumulative prob. distrib. of exit nodes (could sort by decreasing + # probability to optimize for more probable hits but then must sort + # nodes as well) + self.exit_cdf = dict( [(i,numpy.cumsum(self.T[self.out_neighbors[i],i])) + for i in self.graph.nodes() if len(self.out_neighbors[i])>0 ] ) + self.entry_cdf = dict( [(i,numpy.cumsum(self.R[i,self.in_neighbors[i]])) + for i in self.graph.nodes() if len(self.in_neighbors[i])>0 ] ) + + self.init_state() + + # obtain maxsite from volume of the site (assume waters are hard spheres) + # max_occupancy = volume_site/volume_water + 1 + v_water = 4./3.*numpy.pi*hop.constants.r_water**3 + def max_occupancy(sitelabel): + return int(h.site_properties.volume[sitelabel]/v_water + 1) + + self.maxstate = dict( [(i,max_occupancy(i)) for i in self.graph] ) + self.maxstate[SITELABEL['bulk']] = int(1e10) # no restrictions on bulk + + # Site probability p_i: probability to find site occupied by at least one particle. + # p(i) = P(nu_i >= 1) = 1 - P(nu_i = 0) + # For max nu_i = 1: p(i) = /max nu_i + # For max nu_i > 1: worry about it later... + self.siteprobability = dict( [(i,self.site_properties.occupancy_avg[i]/self.maxstate[i]) + for i in self.graph] ) + # Set bulk to a random value such as 1/2; it is not being used. 
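+            # (e.g. occupancy_avg[i]=0.8 with maxstate[i]=1 gives p(i)=0.8, so
+            # an entry move at site i is attempted 80% of the time; the bulk
+            # value below is never used.)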
+            self.siteprobability[SITELABEL['bulk']] = 0.5
+        elif filename is not None:
+            # setup from pickled file
+            self.load(filename)
+        else:
+            raise ValueError('Either a hopgraph or a pickled file is needed.')
+
+    def init_state(self,Nbulk=1e4):
+        """Initialize state with 1 particle per standard site and Nbulk for the bulk site."""
+        # 2) initialize state vector
+        #    (many on bulk and 1 on proper sites)
+        self.state = dict( [(i,1) for i in self.graph])
+        self.state[SITELABEL['bulk']] = int(Nbulk)   # large bulk reservoir
+        self.states = numpy.array([])   # hold 'trajectory'
+        self.iteration = 0              # number of iterations up to/incl states[-1]
+
+    def sample(self,max_iter=10000,record_iterations=True):
+        """Run Monte Carlo site moves.
+
+        sample(max_iter=10000)
+
+        Runs a batch of MCMC moves.
+        """
+        # 3) MCMC sampler
+        sites = numpy.array(self.graph.nodes())
+        Nsites = len(sites)
+
+        for n in xrange(max_iter):
+            # randint avoids indexing the array with a float, as uniform() did
+            site = sites[numpy.random.randint(SITELABEL['bulk']+1,Nsites)]
+            self._MCMCmove(site)
+        if record_iterations:
+            self.iteration += max_iter
+
+    def run(self,Ntotal=500000,Nskip=1000,verbosity=None):
+        """Run multiple MCMC cycles of length <Nskip> for a total of <Ntotal> steps.
+
+        run(Ntotal=500000,Nskip=1000)
+
+        Starts from the current configuration in state.
+        Collects one configuration in states every <Nskip> steps.
+        """
+        set_verbosity(verbosity)
+
+        Ntotal = int(Ntotal)
+        Nskip = int(Nskip)
+        Ncycle = int(Ntotal/Nskip)
+
+        self.runparameters = {'Ntotal':Ntotal,'Nskip':Nskip,'Ncycle':Ncycle}
+        state_list = []
+        msg(1,"Running MCMC:\n\tNtotal = %(Ntotal)d\n\tSaving state every cycle of Nskip steps = %(Nskip)d\n\t"
+            "Hence in total Ncycle cycles = %(Ncycle)d\n" % locals())
+        for i in xrange(1,Ncycle+1):
+            msg(3,"cycle %(i)4d/%(Ncycle)4d\r" % locals())
+            self.sample(Nskip)
+            state_list.append(self.statevector)
+        msg(3,"\n")
+
+        if len(self.states) == 0:
+            self.states = numpy.array(state_list)
+        else:
+            msg(2,"Extending states from %d configurations by %d new ones."
\ + % (len(self.states), len(state_list))) + self.states = numpy.concatenate((self.states, numpy.array(state_list))) + + def _MCMCmove(self,i): + if numpy.random.uniform(0,1) < self.siteprobability[i]: + # try entry move (add one particle to site) + if self.state[i] >= self.maxstate[i]: # only transfer if there's space on node + return False + jnodes = self.in_neighbors[i] # all entry nodes + if len(jnodes) == 0: # isolated node + return False + x = numpy.random.uniform(0,1) # random number for deciding exit channel + j = jnodes[ x <= self.entry_cdf[i] ][0] # j for which p(j-1) < x <= p(j) ('=' necessary to catch x==1) + if self.state[j] == 0: + return False # no particles to transfer from source site + self.state[i] += 1 + self.state[j] -= 1 + return True + else: + # try exit move (remove one particle from site) + if self.state[i] == 0: + return False # no water to move + jnodes = self.out_neighbors[i] # all exit nodes + if len(jnodes) == 0: # isolated node + return False + x = numpy.random.uniform(0,1) # random number for deciding exit channel + j = jnodes[ x <= self.exit_cdf[i] ][0] # first jnode for which x > prob ('=' necessary to catch x==1) + if self.state[j] >= self.maxstate[j]: # only transfer if there's space on node + return False + self.state[i] -= 1 + self.state[j] += 1 + return True + + def occupancy(self): + """Ensemble averaged occupancy (over ALL states incl bulk) and fluctuation.""" + return self.states.mean(axis=0), self.states.std(axis=0) + + def mean(self): + """Mean for each site (excl bulk).""" + return self.states[:,self.firstsiteindex:].mean(axis=0) + + def std(self): + """Standard deviation for each site.""" + return self.states[:,self.firstsiteindex:].std(axis=0) + + def mean_std(self): + "Returns site labels, mean, and standard deviation for each site (excl bulk).""" + return self.sites[self.firstsiteindex:], self.mean(), self.std() + + def firstsiteindex(): + doc = """State array index of the first site after bulk.""" + def fget(self): + return self.node2index[SITELABEL['bulk']]+1 + return locals() + firstsiteindex = property(**firstsiteindex()) + + def statevector(): + doc = """State as a numpy array; the corresponding nodes are state.keys()""" + def fget(self): + return numpy.array(self.state.values()) + def fset(self,x): + raise NotImplementedError("statevector cannot be assigned; modify state.") + return locals() + statevector = property(**statevector()) + + def node2index(): + doc = """Translates node label (in graph) to the sequential array index.""" + def fget(self): + # NOTE: if self.graph is changed manually then this will be wrong + try: + return self.__node2index # use cached dictionary + except: + self.__node2index = dict( [(n,idx) for idx,n in enumerate(self.graph.nodes())] ) + return self.__node2index + return locals() + node2index = property(**node2index()) + + def index2node(): + doc = """Translates sequential array index to node label (in graph).""" + def fget(self): + # NOTE: if self.graph is changed manually then this will be wrong + try: + return self.__index2node + except: + self.__index2node = numpy.array(self.graph.nodes()) + return self.__index2node + return locals() + index2node = property(**index2node()) + sites = index2node + + def plot(self,filename=None,plot_skip=None): + """Plot density plot of the saved configurations in states[].""" + if len(self.states) == 0: + raise ValueError("No configurations computed yet. 
Execute 'run()' first.") + + import pylab + + if plot_skip is None: + plot_skip = self.runparameters['Nskip'] + plot_step = int(plot_skip/self.runparameters['Nskip']) or 1 + if plot_step == 1: + plot_skip = self.runparameters['Nskip'] + + site_states = self.states[:,self.firstsiteindex:] # ignore 0 (bulk) + maxsite = numpy.max([self.maxstate[site] for site in self.graph \ + if site <> SITELABEL['bulk']]) + + # pylab.clf() + img = pylab.imshow(site_states[::plot_step].transpose(), + interpolation='nearest',cmap=pylab.cm.hot) + img.axes.set_aspect('auto') + pylab.colorbar(ticks=range(maxsite+1),shrink=0.75) + + pylab.title('Number of water molecules per site') + pylab.xlabel('MCMC iteration/%d' % plot_skip) + pylab.ylabel('site (sequentially enumerated)') + + pylab.draw_if_interactive() + if filename: + pylab.savefig(filename) + + def plot_occupancy(self,legend=True,**kwargs): + """Plot site label vs +/- std(N). + + legend True: add legend, False: return (line,description) + **kwargs additional arguments for errbar plot such as color='k', fmt='o' + + """ + import pylab + plotargs = {'fmt':'ko', 'color':'black'} + plotargs.update(kwargs) + sites = self.index2node + o_mean, o_std = self.occupancy() + firstsiteindx = self.node2index[SITELABEL['bulk']]+1 + # upper graph: mean + stdev bar + ##pylab.subplot(211,frameon=False) + pylab.subplot(111,frameon=False) + pylab.title('occupancy' ) + pylab.ylabel(r'$$') + lines = pylab.errorbar(x=sites[firstsiteindx:], + y=o_mean[firstsiteindx:], + yerr=o_std[firstsiteindx:], + **plotargs) + labelstring = '' # no legend + #labelstring = r'$N=%g$' % self.runparameters['Ntotal'] + #if legend: + # pylab.legend((lines[0],),(labelstring,), + # loc='upper right',numpoints=1,shadow=True) + pylab.xlabel('site label') + + # lower graph: stdev + ##pylab.subplot(212,frameon=False) + ##pylab.xlabel('site label') + ##pylab.ylabel(r'$\sigma(\nu_i)$') + ##pylab.plot(sites[firstsiteindx:], + ## o_std[firstsiteindx:],'o', + ## color=plotargs['color']) + + if not legend: + return (lines[0],labelstring) + return None + + def occupancy_mean_correl(self): + """Calculates the correlation coefficient between simulation and MCMC occupancies.""" + corr = numpy.corrcoef(*self._occupancies_mean()) + return corr[0,1] + + def occupancy_std_correl(self): + """Calculates the correlation coefficient between simulation and MCMC occupancy fluctuations.""" + corr = numpy.corrcoef(*self._occupancies_std()) + return corr[0,1] + + def plot_correl(self,legend=True,**kwargs): + """Plot the occupancy from the MD simulation vs the MCMC one.""" + import pylab + + occ_MD,occ_MCMC = self._occupancies_mean() + corr = self.occupancy_mean_correl() + labelstring = r'$C=%g$' % (corr) + lines = pylab.plot(occ_MD, occ_MCMC, 'o', label=labelstring, **kwargs) + + # straight line fit + import hop.graph + f = hop.graph.fitlin(occ_MD,occ_MCMC) + x = numpy.array([0,1]) + pylab.plot(x,f.fit(x),'--',**kwargs) + + pylab.xlabel('MD occupancy') + pylab.ylabel('MCMC occupancy') + if legend: + pylab.legend(loc='best',numpoints=1, + prop=pylab.matplotlib.font_manager.FontProperties(size='smaller')) + if not legend: + return (lines[0],labelstring) + return None + + def _occupancies_mean(self): + """Returns the mean occupancy from MD and MCMC.""" + occ_MD = self.site_properties.occupancy_avg[self.index2node] + occ_MCMC = self.occupancy()[0] + firstsiteindx = self.node2index[SITELABEL['bulk']]+1 + return occ_MD[firstsiteindx:], occ_MCMC[firstsiteindx:] + + def _occupancies_std(self): + """Returns the fluctuation (stdev) in 
the occupancy from MD and MCMC.""" + + raise NotImplementedError('MD Occupancy must be read from SiteAnalysisArray.') + + occ_MD = self.site_properties.occupancy_std[self.index2node] + occ_MCMC = self.occupancy()[1] + firstsiteindx = self.node2index[SITELABEL['bulk']]+1 + return occ_MD[firstsiteindx:], occ_MCMC[firstsiteindx:] + + def autocorrelation(self,start=None,stop=None,step=None,**kwargs): + """Calculates the auto correlation function for all site trajectories.""" + from hop.utilities import autocorrelation_fft as ACF + return numpy.array([ACF(traj,**kwargs) + for traj in self.states.T[:,start:stop:step]]) + + def averaged_autocorrelation(self,step=None,**kwargs): + """Calculates the ACF or each site by resampling from the whole trajectory. + + mean(acf), standardev(acf) = averaged_autocorrelation(**kwargs) + + :Arguments: + step only take every from the trajectory (None == 1) + ??? step > 1 seems to take LONGER ??? + length length (in frames) of the ACF (default: 1/2*len(series)) + sliding_window repeat ACF calculation every N frames (default: len(series)/100) + + :Returns: + mean_acf average over all resampled acfs per site, shape = (Nsites,length) + std_acf standard deviation or the resampled acfs, shape = (Nsites,length) + + See also for kwargs: + """ + from hop.utilities import averaged_autocorrelation as avACF + tmp = numpy.array([avACF(traj,**kwargs) for traj in self.states.T[:,::step]]) + return tmp[:,0], tmp[:,1] + + +def run(filename='hopgraph.pickle',Ntotal=500000,Nskip=1000,Nequil=Nequil_default): + """Perform Markov Chain Monte Carlo on a model derived from the hopping graph.""" + + h = hop.graph.HoppingGraph(filename=filename) + M = MCMCsampler(h) + + msg(0,"MCMCsampler() for %s\n" % filename) + + if Nequil > 0: + # burn in/flooding + msg(1,"Running %(Nequil)d steps for equilibration...\n" % locals()) + M.sample(Nequil,record_iterations=False) + + # MCMC run + M.run(Ntotal=Ntotal,Nskip=Nskip,verbosity=3) + + return M + +class Pscan(object): + """Run a MCMC sampler for a number of parameter values.""" + + def __init__(self,parameter,pvalues=None,filename='hopgraph.pickle', + Ntotal=1e6,**kwargs): + """Sample on hopping graph for different values of = p. + + P = Pscan(parameter=,pvalues=,filename=,**kwargs) + + must be a keyword argument to hop.MCMC.run(); + the parameter overrides any default values that may have been + set. For instance, can be 'Ntotal' or 'filename'. + + kwargs: all other kwargs are directly passed on to MCMC.run(). 
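+
+        Example (hypothetical pickle file)::
+
+           P = Pscan(parameter='Ntotal', pvalues=[1e4, 1e5, 1e6],
+                     filename='hopgraph.pickle')
+           pvalues, correlations = P.occupancy_mean_correl()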
+ """ + + self.parameter = parameter + self.pvalues = list(pvalues) + self.Ntotal = Ntotal + M = {} + for p in pvalues: + print "parameter %s = %g" % (parameter,p) + kwargs[parameter]=p + kwargs.setdefault('Ntotal',Ntotal) + kwargs.setdefault('filename',filename) + M[p] = run(**kwargs) + + self.MCMCsamplers = M + del M + + def occupancy_mean_correl(self): + """Returns X=pvalues, Y=occupancy_mean_correlations.""" + self.pvalues.sort() + return (numpy.array(self.pvalues), + numpy.array([self.MCMCsamplers[v].occupancy_mean_correl() + for v in self.pvalues])) + + def plot_occupancy_mean_correl(self,linestyle='ko-',**kwargs): + import pylab + x,y = self.occupancy_mean_correl() + lines = pylab.semilogx(x,y,linestyle,**kwargs) + pylab.xlabel('total MCMC steps') + pylab.ylabel('correlation coefficient with MD') + + def plot_states(self,maxcolumns=2): + """Plot all state 'trajectories' as a tiled plot.""" + import pylab + pvalues = sorted(self.MCMCsamplers.keys()) + Nmax = len(pvalues) + maxrows = int(Nmax*1.0 / maxcolumns + 0.5) + nplot = 0 + pylab.clf() + for irow in xrange(maxrows): + row = irow + 1 + for icolumn in xrange(maxcolumns): + column = icolumn + 1 + nplot += 1 + iplot = nplot - 1 + p = pvalues[iplot] + pylab.subplot(maxrows,maxcolumns,nplot, frameon=False) + self.MCMCsamplers[p].plot() + ax = pylab.gca() + if ax.is_first_col(): + pylab.ylabel('site #') + else: + pylab.ylabel('') + if ax.is_last_row(): + pass # keep the original label + else: + pylab.xlabel('') + pylab.title(r'%s=%g' % (self.parameter,p)) + + + def plot_occupancy(self,**kwargs): + """Plot (site occupancy from MCMC) for all parameter values. + + (See _plotter())""" + self._plotter('plot_occupancy',fmt='o',**kwargs) + + def plot_occupancy_mean_correl(self,**kwargs): + """Plot MD occupancy vs MCMC occupancy. + + plot_correl(colorscale='linear'|'log') + + (See _plotter())""" + import pylab + self._plotter('plot_correl',**kwargs) + pylab.plot([0,1],[0,1],'k-.') + + def _plotter(self,func,colorscale='linear',**fargs): + """Plot func for all MCMCsamplers in the collection. 
+ + _plotter('',colorscale='linear'|'log',**fargs) + + '' string, names a plotting method such as 'plot_correl' + colorscale scale color based on the parameter value ('linear' or 'log'; + note that 'log' can only be used with numerical data > 0) + **fargs additional arguments that the function may need + """ + import pylab + # currently, pvalues must be numerical; if not then one should use the enumeration + # instead of the pvalues list + if numpy.all(numpy.isreal(self.pvalues)): + p_dict = dict( zip(self.pvalues, map(float,self.pvalues)) ) + else: + p_dict = dict( [(p,n) for n,p in enumerate(self.pvalues)] ) + + _scalings = {'linear': lambda x:x, + 'log': lambda x:numpy.log(x)} + try: + scale = _scalings[colorscale] + except KeyError: + raise ValueError(" must be one of "+str(_scalings.keys())) + pnormalize = pylab.normalize(vmin=numpy.min(scale(p_dict.values())), + vmax=numpy.max(scale(p_dict.values()))) + + pylab.clf() + ax = pylab.axes(frameon=False) + + lines = [] + descriptions = [] + for p in sorted(self.MCMCsamplers): + p_num = p_dict[p] + plot_func = self.MCMCsamplers[p].__getattribute__(func) + (line,correl_legend) = plot_func(color=pylab.cm.jet(pnormalize(scale(p_num))), + legend=False, **fargs) + descr = r'$%s\ %s$' % (str(p), correl_legend.replace(r'$','')) # minimalistic legend + lines.append(line) + descriptions.append(descr) + + pylab.legend(lines,descriptions,numpoints=1,shadow=True, + prop=pylab.matplotlib.font_manager.FontProperties(size='smaller')) + + def save(self,filename='pscan.pickle'): + """Save pscan object to pickle file. + + save(pscan.pickle) + + Load with + + import cPickle + myPscan = cPickle.load(open('pscan.pickle')) + """ + import cPickle + fh = open(filename,'wb') + try: + cPickle.dump(self,fh,cPickle.HIGHEST_PROTOCOL) + finally: + fh.close() + msg(3,"Wrote Pscan object to '%s'.\n" % filename) + +class MultiPscan(hop.utilities.Saveable): + """Run Pscan(**pscanargs) times and collect all Pscan objects in list. + """ + + _saved_attributes = 'all' + + def __init__(self,repeat=10,**pscanargs): + """pscans = MultiPscan(repeat=10, parameter='Ntotal', pvalues=[1e4,2.5e4,....], ...) + See Pscan() for description of pscanargs. + """ + self.collection = [Pscan(**pscanargs) for i in xrange(repeat)] + + def plot(self,**kwargs): + multi_plot(self.collection,plottype='whisker',funcname='occupancy_mean_correl',**kwargs) + +def multi_plot(plist,plottype='whisker',Nequil=Nequil_default,funcname='occupancy_mean_correl',**kwargs): + """Display a collection of functions. + + multi_plot(plist,plottype='whisker',Nequil=10000,funcname='occupancy_mean_correl',**kwargs) + + The function is obtained from a method call on the objects in plist. The + assumption is that these are functions of Ntotal (if not, set Nequil=0; Nequil is + added to x). Each object is a different realization, e.g. multiple MCMC runs. + + plottype 'whisker' (whisker plot), 'standard' (average and standard deviations) + Nequil correction, added to x + funcname string; a method of the objects in plist that does EXACTLY the following: + x,y = obj.funcname() + where x and y are numpy arrays of equal length + **kwargs color, boxcolor, mediancolor, capsize + """ + import pylab + + plottypes = ('whisker','standard') + + # sanity checks + if plottype not in plottypes: + raise ValueError("Only plottypes from %(plottypes)r, not %(plottype)s." % locals()) + + try: + plist[0].__getattribute__(funcname)() # How do I handle old-style classes? Check __getattr__? 
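+        # (the call must return a pair of equal-length arrays x, y;
+        #  getattr() resolves funcname for both old- and new-style classes)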
+ except: + raise ValueError("funcname='%(funcname)r' has the wrong signature or is not a method " + "of the objects in plist." % locals()) + + def xy_from(obj): + return obj.__getattribute__(funcname)() + def x_from(obj): + return xy_from(obj)[0] + def y_from(obj): + return xy_from(obj)[1] + + kwargs.setdefault('color','b') # main color + kwargs.setdefault('capsize',0) + + x = x_from(plist[0]) + Nequil + all_y = numpy.array([y_from(p) for p in plist]) + ymean = all_y.mean(axis=0) + yerr = all_y.std(axis=0) + + ax = pylab.axes(frameon=False) + if plottype == 'standard': + lines = pylab.errorbar(x,ymean,yerr=yerr,fmt='o',linestyle='-',**kwargs) + + for p in plist: + px,py = xy_from(p) + px += Nequil + components = pylab.semilogx(px,py,'o',color=kwargs['color'],ms=3) + elif plottype == 'whisker': + # widths that look the same in a log plot + def logwidth(x,delta): + """Return linear widths at x that, logarithmically scaled, appear to be delta wide.""" + q = numpy.exp(delta) + return x * (1 - q)/(1 + q) + + kwargs.setdefault('mediancolor','k') # for median in whiskerplot + kwargs.setdefault('boxcolor','darkgray') + + pylab.semilogx(x,ymean,'o-',color=kwargs['color'],ms=3) + components = pylab.boxplot(all_y,positions=x, + widths=logwidth(x,kwargs.setdefault('widths',0.15))) + ax = pylab.gca() + ax.set_xscale('log') + ax.set_yscale('linear') + # must modify components for customisation + for l in components['boxes']: + l.set_color(kwargs['boxcolor']) + for l in components['whiskers']: + l.set_color(kwargs['color']) + l.set_linestyle('-') + for l in components['caps']: + l.set_color(kwargs['color']) + for l in components['medians']: + l.set_color(kwargs['mediancolor']) + l.set_linewidth(3.0) + for l in components['fliers']: + l.set_color(kwargs['color']) + + pylab.draw_if_interactive() + + pylab.xlabel('total MCMC steps') + pylab.ylabel('correlation coefficient with MD') + pylab.title('convergence of occupancy correlation with MD') + + #return x,ymean,yerr diff --git a/hop/density.py b/hop/density.py index e607ec7..d47988f 100644 --- a/hop/density.py +++ b/hop/density.py @@ -138,21 +138,9 @@ def Density(self): raise MissingDataError(errmsg) u = self.universe metadata = self.metadata - metadata['collector'] = self.name - metadata['collector_mode'] = self.mode metadata['psf'] = u.filename # named psf for historical reasons: any topol metadata['dcd'] = u.trajectory.filename # named dcd for historical reasons: any traj metadata['atomselection'] = self.atomselection - metadata['n_frames'] = u.trajectory.n_frames - metadata['dt'] = u.trajectory.dt # in ps for default MDAnalysis - # totaltime should be in MDAnalysis! - metadata['totaltime'] = round(u.trajectory.n_frames * metadata['dt'] * u.trajectory.skip_timestep, 3) - metadata['time_unit'] = MDAnalysis.core.flags['time_unit'] # just to make sure we know it... - metadata['dcd_skip'] = u.trajectory.skip_timestep # frames - metadata['dcd_delta'] = u.trajectory.delta # in native units (?) 
-        if self.mode == 'BULK':
-            metadata['soluteselection'] = self.soluteselection
-            metadata['cutoff'] = self.cutoff     # in Angstrom
 
         parameters = self.parameters
         parameters['isDensity'] = False             # must override
@@ -522,17 +510,6 @@ def current_coordinates():
     metadata['psf'] = u.filename              # named psf for historical reasons
     metadata['dcd'] = u.trajectory.filename   # named dcd for historical reasons
     metadata['atomselection'] = atomselection
-    metadata['n_frames'] = n_frames
-    metadata['totaltime'] = round(u.trajectory.n_frames * u.trajectory.delta * u.trajectory.skip_timestep \
-                                  * constants.get_conversion_factor('time','AKMA','ps'), 3)
-    metadata['dt'] = u.trajectory.delta * u.trajectory.skip_timestep * \
-                     constants.get_conversion_factor('time','AKMA','ps')
-    metadata['time_unit'] = 'ps'
-    metadata['dcd_skip'] = u.trajectory.skip_timestep  # frames
-    metadata['dcd_delta'] = u.trajectory.delta         # in AKMA
-    if cutoff > 0 and soluteselection is not None:
-        metadata['soluteselection'] = soluteselection
-        metadata['cutoff'] = cutoff # in Angstrom
 
     parameters = kwargs.pop('parameters',{})
     parameters['isDensity'] = False             # must override
diff --git a/hop/g_flux.py b/hop/g_flux.py
new file mode 100644
index 0000000..f0aa7ad
--- /dev/null
+++ b/hop/g_flux.py
@@ -0,0 +1,71 @@
+import MDAnalysis
+import numpy as np
+import MDAnalysis.KDTree.NeighborSearch as NS
+import copy
+import time
+
+def setup(topology,trajectory,water_label,protein_label):
+    u = MDAnalysis.Universe(topology,trajectory)
+    w = u.selectAtoms('name ' + water_label)
+    box = u.atoms.bbox()
+    l = u.selectAtoms('resname ' + protein_label)
+    p = u.selectAtoms('protein')
+    protein_box = p.bbox()
+    return [u,w]
+
+def track_counts(topology,trajectory,water_label,protein_label,in_top,in_bottom,
+                 write_out_time=1000,timestep=0.001,use_cutoff=False,time_cutoff=1000,
+                 filename_down='flux_down',filename_up='flux_up'):
+    universe,water = setup(topology,trajectory,water_label,protein_label)
+    u = universe
+    total_time = universe.trajectory.totaltime
+    tracking_list = np.zeros(len(water))
+    tracking_list_last = np.zeros(len(water))
+    counts_up = 0
+    counts_down = 0
+    rate_observation = 0
+    time_elapsed = 0
+    crossed_particles_up = []
+    crossed_particles_down = []
+    rates_down = open(filename_down+'.txt','w')
+    rates_up = open(filename_up+'.txt','w')
+    rates_down.write('timestep ' + str(timestep) + ' upper boundary ' + str(in_top)
+                     + ' lower boundary ' + str(in_bottom) + '\n')
+    rates_up.write('timestep ' + str(timestep) + ' upper boundary ' + str(in_top)
+                   + ' lower boundary ' + str(in_bottom) + '\n')
+    cumulative_counts = 0
+    total_residence_time = 0
+    weighted_residence_time = 0
+    if use_cutoff == False:
+        time_cutoff = total_time
+    for ts in universe.trajectory:
+        inst_counts_down = 0
+        inst_counts_up = 0
+        time_elapsed += 1
+        water_z = water.coordinates()[:,2]
+        progress = np.divide(universe.trajectory.frame,universe.trajectory.numframes,dtype=np.float64)
+        tracking_list_last[:] = tracking_list
+        if time_elapsed >= write_out_time:
+            time_elapsed = 0
+        for water_index in xrange(len(water_z)):
+            # tracking states: 1 above, 2 inside (entered from top),
+            # -2 inside (entered from bottom), -1 below
+            if water_z[water_index] >= in_top:
+                tracking_list[water_index] = 1
+                if tracking_list_last[water_index] == -2:
+                    counts_up += 1
+            elif water_z[water_index] < in_top and water_z[water_index] > in_bottom:
+                if tracking_list_last[water_index] == 1 or tracking_list_last[water_index] == 2:
+                    tracking_list[water_index] = 2
+                elif tracking_list_last[water_index] == -1 or tracking_list_last[water_index] == -2:
+                    tracking_list[water_index] = -2
+            elif water_z[water_index] <= in_bottom:
+                tracking_list[water_index] = -1
+                if tracking_list_last[water_index] == 2:
+                    counts_down += 1
diff --git a/hop/graph.py b/hop/graph.py
--- a/hop/graph.py
+++ b/hop/graph.py
@@ ... @@
                     (1-a)*k2:
-                    if a > 0.5:
-                        k = k1
-                    else:
-                        k = k2
+                    #Above is old; take a
weighted average instead + k = a*k1 + (1-a)*k2 + else: raise ValueError('Unknown waitingtime fit, '+str(f)) return 1000 * k # 1/ps --> 1/ns @@ -1601,6 +1601,216 @@ def write_pdb(self,graph,props,filename=None): pdbfile = self.filename(filename,'pdb') io.save(pdbfile) + def write_tcl(self,filename='hopgraph',color='blue',sphere_radius="1",highlight_nodes=[],highlight_col='yellow',color_weight=False,partition=8,node_subset=[],use_node_subset=False,rate_cutoff=0): + """ + Writes out a tcl file which can then be visualized. Optionally, you can pass a set of + nodes to be highlighted with a color of your choice. Alternatively, you can try to + weight the edge color by node rate. The weighting is not implemented properly. + """ + + out=open(filename+".tcl","w") + graph=self.filtered_graph + props=self.site_properties + def draw_nodes(graph): + for node in graph: + if node in highlight_nodes: + out.write(" draw color " + highlight_col + "\n") + pos = props[node].center + vol = props[node].volume + occ = props[node].occupancy_avg + degree = graph.degree(node) ## TODO w/filtered (may be off by 1) + commonlabel = props.equivalence_name[node].strip() + out.write(" draw sphere" +" {"+ str(pos[0]) +" "+ str(pos[1])+ + " " +str(pos[2])+ "}" + " radius"+" " + sphere_radius + " resolution 16\n") + for i in graph[node]: + if graph[node][i]['k']>rate_cutoff: + pos_nbr=props[i].center + out.write(" draw cylinder" + " {" +str(pos[0]) +" "+str(pos[1])+" "+ + str(pos[2])+ "}" + " {" +str(pos_nbr[0]) +" "+str(pos_nbr[1])+" "+ + str(pos_nbr[2])+ " }"+ " radius 0.1" + " resolution 16\n") + else: + pos = props[node].center + vol = props[node].volume + occ = props[node].occupancy_avg + degree = graph.degree(node) ## TODO w/filtered (may be off by 1) + commonlabel = props.equivalence_name[node].strip() + out.write(" draw sphere" +" {"+ str(pos[0]) +" "+ str(pos[1])+" " + + str(pos[2])+ "}" + " radius"+" " + sphere_radius + " resolution 16\n") + + + for i in graph[node]: + if color_weight==True: + if graph[node][i]['k']*10> 1: + for j in xrange(int(1024/partition)): + if partition*(j+1)< graph[node][i]['k']*100 and graph[node][i]['k']*100 < partition*(j+2): + out.write(" draw color " + str(partition*(j+2)+100) + "\n") + break + elif graph[node][i]['k']*10<1: + for j in xrange(int(1024/partition)): + if partition*(j+1)< graph[node][i]['k']*1000 and graph[node][i]['k']*1000 < partition*(j+2): + out.write(" draw color " + str(partition*(j+2)) + "\n") + break + pos_nbr=props[i].center + if graph[node][i]['k']>rate_cutoff: + out.write(" draw cylinder" + " {" +str(pos[0]) +" "+str(pos[1])+" "+ + str(pos[2])+ "}" + " {" +str(pos_nbr[0]) +" "+str(pos_nbr[1])+" "+ + str(pos_nbr[2])+ " }"+ " radius 0.1" + " resolution 16\n") + out.write(" draw color " + color + "\n") + out.close() + if use_node_subset==True: + draw_nodes(node_subset) + else: + draw_nodes(graph) + + def write_tcl_cutoff(self,filename='hopgraph',sphere_radius="1",cutoff=1,cutoff_radius='5',cutoff_color='red'): + """ + A redundant function with the above, it is simply a variant which will be merged or will + supplant write_tcl, although it could also simply be retained along side it or included + as an option. A cutoff rate is specified by the user, and edges above the cutoff are + made thicker as well as colored differently from those below to highlight fast or slow + edges. 
+ """ + + out=open(filename+".tcl","w") + graph=self.filtered_graph + props=self.site_properties + for node in graph: + pos = props[node].center + vol = props[node].volume + occ = props[node].occupancy_avg + degree = graph.degree(node) ## TODO w/filtered (may be off by 1) + commonlabel = props.equivalence_name[node].strip() + out.write(" draw color white \n ") + out.write(" draw sphere" +" {"+ str(pos[0]) +" "+ str(pos[1])+" " +str(pos[2])+ "}" + " radius"+" " + sphere_radius + " resolution 16\n") + for i in graph[node]: + pos_nbr=props[i].center + if graph[node][i]['k'] j, k_ji, from the hopping times tau_ji. + + :Returns: dict(kij=*kji*, N=*N*, fit=*fit*) + + - *kji* : rate constant in 1/ps + - *N* : number of events + - *fit* : instance of :class:`fit_func` + + :Arguments: + *taus* + array of all waiting times + *method* + 'survivalfunction' + compute as a fit of a*exp(-k1*t)+(1-a)*exp(-k2*t) or exp(-k*t) + to the survival function S(t); the double exp is tried first + and then decided heuristically if a single exp is a better choice. + Heuristics: Use single exp if + + * number of data points is <= 10 + * double exp asymmetry abs(0.5-a) > 0.49 + * k1<0 or k2<0 + + :Bugs: + - Does not switch to single exponential if double exponential fit fails + to converge. + + .. Notes:: Should probably use the integral of the double-exponential fit as an + approximation for the rate constant instead of just using the slower + one (G. Hummer, pers. comm.) + """ + def write_psf(self,graph,props,filename=None): """Pseudo psf with nodes as atoms and edges as bonds""" # Standard no CHEQ format for a Charmm PSF file: diff --git a/hop/interactive_old.py b/hop/interactive_old.py new file mode 100644 index 0000000..d65808a --- /dev/null +++ b/hop/interactive_old.py @@ -0,0 +1,531 @@ +# $Id$ +# Hop --- a framework to analyze solvation dynamics from MD simulations +# Copyright (c) 2009 Oliver Beckstein +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +""" +Quickstart: using the hop package --- :mod:`hop.interactive` +============================================================ + +A typical session starts with a trajectory (which should have been +RMS-fitted to a reference structure). Any topology and trajectory file +suitable for MDAnalysis_ can be used such as PSF+DCD, PDB+XTC or a +single PDB. In the following Charmm/NAMD psf and dcd files are used as +examples. + +We will use the high-level wrapper functions in :mod:`hop.interactive`: + +>>> from hop.interactive import * + +.. _MDAnalysis:: http://mdanalysis.googlecode.com + +Hydration sites +--------------- + +Hydration sites are sites of water density higher than the bulk +density but one special site is the bulk. The hydration sites and the +bulk site are computed in two separate steps. + + +High density sites +.................. + +First build the density of the water oxygens. 
+ +>>> density = make_density(psf,dcd,filename,delta=1.0) + +If you have VMD with the VolMap plugin installed *and* your trajectory +fits into your computer's RAM you can also choose the VMD backend to +compute the density (which can be marginally faster): + +>>> density = make_density(psf,dcd,filename,delta=1.0,backend='VMD') + + +The density is also saved as a pickled python object so that one can +easily reload it. The density is also exported as a dx file for +visualization (e.g. use :func:`hop.interactive.visualize_density`, +which calls :program:`VMD`). + +From the density one creates the 'site map' for a given threshold (by +default this is a multiple of the water bulk density): + +>>> density.map_sites(threshold=2.72) + +Experiment with the threshold; :class:`hop.analysis.DensityAnalysis` can help +to systematically explore the parameter space, and it is also helpful +to load the density into a visualization software such as VMD and +interactively explore contour levels. Values between 1.65 and 3 have +given decent results in the past but this is system-dependent.) + + +Bulk site +......... + +For a full analysis of hopping events one also needs to define a bulk +site. This is currently accomplished by calculating a second 'bulk' +density (all water not within 3.5 A of the protein) and manually +inserting the bulk site into the site map for the first density. + +>>> density_bulk = make_density(psf,dcd,'bulk',delta=1.0, +... atomselection='name OH2', +... soluteselection='protein and not name H*', +... cutoff=3.5 +... ) + +Using VMD's VolMap can be potentially be faster --- try it if the +default seems too slow to you: + +>>> density_bulk = make_density(psf,dcd,'bulk',delta=1.0, +... atomselection='name OH2 and not within 3.5 of (protein and name not hydrogen)', +... backend='VMD',load_new=False) + +The bulk density should be a big, well defined volume so we choose a +fairly low threshold: + +>>> density_bulk.map_sites(0.6) + +Add the biggest bulk site: + +>>> density.site_insert_bulk(density_bulk) +>>> density.save() +>>> del density_bulk + +.. Note:: Behind the scenes, the bulk is simply prepended to the list + of all sites (``density.sites``, + :attr:`hop.sitemap.Density.sites`), found so far. By convention the + site at position 1 in the list of all sites is treated specially in + many parts of hop (it has the so-called sitelabel "1", which is + simply the position in the list of sites) and hence you might + encounter unexpected behaviour later if you do not insert a bulk + site. + +Statistics about the sites can be produced with + +>>> analyze_density(density,figname) + +The results figures will be named `.pdf`. + + +Remapping for comparing site maps +................................. + +This section is only relevant if you plan on comparing site maps. Then +you *must* compare the density to your reference density *now* before +proceeding. 
You will + + 1) remap this density to be defined on the same grid as the reference + density (for this to work, this density must have been generated from + a trajectory that has been RMS-fitted to the same reference structure + as; see hop.trajectory.rms_fit_trj() and + hop.trajectory.fasta2select()) + + >>> ref_density = hop.sitemap.Density(filename='my_reference_density') + >>> remapped_density = hop.sitemap.remap_density(density,ref_density) + + 2) find the equivalence sites in the two densities and add those sites + to **both** densities: + + >>> remapped_density.find_equivalence_sites_with(ref_density,verbosity=3) + >>> remapped_density.save() + >>> ref_density.save() + + (You must also recalculate the reference densities hopping + trajectory (see below) because some sites may have been merged into + 'equivalence sites'. See docs for + hop.sitemap.find_equivalence_sites_with() and + hop.graph.CombinedGraph()). + + From now on, work with the remapped density: + >>> density = remapped_density + + +Hopping trajectory +------------------ + +Next we translate the dcd into a 'hopping trajectory' (saved in dcd +format) in which coordinates for a given water oxygen are replaced by +the site it visits at each time step. + +>>> hops = make_hoppingtraj(density,'hop_water+bulk') + +All further analysis should use this hopping trajectory (from disk) as +it is computationally much cheaper to read the trajectory than to +re-translate the coordinate trajectory (which is done behind the +scences if the hopping trajectory is not available). + + +Hopping graph +------------- + +The final step is to map out the graph of transitions between sites +(using the hopping trajectory): + +>>> tn = build_hoppinggraph(hops,density) + +tn.hopgraph holds this graph (tn.graph just contains all jumps +including the interstitial and off-sites). The edges of hopgraph are +the rate constants k_ji (in 1/ps) for hops i --> j. They are computed +from an exponential fit to the site survival function S_ji(t) for +particles waiting to hop from i to j. + +The density is provided to attach data to the nodes of the +hopgraph. It is required for visualization and analysis (although not +strictly necessary for the hopgraph itself). + +Further analysis uses tn.hopgraph: + +>>> h = tn.hopgraph # main result is the 'hopgraph' +>>> h.save('hopgraph') # save the hopping graph (necessary for cg part) +>>> h.filter(exclude={'outliers':True, 'Nmin':2, 'unconnected':True}) +>>> h.show_rates() # show all calculated rate constants (filtered graph) +>>> h.plot_fits(xrange(301)) # plot rate constant fits for t=0ps to 300ps +>>> h.plot_fits() +>>> h.export('water') # write dot file to visualize (filtered) graph +>>> h.plot_site_occupancy('siteoccupancy') # plot site occupancy from graph ---is NOT working +>>> h.plot_residency_times('residencytimes')# residency times --- is NOT working + +To compare the water network based on density with another hop graph +(based on ref_density), construct the CombinedGraph: + +>>> h_ref = hop.graph.HoppingGraph(filename=) --- basically repeat steps from +### --- ref_density only with differ labels +>>> cg = hop.graph.CombinedGraph(g0=h,g1=h_ref) +>>> cg.plot(0,'cg_h',linewidths=(0.01,)) +>>> cg.plot(1,'cg_h_ref',linewidths=(0.01,)) + + +.. rubric:: TODO + +Currently un(der)-documented: +* Remapping densities to a reference density (see hop.sitemap.remap_density). +* Comparing densities and finding equivalence sites (see + hop.sitemap.find_common_sites() and Density.find_equivalence_sites_with()). 
+* Comparing hopgraphs across different simulations: requires equivalence sites in + both densities; then build the hop.graph.CombinedGraph(). + +Functions +--------- + +""" + +import hop.sitemap, hop.trajectory, hop.graph, hop.constants +import hop.density +import MDAnalysis +import numpy +import os + +import logging +logger = logging.getLogger("MDAnalysis.analysis.hop.interactive") + +def generate_densities(*args, **kwargs): + """Analyze the trajectory and generate solvent and bulk density. + + generate_densities(topol, traj, atomselection='name OW') --> densities + + This function can take a long time because it has to read the whole + trajectory. Progress is printed to the screen. It saves results to pickle + files. These files are :class:`hop.sitemap.Density` objects and can be used + to instantiate such a density object. + + :Arguments: + filename + name of the solvent density with bulk site + bulkname + bulk density + density_unit + unit of measurement for densities and thresholds + (Molar, nm, Angstrom, water, SPC, TIP3P, TIP4P) + solvent_threshold : exp(1) = 2.7182818284590451 + hydration sites when density > this threshold + bulk_threshold : exp(-0.5) = 0.60653065971263342 + bulk site are regions with density > this threshold + (and water farther away from the protein heavy atoms than *cutoff*) + delta : 1.0 + cubic grid size in Angstrom + cutoff + bulk-water is assumed to start at this distance from the + soluteselection + soluteselection : "protein and not name H*" + how to select the solute (for bulk density) + + :Returns: a dict containing :class:`hop.sitemap.Density` instances for the + the "solvent" and the "bulk" density; the "solvent" has the bulk + site (largest site in "bulk") inserted as site 1. + + .. Note:: + + The "solvent" density is going to be used throughout the rest of the + protocol. Should you ever remap the sites (i.e. run + :meth:`~hop.sitemap.Density.map_sites` with a different threshold) then + **you must insert the bulk site again** (because the bulk site is + removed for technical reasons whenever the sites change); use the saved + bulk site and the :meth:`hop.sitemap.Density.site_insert_bulk` method. + + .. SeeAlso:: + + Keyword arguments are passed on to :class:`hop.density.DensityCreator` + where all possible keywords are documented; the site mapping is done + with :meth:`hop.sitemap.Density.map_sites`. + """ + filename = kwargs.pop('filename', 'water') # solvent pickle file + bulkname = kwargs.pop('bulkname', 'bulk') # bulk solvent pickle file + solvent_threshold = kwargs.pop('solvent_threshold', numpy.e) + bulk_threshold = kwargs.pop('bulk_threshold', numpy.exp(-0.5)) + density_unit = kwargs.pop('density_unit', "water") + kwargs['mode'] = "all" + DC = hop.density.DensityCreator(*args, **kwargs) + densities = DC.create() + # save the precious files right away + densities['bulk'].save(bulkname) + densities['solvent'].save(filename) + # modifies densities['solvent'] (and also returns it) + density = DC.DensityWithBulk(density_unit=density_unit, + solvent_threshold=solvent_threshold, + bulk_threshold=bulk_threshold) + # save again, but now with mapped sites and bulk site include in "solvent" + densities['bulk'].save(bulkname) + density.save(filename) # This is solvent but with bulk site added. 
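+    # (DC.DensityWithBulk() modified densities['solvent'] in place, so this
+    # second save overwrites the first pickle with the site-mapped version)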
+ # finally write dx files for visualization + densities['bulk'].export() + density.export() + logger.info("generate_densities(): saved densities and exported dx files") + return densities + +def make_density(psf,dcd,filename,delta=1.0,atomselection='name OH2', + backend='MDAnalysis',**kwargs): + """Build the density by histogramming all the water oxygens in a dcd. + + density = make_density(psf,dcd,filename,delta=1.0) + + The function builds the density object, writes it to disk, and + also exports it as a dx file for visualization (use + vizualize_density(density)). + + :Arguments: + + *psf + topology + *dcd* + trajectory (should be RMS fitted to a reference frame) + *filename* + default filename for the density + *delta* + grid spacing Angstrom + *backend* + "MDAnalysis" or "VMD" to be used for histogramming the density + ["MDAnalysis"] + + *kwargs* (depend on backend): + only for MDAnalysis: + *padding* + increase box dimensions for 3D histogramming by padding + *soluteselection* + *cutoff* + for bulk density: setting both `soluteselection='protein and not name H*'` + and `cutoff=3.5` A selects *' NOT WITHIN OF '* + + only for VMD: + *load_new* + `True`: load psf and dcd from file. `False`: use already loaded + psf and dcd + + :Returns: *density*, :class:`hop.sitemap.Density` object; the density is + converted to a fraction of the density of bulk TIP3P water + + """ + if backend == 'MDAnalysis': + density = hop.sitemap.density_from_trajectory( + psf,dcd,delta=delta,atomselection=atomselection, + verbosity=3,**kwargs) + elif backend == 'VMD': + density = hop.sitemap.density_from_volmap( + psf,dcd,delta=delta,atomselection=atomselection, + verbosity=3,**kwargs) + else: + errmsg = "Unknown backend '%s'." % backend + logger.fatal(errmsg) + raise ValueError(errmsg) + density.convert_density('TIP3P') + density.save(filename) + density.export() + + logger.info("make_density() completed. Run map_sites(density,...) next") + return density + +def make_xstal_density(pdb,filename,**kwargs): + """Generate a density from the crystalwaters in a PDB. + + For arguments see :func:`make_density`. + + (These water are typically named HOH.) + + .. SeeAlso:: Water molecules are counted as point-like + particles. One can also use + :class:`hop.sitemap.BfactorDensityCreator` to broaden water + molecules according to their B-factor. + + """ + kwargs.setdefault('atomselection', "resname HOH") + return make_density(pdb,filename,**kwargs) + +def analyze_density(density,figure='sitestats'): + """Site statistics based on the density alone. + + Plots site volumes, average densities and occupancy, and writes it to the + pdf file
<figure>.pdf
+    """
+    import pylab
+    #import os
+
+    # convert density to the chosen density unit (typically, relative to bulk water)
+    factor = hop.constants.get_conversion_factor('density',
+             density.unit['length'],density.unit['density'])
+
+    x,N,DN = density.site_occupancy(include='sites')
+    x,V = density.site_volume(include='sites')
+
+    pylab.clf()
+    pylab.title('Site statistics for threshold %.2f (%s)' %
+                (density.P['threshold'],density.unit['threshold']))
+    pylab.plot(x,V/10,'ro')
+    pylab.plot(x,factor * N/V,'r.')
+    pylab.plot(x,N,'b.')
+    pylab.legend(('V/10 %s**3' % density.unit['length'],
+                  'density %s' % density.unit['density'],
+                  'occupancy'))
+    pylab.errorbar(x,factor*N/V,yerr=factor*DN/V,fmt=None,ecolor='r',capsize=0)
+    pylab.errorbar(x,N,yerr=DN,fmt=None,ecolor='b',capsize=0)
+    pylab.xlabel('site label')
+
+    #eps = figure+'.eps'
+    pdf = figure+'.pdf'
+    pylab.savefig(pdf)
+    #os.system('epstopdf --outfile=%(pdf)s %(eps)s' % locals())
+    #os.remove(eps)
+
+def visualize_density(density):
+    """Visualize the trajectory with the density in VMD.
+
+    visualize_density(density)
+
+    :Arguments:
+       *density*
+          hop.sitemap.Density object
+
+    """
+    dx = density.filename() + '.dx'
+    os.system('vmd '+density.metadata['psf']+' '+density.metadata['dcd']+' -m '+dx)
+
+def make_hoppingtraj(density,filename,**hopargs):
+    """Create the hopping trajectory from a density with a site map.
+
+    hops = make_hoptraj(density,filename)
+
+    :Arguments:
+      *density*
+          density object with a site map
+      *filename*
+          prefix for the hop trajectory files (psf and dcd)
+      *hopargs*
+          keyword args to add to :class:`~hop.trajectory.HoppingTrajectory` such
+          as `fixtrajectory = {'delta':10.22741474887299}`
+
+    This function relies on the density's metadata. In particular it
+    uses `density.metadata['psf']` and `metadata['dcd']` to find its input
+    data and `metadata['atomselection']` to define the atoms to track.
+    """
+    try:
+        if len(density.sites) < 2:
+            raise ValueError
+    except (AttributeError, ValueError):
+        errmsg = 'The density misses a site map or has only one site.'
+        logger.fatal(errmsg)
+        raise ValueError(errmsg)
+
+    u = MDAnalysis.Universe(density.metadata['psf'],density.metadata['dcd'])
+    group = u.selectAtoms(density.metadata['atomselection'])
+    hops = hop.trajectory.HoppingTrajectory(u.trajectory,group,density,**hopargs)
+    hops.write(filename)
+    return hops
+
+def build_hoppinggraph(hoppingtrajectory,density):
+    """Compute the graph of all site hops and calculate the rate constants.
+
+    tgraph = build_hoppinggraph(hops,density)
+
+    :Arguments:
+      hops
+          hop.trajectory.HoppingTrajectory object
+      density
+          hop.sitemap.Density object
+
+    :Returns: tgraph, a :class:`hop.graph.TransportNetwork` object
+    """
+    tgraph = hop.graph.TransportNetwork(hoppingtrajectory,density)
+    tgraph.compute_residency_times()
+    return tgraph
+
+def build_hoppinggraph_fromfiles(hoppingtrajectory_filename,density_filename):
+    """Compute the TransportNetwork including HoppingGraph from files.
+ + tn = build_hoppinggraph_fromfiles('hoptraj','water_density') + + Input: + hoppingtrajectory_filename filename for HoppingTrajectory psf and dcd + density_filename filename for pickled Density + + Output: + tn hop.graph.TransportNetwork object (qv) + """ + hoppingtrajectory = hop.trajectory.HoppingTrajectory(filename=hoppingtrajectory_filename) + density = hop.sitemap.Density(filename=density_filename) + return build_hoppinggraph(hoppingtrajectory,density) + +def hopgraph_basic_analysis(h, density, filename): + """Do some simple analysis tasks on the hopgraph. + + hopgraph_basic_analysis(h, density, filename) + + :Arguments: + h + hopgraph, a :class:`hop.graph.HoppingGraph` + density + density, a :class:`hop.sitemap.Density` + filename + default filename for generated files; all files and new + directories are written in the directory pointed to by the + path component + """ + analysisdir = os.path.dirname(filename) + logger.warn("Setting analysisdir = %(analysisdir)r", vars()) + + ratesfile = os.path.join(analysisdir, 'rates.txt') + h.show_rates(filename=ratesfile) + logger.info("Wrote all rates to %(ratesfile)r.", vars()) + + h.filter(exclude={'outliers':True, 'bulk':True}) + h.export(filename, format='XGMML') + logger.info("Exported hopgraph as %(filename)s.xgmml", vars()) + + logger.info("Generating 3D graph %(filename)s.psf/pdb", vars()) + logger.info("Note: bulk site omitted for clarity.") + h.export3D(density) + + survival_time_dir = os.path.join(analysisdir, 'survival_times') + logger.info("Generating survival times plots in %(survival_time_dir)r", vars()) + logger.info("This takes a while. Note: transitions to/from bulk are excluded, Nmin=5.") + + h.filter(exclude={'outliers':True, 'bulk':True, 'Nmin':5}) + h.plot_fits(directory=survival_time_dir, ncol=2, nrow=3) diff --git a/hop/plot/.probability_plot.py.swp b/hop/plot/.probability_plot.py.swp new file mode 100644 index 0000000..e1110e5 Binary files /dev/null and b/hop/plot/.probability_plot.py.swp differ diff --git a/hop/rate_distribution.py b/hop/rate_distribution.py new file mode 100644 index 0000000..7f46bea --- /dev/null +++ b/hop/rate_distribution.py @@ -0,0 +1,15 @@ +import hop.graph +import numpy as np +import networkx as nx +import matplotlib.pyplot as plt + +def rate_distribution(hopgraph,bins=10,alpha=0.5,log=False): + + h=hopgraph + h.filter(exclude={'bulk':True,'unconnected':True,'outliers':True}) + rates=[] + for i in h.filtered_graph: + for j in h.filtered_graph[i]: + rates.append(h.filtered_graph[i][j]['k']) + + plt.hist(rates,bins,alpha=alpha,log=log) diff --git a/hop/sitemap.py b/hop/sitemap.py index 103f164..680d0cf 100644 --- a/hop/sitemap.py +++ b/hop/sitemap.py @@ -29,6 +29,7 @@ import os, os.path import errno import cPickle +import pickle import warnings import numpy # need v >= 1.0 @@ -42,7 +43,7 @@ from . import utilities from .utilities import msg,set_verbosity,get_verbosity, flatten, sorted, \ DefaultDict, fixedwidth_bins, iterable, asiterable - +from itertools import izip class Grid(utilities.Saveable): """Class to manage a multidimensional grid object. @@ -100,7 +101,7 @@ def __init__(self,grid=None,edges=None,filename=None,dxfile=None, If the input histogram consists of counts per cell then the make_density() method converts the grid to a physical - density. For a probability density, divide it by grid.sum(0 or + density. For a probability density, divide it by grid.sum() or use normed=True right away in histogramdd(). 
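+        A minimal example (``positions`` stands for any (N,3) coordinate array)::
+
+          g, edges = numpy.histogramdd(positions, bins=20, normed=True)
+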
diff --git a/hop/sitemap.py b/hop/sitemap.py
index 103f164..680d0cf 100644
--- a/hop/sitemap.py
+++ b/hop/sitemap.py
@@ -29,6 +29,7 @@
 import os, os.path
 import errno
 import cPickle
+import pickle
 import warnings
 
 import numpy  # need v >= 1.0
@@ -42,7 +43,7 @@
 from . import utilities
 from .utilities import msg,set_verbosity,get_verbosity, flatten, sorted, \
      DefaultDict, fixedwidth_bins, iterable, asiterable
-
+from itertools import izip
 
 class Grid(utilities.Saveable):
     """Class to manage a multidimensional grid object.
@@ -100,7 +101,7 @@ def __init__(self,grid=None,edges=None,filename=None,dxfile=None,
 
         If the input histogram consists of counts per cell then the
         make_density() method converts the grid to a physical
-        density. For a probability density, divide it by grid.sum(0 or
+        density. For a probability density, divide it by grid.sum() or
         use normed=True right away in histogramdd().
 
         If grid, edges, AND filename are given then the
@@ -250,7 +251,7 @@ def convert_density(self,unit='Angstrom'):
         There may be some undesirable cross-interactions with convert_length...
         """
         if not self.P['isDensity']:
-            raise RuntimeError('The grid is not a density so converty_density(0 makes no sense.')
+            raise RuntimeError('The grid is not a density so convert_density() makes no sense.')
         if unit == self.unit['density']:
             return
         self.grid *= constants.get_conversion_factor('density',self.unit['density'],unit)
@@ -1167,7 +1168,7 @@ def warn_and_remove(density):
         ebunch = [map(tuple,e) for e in edges]    # make nodes hashable tuples
         g = NX.Graph()
         g.add_edges_from(ebunch)
-        commonsites = NX.connected_components(g)  # each item: collection of equivalent sites
+        commonsites = list(NX.connected_components(g))  # each item: collection of equivalent sites
         # liz overlap
         overlap = find_overlap_coeff(densities[SELF],densities[REF])
@@ -1547,10 +1548,20 @@ def _label_connected_graphs(self):
        marking up the map with -1 and then looking for the -1 at the end of
        this function.)
        """
-        self.sites = list(NX.connected_components(self.graph))  # this does the hard work
+
+        self.sites = [list(component)
+                      for component in NX.connected_components(self.graph)]  # this does the hard work
+        volume_list = map(len, self.sites)                       # site 'volume' = number of grid cells
+        volume_sites_list = list(izip(volume_list, self.sites))  # pair each volume with its site
+        volume_sites_list.sort()       # sorts on the first tuple element, the volume
+        volume_sites_list.reverse()    # largest site first, so it gets the lowest label
+        self.sites = [site for volume, site in volume_sites_list]
         self.sites.insert(SITELABEL['interstitial'],[])  # placeholder for interstitial
         self._draw_map_from_sites()
         self._annotate_sites()
+
     def _draw_map_from_sites(self):
         """Label cells in the map based on the site list.
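
The ``_label_connected_graphs`` change above orders sites by decreasing
volume (cell count) before labelling, so that the largest site receives the
lowest label. A standalone sketch of the same idea, assuming ``graph`` is a
NetworkX graph of occupied grid cells::

    import networkx as nx
    sites = sorted((list(c) for c in nx.connected_components(graph)),
                   key=len, reverse=True)   # largest site first
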
diff --git a/hop/trajectory.py b/hop/trajectory.py
index cd45922..16a602e 100644
--- a/hop/trajectory.py
+++ b/hop/trajectory.py
@@ -280,7 +280,7 @@ def write(self,filename,start=None,step=None,delta=None,load=True):
         for ts in self.map_dcd():
             dcdwriter.write_next_timestep(ts)
             pm.echo(ts.frame)
-        dcdwriter.close()
+        dcdwriter.close()
         logger.info("HoppingTrajectory.write(): wrote hoptraj %r.", dcdname)
 
         self.write_psf(psfname)
@@ -397,13 +397,14 @@ def map_dcd(self,start=None,stop=None,skip=1):
         self._init_coord2hop()
         #for traj_ts in self.traj[start:stop]:
-        for traj_ts in self.traj:       # no slicing for big trajectories
+        for traj_ts in self.traj:       # no slicing for big trajectories
             yield self._coord2hop(traj_ts)
 
     def _init_coord2hop(self):
         """Allocate helper arrays for _coord2hop()"""
         # initialization with 'interstitial' is CRUCIAL: throws away first frame
         # and makes sure that we don't keep spurious sites from 1st frame around
+        self._sites_last = SITELABEL['interstitial'] * numpy.ones(self.tgroup.n_atoms)
         self._offsites = numpy.empty(self.tgroup.n_atoms,dtype=bool)
@@ -461,9 +462,9 @@ def _coord2hop(self,ts):
         #
         # pos[:,0] = site(t), pos[:,1] = orbit site, pos[:,2] = 0 (unused)
         pos = self.ts._pos
         # assign slices to avoid loop (thanks to Naveen)
-        pos[:,0] = [self.buffered_map[indices[0][iatom],indices[1][iatom],indices[2][iatom]] \
+        pos[:,0] = [self.buffered_map[indices[0][iatom],indices[1][iatom],indices[2][iatom]]\
                     for iatom in xrange(N)]
-        s = pos[:,0]
+        s = pos[:,0]
         self._offsites[:] = (s == SITELABEL['interstitial']) | (s == SITELABEL['outlier'])
         pos[:,1] = s
         # particles in interstital and outliers are assigned their previous site
         pos[self._offsites,1] = self._sites_last[self._offsites]
@@ -472,8 +473,8 @@ def _coord2hop(self,ts):
         # _sites_last[] was initialized to 'interstitial': this ensures proper accounting
         # for all later steps (because 'interstitial' is thrown away at the analysis stage)
         self._sites_last[:] = pos[:,1]  # save orbit sites for next step
-        return self.ts
-
+        return self.ts
+
     def iterator(self):
         return self.__iter__()
 
@@ -640,7 +641,7 @@ def next(self):
             nextTS = self._read_next_timestep
         else:
             nextTS = self._map_next_timestep
-        return nextTS()
+        return nextTS()
 
     def rewind(self):
         if self.TAPtraj:
diff --git a/hop/txt/trajectory.txt b/hop/txt/trajectory.txt
new file mode 100644
index 0000000..50560ff
--- /dev/null
+++ b/hop/txt/trajectory.txt
@@ -0,0 +1,20 @@
+This outline is intended to map out the code in hop.trajectory and to serve
+as a waste-bin for my thoughts on it, to help me figure out what exactly is
+there. A more organized outline should be made in the future.
+
+IMPORTS:
+hop.constants, hop.utilities, msg, set_verbosity, SelectionError. These occur
+everywhere in Oliver's code. They are general-purpose utility modules; the
+only unifying theme is that their contents are useful in all sorts of places.
+
+numpy, MDAnalysis, MDAnalysis.coordinates, warnings. Two big packages full of
+trajectory analysis software and general analysis software; they are the
+work-horses of this business.
+
+Lines 39 - 47 (try statement): a try statement runs the try clause; if an
+exception is raised, control passes to the matching except clause (a finally
+clause, if present, always runs on the way out). There is no automatic retry.
+Here it presumably guards the imports; see the sketch at the end of this file.
+
+totaltime (58 - 60): calls a function from some other module; presumably
+bookkeeping.
+
+Line 63: Very important! I finally understand what the trajectory is; it says
+which molecules were at which point in the network.
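+
+A sketch of the guarded-import idiom such a try statement typically
+implements (an illustration under that assumption, not code copied from
+hop.trajectory):
+
+    try:
+        from MDAnalysis.coordinates.DCD import DCDWriter
+    except ImportError:
+        DCDWriter = None   # callers check for None before writing DCDs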
diff --git a/hop/walker.py b/hop/walker.py
new file mode 100644
index 0000000..77c6946
--- /dev/null
+++ b/hop/walker.py
@@ -0,0 +1,234 @@
+import random
+import numpy as np
+import copy
+import MDAnalysis
+
+def rate_sum_update(hopgraph,oneway=True):
+    """Attach the total outgoing rate under key 'rate_sum' to every node of
+    the filtered graph, and to the bulk node (site 1) of the full graph."""
+    h=hopgraph
+    if oneway:
+        h.filter(exclude={"bulk":True,"outliers":True,"unconnected":True,'oneway':True})
+    else:
+        h.filter(exclude={"bulk":True,"outliers":True,"unconnected":True})
+    g=copy.deepcopy(h)
+    for node in h.filtered_graph:
+        rate_sum=0
+        for neighbor in g.filtered_graph[node]:
+            rate_sum+=g.filtered_graph[node][neighbor]['k']
+        # stores a scalar under a string key in the adjacency dict; consumers
+        # must skip this sentinel when iterating over neighbors
+        g.filtered_graph[node]['rate_sum']=rate_sum
+    rate_sum_bulk=0
+    for neighbor in h.graph[1]:
+        rate_sum_bulk+=h.graph[1][neighbor]['k']
+    g.graph[1]['rate_sum']=rate_sum_bulk
+    return g
+
+def flux_calculator(hopgraph,topology,cutoff=1,delta=0.1,steps=1000,particle_num=400,filename='walker_rates',edge_count=False,track_entropy_production=False,oneway=True,up_flux=True,writeout_time=100,writeout=False):
+    """Kinetic Monte Carlo walker on a hop graph: inject particles on one
+    side of the protein, remove them on the other, and record the flux."""
+    h=hopgraph
+    if oneway:
+        h.filter(exclude={"bulk":True,"outliers":True,"unconnected":True,'oneway':True})
+    else:
+        h.filter(exclude={"bulk":True,"outliers":True,"unconnected":True})
+
+    h=rate_sum_update(h,oneway)
+    trajectories=np.zeros((4,particle_num))   # rows: site, local time, waiting time, entropy
+    u=MDAnalysis.Universe(topology)
+    p=u.selectAtoms('protein')
+    z_center=p.centerOfGeometry()[2]
+
+    def generate_waiting_time(site):
+        rate_sum=h.filtered_graph[site]['rate_sum']  # normalization factor
+        r1=random.random()
+        waiting_time=-np.log(r1)/rate_sum  # assume exponential jump attempt waiting time
+        return waiting_time
+
+    def add_particles(trajectories,hopgraph,sites,rate_sum):
+        random.seed()
+        h=hopgraph
+        r1=random.random()
+        probability=0
+        probability2=0
+        added=False
+        last_site=0
+        random.shuffle(sites)
+        for site in sites:
+            if last_site!=0:
+                probability+=h.graph[1][last_site]['k']/rate_sum
+            probability2+=h.graph[1][site]['k']/rate_sum
+            if probability<=r1<=probability2:
+                shuffle_list=range(len(trajectories[0]))
+                random.shuffle(shuffle_list)
+                shuffle_list_iter=iter(shuffle_list)
+                for state in shuffle_list_iter:
+                    if trajectories[0][state]==0 and site not in trajectories[0]:
+                        trajectories[0][state]=site
+                        added=True
+                        break
+            if added:
+                break
+            last_site=site
+        return trajectories
+
+    def remove_particles(trajectories,h,sites,rate_sum):
+        random.seed()
+        r1=random.random()
+        probability=0
+        probability2=0
+        removed=False
+        last_index=0
+        shuffle_list=range(len(trajectories[0]))
+        random.shuffle(shuffle_list)
+        random_draw_iter=iter(shuffle_list)
+        for index in random_draw_iter:   # iterates over particle indices, not site labels
+            if trajectories[0][index]!=0 and trajectories[0][index] in sites:
+                if last_index!=0:
+                    probability+=h.graph[trajectories[0][last_index]][1]['k']/rate_sum
+                probability2+=h.graph[trajectories[0][index]][1]['k']/rate_sum
+                if probability<=r1<=probability2:
+                    removed=True
+                    trajectories[0][index]=0
+                    trajectories[1][index]=0
+                    trajectories[2][index]=0
+                    trajectories[3][index]=0
+                    break
+            last_index=index
+        if removed:
+            return trajectories
+        else:
+            return False
+
+    def bulk_time_calculator(sites):
+        h=hopgraph
+        bulk_rate=0
+        for site in h.graph[1]:
+            if site in sites:
+                bulk_rate+=h.graph[1][site]['k']
+        bulk_time=1/bulk_rate
+        return bulk_time
+
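+    # Sketch of the kinetic Monte Carlo draw used by generate_waiting_time
+    # above: with total escape rate K = sum_j k(i->j) (stored as 'rate_sum'),
+    # an exponentially distributed waiting time follows from a uniform variate:
+    #
+    #   u = random.random()
+    #   t = -np.log(u)/K   # exponential with mean 1/K
+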
+    def poisson_step(site,site_index,walker_time,waiting_time,delta,entropy_production):
+        random.seed()
+        w=waiting_time
+        r2=random.random()
+        probability=0
+        for neighbor in h.filtered_graph[site]:
+            if type(neighbor)!=str:   # skip the 'rate_sum' sentinel entry
+                rate_sum=h.filtered_graph[site]['rate_sum']
+                probability+=h.filtered_graph[site][neighbor]['k']
+                if probability/rate_sum >= r2:
+                    if walker_time>=w:
+                        walker_time=0
+                        w=generate_waiting_time(neighbor)
+                        trajectories[0][site_index]=neighbor
+                        trajectories[1][site_index]=0
+                        trajectories[2][site_index]=generate_waiting_time(site)
+                        if track_entropy_production:
+                            try:
+                                # index by particle (site_index), not by site label
+                                trajectories[3][site_index]+=np.log(np.divide(h.filtered_graph[site][neighbor]['k'],h.filtered_graph[neighbor][site]['k']))
+                            except (KeyError, ZeroDivisionError):
+                                # missing or zero reverse rate: entropy production diverges
+                                trajectories[3][site_index]=np.inf
+                        break
+                    else:
+                        walker_time+=delta
+                        trajectories[0][site_index]=site
+                        trajectories[1][site_index]=walker_time+delta
+                        trajectories[2][site_index]=waiting_time
+                        break
+        return trajectories
+
+    def main(particle_num=particle_num,trajectories=trajectories,up_flux=up_flux,filename=filename):
+        random.seed()
+        rate_sum_bottom=0
+        rate_sum_top=0
+        entropy_production=0
+        if up_flux:
+            top_sites_unfiltered = [site for site in h.filtered_graph.nodes() if
+                                    h.site_properties.center[site][2]>=z_center]
+            top_sites=[site for site in top_sites_unfiltered if h.graph.has_edge(1,site)]
+            bottom_sites_unfiltered = [site for site in h.filtered_graph.nodes() if
+                                       h.site_properties.center[site][2]<=z_center]
+            bottom_sites=[site for site in bottom_sites_unfiltered if h.graph.has_edge(site,1)]
+        else:
+            top_sites_unfiltered = [site for site in h.filtered_graph.nodes() if
+                                    h.site_properties.center[site][2]<=z_center]
+            top_sites=[site for site in top_sites_unfiltered if h.graph.has_edge(1,site)]
+            bottom_sites_unfiltered = [site for site in h.filtered_graph.nodes() if
+                                       h.site_properties.center[site][2]>=z_center]
+            bottom_sites=[site for site in bottom_sites_unfiltered if h.graph.has_edge(site,1)]
+        for site in bottom_sites:
+            rate_sum_bottom+=h.graph[site][1]['k']
+        for site in top_sites:
+            rate_sum_top+=h.graph[1][site]['k']
+
+        bulk_time_top=bulk_time_calculator(top_sites)
+        bulk_time_bottom=bulk_time_calculator(bottom_sites)
+        cycle_time_top=bulk_time_top/delta
+        cycle_time_bottom=bulk_time_bottom/delta
+        steps_elapsed_top=0
+        steps_elapsed_bottom=0
+
+        counts=0
+        rate=0
+        rate_observation=0
+        steps_elapsed_total=0
+        rates=open(filename+'.txt','w')
+        rates.write('up_flux %s delta %s steps %s\n' % (up_flux, delta, steps))
+        entropy_production=0
+        writeout_index=0
+        trajectory_part=np.zeros((4,particle_num,writeout_time))
+        writeout_cycle=0
+        for step in xrange(steps):
+            if step>=1:
+                writeout_cycle+=1
+            if writeout and step % writeout_time == 1:  # flush one block of snapshots per writeout_time steps
+                writeout_cycle=0
+                trajectory_part[:,:,writeout_cycle]=trajectories
+                writeout_index+=1
+                np.save(filename + str(writeout_index),trajectory_part)
+                trajectory_part=np.zeros((4,particle_num,writeout_time))
+            steps_elapsed_total+=1
+            random.seed()
+            occupied=[site for site in trajectories[0] if site!=0]
+            occupied_bottom=[site for site in occupied if site in bottom_sites]
+            if steps_elapsed_top>cycle_time_top:
+                trajectories=add_particles(trajectories,h,top_sites,rate_sum_top)
+                steps_elapsed_top=0
+            else:
+                steps_elapsed_top+=1
+            if steps_elapsed_bottom>cycle_time_bottom:
+                out=remove_particles(trajectories,h,bottom_sites,rate_sum_bottom)
+                if type(out)!=bool:
+                    trajectories=out
+                    steps_elapsed_bottom=0
+                    counts+=1
+            else:
+                steps_elapsed_bottom+=1
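+            # propagate every occupied walker by one kinetic Monte Carlo step,
+            # visiting the particles in random order to avoid update bias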
+            shuffle_list=range(len(trajectories[0]))
+            random.shuffle(shuffle_list)
+            random_draw_iter=iter(shuffle_list)
+            for trajectory in random_draw_iter:
+                site=trajectories[0][trajectory]
+                current_time=trajectories[1][trajectory]
+                waiting_time=trajectories[2][trajectory]
+                if site!=0.0:
+                    trajectories=poisson_step(site,trajectory,current_time,waiting_time,delta,entropy_production)
+                    entropy_production+=trajectories[3][trajectory]  # index by particle, not by site label
+            if step>0:
+                rate=np.divide(counts,delta*step,dtype=np.float64)
+                rates.write(str(rate)+'\n')  # one running rate estimate per line
+            if steps_elapsed_total*delta>=1:
+                steps_elapsed_total=0
+        rate=np.divide(counts,delta*steps,dtype=np.float64)
+        net_entropy_production=np.divide(entropy_production,steps*delta)
+        rates.close()
+        return rate,net_entropy_production
+
+    return main(particle_num=particle_num,trajectories=trajectories,filename=filename)
+
diff --git a/scripts/hop-generate-densities.py b/scripts/hop-generate-densities.py
index 8adbbc3..da05bf8 100755
--- a/scripts/hop-generate-densities.py
+++ b/scripts/hop-generate-densities.py
@@ -35,7 +35,7 @@ def generate_densities_locally(topology, trajectory, localcopy=False,
                                **kwargs):
-    # default values in hop.density.DensityGenerator
+    # default values in hop.density.DensityCreator
     def _generate_densities(traj):
         return hop.interactive.generate_densities(topology, traj, **kwargs)
     if localcopy:
diff --git a/setup.py b/setup.py
index d1be8ab..98a1df7 100644
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,14 @@
     sys.version_info[:2]
     print "Please upgrade your version of python."
     sys.exit(-1)
+<<<<<<< HEAD
+=======
+if sys.version_info[:2] >= (2, 6):
+    networkx_requirements = 'networkx>1.9.1'
+else:
+    # releases newer than 1.9.1 need Python 2.6+, so pin the last compatible version
+    networkx_requirements = 'networkx==1.9.1'
+>>>>>>> origin
 
 setup(name="Hop",
       version="0.3.5-dev",