Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2016-2023 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 :
23 : #include "bias/Bias.h"
24 : #include "core/ActionRegister.h"
25 : #include "core/PlumedMain.h"
26 : #include "core/Value.h"
27 : #include "tools/File.h"
28 : #include "tools/OpenMP.h"
29 : #include "tools/Random.h"
30 : #include "tools/Communicator.h"
31 : #include <chrono>
32 : #include <numeric>
33 :
34 : #ifndef M_PI
35 : #define M_PI 3.14159265358979323846
36 : #endif
37 :
38 : namespace PLMD {
39 : namespace isdb {
40 :
41 : //+PLUMEDOC ISDB_BIAS METAINFERENCE
42 : /*
43 : Calculates the Metainference energy for a set of experimental data.
44 :
45 : Metainference \cite Bonomi:2016ip is a Bayesian framework
46 : to model heterogeneous systems by integrating prior information with noisy, ensemble-averaged data.
47 : Metainference models a system and quantifies the level of noise in the data by considering a set of replicas of the system.
48 :
49 : Calculated experimental data are given as input using ARG, while reference experimental values
50 : can be provided either from fixed components of other actions using PARARG or as numbers using
51 : PARAMETERS. The default behavior is to average the data over the available replicas;
52 : if this is not wanted, the keyword NOENSEMBLE prevents this averaging.
53 :
54 : Metadynamics Metainference \cite Bonomi:2016ge, or more generally biased Metainference, requires knowledge of
55 : the biasing potential in order to calculate the weighted average. In this case the value of the bias
56 : can be provided as the last argument in ARG together with the keyword REWEIGHT. To avoid the noise
57 : resulting from the instantaneous value of the bias, the weight of each replica can be averaged
58 : over a given time using the keyword AVERAGING.
59 :
60 : The data can be averaged over multiple replicas and weighted for a bias if present.
61 : The functional form of Metainference can be chosen among five variants, selected
62 : with NOISETYPE=GAUSS,MGAUSS,OUTLIERS,MOUTLIERS,GENERIC, which correspond to modelling the noise for
63 : the arguments as a single gaussian common to all the data points, a gaussian per data
64 : point, a single long-tailed gaussian common to all the data points, a long-tailed
65 : gaussian per data point, or two distinct noises as in the most general formulation of Metainference.
66 : In this latter case the noise of the replica-averaging is gaussian (one per data point), the noise for
67 : the comparison with the experimental data can be chosen using the keyword LIKELIHOOD
68 : between gaussian and log-normal (one per data point), and the evolution of the estimated average
69 : over an infinite number of replicas is driven by DFTILDE.
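 :
 : As a sketch of the functional form that is sampled (our notation, mirroring the getEnergyGJ
 : function below), for NOISETYPE=GAUSS with scaling factor \f$s\f$ and offset \f$o\f$ the
 : Metainference energy reads
 : \f[
 : E = k_B T \left[ \sum_{i=1}^{N} \frac{(s\langle f_i \rangle - d_i + o)^2}{2(\sigma^2 + s^2\sigma_{mean}^2)} + \frac{N}{2}\log 2\pi(\sigma^2 + s^2\sigma_{mean}^2) + \frac{1}{2}\log\frac{\sigma^2 + \sigma_{mean}^2}{2} \right]
 : \f]
 : where \f$\langle f_i \rangle\f$ are the replica-averaged calculated data, \f$d_i\f$ the reference
 : values, and the last term is the Jeffreys prior on \f$\sigma\f$; the other noise models use
 : analogous expressions, with one \f$\sigma_i\f$ per data point and/or a long-tailed functional form.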
70 :
71 : As for Metainference theory, there are two sigma values: SIGMA_MEAN0 represents the
72 : error made when calculating an average quantity using a finite set of replicas and should
73 : be set as small as possible following the guidelines for replica-averaged simulations
74 : in the framework of the Maximum Entropy Principle. Alternatively, this can be obtained
75 : automatically using the internal sigma-mean optimization introduced in \cite Lohr:2017gc
76 : (OPTSIGMAMEAN=SEM); in this second case sigma_mean is estimated from the maximum standard error
77 : of the mean, either over the whole simulation or over a time window defined with the keyword AVERAGING.
78 : SIGMA_BIAS is an uncertainty parameter, sampled by an MC algorithm in the bounded interval
79 : defined by SIGMA_MIN and SIGMA_MAX. The initial value is set with SIGMA0. The MC move is a
80 : random displacement of maximum size DSIGMA. If the number of data points is
81 : too large and the acceptance rate drops, it is possible to make the MC move over mutually
82 : exclusive, random subsets of size MC_CHUNKSIZE and to run more than one move by setting MC_STEPS
83 : in such a way that MC_CHUNKSIZE*MC_STEPS covers all the data points.
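 : For example, with 200 data points one could set MC_CHUNKSIZE=50 and MC_STEPS=4, so that the
 : 50*4=200 proposed moves of each MC cycle touch every data point exactly once.
 :
 : As a minimal sketch (an illustrative input assembled for this documentation, not a tested
 : example), the internal sigma-mean optimization described above could be enabled as follows:
 :
 : \plumedfile
 : #SETTINGS NREPLICAS=2
 : d1: DISTANCE ATOMS=1,2
 :
 : METAINFERENCE ...
 : ARG=d1
 : PARAMETERS=0.5
 : NOISETYPE=GAUSS
 : OPTSIGMAMEAN=SEM AVERAGING=200
 : SIGMA0=0.1 SIGMA_MIN=0.0001 SIGMA_MAX=3 DSIGMA=0.01
 : LABEL=mi
 : ... METAINFERENCE
 : \endplumedfile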
84 :
85 : Calculated and experimental data can be compared modulo a scaling factor and/or an offset
86 : using SCALEDATA and/or ADDOFFSET; the sampling is performed by an MC algorithm using either
87 : a flat or a gaussian prior, selected with SCALE_PRIOR or OFFSET_PRIOR.
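 : Note that with SCALE_PRIOR=GAUSSIAN (or OFFSET_PRIOR=GAUSSIAN) the value of DSCALE (DOFFSET)
 : plays the role of the width of the gaussian prior rather than of the maximum MC displacement,
 : as also reported in the log.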
88 :
89 : \par Examples
90 :
91 : In the following example we calculate a set of \ref RDC, take their replica average and
92 : compare it with a set of experimental values. The RDCs are compared with
93 : the experimental data up to a multiplication factor SCALE that is also sampled by
94 : MC on the fly.
95 :
96 : \plumedfile
97 : RDC ...
98 : LABEL=rdc
99 : SCALE=0.0001
100 : GYROM=-72.5388
101 : ATOMS1=22,23
102 : ATOMS2=25,27
103 : ATOMS3=29,31
104 : ATOMS4=33,34
105 : ... RDC
106 :
107 : METAINFERENCE ...
108 : ARG=rdc.*
109 : NOISETYPE=MGAUSS
110 : PARAMETERS=1.9190,2.9190,3.9190,4.9190
111 : SCALEDATA SCALE0=1 SCALE_MIN=0.1 SCALE_MAX=3 DSCALE=0.01
112 : SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
113 : SIGMA_MEAN0=0.001
114 : LABEL=spe
115 : ... METAINFERENCE
116 :
117 : PRINT ARG=spe.bias FILE=BIAS STRIDE=1
118 : \endplumedfile
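 :
 : Besides the bias, this action exposes a number of components that can be monitored with
 : \ref PRINT as well: with the input above these include spe.scale, spe.acceptScale,
 : spe.acceptSigma and, since NOISETYPE=MGAUSS, one spe.sigma-# and one spe.sigmaMean-#
 : per data point.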
119 :
120 : In the following example, instead of using one uncertainty parameter per data point, we use
121 : a single uncertainty value in a long-tailed gaussian to account for outliers; furthermore,
122 : the data are weighted for the bias applied to other variables of the system.
123 :
124 : \plumedfile
125 : RDC ...
126 : LABEL=rdc
127 : SCALE=0.0001
128 : GYROM=-72.5388
129 : ATOMS1=22,23
130 : ATOMS2=25,27
131 : ATOMS3=29,31
132 : ATOMS4=33,34
133 : ... RDC
134 :
135 : cv1: TORSION ATOMS=1,2,3,4
136 : cv2: TORSION ATOMS=2,3,4,5
137 : mm: METAD ARG=cv1,cv2 HEIGHT=0.5 SIGMA=0.3,0.3 PACE=200 BIASFACTOR=8 WALKERS_MPI
138 :
139 : METAINFERENCE ...
140 : #SETTINGS NREPLICAS=2
141 : ARG=rdc.*,mm.bias
142 : REWEIGHT
143 : NOISETYPE=OUTLIERS
144 : PARAMETERS=1.9190,2.9190,3.9190,4.9190
145 : SCALEDATA SCALE0=1 SCALE_MIN=0.1 SCALE_MAX=3 DSCALE=0.01
146 : SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
147 : SIGMA_MEAN0=0.001
148 : LABEL=spe
149 : ... METAINFERENCE
150 : \endplumedfile
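 :
 : Note that this second example must be run with at least two replicas (as indicated by the
 : #SETTINGS NREPLICAS=2 line used by the test machinery), since REWEIGHT requires the weights
 : to be defined over a replica ensemble.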
151 :
152 : (See also \ref RDC, \ref PBMETAD).
153 :
154 : */
155 : //+ENDPLUMEDOC
156 :
157 : class Metainference : public bias::Bias {
158 : // experimental values
159 : std::vector<double> parameters;
160 : // noise type
161 : unsigned noise_type_;
162 : enum { GAUSS, MGAUSS, OUTLIERS, MOUTLIERS, GENERIC };
163 : unsigned gen_likelihood_;
164 : enum { LIKE_GAUSS, LIKE_LOGN };
165 : // scale is data scaling factor
166 : // noise type
167 : unsigned scale_prior_;
168 : enum { SC_GAUSS, SC_FLAT };
169 : bool doscale_;
170 : double scale_;
171 : double scale_mu_;
172 : double scale_min_;
173 : double scale_max_;
174 : double Dscale_;
175 : // scale is data scaling factor
176 : // noise type
177 : unsigned offset_prior_;
178 : bool dooffset_;
179 : double offset_;
180 : double offset_mu_;
181 : double offset_min_;
182 : double offset_max_;
183 : double Doffset_;
184 : // scale and offset regression
185 : bool doregres_zero_;
186 : int nregres_zero_;
187 : // sigma is data uncertainty
188 : std::vector<double> sigma_;
189 : std::vector<double> sigma_min_;
190 : std::vector<double> sigma_max_;
191 : std::vector<double> Dsigma_;
192 : // sigma_mean is uncertainty in the mean estimate
193 : std::vector<double> sigma_mean2_;
194 : // this is the estimator of the mean value per replica for generic metainference
195 : std::vector<double> ftilde_;
196 : double Dftilde_;
197 :
198 : // temperature in kbt
199 : double kbt_;
200 :
201 : // Monte Carlo stuff
202 : std::vector<Random> random;
203 : unsigned MCsteps_;
204 : long long unsigned MCaccept_;
205 : long long unsigned MCacceptScale_;
206 : long long unsigned MCacceptFT_;
207 : long long unsigned MCtrial_;
208 : unsigned MCchunksize_;
209 :
210 : // output
211 : Value* valueScale;
212 : Value* valueOffset;
213 : Value* valueAccept;
214 : Value* valueAcceptScale;
215 : Value* valueAcceptFT;
216 : std::vector<Value*> valueSigma;
217 : std::vector<Value*> valueSigmaMean;
218 : std::vector<Value*> valueFtilde;
219 :
220 : // restart
221 : unsigned write_stride_;
222 : OFile sfile_;
223 :
224 : // others
225 : bool firstTime;
226 : std::vector<bool> firstTimeW;
227 : bool master;
228 : bool do_reweight_;
229 : unsigned do_optsigmamean_;
230 : unsigned nrep_;
231 : unsigned replica_;
232 : unsigned narg;
233 :
234 : // selector
235 : std::string selector_;
236 :
237 : // optimize sigma mean
238 : std::vector< std::vector < std::vector <double> > > sigma_mean2_last_;
239 : unsigned optsigmamean_stride_;
240 : // optimize sigma max
241 : unsigned N_optimized_step_;
242 : unsigned optimized_step_;
243 : bool sigmamax_opt_done_;
244 : std::vector<double> sigma_max_est_;
245 :
246 : // average weights
247 : unsigned average_weights_stride_;
248 : std::vector< std::vector <double> > average_weights_;
249 :
250 : double getEnergyMIGEN(const std::vector<double> &mean, const std::vector<double> &ftilde, const std::vector<double> &sigma,
251 : const double scale, const double offset);
252 : double getEnergySP(const std::vector<double> &mean, const std::vector<double> &sigma,
253 : const double scale, const double offset);
254 : double getEnergySPE(const std::vector<double> &mean, const std::vector<double> &sigma,
255 : const double scale, const double offset);
256 : double getEnergyGJ(const std::vector<double> &mean, const std::vector<double> &sigma,
257 : const double scale, const double offset);
258 : double getEnergyGJE(const std::vector<double> &mean, const std::vector<double> &sigma,
259 : const double scale, const double offset);
260 : void moveTilde(const std::vector<double> &mean_, double &old_energy);
261 : void moveScaleOffset(const std::vector<double> &mean_, double &old_energy);
262 : void moveSigmas(const std::vector<double> &mean_, double &old_energy, const unsigned i, const std::vector<unsigned> &indices, bool &breaknow);
263 : double doMonteCarlo(const std::vector<double> &mean);
264 : void getEnergyForceMIGEN(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
265 : void getEnergyForceSP(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
266 : void getEnergyForceSPE(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
267 : void getEnergyForceGJ(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
268 : void getEnergyForceGJE(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
269 : void get_weights(const unsigned iselect, double &weight, double &norm, double &neff);
270 : void replica_averaging(const double weight, const double norm, std::vector<double> &mean, std::vector<double> &dmean_b);
271 : void get_sigma_mean(const unsigned iselect, const double weight, const double norm, const double neff, const std::vector<double> &mean);
272 : void writeStatus();
273 : void do_regression_zero(const std::vector<double> &mean);
274 :
275 : public:
276 : explicit Metainference(const ActionOptions&);
277 : ~Metainference();
278 : void calculate() override;
279 : void update() override;
280 : static void registerKeywords(Keywords& keys);
281 : };
282 :
283 :
284 : PLUMED_REGISTER_ACTION(Metainference,"METAINFERENCE")
285 :
286 34 : void Metainference::registerKeywords(Keywords& keys) {
287 34 : Bias::registerKeywords(keys);
288 34 : keys.use("ARG");
289 68 : keys.add("optional","PARARG","reference values for the experimental data, these can be provided as arguments without derivatives");
290 68 : keys.add("optional","PARAMETERS","reference values for the experimental data");
291 68 : keys.addFlag("NOENSEMBLE",false,"don't perform any replica-averaging");
292 68 : keys.addFlag("REWEIGHT",false,"simple REWEIGHT using the latest ARG as energy");
293 68 : keys.add("optional","AVERAGING", "Stride for calculation of averaged weights and sigma_mean");
294 68 : keys.add("compulsory","NOISETYPE","MGAUSS","functional form of the noise (GAUSS,MGAUSS,OUTLIERS,MOUTLIERS,GENERIC)");
295 68 : keys.add("compulsory","LIKELIHOOD","GAUSS","the likelihood for the GENERIC metainference model, GAUSS or LOGN");
296 68 : keys.add("compulsory","DFTILDE","0.1","fraction of sigma_mean used to evolve ftilde");
297 68 : keys.addFlag("SCALEDATA",false,"Set to TRUE if you want to sample a scaling factor common to all values and replicas");
298 68 : keys.add("compulsory","SCALE0","1.0","initial value of the scaling factor");
299 68 : keys.add("compulsory","SCALE_PRIOR","FLAT","either FLAT or GAUSSIAN");
300 68 : keys.add("optional","SCALE_MIN","minimum value of the scaling factor");
301 68 : keys.add("optional","SCALE_MAX","maximum value of the scaling factor");
302 68 : keys.add("optional","DSCALE","maximum MC move of the scaling factor");
303 68 : keys.addFlag("ADDOFFSET",false,"Set to TRUE if you want to sample an offset common to all values and replicas");
304 68 : keys.add("compulsory","OFFSET0","0.0","initial value of the offset");
305 68 : keys.add("compulsory","OFFSET_PRIOR","FLAT","either FLAT or GAUSSIAN");
306 68 : keys.add("optional","OFFSET_MIN","minimum value of the offset");
307 68 : keys.add("optional","OFFSET_MAX","maximum value of the offset");
308 68 : keys.add("optional","DOFFSET","maximum MC move of the offset");
309 68 : keys.add("optional","REGRES_ZERO","stride for regression with zero offset");
310 68 : keys.add("compulsory","SIGMA0","1.0","initial value of the uncertainty parameter");
311 68 : keys.add("compulsory","SIGMA_MIN","0.0","minimum value of the uncertainty parameter");
312 68 : keys.add("compulsory","SIGMA_MAX","10.","maximum value of the uncertainty parameter");
313 68 : keys.add("optional","DSIGMA","maximum MC move of the uncertainty parameter");
314 68 : keys.add("compulsory","OPTSIGMAMEAN","NONE","Set to NONE/SEM to manually set sigma mean, or to estimate it on the fly");
315 68 : keys.add("optional","SIGMA_MEAN0","starting value for the uncertainty in the mean estimate");
316 68 : keys.add("optional","SIGMA_MAX_STEPS", "Number of steps used to optimise SIGMA_MAX, before that the SIGMA_MAX value is used");
317 68 : keys.add("optional","TEMP","the system temperature - this is only needed if code doesn't pass the temperature to plumed");
318 68 : keys.add("optional","MC_STEPS","number of MC steps");
319 68 : keys.add("optional","MC_CHUNKSIZE","MC chunksize");
320 68 : keys.add("optional","STATUS_FILE","write a file with all the data useful for restart/continuation of Metainference");
321 68 : keys.add("optional","FMT","specify format for HILLS files (useful for decrease the number of digits in regtests)");
322 68 : keys.add("compulsory","WRITE_STRIDE","10000","write the status to a file every N steps, this can be used for restart/continuation");
323 68 : keys.add("optional","SELECTOR","name of selector");
324 68 : keys.add("optional","NSELECT","range of values for selector [0, N-1]");
325 34 : keys.use("RESTART");
326 68 : keys.addOutputComponent("sigma", "default", "uncertainty parameter");
327 68 : keys.addOutputComponent("sigmaMean", "default", "uncertainty in the mean estimate");
328 68 : keys.addOutputComponent("neff", "default", "effective number of replicas");
329 68 : keys.addOutputComponent("acceptSigma", "default", "MC acceptance for sigma values");
330 68 : keys.addOutputComponent("acceptScale", "SCALEDATA", "MC acceptance for scale value");
331 68 : keys.addOutputComponent("acceptFT", "GENERIC", "MC acceptance for general metainference f tilde value");
332 68 : keys.addOutputComponent("weight", "REWEIGHT", "weights of the weighted average");
333 68 : keys.addOutputComponent("biasDer", "REWEIGHT", "derivatives with respect to the bias");
334 68 : keys.addOutputComponent("scale", "SCALEDATA", "scale parameter");
335 68 : keys.addOutputComponent("offset", "ADDOFFSET", "offset parameter");
336 68 : keys.addOutputComponent("ftilde", "GENERIC", "ensemble average estimator");
337 34 : }
338 :
339 32 : Metainference::Metainference(const ActionOptions&ao):
340 : PLUMED_BIAS_INIT(ao),
341 32 : doscale_(false),
342 32 : scale_(1.),
343 32 : scale_mu_(0),
344 32 : scale_min_(1),
345 32 : scale_max_(-1),
346 32 : Dscale_(-1),
347 32 : dooffset_(false),
348 32 : offset_(0.),
349 32 : offset_mu_(0),
350 32 : offset_min_(1),
351 32 : offset_max_(-1),
352 32 : Doffset_(-1),
353 32 : doregres_zero_(false),
354 32 : nregres_zero_(0),
355 32 : Dftilde_(0.1),
356 32 : random(3),
357 32 : MCsteps_(1),
358 32 : MCaccept_(0),
359 32 : MCacceptScale_(0),
360 32 : MCacceptFT_(0),
361 32 : MCtrial_(0),
362 32 : MCchunksize_(0),
363 32 : write_stride_(0),
364 32 : firstTime(true),
365 32 : do_reweight_(false),
366 32 : do_optsigmamean_(0),
367 32 : optsigmamean_stride_(0),
368 32 : N_optimized_step_(0),
369 32 : optimized_step_(0),
370 32 : sigmamax_opt_done_(false),
371 32 : average_weights_stride_(1) {
372 32 : bool noensemble = false;
373 32 : parseFlag("NOENSEMBLE", noensemble);
374 :
375 : // set up replica stuff
376 32 : master = (comm.Get_rank()==0);
377 32 : if(master) {
378 24 : nrep_ = multi_sim_comm.Get_size();
379 24 : replica_ = multi_sim_comm.Get_rank();
380 24 : if(noensemble) {
381 0 : nrep_ = 1;
382 : }
383 : } else {
384 8 : nrep_ = 0;
385 8 : replica_ = 0;
386 : }
387 32 : comm.Sum(&nrep_,1);
388 32 : comm.Sum(&replica_,1);
389 :
390 32 : unsigned nsel = 1;
391 32 : parse("SELECTOR", selector_);
392 64 : parse("NSELECT", nsel);
393 : // do checks
394 32 : if(selector_.length()>0 && nsel<=1) {
395 0 : error("With SELECTOR active, NSELECT must be greater than 1");
396 : }
397 32 : if(selector_.length()==0 && nsel>1) {
398 0 : error("With NSELECT greater than 1, you must specify SELECTOR");
399 : }
400 :
401 : // initialise firstTimeW
402 32 : firstTimeW.resize(nsel, true);
403 :
404 : // reweight implies a different number of arguments (the last one must always be the bias)
405 32 : parseFlag("REWEIGHT", do_reweight_);
406 32 : if(do_reweight_&&nrep_<2) {
407 0 : error("REWEIGHT can only be used in parallel with 2 or more replicas");
408 : }
409 32 : if(!getRestart()) {
410 56 : average_weights_.resize(nsel, std::vector<double> (nrep_, 1./static_cast<double>(nrep_)));
411 : } else {
412 8 : average_weights_.resize(nsel, std::vector<double> (nrep_, 0.));
413 : }
414 32 : narg = getNumberOfArguments();
415 32 : if(do_reweight_) {
416 16 : narg--;
417 : }
418 :
419 32 : unsigned averaging=0;
420 32 : parse("AVERAGING", averaging);
421 32 : if(averaging>0) {
422 1 : average_weights_stride_ = averaging;
423 1 : optsigmamean_stride_ = averaging;
424 : }
425 :
426 64 : parseVector("PARAMETERS",parameters);
427 32 : if(parameters.size()!=static_cast<unsigned>(narg)&&!parameters.empty()) {
428 0 : error("Size of PARAMETERS array should be either 0 or the same as of the number of arguments in ARG1");
429 : }
430 :
431 : std::vector<Value*> arg2;
432 64 : parseArgumentList("PARARG",arg2);
433 32 : if(!arg2.empty()) {
434 4 : if(parameters.size()>0) {
435 0 : error("It is not possible to use PARARG and PARAMETERS together");
436 : }
437 4 : if(arg2.size()!=narg) {
438 0 : error("Size of PARARG array should be the same as number for arguments in ARG");
439 : }
440 2360 : for(unsigned i=0; i<arg2.size(); i++) {
441 2356 : parameters.push_back(arg2[i]->get());
442 2356 : if(arg2[i]->hasDerivatives()==true) {
443 0 : error("PARARG can only accept arguments without derivatives");
444 : }
445 : }
446 : }
447 :
448 32 : if(parameters.size()!=narg) {
449 0 : error("PARARG or PARAMETERS arrays should include the same number of elements as the arguments in ARG");
450 : }
451 :
452 : std::string stringa_noise;
453 64 : parse("NOISETYPE",stringa_noise);
454 32 : if(stringa_noise=="GAUSS") {
455 3 : noise_type_ = GAUSS;
456 29 : } else if(stringa_noise=="MGAUSS") {
457 8 : noise_type_ = MGAUSS;
458 21 : } else if(stringa_noise=="OUTLIERS") {
459 14 : noise_type_ = OUTLIERS;
460 7 : } else if(stringa_noise=="MOUTLIERS") {
461 4 : noise_type_ = MOUTLIERS;
462 3 : } else if(stringa_noise=="GENERIC") {
463 3 : noise_type_ = GENERIC;
464 : } else {
465 0 : error("Unknown noise type!");
466 : }
467 :
468 32 : if(noise_type_== GENERIC) {
469 : std::string stringa_like;
470 6 : parse("LIKELIHOOD",stringa_like);
471 3 : if(stringa_like=="GAUSS") {
472 2 : gen_likelihood_ = LIKE_GAUSS;
473 1 : } else if(stringa_like=="LOGN") {
474 1 : gen_likelihood_ = LIKE_LOGN;
475 : } else {
476 0 : error("Unknown likelihood type!");
477 : }
478 :
479 6 : parse("DFTILDE",Dftilde_);
480 : }
481 :
482 64 : parse("WRITE_STRIDE",write_stride_);
483 : std::string status_file_name_;
484 64 : parse("STATUS_FILE",status_file_name_);
485 32 : if(status_file_name_=="") {
486 64 : status_file_name_ = "MISTATUS"+getLabel();
487 : } else {
488 0 : status_file_name_ = status_file_name_+getLabel();
489 : }
490 :
491 : std::string stringa_optsigma;
492 64 : parse("OPTSIGMAMEAN", stringa_optsigma);
493 32 : if(stringa_optsigma=="NONE") {
494 30 : do_optsigmamean_=0;
495 2 : } else if(stringa_optsigma=="SEM") {
496 0 : do_optsigmamean_=1;
497 2 : } else if(stringa_optsigma=="SEM_MAX") {
498 0 : do_optsigmamean_=2;
499 : }
500 :
501 32 : unsigned aver_max_steps=0;
502 32 : parse("SIGMA_MAX_STEPS", aver_max_steps);
503 32 : if(aver_max_steps==0&&do_optsigmamean_==2) {
504 0 : aver_max_steps=averaging*2000;
505 : }
506 32 : if(aver_max_steps>0&&do_optsigmamean_<2) {
507 0 : error("SIGMA_MAX_STEPS can only be used together with OPTSIGMAMEAN=SEM_MAX");
508 : }
509 32 : if(aver_max_steps>0&&do_optsigmamean_==2) {
510 0 : N_optimized_step_=aver_max_steps;
511 : }
512 32 : if(aver_max_steps>0&&aver_max_steps<averaging) {
513 0 : error("SIGMA_MAX_STEPS must be greater than AVERAGING");
514 : }
515 :
516 : // resize std::vector for sigma_mean history
517 32 : sigma_mean2_last_.resize(nsel);
518 64 : for(unsigned i=0; i<nsel; i++) {
519 32 : sigma_mean2_last_[i].resize(narg);
520 : }
521 :
522 : std::vector<double> read_sigma_mean_;
523 32 : parseVector("SIGMA_MEAN0",read_sigma_mean_);
524 32 : if(do_optsigmamean_==0 && read_sigma_mean_.size()==0 && !getRestart()) {
525 0 : error("If you don't use OPTSIGMAMEAN and you are not RESTARTING then you MUST SET SIGMA_MEAN0");
526 : }
527 :
528 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
529 15 : if(read_sigma_mean_.size()==narg) {
530 0 : sigma_mean2_.resize(narg);
531 0 : for(unsigned i=0; i<narg; i++) {
532 0 : sigma_mean2_[i]=read_sigma_mean_[i]*read_sigma_mean_[i];
533 : }
534 15 : } else if(read_sigma_mean_.size()==1) {
535 15 : sigma_mean2_.resize(narg,read_sigma_mean_[0]*read_sigma_mean_[0]);
536 0 : } else if(read_sigma_mean_.size()==0) {
537 0 : sigma_mean2_.resize(narg,0.000001);
538 : } else {
539 0 : error("SIGMA_MEAN0 can accept either one single value or as many values as the arguments (with NOISETYPE=MGAUSS|MOUTLIERS)");
540 : }
541 : // set the initial value for the history
542 30 : for(unsigned i=0; i<nsel; i++)
543 2409 : for(unsigned j=0; j<narg; j++) {
544 2394 : sigma_mean2_last_[i][j].push_back(sigma_mean2_[j]);
545 : }
546 : } else {
547 17 : if(read_sigma_mean_.size()==1) {
548 17 : sigma_mean2_.resize(1, read_sigma_mean_[0]*read_sigma_mean_[0]);
549 0 : } else if(read_sigma_mean_.size()==0) {
550 0 : sigma_mean2_.resize(1, 0.000001);
551 : } else {
552 0 : error("If you want to use more than one SIGMA_MEAN0 you should use NOISETYPE=MGAUSS|MOUTLIERS");
553 : }
554 : // set the initial value for the history
555 34 : for(unsigned i=0; i<nsel; i++)
556 109 : for(unsigned j=0; j<narg; j++) {
557 92 : sigma_mean2_last_[i][j].push_back(sigma_mean2_[0]);
558 : }
559 : }
560 :
561 32 : parseFlag("SCALEDATA", doscale_);
562 32 : if(doscale_) {
563 : std::string stringa_noise;
564 24 : parse("SCALE_PRIOR",stringa_noise);
565 12 : if(stringa_noise=="GAUSSIAN") {
566 0 : scale_prior_ = SC_GAUSS;
567 12 : } else if(stringa_noise=="FLAT") {
568 12 : scale_prior_ = SC_FLAT;
569 : } else {
570 0 : error("Unknown SCALE_PRIOR type!");
571 : }
572 12 : parse("SCALE0",scale_);
573 12 : parse("DSCALE",Dscale_);
574 12 : if(Dscale_<0.) {
575 0 : error("DSCALE must be set when using SCALEDATA");
576 : }
577 12 : if(scale_prior_==SC_GAUSS) {
578 0 : scale_mu_=scale_;
579 : } else {
580 12 : parse("SCALE_MIN",scale_min_);
581 12 : parse("SCALE_MAX",scale_max_);
582 12 : if(scale_max_<scale_min_) {
583 0 : error("SCALE_MAX and SCALE_MIN must be set when using SCALE_PRIOR=FLAT");
584 : }
585 : }
586 : }
587 :
588 32 : parseFlag("ADDOFFSET", dooffset_);
589 32 : if(dooffset_) {
590 : std::string stringa_noise;
591 12 : parse("OFFSET_PRIOR",stringa_noise);
592 6 : if(stringa_noise=="GAUSSIAN") {
593 0 : offset_prior_ = SC_GAUSS;
594 6 : } else if(stringa_noise=="FLAT") {
595 6 : offset_prior_ = SC_FLAT;
596 : } else {
597 0 : error("Unknown OFFSET_PRIOR type!");
598 : }
599 6 : parse("OFFSET0",offset_);
600 6 : parse("DOFFSET",Doffset_);
601 6 : if(offset_prior_==SC_GAUSS) {
602 0 : offset_mu_=offset_;
603 0 : if(Doffset_<0.) {
604 0 : error("DOFFSET must be set when using OFFSET_PRIOR=GAUSS");
605 : }
606 : } else {
607 6 : parse("OFFSET_MIN",offset_min_);
608 6 : parse("OFFSET_MAX",offset_max_);
609 6 : if(Doffset_<0) {
610 0 : Doffset_ = 0.05*(offset_max_ - offset_min_);
611 : }
612 6 : if(offset_max_<offset_min_) {
613 0 : error("OFFSET_MAX and OFFSET_MIN must be set when using OFFSET_PRIOR=FLAT");
614 : }
615 : }
616 : }
617 :
618 : // regression with zero intercept
619 32 : parse("REGRES_ZERO", nregres_zero_);
620 32 : if(nregres_zero_>0) {
621 : // set flag
622 1 : doregres_zero_=true;
623 : // check if already sampling scale and offset
624 1 : if(doscale_) {
625 0 : error("REGRES_ZERO and SCALEDATA are mutually exclusive");
626 : }
627 1 : if(dooffset_) {
628 0 : error("REGRES_ZERO and ADDOFFSET are mutually exclusive");
629 : }
630 : }
631 :
632 : std::vector<double> readsigma;
633 32 : parseVector("SIGMA0",readsigma);
634 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma.size()>1) {
635 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
636 : }
637 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
638 15 : sigma_.resize(readsigma.size());
639 15 : sigma_=readsigma;
640 : } else {
641 17 : sigma_.resize(1, readsigma[0]);
642 : }
643 :
644 : std::vector<double> readsigma_min;
645 32 : parseVector("SIGMA_MIN",readsigma_min);
646 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_min.size()>1) {
647 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
648 : }
649 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
650 15 : sigma_min_.resize(readsigma_min.size());
651 15 : sigma_min_=readsigma_min;
652 : } else {
653 17 : sigma_min_.resize(1, readsigma_min[0]);
654 : }
655 :
656 : std::vector<double> readsigma_max;
657 32 : parseVector("SIGMA_MAX",readsigma_max);
658 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_max.size()>1) {
659 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
660 : }
661 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
662 15 : sigma_max_.resize(readsigma_max.size());
663 15 : sigma_max_=readsigma_max;
664 : } else {
665 17 : sigma_max_.resize(1, readsigma_max[0]);
666 : }
667 :
668 32 : if(sigma_max_.size()!=sigma_min_.size()) {
669 0 : error("The number of values for SIGMA_MIN and SIGMA_MAX must be the same");
670 : }
671 :
672 : std::vector<double> read_dsigma;
673 32 : parseVector("DSIGMA",read_dsigma);
674 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_max.size()>1) {
675 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
676 : }
677 32 : if(read_dsigma.size()>0) {
678 32 : Dsigma_.resize(read_dsigma.size());
679 32 : Dsigma_=read_dsigma;
680 : } else {
681 0 : Dsigma_.resize(sigma_max_.size(), -1.);
682 : /* in this case Dsigma is initialised after reading the restart file if present */
683 : }
684 :
685 : std::string fmt_;
686 32 : parse("FMT",fmt_);
687 :
688 : // monte carlo stuff
689 32 : parse("MC_STEPS",MCsteps_);
690 32 : parse("MC_CHUNKSIZE", MCchunksize_);
691 : // get temperature
692 32 : kbt_ = getkBT();
693 32 : if(kbt_==0.0) {
694 0 : error("Unless the MD engine passes the temperature to plumed, you must specify it using TEMP");
695 : }
696 :
697 32 : checkRead();
698 :
699 : // set sigma_bias
700 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
701 15 : if(sigma_.size()==1) {
702 15 : double tmp = sigma_[0];
703 15 : sigma_.resize(narg, tmp);
704 0 : } else if(sigma_.size()>1&&sigma_.size()!=narg) {
705 0 : error("SIGMA0 can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
706 : }
707 15 : if(sigma_min_.size()==1) {
708 15 : double tmp = sigma_min_[0];
709 15 : sigma_min_.resize(narg, tmp);
710 0 : } else if(sigma_min_.size()>1&&sigma_min_.size()!=narg) {
711 0 : error("SIGMA_MIN can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
712 : }
713 15 : if(sigma_max_.size()==1) {
714 15 : double tmp = sigma_max_[0];
715 15 : sigma_max_.resize(narg, tmp);
716 0 : } else if(sigma_max_.size()>1&&sigma_max_.size()!=narg) {
717 0 : error("SIGMA_MAX can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
718 : }
719 15 : if(Dsigma_.size()==1) {
720 15 : double tmp = Dsigma_[0];
721 15 : Dsigma_.resize(narg, tmp);
722 0 : } else if(Dsigma_.size()>1&&Dsigma_.size()!=narg) {
723 0 : error("DSIGMA can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
724 : }
725 : }
726 :
727 32 : sigma_max_est_.resize(sigma_max_.size(), 0.);
728 :
729 32 : IFile restart_sfile;
730 32 : restart_sfile.link(*this);
731 32 : if(getRestart()&&restart_sfile.FileExist(status_file_name_)) {
732 4 : firstTime = false;
733 8 : for(unsigned i=0; i<nsel; i++) {
734 : firstTimeW[i] = false;
735 : }
736 4 : restart_sfile.open(status_file_name_);
737 4 : log.printf(" Restarting from %s\n", status_file_name_.c_str());
738 : double dummy;
739 8 : if(restart_sfile.scanField("time",dummy)) {
740 : // check for synchronisation: all replicas must restart from the same time
741 4 : std::vector<double> dummy_time(nrep_,0);
742 4 : if(master&&nrep_>1) {
743 2 : dummy_time[replica_] = dummy;
744 2 : multi_sim_comm.Sum(dummy_time);
745 : }
746 4 : comm.Sum(dummy_time);
747 8 : for(unsigned i=1; i<nrep_; i++) {
748 4 : std::string msg = "METAINFERENCE restart files " + status_file_name_ + " are not in sync";
749 4 : if(dummy_time[i]!=dummy_time[0]) {
750 0 : plumed_merror(msg);
751 : }
752 : }
753 : // nsel
754 8 : for(unsigned i=0; i<sigma_mean2_last_.size(); i++) {
755 : std::string msg_i;
756 4 : Tools::convert(i,msg_i);
757 : // narg
758 4 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
759 20 : for(unsigned j=0; j<narg; ++j) {
760 : std::string msg_j;
761 16 : Tools::convert(j,msg_j);
762 16 : std::string msg = msg_i+"_"+msg_j;
763 : double read_sm;
764 16 : restart_sfile.scanField("sigmaMean_"+msg,read_sm);
765 16 : sigma_mean2_last_[i][j][0] = read_sm*read_sm;
766 : }
767 : }
768 4 : if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
769 : double read_sm;
770 : std::string msg_j;
771 0 : Tools::convert(0,msg_j);
772 0 : std::string msg = msg_i+"_"+msg_j;
773 0 : restart_sfile.scanField("sigmaMean_"+msg,read_sm);
774 0 : for(unsigned j=0; j<narg; j++) {
775 0 : sigma_mean2_last_[i][j][0] = read_sm*read_sm;
776 : }
777 : }
778 : }
779 :
780 20 : for(unsigned i=0; i<sigma_.size(); ++i) {
781 : std::string msg;
782 16 : Tools::convert(i,msg);
783 32 : restart_sfile.scanField("sigma_"+msg,sigma_[i]);
784 : }
785 20 : for(unsigned i=0; i<sigma_max_.size(); ++i) {
786 : std::string msg;
787 16 : Tools::convert(i,msg);
788 16 : restart_sfile.scanField("sigma_max_"+msg,sigma_max_[i]);
789 16 : sigmamax_opt_done_=true;
790 : }
791 4 : if(noise_type_==GENERIC) {
792 0 : for(unsigned i=0; i<ftilde_.size(); ++i) {
793 : std::string msg;
794 0 : Tools::convert(i,msg);
795 0 : restart_sfile.scanField("ftilde_"+msg,ftilde_[i]);
796 : }
797 : }
798 4 : restart_sfile.scanField("scale0_",scale_);
799 4 : restart_sfile.scanField("offset0_",offset_);
800 :
801 8 : for(unsigned i=0; i<nsel; i++) {
802 : std::string msg;
803 4 : Tools::convert(i,msg);
804 : double tmp_w;
805 4 : restart_sfile.scanField("weight_"+msg,tmp_w);
806 4 : if(master) {
807 2 : average_weights_[i][replica_] = tmp_w;
808 2 : if(nrep_>1) {
809 2 : multi_sim_comm.Sum(&average_weights_[i][0], nrep_);
810 : }
811 : }
812 4 : comm.Sum(&average_weights_[i][0], nrep_);
813 : }
814 :
815 : }
816 4 : restart_sfile.scanField();
817 4 : restart_sfile.close();
818 : }
819 :
820 : /* If DSIGMA is not yet initialised do it now */
821 2443 : for(unsigned i=0; i<sigma_max_.size(); i++)
822 2411 : if(Dsigma_[i]==-1) {
823 0 : Dsigma_[i] = 0.05*(sigma_max_[i] - sigma_min_[i]);
824 : }
825 :
826 32 : switch(noise_type_) {
827 3 : case GENERIC:
828 3 : log.printf(" with general metainference ");
829 3 : if(gen_likelihood_==LIKE_GAUSS) {
830 2 : log.printf(" and a gaussian likelihood\n");
831 1 : } else if(gen_likelihood_==LIKE_LOGN) {
832 1 : log.printf(" and a log-normal likelihood\n");
833 : }
834 3 : log.printf(" ensemble average parameter sampled with a step %lf of sigma_mean\n", Dftilde_);
835 : break;
836 3 : case GAUSS:
837 3 : log.printf(" with gaussian noise and a single noise parameter for all the data\n");
838 : break;
839 8 : case MGAUSS:
840 8 : log.printf(" with gaussian noise and a noise parameter for each data point\n");
841 : break;
842 14 : case OUTLIERS:
843 14 : log.printf(" with long tailed gaussian noise and a single noise parameter for all the data\n");
844 : break;
845 4 : case MOUTLIERS:
846 4 : log.printf(" with long tailed gaussian noise and a noise parameter for each data point\n");
847 : break;
848 : }
849 :
850 32 : if(doscale_) {
851 : // check that the scale value is the same for all replicas
852 12 : std::vector<double> dummy_scale(nrep_,0);
853 12 : if(master&&nrep_>1) {
854 6 : dummy_scale[replica_] = scale_;
855 6 : multi_sim_comm.Sum(dummy_scale);
856 : }
857 12 : comm.Sum(dummy_scale);
858 24 : for(unsigned i=1; i<nrep_; i++) {
859 12 : std::string msg = "The SCALE value must be the same for all replicas: check your input or restart file";
860 12 : if(dummy_scale[i]!=dummy_scale[0]) {
861 0 : plumed_merror(msg);
862 : }
863 : }
864 12 : log.printf(" sampling a common scaling factor with:\n");
865 12 : log.printf(" initial scale parameter %f\n",scale_);
866 12 : if(scale_prior_==SC_GAUSS) {
867 0 : log.printf(" gaussian prior with mean %f and width %f\n",scale_mu_,Dscale_);
868 : }
869 12 : if(scale_prior_==SC_FLAT) {
870 12 : log.printf(" flat prior between %f - %f\n",scale_min_,scale_max_);
871 12 : log.printf(" maximum MC move of scale parameter %f\n",Dscale_);
872 : }
873 : }
874 :
875 32 : if(dooffset_) {
876 : // check that the offset value is the same for all replicas
877 6 : std::vector<double> dummy_offset(nrep_,0);
878 6 : if(master&&nrep_>1) {
879 0 : dummy_offset[replica_] = offset_;
880 0 : multi_sim_comm.Sum(dummy_offset);
881 : }
882 6 : comm.Sum(dummy_offset);
883 6 : for(unsigned i=1; i<nrep_; i++) {
884 0 : std::string msg = "The OFFSET value must be the same for all replicas: check your input or restart file";
885 0 : if(dummy_offset[i]!=dummy_offset[0]) {
886 0 : plumed_merror(msg);
887 : }
888 : }
889 6 : log.printf(" sampling a common offset with:\n");
890 6 : log.printf(" initial offset parameter %f\n",offset_);
891 6 : if(offset_prior_==SC_GAUSS) {
892 0 : log.printf(" gaussian prior with mean %f and width %f\n",offset_mu_,Doffset_);
893 : }
894 6 : if(offset_prior_==SC_FLAT) {
895 6 : log.printf(" flat prior between %f - %f\n",offset_min_,offset_max_);
896 6 : log.printf(" maximum MC move of offset parameter %f\n",Doffset_);
897 : }
898 : }
899 :
900 32 : if(doregres_zero_) {
901 1 : log.printf(" doing regression with zero intercept with stride: %d\n", nregres_zero_);
902 : }
903 :
904 32 : log.printf(" number of experimental data points %u\n",narg);
905 32 : log.printf(" number of replicas %u\n",nrep_);
906 32 : log.printf(" initial data uncertainties");
907 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
908 2411 : log.printf(" %f", sigma_[i]);
909 : }
910 32 : log.printf("\n");
911 32 : log.printf(" minimum data uncertainties");
912 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
913 2411 : log.printf(" %f",sigma_min_[i]);
914 : }
915 32 : log.printf("\n");
916 32 : log.printf(" maximum data uncertainties");
917 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
918 2411 : log.printf(" %f",sigma_max_[i]);
919 : }
920 32 : log.printf("\n");
921 32 : log.printf(" maximum MC move of data uncertainties");
922 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
923 2411 : log.printf(" %f",Dsigma_[i]);
924 : }
925 32 : log.printf("\n");
926 32 : log.printf(" temperature of the system %f\n",kbt_);
927 32 : log.printf(" MC steps %u\n",MCsteps_);
928 32 : log.printf(" initial standard errors of the mean");
929 2443 : for(unsigned i=0; i<sigma_mean2_.size(); ++i) {
930 2411 : log.printf(" %f", std::sqrt(sigma_mean2_[i]));
931 : }
932 32 : log.printf("\n");
933 :
934 32 : if(do_reweight_) {
935 32 : addComponent("biasDer");
936 32 : componentIsNotPeriodic("biasDer");
937 32 : addComponent("weight");
938 32 : componentIsNotPeriodic("weight");
939 : }
940 :
941 64 : addComponent("neff");
942 32 : componentIsNotPeriodic("neff");
943 :
944 32 : if(doscale_ || doregres_zero_) {
945 26 : addComponent("scale");
946 13 : componentIsNotPeriodic("scale");
947 13 : valueScale=getPntrToComponent("scale");
948 : }
949 :
950 32 : if(dooffset_) {
951 12 : addComponent("offset");
952 6 : componentIsNotPeriodic("offset");
953 6 : valueOffset=getPntrToComponent("offset");
954 : }
955 :
956 32 : if(dooffset_||doscale_) {
957 36 : addComponent("acceptScale");
958 18 : componentIsNotPeriodic("acceptScale");
959 18 : valueAcceptScale=getPntrToComponent("acceptScale");
960 : }
961 :
962 32 : if(noise_type_==GENERIC) {
963 6 : addComponent("acceptFT");
964 3 : componentIsNotPeriodic("acceptFT");
965 3 : valueAcceptFT=getPntrToComponent("acceptFT");
966 : }
967 :
968 64 : addComponent("acceptSigma");
969 32 : componentIsNotPeriodic("acceptSigma");
970 32 : valueAccept=getPntrToComponent("acceptSigma");
971 :
972 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
973 2409 : for(unsigned i=0; i<sigma_mean2_.size(); ++i) {
974 : std::string num;
975 2394 : Tools::convert(i,num);
976 4788 : addComponent("sigmaMean-"+num);
977 2394 : componentIsNotPeriodic("sigmaMean-"+num);
978 2394 : valueSigmaMean.push_back(getPntrToComponent("sigmaMean-"+num));
979 4788 : getPntrToComponent("sigmaMean-"+num)->set(std::sqrt(sigma_mean2_[i]));
980 4788 : addComponent("sigma-"+num);
981 2394 : componentIsNotPeriodic("sigma-"+num);
982 2394 : valueSigma.push_back(getPntrToComponent("sigma-"+num));
983 2394 : getPntrToComponent("sigma-"+num)->set(sigma_[i]);
984 2394 : if(noise_type_==GENERIC) {
985 12 : addComponent("ftilde-"+num);
986 6 : componentIsNotPeriodic("ftilde-"+num);
987 6 : valueFtilde.push_back(getPntrToComponent("ftilde-"+num));
988 : }
989 : }
990 15 : } else {
991 34 : addComponent("sigmaMean");
992 17 : componentIsNotPeriodic("sigmaMean");
993 17 : valueSigmaMean.push_back(getPntrToComponent("sigmaMean"));
994 34 : getPntrToComponent("sigmaMean")->set(std::sqrt(sigma_mean2_[0]));
995 34 : addComponent("sigma");
996 17 : componentIsNotPeriodic("sigma");
997 17 : valueSigma.push_back(getPntrToComponent("sigma"));
998 34 : getPntrToComponent("sigma")->set(sigma_[0]);
999 : }
1000 :
1001 : // initialize random seed
1002 : unsigned iseed;
1003 32 : if(master) {
1004 24 : auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
1005 24 : iseed = static_cast<unsigned>(ts)+replica_;
1006 : } else {
1007 8 : iseed = 0;
1008 : }
1009 32 : comm.Sum(&iseed, 1);
1010 : // this is used for ftilde and sigma both the move and the acceptance
1011 : // this is different for each replica
1012 32 : random[0].setSeed(-iseed);
1013 32 : if(doscale_||dooffset_) {
1014 : // in this case we want the same seed everywhere
1015 18 : auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
1016 18 : iseed = static_cast<unsigned>(ts);
1017 18 : if(master&&nrep_>1) {
1018 6 : multi_sim_comm.Bcast(iseed,0);
1019 : }
1020 18 : comm.Bcast(iseed,0);
1021 : // this is used for scale and offset sampling and acceptance
1022 18 : random[1].setSeed(-iseed);
1023 : }
1024 : // this is used for random chunk of sigmas, and it is different for each replica
1025 32 : if(master) {
1026 24 : auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
1027 24 : iseed = static_cast<unsigned>(ts)+replica_;
1028 : } else {
1029 8 : iseed = 0;
1030 : }
1031 32 : comm.Sum(&iseed, 1);
1032 32 : random[2].setSeed(-iseed);
1033 :
1034 : // outfile stuff
1035 32 : if(write_stride_>0) {
1036 32 : sfile_.link(*this);
1037 32 : sfile_.open(status_file_name_);
1038 32 : if(fmt_.length()>0) {
1039 1 : sfile_.fmtField(fmt_);
1040 : }
1041 : }
1042 :
1043 64 : log<<" Bibliography "<<plumed.cite("Bonomi, Camilloni, Cavalli, Vendruscolo, Sci. Adv. 2, e150117 (2016)");
1044 32 : if(do_reweight_) {
1045 32 : log<<plumed.cite("Bonomi, Camilloni, Vendruscolo, Sci. Rep. 6, 31232 (2016)");
1046 : }
1047 32 : if(do_optsigmamean_>0) {
1048 0 : log<<plumed.cite("Loehr, Jussupow, Camilloni, J. Chem. Phys. 146, 165102 (2017)");
1049 : }
1050 64 : log<<plumed.cite("Bonomi, Camilloni, Bioinformatics, 33, 3999 (2017)");
1051 32 : log<<"\n";
1052 64 : }
1053 :
1054 64 : Metainference::~Metainference() {
1055 32 : if(sfile_.isOpen()) {
1056 32 : sfile_.close();
1057 : }
1058 192 : }
1059 :
1060 264 : double Metainference::getEnergySP(const std::vector<double> &mean, const std::vector<double> &sigma,
1061 : const double scale, const double offset) {
1062 264 : const double scale2 = scale*scale;
1063 264 : const double sm2 = sigma_mean2_[0];
1064 : const double ss2 = sigma[0]*sigma[0] + scale2*sm2;
1065 264 : const double sss = sigma[0]*sigma[0] + sm2;
1066 :
1067 : double ene = 0.0;
1068 264 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1069 : {
1070 : #pragma omp for reduction( + : ene)
1071 : for(unsigned i=0; i<narg; ++i) {
1072 : const double dev = scale*mean[i]-parameters[i]+offset;
1073 : const double a2 = 0.5*dev*dev + ss2;
1074 : if(sm2 > 0.0) {
1075 : ene += std::log(2.0*a2/(1.0-std::exp(-a2/sm2)));
1076 : } else {
1077 : ene += std::log(2.0*a2);
1078 : }
1079 : }
1080 : }
1081 : // add one single Jeffreys prior and one normalisation per data point
1082 264 : ene += 0.5*std::log(sss) + static_cast<double>(narg)*0.5*std::log(0.5*M_PI*M_PI/ss2);
1083 264 : if(doscale_ || doregres_zero_) {
1084 156 : ene += 0.5*std::log(sss);
1085 : }
1086 264 : if(dooffset_) {
1087 0 : ene += 0.5*std::log(sss);
1088 : }
1089 264 : return kbt_ * ene;
1090 : }
1091 :
1092 144 : double Metainference::getEnergySPE(const std::vector<double> &mean, const std::vector<double> &sigma,
1093 : const double scale, const double offset) {
1094 144 : const double scale2 = scale*scale;
1095 : double ene = 0.0;
1096 144 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1097 : {
1098 : #pragma omp for reduction( + : ene)
1099 : for(unsigned i=0; i<narg; ++i) {
1100 : const double sm2 = sigma_mean2_[i];
1101 : const double ss2 = sigma[i]*sigma[i] + scale2*sm2;
1102 : const double sss = sigma[i]*sigma[i] + sm2;
1103 : const double dev = scale*mean[i]-parameters[i]+offset;
1104 : const double a2 = 0.5*dev*dev + ss2;
1105 : if(sm2 > 0.0) {
1106 : ene += 0.5*std::log(sss) + 0.5*std::log(0.5*M_PI*M_PI/ss2) + std::log(2.0*a2/(1.0-std::exp(-a2/sm2)));
1107 : } else {
1108 : ene += 0.5*std::log(sss) + 0.5*std::log(0.5*M_PI*M_PI/ss2) + std::log(2.0*a2);
1109 : }
1110 : if(doscale_ || doregres_zero_) {
1111 : ene += 0.5*std::log(sss);
1112 : }
1113 : if(dooffset_) {
1114 : ene += 0.5*std::log(sss);
1115 : }
1116 : }
1117 : }
1118 144 : return kbt_ * ene;
1119 : }
1120 :
1121 144 : double Metainference::getEnergyMIGEN(const std::vector<double> &mean, const std::vector<double> &ftilde, const std::vector<double> &sigma,
1122 : const double scale, const double offset) {
1123 : double ene = 0.0;
1124 144 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1125 : {
1126 : #pragma omp for reduction( + : ene)
1127 : for(unsigned i=0; i<narg; ++i) {
1128 : const double inv_sb2 = 1./(sigma[i]*sigma[i]);
1129 : const double inv_sm2 = 1./sigma_mean2_[i];
1130 : double devb = 0;
1131 : if(gen_likelihood_==LIKE_GAUSS) {
1132 : devb = scale*ftilde[i]-parameters[i]+offset;
1133 : } else if(gen_likelihood_==LIKE_LOGN) {
1134 : devb = std::log(scale*ftilde[i]/parameters[i]);
1135 : }
1136 : double devm = mean[i] - ftilde[i];
1137 : // deviation + normalisation + jeffrey
1138 : double normb = 0.;
1139 : if(gen_likelihood_==LIKE_GAUSS) {
1140 : normb = -0.5*std::log(0.5/M_PI*inv_sb2);
1141 : } else if(gen_likelihood_==LIKE_LOGN) {
1142 : normb = -0.5*std::log(0.5/M_PI*inv_sb2/(parameters[i]*parameters[i]));
1143 : }
1144 : const double normm = -0.5*std::log(0.5/M_PI*inv_sm2);
1145 : const double jeffreys = -0.5*std::log(2.*inv_sb2);
1146 : ene += 0.5*devb*devb*inv_sb2 + 0.5*devm*devm*inv_sm2 + normb + normm + jeffreys;
1147 : if(doscale_ || doregres_zero_) {
1148 : ene += jeffreys;
1149 : }
1150 : if(dooffset_) {
1151 : ene += jeffreys;
1152 : }
1153 : }
1154 : }
1155 144 : return kbt_ * ene;
1156 : }
1157 :
1158 108 : double Metainference::getEnergyGJ(const std::vector<double> &mean, const std::vector<double> &sigma,
1159 : const double scale, const double offset) {
1160 108 : const double scale2 = scale*scale;
1161 108 : const double inv_s2 = 1./(sigma[0]*sigma[0] + scale2*sigma_mean2_[0]);
1162 108 : const double inv_sss = 1./(sigma[0]*sigma[0] + sigma_mean2_[0]);
1163 :
1164 : double ene = 0.0;
1165 108 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1166 : {
1167 : #pragma omp for reduction( + : ene)
1168 : for(unsigned i=0; i<narg; ++i) {
1169 : double dev = scale*mean[i]-parameters[i]+offset;
1170 : ene += 0.5*dev*dev*inv_s2;
1171 : }
1172 : }
1173 108 : const double normalisation = -0.5*std::log(0.5/M_PI*inv_s2);
1174 108 : const double jeffreys = -0.5*std::log(2.*inv_sss);
1175 : // add the Jeffreys prior in case of one sigma for all data points + one normalisation per data point
1176 108 : ene += jeffreys + static_cast<double>(narg)*normalisation;
1177 108 : if(doscale_ || doregres_zero_) {
1178 0 : ene += jeffreys;
1179 : }
1180 108 : if(dooffset_) {
1181 108 : ene += jeffreys;
1182 : }
1183 :
1184 108 : return kbt_ * ene;
1185 : }
1186 :
1187 152 : double Metainference::getEnergyGJE(const std::vector<double> &mean, const std::vector<double> &sigma,
1188 : const double scale, const double offset) {
1189 152 : const double scale2 = scale*scale;
1190 :
1191 : double ene = 0.0;
1192 152 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1193 : {
1194 : #pragma omp for reduction( + : ene)
1195 : for(unsigned i=0; i<narg; ++i) {
1196 : const double inv_s2 = 1./(sigma[i]*sigma[i] + scale2*sigma_mean2_[i]);
1197 : const double inv_sss = 1./(sigma[i]*sigma[i] + sigma_mean2_[i]);
1198 : double dev = scale*mean[i]-parameters[i]+offset;
1199 : // deviation + normalisation + jeffrey
1200 : const double normalisation = -0.5*std::log(0.5/M_PI*inv_s2);
1201 : const double jeffreys = -0.5*std::log(2.*inv_sss);
1202 : ene += 0.5*dev*dev*inv_s2 + normalisation + jeffreys;
1203 : if(doscale_ || doregres_zero_) {
1204 : ene += jeffreys;
1205 : }
1206 : if(dooffset_) {
1207 : ene += jeffreys;
1208 : }
1209 : }
1210 : }
1211 152 : return kbt_ * ene;
1212 : }
1213 :
1214 36 : void Metainference::moveTilde(const std::vector<double> &mean_, double &old_energy) {
1215 36 : std::vector<double> new_ftilde(sigma_.size());
1216 36 : new_ftilde = ftilde_;
1217 :
1218 : // change all tildes
1219 108 : for(unsigned j=0; j<sigma_.size(); j++) {
1220 72 : const double r3 = random[0].Gaussian();
1221 72 : const double ds3 = Dftilde_*std::sqrt(sigma_mean2_[j])*r3;
1222 72 : new_ftilde[j] = ftilde_[j] + ds3;
1223 : }
1224 : // calculate new energy
1225 36 : double new_energy = getEnergyMIGEN(mean_,new_ftilde,sigma_,scale_,offset_);
1226 :
1227 : // accept or reject
1228 36 : const double delta = ( new_energy - old_energy ) / kbt_;
1229 : // if delta is negative always accept move
1230 36 : if( delta <= 0.0 ) {
1231 36 : old_energy = new_energy;
1232 36 : ftilde_ = new_ftilde;
1233 36 : MCacceptFT_++;
1234 : // otherwise extract random number
1235 : } else {
1236 0 : const double s = random[0].RandU01();
1237 0 : if( s < std::exp(-delta) ) {
1238 0 : old_energy = new_energy;
1239 0 : ftilde_ = new_ftilde;
1240 0 : MCacceptFT_++;
1241 : }
1242 : }
1243 36 : }
1244 :
1245 216 : void Metainference::moveScaleOffset(const std::vector<double> &mean_, double &old_energy) {
1246 216 : double new_scale = scale_;
1247 :
1248 216 : if(doscale_) {
1249 144 : if(scale_prior_==SC_FLAT) {
1250 144 : const double r1 = random[1].Gaussian();
1251 144 : const double ds1 = Dscale_*r1;
1252 144 : new_scale += ds1;
1253 : // check boundaries
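 : // (reflecting the move at the boundary keeps the proposal distribution symmetric)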
1254 144 : if(new_scale > scale_max_) {
1255 0 : new_scale = 2.0 * scale_max_ - new_scale;
1256 : }
1257 144 : if(new_scale < scale_min_) {
1258 0 : new_scale = 2.0 * scale_min_ - new_scale;
1259 : }
1260 : } else {
1261 0 : const double r1 = random[1].Gaussian();
1262 0 : const double ds1 = 0.5*(scale_mu_-new_scale)+Dscale_*std::exp(1)/M_PI*r1;
1263 0 : new_scale += ds1;
1264 : }
1265 : }
1266 :
1267 216 : double new_offset = offset_;
1268 :
1269 216 : if(dooffset_) {
1270 72 : if(offset_prior_==SC_FLAT) {
1271 72 : const double r1 = random[1].Gaussian();
1272 72 : const double ds1 = Doffset_*r1;
1273 72 : new_offset += ds1;
1274 : // check boundaries
1275 72 : if(new_offset > offset_max_) {
1276 0 : new_offset = 2.0 * offset_max_ - new_offset;
1277 : }
1278 72 : if(new_offset < offset_min_) {
1279 0 : new_offset = 2.0 * offset_min_ - new_offset;
1280 : }
1281 : } else {
1282 0 : const double r1 = random[1].Gaussian();
1283 0 : const double ds1 = 0.5*(offset_mu_-new_offset)+Doffset_*std::exp(1)/M_PI*r1;
1284 0 : new_offset += ds1;
1285 : }
1286 : }
1287 :
1288 : // calculate new energy
1289 : double new_energy = 0.;
1290 :
1291 216 : switch(noise_type_) {
1292 36 : case GAUSS:
1293 36 : new_energy = getEnergyGJ(mean_,sigma_,new_scale,new_offset);
1294 : break;
1295 48 : case MGAUSS:
1296 48 : new_energy = getEnergyGJE(mean_,sigma_,new_scale,new_offset);
1297 : break;
1298 48 : case OUTLIERS:
1299 48 : new_energy = getEnergySP(mean_,sigma_,new_scale,new_offset);
1300 : break;
1301 48 : case MOUTLIERS:
1302 48 : new_energy = getEnergySPE(mean_,sigma_,new_scale,new_offset);
1303 : break;
1304 36 : case GENERIC:
1305 36 : new_energy = getEnergyMIGEN(mean_,ftilde_,sigma_,new_scale,new_offset);
1306 : break;
1307 : }
1308 :
1309 : // for the scale/offset we need to consider the total energy
1310 216 : std::vector<double> totenergies(2);
1311 216 : if(master) {
1312 144 : totenergies[0] = old_energy;
1313 144 : totenergies[1] = new_energy;
1314 144 : if(nrep_>1) {
1315 72 : multi_sim_comm.Sum(totenergies);
1316 : }
1317 : } else {
1318 72 : totenergies[0] = 0;
1319 72 : totenergies[1] = 0;
1320 : }
1321 216 : comm.Sum(totenergies);
1322 :
1323 : // accept or reject
1324 216 : const double delta = ( totenergies[1] - totenergies[0] ) / kbt_;
1325 : // if delta is negative always accept move
1326 216 : if( delta <= 0.0 ) {
1327 216 : old_energy = new_energy;
1328 216 : scale_ = new_scale;
1329 216 : offset_ = new_offset;
1330 216 : MCacceptScale_++;
1331 : // otherwise extract random number
1332 : } else {
1333 0 : double s = random[1].RandU01();
1334 0 : if( s < std::exp(-delta) ) {
1335 0 : old_energy = new_energy;
1336 0 : scale_ = new_scale;
1337 0 : offset_ = new_offset;
1338 0 : MCacceptScale_++;
1339 : }
1340 : }
1341 216 : }
1342 :
1343 280 : void Metainference::moveSigmas(const std::vector<double> &mean_, double &old_energy, const unsigned i, const std::vector<unsigned> &indices, bool &breaknow) {
1344 280 : std::vector<double> new_sigma(sigma_.size());
1345 280 : new_sigma = sigma_;
1346 :
1347 : // change MCchunksize_ sigmas
1348 280 : if (MCchunksize_ > 0) {
1349 6 : if ((MCchunksize_ * i) >= sigma_.size()) {
1350 : // This means we are not moving any sigma, so we should break immediately
1351 0 : breaknow = true;
1352 : }
1353 :
1354 : // change random sigmas
1355 12 : for(unsigned j=0; j<MCchunksize_; j++) {
1356 6 : const unsigned shuffle_index = j + MCchunksize_ * i;
1357 6 : if (shuffle_index >= sigma_.size()) {
1358 : // Going any further will segfault but we should still evaluate the sigmas we changed
1359 : break;
1360 : }
1361 6 : const unsigned index = indices[shuffle_index];
1362 6 : const double r2 = random[0].Gaussian();
1363 6 : const double ds2 = Dsigma_[index]*r2;
1364 6 : new_sigma[index] = sigma_[index] + ds2;
1365 : // check boundaries
1366 6 : if(new_sigma[index] > sigma_max_[index]) {
1367 0 : new_sigma[index] = 2.0 * sigma_max_[index] - new_sigma[index];
1368 : }
1369 6 : if(new_sigma[index] < sigma_min_[index]) {
1370 0 : new_sigma[index] = 2.0 * sigma_min_[index] - new_sigma[index];
1371 : }
1372 : }
1373 : } else {
1374 : // change all sigmas
1375 3224 : for(unsigned j=0; j<sigma_.size(); j++) {
1376 2950 : const double r2 = random[0].Gaussian();
1377 2950 : const double ds2 = Dsigma_[j]*r2;
1378 2950 : new_sigma[j] = sigma_[j] + ds2;
1379 : // check boundaries
1380 2950 : if(new_sigma[j] > sigma_max_[j]) {
1381 0 : new_sigma[j] = 2.0 * sigma_max_[j] - new_sigma[j];
1382 : }
1383 2950 : if(new_sigma[j] < sigma_min_[j]) {
1384 0 : new_sigma[j] = 2.0 * sigma_min_[j] - new_sigma[j];
1385 : }
1386 : }
1387 : }
1388 :
1389 280 : if (breaknow) {
1390 : // We didn't move any sigmas, so there is no sense in evaluating anything
1391 : return;
1392 : }
1393 :
1394 : // calculate new energy
1395 : double new_energy = 0.;
1396 280 : switch(noise_type_) {
1397 36 : case GAUSS:
1398 36 : new_energy = getEnergyGJ(mean_,new_sigma,scale_,offset_);
1399 : break;
1400 52 : case MGAUSS:
1401 52 : new_energy = getEnergyGJE(mean_,new_sigma,scale_,offset_);
1402 : break;
1403 108 : case OUTLIERS:
1404 108 : new_energy = getEnergySP(mean_,new_sigma,scale_,offset_);
1405 : break;
1406 48 : case MOUTLIERS:
1407 48 : new_energy = getEnergySPE(mean_,new_sigma,scale_,offset_);
1408 : break;
1409 36 : case GENERIC:
1410 36 : new_energy = getEnergyMIGEN(mean_,ftilde_,new_sigma,scale_,offset_);
1411 : break;
1412 : }
1413 :
1414 : // accept or reject
1415 280 : const double delta = ( new_energy - old_energy ) / kbt_;
1416 : // if delta is negative always accept move
1417 280 : if( delta <= 0.0 ) {
1418 280 : old_energy = new_energy;
1419 280 : sigma_ = new_sigma;
1420 280 : MCaccept_++;
1421 : // otherwise extract random number
1422 : } else {
1423 0 : const double s = random[0].RandU01();
1424 0 : if( s < std::exp(-delta) ) {
1425 0 : old_energy = new_energy;
1426 0 : sigma_ = new_sigma;
1427 0 : MCaccept_++;
1428 : }
1429 : }
1430 : }
1431 :
1432 280 : double Metainference::doMonteCarlo(const std::vector<double> &mean_) {
1433 : // calculate old energy with the updated coordinates
1434 280 : double old_energy=0.;
1435 :
1436 280 : switch(noise_type_) {
1437 36 : case GAUSS:
1438 36 : old_energy = getEnergyGJ(mean_,sigma_,scale_,offset_);
1439 36 : break;
1440 52 : case MGAUSS:
1441 52 : old_energy = getEnergyGJE(mean_,sigma_,scale_,offset_);
1442 52 : break;
1443 108 : case OUTLIERS:
1444 108 : old_energy = getEnergySP(mean_,sigma_,scale_,offset_);
1445 108 : break;
1446 48 : case MOUTLIERS:
1447 48 : old_energy = getEnergySPE(mean_,sigma_,scale_,offset_);
1448 48 : break;
1449 36 : case GENERIC:
1450 36 : old_energy = getEnergyMIGEN(mean_,ftilde_,sigma_,scale_,offset_);
1451 36 : break;
1452 : }
1453 :
1454 : // do not run MC if this is a replica-exchange trial
1455 280 : if(!getExchangeStep()) {
1456 :
1457 : // Create a vector of randomly shuffled sigma indices
1458 : std::vector<unsigned> indices;
1459 280 : if (MCchunksize_ > 0) {
1460 12 : for (unsigned j=0; j<sigma_.size(); j++) {
1461 6 : indices.push_back(j);
1462 : }
1463 6 : random[2].Shuffle(indices);
1464 : }
1465 280 : bool breaknow = false;
1466 :
1467 : // cycle on MC steps
1468 560 : for(unsigned i=0; i<MCsteps_; ++i) {
1469 280 : MCtrial_++;
1470 : // propose move for ftilde
1471 280 : if(noise_type_==GENERIC) {
1472 36 : moveTilde(mean_, old_energy);
1473 : }
1474 : // propose move for scale and/or offset
1475 280 : if(doscale_||dooffset_) {
1476 216 : moveScaleOffset(mean_, old_energy);
1477 : }
1478 : // propose move for sigma
1479 280 : moveSigmas(mean_, old_energy, i, indices, breaknow);
1480 : // exit from the loop if this is the case
1481 280 : if(breaknow) {
1482 : break;
1483 : }
1484 : }
1485 :
1486 : /* save the result of the sampling */
1487 : /* ftilde */
1488 280 : if(noise_type_==GENERIC) {
1489 36 : double accept = static_cast<double>(MCacceptFT_) / static_cast<double>(MCtrial_);
1490 36 : valueAcceptFT->set(accept);
1491 108 : for(unsigned i=0; i<sigma_.size(); i++) {
1492 72 : valueFtilde[i]->set(ftilde_[i]);
1493 : }
1494 : }
1495 : /* scale and offset */
1496 280 : if(doscale_ || doregres_zero_) {
1497 150 : valueScale->set(scale_);
1498 : }
1499 280 : if(dooffset_) {
1500 72 : valueOffset->set(offset_);
1501 : }
1502 280 : if(doscale_||dooffset_) {
1503 216 : double accept = static_cast<double>(MCacceptScale_) / static_cast<double>(MCtrial_);
1504 216 : valueAcceptScale->set(accept);
1505 : }
1506 : /* sigmas */
1507 3236 : for(unsigned i=0; i<sigma_.size(); i++) {
1508 2956 : valueSigma[i]->set(sigma_[i]);
1509 : }
1510 280 : double accept = static_cast<double>(MCaccept_) / static_cast<double>(MCtrial_);
1511 280 : valueAccept->set(accept);
1512 : }
1513 :
1514 : // here we sum the score over the replicas to get the full metainference score that we save as a bias
1515 280 : if(master) {
1516 206 : if(nrep_>1) {
1517 74 : multi_sim_comm.Sum(old_energy);
1518 : }
1519 : } else {
1520 74 : old_energy=0;
1521 : }
1522 280 : comm.Sum(old_energy);
1523 :
1524 : // this is the energy with current coordinates and parameters
1525 280 : return old_energy;
1526 : }
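
The closing summation follows a common pattern: only the master rank of each replica holds a meaningful energy, so the energy is summed across replica masters, zeroed on all other ranks, and then summed within each replica so that every rank ends up with the total. A sketch of the analogous pattern in plain MPI (assuming inter_replica contains exactly one rank per replica; PLUMED wraps this in its Communicator class):

#include <mpi.h>

// Combine per-replica energies so every rank ends up with the total.
double sumOverReplicas(double energy, bool is_master,
                       MPI_Comm inter_replica, MPI_Comm intra_replica) {
  if (is_master) {
    // sum across the masters of all replicas
    MPI_Allreduce(MPI_IN_PLACE, &energy, 1, MPI_DOUBLE, MPI_SUM, inter_replica);
  } else {
    energy = 0.0;  // zero non-masters to avoid double counting below
  }
  // broadcast the total inside each replica via an intra-replica sum
  MPI_Allreduce(MPI_IN_PLACE, &energy, 1, MPI_DOUBLE, MPI_SUM, intra_replica);
  return energy;
}
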
1527 :
1528 : /*
1529 : In the following energy-force functions we do not add the normalisation and the Jeffreys priors,
1530 : because they are not needed for the forces; the correct Metainference energy is the one calculated
1531 : in the Monte Carlo
1532 : */
1533 :
1534 108 : void Metainference::getEnergyForceSP(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1535 : const std::vector<double> &dmean_b) {
1536 108 : const double scale2 = scale_*scale_;
1537 108 : const double sm2 = sigma_mean2_[0];
1538 108 : const double ss2 = sigma_[0]*sigma_[0] + scale2*sm2;
1539 108 : std::vector<double> f(narg,0);
1540 :
1541 108 : if(master) {
1542 84 : #pragma omp parallel num_threads(OpenMP::getNumThreads())
1543 : {
1544 : #pragma omp for
1545 : for(unsigned i=0; i<narg; ++i) {
1546 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1547 : const double a2 = 0.5*dev*dev + ss2;
1548 : if(sm2 > 0.0) {
1549 : const double t = std::exp(-a2/sm2);
1550 : const double dt = 1./t;
1551 : const double dit = 1./(1.-dt);
1552 : f[i] = -scale_*dev*(dit/sm2 + 1./a2);
1553 : } else {
1554 : f[i] = -scale_*dev*(1./a2);
1555 : }
1556 : }
1557 : }
1558 : // collect contribution to forces and energy from other replicas
1559 84 : if(nrep_>1) {
1560 24 : multi_sim_comm.Sum(&f[0],narg);
1561 : }
1562 : }
1563 : // intra-replica summation
1564 108 : comm.Sum(&f[0],narg);
1565 :
1566 : double w_tmp = 0.;
1567 720 : for(unsigned i=0; i<narg; ++i) {
1568 612 : setOutputForce(i, kbt_*f[i]*dmean_x[i]);
1569 612 : w_tmp += kbt_*f[i]*dmean_b[i];
1570 : }
1571 :
1572 108 : if(do_reweight_) {
1573 48 : setOutputForce(narg, w_tmp);
1574 96 : getPntrToComponent("biasDer")->set(-w_tmp);
1575 : }
1576 108 : }
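
Because the terms dropped from the energy (normalisation and Jeffreys priors) do not depend on the arguments, they cancel exactly in any derivative with respect to them; this also makes analytic forces like the one above easy to validate against a centred finite difference of the energy. A generic sketch (hypothetical energy functor, not PLUMED code):

#include <functional>
#include <vector>

// Centred finite-difference force: F_i = -dE/dx_i ~ -(E(x+h e_i) - E(x-h e_i)) / 2h.
// Argument-independent constants in E drop out of the difference.
double numericalForce(const std::function<double(const std::vector<double>&)> &energy,
                      std::vector<double> x, unsigned i, double h = 1e-6) {
  x[i] += h;
  const double ep = energy(x);
  x[i] -= 2.0 * h;
  const double em = energy(x);
  return -(ep - em) / (2.0 * h);
}
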
1577 :
1578 48 : void Metainference::getEnergyForceSPE(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1579 : const std::vector<double> &dmean_b) {
1580 48 : const double scale2 = scale_*scale_;
1581 48 : std::vector<double> f(narg,0);
1582 :
1583 48 : if(master) {
1584 24 : #pragma omp parallel num_threads(OpenMP::getNumThreads())
1585 : {
1586 : #pragma omp for
1587 : for(unsigned i=0; i<narg; ++i) {
1588 : const double sm2 = sigma_mean2_[i];
1589 : const double ss2 = sigma_[i]*sigma_[i] + scale2*sm2;
1590 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1591 : const double a2 = 0.5*dev*dev + ss2;
1592 : if(sm2 > 0.0) {
1593 : const double t = std::exp(-a2/sm2);
1594 : const double dt = 1./t;
1595 : const double dit = 1./(1.-dt);
1596 : f[i] = -scale_*dev*(dit/sm2 + 1./a2);
1597 : } else {
1598 : f[i] = -scale_*dev*(1./a2);
1599 : }
1600 : }
1601 : }
1602 : // collect contribution to forces and energy from other replicas
1603 24 : if(nrep_>1) {
1604 24 : multi_sim_comm.Sum(&f[0],narg);
1605 : }
1606 : }
1607 48 : comm.Sum(&f[0],narg);
1608 :
1609 : double w_tmp = 0.;
1610 240 : for(unsigned i=0; i<narg; ++i) {
1611 192 : setOutputForce(i, kbt_ * dmean_x[i] * f[i]);
1612 192 : w_tmp += kbt_ * dmean_b[i] *f[i];
1613 : }
1614 :
1615 48 : if(do_reweight_) {
1616 48 : setOutputForce(narg, w_tmp);
1617 96 : getPntrToComponent("biasDer")->set(-w_tmp);
1618 : }
1619 48 : }
1620 :
1621 36 : void Metainference::getEnergyForceGJ(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1622 : const std::vector<double> &dmean_b) {
1623 36 : const double scale2 = scale_*scale_;
1624 36 : double inv_s2=0.;
1625 :
1626 36 : if(master) {
1627 36 : inv_s2 = 1./(sigma_[0]*sigma_[0] + scale2*sigma_mean2_[0]);
1628 36 : if(nrep_>1) {
1629 0 : multi_sim_comm.Sum(inv_s2);
1630 : }
1631 : }
1632 36 : comm.Sum(inv_s2);
1633 :
1634 : double w_tmp = 0.;
1635 36 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(w_tmp)
1636 : {
1637 : #pragma omp for reduction( + : w_tmp)
1638 : for(unsigned i=0; i<narg; ++i) {
1639 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1640 : const double mult = dev*scale_*inv_s2;
1641 : setOutputForce(i, -kbt_*dmean_x[i]*mult);
1642 : w_tmp += kbt_*dmean_b[i]*mult;
1643 : }
1644 : }
1645 :
1646 36 : if(do_reweight_) {
1647 0 : setOutputForce(narg, -w_tmp);
1648 0 : getPntrToComponent("biasDer")->set(w_tmp);
1649 : }
1650 36 : }
1651 :
1652 52 : void Metainference::getEnergyForceGJE(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1653 : const std::vector<double> &dmean_b) {
1654 52 : const double scale2 = scale_*scale_;
1655 52 : std::vector<double> inv_s2(sigma_.size(),0.);
1656 :
1657 52 : if(master) {
1658 1300 : for(unsigned i=0; i<sigma_.size(); ++i) {
1659 1274 : inv_s2[i] = 1./(sigma_[i]*sigma_[i] + scale2*sigma_mean2_[i]);
1660 : }
1661 26 : if(nrep_>1) {
1662 26 : multi_sim_comm.Sum(&inv_s2[0],sigma_.size());
1663 : }
1664 : }
1665 52 : comm.Sum(&inv_s2[0],sigma_.size());
1666 :
1667 : double w_tmp = 0.;
1668 52 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(w_tmp)
1669 : {
1670 : #pragma omp for reduction( + : w_tmp)
1671 : for(unsigned i=0; i<narg; ++i) {
1672 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1673 : const double mult = dev*scale_*inv_s2[i];
1674 : setOutputForce(i, -kbt_*dmean_x[i]*mult);
1675 : w_tmp += kbt_*dmean_b[i]*mult;
1676 : }
1677 : }
1678 :
1679 52 : if(do_reweight_) {
1680 52 : setOutputForce(narg, -w_tmp);
1681 104 : getPntrToComponent("biasDer")->set(w_tmp);
1682 : }
1683 52 : }
1684 :
1685 36 : void Metainference::getEnergyForceMIGEN(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b) {
1686 36 : std::vector<double> inv_s2(sigma_.size(),0.);
1687 36 : std::vector<double> dev(sigma_.size(),0.);
1688 36 : std::vector<double> dev2(sigma_.size(),0.);
1689 :
1690 108 : for(unsigned i=0; i<sigma_.size(); ++i) {
1691 72 : inv_s2[i] = 1./sigma_mean2_[i];
1692 72 : if(master) {
1693 72 : dev[i] = (mean[i]-ftilde_[i]);
1694 72 : dev2[i] = dev[i]*dev[i];
1695 : }
1696 : }
1697 36 : if(master&&nrep_>1) {
1698 0 : multi_sim_comm.Sum(&dev[0],dev.size());
1699 0 : multi_sim_comm.Sum(&dev2[0],dev2.size());
1700 : }
1701 36 : comm.Sum(&dev[0],dev.size());
1702 36 : comm.Sum(&dev2[0],dev2.size());
1703 :
1704 : double dene_b = 0.;
1705 36 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(dene_b)
1706 : {
1707 : #pragma omp for reduction( + : dene_b)
1708 : for(unsigned i=0; i<narg; ++i) {
1709 : const double dene_x = kbt_*inv_s2[i]*dmean_x[i]*dev[i];
1710 : dene_b += kbt_*inv_s2[i]*dmean_b[i]*dev[i];
1711 : setOutputForce(i, -dene_x);
1712 : }
1713 : }
1714 :
1715 36 : if(do_reweight_) {
1716 0 : setOutputForce(narg, -dene_b);
1717 0 : getPntrToComponent("biasDer")->set(dene_b);
1718 : }
1719 36 : }
1720 :
1721 280 : void Metainference::get_weights(const unsigned iselect, double &weight, double &norm, double &neff) {
1722 280 : const double dnrep = static_cast<double>(nrep_);
1723 : // calculate the weights either from BIAS
1724 280 : if(do_reweight_) {
1725 148 : std::vector<double> bias(nrep_,0);
1726 148 : if(master) {
1727 74 : bias[replica_] = getArgument(narg);
1728 74 : if(nrep_>1) {
1729 74 : multi_sim_comm.Sum(&bias[0], nrep_);
1730 : }
1731 : }
1732 148 : comm.Sum(&bias[0], nrep_);
1733 :
1734 : // accumulate weights
1735 148 : const double decay = 1./static_cast<double> (average_weights_stride_);
1736 148 : if(!firstTimeW[iselect]) {
1737 408 : for(unsigned i=0; i<nrep_; ++i) {
1738 272 : const double delta=bias[i]-average_weights_[iselect][i];
1739 272 : average_weights_[iselect][i]+=decay*delta;
1740 : }
1741 : } else {
1742 : firstTimeW[iselect] = false;
1743 36 : for(unsigned i=0; i<nrep_; ++i) {
1744 24 : average_weights_[iselect][i] = bias[i];
1745 : }
1746 : }
1747 :
1748 : // turn the averaged bias into weights, shifting by the maximum for numerical stability
1749 148 : const double maxbias = *(std::max_element(average_weights_[iselect].begin(), average_weights_[iselect].end()));
1750 444 : for(unsigned i=0; i<nrep_; ++i) {
1751 296 : bias[i] = std::exp((average_weights_[iselect][i]-maxbias)/kbt_);
1752 : }
1753 : // set local weight, norm and weight variance
1754 148 : weight = bias[replica_];
1755 : double w2=0.;
1756 444 : for(unsigned i=0; i<nrep_; ++i) {
1757 296 : w2 += bias[i]*bias[i];
1758 296 : norm += bias[i];
1759 : }
1760 148 : neff = norm*norm/w2;
1761 296 : getPntrToComponent("weight")->set(weight/norm);
1762 : } else {
1763 : // or arithmetic ones
1764 132 : neff = dnrep;
1765 132 : weight = 1.0;
1766 132 : norm = dnrep;
1767 : }
1768 280 : getPntrToComponent("neff")->set(neff);
1769 280 : }
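
The weights are Boltzmann-like factors of the time-averaged bias, with the maximum subtracted before exponentiating so the largest exponent is exactly zero and nothing overflows; neff is the Kish effective sample size, which equals nrep for equal weights and approaches one when a single replica dominates. The same arithmetic in a self-contained sketch (hypothetical names):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Boltzmann-like weights from (averaged) bias values, plus the Kish
// effective sample size neff = (sum w)^2 / (sum w^2).
double effectiveSampleSize(const std::vector<double> &bias, double kbt,
                           std::vector<double> &weights) {
  const double maxbias = *std::max_element(bias.begin(), bias.end());
  weights.resize(bias.size());
  double norm = 0.0, w2 = 0.0;
  for (std::size_t i = 0; i < bias.size(); ++i) {
    weights[i] = std::exp((bias[i] - maxbias) / kbt);  // shifted exponential
    norm += weights[i];
    w2 += weights[i] * weights[i];
  }
  return norm * norm / w2;
}
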
1770 :
1771 280 : void Metainference::get_sigma_mean(const unsigned iselect, const double weight, const double norm, const double neff, const std::vector<double> &mean) {
1772 280 : const double dnrep = static_cast<double>(nrep_);
1773 280 : std::vector<double> sigma_mean2_tmp(sigma_mean2_.size(), 0.);
1774 :
1775 280 : if(do_optsigmamean_>0) {
1776 : // remove the first entry of the history vector
1777 0 : if(sigma_mean2_last_[iselect][0].size()==optsigmamean_stride_&&optsigmamean_stride_>0)
1778 0 : for(unsigned i=0; i<narg; ++i) {
1779 0 : sigma_mean2_last_[iselect][i].erase(sigma_mean2_last_[iselect][i].begin());
1780 : }
1781 : /* this is the current estimate of sigma mean for each argument;
1782 : one is kept per argument in any case, because the maximum
1783 : among them is the one used in the GAUSS/OUTLIERS case */
1784 0 : std::vector<double> sigma_mean2_now(narg,0);
1785 0 : if(master) {
1786 0 : for(unsigned i=0; i<narg; ++i) {
1787 0 : sigma_mean2_now[i] = weight*(getArgument(i)-mean[i])*(getArgument(i)-mean[i]);
1788 : }
1789 0 : if(nrep_>1) {
1790 0 : multi_sim_comm.Sum(&sigma_mean2_now[0], narg);
1791 : }
1792 : }
1793 0 : comm.Sum(&sigma_mean2_now[0], narg);
1794 0 : for(unsigned i=0; i<narg; ++i) {
1795 0 : sigma_mean2_now[i] *= 1.0/(neff-1.)/norm;
1796 : }
1797 :
1798 : // add sigma_mean2 to history
1799 0 : if(optsigmamean_stride_>0) {
1800 0 : for(unsigned i=0; i<narg; ++i) {
1801 0 : sigma_mean2_last_[iselect][i].push_back(sigma_mean2_now[i]);
1802 : }
1803 : } else {
1804 0 : for(unsigned i=0; i<narg; ++i)
1805 0 : if(sigma_mean2_now[i] > sigma_mean2_last_[iselect][i][0]) {
1806 0 : sigma_mean2_last_[iselect][i][0] = sigma_mean2_now[i];
1807 : }
1808 : }
1809 :
1810 0 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
1811 0 : for(unsigned i=0; i<narg; ++i) {
1812 : /* set to the maximum in the history vector */
1813 0 : sigma_mean2_tmp[i] = *max_element(sigma_mean2_last_[iselect][i].begin(), sigma_mean2_last_[iselect][i].end());
1814 : /* the standard error of the mean */
1815 0 : valueSigmaMean[i]->set(std::sqrt(sigma_mean2_tmp[i]));
1816 0 : if(noise_type_==GENERIC) {
1817 0 : sigma_min_[i] = std::sqrt(sigma_mean2_tmp[i]);
1818 0 : if(sigma_[i] < sigma_min_[i]) {
1819 0 : sigma_[i] = sigma_min_[i];
1820 : }
1821 : }
1822 : }
1823 0 : } else if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
1824 : // find maximum for each data point
1825 : std::vector <double> max_values;
1826 0 : for(unsigned i=0; i<narg; ++i) {
1827 0 : max_values.push_back(*max_element(sigma_mean2_last_[iselect][i].begin(), sigma_mean2_last_[iselect][i].end()));
1828 : }
1829 : // find maximum across data points
1830 0 : const double max_now = *max_element(max_values.begin(), max_values.end());
1831 : // set new value
1832 0 : sigma_mean2_tmp[0] = max_now;
1833 0 : valueSigmaMean[0]->set(std::sqrt(sigma_mean2_tmp[0]));
1834 : }
1835 : // endif sigma mean optimization
1836 : // start sigma max optimization
1837 0 : if(do_optsigmamean_>1&&!sigmamax_opt_done_) {
1838 0 : for(unsigned i=0; i<sigma_max_.size(); i++) {
1839 0 : if(sigma_max_est_[i]<sigma_mean2_tmp[i]&&optimized_step_>optsigmamean_stride_) {
1840 0 : sigma_max_est_[i]=sigma_mean2_tmp[i];
1841 : }
1842 : // ready to set once and for all the value of sigma_max
1843 0 : if(optimized_step_==N_optimized_step_) {
1844 0 : sigmamax_opt_done_=true;
1845 0 : for(unsigned i=0; i<sigma_max_.size(); i++) {
1846 0 : sigma_max_[i]=std::sqrt(sigma_max_est_[i]*dnrep);
1847 0 : Dsigma_[i] = 0.05*(sigma_max_[i] - sigma_min_[i]);
1848 0 : if(sigma_[i]>sigma_max_[i]) {
1849 0 : sigma_[i]=sigma_max_[i];
1850 : }
1851 : }
1852 : }
1853 : }
1854 0 : optimized_step_++;
1855 : }
1856 : // end sigma max optimization
1857 : } else {
1858 280 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
1859 2948 : for(unsigned i=0; i<narg; ++i) {
1860 2812 : sigma_mean2_tmp[i] = sigma_mean2_last_[iselect][i][0];
1861 2812 : valueSigmaMean[i]->set(std::sqrt(sigma_mean2_tmp[i]));
1862 : }
1863 144 : } else if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
1864 144 : sigma_mean2_tmp[0] = sigma_mean2_last_[iselect][0][0];
1865 144 : valueSigmaMean[0]->set(std::sqrt(sigma_mean2_tmp[0]));
1866 : }
1867 : }
1868 :
1869 280 : sigma_mean2_ = sigma_mean2_tmp;
1870 280 : }
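
In the optimised branch, sigma_mean² for each argument is a weighted squared standard error of the mean over the replicas, with neff - 1 in place of nrep - 1 to account for unequal weights. Written in isolation (hypothetical names, a sketch of the estimator rather than the PLUMED bookkeeping):

#include <cstddef>
#include <vector>

// Weighted squared standard error of the mean across replicas:
// sem2 = sum_k w_k (x_k - mean)^2 / ((neff - 1) * sum_k w_k).
double weightedSem2(const std::vector<double> &x,
                    const std::vector<double> &w, double neff) {
  double norm = 0.0, mean = 0.0;
  for (std::size_t k = 0; k < x.size(); ++k) { norm += w[k]; mean += w[k] * x[k]; }
  mean /= norm;
  double s = 0.0;
  for (std::size_t k = 0; k < x.size(); ++k) {
    const double d = x[k] - mean;
    s += w[k] * d * d;
  }
  return s / (neff - 1.0) / norm;
}
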
1871 :
1872 280 : void Metainference::replica_averaging(const double weight, const double norm, std::vector<double> &mean, std::vector<double> &dmean_b) {
1873 280 : if(master) {
1874 2236 : for(unsigned i=0; i<narg; ++i) {
1875 2030 : mean[i] = weight/norm*getArgument(i);
1876 : }
1877 206 : if(nrep_>1) {
1878 74 : multi_sim_comm.Sum(&mean[0], narg);
1879 : }
1880 : }
1881 280 : comm.Sum(&mean[0], narg);
1882 : // set the derivative of the mean with respect to the bias
1883 3776 : for(unsigned i=0; i<narg; ++i) {
1884 3496 : dmean_b[i] = weight/norm/kbt_*(getArgument(i)-mean[i])/static_cast<double>(average_weights_stride_);
1885 : }
1886 :
1887 : // this is only for generic metainference
1888 280 : if(firstTime) {
1889 28 : ftilde_ = mean;
1890 28 : firstTime = false;
1891 : }
1892 280 : }
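
dmean_b is the analytic derivative of the weighted replica average with respect to the bias of each replica, divided by the averaging stride because the weights are exponential moving averages of the bias. With w_k \propto \exp(V_k / k_B T) the derivation is one line of calculus:

\bar{x} = \frac{\sum_k w_k x_k}{\sum_k w_k},
\qquad
\frac{\partial \bar{x}}{\partial V_j}
  = \frac{1}{k_B T}\,\frac{w_j}{\sum_k w_k}\,\bigl(x_j - \bar{x}\bigr)

which is exactly the weight/norm/kbt_*(getArgument(i)-mean[i]) factor in the loop above.
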
1893 :
1894 6 : void Metainference::do_regression_zero(const std::vector<double> &mean) {
1895 : // parameters[i] = scale_ * mean[i]: find scale_ with linear regression
1896 : double num = 0.0;
1897 : double den = 0.0;
1898 48 : for(unsigned i=0; i<parameters.size(); ++i) {
1899 42 : num += mean[i] * parameters[i];
1900 42 : den += mean[i] * mean[i];
1901 : }
1902 6 : if(den>0) {
1903 6 : scale_ = num / den;
1904 : } else {
1905 0 : scale_ = 1.0;
1906 : }
1907 6 : }
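
The closed form above follows from minimising the residual sum of squares of a line through the origin, parameters_i \approx s \cdot mean_i; setting the derivative with respect to s to zero gives

\frac{d}{ds}\sum_i (p_i - s\,m_i)^2 = -2\sum_i m_i\,(p_i - s\,m_i) = 0
\quad\Longrightarrow\quad
s = \frac{\sum_i m_i\,p_i}{\sum_i m_i^2}

with the den > 0 guard protecting against all-zero calculated data.
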
1908 :
1909 280 : void Metainference::calculate() {
1910 : // get step
1911 280 : const long long int step = getStep();
1912 :
1913 : unsigned iselect = 0;
1914 : // set the value of selector for REM-like stuff
1915 280 : if(selector_.length()>0) {
1916 0 : iselect = static_cast<unsigned>(plumed.passMap[selector_]);
1917 : }
1918 :
1919 : /* 1) collect weights */
1920 280 : double weight = 0.;
1921 280 : double neff = 0.;
1922 280 : double norm = 0.;
1923 280 : get_weights(iselect, weight, norm, neff);
1924 :
1925 : /* 2) calculate average */
1926 280 : std::vector<double> mean(narg,0);
1927 : // this is the derivative of the mean with respect to the argument
1928 280 : std::vector<double> dmean_x(narg,weight/norm);
1929 : // this is the derivative of the mean with respect to the bias
1930 280 : std::vector<double> dmean_b(narg,0);
1931 : // calculate it
1932 280 : replica_averaging(weight, norm, mean, dmean_b);
1933 :
1934 : /* 3) calculate parameters */
1935 280 : get_sigma_mean(iselect, weight, norm, neff, mean);
1936 :
1937 : // in case of regression with zero intercept, calculate scale
1938 280 : if(doregres_zero_ && step%nregres_zero_==0) {
1939 6 : do_regression_zero(mean);
1940 : }
1941 :
1942 : /* 4) run Monte Carlo */
1943 280 : double ene = doMonteCarlo(mean);
1944 :
1945 : // calculate bias and forces
1946 280 : switch(noise_type_) {
1947 36 : case GAUSS:
1948 36 : getEnergyForceGJ(mean, dmean_x, dmean_b);
1949 : break;
1950 52 : case MGAUSS:
1951 52 : getEnergyForceGJE(mean, dmean_x, dmean_b);
1952 : break;
1953 108 : case OUTLIERS:
1954 108 : getEnergyForceSP(mean, dmean_x, dmean_b);
1955 : break;
1956 48 : case MOUTLIERS:
1957 48 : getEnergyForceSPE(mean, dmean_x, dmean_b);
1958 : break;
1959 36 : case GENERIC:
1960 36 : getEnergyForceMIGEN(mean, dmean_x, dmean_b);
1961 : break;
1962 : }
1963 :
1964 280 : setBias(ene);
1965 280 : }
1966 :
1967 32 : void Metainference::writeStatus() {
1968 32 : sfile_.rewind();
1969 32 : sfile_.printField("time",getTimeStep()*getStep());
1970 : //nsel
1971 64 : for(unsigned i=0; i<sigma_mean2_last_.size(); i++) {
1972 : std::string msg_i,msg_j;
1973 32 : Tools::convert(i,msg_i);
1974 : std::vector <double> max_values;
1975 : //narg
1976 2518 : for(unsigned j=0; j<narg; ++j) {
1977 2486 : Tools::convert(j,msg_j);
1978 2486 : std::string msg = msg_i+"_"+msg_j;
1979 2486 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
1980 7182 : sfile_.printField("sigmaMean_"+msg,std::sqrt(*max_element(sigma_mean2_last_[i][j].begin(), sigma_mean2_last_[i][j].end())));
1981 : } else {
1982 : // find maximum for each data point
1983 184 : max_values.push_back(*max_element(sigma_mean2_last_[i][j].begin(), sigma_mean2_last_[i][j].end()));
1984 : }
1985 : }
1986 32 : if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
1987 : // find maximum across data points
1988 17 : const double max_now = std::sqrt(*max_element(max_values.begin(), max_values.end()));
1989 17 : Tools::convert(0,msg_j);
1990 17 : std::string msg = msg_i+"_"+msg_j;
1991 34 : sfile_.printField("sigmaMean_"+msg, max_now);
1992 : }
1993 : }
1994 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
1995 : std::string msg;
1996 2411 : Tools::convert(i,msg);
1997 4822 : sfile_.printField("sigma_"+msg,sigma_[i]);
1998 : }
1999 2443 : for(unsigned i=0; i<sigma_max_.size(); ++i) {
2000 : std::string msg;
2001 2411 : Tools::convert(i,msg);
2002 4822 : sfile_.printField("sigma_max_"+msg,sigma_max_[i]);
2003 : }
2004 32 : if(noise_type_==GENERIC) {
2005 9 : for(unsigned i=0; i<ftilde_.size(); ++i) {
2006 : std::string msg;
2007 6 : Tools::convert(i,msg);
2008 12 : sfile_.printField("ftilde_"+msg,ftilde_[i]);
2009 : }
2010 : }
2011 32 : sfile_.printField("scale0_",scale_);
2012 32 : sfile_.printField("offset0_",offset_);
2013 64 : for(unsigned i=0; i<average_weights_.size(); i++) {
2014 : std::string msg_i;
2015 32 : Tools::convert(i,msg_i);
2016 64 : sfile_.printField("weight_"+msg_i,average_weights_[i][replica_]);
2017 : }
2018 32 : sfile_.printField();
2019 32 : sfile_.flush();
2020 32 : }
2021 :
2022 280 : void Metainference::update() {
2023 : // write status file
2024 280 : if(write_stride_>0&& (getStep()%write_stride_==0 || getCPT()) ) {
2025 32 : writeStatus();
2026 : }
2027 280 : }
2028 :
2029 : }
2030 : }
2031 :