LCOV - code coverage report
Current view: top level - isdb - Metainference.cpp (source / functions)
Test: plumed test coverage
Date: 2025-12-04 11:19:34
Coverage: Lines: 834 / 996 (83.7 %) | Functions: 25 / 27 (92.6 %)

          Line data    Source code
       1             : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
       2             :    Copyright (c) 2016-2023 The plumed team
       3             :    (see the PEOPLE file at the root of the distribution for a list of names)
       4             : 
       5             :    See http://www.plumed.org for more information.
       6             : 
       7             :    This file is part of plumed, version 2.
       8             : 
       9             :    plumed is free software: you can redistribute it and/or modify
      10             :    it under the terms of the GNU Lesser General Public License as published by
      11             :    the Free Software Foundation, either version 3 of the License, or
      12             :    (at your option) any later version.
      13             : 
      14             :    plumed is distributed in the hope that it will be useful,
      15             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      16             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      17             :    GNU Lesser General Public License for more details.
      18             : 
      19             :    You should have received a copy of the GNU Lesser General Public License
      20             :    along with plumed.  If not, see <http://www.gnu.org/licenses/>.
      21             : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
      22             : 
      23             : #include "bias/Bias.h"
      24             : #include "core/ActionRegister.h"
      25             : #include "core/PlumedMain.h"
      26             : #include "core/Value.h"
      27             : #include "tools/File.h"
      28             : #include "tools/OpenMP.h"
      29             : #include "tools/Random.h"
      30             : #include "tools/Communicator.h"
      31             : #include <chrono>
      32             : #include <numeric>
      33             : 
      34             : #ifndef M_PI
      35             : #define M_PI           3.14159265358979323846
      36             : #endif
      37             : 
      38             : namespace PLMD {
      39             : namespace isdb {
      40             : 
      41             : //+PLUMEDOC ISDB_BIAS METAINFERENCE
      42             : /*
      43             : Calculates the Metainference energy for a set of experimental data.
      44             : 
       45             : The metainference method, introduced in the first paper cited below, is a Bayesian framework
      46             : to model heterogeneous systems by integrating prior information with noisy, ensemble-averaged data.
      47             : Metainference models a system and quantifies the level of noise in the data by considering a set of replicas of the system.
      48             : 
       49             : Calculated experimental data are given in input as ARG, while reference experimental values
       50             : can be provided either as fixed components of other actions using PARARG or as numbers using
       51             : PARAMETERS. The default behavior is to average the data over the available replicas;
       52             : if this is not wanted, the keyword NOENSEMBLE prevents the averaging.
      53             : 
       54             : Metadynamics Metainference (see the second paper cited below), or more generally biased Metainference, requires knowledge of the
       55             : biasing potential in order to calculate the weighted average. In this case the value of the bias
       56             : can be provided as the last argument in ARG together with the keyword REWEIGHT. To avoid the noise
       57             : resulting from the instantaneous value of the bias, the weight of each replica can be averaged
       58             : over a given time using the keyword AVERAGING.
      59             : 
       60             : The data can be averaged over multiple replicas and weighted for a bias if present.
       61             : The functional form of Metainference can be chosen among five variants selected
       62             : with NOISETYPE=GAUSS,MGAUSS,OUTLIERS,MOUTLIERS,GENERIC, which correspond to modelling the noise for
       63             : the arguments as a single gaussian common to all the data points, a gaussian per data
       64             : point, a single long-tailed gaussian common to all the data points, a long-tailed
       65             : gaussian per data point, or two distinct noises as in the most general formulation of Metainference.
       66             : In this latter case the noise of the replica-averaging is gaussian (one per data point) and the noise for
       67             : the comparison with the experimental data can be chosen using the keyword LIKELIHOOD
       68             : between gaussian and log-normal (one per data point); furthermore, the evolution of the estimated average
       69             : over an infinite number of replicas is driven by DFTILDE.
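                      : 
                      : As an illustrative sketch of this GENERIC formulation (atoms, arguments and reference values
                      : below are hypothetical placeholders, with plain distances standing in for back-calculated
                      : experimental observables), a per-data-point noise with a log-normal likelihood could be set up
                      : along these lines:
                      : 
                      : ```plumed
                      : d1: DISTANCE ATOMS=1,2
                      : d2: DISTANCE ATOMS=3,4
                      : 
                      : METAINFERENCE ...
                      : ARG=d1,d2
                      : NOISETYPE=GENERIC
                      : LIKELIHOOD=LOGN
                      : DFTILDE=0.1
                      : PARAMETERS=0.35,0.42
                      : SIGMA0=0.1 SIGMA_MIN=0.0001 SIGMA_MAX=10. DSIGMA=0.1
                      : SIGMA_MEAN0=0.01
                      : TEMP=300
                      : LABEL=gen
                      : ... METAINFERENCE
                      : ```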
      70             : 
       71             : As for Metainference theory there are two sigma values: SIGMA_MEAN0 represents the
       72             : error made in calculating an average quantity using a finite set of replicas and should
       73             : be set as small as possible following the guidelines for replica-averaged simulations
       74             : in the framework of the Maximum Entropy Principle. Alternatively, this can be obtained
       75             : automatically using the internal sigma-mean optimization introduced in the third paper cited below
       76             : (OPTSIGMAMEAN=SEM); in this second case sigma_mean is estimated from the maximum standard error
       77             : of the mean either over the whole simulation or over a defined time window using the keyword AVERAGING.
       78             : SIGMA_BIAS is an uncertainty parameter, sampled by an MC algorithm in the bounded interval
       79             : defined by SIGMA_MIN and SIGMA_MAX. The initial value is set with SIGMA0. The MC move is a
       80             : random displacement of maximum size equal to DSIGMA. If the number of data points is
       81             : too large and the acceptance rate drops, it is possible to make the MC move over mutually
       82             : exclusive, random subsets of size MC_CHUNKSIZE and run more than one move by setting MC_STEPS
       83             : in such a way that MC_CHUNKSIZE*MC_STEPS covers all the data points.
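                      : 
                      : The sketch below illustrates these keywords with hypothetical arguments and reference values:
                      : sigma_mean is estimated on the fly over windows of 200 steps, and the MC move on the
                      : uncertainties is split into two mutually exclusive chunks of two data points each.
                      : 
                      : ```plumed
                      : d1: DISTANCE ATOMS=1,2
                      : d2: DISTANCE ATOMS=3,4
                      : d3: DISTANCE ATOMS=5,6
                      : d4: DISTANCE ATOMS=7,8
                      : 
                      : METAINFERENCE ...
                      : ARG=d1,d2,d3,d4
                      : NOISETYPE=MGAUSS
                      : PARAMETERS=0.35,0.42,0.29,0.51
                      : OPTSIGMAMEAN=SEM AVERAGING=200
                      : SIGMA0=0.1 SIGMA_MIN=0.0001 SIGMA_MAX=10. DSIGMA=0.1
                      : MC_STEPS=2 MC_CHUNKSIZE=2
                      : TEMP=300
                      : LABEL=mi
                      : ... METAINFERENCE
                      : ```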
      84             : 
       85             : Calculated and experimental data can be compared modulo a scaling factor and/or an offset
       86             : using SCALEDATA and/or ADDOFFSET; the sampling is performed by an MC algorithm using either
       87             : a flat or a gaussian prior, selected with SCALE_PRIOR or OFFSET_PRIOR.
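                      : 
                      : For instance, an offset with a flat prior could be sampled with keywords along these
                      : lines (again a sketch, with hypothetical arguments, reference values and bounds):
                      : 
                      : ```plumed
                      : d1: DISTANCE ATOMS=1,2
                      : d2: DISTANCE ATOMS=3,4
                      : 
                      : METAINFERENCE ...
                      : ARG=d1,d2
                      : NOISETYPE=MGAUSS
                      : PARAMETERS=0.35,0.42
                      : ADDOFFSET OFFSET0=0.0 OFFSET_PRIOR=FLAT OFFSET_MIN=-1.0 OFFSET_MAX=1.0 DOFFSET=0.01
                      : SIGMA0=0.1 SIGMA_MIN=0.0001 SIGMA_MAX=10. DSIGMA=0.1
                      : SIGMA_MEAN0=0.01
                      : TEMP=300
                      : LABEL=mi
                      : ... METAINFERENCE
                      : ```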
      88             : 
      89             : ## Examples
      90             : 
       91             : In the following example we calculate a set of [RDC](RDC.md) values, take their replica
       92             : average and compare them with a set of experimental values. The RDCs are compared with
       93             : the experimental data up to a multiplication factor SCALE that is also sampled by
       94             : MC on the fly.
      95             : 
      96             : ```plumed
      97             : RDC ...
      98             : LABEL=rdc
      99             : SCALE=0.0001
     100             : GYROM=-72.5388
     101             : ATOMS1=22,23
     102             : ATOMS2=25,27
     103             : ATOMS3=29,31
     104             : ATOMS4=33,34
     105             : ... RDC
     106             : 
     107             : METAINFERENCE ...
     108             : ARG=rdc.*
     109             : NOISETYPE=MGAUSS
     110             : PARAMETERS=1.9190,2.9190,3.9190,4.9190
     111             : SCALEDATA SCALE0=1 SCALE_MIN=0.1 SCALE_MAX=3 DSCALE=0.01
     112             : SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
     113             : SIGMA_MEAN0=0.001
     114             : LABEL=spe
     115             : ... METAINFERENCE
     116             : 
     117             : PRINT ARG=spe.bias FILE=BIAS STRIDE=1
     118             : ```
     119             : 
      120             : In the following example, instead of using one uncertainty parameter per data point, we use
      121             : a single uncertainty value in a long-tailed gaussian to account for outliers; furthermore,
      122             : the data are weighted for the bias applied to other variables of the system.
     123             : 
     124             : ```plumed
     125             : RDC ...
     126             : LABEL=rdc
     127             : SCALE=0.0001
     128             : GYROM=-72.5388
     129             : ATOMS1=22,23
     130             : ATOMS2=25,27
     131             : ATOMS3=29,31
     132             : ATOMS4=33,34
     133             : ... RDC
     134             : 
     135             : cv1: TORSION ATOMS=1,2,3,4
     136             : cv2: TORSION ATOMS=2,3,4,5
     137             : mm: METAD ARG=cv1,cv2 HEIGHT=0.5 SIGMA=0.3,0.3 PACE=200 BIASFACTOR=8 WALKERS_MPI
     138             : 
     139             : METAINFERENCE ...
     140             : #SETTINGS NREPLICAS=2
     141             : ARG=rdc.*,mm.bias
     142             : REWEIGHT
     143             : NOISETYPE=OUTLIERS
     144             : PARAMETERS=1.9190,2.9190,3.9190,4.9190
     145             : SCALEDATA SCALE0=1 SCALE_MIN=0.1 SCALE_MAX=3 DSCALE=0.01
     146             : SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
     147             : SIGMA_MEAN0=0.001
     148             : LABEL=spe
     149             : ... METAINFERENCE
     150             : ```
     151             : 
     152             : (See also [RDC](RDC.md), [PBMETAD](PBMETAD.md)).
     153             : 
     154             : */
     155             : //+ENDPLUMEDOC
     156             : 
     157             : class Metainference : public bias::Bias {
     158             :   // experimental values
     159             :   std::vector<double> parameters;
     160             :   // noise type
     161             :   unsigned noise_type_;
     162             :   enum { GAUSS, MGAUSS, OUTLIERS, MOUTLIERS, GENERIC };
     163             :   unsigned gen_likelihood_;
     164             :   enum { LIKE_GAUSS, LIKE_LOGN };
      165             :   // scale is the data scaling factor
      166             :   // prior type for the scale
     167             :   unsigned scale_prior_;
     168             :   enum { SC_GAUSS, SC_FLAT };
     169             :   bool   doscale_;
     170             :   double scale_;
     171             :   double scale_mu_;
     172             :   double scale_min_;
     173             :   double scale_max_;
     174             :   double Dscale_;
      175             :   // offset is a constant added to the data
      176             :   // prior type for the offset
     177             :   unsigned offset_prior_;
     178             :   bool   dooffset_;
     179             :   double offset_;
     180             :   double offset_mu_;
     181             :   double offset_min_;
     182             :   double offset_max_;
     183             :   double Doffset_;
     184             :   // scale and offset regression
     185             :   bool doregres_zero_;
     186             :   int  nregres_zero_;
     187             :   // sigma is data uncertainty
     188             :   std::vector<double> sigma_;
     189             :   std::vector<double> sigma_min_;
     190             :   std::vector<double> sigma_max_;
     191             :   std::vector<double> Dsigma_;
     192             :   // sigma_mean is uncertainty in the mean estimate
     193             :   std::vector<double> sigma_mean2_;
     194             :   // this is the estimator of the mean value per replica for generic metainference
     195             :   std::vector<double> ftilde_;
     196             :   double Dftilde_;
     197             : 
      198             :   // temperature expressed as kB*T (energy units)
     199             :   double   kbt_;
     200             : 
     201             :   // Monte Carlo stuff
     202             :   std::vector<Random> random;
     203             :   unsigned MCsteps_;
     204             :   long long unsigned MCaccept_;
     205             :   long long unsigned MCacceptScale_;
     206             :   long long unsigned MCacceptFT_;
     207             :   long long unsigned MCtrial_;
     208             :   unsigned MCchunksize_;
     209             : 
     210             :   // output
     211             :   Value*   valueScale;
     212             :   Value*   valueOffset;
     213             :   Value*   valueAccept;
     214             :   Value*   valueAcceptScale;
     215             :   Value*   valueAcceptFT;
     216             :   std::vector<Value*> valueSigma;
     217             :   std::vector<Value*> valueSigmaMean;
     218             :   std::vector<Value*> valueFtilde;
     219             : 
     220             :   // restart
     221             :   unsigned write_stride_;
     222             :   OFile    sfile_;
     223             : 
     224             :   // others
     225             :   bool         firstTime;
     226             :   std::vector<bool> firstTimeW;
     227             :   bool     master;
     228             :   bool     do_reweight_;
     229             :   unsigned do_optsigmamean_;
     230             :   unsigned nrep_;
     231             :   unsigned replica_;
     232             :   unsigned narg;
     233             : 
     234             :   // selector
     235             :   std::string selector_;
     236             : 
     237             :   // optimize sigma mean
     238             :   std::vector< std::vector < std::vector <double> > > sigma_mean2_last_;
     239             :   unsigned optsigmamean_stride_;
     240             :   // optimize sigma max
     241             :   unsigned N_optimized_step_;
     242             :   unsigned optimized_step_;
     243             :   bool sigmamax_opt_done_;
     244             :   std::vector<double> sigma_max_est_;
     245             : 
     246             :   // average weights
     247             :   unsigned                   average_weights_stride_;
     248             :   std::vector< std::vector <double> >  average_weights_;
     249             : 
     250             :   double getEnergyMIGEN(const std::vector<double> &mean, const std::vector<double> &ftilde, const std::vector<double> &sigma,
     251             :                         const double scale, const double offset);
     252             :   double getEnergySP(const std::vector<double> &mean, const std::vector<double> &sigma,
     253             :                      const double scale, const double offset);
     254             :   double getEnergySPE(const std::vector<double> &mean, const std::vector<double> &sigma,
     255             :                       const double scale, const double offset);
     256             :   double getEnergyGJ(const std::vector<double> &mean, const std::vector<double> &sigma,
     257             :                      const double scale, const double offset);
     258             :   double getEnergyGJE(const std::vector<double> &mean, const std::vector<double> &sigma,
     259             :                       const double scale, const double offset);
     260             :   void moveTilde(const std::vector<double> &mean_, double &old_energy);
     261             :   void moveScaleOffset(const std::vector<double> &mean_, double &old_energy);
     262             :   void moveSigmas(const std::vector<double> &mean_, double &old_energy, const unsigned i, const std::vector<unsigned> &indices, bool &breaknow);
     263             :   double doMonteCarlo(const std::vector<double> &mean);
     264             :   void getEnergyForceMIGEN(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
     265             :   void getEnergyForceSP(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
     266             :   void getEnergyForceSPE(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
     267             :   void getEnergyForceGJ(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
     268             :   void getEnergyForceGJE(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
     269             :   void get_weights(const unsigned iselect, double &weight, double &norm, double &neff);
     270             :   void replica_averaging(const double weight, const double norm, std::vector<double> &mean, std::vector<double> &dmean_b);
     271             :   void get_sigma_mean(const unsigned iselect, const double weight, const double norm, const double neff, const std::vector<double> &mean);
     272             :   void writeStatus();
     273             :   void do_regression_zero(const std::vector<double> &mean);
     274             : 
     275             : public:
     276             :   explicit Metainference(const ActionOptions&);
     277             :   ~Metainference();
     278             :   void calculate() override;
     279             :   void update() override;
     280             :   static void registerKeywords(Keywords& keys);
     281             : };
     282             : 
     283             : 
     284             : PLUMED_REGISTER_ACTION(Metainference,"METAINFERENCE")
     285             : 
     286          34 : void Metainference::registerKeywords(Keywords& keys) {
     287          34 :   Bias::registerKeywords(keys);
     288          68 :   keys.addInputKeyword("optional","PARARG","scalar","reference values for the experimental data, these can be provided as arguments without derivatives");
     289          34 :   keys.add("optional","PARAMETERS","reference values for the experimental data");
     290          34 :   keys.addFlag("NOENSEMBLE",false,"don't perform any replica-averaging");
     291          34 :   keys.addFlag("REWEIGHT",false,"simple REWEIGHT using the latest ARG as energy");
     292          34 :   keys.add("optional","AVERAGING", "Stride for calculation of averaged weights and sigma_mean");
     293          34 :   keys.add("compulsory","NOISETYPE","MGAUSS","functional form of the noise (GAUSS,MGAUSS,OUTLIERS,MOUTLIERS,GENERIC)");
     294          34 :   keys.add("compulsory","LIKELIHOOD","GAUSS","the likelihood for the GENERIC metainference model, GAUSS or LOGN");
     295          34 :   keys.add("compulsory","DFTILDE","0.1","fraction of sigma_mean used to evolve ftilde");
     296          34 :   keys.addFlag("SCALEDATA",false,"Set to TRUE if you want to sample a scaling factor common to all values and replicas");
     297          34 :   keys.add("compulsory","SCALE0","1.0","initial value of the scaling factor");
     298          34 :   keys.add("compulsory","SCALE_PRIOR","FLAT","either FLAT or GAUSSIAN");
     299          34 :   keys.add("optional","SCALE_MIN","minimum value of the scaling factor");
     300          34 :   keys.add("optional","SCALE_MAX","maximum value of the scaling factor");
     301          34 :   keys.add("optional","DSCALE","maximum MC move of the scaling factor");
     302          34 :   keys.addFlag("ADDOFFSET",false,"Set to TRUE if you want to sample an offset common to all values and replicas");
     303          34 :   keys.add("compulsory","OFFSET0","0.0","initial value of the offset");
     304          34 :   keys.add("compulsory","OFFSET_PRIOR","FLAT","either FLAT or GAUSSIAN");
     305          34 :   keys.add("optional","OFFSET_MIN","minimum value of the offset");
     306          34 :   keys.add("optional","OFFSET_MAX","maximum value of the offset");
     307          34 :   keys.add("optional","DOFFSET","maximum MC move of the offset");
     308          34 :   keys.add("optional","REGRES_ZERO","stride for regression with zero offset");
     309          34 :   keys.add("compulsory","SIGMA0","1.0","initial value of the uncertainty parameter");
     310          34 :   keys.add("compulsory","SIGMA_MIN","0.0","minimum value of the uncertainty parameter");
     311          34 :   keys.add("compulsory","SIGMA_MAX","10.","maximum value of the uncertainty parameter");
     312          34 :   keys.add("optional","DSIGMA","maximum MC move of the uncertainty parameter");
     313          34 :   keys.add("compulsory","OPTSIGMAMEAN","NONE","Set to NONE/SEM to manually set sigma mean, or to estimate it on the fly");
     314          34 :   keys.add("optional","SIGMA_MEAN0","starting value for the uncertainty in the mean estimate");
      315          34 :   keys.add("optional","SIGMA_MAX_STEPS", "Number of steps used to optimise SIGMA_MAX; before that, the SIGMA_MAX value is used");
      316          34 :   keys.add("optional","TEMP","the system temperature - this is only needed if the MD engine does not pass the temperature to plumed");
     317          34 :   keys.add("optional","MC_STEPS","number of MC steps");
     318          34 :   keys.add("optional","MC_CHUNKSIZE","MC chunksize");
     319          34 :   keys.add("optional","STATUS_FILE","write a file with all the data useful for restart/continuation of Metainference");
      320          34 :   keys.add("optional","FMT","specify the format for the status file (useful to decrease the number of digits in regtests)");
     321          34 :   keys.add("compulsory","WRITE_STRIDE","10000","write the status to a file every N steps, this can be used for restart/continuation");
     322          34 :   keys.add("optional","SELECTOR","name of selector");
     323          34 :   keys.add("optional","NSELECT","range of values for selector [0, N-1]");
     324          34 :   keys.use("RESTART");
     325          68 :   keys.addOutputComponent("sigma",        "default",      "scalar","uncertainty parameter");
     326          68 :   keys.addOutputComponent("sigmaMean",    "default",      "scalar","uncertainty in the mean estimate");
     327          68 :   keys.addOutputComponent("neff",         "default",      "scalar","effective number of replicas");
     328          68 :   keys.addOutputComponent("acceptSigma",  "default",      "scalar","MC acceptance for sigma values");
     329          68 :   keys.addOutputComponent("acceptScale",  "SCALEDATA",    "scalar","MC acceptance for scale value");
     330          68 :   keys.addOutputComponent("acceptFT",     "GENERIC",      "scalar","MC acceptance for general metainference f tilde value");
     331          68 :   keys.addOutputComponent("weight",       "REWEIGHT",     "scalar","weights of the weighted average");
     332          68 :   keys.addOutputComponent("biasDer",      "REWEIGHT",     "scalar","derivatives with respect to the bias");
     333          68 :   keys.addOutputComponent("scale",        "SCALEDATA",    "scalar","scale parameter");
     334          68 :   keys.addOutputComponent("offset",       "ADDOFFSET",    "scalar","offset parameter");
     335          68 :   keys.addOutputComponent("ftilde",       "GENERIC",      "scalar","ensemble average estimator");
     336          34 :   keys.addDOI("10.1126/sciadv.1501177");
     337          34 :   keys.addDOI("10.1038/srep31232");
     338          34 :   keys.addDOI("10.1063/1.4981211");
     339          34 : }
     340             : 
     341          32 : Metainference::Metainference(const ActionOptions&ao):
     342             :   PLUMED_BIAS_INIT(ao),
     343          32 :   doscale_(false),
     344          32 :   scale_(1.),
     345          32 :   scale_mu_(0),
     346          32 :   scale_min_(1),
     347          32 :   scale_max_(-1),
     348          32 :   Dscale_(-1),
     349          32 :   dooffset_(false),
     350          32 :   offset_(0.),
     351          32 :   offset_mu_(0),
     352          32 :   offset_min_(1),
     353          32 :   offset_max_(-1),
     354          32 :   Doffset_(-1),
     355          32 :   doregres_zero_(false),
     356          32 :   nregres_zero_(0),
     357          32 :   Dftilde_(0.1),
     358          32 :   random(3),
     359          32 :   MCsteps_(1),
     360          32 :   MCaccept_(0),
     361          32 :   MCacceptScale_(0),
     362          32 :   MCacceptFT_(0),
     363          32 :   MCtrial_(0),
     364          32 :   MCchunksize_(0),
     365          32 :   write_stride_(0),
     366          32 :   firstTime(true),
     367          32 :   do_reweight_(false),
     368          32 :   do_optsigmamean_(0),
     369          32 :   optsigmamean_stride_(0),
     370          32 :   N_optimized_step_(0),
     371          32 :   optimized_step_(0),
     372          32 :   sigmamax_opt_done_(false),
     373          32 :   average_weights_stride_(1) {
     374          32 :   bool noensemble = false;
     375          32 :   parseFlag("NOENSEMBLE", noensemble);
     376             : 
     377             :   // set up replica stuff
     378          32 :   master = (comm.Get_rank()==0);
     379          32 :   if(master) {
     380          24 :     nrep_    = multi_sim_comm.Get_size();
     381          24 :     replica_ = multi_sim_comm.Get_rank();
     382          24 :     if(noensemble) {
     383           0 :       nrep_ = 1;
     384             :     }
     385             :   } else {
     386           8 :     nrep_    = 0;
     387           8 :     replica_ = 0;
     388             :   }
     389          32 :   comm.Sum(&nrep_,1);
     390          32 :   comm.Sum(&replica_,1);
     391             : 
     392          32 :   unsigned nsel = 1;
     393          32 :   parse("SELECTOR", selector_);
     394          64 :   parse("NSELECT", nsel);
     395             :   // do checks
     396          32 :   if(selector_.length()>0 && nsel<=1) {
     397           0 :     error("With SELECTOR active, NSELECT must be greater than 1");
     398             :   }
     399          32 :   if(selector_.length()==0 && nsel>1) {
     400           0 :     error("With NSELECT greater than 1, you must specify SELECTOR");
     401             :   }
     402             : 
     403             :   // initialise firstTimeW
     404          32 :   firstTimeW.resize(nsel, true);
     405             : 
     406             :   // reweight implies a different number of arguments (the latest one must always be the bias)
     407          32 :   parseFlag("REWEIGHT", do_reweight_);
     408          32 :   if(do_reweight_&&nrep_<2) {
     409           0 :     error("REWEIGHT can only be used in parallel with 2 or more replicas");
     410             :   }
     411          32 :   if(!getRestart()) {
     412          56 :     average_weights_.resize(nsel, std::vector<double> (nrep_, 1./static_cast<double>(nrep_)));
     413             :   } else {
     414           8 :     average_weights_.resize(nsel, std::vector<double> (nrep_, 0.));
     415             :   }
     416          32 :   narg = getNumberOfArguments();
     417          32 :   if(do_reweight_) {
     418          16 :     narg--;
     419             :   }
     420             : 
     421          32 :   unsigned averaging=0;
     422          32 :   parse("AVERAGING", averaging);
     423          32 :   if(averaging>0) {
     424           1 :     average_weights_stride_ = averaging;
     425           1 :     optsigmamean_stride_    = averaging;
     426             :   }
     427             : 
     428          64 :   parseVector("PARAMETERS",parameters);
     429          32 :   if(parameters.size()!=static_cast<unsigned>(narg)&&!parameters.empty()) {
     430           0 :     error("Size of PARAMETERS array should be either 0 or the same as of the number of arguments in ARG1");
     431             :   }
     432             : 
     433             :   std::vector<Value*> arg2;
     434          64 :   parseArgumentList("PARARG",arg2);
     435          32 :   if(!arg2.empty()) {
     436           4 :     if(parameters.size()>0) {
     437           0 :       error("It is not possible to use PARARG and PARAMETERS together");
     438             :     }
     439           4 :     if(arg2.size()!=narg) {
      440           0 :       error("Size of PARARG array should be the same as the number of arguments in ARG");
     441             :     }
     442        2360 :     for(unsigned i=0; i<arg2.size(); i++) {
     443        2356 :       parameters.push_back(arg2[i]->get());
     444        2356 :       if(arg2[i]->hasDerivatives()==true) {
     445           0 :         error("PARARG can only accept arguments without derivatives");
     446             :       }
     447             :     }
     448             :   }
     449             : 
     450          32 :   if(parameters.size()!=narg) {
     451           0 :     error("PARARG or PARAMETERS arrays should include the same number of elements as the arguments in ARG");
     452             :   }
     453             :   {
     454             :     std::string stringa_noise;
     455          64 :     parse("NOISETYPE",stringa_noise);
     456          32 :     if(stringa_noise=="GAUSS") {
     457           3 :       noise_type_ = GAUSS;
     458          29 :     } else if(stringa_noise=="MGAUSS") {
     459           8 :       noise_type_ = MGAUSS;
     460          21 :     } else if(stringa_noise=="OUTLIERS") {
     461          14 :       noise_type_ = OUTLIERS;
     462           7 :     } else if(stringa_noise=="MOUTLIERS") {
     463           4 :       noise_type_ = MOUTLIERS;
     464           3 :     } else if(stringa_noise=="GENERIC") {
     465           3 :       noise_type_ = GENERIC;
     466             :     } else {
     467           0 :       error("Unknown noise type!");
     468             :     }
     469             :   }
     470          32 :   if(noise_type_== GENERIC) {
     471             :     std::string stringa_like;
     472           6 :     parse("LIKELIHOOD",stringa_like);
     473           3 :     if(stringa_like=="GAUSS") {
     474           2 :       gen_likelihood_ = LIKE_GAUSS;
     475           1 :     } else if(stringa_like=="LOGN") {
     476           1 :       gen_likelihood_ = LIKE_LOGN;
     477             :     } else {
     478           0 :       error("Unknown likelihood type!");
     479             :     }
     480             : 
     481           6 :     parse("DFTILDE",Dftilde_);
     482             :   }
     483             : 
     484          64 :   parse("WRITE_STRIDE",write_stride_);
     485             :   std::string status_file_name_;
     486          64 :   parse("STATUS_FILE",status_file_name_);
     487          32 :   if(status_file_name_=="") {
     488          64 :     status_file_name_ = "MISTATUS"+getLabel();
     489             :   } else {
     490           0 :     status_file_name_ = status_file_name_+getLabel();
     491             :   }
     492             : 
     493             :   std::string stringa_optsigma;
     494          64 :   parse("OPTSIGMAMEAN", stringa_optsigma);
     495          32 :   if(stringa_optsigma=="NONE") {
     496          30 :     do_optsigmamean_=0;
     497           2 :   } else if(stringa_optsigma=="SEM") {
     498           0 :     do_optsigmamean_=1;
     499           2 :   } else if(stringa_optsigma=="SEM_MAX") {
     500           0 :     do_optsigmamean_=2;
     501             :   }
     502             : 
     503          32 :   unsigned aver_max_steps=0;
     504          32 :   parse("SIGMA_MAX_STEPS", aver_max_steps);
     505          32 :   if(aver_max_steps==0&&do_optsigmamean_==2) {
     506           0 :     aver_max_steps=averaging*2000;
     507             :   }
     508          32 :   if(aver_max_steps>0&&do_optsigmamean_<2) {
     509           0 :     error("SIGMA_MAX_STEPS can only be used together with OPTSIGMAMEAN=SEM_MAX");
     510             :   }
     511          32 :   if(aver_max_steps>0&&do_optsigmamean_==2) {
     512           0 :     N_optimized_step_=aver_max_steps;
     513             :   }
     514          32 :   if(aver_max_steps>0&&aver_max_steps<averaging) {
     515           0 :     error("SIGMA_MAX_STEPS must be greater than AVERAGING");
     516             :   }
     517             : 
     518             :   // resize std::vector for sigma_mean history
     519          32 :   sigma_mean2_last_.resize(nsel);
     520          64 :   for(unsigned i=0; i<nsel; i++) {
     521          32 :     sigma_mean2_last_[i].resize(narg);
     522             :   }
     523             : 
     524             :   std::vector<double> read_sigma_mean_;
     525          32 :   parseVector("SIGMA_MEAN0",read_sigma_mean_);
     526          32 :   if(do_optsigmamean_==0 && read_sigma_mean_.size()==0 && !getRestart()) {
     527           0 :     error("If you don't use OPTSIGMAMEAN and you are not RESTARTING then you MUST SET SIGMA_MEAN0");
     528             :   }
     529             : 
     530          32 :   if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     531          15 :     if(read_sigma_mean_.size()==narg) {
     532           0 :       sigma_mean2_.resize(narg);
     533           0 :       for(unsigned i=0; i<narg; i++) {
     534           0 :         sigma_mean2_[i]=read_sigma_mean_[i]*read_sigma_mean_[i];
     535             :       }
     536          15 :     } else if(read_sigma_mean_.size()==1) {
     537          15 :       sigma_mean2_.resize(narg,read_sigma_mean_[0]*read_sigma_mean_[0]);
     538           0 :     } else if(read_sigma_mean_.size()==0) {
     539           0 :       sigma_mean2_.resize(narg,0.000001);
     540             :     } else {
     541           0 :       error("SIGMA_MEAN0 can accept either one single value or as many values as the arguments (with NOISETYPE=MGAUSS|MOUTLIERS)");
     542             :     }
     543             :     // set the initial value for the history
     544          30 :     for(unsigned i=0; i<nsel; i++)
     545        2409 :       for(unsigned j=0; j<narg; j++) {
     546        2394 :         sigma_mean2_last_[i][j].push_back(sigma_mean2_[j]);
     547             :       }
     548             :   } else {
     549          17 :     if(read_sigma_mean_.size()==1) {
     550          17 :       sigma_mean2_.resize(1, read_sigma_mean_[0]*read_sigma_mean_[0]);
     551           0 :     } else if(read_sigma_mean_.size()==0) {
     552           0 :       sigma_mean2_.resize(1, 0.000001);
     553             :     } else {
     554           0 :       error("If you want to use more than one SIGMA_MEAN0 you should use NOISETYPE=MGAUSS|MOUTLIERS");
     555             :     }
     556             :     // set the initial value for the history
     557          34 :     for(unsigned i=0; i<nsel; i++)
     558         109 :       for(unsigned j=0; j<narg; j++) {
     559          92 :         sigma_mean2_last_[i][j].push_back(sigma_mean2_[0]);
     560             :       }
     561             :   }
     562             : 
     563          32 :   parseFlag("SCALEDATA", doscale_);
     564          32 :   if(doscale_) {
     565             :     std::string stringa_noise;
     566          24 :     parse("SCALE_PRIOR",stringa_noise);
     567          12 :     if(stringa_noise=="GAUSSIAN") {
     568           0 :       scale_prior_ = SC_GAUSS;
     569          12 :     } else if(stringa_noise=="FLAT") {
     570          12 :       scale_prior_ = SC_FLAT;
     571             :     } else {
     572           0 :       error("Unknown SCALE_PRIOR type!");
     573             :     }
     574          12 :     parse("SCALE0",scale_);
     575          12 :     parse("DSCALE",Dscale_);
     576          12 :     if(Dscale_<0.) {
     577           0 :       error("DSCALE must be set when using SCALEDATA");
     578             :     }
     579          12 :     if(scale_prior_==SC_GAUSS) {
     580           0 :       scale_mu_=scale_;
     581             :     } else {
     582          12 :       parse("SCALE_MIN",scale_min_);
     583          12 :       parse("SCALE_MAX",scale_max_);
     584          12 :       if(scale_max_<scale_min_) {
     585           0 :         error("SCALE_MAX and SCALE_MIN must be set when using SCALE_PRIOR=FLAT");
     586             :       }
     587             :     }
     588             :   }
     589             : 
     590          32 :   parseFlag("ADDOFFSET", dooffset_);
     591          32 :   if(dooffset_) {
     592             :     std::string stringa_noise;
     593          12 :     parse("OFFSET_PRIOR",stringa_noise);
     594           6 :     if(stringa_noise=="GAUSSIAN") {
     595           0 :       offset_prior_ = SC_GAUSS;
     596           6 :     } else if(stringa_noise=="FLAT") {
     597           6 :       offset_prior_ = SC_FLAT;
     598             :     } else {
     599           0 :       error("Unknown OFFSET_PRIOR type!");
     600             :     }
     601           6 :     parse("OFFSET0",offset_);
     602           6 :     parse("DOFFSET",Doffset_);
     603           6 :     if(offset_prior_==SC_GAUSS) {
     604           0 :       offset_mu_=offset_;
     605           0 :       if(Doffset_<0.) {
     606           0 :         error("DOFFSET must be set when using OFFSET_PRIOR=GAUSS");
     607             :       }
     608             :     } else {
     609           6 :       parse("OFFSET_MIN",offset_min_);
     610           6 :       parse("OFFSET_MAX",offset_max_);
     611           6 :       if(Doffset_<0) {
     612           0 :         Doffset_ = 0.05*(offset_max_ - offset_min_);
     613             :       }
     614           6 :       if(offset_max_<offset_min_) {
     615           0 :         error("OFFSET_MAX and OFFSET_MIN must be set when using OFFSET_PRIOR=FLAT");
     616             :       }
     617             :     }
     618             :   }
     619             : 
     620             :   // regression with zero intercept
     621          32 :   parse("REGRES_ZERO", nregres_zero_);
     622          32 :   if(nregres_zero_>0) {
     623             :     // set flag
     624           1 :     doregres_zero_=true;
     625             :     // check if already sampling scale and offset
     626           1 :     if(doscale_) {
     627           0 :       error("REGRES_ZERO and SCALEDATA are mutually exclusive");
     628             :     }
     629           1 :     if(dooffset_) {
     630           0 :       error("REGRES_ZERO and ADDOFFSET are mutually exclusive");
     631             :     }
     632             :   }
     633             : 
     634             :   std::vector<double> readsigma;
     635          32 :   parseVector("SIGMA0",readsigma);
     636          32 :   if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma.size()>1) {
     637           0 :     error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
     638             :   }
     639          32 :   if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     640          15 :     sigma_.resize(readsigma.size());
     641          15 :     sigma_=readsigma;
     642             :   } else {
     643          17 :     sigma_.resize(1, readsigma[0]);
     644             :   }
     645             : 
     646             :   std::vector<double> readsigma_min;
     647          32 :   parseVector("SIGMA_MIN",readsigma_min);
     648          32 :   if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_min.size()>1) {
     649           0 :     error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
     650             :   }
     651          32 :   if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     652          15 :     sigma_min_.resize(readsigma_min.size());
     653          15 :     sigma_min_=readsigma_min;
     654             :   } else {
     655          17 :     sigma_min_.resize(1, readsigma_min[0]);
     656             :   }
     657             : 
     658             :   std::vector<double> readsigma_max;
     659          32 :   parseVector("SIGMA_MAX",readsigma_max);
     660          32 :   if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_max.size()>1) {
     661           0 :     error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
     662             :   }
     663          32 :   if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     664          15 :     sigma_max_.resize(readsigma_max.size());
     665          15 :     sigma_max_=readsigma_max;
     666             :   } else {
     667          17 :     sigma_max_.resize(1, readsigma_max[0]);
     668             :   }
     669             : 
     670          32 :   if(sigma_max_.size()!=sigma_min_.size()) {
     671           0 :     error("The number of values for SIGMA_MIN and SIGMA_MAX must be the same");
     672             :   }
     673             : 
     674             :   std::vector<double> read_dsigma;
     675          32 :   parseVector("DSIGMA",read_dsigma);
     676          32 :   if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_max.size()>1) {
     677           0 :     error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
     678             :   }
     679          32 :   if(read_dsigma.size()>0) {
     680          32 :     Dsigma_.resize(read_dsigma.size());
     681          32 :     Dsigma_=read_dsigma;
     682             :   } else {
     683           0 :     Dsigma_.resize(sigma_max_.size(), -1.);
     684             :     /* in this case Dsigma is initialised after reading the restart file if present */
     685             :   }
     686             : 
     687             :   std::string fmt_;
     688          32 :   parse("FMT",fmt_);
     689             : 
     690             :   // monte carlo stuff
     691          32 :   parse("MC_STEPS",MCsteps_);
     692          32 :   parse("MC_CHUNKSIZE", MCchunksize_);
     693             :   // get temperature
     694          32 :   kbt_ = getkBT();
     695          32 :   if(kbt_==0.0) {
     696           0 :     error("Unless the MD engine passes the temperature to plumed, you must specify it using TEMP");
     697             :   }
     698             : 
     699          32 :   checkRead();
     700             : 
     701             :   // set sigma_bias
     702          32 :   if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     703          15 :     if(sigma_.size()==1) {
     704          15 :       double tmp = sigma_[0];
     705          15 :       sigma_.resize(narg, tmp);
     706           0 :     } else if(sigma_.size()>1&&sigma_.size()!=narg) {
     707           0 :       error("SIGMA0 can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
     708             :     }
     709          15 :     if(sigma_min_.size()==1) {
     710          15 :       double tmp = sigma_min_[0];
     711          15 :       sigma_min_.resize(narg, tmp);
     712           0 :     } else if(sigma_min_.size()>1&&sigma_min_.size()!=narg) {
     713           0 :       error("SIGMA_MIN can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
     714             :     }
     715          15 :     if(sigma_max_.size()==1) {
     716          15 :       double tmp = sigma_max_[0];
     717          15 :       sigma_max_.resize(narg, tmp);
     718           0 :     } else if(sigma_max_.size()>1&&sigma_max_.size()!=narg) {
     719           0 :       error("SIGMA_MAX can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
     720             :     }
     721          15 :     if(Dsigma_.size()==1) {
     722          15 :       double tmp = Dsigma_[0];
     723          15 :       Dsigma_.resize(narg, tmp);
     724           0 :     } else if(Dsigma_.size()>1&&Dsigma_.size()!=narg) {
     725           0 :       error("DSIGMA can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
     726             :     }
     727             :   }
     728             : 
     729          32 :   sigma_max_est_.resize(sigma_max_.size(), 0.);
     730             : 
     731          32 :   IFile restart_sfile;
     732          32 :   restart_sfile.link(*this);
     733          32 :   if(getRestart()&&restart_sfile.FileExist(status_file_name_)) {
     734           4 :     firstTime = false;
     735           8 :     for(unsigned i=0; i<nsel; i++) {
     736             :       firstTimeW[i] = false;
     737             :     }
     738           4 :     restart_sfile.open(status_file_name_);
     739           4 :     log.printf("  Restarting from %s\n", status_file_name_.c_str());
     740             :     double dummy;
     741           8 :     if(restart_sfile.scanField("time",dummy)) {
     742             :       // check for syncronisation
     743           4 :       std::vector<double> dummy_time(nrep_,0);
     744           4 :       if(master&&nrep_>1) {
     745           2 :         dummy_time[replica_] = dummy;
     746           2 :         multi_sim_comm.Sum(dummy_time);
     747             :       }
     748           4 :       comm.Sum(dummy_time);
     749           8 :       for(unsigned i=1; i<nrep_; i++) {
     750           4 :         std::string msg = "METAINFERENCE restart files " + status_file_name_ + "  are not in sync";
     751           4 :         if(dummy_time[i]!=dummy_time[0]) {
     752           0 :           plumed_merror(msg);
     753             :         }
     754             :       }
     755             :       // nsel
     756           8 :       for(unsigned i=0; i<sigma_mean2_last_.size(); i++) {
     757             :         std::string msg_i;
     758           4 :         Tools::convert(i,msg_i);
     759             :         // narg
     760           4 :         if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     761          20 :           for(unsigned j=0; j<narg; ++j) {
     762             :             std::string msg_j;
     763          16 :             Tools::convert(j,msg_j);
     764          16 :             std::string msg = msg_i+"_"+msg_j;
     765             :             double read_sm;
     766          16 :             restart_sfile.scanField("sigmaMean_"+msg,read_sm);
     767          16 :             sigma_mean2_last_[i][j][0] = read_sm*read_sm;
     768             :           }
     769             :         }
     770           4 :         if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
     771             :           double read_sm;
     772             :           std::string msg_j;
     773           0 :           Tools::convert(0,msg_j);
     774           0 :           std::string msg = msg_i+"_"+msg_j;
     775           0 :           restart_sfile.scanField("sigmaMean_"+msg,read_sm);
     776           0 :           for(unsigned j=0; j<narg; j++) {
     777           0 :             sigma_mean2_last_[i][j][0] = read_sm*read_sm;
     778             :           }
     779             :         }
     780             :       }
     781             : 
     782          20 :       for(unsigned i=0; i<sigma_.size(); ++i) {
     783             :         std::string msg;
     784          16 :         Tools::convert(i,msg);
     785          32 :         restart_sfile.scanField("sigma_"+msg,sigma_[i]);
     786             :       }
     787          20 :       for(unsigned i=0; i<sigma_max_.size(); ++i) {
     788             :         std::string msg;
     789          16 :         Tools::convert(i,msg);
     790          16 :         restart_sfile.scanField("sigma_max_"+msg,sigma_max_[i]);
     791          16 :         sigmamax_opt_done_=true;
     792             :       }
     793           4 :       if(noise_type_==GENERIC) {
     794           0 :         for(unsigned i=0; i<ftilde_.size(); ++i) {
     795             :           std::string msg;
     796           0 :           Tools::convert(i,msg);
     797           0 :           restart_sfile.scanField("ftilde_"+msg,ftilde_[i]);
     798             :         }
     799             :       }
     800           4 :       restart_sfile.scanField("scale0_",scale_);
     801           4 :       restart_sfile.scanField("offset0_",offset_);
     802             : 
     803           8 :       for(unsigned i=0; i<nsel; i++) {
     804             :         std::string msg;
     805           4 :         Tools::convert(i,msg);
     806             :         double tmp_w;
     807           4 :         restart_sfile.scanField("weight_"+msg,tmp_w);
     808           4 :         if(master) {
     809           2 :           average_weights_[i][replica_] = tmp_w;
     810           2 :           if(nrep_>1) {
     811           2 :             multi_sim_comm.Sum(&average_weights_[i][0], nrep_);
     812             :           }
     813             :         }
     814           4 :         comm.Sum(&average_weights_[i][0], nrep_);
     815             :       }
     816             : 
     817             :     }
     818           4 :     restart_sfile.scanField();
     819           4 :     restart_sfile.close();
     820             :   }
     821             : 
     822             :   /* If DSIGMA is not yet initialised do it now */
     823        2443 :   for(unsigned i=0; i<sigma_max_.size(); i++)
     824        2411 :     if(Dsigma_[i]==-1) {
     825           0 :       Dsigma_[i] = 0.05*(sigma_max_[i] - sigma_min_[i]);
     826             :     }
     827             : 
     828          32 :   switch(noise_type_) {
     829           3 :   case GENERIC:
     830           3 :     log.printf("  with general metainference ");
     831           3 :     if(gen_likelihood_==LIKE_GAUSS) {
     832           2 :       log.printf(" and a gaussian likelihood\n");
     833           1 :     } else if(gen_likelihood_==LIKE_LOGN) {
     834           1 :       log.printf(" and a log-normal likelihood\n");
     835             :     }
     836           3 :     log.printf("  ensemble average parameter sampled with a step %lf of sigma_mean\n", Dftilde_);
     837             :     break;
     838           3 :   case GAUSS:
     839           3 :     log.printf("  with gaussian noise and a single noise parameter for all the data\n");
     840             :     break;
     841           8 :   case MGAUSS:
     842           8 :     log.printf("  with gaussian noise and a noise parameter for each data point\n");
     843             :     break;
     844          14 :   case OUTLIERS:
     845          14 :     log.printf("  with long tailed gaussian noise and a single noise parameter for all the data\n");
     846             :     break;
     847           4 :   case MOUTLIERS:
     848           4 :     log.printf("  with long tailed gaussian noise and a noise parameter for each data point\n");
     849             :     break;
     850             :   }
     851             : 
     852          32 :   if(doscale_) {
     853             :     // check that the scale value is the same for all replicas
     854          12 :     std::vector<double> dummy_scale(nrep_,0);
     855          12 :     if(master&&nrep_>1) {
     856           6 :       dummy_scale[replica_] = scale_;
     857           6 :       multi_sim_comm.Sum(dummy_scale);
     858             :     }
     859          12 :     comm.Sum(dummy_scale);
     860          24 :     for(unsigned i=1; i<nrep_; i++) {
     861          12 :       std::string msg = "The SCALE value must be the same for all replicas: check your input or restart file";
     862          12 :       if(dummy_scale[i]!=dummy_scale[0]) {
     863           0 :         plumed_merror(msg);
     864             :       }
     865             :     }
     866          12 :     log.printf("  sampling a common scaling factor with:\n");
     867          12 :     log.printf("    initial scale parameter %f\n",scale_);
     868          12 :     if(scale_prior_==SC_GAUSS) {
     869           0 :       log.printf("    gaussian prior with mean %f and width %f\n",scale_mu_,Dscale_);
     870             :     }
     871          12 :     if(scale_prior_==SC_FLAT) {
     872          12 :       log.printf("    flat prior between %f - %f\n",scale_min_,scale_max_);
     873          12 :       log.printf("    maximum MC move of scale parameter %f\n",Dscale_);
     874             :     }
     875             :   }
     876             : 
     877          32 :   if(dooffset_) {
     878             :     // check that the offset value is the same for all replicas
     879           6 :     std::vector<double> dummy_offset(nrep_,0);
     880           6 :     if(master&&nrep_>1) {
     881           0 :       dummy_offset[replica_] = offset_;
     882           0 :       multi_sim_comm.Sum(dummy_offset);
     883             :     }
     884           6 :     comm.Sum(dummy_offset);
     885           6 :     for(unsigned i=1; i<nrep_; i++) {
     886           0 :       std::string msg = "The OFFSET value must be the same for all replicas: check your input or restart file";
     887           0 :       if(dummy_offset[i]!=dummy_offset[0]) {
     888           0 :         plumed_merror(msg);
     889             :       }
     890             :     }
     891           6 :     log.printf("  sampling a common offset with:\n");
     892           6 :     log.printf("    initial offset parameter %f\n",offset_);
     893           6 :     if(offset_prior_==SC_GAUSS) {
     894           0 :       log.printf("    gaussian prior with mean %f and width %f\n",offset_mu_,Doffset_);
     895             :     }
     896           6 :     if(offset_prior_==SC_FLAT) {
     897           6 :       log.printf("    flat prior between %f - %f\n",offset_min_,offset_max_);
     898           6 :       log.printf("    maximum MC move of offset parameter %f\n",Doffset_);
     899             :     }
     900             :   }
     901             : 
     902          32 :   if(doregres_zero_) {
     903           1 :     log.printf("  doing regression with zero intercept with stride: %d\n", nregres_zero_);
     904             :   }
     905             : 
     906          32 :   log.printf("  number of experimental data points %u\n",narg);
     907          32 :   log.printf("  number of replicas %u\n",nrep_);
     908          32 :   log.printf("  initial data uncertainties");
     909        2443 :   for(unsigned i=0; i<sigma_.size(); ++i) {
     910        2411 :     log.printf(" %f", sigma_[i]);
     911             :   }
     912          32 :   log.printf("\n");
     913          32 :   log.printf("  minimum data uncertainties");
     914        2443 :   for(unsigned i=0; i<sigma_.size(); ++i) {
     915        2411 :     log.printf(" %f",sigma_min_[i]);
     916             :   }
     917          32 :   log.printf("\n");
     918          32 :   log.printf("  maximum data uncertainties");
     919        2475 :   for(unsigned i=0; i<sigma_.size(); ++i) {
     920        2411 :     log.printf(" %f",sigma_max_[i]);
     921             :   }
     922          32 :   log.printf("\n");
     923          32 :   log.printf("  maximum MC move of data uncertainties");
     924        2443 :   for(unsigned i=0; i<sigma_.size(); ++i) {
     925        2411 :     log.printf(" %f",Dsigma_[i]);
     926             :   }
     927          32 :   log.printf("\n");
     928          32 :   log.printf("  temperature of the system %f\n",kbt_);
     929          32 :   log.printf("  MC steps %u\n",MCsteps_);
     930          32 :   log.printf("  initial standard errors of the mean");
     931        2443 :   for(unsigned i=0; i<sigma_mean2_.size(); ++i) {
     932        2411 :     log.printf(" %f", std::sqrt(sigma_mean2_[i]));
     933             :   }
     934          32 :   log.printf("\n");
     935             : 
     936          32 :   if(do_reweight_) {
     937          32 :     addComponent("biasDer");
     938          32 :     componentIsNotPeriodic("biasDer");
     939          32 :     addComponent("weight");
     940          32 :     componentIsNotPeriodic("weight");
     941             :   }
     942             : 
     943          64 :   addComponent("neff");
     944          32 :   componentIsNotPeriodic("neff");
     945             : 
     946          32 :   if(doscale_ || doregres_zero_) {
     947          26 :     addComponent("scale");
     948          13 :     componentIsNotPeriodic("scale");
     949          13 :     valueScale=getPntrToComponent("scale");
     950             :   }
     951             : 
     952          32 :   if(dooffset_) {
     953          12 :     addComponent("offset");
     954           6 :     componentIsNotPeriodic("offset");
     955           6 :     valueOffset=getPntrToComponent("offset");
     956             :   }
     957             : 
     958          32 :   if(dooffset_||doscale_) {
     959          36 :     addComponent("acceptScale");
     960          18 :     componentIsNotPeriodic("acceptScale");
     961          18 :     valueAcceptScale=getPntrToComponent("acceptScale");
     962             :   }
     963             : 
     964          32 :   if(noise_type_==GENERIC) {
     965           6 :     addComponent("acceptFT");
     966           3 :     componentIsNotPeriodic("acceptFT");
     967           3 :     valueAcceptFT=getPntrToComponent("acceptFT");
     968             :   }
     969             : 
     970          64 :   addComponent("acceptSigma");
     971          32 :   componentIsNotPeriodic("acceptSigma");
     972          32 :   valueAccept=getPntrToComponent("acceptSigma");
     973             : 
     974          32 :   if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
     975        2409 :     for(unsigned i=0; i<sigma_mean2_.size(); ++i) {
     976             :       std::string num;
     977        2394 :       Tools::convert(i,num);
     978        4788 :       addComponent("sigmaMean-"+num);
     979        2394 :       componentIsNotPeriodic("sigmaMean-"+num);
     980        2394 :       valueSigmaMean.push_back(getPntrToComponent("sigmaMean-"+num));
     981        4788 :       getPntrToComponent("sigmaMean-"+num)->set(std::sqrt(sigma_mean2_[i]));
     982        4788 :       addComponent("sigma-"+num);
     983        2394 :       componentIsNotPeriodic("sigma-"+num);
     984        2394 :       valueSigma.push_back(getPntrToComponent("sigma-"+num));
     985        2394 :       getPntrToComponent("sigma-"+num)->set(sigma_[i]);
     986        2394 :       if(noise_type_==GENERIC) {
     987          12 :         addComponent("ftilde-"+num);
     988           6 :         componentIsNotPeriodic("ftilde-"+num);
     989           6 :         valueFtilde.push_back(getPntrToComponent("ftilde-"+num));
     990             :       }
     991             :     }
     992          15 :   } else {
     993          34 :     addComponent("sigmaMean");
     994          17 :     componentIsNotPeriodic("sigmaMean");
     995          17 :     valueSigmaMean.push_back(getPntrToComponent("sigmaMean"));
     996          34 :     getPntrToComponent("sigmaMean")->set(std::sqrt(sigma_mean2_[0]));
     997          34 :     addComponent("sigma");
     998          17 :     componentIsNotPeriodic("sigma");
     999          17 :     valueSigma.push_back(getPntrToComponent("sigma"));
    1000          34 :     getPntrToComponent("sigma")->set(sigma_[0]);
    1001             :   }
    1002             : 
    1003             :   // initialize random seed
    1004             :   unsigned iseed;
    1005          32 :   if(master) {
    1006          24 :     auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
    1007          24 :     iseed = static_cast<unsigned>(ts)+replica_;
    1008             :   } else {
    1009           8 :     iseed = 0;
    1010             :   }
    1011          32 :   comm.Sum(&iseed, 1);
    1012             :   // this is used for ftilde and sigma, for both the move and the acceptance;
    1013             :   // it is different for each replica
    1014          32 :   random[0].setSeed(-iseed);
    1015          32 :   if(doscale_||dooffset_) {
    1016             :     // in this case we want the same seed everywhere
    1017          18 :     auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
    1018          18 :     iseed = static_cast<unsigned>(ts);
    1019          18 :     if(master&&nrep_>1) {
    1020           6 :       multi_sim_comm.Bcast(iseed,0);
    1021             :     }
    1022          18 :     comm.Bcast(iseed,0);
    1023             :     // this is used for scale and offset sampling and acceptance
    1024          18 :     random[1].setSeed(-iseed);
    1025             :   }
    1026             :   // this is used for the random chunks of sigmas, and it is different for each replica
    1027          32 :   if(master) {
    1028          24 :     auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
    1029          24 :     iseed = static_cast<unsigned>(ts)+replica_;
    1030             :   } else {
    1031           8 :     iseed = 0;
    1032             :   }
    1033          32 :   comm.Sum(&iseed, 1);
    1034          32 :   random[2].setSeed(-iseed);
    1035             : 
    1036             :   // set up the status output file
    1037          32 :   if(write_stride_>0) {
    1038          32 :     sfile_.link(*this);
    1039          32 :     sfile_.open(status_file_name_);
    1040          32 :     if(fmt_.length()>0) {
    1041           1 :       sfile_.fmtField(fmt_);
    1042             :     }
    1043             :   }
    1044             : 
    1045          64 :   log<<"  Bibliography "<<plumed.cite("Bonomi, Camilloni, Cavalli, Vendruscolo, Sci. Adv. 2, e1501177 (2016)");
    1046          32 :   if(do_reweight_) {
    1047          32 :     log<<plumed.cite("Bonomi, Camilloni, Vendruscolo, Sci. Rep. 6, 31232 (2016)");
    1048             :   }
    1049          32 :   if(do_optsigmamean_>0) {
    1050           0 :     log<<plumed.cite("Loehr, Jussupow, Camilloni, J. Chem. Phys. 146, 165102 (2017)");
    1051             :   }
    1052          64 :   log<<plumed.cite("Bonomi, Camilloni, Bioinformatics, 33, 3999 (2017)");
    1053          32 :   log<<"\n";
    1054          64 : }
    1055             : 
    1056          64 : Metainference::~Metainference() {
    1057          32 :   if(sfile_.isOpen()) {
    1058          32 :     sfile_.close();
    1059             :   }
    1060         192 : }
    1061             : 
    1062         264 : double Metainference::getEnergySP(const std::vector<double> &mean, const std::vector<double> &sigma,
    1063             :                                   const double scale, const double offset) {
    1064         264 :   const double scale2 = scale*scale;
    1065         264 :   const double sm2    = sigma_mean2_[0];
    1066         264 :   const double ss2    = sigma[0]*sigma[0] + scale2*sm2;
    1067         264 :   const double sss    = sigma[0]*sigma[0] + sm2;
    1068             : 
    1069             :   double ene = 0.0;
    1070         264 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
    1071             :   {
    1072             :     #pragma omp for reduction( + : ene)
    1073             :     for(unsigned i=0; i<narg; ++i) {
    1074             :       const double dev = scale*mean[i]-parameters[i]+offset;
    1075             :       const double a2 = 0.5*dev*dev + ss2;
    1076             :       if(sm2 > 0.0) {
    1077             :         ene += std::log(2.0*a2/(1.0-std::exp(-a2/sm2)));
    1078             :       } else {
    1079             :         ene += std::log(2.0*a2);
    1080             :       }
    1081             :     }
    1082             :   }
    1083             :   // add one single Jeffreys prior and one normalisation per data point
    1084         264 :   ene += 0.5*std::log(sss) + static_cast<double>(narg)*0.5*std::log(0.5*M_PI*M_PI/ss2);
    1085         264 :   if(doscale_ || doregres_zero_) {
    1086         156 :     ene += 0.5*std::log(sss);
    1087             :   }
    1088         264 :   if(dooffset_) {
    1089           0 :     ene += 0.5*std::log(sss);
    1090             :   }
    1091         264 :   return kbt_ * ene;
    1092             : }
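// In formula form, the OUTLIERS-noise energy accumulated above is, writing
// dev_i = scale*mean[i] - parameters[i] + offset and a2_i = 0.5*dev_i^2 + ss2,
//   E = kbt * [ sum_i log( 2*a2_i / (1 - exp(-a2_i/sm2)) ) + 0.5*log(sss) + narg*0.5*log(0.5*pi^2/ss2) ]
// with ss2 = sigma[0]^2 + scale^2*sm2 and sss = sigma[0]^2 + sm2; the per-point term reduces to
// log(2*a2_i) when sm2 is zero, and one extra 0.5*log(sss) term is added for a sampled scale
// (or zero-intercept regression) and another for a sampled offset.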
    1093             : 
    1094         144 : double Metainference::getEnergySPE(const std::vector<double> &mean, const std::vector<double> &sigma,
    1095             :                                    const double scale, const double offset) {
    1096         144 :   const double scale2 = scale*scale;
    1097             :   double ene = 0.0;
    1098         144 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
    1099             :   {
    1100             :     #pragma omp for reduction( + : ene)
    1101             :     for(unsigned i=0; i<narg; ++i) {
    1102             :       const double sm2 = sigma_mean2_[i];
    1103             :       const double ss2 = sigma[i]*sigma[i] + scale2*sm2;
    1104             :       const double sss = sigma[i]*sigma[i] + sm2;
    1105             :       const double dev = scale*mean[i]-parameters[i]+offset;
    1106             :       const double a2  = 0.5*dev*dev + ss2;
    1107             :       if(sm2 > 0.0) {
    1108             :         ene += 0.5*std::log(sss) + 0.5*std::log(0.5*M_PI*M_PI/ss2) + std::log(2.0*a2/(1.0-std::exp(-a2/sm2)));
    1109             :       } else {
    1110             :         ene += 0.5*std::log(sss) + 0.5*std::log(0.5*M_PI*M_PI/ss2) + std::log(2.0*a2);
    1111             :       }
    1112             :       if(doscale_ || doregres_zero_) {
    1113             :         ene += 0.5*std::log(sss);
    1114             :       }
    1115             :       if(dooffset_) {
    1116             :         ene += 0.5*std::log(sss);
    1117             :       }
    1118             :     }
    1119             :   }
    1120         144 :   return kbt_ * ene;
    1121             : }
    1122             : 
    1123         144 : double Metainference::getEnergyMIGEN(const std::vector<double> &mean, const std::vector<double> &ftilde, const std::vector<double> &sigma,
    1124             :                                      const double scale, const double offset) {
    1125             :   double ene = 0.0;
    1126         144 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
    1127             :   {
    1128             :     #pragma omp for reduction( + : ene)
    1129             :     for(unsigned i=0; i<narg; ++i) {
    1130             :       const double inv_sb2  = 1./(sigma[i]*sigma[i]);
    1131             :       const double inv_sm2  = 1./sigma_mean2_[i];
    1132             :       double devb = 0;
    1133             :       if(gen_likelihood_==LIKE_GAUSS) {
    1134             :         devb = scale*ftilde[i]-parameters[i]+offset;
    1135             :       } else if(gen_likelihood_==LIKE_LOGN) {
    1136             :         devb = std::log(scale*ftilde[i]/parameters[i]);
    1137             :       }
    1138             :       double devm = mean[i] - ftilde[i];
    1139             :       // deviation + normalisation + Jeffreys prior
    1140             :       double normb = 0.;
    1141             :       if(gen_likelihood_==LIKE_GAUSS) {
    1142             :         normb = -0.5*std::log(0.5/M_PI*inv_sb2);
    1143             :       } else if(gen_likelihood_==LIKE_LOGN) {
    1144             :         normb = -0.5*std::log(0.5/M_PI*inv_sb2/(parameters[i]*parameters[i]));
    1145             :       }
    1146             :       const double normm         = -0.5*std::log(0.5/M_PI*inv_sm2);
    1147             :       const double jeffreys      = -0.5*std::log(2.*inv_sb2);
    1148             :       ene += 0.5*devb*devb*inv_sb2 + 0.5*devm*devm*inv_sm2 + normb + normm + jeffreys;
    1149             :       if(doscale_ || doregres_zero_) {
    1150             :         ene += jeffreys;
    1151             :       }
    1152             :       if(dooffset_) {
    1153             :         ene += jeffreys;
    1154             :       }
    1155             :     }
    1156             :   }
    1157         144 :   return kbt_ * ene;
    1158             : }
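// In formula form, the GENERIC-noise energy above couples the replica average to the data through
// the auxiliary variables ftilde: per data point it adds
//   0.5*devb_i^2/sigma_i^2 + 0.5*(mean_i - ftilde_i)^2/sigma_mean2_i + normb_i + normm_i + jeffreys_i,
// where devb_i = scale*ftilde_i - parameters_i + offset for the Gaussian likelihood (or
// log(scale*ftilde_i/parameters_i) for the log-normal one), the norm terms are the corresponding
// Gaussian normalisations and jeffreys_i = -0.5*log(2/sigma_i^2); the Jeffreys term is repeated for
// a sampled scale and for a sampled offset, and the sum is multiplied by kbt.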
    1159             : 
    1160         108 : double Metainference::getEnergyGJ(const std::vector<double> &mean, const std::vector<double> &sigma,
    1161             :                                   const double scale, const double offset) {
    1162         108 :   const double scale2  = scale*scale;
    1163         108 :   const double inv_s2  = 1./(sigma[0]*sigma[0] + scale2*sigma_mean2_[0]);
    1164         108 :   const double inv_sss = 1./(sigma[0]*sigma[0] + sigma_mean2_[0]);
    1165             : 
    1166             :   double ene = 0.0;
    1167         108 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
    1168             :   {
    1169             :     #pragma omp for reduction( + : ene)
    1170             :     for(unsigned i=0; i<narg; ++i) {
    1171             :       double dev = scale*mean[i]-parameters[i]+offset;
    1172             :       ene += 0.5*dev*dev*inv_s2;
    1173             :     }
    1174             :   }
    1175         108 :   const double normalisation = -0.5*std::log(0.5/M_PI*inv_s2);
    1176         108 :   const double jeffreys = -0.5*std::log(2.*inv_sss);
    1177             :   // add the Jeffreys prior once for the single sigma shared by all data points, plus one normalisation per data point
    1178         108 :   ene += jeffreys + static_cast<double>(narg)*normalisation;
    1179         108 :   if(doscale_ || doregres_zero_) {
    1180           0 :     ene += jeffreys;
    1181             :   }
    1182         108 :   if(dooffset_) {
    1183         108 :     ene += jeffreys;
    1184             :   }
    1185             : 
    1186         108 :   return kbt_ * ene;
    1187             : }
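// In formula form, the GAUSS-noise energy above is a plain Gaussian likelihood with a single
// effective variance s2 = sigma[0]^2 + scale^2*sigma_mean2_[0]:
//   E = kbt * [ sum_i 0.5*dev_i^2/s2 + narg*0.5*log(2*pi*s2) + 0.5*log((sigma[0]^2 + sigma_mean2_[0])/2) ],
// i.e. one normalisation per data point plus a single Jeffreys-prior term for the shared sigma,
// added once more for a sampled scale (or zero-intercept regression) and once more for a sampled offset.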
    1188             : 
    1189         152 : double Metainference::getEnergyGJE(const std::vector<double> &mean, const std::vector<double> &sigma,
    1190             :                                    const double scale, const double offset) {
    1191         152 :   const double scale2 = scale*scale;
    1192             : 
    1193             :   double ene = 0.0;
    1194         152 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
    1195             :   {
    1196             :     #pragma omp for reduction( + : ene)
    1197             :     for(unsigned i=0; i<narg; ++i) {
    1198             :       const double inv_s2  = 1./(sigma[i]*sigma[i] + scale2*sigma_mean2_[i]);
    1199             :       const double inv_sss = 1./(sigma[i]*sigma[i] + sigma_mean2_[i]);
    1200             :       double dev = scale*mean[i]-parameters[i]+offset;
    1201             :       // deviation + normalisation + Jeffreys prior
    1202             :       const double normalisation = -0.5*std::log(0.5/M_PI*inv_s2);
    1203             :       const double jeffreys      = -0.5*std::log(2.*inv_sss);
    1204             :       ene += 0.5*dev*dev*inv_s2 + normalisation + jeffreys;
    1205             :       if(doscale_ || doregres_zero_) {
    1206             :         ene += jeffreys;
    1207             :       }
    1208             :       if(dooffset_) {
    1209             :         ene += jeffreys;
    1210             :       }
    1211             :     }
    1212             :   }
    1213         152 :   return kbt_ * ene;
    1214             : }
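// getEnergyGJE() is the per-data-point generalisation of getEnergyGJ(): every point carries its own
// sigma[i] and sigma_mean2_[i], so the normalisation and Jeffreys terms are accumulated inside the
// loop rather than being added once at the end.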
    1215             : 
    1216          36 : void Metainference::moveTilde(const std::vector<double> &mean_, double &old_energy) {
    1217          36 :   std::vector<double> new_ftilde(sigma_.size());
    1218          36 :   new_ftilde = ftilde_;
    1219             : 
    1220             :   // change all tildes
    1221         108 :   for(unsigned j=0; j<sigma_.size(); j++) {
    1222          72 :     const double r3 = random[0].Gaussian();
    1223          72 :     const double ds3 = Dftilde_*std::sqrt(sigma_mean2_[j])*r3;
    1224          72 :     new_ftilde[j] = ftilde_[j] + ds3;
    1225             :   }
    1226             :   // calculate new energy
    1227          36 :   double new_energy = getEnergyMIGEN(mean_,new_ftilde,sigma_,scale_,offset_);
    1228             : 
    1229             :   // accept or reject
    1230          36 :   const double delta = ( new_energy - old_energy ) / kbt_;
    1231             :   // if delta is negative always accept move
    1232          36 :   if( delta <= 0.0 ) {
    1233          36 :     old_energy = new_energy;
    1234          36 :     ftilde_ = new_ftilde;
    1235          36 :     MCacceptFT_++;
    1236             :     // otherwise extract random number
    1237             :   } else {
    1238           0 :     const double s = random[0].RandU01();
    1239           0 :     if( s < std::exp(-delta) ) {
    1240           0 :       old_energy = new_energy;
    1241           0 :       ftilde_ = new_ftilde;
    1242           0 :       MCacceptFT_++;
    1243             :     }
    1244             :   }
    1245          36 : }
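// The move above is a standard Metropolis step: each ftilde[j] is displaced by a Gaussian of width
// Dftilde_*sqrt(sigma_mean2_[j]); the proposal is always accepted when the energy decreases and is
// otherwise accepted with probability exp(-(E_new - E_old)/kbt).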
    1246             : 
    1247         216 : void Metainference::moveScaleOffset(const std::vector<double> &mean_, double &old_energy) {
    1248         216 :   double new_scale = scale_;
    1249             : 
    1250         216 :   if(doscale_) {
    1251         144 :     if(scale_prior_==SC_FLAT) {
    1252         144 :       const double r1 = random[1].Gaussian();
    1253         144 :       const double ds1 = Dscale_*r1;
    1254         144 :       new_scale += ds1;
    1255             :       // check boundaries
    1256         144 :       if(new_scale > scale_max_) {
    1257           0 :         new_scale = 2.0 * scale_max_ - new_scale;
    1258             :       }
    1259         144 :       if(new_scale < scale_min_) {
    1260           0 :         new_scale = 2.0 * scale_min_ - new_scale;
    1261             :       }
    1262             :     } else {
    1263           0 :       const double r1 = random[1].Gaussian();
    1264           0 :       const double ds1 = 0.5*(scale_mu_-new_scale)+Dscale_*std::exp(1)/M_PI*r1;
    1265           0 :       new_scale += ds1;
    1266             :     }
    1267             :   }
    1268             : 
    1269         216 :   double new_offset = offset_;
    1270             : 
    1271         216 :   if(dooffset_) {
    1272          72 :     if(offset_prior_==SC_FLAT) {
    1273          72 :       const double r1 = random[1].Gaussian();
    1274          72 :       const double ds1 = Doffset_*r1;
    1275          72 :       new_offset += ds1;
    1276             :       // check boundaries
    1277          72 :       if(new_offset > offset_max_) {
    1278           0 :         new_offset = 2.0 * offset_max_ - new_offset;
    1279             :       }
    1280          72 :       if(new_offset < offset_min_) {
    1281           0 :         new_offset = 2.0 * offset_min_ - new_offset;
    1282             :       }
    1283             :     } else {
    1284           0 :       const double r1 = random[1].Gaussian();
    1285           0 :       const double ds1 = 0.5*(offset_mu_-new_offset)+Doffset_*std::exp(1)/M_PI*r1;
    1286           0 :       new_offset += ds1;
    1287             :     }
    1288             :   }
    1289             : 
    1290             :   // calculate new energy
    1291             :   double new_energy = 0.;
    1292             : 
    1293         216 :   switch(noise_type_) {
    1294          36 :   case GAUSS:
    1295          36 :     new_energy = getEnergyGJ(mean_,sigma_,new_scale,new_offset);
    1296             :     break;
    1297          48 :   case MGAUSS:
    1298          48 :     new_energy = getEnergyGJE(mean_,sigma_,new_scale,new_offset);
    1299             :     break;
    1300          48 :   case OUTLIERS:
    1301          48 :     new_energy = getEnergySP(mean_,sigma_,new_scale,new_offset);
    1302             :     break;
    1303          48 :   case MOUTLIERS:
    1304          48 :     new_energy = getEnergySPE(mean_,sigma_,new_scale,new_offset);
    1305             :     break;
    1306          36 :   case GENERIC:
    1307          36 :     new_energy = getEnergyMIGEN(mean_,ftilde_,sigma_,new_scale,new_offset);
    1308             :     break;
    1309             :   }
    1310             : 
    1311             :   // for the scale/offset we need to consider the total energy
    1312         216 :   std::vector<double> totenergies(2);
    1313         216 :   if(master) {
    1314         144 :     totenergies[0] = old_energy;
    1315         144 :     totenergies[1] = new_energy;
    1316         144 :     if(nrep_>1) {
    1317          72 :       multi_sim_comm.Sum(totenergies);
    1318             :     }
    1319             :   } else {
    1320          72 :     totenergies[0] = 0;
    1321          72 :     totenergies[1] = 0;
    1322             :   }
    1323         216 :   comm.Sum(totenergies);
    1324             : 
    1325             :   // accept or reject
    1326         216 :   const double delta = ( totenergies[1] - totenergies[0] ) / kbt_;
    1327             :   // if delta is negative always accept move
    1328         216 :   if( delta <= 0.0 ) {
    1329         216 :     old_energy = new_energy;
    1330         216 :     scale_ = new_scale;
    1331         216 :     offset_ = new_offset;
    1332         216 :     MCacceptScale_++;
    1333             :     // otherwise extract random number
    1334             :   } else {
    1335           0 :     double s = random[1].RandU01();
    1336           0 :     if( s < std::exp(-delta) ) {
    1337           0 :       old_energy = new_energy;
    1338           0 :       scale_ = new_scale;
    1339           0 :       offset_ = new_offset;
    1340           0 :       MCacceptScale_++;
    1341             :     }
    1342             :   }
    1343         216 : }
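// Because the scale and offset are shared by all replicas, the Metropolis test above compares
// energies summed over the replicas (via multi_sim_comm) rather than the local ones. With a flat
// prior the proposal is a Gaussian step of width Dscale_/Doffset_ reflected at the allowed
// boundaries; with a Gaussian prior it is a drift towards the prior mean plus a Gaussian kick.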
    1344             : 
    1345         280 : void Metainference::moveSigmas(const std::vector<double> &mean_, double &old_energy, const unsigned i, const std::vector<unsigned> &indices, bool &breaknow) {
    1346         280 :   std::vector<double> new_sigma(sigma_.size());
    1347         280 :   new_sigma = sigma_;
    1348             : 
    1349             :   // change MCchunksize_ sigmas
    1350         280 :   if (MCchunksize_ > 0) {
    1351           6 :     if ((MCchunksize_ * i) >= sigma_.size()) {
    1352             :       // This means we are not moving any sigma, so we should break immediately
    1353           0 :       breaknow = true;
    1354             :     }
    1355             : 
    1356             :     // change random sigmas
    1357          12 :     for(unsigned j=0; j<MCchunksize_; j++) {
    1358           6 :       const unsigned shuffle_index = j + MCchunksize_ * i;
    1359           6 :       if (shuffle_index >= sigma_.size()) {
    1360             :         // Going any further will segfault but we should still evaluate the sigmas we changed
    1361             :         break;
    1362             :       }
    1363           6 :       const unsigned index = indices[shuffle_index];
    1364           6 :       const double r2 = random[0].Gaussian();
    1365           6 :       const double ds2 = Dsigma_[index]*r2;
    1366           6 :       new_sigma[index] = sigma_[index] + ds2;
    1367             :       // check boundaries
    1368           6 :       if(new_sigma[index] > sigma_max_[index]) {
    1369           0 :         new_sigma[index] = 2.0 * sigma_max_[index] - new_sigma[index];
    1370             :       }
    1371           6 :       if(new_sigma[index] < sigma_min_[index]) {
    1372           0 :         new_sigma[index] = 2.0 * sigma_min_[index] - new_sigma[index];
    1373             :       }
    1374             :     }
    1375             :   } else {
    1376             :     // change all sigmas
    1377        3224 :     for(unsigned j=0; j<sigma_.size(); j++) {
    1378        2950 :       const double r2 = random[0].Gaussian();
    1379        2950 :       const double ds2 = Dsigma_[j]*r2;
    1380        2950 :       new_sigma[j] = sigma_[j] + ds2;
    1381             :       // check boundaries
    1382        2950 :       if(new_sigma[j] > sigma_max_[j]) {
    1383           0 :         new_sigma[j] = 2.0 * sigma_max_[j] - new_sigma[j];
    1384             :       }
    1385        2950 :       if(new_sigma[j] < sigma_min_[j]) {
    1386           0 :         new_sigma[j] = 2.0 * sigma_min_[j] - new_sigma[j];
    1387             :       }
    1388             :     }
    1389             :   }
    1390             : 
    1391         280 :   if (breaknow) {
    1392             :     // We didn't move any sigmas, so there is no point in evaluating anything
    1393             :     return;
    1394             :   }
    1395             : 
    1396             :   // calculate new energy
    1397             :   double new_energy = 0.;
    1398         280 :   switch(noise_type_) {
    1399          36 :   case GAUSS:
    1400          36 :     new_energy = getEnergyGJ(mean_,new_sigma,scale_,offset_);
    1401             :     break;
    1402          52 :   case MGAUSS:
    1403          52 :     new_energy = getEnergyGJE(mean_,new_sigma,scale_,offset_);
    1404             :     break;
    1405         108 :   case OUTLIERS:
    1406         108 :     new_energy = getEnergySP(mean_,new_sigma,scale_,offset_);
    1407             :     break;
    1408          48 :   case MOUTLIERS:
    1409          48 :     new_energy = getEnergySPE(mean_,new_sigma,scale_,offset_);
    1410             :     break;
    1411          36 :   case GENERIC:
    1412          36 :     new_energy = getEnergyMIGEN(mean_,ftilde_,new_sigma,scale_,offset_);
    1413             :     break;
    1414             :   }
    1415             : 
    1416             :   // accept or reject
    1417         280 :   const double delta = ( new_energy - old_energy ) / kbt_;
    1418             :   // if delta is negative always accept move
    1419         280 :   if( delta <= 0.0 ) {
    1420         280 :     old_energy = new_energy;
    1421         280 :     sigma_ = new_sigma;
    1422         280 :     MCaccept_++;
    1423             :     // otherwise extract random number
    1424             :   } else {
    1425           0 :     const double s = random[0].RandU01();
    1426           0 :     if( s < std::exp(-delta) ) {
    1427           0 :       old_energy = new_energy;
    1428           0 :       sigma_ = new_sigma;
    1429           0 :       MCaccept_++;
    1430             :     }
    1431             :   }
    1432             : }
    1433             : 
    1434         280 : double Metainference::doMonteCarlo(const std::vector<double> &mean_) {
    1435             :   // calculate old energy with the updated coordinates
    1436         280 :   double old_energy=0.;
    1437             : 
    1438         280 :   switch(noise_type_) {
    1439          36 :   case GAUSS:
    1440          36 :     old_energy = getEnergyGJ(mean_,sigma_,scale_,offset_);
    1441          36 :     break;
    1442          52 :   case MGAUSS:
    1443          52 :     old_energy = getEnergyGJE(mean_,sigma_,scale_,offset_);
    1444          52 :     break;
    1445         108 :   case OUTLIERS:
    1446         108 :     old_energy = getEnergySP(mean_,sigma_,scale_,offset_);
    1447         108 :     break;
    1448          48 :   case MOUTLIERS:
    1449          48 :     old_energy = getEnergySPE(mean_,sigma_,scale_,offset_);
    1450          48 :     break;
    1451          36 :   case GENERIC:
    1452          36 :     old_energy = getEnergyMIGEN(mean_,ftilde_,sigma_,scale_,offset_);
    1453          36 :     break;
    1454             :   }
    1455             : 
    1456             :   // do not run MC if this is a replica-exchange trial
    1457         280 :   if(!getExchangeStep()) {
    1458             : 
    1459             :     // create a vector of random sigma indices
    1460             :     std::vector<unsigned> indices;
    1461         280 :     if (MCchunksize_ > 0) {
    1462          12 :       for (unsigned j=0; j<sigma_.size(); j++) {
    1463           6 :         indices.push_back(j);
    1464             :       }
    1465           6 :       random[2].Shuffle(indices);
    1466             :     }
    1467         280 :     bool breaknow = false;
    1468             : 
    1469             :     // cycle on MC steps
    1470         560 :     for(unsigned i=0; i<MCsteps_; ++i) {
    1471         280 :       MCtrial_++;
    1472             :       // propose move for ftilde
    1473         280 :       if(noise_type_==GENERIC) {
    1474          36 :         moveTilde(mean_, old_energy);
    1475             :       }
    1476             :       // propose move for scale and/or offset
    1477         280 :       if(doscale_||dooffset_) {
    1478         216 :         moveScaleOffset(mean_, old_energy);
    1479             :       }
    1480             :       // propose move for sigma
    1481         280 :       moveSigmas(mean_, old_energy, i, indices, breaknow);
    1482             :       // exit from the loop if this is the case
    1483         280 :       if(breaknow) {
    1484             :         break;
    1485             :       }
    1486             :     }
    1487             : 
    1488             :     /* save the result of the sampling */
    1489             :     /* ftilde */
    1490         280 :     if(noise_type_==GENERIC) {
    1491          36 :       double accept = static_cast<double>(MCacceptFT_) / static_cast<double>(MCtrial_);
    1492          36 :       valueAcceptFT->set(accept);
    1493         108 :       for(unsigned i=0; i<sigma_.size(); i++) {
    1494          72 :         valueFtilde[i]->set(ftilde_[i]);
    1495             :       }
    1496             :     }
    1497             :     /* scale and offset */
    1498         280 :     if(doscale_ || doregres_zero_) {
    1499         150 :       valueScale->set(scale_);
    1500             :     }
    1501         280 :     if(dooffset_) {
    1502          72 :       valueOffset->set(offset_);
    1503             :     }
    1504         280 :     if(doscale_||dooffset_) {
    1505         216 :       double accept = static_cast<double>(MCacceptScale_) / static_cast<double>(MCtrial_);
    1506         216 :       valueAcceptScale->set(accept);
    1507             :     }
    1508             :     /* sigmas */
    1509        3236 :     for(unsigned i=0; i<sigma_.size(); i++) {
    1510        2956 :       valueSigma[i]->set(sigma_[i]);
    1511             :     }
    1512         280 :     double accept = static_cast<double>(MCaccept_) / static_cast<double>(MCtrial_);
    1513         280 :     valueAccept->set(accept);
    1514             :   }
    1515             : 
    1516             :   // here we sum the score over the replicas to get the full metainference score that we save as a bias
    1517         280 :   if(master) {
    1518         206 :     if(nrep_>1) {
    1519          74 :       multi_sim_comm.Sum(old_energy);
    1520             :     }
    1521             :   } else {
    1522          74 :     old_energy=0;
    1523             :   }
    1524         280 :   comm.Sum(old_energy);
    1525             : 
    1526             :   // this is the energy with current coordinates and parameters
    1527         280 :   return old_energy;
    1528             : }
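// Each call to doMonteCarlo() thus performs MCsteps_ sweeps, proposing in order: the ftilde
// variables (GENERIC noise only), a common scale/offset move (when sampled), and the sigma moves.
// The accept* components report the cumulative fraction of accepted moves over MCtrial_ attempts,
// and the returned value is the metainference energy summed over the replicas.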
    1529             : 
    1530             : /*
    1531             :    In the following energy-force functions we don't add the normalisation and the Jeffreys priors
    1532             :    because they are not needed for the forces; the correct Metainference energy is the one calculated
    1533             :    in the Monte Carlo
    1534             : */
    1535             : 
    1536         108 : void Metainference::getEnergyForceSP(const std::vector<double> &mean, const std::vector<double> &dmean_x,
    1537             :                                      const std::vector<double> &dmean_b) {
    1538         108 :   const double scale2 = scale_*scale_;
    1539         108 :   const double sm2    = sigma_mean2_[0];
    1540         108 :   const double ss2    = sigma_[0]*sigma_[0] + scale2*sm2;
    1541         108 :   std::vector<double> f(narg,0);
    1542             : 
    1543         108 :   if(master) {
    1544          84 :     #pragma omp parallel num_threads(OpenMP::getNumThreads())
    1545             :     {
    1546             :       #pragma omp for
    1547             :       for(unsigned i=0; i<narg; ++i) {
    1548             :         const double dev = scale_*mean[i]-parameters[i]+offset_;
    1549             :         const double a2 = 0.5*dev*dev + ss2;
    1550             :         if(sm2 > 0.0) {
    1551             :           const double t = std::exp(-a2/sm2);
    1552             :           const double dt = 1./t;
    1553             :           const double dit = 1./(1.-dt);
    1554             :           f[i] = -scale_*dev*(dit/sm2 + 1./a2);
    1555             :         } else {
    1556             :           f[i] = -scale_*dev*(1./a2);
    1557             :         }
    1558             :       }
    1559             :     }
    1560             :     // collect contribution to forces and energy from other replicas
    1561          84 :     if(nrep_>1) {
    1562          24 :       multi_sim_comm.Sum(&f[0],narg);
    1563             :     }
    1564             :   }
    1565             :   // intra-replica summation
    1566         108 :   comm.Sum(&f[0],narg);
    1567             : 
    1568             :   double w_tmp = 0.;
    1569         720 :   for(unsigned i=0; i<narg; ++i) {
    1570         612 :     setOutputForce(i, kbt_*f[i]*dmean_x[i]);
    1571         612 :     w_tmp += kbt_*f[i]*dmean_b[i];
    1572             :   }
    1573             : 
    1574         108 :   if(do_reweight_) {
    1575          48 :     setOutputForce(narg, w_tmp);
    1576          96 :     getPntrToComponent("biasDer")->set(-w_tmp);
    1577             :   }
    1578         108 : }
    1579             : 
    1580          48 : void Metainference::getEnergyForceSPE(const std::vector<double> &mean, const std::vector<double> &dmean_x,
    1581             :                                       const std::vector<double> &dmean_b) {
    1582          48 :   const double scale2 = scale_*scale_;
    1583          48 :   std::vector<double> f(narg,0);
    1584             : 
    1585          48 :   if(master) {
    1586          24 :     #pragma omp parallel num_threads(OpenMP::getNumThreads())
    1587             :     {
    1588             :       #pragma omp for
    1589             :       for(unsigned i=0; i<narg; ++i) {
    1590             :         const double sm2 = sigma_mean2_[i];
    1591             :         const double ss2 = sigma_[i]*sigma_[i] + scale2*sm2;
    1592             :         const double dev = scale_*mean[i]-parameters[i]+offset_;
    1593             :         const double a2  = 0.5*dev*dev + ss2;
    1594             :         if(sm2 > 0.0) {
    1595             :           const double t   = std::exp(-a2/sm2);
    1596             :           const double dt  = 1./t;
    1597             :           const double dit = 1./(1.-dt);
    1598             :           f[i] = -scale_*dev*(dit/sm2 + 1./a2);
    1599             :         } else {
    1600             :           f[i] = -scale_*dev*(1./a2);
    1601             :         }
    1602             :       }
    1603             :     }
    1604             :     // collect contribution to forces and energy from other replicas
    1605          24 :     if(nrep_>1) {
    1606          24 :       multi_sim_comm.Sum(&f[0],narg);
    1607             :     }
    1608             :   }
    1609          48 :   comm.Sum(&f[0],narg);
    1610             : 
    1611             :   double w_tmp = 0.;
    1612         240 :   for(unsigned i=0; i<narg; ++i) {
    1613         192 :     setOutputForce(i, kbt_ * dmean_x[i] * f[i]);
    1614         192 :     w_tmp += kbt_ * dmean_b[i] *f[i];
    1615             :   }
    1616             : 
    1617          48 :   if(do_reweight_) {
    1618          48 :     setOutputForce(narg, w_tmp);
    1619          96 :     getPntrToComponent("biasDer")->set(-w_tmp);
    1620             :   }
    1621          48 : }
    1622             : 
    1623          36 : void Metainference::getEnergyForceGJ(const std::vector<double> &mean, const std::vector<double> &dmean_x,
    1624             :                                      const std::vector<double> &dmean_b) {
    1625          36 :   const double scale2 = scale_*scale_;
    1626          36 :   double inv_s2=0.;
    1627             : 
    1628          36 :   if(master) {
    1629          36 :     inv_s2 = 1./(sigma_[0]*sigma_[0] + scale2*sigma_mean2_[0]);
    1630          36 :     if(nrep_>1) {
    1631           0 :       multi_sim_comm.Sum(inv_s2);
    1632             :     }
    1633             :   }
    1634          36 :   comm.Sum(inv_s2);
    1635             : 
    1636             :   double w_tmp = 0.;
    1637          36 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(w_tmp)
    1638             :   {
    1639             :     #pragma omp for reduction( + : w_tmp)
    1640             :     for(unsigned i=0; i<narg; ++i) {
    1641             :       const double dev = scale_*mean[i]-parameters[i]+offset_;
    1642             :       const double mult = dev*scale_*inv_s2;
    1643             :       setOutputForce(i, -kbt_*dmean_x[i]*mult);
    1644             :       w_tmp += kbt_*dmean_b[i]*mult;
    1645             :     }
    1646             :   }
    1647             : 
    1648          36 :   if(do_reweight_) {
    1649           0 :     setOutputForce(narg, -w_tmp);
    1650           0 :     getPntrToComponent("biasDer")->set(w_tmp);
    1651             :   }
    1652          36 : }
    1653             : 
    1654          52 : void Metainference::getEnergyForceGJE(const std::vector<double> &mean, const std::vector<double> &dmean_x,
    1655             :                                       const std::vector<double> &dmean_b) {
    1656          52 :   const double scale2 = scale_*scale_;
    1657          52 :   std::vector<double> inv_s2(sigma_.size(),0.);
    1658             : 
    1659          52 :   if(master) {
    1660        1300 :     for(unsigned i=0; i<sigma_.size(); ++i) {
    1661        1274 :       inv_s2[i] = 1./(sigma_[i]*sigma_[i] + scale2*sigma_mean2_[i]);
    1662             :     }
    1663          26 :     if(nrep_>1) {
    1664          26 :       multi_sim_comm.Sum(&inv_s2[0],sigma_.size());
    1665             :     }
    1666             :   }
    1667          52 :   comm.Sum(&inv_s2[0],sigma_.size());
    1668             : 
    1669             :   double w_tmp = 0.;
    1670          52 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(w_tmp)
    1671             :   {
    1672             :     #pragma omp for reduction( + : w_tmp)
    1673             :     for(unsigned i=0; i<narg; ++i) {
    1674             :       const double dev  = scale_*mean[i]-parameters[i]+offset_;
    1675             :       const double mult = dev*scale_*inv_s2[i];
    1676             :       setOutputForce(i, -kbt_*dmean_x[i]*mult);
    1677             :       w_tmp += kbt_*dmean_b[i]*mult;
    1678             :     }
    1679             :   }
    1680             : 
    1681          52 :   if(do_reweight_) {
    1682          52 :     setOutputForce(narg, -w_tmp);
    1683         104 :     getPntrToComponent("biasDer")->set(w_tmp);
    1684             :   }
    1685          52 : }
    1686             : 
    1687          36 : void Metainference::getEnergyForceMIGEN(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b) {
    1688          36 :   std::vector<double> inv_s2(sigma_.size(),0.);
    1689          36 :   std::vector<double> dev(sigma_.size(),0.);
    1690          36 :   std::vector<double> dev2(sigma_.size(),0.);
    1691             : 
    1692         108 :   for(unsigned i=0; i<sigma_.size(); ++i) {
    1693          72 :     inv_s2[i]   = 1./sigma_mean2_[i];
    1694          72 :     if(master) {
    1695          72 :       dev[i]  = (mean[i]-ftilde_[i]);
    1696          72 :       dev2[i] = dev[i]*dev[i];
    1697             :     }
    1698             :   }
    1699          36 :   if(master&&nrep_>1) {
    1700           0 :     multi_sim_comm.Sum(&dev[0],dev.size());
    1701           0 :     multi_sim_comm.Sum(&dev2[0],dev2.size());
    1702             :   }
    1703          36 :   comm.Sum(&dev[0],dev.size());
    1704          36 :   comm.Sum(&dev2[0],dev2.size());
    1705             : 
    1706             :   double dene_b = 0.;
    1707          36 :   #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(dene_b)
    1708             :   {
    1709             :     #pragma omp for reduction( + : dene_b)
    1710             :     for(unsigned i=0; i<narg; ++i) {
    1711             :       const double dene_x  = kbt_*inv_s2[i]*dmean_x[i]*dev[i];
    1712             :       dene_b += kbt_*inv_s2[i]*dmean_b[i]*dev[i];
    1713             :       setOutputForce(i, -dene_x);
    1714             :     }
    1715             :   }
    1716             : 
    1717          36 :   if(do_reweight_) {
    1718           0 :     setOutputForce(narg, -dene_b);
    1719           0 :     getPntrToComponent("biasDer")->set(dene_b);
    1720             :   }
    1721          36 : }
    1722             : 
    1723         280 : void Metainference::get_weights(const unsigned iselect, double &weight, double &norm, double &neff) {
    1724         280 :   const double dnrep = static_cast<double>(nrep_);
    1725             :   // calculate the weights either from BIAS
    1726         280 :   if(do_reweight_) {
    1727         148 :     std::vector<double> bias(nrep_,0);
    1728         148 :     if(master) {
    1729          74 :       bias[replica_] = getArgument(narg);
    1730          74 :       if(nrep_>1) {
    1731          74 :         multi_sim_comm.Sum(&bias[0], nrep_);
    1732             :       }
    1733             :     }
    1734         148 :     comm.Sum(&bias[0], nrep_);
    1735             : 
    1736             :     // accumulate weights
    1737         148 :     const double decay = 1./static_cast<double> (average_weights_stride_);
    1738         148 :     if(!firstTimeW[iselect]) {
    1739         408 :       for(unsigned i=0; i<nrep_; ++i) {
    1740         272 :         const double delta=bias[i]-average_weights_[iselect][i];
    1741         272 :         average_weights_[iselect][i]+=decay*delta;
    1742             :       }
    1743             :     } else {
    1744             :       firstTimeW[iselect] = false;
    1745          36 :       for(unsigned i=0; i<nrep_; ++i) {
    1746          24 :         average_weights_[iselect][i] = bias[i];
    1747             :       }
    1748             :     }
    1749             : 
    1750             :     // turn the averaged biases back into weights, shifted by the maximum so that the largest weight is one
    1751         148 :     const double maxbias = *(std::max_element(average_weights_[iselect].begin(), average_weights_[iselect].end()));
    1752         444 :     for(unsigned i=0; i<nrep_; ++i) {
    1753         296 :       bias[i] = std::exp((average_weights_[iselect][i]-maxbias)/kbt_);
    1754             :     }
    1755             :     // set local weight, norm and weight variance
    1756         148 :     weight = bias[replica_];
    1757             :     double w2=0.;
    1758         444 :     for(unsigned i=0; i<nrep_; ++i) {
    1759         296 :       w2 += bias[i]*bias[i];
    1760         296 :       norm += bias[i];
    1761             :     }
    1762         148 :     neff = norm*norm/w2;
    1763         296 :     getPntrToComponent("weight")->set(weight/norm);
    1764             :   } else {
    1765             :     // or arithmetic ones
    1766         132 :     neff = dnrep;
    1767         132 :     weight = 1.0;
    1768         132 :     norm = dnrep;
    1769             :   }
    1770         280 :   getPntrToComponent("neff")->set(neff);
    1771         280 : }
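// With reweighting active, the bias of each replica is smoothed by an exponential moving average
// (decay 1/average_weights_stride_) and converted into weights w_i = exp((bias_i - max_j bias_j)/kbt),
// so that weight = w_replica, norm = sum_i w_i and neff = (sum_i w_i)^2 / sum_i w_i^2, the usual
// effective-sample-size estimate; without reweighting every replica counts equally and
// neff = norm = nrep_.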
    1772             : 
    1773         280 : void Metainference::get_sigma_mean(const unsigned iselect, const double weight, const double norm, const double neff, const std::vector<double> &mean) {
    1774         280 :   const double dnrep    = static_cast<double>(nrep_);
    1775         280 :   std::vector<double> sigma_mean2_tmp(sigma_mean2_.size(), 0.);
    1776             : 
    1777         280 :   if(do_optsigmamean_>0) {
    1778             :     // remove the first entry of the history vector
    1779           0 :     if(sigma_mean2_last_[iselect][0].size()==optsigmamean_stride_&&optsigmamean_stride_>0)
    1780           0 :       for(unsigned i=0; i<narg; ++i) {
    1781           0 :         sigma_mean2_last_[iselect][i].erase(sigma_mean2_last_[iselect][i].begin());
    1782             :       }
    1783             :     /* this is the current estimate of sigma mean for each argument;
    1784             :        there is one per argument in any case because the maximum
    1785             :        among them is the one used in the GAUSS/OUTLIERS case */
    1786           0 :     std::vector<double> sigma_mean2_now(narg,0);
    1787           0 :     if(master) {
    1788           0 :       for(unsigned i=0; i<narg; ++i) {
    1789           0 :         sigma_mean2_now[i] = weight*(getArgument(i)-mean[i])*(getArgument(i)-mean[i]);
    1790             :       }
    1791           0 :       if(nrep_>1) {
    1792           0 :         multi_sim_comm.Sum(&sigma_mean2_now[0], narg);
    1793             :       }
    1794             :     }
    1795           0 :     comm.Sum(&sigma_mean2_now[0], narg);
    1796           0 :     for(unsigned i=0; i<narg; ++i) {
    1797           0 :       sigma_mean2_now[i] *= 1.0/(neff-1.)/norm;
    1798             :     }
    1799             : 
    1800             :     // add sigma_mean2 to history
    1801           0 :     if(optsigmamean_stride_>0) {
    1802           0 :       for(unsigned i=0; i<narg; ++i) {
    1803           0 :         sigma_mean2_last_[iselect][i].push_back(sigma_mean2_now[i]);
    1804             :       }
    1805             :     } else {
    1806           0 :       for(unsigned i=0; i<narg; ++i)
    1807           0 :         if(sigma_mean2_now[i] > sigma_mean2_last_[iselect][i][0]) {
    1808           0 :           sigma_mean2_last_[iselect][i][0] = sigma_mean2_now[i];
    1809             :         }
    1810             :     }
    1811             : 
    1812           0 :     if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
    1813           0 :       for(unsigned i=0; i<narg; ++i) {
    1814             :         /* set to the maximum in the history vector */
    1815           0 :         sigma_mean2_tmp[i] = *max_element(sigma_mean2_last_[iselect][i].begin(), sigma_mean2_last_[iselect][i].end());
    1816             :         /* the standard error of the mean */
    1817           0 :         valueSigmaMean[i]->set(std::sqrt(sigma_mean2_tmp[i]));
    1818           0 :         if(noise_type_==GENERIC) {
    1819           0 :           sigma_min_[i] = std::sqrt(sigma_mean2_tmp[i]);
    1820           0 :           if(sigma_[i] < sigma_min_[i]) {
    1821           0 :             sigma_[i] = sigma_min_[i];
    1822             :           }
    1823             :         }
    1824             :       }
    1825           0 :     } else if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
    1826             :       // find maximum for each data point
    1827             :       std::vector <double> max_values;
    1828           0 :       for(unsigned i=0; i<narg; ++i) {
    1829           0 :         max_values.push_back(*max_element(sigma_mean2_last_[iselect][i].begin(), sigma_mean2_last_[iselect][i].end()));
    1830             :       }
    1831             :       // find maximum across data points
    1832           0 :       const double max_now = *max_element(max_values.begin(), max_values.end());
    1833             :       // set new value
    1834           0 :       sigma_mean2_tmp[0] = max_now;
    1835           0 :       valueSigmaMean[0]->set(std::sqrt(sigma_mean2_tmp[0]));
    1836             :     }
    1837             :     // endif sigma mean optimization
    1838             :     // start sigma max optimization
    1839           0 :     if(do_optsigmamean_>1&&!sigmamax_opt_done_) {
    1840           0 :       for(unsigned i=0; i<sigma_max_.size(); i++) {
    1841           0 :         if(sigma_max_est_[i]<sigma_mean2_tmp[i]&&optimized_step_>optsigmamean_stride_) {
    1842           0 :           sigma_max_est_[i]=sigma_mean2_tmp[i];
    1843             :         }
    1844             :         // ready to set once and for all the value of sigma_max
    1845           0 :         if(optimized_step_==N_optimized_step_) {
    1846           0 :           sigmamax_opt_done_=true;
    1847           0 :           for(unsigned ii=0; ii<sigma_max_.size(); ii++) {
    1848           0 :             sigma_max_[ii]=std::sqrt(sigma_max_est_[ii]*dnrep);
    1849           0 :             Dsigma_[ii] = 0.05*(sigma_max_[ii] - sigma_min_[ii]);
    1850           0 :             if(sigma_[ii]>sigma_max_[ii]) {
    1851           0 :               sigma_[ii]=sigma_max_[ii];
    1852             :             }
    1853             :           }
    1854             :         }
    1855             :       }
    1856           0 :       optimized_step_++;
    1857             :     }
    1858             :     // end sigma max optimization
    1859             :   } else {
    1860         280 :     if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
    1861        2948 :       for(unsigned i=0; i<narg; ++i) {
    1862        2812 :         sigma_mean2_tmp[i] = sigma_mean2_last_[iselect][i][0];
    1863        2812 :         valueSigmaMean[i]->set(std::sqrt(sigma_mean2_tmp[i]));
    1864             :       }
    1865         144 :     } else if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
    1866         144 :       sigma_mean2_tmp[0] = sigma_mean2_last_[iselect][0][0];
    1867         144 :       valueSigmaMean[0]->set(std::sqrt(sigma_mean2_tmp[0]));
    1868             :     }
    1869             :   }
    1870             : 
    1871         280 :   sigma_mean2_ = sigma_mean2_tmp;
    1872         280 : }
    1873             : 
    1874         280 : void Metainference::replica_averaging(const double weight, const double norm, std::vector<double> &mean, std::vector<double> &dmean_b) {
    1875         280 :   if(master) {
    1876        2236 :     for(unsigned i=0; i<narg; ++i) {
    1877        2030 :       mean[i] = weight/norm*getArgument(i);
    1878             :     }
    1879         206 :     if(nrep_>1) {
    1880          74 :       multi_sim_comm.Sum(&mean[0], narg);
    1881             :     }
    1882             :   }
    1883         280 :   comm.Sum(&mean[0], narg);
    1884             :   // set the derivative of the mean with respect to the bias
    1885        3776 :   for(unsigned i=0; i<narg; ++i) {
    1886        3496 :     dmean_b[i] = weight/norm/kbt_*(getArgument(i)-mean[i])/static_cast<double>(average_weights_stride_);
    1887             :   }
    1888             : 
    1889             :   // this is only for generic metainference
    1890         280 :   if(firstTime) {
    1891          28 :     ftilde_ = mean;
    1892          28 :     firstTime = false;
    1893             :   }
    1894         280 : }
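// The replica average computed above is mean[i] = sum over replicas of (w_r/norm)*arg_i, while
// dmean_b[i] = (w/norm)*(arg_i - mean[i]) / (kbt*average_weights_stride_) is its derivative with
// respect to the reweighting bias, used later to propagate forces onto the bias argument.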
    1895             : 
    1896           6 : void Metainference::do_regression_zero(const std::vector<double> &mean) {
    1897             : // parameters[i] = scale_ * mean[i]: find scale_ with linear regression
    1898             :   double num = 0.0;
    1899             :   double den = 0.0;
    1900          48 :   for(unsigned i=0; i<parameters.size(); ++i) {
    1901          42 :     num += mean[i] * parameters[i];
    1902          42 :     den += mean[i] * mean[i];
    1903             :   }
    1904           6 :   if(den>0) {
    1905           6 :     scale_ = num / den;
    1906             :   } else {
    1907           0 :     scale_ = 1.0;
    1908             :   }
    1909           6 : }
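// The fit above is an ordinary least-squares regression of a line through the origin,
//   scale_ = sum_i mean_i*parameters_i / sum_i mean_i^2 .
// As a hypothetical two-point check: mean = (1, 2) and parameters = (2, 4.2) give
// scale_ = (1*2 + 2*4.2)/(1 + 4) = 10.4/5 = 2.08.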
    1910             : 
    1911         280 : void Metainference::calculate() {
    1912             :   // get step
    1913         280 :   const long long int step = getStep();
    1914             : 
    1915             :   unsigned iselect = 0;
    1916             :   // set the value of the selector for REM-like approaches
    1917         280 :   if(selector_.length()>0) {
    1918           0 :     iselect = static_cast<unsigned>(plumed.passMap[selector_]);
    1919             :   }
    1920             : 
    1921             :   /* 1) collect weights */
    1922         280 :   double weight = 0.;
    1923         280 :   double neff = 0.;
    1924         280 :   double norm = 0.;
    1925         280 :   get_weights(iselect, weight, norm, neff);
    1926             : 
    1927             :   /* 2) calculate average */
    1928         280 :   std::vector<double> mean(narg,0);
    1929             :   // this is the derivative of the mean with respect to the argument
    1930         280 :   std::vector<double> dmean_x(narg,weight/norm);
    1931             :   // this is the derivative of the mean with respect to the bias
    1932         280 :   std::vector<double> dmean_b(narg,0);
    1933             :   // calculate it
    1934         280 :   replica_averaging(weight, norm, mean, dmean_b);
    1935             : 
    1936             :   /* 3) calculate parameters */
    1937         280 :   get_sigma_mean(iselect, weight, norm, neff, mean);
    1938             : 
    1939             :   // in case of regression with zero intercept, calculate scale
    1940         280 :   if(doregres_zero_ && step%nregres_zero_==0) {
    1941           6 :     do_regression_zero(mean);
    1942             :   }
    1943             : 
    1944             :   /* 4) run Monte Carlo */
    1945         280 :   double ene = doMonteCarlo(mean);
    1946             : 
    1947             :   // calculate bias and forces
    1948         280 :   switch(noise_type_) {
    1949          36 :   case GAUSS:
    1950          36 :     getEnergyForceGJ(mean, dmean_x, dmean_b);
    1951             :     break;
    1952          52 :   case MGAUSS:
    1953          52 :     getEnergyForceGJE(mean, dmean_x, dmean_b);
    1954             :     break;
    1955         108 :   case OUTLIERS:
    1956         108 :     getEnergyForceSP(mean, dmean_x, dmean_b);
    1957             :     break;
    1958          48 :   case MOUTLIERS:
    1959          48 :     getEnergyForceSPE(mean, dmean_x, dmean_b);
    1960             :     break;
    1961          36 :   case GENERIC:
    1962          36 :     getEnergyForceMIGEN(mean, dmean_x, dmean_b);
    1963             :     break;
    1964             :   }
    1965             : 
    1966         280 :   setBias(ene);
    1967         280 : }
    1968             : 
    1969          32 : void Metainference::writeStatus() {
    1970          32 :   sfile_.rewind();
    1971          32 :   sfile_.printField("time",getTimeStep()*getStep());
    1972             :   //nsel
    1973          64 :   for(unsigned i=0; i<sigma_mean2_last_.size(); i++) {
    1974             :     std::string msg_i,msg_j;
    1975          32 :     Tools::convert(i,msg_i);
    1976             :     std::vector <double> max_values;
    1977             :     //narg
    1978        2518 :     for(unsigned j=0; j<narg; ++j) {
    1979        2486 :       Tools::convert(j,msg_j);
    1980        2486 :       std::string msg = msg_i+"_"+msg_j;
    1981        2486 :       if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
    1982        7182 :         sfile_.printField("sigmaMean_"+msg,std::sqrt(*max_element(sigma_mean2_last_[i][j].begin(), sigma_mean2_last_[i][j].end())));
    1983             :       } else {
    1984             :         // find maximum for each data point
    1985         184 :         max_values.push_back(*max_element(sigma_mean2_last_[i][j].begin(), sigma_mean2_last_[i][j].end()));
    1986             :       }
    1987             :     }
    1988          32 :     if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
    1989             :       // find maximum across data points
    1990          17 :       const double max_now = std::sqrt(*max_element(max_values.begin(), max_values.end()));
    1991          17 :       Tools::convert(0,msg_j);
    1992          17 :       std::string msg = msg_i+"_"+msg_j;
    1993          34 :       sfile_.printField("sigmaMean_"+msg, max_now);
    1994             :     }
    1995             :   }
    1996        2443 :   for(unsigned i=0; i<sigma_.size(); ++i) {
    1997             :     std::string msg;
    1998        2411 :     Tools::convert(i,msg);
    1999        4822 :     sfile_.printField("sigma_"+msg,sigma_[i]);
    2000             :   }
    2001        2443 :   for(unsigned i=0; i<sigma_max_.size(); ++i) {
    2002             :     std::string msg;
    2003        2411 :     Tools::convert(i,msg);
    2004        4822 :     sfile_.printField("sigma_max_"+msg,sigma_max_[i]);
    2005             :   }
    2006          32 :   if(noise_type_==GENERIC) {
    2007           9 :     for(unsigned i=0; i<ftilde_.size(); ++i) {
    2008             :       std::string msg;
    2009           6 :       Tools::convert(i,msg);
    2010          12 :       sfile_.printField("ftilde_"+msg,ftilde_[i]);
    2011             :     }
    2012             :   }
    2013          32 :   sfile_.printField("scale0_",scale_);
    2014          32 :   sfile_.printField("offset0_",offset_);
    2015          64 :   for(unsigned i=0; i<average_weights_.size(); i++) {
    2016             :     std::string msg_i;
    2017          32 :     Tools::convert(i,msg_i);
    2018          64 :     sfile_.printField("weight_"+msg_i,average_weights_[i][replica_]);
    2019             :   }
    2020          32 :   sfile_.printField();
    2021          32 :   sfile_.flush();
    2022          32 : }
    2023             : 
    2024         280 : void Metainference::update() {
    2025             :   // write status file
    2026         280 :   if(write_stride_>0&& (getStep()%write_stride_==0 || getCPT()) ) {
    2027          32 :     writeStatus();
    2028             :   }
    2029         280 : }
    2030             : 
    2031             : }
    2032             : }
    2033             : 

Generated by: LCOV version 1.16