Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2016-2023 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 : #include "Bias.h"
23 : #include "core/PlumedMain.h"
24 : #include "core/Atoms.h"
25 : #include "core/ActionRegister.h"
26 : #include "core/ActionWithValue.h"
27 : #include "tools/Communicator.h"
28 : #include "tools/File.h"
29 :
30 : // The original implementation of this method was contributed
31 : // by Andrea Cesari (andreacesari90@gmail.com).
32 : // Copyright has been then transferred to PLUMED developers
33 : // (see https://github.com/plumed/plumed2/blob/master/.github/CONTRIBUTING.md)
34 :
35 : namespace PLMD {
36 : namespace bias {
37 :
38 : //+PLUMEDOC BIAS MAXENT
39 : /*
40 : Add a linear biasing potential on one or more variables that satisfies a maximum entropy principle.
41 :
42 : Add a linear biasing potential on one or more variables \f$f_{i}\left(\boldsymbol{x}\right)\f$ satisfying the maximum entropy principle as proposed in Ref. \cite cesari2016maxent .
43 :
44 : \warning
45 : Notice that syntax is still under revision and might change
46 :
47 : The resulting biasing potential is given by:
48 : \f[
49 : V_{BIAS}(\boldsymbol{x},t)=K_{B}T\sum_{i=1}^{\#arguments}f_{i}(\boldsymbol{x},t)\lambda_{i}(t)
50 : \f]
51 : Lagrangian multipliers \f$ \lambda_{i}\f$ are updated, every PACE steps, according to the following update rule:
52 : \f[
53 : \lambda_{i}=\lambda_{i}+\frac{k_{i}}{1+\frac{t}{\tau_{i}}}\left(f_{exp,i}+\xi_{i}\lambda_{i}-f_{i}(\boldsymbol{x})\right)
54 : \f]
\f$k_{i}\f$ sets the initial value of the learning rate and its units are \f$[observable]^{-2}ps^{-1}\f$. This can be set with the keyword KAPPA.
56 : The number of components for any KAPPA vector must be equal to the number of arguments of the action.
57 :
58 : Variable \f$ \xi_{i}(\lambda) \f$ is related to the chosen prior to model experimental errors. If a GAUSSIAN prior is used then:
59 : \f[
60 : \xi_{i}(\lambda)=-\lambda_{i}\sigma^{2}
61 : \f]
62 : where \f$ \sigma \f$ is the typical expected error on the observable \f$ f_i\f$.
63 : For a LAPLACE prior:
64 : \f[
\xi_{i}(\lambda)=-\frac{\lambda_{i}\sigma^{2}}{1-\frac{\lambda_{i}^{2}\sigma^{2}}{2}}
\f]
68 : The value of \f$ \xi(\lambda,t)\f$ is written in output as a component named: argument name followed by the string _error.
69 : Setting \f$ \sigma =0\f$ is equivalent to enforce a pure Maximum Entropy restraint without any noise modelling.
70 : This method can be also used to enforce inequality restraint as shown in following examples.
71 :
72 : Notice that a similar method is available as \ref EDS, although with different features and using a different optimization algorithm.
73 :
74 : \par Examples
75 :
76 : The following input tells plumed to restrain the distance between atoms 7 and 15
77 : and the distance between atoms 2 and 19, at different equilibrium
78 : values, and to print the energy of the restraint.
79 : Lagrangian multiplier will be printed on a file called restraint.LAGMULT with a stride set by the variable PACE to 200ps.
80 : Moreover plumed will compute the average of each Lagrangian multiplier in the window [TSTART,TEND] and use that to continue the simulations with fixed Lagrangian multipliers.
81 : \plumedfile
82 : DISTANCE ATOMS=7,15 LABEL=d1
83 : DISTANCE ATOMS=2,19 LABEL=d2
84 : MAXENT ...
85 : ARG=d1,d2
86 : TYPE=EQUAL
87 : AT=0.2,0.5
88 : KAPPA=35000.0,35000.0
89 : TAU=0.02,0.02
90 : PACE=200
91 : TSTART=100
92 : TEND=500
93 : LABEL=restraint
94 : ... MAXENT
95 : PRINT ARG=restraint.bias
96 : \endplumedfile
The energy of the restraint (restraint.bias) is printed by the PRINT action, while the Lagrangian multipliers are written to the file restraint.LAGMULT.
98 : The following input tells plumed to restrain the distance between atoms 7 and 15
to be greater than 0.02 and to print the energy of the restraint
100 : \plumedfile
101 : DISTANCE ATOMS=7,15 LABEL=d
102 : MAXENT ARG=d TYPE=INEQUAL> AT=0.02 KAPPA=35000.0 TAU=3 LABEL=restraint
103 : PRINT ARG=restraint.bias
104 : \endplumedfile
105 :
106 : (See also \ref DISTANCE and \ref PRINT).
107 :
108 : */
109 : //+ENDPLUMEDOC
110 :
/// \brief Linear bias enforcing a maximum-entropy restraint (MAXENT).
///
/// The bias is V(x,t) = kB*T * sum_i lambda_i(t) * f_i(x); the Lagrangian
/// multipliers lambda_i are updated on the fly every PACE steps (see
/// update_lambda()) and optionally averaged in the window [TSTART,TEND].
class MaxEnt : public Bias {
  std::vector<double> at;                 ///< restraint centers, one per argument (AT)
  std::vector<double> kappa;              ///< initial learning rates (KAPPA)
  std::vector<double> lambda;             ///< current Lagrangian multipliers
  std::vector<double> avgx;               ///< NOTE(review): never written/read in this file — possibly vestigial
  std::vector<double> work;               ///< accumulated work per argument
  std::vector<double> oldlambda;          ///< effective multipliers from the previous update (for work)
  std::vector<double> tau;                ///< damping times of the learning rates (TAU)
  std::vector<double> avglambda;          ///< running sum / final average of multipliers in [TSTART,TEND]
  std::vector<double> avglambda_restart;  ///< NOTE(review): never used in this file — possibly vestigial
  std::vector<double> expected_eps;       ///< values exported through the *_error components
  std::vector<double> apply_weights;      ///< per-replica weights (APPLY_WEIGHTS), 1 = bias applied
  double sigma;                           ///< expected error on the observables (SIGMA)
  double tstart;                          ///< start of averaging window in ps; -1 disables averaging
  double tend;                            ///< end of averaging window in ps; -1 disables averaging
  double avgstep; //current number of samples over which to compute the average. Check if could be replaced bu getStep()
  long long int pace_;                    ///< multiplier-update frequency in steps (PACE)
  long long int stride_;                  ///< output frequency of the LAGMULT file (PRINT_STRIDE)
  double totalWork;                       ///< total work done by the bias so far
  double BetaReweightBias;                ///< bias/(kB*T), used when REWEIGHT is active
  double simtemp;                         ///< kB*T of the simulation (from TEMP or the MD engine)
  std::vector<ActionWithValue*> biases;
  std::string type;                       ///< restraint type: EQUAL, INEQUAL> or INEQUAL<
  std::string error_type;                 ///< error prior: GAUSSIAN or LAPLACE
  double alpha;                           ///< exponent mixing Gaussian/exponential prior (ALPHA)
  double avg_counter;                     ///< number of samples accumulated in avglambda
  int learn_replica;                      ///< replica broadcasting its multipliers (LEARN_REPLICA)
  Value* valueForce2;                     ///< "force2" output component
  Value* valueWork;                       ///< "work" output component
  OFile lagmultOfile_;                    ///< output file for the multipliers
  IFile ifile;                            ///< input file used when restarting
  std::string lagmultfname;               ///< multiplier file name (FILE, default <label>.LAGMULT)
  std::string ifilesnames;
  std::string fmt;                        ///< printf-style format for the multiplier file (FMT)
  bool isFirstStep;                       ///< true until the first call to update()
  bool reweight;                          ///< REWEIGHT flag (a-posteriori reweighting with driver)
  bool no_broadcast;                      ///< NO_BROADCAST flag: keep multipliers replica-local
  bool printFirstStep;                    ///< false when restarting, to avoid duplicating the first line
  std::vector<bool> done_average;         ///< per-argument: averaging finished, multiplier frozen
  int myrep,nrep;                         ///< index of this replica and total number of replicas
public:
  explicit MaxEnt(const ActionOptions&);
  void calculate() override;
  void update() override;
  /// Update (and possibly average) the Lagrangian multipliers.
  void update_lambda();
  static void registerKeywords(Keywords& keys);
  /// Read multipliers back from a LAGMULT file on restart.
  void ReadLagrangians(IFile &ifile);
  /// Append the current multipliers to the LAGMULT file.
  void WriteLagrangians(std::vector<double> &lagmult,OFile &file);
  /// Error contribution xi_i(lambda) for the chosen prior.
  double compute_error(const std::string &err_type,double l);
  /// Effective multiplier given the restraint type (clamps sign for INEQUAL).
  double convert_lambda(const std::string &type,double lold);
  /// Clamp lambda inside the range allowed by the LAPLACE prior.
  void check_lambda_boundaries(const std::string &err_type,double &l);
};
PLUMED_REGISTER_ACTION(MaxEnt,"MAXENT")
164 :
165 56 : void MaxEnt::registerKeywords(Keywords& keys) {
166 56 : Bias::registerKeywords(keys);
167 56 : componentsAreNotOptional(keys);
168 56 : keys.use("ARG");
169 112 : keys.add("compulsory","KAPPA","0.0","specifies the initial value for the learning rate");
170 112 : keys.add("compulsory","TAU","Specify the dumping time for the learning rate.");
171 112 : keys.add("compulsory","TYPE","specify the restraint type. "
172 : "EQUAL to restrain the variable at a given equilibrium value "
173 : "INEQUAL< to restrain the variable to be smaller than a given value "
174 : "INEQUAL> to restrain the variable to be greater than a given value");
175 112 : keys.add("optional","ERROR_TYPE","specify the prior on the error to use."
176 : "GAUSSIAN: use a Gaussian prior "
177 : "LAPLACE: use a Laplace prior");
178 112 : keys.add("optional","TSTART","time from where to start averaging the Lagrangian multiplier. By default no average is computed, hence lambda is updated every PACE steps");
179 112 : keys.add("optional","TEND","time in ps where to stop to compute the average of Lagrangian multiplier. From this time until the end of the simulation Lagrangian multipliers are kept fix to the average computed between TSTART and TEND;");
180 112 : keys.add("optional","ALPHA","default=1.0; To be used with LAPLACE KEYWORD, allows to choose a prior function proportional to a Gaussian times an exponential function. ALPHA=1 correspond to the LAPLACE prior.");
181 112 : keys.add("compulsory","AT","the position of the restraint");
182 112 : keys.add("optional","SIGMA","The typical errors expected on observable");
183 112 : keys.add("optional","FILE","Lagrangian multipliers output file. The default name is: label name followed by the string .LAGMULT ");
184 112 : keys.add("optional","LEARN_REPLICA","In a multiple replica environment specify which is the reference replica. By default replica 0 will be used.");
185 112 : keys.add("optional","APPLY_WEIGHTS","Vector of weights containing 1 in correspondence of each replica that will receive the Lagrangian multiplier from the current one.");
186 112 : keys.add("optional","PACE","the frequency for Lagrangian multipliers update");
187 112 : keys.add("optional","PRINT_STRIDE","stride of Lagrangian multipliers output file. If no STRIDE is passed they are written every time they are updated (PACE).");
188 112 : keys.add("optional","FMT","specify format for Lagrangian multipliers files (useful to decrease the number of digits in regtests)");
189 112 : keys.addFlag("REWEIGHT",false,"to be used with plumed driver in order to reweight a trajectory a posteriori");
190 112 : keys.addFlag("NO_BROADCAST",false,"If active will avoid Lagrangian multipliers to be communicated to other replicas.");
191 112 : keys.add("optional","TEMP","the system temperature. This is required if you are reweighting.");
192 112 : keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential");
193 112 : keys.addOutputComponent("work","default","the instantaneous value of the work done by the biasing force");
194 112 : keys.addOutputComponent("_work","default","the instantaneous value of the work done by the biasing force for each argument. "
195 : "These quantities will named with the arguments of the bias followed by "
196 : "the character string _work.");
197 112 : keys.addOutputComponent("_error","default","Instantaneous values of the discrepancy between the observable and the restraint center");
198 112 : keys.addOutputComponent("_coupling","default","Instantaneous values of Lagrangian multipliers. They are also written by default in a separate output file.");
199 56 : keys.use("RESTART");
200 56 : }
201 52 : MaxEnt::MaxEnt(const ActionOptions&ao):
202 : PLUMED_BIAS_INIT(ao),
203 104 : at(getNumberOfArguments()),
204 52 : kappa(getNumberOfArguments(),0.0),
205 52 : lambda(getNumberOfArguments(),0.0),
206 52 : avgx(getNumberOfArguments(),0.0),
207 52 : oldlambda(getNumberOfArguments(),0.0),
208 52 : tau(getNumberOfArguments(),getTimeStep()),
209 52 : avglambda(getNumberOfArguments(),0.0),
210 52 : avglambda_restart(getNumberOfArguments(),0.0),
211 52 : expected_eps(getNumberOfArguments(),0.0),
212 52 : sigma(0.0),
213 52 : pace_(100),
214 52 : stride_(100),
215 52 : alpha(1.0),
216 52 : avg_counter(0.0),
217 52 : isFirstStep(true),
218 52 : reweight(false),
219 52 : no_broadcast(false),
220 52 : printFirstStep(true),
221 156 : done_average(getNumberOfArguments(),false) {
222 52 : if(comm.Get_rank()==0) {
223 44 : nrep=multi_sim_comm.Get_size();
224 : }
225 52 : if(comm.Get_rank()==0) {
226 44 : myrep=multi_sim_comm.Get_rank();
227 : }
228 52 : comm.Bcast(nrep,0);
229 52 : comm.Bcast(myrep,0);
230 52 : parseFlag("NO_BROADCAST",no_broadcast);
231 : //if(no_broadcast){
232 : //for(int irep=0;irep<nrep;irep++){
233 : // if(irep!=myrep)
234 : // apply_weights[irep]=0.0;}
235 : //}
236 52 : avgstep=1.0;
237 52 : tstart=-1.0;
238 52 : tend=-1.0;
239 52 : totalWork=0.0;
240 52 : learn_replica=0;
241 :
242 52 : parse("LEARN_REPLICA",learn_replica);
243 104 : parseVector("APPLY_WEIGHTS",apply_weights);
244 52 : if(apply_weights.size()==0) {
245 52 : apply_weights.resize(nrep,1.0);
246 : }
247 52 : parseVector("KAPPA",kappa);
248 52 : parseVector("AT",at);
249 52 : parseVector("TAU",tau);
250 104 : parse("TYPE",type);
251 : error_type="GAUSSIAN";
252 52 : parse("ERROR_TYPE",error_type);
253 52 : parse("ALPHA",alpha);
254 52 : parse("SIGMA",sigma);
255 52 : parse("TSTART",tstart);
256 52 : if(tstart <0 && tstart != -1.0) {
257 0 : error("TSTART should be a positive number");
258 : }
259 52 : parse("TEND",tend);
260 52 : if(tend<0 && tend != -1.0) {
261 0 : error("TSTART should be a positive number");
262 : }
263 52 : if(tend<tstart) {
264 0 : error("TEND should be >= TSTART");
265 : }
266 52 : lagmultfname=getLabel()+".LAGMULT";
267 52 : parse("FILE",lagmultfname);
268 52 : parse("FMT",fmt);
269 52 : parse("PACE",pace_);
270 52 : if(pace_<=0 ) {
271 0 : error("frequency for Lagrangian multipliers update (PACE) is nonsensical");
272 : }
273 52 : stride_=pace_; //if no STRIDE is passed, then Lagrangian multipliers willbe printed at each update
274 52 : parse("PRINT_STRIDE",stride_);
275 52 : if(stride_<=0 ) {
276 0 : error("frequency for Lagrangian multipliers printing (STRIDE) is nonsensical");
277 : }
278 52 : simtemp=0.;
279 52 : parse("TEMP",simtemp);
280 52 : if(simtemp>0) {
281 52 : simtemp*=plumed.getAtoms().getKBoltzmann();
282 : } else {
283 0 : simtemp=plumed.getAtoms().getKbT();
284 : }
285 52 : parseFlag("REWEIGHT",reweight);
286 52 : if(simtemp<=0 && reweight) {
287 0 : error("Set the temperature (TEMP) if you want to do reweighting.");
288 : }
289 :
290 52 : checkRead();
291 :
292 52 : log.printf(" at");
293 548 : for(unsigned i=0; i<at.size(); i++) {
294 496 : log.printf(" %f",at[i]);
295 : }
296 52 : log.printf("\n");
297 52 : log.printf(" with initial learning rate for optimization of");
298 548 : for(unsigned i=0; i<kappa.size(); i++) {
299 496 : log.printf(" %f",kappa[i]);
300 : }
301 52 : log.printf("\n");
302 52 : log.printf("Dumping times for the learning rates are (ps): ");
303 548 : for(unsigned i=0; i<tau.size(); i++) {
304 496 : log.printf(" %f",tau[i]);
305 : }
306 52 : log.printf("\n");
307 52 : log.printf("Lagrangian multipliers are updated every %lld steps (PACE)\n",pace_);
308 52 : log.printf("Lagrangian multipliers output file %s\n",lagmultfname.c_str());
309 52 : log.printf("Lagrangian multipliers are written every %lld steps (PRINT_STRIDE)\n",stride_);
310 52 : if(fmt.length()>0) {
311 52 : log.printf("The format for real number in Lagrangian multipliers file is %s\n",fmt.c_str());
312 : }
313 52 : if(tstart >-1.0 && tend>-1.0) {
314 16 : log.printf("Lagrangian multipliers are averaged from %lf ps to %lf ps\n",tstart,tend);
315 : }
316 52 : if(no_broadcast) {
317 0 : log.printf("Using NO_BROADCAST options. Lagrangian multipliers will not be comunicated to other replicas.\n");
318 : }
319 : //for(int irep=0;irep<nrep;irep++){
320 : // if(apply_weights[irep]!=0)
321 : // log.printf("%d",irep);
322 : // }
323 52 : addComponent("force2");
324 52 : componentIsNotPeriodic("force2");
325 52 : addComponent("work");
326 52 : componentIsNotPeriodic("work");
327 52 : valueForce2=getPntrToComponent("force2");
328 52 : valueWork=getPntrToComponent("work");
329 :
330 : std::string comp;
331 548 : for(unsigned i=0; i< getNumberOfArguments() ; i++) {
332 496 : comp=getPntrToArgument(i)->getName()+"_coupling";
333 496 : addComponent(comp);
334 496 : componentIsNotPeriodic(comp);
335 496 : comp=getPntrToArgument(i)->getName()+"_work";
336 496 : addComponent(comp);
337 496 : componentIsNotPeriodic(comp);
338 496 : work.push_back(0.); // initialize the work value
339 496 : comp=getPntrToArgument(i)->getName()+"_error";
340 496 : addComponent(comp);
341 496 : componentIsNotPeriodic(comp);
342 : }
343 : std::string fname;
344 : fname=lagmultfname;
345 52 : ifile.link(*this);
346 52 : if(ifile.FileExist(fname)) {
347 37 : ifile.open(fname);
348 37 : if(getRestart()) {
349 37 : log.printf(" Restarting from: %s\n",fname.c_str());
350 37 : ReadLagrangians(ifile);
351 37 : printFirstStep=false;
352 : }
353 37 : ifile.reset(false);
354 : }
355 :
356 52 : lagmultOfile_.link(*this);
357 52 : lagmultOfile_.open(fname);
358 52 : if(fmt.length()>0) {
359 52 : fmt=" "+fmt;
360 52 : lagmultOfile_.fmtField(fmt);
361 : }
362 52 : }
363 : ////MEMBER FUNCTIONS
// Read the Lagrangian multipliers back from an existing LAGMULT file on
// restart. The last record read leaves `lambda` at its most recent value;
// records whose time falls in [tstart,tend] are accumulated into `avglambda`
// (with `avg_counter` counting the samples) so that averaging can resume
// consistently across the restart.
void MaxEnt::ReadLagrangians(IFile &ifile) {
  double dummy;  // time stamp of the record being read
  while(ifile.scanField("time",dummy)) {
    for(unsigned j=0; j<getNumberOfArguments(); ++j) {
      ifile.scanField(getPntrToArgument(j)->getName()+"_coupling",lambda[j]);
      if(dummy>=tstart && dummy <=tend) {
        avglambda[j]+=lambda[j];
      }
      // Past the averaging window the multipliers are already frozen:
      // take the stored value as the final average.
      // NOTE(review): when TEND is unset (tend=-1) this condition is always
      // true and done_average is set; this appears harmless because
      // update_lambda() guards its use with tend>=0 — confirm.
      if(dummy>=tend) {
        avglambda[j]=lambda[j];
        done_average[j]=true;
      }
    }
    if(dummy>=tstart && dummy <=tend) {
      avg_counter++;
    }
    ifile.scanField();  // close the record
  }
}
383 572 : void MaxEnt::WriteLagrangians(std::vector<double> &lagmult,OFile &file) {
384 572 : if(printFirstStep) {
385 165 : unsigned ncv=getNumberOfArguments();
386 165 : file.printField("time",getTimeStep()*getStep());
387 1320 : for(unsigned i=0; i<ncv; ++i) {
388 2310 : file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
389 : }
390 165 : file.printField();
391 : } else {
392 407 : if(!isFirstStep) {
393 370 : unsigned ncv=getNumberOfArguments();
394 370 : file.printField("time",getTimeStep()*getStep());
395 4280 : for(unsigned i=0; i<ncv; ++i) {
396 7820 : file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
397 : }
398 370 : file.printField();
399 : }
400 : }
401 572 : }
402 5456 : double MaxEnt::compute_error(const std::string &err_type,double l) {
403 5456 : double sigma2=std::pow(sigma,2.0);
404 5456 : double l2=convert_lambda(type,l);
405 : double return_error=0;
406 5456 : if(err_type=="GAUSSIAN" && sigma!=0.0) {
407 0 : return_error=-l2*sigma2;
408 : } else {
409 5456 : if(err_type=="LAPLACE" && sigma!=0) {
410 5456 : return_error=-l2*sigma2/(1.0-l2*l2*sigma2/(alpha+1));
411 : }
412 : }
413 5456 : return return_error;
414 : }
415 122646 : double MaxEnt::convert_lambda(const std::string &type,double lold) {
416 : double return_lambda=0;
417 122646 : if(type=="EQUAL") {
418 : return_lambda=lold;
419 : } else {
420 8830 : if(type=="INEQUAL>") {
421 1687 : if(lold>0.0) {
422 : return_lambda=0.0;
423 : } else {
424 : return_lambda=lold;
425 : }
426 : } else {
427 7143 : if(type=="INEQUAL<") {
428 1687 : if(lold<0.0) {
429 : return_lambda=0.0;
430 : } else {
431 : return_lambda=lold;
432 : }
433 : }
434 : }
435 : }
436 122646 : return return_lambda;
437 : }
438 5456 : void MaxEnt::check_lambda_boundaries(const std::string &err_type,double &l) {
439 5456 : if(err_type=="LAPLACE" && sigma !=0 ) {
440 5456 : double l2=convert_lambda(err_type,l);
441 5456 : if(l2 <-(std::sqrt(alpha+1)/sigma-0.01)) {
442 0 : l=-(std::sqrt(alpha+1)/sigma-0.01);
443 0 : log.printf("Lambda exceeded the allowed range\n");
444 : }
445 5456 : if(l2>(std::sqrt(alpha+1)/sigma-0.01)) {
446 0 : l=std::sqrt(alpha+1)/sigma-0.01;
447 0 : log.printf("Lambda exceeded the allowed range\n");
448 : }
449 : }
450 5456 : }
451 :
// Gradient-like update of the Lagrangian multipliers, performed every PACE
// steps. Each multiplier moves against the deviation between the observable
// (plus the error term xi) and the target AT, with a learning rate damped as
// 1/(1+t/tau). When REWEIGHT is active the update is weighted by
// exp(-V_bias/kBT) so the result corresponds to the unbiased ensemble.
// Inside [TSTART,TEND] the (converted) multipliers are accumulated; once the
// time exceeds TEND they are frozen to the window average.
void MaxEnt::update_lambda() {

  double totalWork_=0.0;
  const double time=getTime();
  const double step=getStep();  // step count used as "time" when reweighting a trajectory
  double KbT=simtemp;
  double learning_rate;
  if(reweight) {
    BetaReweightBias=plumed.getBias()/KbT;
  } else {
    BetaReweightBias=0.0;
  }

  for(unsigned i=0; i<getNumberOfArguments(); ++i) {
    const double k=kappa[i];
    // deviation of the observable (error-corrected) from the target
    double cv=(getArgument(i)+compute_error(error_type,lambda[i])-at[i]);
    // damped learning rate; driver reweighting counts frames (step), MD counts ps (time)
    if(reweight) {
      learning_rate=1.0*k/(1+step/tau[i]);
    } else {
      learning_rate=1.0*k/(1+time/tau[i]);
    }
    lambda[i]+=learning_rate*cv*std::exp(-BetaReweightBias); //update Lagrangian multipliers and reweight them if REWEIGHT is set
    check_lambda_boundaries(error_type,lambda[i]); //check that Lagrangians multipliers not exceed the allowed range
    if(time>=tstart && time <=tend && !done_average[i]) {
      avglambda[i]+=convert_lambda(type,lambda[i]); //compute the average of Lagrangian multipliers over the required time window
    }
    if(time>=tend && tend >=0) { //setting tend<0 will disable this feature
      if(!done_average[i]) {
        // first time past TEND: finalize the average and freeze lambda to it
        avglambda[i]=avglambda[i]/avg_counter;
        done_average[i]=true;
        lambda[i]=avglambda[i];
      } else {
        lambda[i]=avglambda[i]; //keep Lagrangian multipliers fixed to the previously computed average.
      }
    }
    work[i]+=(convert_lambda(type,lambda[i])-oldlambda[i])*getArgument(i); //compute the work performed in updating lambda
    totalWork_+=work[i];
    totalWork=totalWork_;
    oldlambda[i]=convert_lambda(type,lambda[i]);
  };
  // count one more sample in the averaging window
  if(time>=tstart && time <=tend) {
    avg_counter++;
  }
}
496 :
497 5252 : void MaxEnt::calculate() {
498 : double totf2=0.0;
499 : double ene=0.0;
500 5252 : double KbT=simtemp;
501 55348 : for(unsigned i=0; i<getNumberOfArguments(); ++i) {
502 100192 : getPntrToComponent(getPntrToArgument(i)->getName()+"_error")->set(expected_eps[i]);
503 50096 : getPntrToComponent(getPntrToArgument(i)->getName()+"_work")->set(work[i]);
504 50096 : valueWork->set(totalWork);
505 50096 : getPntrToComponent(getPntrToArgument(i)->getName()+"_coupling")->set(lambda[i]);
506 50096 : const double f=-KbT*convert_lambda(type,lambda[i])*apply_weights[myrep];
507 50096 : totf2+=f*f;
508 50096 : ene+=KbT*convert_lambda(type,lambda[i])*getArgument(i)*apply_weights[myrep];
509 50096 : setOutputForce(i,f);
510 : }
511 : setBias(ene);
512 5252 : valueForce2->set(totf2);
513 5252 : }
514 :
515 5252 : void MaxEnt::update() {
516 :
517 5252 : if(getStep()%stride_ == 0) {
518 572 : WriteLagrangians(lambda,lagmultOfile_);
519 : }
520 5252 : if(getStep()%pace_ == 0) {
521 572 : update_lambda();
522 572 : if(!no_broadcast) {
523 572 : if(comm.Get_rank()==0) { //Comunicate Lagrangian multipliers from reference replica to higher ones
524 484 : multi_sim_comm.Bcast(lambda,learn_replica);
525 : }
526 : }
527 572 : comm.Bcast(lambda,0);
528 : }
529 5252 : isFirstStep=false;
530 5252 : }
531 :
532 : }
533 :
534 : }
|