Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2016-2023 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 : #include "Bias.h"
23 : #include "core/PlumedMain.h"
24 : #include "core/ActionRegister.h"
25 : #include "core/ActionWithValue.h"
26 : #include "tools/Communicator.h"
27 : #include "tools/File.h"
28 :
29 : // The original implementation of this method was contributed
30 : // by Andrea Cesari (andreacesari90@gmail.com).
31 : // Copyright has been then transferred to PLUMED developers
32 : // (see https://github.com/plumed/plumed2/blob/master/.github/CONTRIBUTING.md)
33 :
34 : namespace PLMD {
35 : namespace bias {
36 :
37 : //+PLUMEDOC BIAS MAXENT
38 : /*
39 : Add a linear biasing potential on one or more variables that satisfies a maximum entropy principle.
40 :
41 : Add a linear biasing potential on one or more variables \f$f_{i}\left(\boldsymbol{x}\right)\f$ satisfying the maximum entropy principle as proposed in Ref. \cite cesari2016maxent .
42 :
43 : \warning
44 : Notice that syntax is still under revision and might change
45 :
46 : The resulting biasing potential is given by:
47 : \f[
48 : V_{BIAS}(\boldsymbol{x},t)=K_{B}T\sum_{i=1}^{\#arguments}f_{i}(\boldsymbol{x},t)\lambda_{i}(t)
49 : \f]
50 : Lagrangian multipliers \f$ \lambda_{i}\f$ are updated, every PACE steps, according to the following update rule:
51 : \f[
52 : \lambda_{i}=\lambda_{i}+\frac{k_{i}}{1+\frac{t}{\tau_{i}}}\left(f_{exp,i}+\xi_{i}\lambda_{i}-f_{i}(\boldsymbol{x})\right)
53 : \f]
54 : \f$k\f$ set the initial value of the learning rate and its units are \f$[observable]^{-2}ps^{-1}\f$. This can be set with the keyword KAPPA.
55 : The number of components for any KAPPA vector must be equal to the number of arguments of the action.
56 :
57 : Variable \f$ \xi_{i}(\lambda) \f$ is related to the chosen prior to model experimental errors. If a GAUSSIAN prior is used then:
58 : \f[
59 : \xi_{i}(\lambda)=-\lambda_{i}\sigma^{2}
60 : \f]
61 : where \f$ \sigma \f$ is the typical expected error on the observable \f$ f_i\f$.
62 : For a LAPLACE prior:
63 : \f[
64 : \xi_{i}(\lambda)=-\frac{\lambda_{i}\sigma^{2}}{1-\frac{\lambda^{2}\sigma^{2}}{2}}
65 :
66 : \f]
67 : The value of \f$ \xi(\lambda,t)\f$ is written in output as a component named: argument name followed by the string _error.
68 : Setting \f$ \sigma =0\f$ is equivalent to enforce a pure Maximum Entropy restraint without any noise modelling.
69 : This method can be also used to enforce inequality restraint as shown in following examples.
70 :
71 : Notice that a similar method is available as \ref EDS, although with different features and using a different optimization algorithm.
72 :
73 : \par Examples
74 :
75 : The following input tells plumed to restrain the distance between atoms 7 and 15
76 : and the distance between atoms 2 and 19, at different equilibrium
77 : values, and to print the energy of the restraint.
78 : Lagrangian multiplier will be printed on a file called restraint.LAGMULT with a stride set by the variable PACE to 200ps.
79 : Moreover plumed will compute the average of each Lagrangian multiplier in the window [TSTART,TEND] and use that to continue the simulations with fixed Lagrangian multipliers.
80 : \plumedfile
81 : DISTANCE ATOMS=7,15 LABEL=d1
82 : DISTANCE ATOMS=2,19 LABEL=d2
83 : MAXENT ...
84 : ARG=d1,d2
85 : TYPE=EQUAL
86 : AT=0.2,0.5
87 : KAPPA=35000.0,35000.0
88 : TAU=0.02,0.02
89 : PACE=200
90 : TSTART=100
91 : TEND=500
92 : LABEL=restraint
93 : ... MAXENT
94 : PRINT ARG=restraint.bias
95 : \endplumedfile
96 : Lagrangian multipliers will be printed on a file called restraint.LAGMULT, while the bias energy is printed on restraint.bias
97 : The following input tells plumed to restrain the distance between atoms 7 and 15
98 : to be greater than 0.02 and to print the energy of the restraint
99 : \plumedfile
100 : DISTANCE ATOMS=7,15 LABEL=d
101 : MAXENT ARG=d TYPE=INEQUAL> AT=0.02 KAPPA=35000.0 TAU=3 LABEL=restraint
102 : PRINT ARG=restraint.bias
103 : \endplumedfile
104 :
105 : (See also \ref DISTANCE and \ref PRINT).
106 :
107 : */
108 : //+ENDPLUMEDOC
109 :
/// \brief Bias implementing the maximum-entropy linear restraint (MAXENT).
///
/// Applies V(x,t) = kB*T * sum_i lambda_i(t) * f_i(x), where the Lagrangian
/// multipliers lambda_i are updated on the fly every PACE steps (see
/// update_lambda()) and optionally averaged over a [TSTART,TEND] time window,
/// after which they are kept fixed to the average.
class MaxEnt : public Bias {
  std::vector<double> at;                // restraint centers, one per argument (AT keyword)
  std::vector<double> kappa;             // initial learning rates (KAPPA keyword)
  std::vector<double> lambda;            // current Lagrangian multipliers
  std::vector<double> avgx;              // NOTE(review): allocated in ctor but never used elsewhere in this file
  std::vector<double> work;              // accumulated work per argument (reported as <arg>_work)
  std::vector<double> oldlambda;         // effective multipliers at previous update, used for the work integral
  std::vector<double> tau;               // damping times of the learning rates (TAU keyword)
  std::vector<double> avglambda;         // running sum / final average of multipliers over [TSTART,TEND]
  std::vector<double> avglambda_restart; // NOTE(review): allocated in ctor but never used elsewhere in this file
  std::vector<double> expected_eps;      // values exported through the <arg>_error components
  std::vector<double> apply_weights;     // per-replica 0/1 weights scaling force and bias (APPLY_WEIGHTS)
  double sigma;                          // typical expected error on the observables (SIGMA)
  double tstart;                         // start of the averaging window in ps; -1 disables averaging
  double tend;                           // end of the averaging window in ps; -1 disables averaging
  double avgstep; //current number of samples over which to compute the average. Check if could be replaced bu getStep()
  long long int pace_;                   // multiplier-update stride, in steps (PACE)
  long long int stride_;                 // multiplier-output stride, in steps (PRINT_STRIDE; defaults to PACE)
  double totalWork;                      // sum of work[] over all arguments, exported as "work"
  double BetaReweightBias;               // bias/(kB*T) used to reweight updates when REWEIGHT is set
  double simtemp;                        // kB*T of the simulation
  std::vector<ActionWithValue*> biases;
  std::string type;                      // restraint type: EQUAL, INEQUAL> or INEQUAL<
  std::string error_type;                // error prior: GAUSSIAN (default) or LAPLACE
  double alpha;                          // exponent mixing Gaussian/exponential priors (ALPHA, LAPLACE only)
  double avg_counter;                    // number of samples accumulated in avglambda
  int learn_replica;                     // index of the replica broadcasting its multipliers (LEARN_REPLICA)
  Value* valueForce2;                    // "force2" output component
  Value* valueWork;                      // "work" output component
  OFile lagmultOfile_;                   // output file for the Lagrangian multipliers
  IFile ifile;                           // input file used to restore multipliers on restart
  std::string lagmultfname;              // name of the multipliers file (FILE; default <label>.LAGMULT)
  std::string ifilesnames;
  std::string fmt;                       // numeric format for the multipliers file (FMT)
  bool isFirstStep;                      // true until the first call to update()
  bool reweight;                         // REWEIGHT flag: a-posteriori reweighting with plumed driver
  bool no_broadcast;                     // NO_BROADCAST flag: do not share multipliers across replicas
  bool printFirstStep;                   // false when restarting, to avoid duplicating the first record
  std::vector<bool> done_average;        // per argument: average finalized, multiplier now held fixed
  int myrep,nrep;                        // this replica's index and the total number of replicas
public:
  explicit MaxEnt(const ActionOptions&);
  void calculate() override;
  void update() override;
  void update_lambda();
  static void registerKeywords(Keywords& keys);
  void ReadLagrangians(IFile &ifile);
  void WriteLagrangians(std::vector<double> &lagmult,OFile &file);
  double compute_error(const std::string &err_type,double l);
  double convert_lambda(const std::string &type,double lold);
  void check_lambda_boundaries(const std::string &err_type,double &l);
};
PLUMED_REGISTER_ACTION(MaxEnt,"MAXENT")
163 :
/// Declare all input keywords and output components of the MAXENT action.
/// The description strings below are user-facing (they end up in the manual
/// and in error messages) and must not be edited casually.
void MaxEnt::registerKeywords(Keywords& keys) {
  Bias::registerKeywords(keys);
  keys.use("ARG");
  keys.add("compulsory","KAPPA","0.0","specifies the initial value for the learning rate");
  keys.add("compulsory","TAU","Specify the dumping time for the learning rate.");
  keys.add("compulsory","TYPE","specify the restraint type. "
           "EQUAL to restrain the variable at a given equilibrium value "
           "INEQUAL< to restrain the variable to be smaller than a given value "
           "INEQUAL> to restrain the variable to be greater than a given value");
  keys.add("optional","ERROR_TYPE","specify the prior on the error to use."
           "GAUSSIAN: use a Gaussian prior "
           "LAPLACE: use a Laplace prior");
  keys.add("optional","TSTART","time from where to start averaging the Lagrangian multiplier. By default no average is computed, hence lambda is updated every PACE steps");
  keys.add("optional","TEND","time in ps where to stop to compute the average of Lagrangian multiplier. From this time until the end of the simulation Lagrangian multipliers are kept fix to the average computed between TSTART and TEND;");
  keys.add("optional","ALPHA","default=1.0; To be used with LAPLACE KEYWORD, allows to choose a prior function proportional to a Gaussian times an exponential function. ALPHA=1 correspond to the LAPLACE prior.");
  keys.add("compulsory","AT","the position of the restraint");
  keys.add("optional","SIGMA","The typical errors expected on observable");
  keys.add("optional","FILE","Lagrangian multipliers output file. The default name is: label name followed by the string .LAGMULT ");
  keys.add("optional","LEARN_REPLICA","In a multiple replica environment specify which is the reference replica. By default replica 0 will be used.");
  keys.add("optional","APPLY_WEIGHTS","Vector of weights containing 1 in correspondence of each replica that will receive the Lagrangian multiplier from the current one.");
  keys.add("optional","PACE","the frequency for Lagrangian multipliers update");
  keys.add("optional","PRINT_STRIDE","stride of Lagrangian multipliers output file. If no STRIDE is passed they are written every time they are updated (PACE).");
  keys.add("optional","FMT","specify format for Lagrangian multipliers files (useful to decrease the number of digits in regtests)");
  keys.addFlag("REWEIGHT",false,"to be used with plumed driver in order to reweight a trajectory a posteriori");
  keys.addFlag("NO_BROADCAST",false,"If active will avoid Lagrangian multipliers to be communicated to other replicas.");
  keys.add("optional","TEMP","the system temperature. This is required if you are reweighting.");
  // Output components: the leading "_" entries are per-argument components,
  // named <argument>_work / <argument>_error / <argument>_coupling.
  keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential");
  keys.addOutputComponent("work","default","the instantaneous value of the work done by the biasing force");
  keys.addOutputComponent("_work","default","the instantaneous value of the work done by the biasing force for each argument. "
                          "These quantities will named with the arguments of the bias followed by "
                          "the character string _work.");
  keys.addOutputComponent("_error","default","Instantaneous values of the discrepancy between the observable and the restraint center");
  keys.addOutputComponent("_coupling","default","Instantaneous values of Lagrangian multipliers. They are also written by default in a separate output file.");
  keys.use("RESTART");
}
199 52 : MaxEnt::MaxEnt(const ActionOptions&ao):
200 : PLUMED_BIAS_INIT(ao),
201 104 : at(getNumberOfArguments()),
202 52 : kappa(getNumberOfArguments(),0.0),
203 52 : lambda(getNumberOfArguments(),0.0),
204 52 : avgx(getNumberOfArguments(),0.0),
205 52 : oldlambda(getNumberOfArguments(),0.0),
206 52 : tau(getNumberOfArguments(),getTimeStep()),
207 52 : avglambda(getNumberOfArguments(),0.0),
208 52 : avglambda_restart(getNumberOfArguments(),0.0),
209 52 : expected_eps(getNumberOfArguments(),0.0),
210 52 : sigma(0.0),
211 52 : pace_(100),
212 52 : stride_(100),
213 52 : alpha(1.0),
214 52 : avg_counter(0.0),
215 52 : isFirstStep(true),
216 52 : reweight(false),
217 52 : no_broadcast(false),
218 52 : printFirstStep(true),
219 156 : done_average(getNumberOfArguments(),false) {
220 52 : if(comm.Get_rank()==0) {
221 44 : nrep=multi_sim_comm.Get_size();
222 : }
223 52 : if(comm.Get_rank()==0) {
224 44 : myrep=multi_sim_comm.Get_rank();
225 : }
226 52 : comm.Bcast(nrep,0);
227 52 : comm.Bcast(myrep,0);
228 52 : parseFlag("NO_BROADCAST",no_broadcast);
229 : //if(no_broadcast){
230 : //for(int irep=0;irep<nrep;irep++){
231 : // if(irep!=myrep)
232 : // apply_weights[irep]=0.0;}
233 : //}
234 52 : avgstep=1.0;
235 52 : tstart=-1.0;
236 52 : tend=-1.0;
237 52 : totalWork=0.0;
238 52 : learn_replica=0;
239 :
240 52 : parse("LEARN_REPLICA",learn_replica);
241 104 : parseVector("APPLY_WEIGHTS",apply_weights);
242 52 : if(apply_weights.size()==0) {
243 52 : apply_weights.resize(nrep,1.0);
244 : }
245 52 : parseVector("KAPPA",kappa);
246 52 : parseVector("AT",at);
247 52 : parseVector("TAU",tau);
248 104 : parse("TYPE",type);
249 : error_type="GAUSSIAN";
250 52 : parse("ERROR_TYPE",error_type);
251 52 : parse("ALPHA",alpha);
252 52 : parse("SIGMA",sigma);
253 52 : parse("TSTART",tstart);
254 52 : if(tstart <0 && tstart != -1.0) {
255 0 : error("TSTART should be a positive number");
256 : }
257 52 : parse("TEND",tend);
258 52 : if(tend<0 && tend != -1.0) {
259 0 : error("TSTART should be a positive number");
260 : }
261 52 : if(tend<tstart) {
262 0 : error("TEND should be >= TSTART");
263 : }
264 52 : lagmultfname=getLabel()+".LAGMULT";
265 52 : parse("FILE",lagmultfname);
266 52 : parse("FMT",fmt);
267 52 : parse("PACE",pace_);
268 52 : if(pace_<=0 ) {
269 0 : error("frequency for Lagrangian multipliers update (PACE) is nonsensical");
270 : }
271 52 : stride_=pace_; //if no STRIDE is passed, then Lagrangian multipliers willbe printed at each update
272 52 : parse("PRINT_STRIDE",stride_);
273 52 : if(stride_<=0 ) {
274 0 : error("frequency for Lagrangian multipliers printing (STRIDE) is nonsensical");
275 : }
276 52 : simtemp=getkBT();
277 52 : parseFlag("REWEIGHT",reweight);
278 52 : if(simtemp<=0 && reweight) {
279 0 : error("Set the temperature (TEMP) if you want to do reweighting.");
280 : }
281 :
282 52 : checkRead();
283 :
284 52 : log.printf(" at");
285 548 : for(unsigned i=0; i<at.size(); i++) {
286 496 : log.printf(" %f",at[i]);
287 : }
288 52 : log.printf("\n");
289 52 : log.printf(" with initial learning rate for optimization of");
290 548 : for(unsigned i=0; i<kappa.size(); i++) {
291 496 : log.printf(" %f",kappa[i]);
292 : }
293 52 : log.printf("\n");
294 52 : log.printf("Dumping times for the learning rates are (ps): ");
295 548 : for(unsigned i=0; i<tau.size(); i++) {
296 496 : log.printf(" %f",tau[i]);
297 : }
298 52 : log.printf("\n");
299 52 : log.printf("Lagrangian multipliers are updated every %lld steps (PACE)\n",pace_);
300 52 : log.printf("Lagrangian multipliers output file %s\n",lagmultfname.c_str());
301 52 : log.printf("Lagrangian multipliers are written every %lld steps (PRINT_STRIDE)\n",stride_);
302 52 : if(fmt.length()>0) {
303 52 : log.printf("The format for real number in Lagrangian multipliers file is %s\n",fmt.c_str());
304 : }
305 52 : if(tstart >-1.0 && tend>-1.0) {
306 16 : log.printf("Lagrangian multipliers are averaged from %lf ps to %lf ps\n",tstart,tend);
307 : }
308 52 : if(no_broadcast) {
309 0 : log.printf("Using NO_BROADCAST options. Lagrangian multipliers will not be comunicated to other replicas.\n");
310 : }
311 : //for(int irep=0;irep<nrep;irep++){
312 : // if(apply_weights[irep]!=0)
313 : // log.printf("%d",irep);
314 : // }
315 104 : addComponent("force2");
316 104 : componentIsNotPeriodic("force2");
317 104 : addComponent("work");
318 52 : componentIsNotPeriodic("work");
319 52 : valueForce2=getPntrToComponent("force2");
320 52 : valueWork=getPntrToComponent("work");
321 :
322 : std::string comp;
323 548 : for(unsigned i=0; i< getNumberOfArguments() ; i++) {
324 992 : comp=getPntrToArgument(i)->getName()+"_coupling";
325 496 : addComponent(comp);
326 496 : componentIsNotPeriodic(comp);
327 992 : comp=getPntrToArgument(i)->getName()+"_work";
328 496 : addComponent(comp);
329 496 : componentIsNotPeriodic(comp);
330 496 : work.push_back(0.); // initialize the work value
331 992 : comp=getPntrToArgument(i)->getName()+"_error";
332 496 : addComponent(comp);
333 496 : componentIsNotPeriodic(comp);
334 : }
335 : std::string fname;
336 : fname=lagmultfname;
337 52 : ifile.link(*this);
338 52 : if(ifile.FileExist(fname)) {
339 37 : ifile.open(fname);
340 37 : if(getRestart()) {
341 37 : log.printf(" Restarting from: %s\n",fname.c_str());
342 37 : ReadLagrangians(ifile);
343 37 : printFirstStep=false;
344 : }
345 37 : ifile.reset(false);
346 : }
347 :
348 52 : lagmultOfile_.link(*this);
349 52 : lagmultOfile_.open(fname);
350 52 : if(fmt.length()>0) {
351 52 : fmt=" "+fmt;
352 52 : lagmultOfile_.fmtField(fmt);
353 : }
354 52 : }
355 : ////MEMBER FUNCTIONS
// Restore the Lagrangian multipliers from an existing LAGMULT file when
// restarting. Also rebuilds the averaging state: records whose time stamp
// falls in [tstart,tend] are accumulated into avglambda / avg_counter;
// records at or beyond tend mark the average as already finalized.
void MaxEnt::ReadLagrangians(IFile &ifile) {
  double dummy;  // time stamp of the record currently being read
  while(ifile.scanField("time",dummy)) {
    for(unsigned j=0; j<getNumberOfArguments(); ++j) {
      ifile.scanField(getPntrToArgument(j)->getName()+"_coupling",lambda[j]);
      if(dummy>=tstart && dummy <=tend) {
        avglambda[j]+=lambda[j];  // inside the averaging window: accumulate
      }
      if(dummy>=tend) {
        // At/past tend the previous run kept lambda fixed to the finalized
        // average, so the stored value IS the average — overwrite the sum.
        // NOTE(review): with the defaults tstart=tend=-1 this branch fires
        // for every record, which harmlessly leaves avglambda at the last
        // read multiplier — confirm this is intended.
        avglambda[j]=lambda[j];
        done_average[j]=true;
      }
    }
    if(dummy>=tstart && dummy <=tend) {
      avg_counter++;  // one sample per in-window record, shared by all arguments
    }
    ifile.scanField();  // finish the record (consume end-of-line)
  }
}
375 572 : void MaxEnt::WriteLagrangians(std::vector<double> &lagmult,OFile &file) {
376 572 : if(printFirstStep) {
377 165 : unsigned ncv=getNumberOfArguments();
378 165 : file.printField("time",getTimeStep()*getStep());
379 1320 : for(unsigned i=0; i<ncv; ++i) {
380 2310 : file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
381 : }
382 165 : file.printField();
383 : } else {
384 407 : if(!isFirstStep) {
385 370 : unsigned ncv=getNumberOfArguments();
386 370 : file.printField("time",getTimeStep()*getStep());
387 4280 : for(unsigned i=0; i<ncv; ++i) {
388 7820 : file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
389 : }
390 370 : file.printField();
391 : }
392 : }
393 572 : }
394 5456 : double MaxEnt::compute_error(const std::string &err_type,double l) {
395 5456 : double sigma2=std::pow(sigma,2.0);
396 5456 : double l2=convert_lambda(type,l);
397 : double return_error=0;
398 5456 : if(err_type=="GAUSSIAN" && sigma!=0.0) {
399 0 : return_error=-l2*sigma2;
400 : } else {
401 5456 : if(err_type=="LAPLACE" && sigma!=0) {
402 5456 : return_error=-l2*sigma2/(1.0-l2*l2*sigma2/(alpha+1));
403 : }
404 : }
405 5456 : return return_error;
406 : }
407 122646 : double MaxEnt::convert_lambda(const std::string &type,double lold) {
408 : double return_lambda=0;
409 122646 : if(type=="EQUAL") {
410 : return_lambda=lold;
411 : } else {
412 8830 : if(type=="INEQUAL>") {
413 1687 : if(lold>0.0) {
414 : return_lambda=0.0;
415 : } else {
416 : return_lambda=lold;
417 : }
418 : } else {
419 7143 : if(type=="INEQUAL<") {
420 1687 : if(lold<0.0) {
421 : return_lambda=0.0;
422 : } else {
423 : return_lambda=lold;
424 : }
425 : }
426 : }
427 : }
428 122646 : return return_lambda;
429 : }
430 5456 : void MaxEnt::check_lambda_boundaries(const std::string &err_type,double &l) {
431 5456 : if(err_type=="LAPLACE" && sigma !=0 ) {
432 5456 : double l2=convert_lambda(err_type,l);
433 5456 : if(l2 <-(std::sqrt(alpha+1)/sigma-0.01)) {
434 0 : l=-(std::sqrt(alpha+1)/sigma-0.01);
435 0 : log.printf("Lambda exceeded the allowed range\n");
436 : }
437 5456 : if(l2>(std::sqrt(alpha+1)/sigma-0.01)) {
438 0 : l=std::sqrt(alpha+1)/sigma-0.01;
439 0 : log.printf("Lambda exceeded the allowed range\n");
440 : }
441 : }
442 5456 : }
443 :
444 572 : void MaxEnt::update_lambda() {
445 :
446 : double totalWork_=0.0;
447 572 : const double time=getTime();
448 572 : const double step=getStep();
449 572 : double KbT=simtemp;
450 : double learning_rate;
451 572 : if(reweight) {
452 396 : BetaReweightBias=plumed.getBias()/KbT;
453 : } else {
454 176 : BetaReweightBias=0.0;
455 : }
456 :
457 6028 : for(unsigned i=0; i<getNumberOfArguments(); ++i) {
458 5456 : const double k=kappa[i];
459 5456 : double cv=(getArgument(i)+compute_error(error_type,lambda[i])-at[i]);
460 5456 : if(reweight) {
461 4224 : learning_rate=1.0*k/(1+step/tau[i]);
462 : } else {
463 1232 : learning_rate=1.0*k/(1+time/tau[i]);
464 : }
465 5456 : lambda[i]+=learning_rate*cv*std::exp(-BetaReweightBias); //update Lagrangian multipliers and reweight them if REWEIGHT is set
466 5456 : check_lambda_boundaries(error_type,lambda[i]); //check that Lagrangians multipliers not exceed the allowed range
467 6128 : if(time>=tstart && time <=tend && !done_average[i]) {
468 630 : avglambda[i]+=convert_lambda(type,lambda[i]); //compute the average of Lagrangian multipliers over the required time window
469 : }
470 5456 : if(time>=tend && tend >=0) { //setting tend<0 will disable this feature
471 112 : if(!done_average[i]) {
472 105 : avglambda[i]=avglambda[i]/avg_counter;
473 : done_average[i]=true;
474 105 : lambda[i]=avglambda[i];
475 : } else {
476 7 : lambda[i]=avglambda[i]; //keep Lagrangian multipliers fixed to the previously computed average.
477 : }
478 : }
479 5456 : work[i]+=(convert_lambda(type,lambda[i])-oldlambda[i])*getArgument(i); //compute the work performed in updating lambda
480 5456 : totalWork_+=work[i];
481 5456 : totalWork=totalWork_;
482 5456 : oldlambda[i]=convert_lambda(type,lambda[i]);
483 : };
484 572 : if(time>=tstart && time <=tend) {
485 96 : avg_counter++;
486 : }
487 572 : }
488 :
489 5252 : void MaxEnt::calculate() {
490 : double totf2=0.0;
491 : double ene=0.0;
492 5252 : double KbT=simtemp;
493 55348 : for(unsigned i=0; i<getNumberOfArguments(); ++i) {
494 100192 : getPntrToComponent(getPntrToArgument(i)->getName()+"_error")->set(expected_eps[i]);
495 50096 : getPntrToComponent(getPntrToArgument(i)->getName()+"_work")->set(work[i]);
496 50096 : valueWork->set(totalWork);
497 50096 : getPntrToComponent(getPntrToArgument(i)->getName()+"_coupling")->set(lambda[i]);
498 50096 : const double f=-KbT*convert_lambda(type,lambda[i])*apply_weights[myrep];
499 50096 : totf2+=f*f;
500 50096 : ene+=KbT*convert_lambda(type,lambda[i])*getArgument(i)*apply_weights[myrep];
501 50096 : setOutputForce(i,f);
502 : }
503 5252 : setBias(ene);
504 5252 : valueForce2->set(totf2);
505 5252 : }
506 :
// Called once per MD step after calculate(): writes the multipliers every
// PRINT_STRIDE steps and updates them every PACE steps. After an update the
// multipliers of the LEARN_REPLICA are broadcast to all replicas (unless
// NO_BROADCAST is set) and then to all ranks within each replica.
void MaxEnt::update() {

  if(getStep()%stride_ == 0) {
    WriteLagrangians(lambda,lagmultOfile_);
  }
  if(getStep()%pace_ == 0) {
    update_lambda();
    if(!no_broadcast) {
      if(comm.Get_rank()==0) { // Communicate Lagrangian multipliers from reference replica to the other ones
        multi_sim_comm.Bcast(lambda,learn_replica);
      }
    }
    // Propagate the (possibly replaced) multipliers to all intra-replica ranks.
    comm.Bcast(lambda,0);
  }
  isFirstStep=false;
}
523 :
524 : }
525 :
526 : }
|