Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2011-2023 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
#include "Atoms.h"
#include "ActionAtomistic.h"
#include "MDAtoms.h"
#include "PlumedMain.h"
#include "tools/Pbc.h"
#include "tools/Tools.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
#include <string>
32 :
33 : namespace PLMD {
34 :
/// We assume that charges and masses are constant along the simulation
/// Set this to false if you want to revert to the original (expensive) behavior
/// (i.e. re-share masses and charges at every step).
static const bool shareMassAndChargeOnlyAtFirstStep=true;
38 :
39 : /// Use a priority_queue to merge unique vectors.
40 : /// export PLUMED_MERGE_VECTORS_PRIORITY_QUEUE=yes to use a priority_queue.
41 : /// Might be faster with some settings, but appears to not be in practice.
42 : /// This option is for testing and might be removed.
/// Use a priority_queue to merge unique vectors.
/// export PLUMED_MERGE_VECTORS_PRIORITY_QUEUE=yes to use a priority_queue.
/// Might be faster with some settings, but appears to not be in practice.
/// This option is for testing and might be removed.
static bool getenvMergeVectorsPriorityQueue() noexcept {
  // The environment is read only once; the answer is cached for later calls.
  static const bool enabled=(std::getenv("PLUMED_MERGE_VECTORS_PRIORITY_QUEUE")!=nullptr);
  return enabled;
}
47 :
48 : /// Use unique list of atoms to manipulate forces and positions.
49 : /// A unique list of atoms is used to manipulate forces and positions in MPI parallel runs.
50 : /// In serial runs, this is done if convenient. The code currently contain
51 : /// some heuristic to decide if the unique list should be used or not.
52 : /// An env var can be used to override this decision.
53 : /// export PLUMED_FORCE_UNIQUE=yes # enforce using the unique list in serial runs
54 : /// export PLUMED_FORCE_UNIQUE=no # enforce not using the unique list in serial runs
55 : /// export PLUMED_FORCE_UNIQUE=auto # choose heuristically
56 : /// default: auto
/// Use unique list of atoms to manipulate forces and positions.
/// A unique list of atoms is used to manipulate forces and positions in MPI parallel runs.
/// In serial runs, this is done if convenient. The code currently contain
/// some heuristic to decide if the unique list should be used or not.
/// An env var can be used to override this decision.
/// export PLUMED_FORCE_UNIQUE=yes # enforce using the unique list in serial runs
/// export PLUMED_FORCE_UNIQUE=no # enforce not using the unique list in serial runs
/// export PLUMED_FORCE_UNIQUE=auto # choose heuristically
/// default: auto
static const char* getenvForceUnique() noexcept {
  // Cached on first use; repeated calls return the same pointer.
  static const char* const value=std::getenv("PLUMED_FORCE_UNIQUE");
  return value;
}
61 :
62 : class PlumedMain;
63 :
// Construct the atoms container attached to a PlumedMain instance.
// All per-step flags start cleared; the MDAtoms adapter defaults to
// double precision until setRealPrecision() is called.
Atoms::Atoms(PlumedMain&plumed):
  natoms(0),
  md_energy(0.0),
  energy(0.0),
  dataCanBeSet(false),
  collectEnergy(false),
  energyHasBeenSet(false),
  positionsHaveBeenSet(0),
  massesHaveBeenSet(false),
  chargesHaveBeenSet(false),
  boxHasBeenSet(false),
  forcesHaveBeenSet(0),
  virialHasBeenSet(false),
  massAndChargeOK(false),
  shuffledAtoms(0),
  mdatoms(MDAtomsBase::create(sizeof(double))),  // default: double-precision MD interface
  plumed(plumed),
  naturalUnits(false),
  MDnaturalUnits(false),
  timestep(0.0),
  forceOnEnergy(0.0),
  zeroallforces(false),
  kbT(0.0),
  asyncSent(false),
  atomsNeeded(false),
  ddStep(0) {
}
91 :
92 805723 : Atoms::~Atoms() {
93 805723 : if(actions.size()>0) {
94 0 : std::cerr<<"WARNING: there is some inconsistency in action added to atoms, as some of them were not properly destroyed. This might indicate an internal bug!!\n";
95 : }
96 1611446 : }
97 :
98 258483 : void Atoms::startStep() {
99 258483 : collectEnergy=false;
100 258483 : energyHasBeenSet=false;
101 258483 : positionsHaveBeenSet=0;
102 258483 : massesHaveBeenSet=false;
103 258483 : chargesHaveBeenSet=false;
104 258483 : boxHasBeenSet=false;
105 258483 : forcesHaveBeenSet=0;
106 258483 : virialHasBeenSet=false;
107 258483 : dataCanBeSet=true;
108 258483 : resetExtraCVNeeded();
109 258483 : }
110 :
// Store the pointer to the MD-code box and mark the box as set.
// The box is read back into a local Tensor immediately — presumably to
// trigger conversion/validation inside MDAtoms (TODO confirm); the
// member `box` itself is refreshed later, in share().
void Atoms::setBox(const TypesafePtr & p) {
  mdatoms->setBox(p);
  Tensor b;
  mdatoms->getBox(b);
  boxHasBeenSet=true;
}
117 :
// Store the pointer to the full (x,y,z-interleaved) position array.
// positionsHaveBeenSet counts components; 3 means all of x,y,z are set.
void Atoms::setPositions(const TypesafePtr & p) {
  plumed_massert( dataCanBeSet,"setPositions must be called after setStep in MD code interface");
  plumed_massert( p || gatindex.size()==0, "NULL position pointer with non-zero local atoms");
  mdatoms->setp(p);
  positionsHaveBeenSet=3;
}
124 :
// Store the pointer to the per-atom mass array supplied by the MD code.
void Atoms::setMasses(const TypesafePtr & p) {
  plumed_massert( dataCanBeSet,"setMasses must be called after setStep in MD code interface");
  plumed_massert( p || gatindex.size()==0, "NULL mass pointer with non-zero local atoms");
  mdatoms->setm(p);
  massesHaveBeenSet=true;

}
132 :
// Store the pointer to the per-atom charge array supplied by the MD code.
void Atoms::setCharges(const TypesafePtr & p) {
  plumed_massert( dataCanBeSet, "setCharges must be called after setStep in MD code interface");
  plumed_massert( p || gatindex.size()==0, "NULL charges pointer with non-zero local atoms");
  mdatoms->setc(p);
  chargesHaveBeenSet=true;
}
139 :
// Store the pointer to the virial tensor where PLUMED will add its contribution.
void Atoms::setVirial(const TypesafePtr & p) {
  plumed_massert( dataCanBeSet,"setVirial must be called after setStep in MD code interface");
  mdatoms->setVirial(p);
  virialHasBeenSet=true;
}
145 :
// Read the potential energy from the MD code and convert it from MD
// units to PLUMED internal units.
void Atoms::setEnergy(const TypesafePtr & p) {
  plumed_massert( dataCanBeSet,"setEnergy must be called after setStep in MD code interface");
  MD2double(p,md_energy);
  md_energy*=MDUnits.getEnergy()/units.getEnergy();
  energyHasBeenSet=true;
}
152 :
// Store the pointer to the full (x,y,z-interleaved) force array.
// forcesHaveBeenSet counts components; 3 means all of x,y,z are set.
void Atoms::setForces(const TypesafePtr & p) {
  plumed_massert( dataCanBeSet,"setForces must be called after setStep in MD code interface");
  plumed_massert( p || gatindex.size()==0, "NULL force pointer with non-zero local atoms");
  forcesHaveBeenSet=3;
  mdatoms->setf(p);
}
159 :
// Store the pointer for a single Cartesian component (i in 0..2) of the
// positions, for MD codes that keep x/y/z in separate arrays.
void Atoms::setPositions(const TypesafePtr & p,int i) {
  plumed_massert( dataCanBeSet,"setPositions must be called after setStep in MD code interface");
  plumed_massert( p || gatindex.size()==0, "NULL positions pointer with non-zero local atoms");
  mdatoms->setp(p,i);
  positionsHaveBeenSet++;
}
166 :
// Store the pointer for a single Cartesian component (i in 0..2) of the
// forces, for MD codes that keep x/y/z in separate arrays.
void Atoms::setForces(const TypesafePtr & p,int i) {
  plumed_massert( dataCanBeSet,"setForces must be called after setStep in MD code interface");
  plumed_massert( p || gatindex.size()==0, "NULL force pointer with non-zero local atoms");
  mdatoms->setf(p,i);
  forcesHaveBeenSet++;
}
173 :
// Decide which atoms must be gathered for the active actions on this step,
// build the merged `unique` list, and delegate the actual data movement to
// share(const std::vector<AtomNumber>&).
void Atoms::share() {
// At first step I scatter all the atoms so as to store their mass and charge
// Notice that this works with the assumption that charges and masses are
// not changing during the simulation!
  if(!massAndChargeOK && shareMassAndChargeOnlyAtFirstStep) {
    shareAll();
    return;
  }

  // Heuristic (overridable via PLUMED_FORCE_UNIQUE): use the unique list in
  // serial runs only when the largest request covers less than half the atoms.
  if(!getenvForceUnique() || !std::strcmp(getenvForceUnique(),"auto")) {
    unsigned largest=0;
    for(unsigned i=0; i<actions.size(); i++) {
      if(actions[i]->isActive()) {
        auto l=actions[i]->getUnique().size();
        if(l>largest) {
          largest=l;
        }
      }
    }
    if(largest*2<natoms) {
      unique_serial=true;
    } else {
      unique_serial=false;
    }
  } else if(!std::strcmp(getenvForceUnique(),"yes")) {
    unique_serial=true;
  } else if(!std::strcmp(getenvForceUnique(),"no")) {
    unique_serial=false;
  } else {
    plumed_error()<<"PLUMED_FORCE_UNIQUE set to unknown value "<<getenvForceUnique();
  }

  // With a unique list (or with domain decomposition / shuffled atoms) merge
  // the per-action sorted lists of local atoms into a single sorted `unique`.
  if(unique_serial || !(int(gatindex.size())==natoms && shuffledAtoms==0)) {
    std::vector<const std::vector<AtomNumber>*> vectors;
    vectors.reserve(actions.size());
    for(unsigned i=0; i<actions.size(); i++) {
      if(actions[i]->isActive()) {
        if(!actions[i]->getUnique().empty()) {
// unique are the local atoms
          vectors.push_back(&actions[i]->getUniqueLocal());
        }
      }
    }
    if(!vectors.empty()) {
      atomsNeeded=true;
    }
    unique.clear();
    Tools::mergeSortedVectors(vectors,unique,getenvMergeVectorsPriorityQueue());
  } else {
    // Fast path: all atoms are local and unshuffled; only record whether
    // any active action needs atoms at all.
    for(unsigned i=0; i<actions.size(); i++) {
      if(actions[i]->isActive()) {
        if(!actions[i]->getUnique().empty()) {
          atomsNeeded=true;
        }
      }
    }

  }

  share(unique);
}
235 :
// Share every atom (restricted to the local ones under domain decomposition).
// Called from share() on the first step so that masses and charges are
// gathered once for all; also usable whenever a full share is required.
void Atoms::shareAll() {
  unique.clear();
// keep in unique only those atoms that are local
  if(dd && shuffledAtoms>0) {
    for(int i=0; i<natoms; i++)
      if(g2l[i]>=0) {
        unique.push_back(AtomNumber::index(i)); // already sorted
      }
  } else {
    // Serial / unshuffled case: every atom index, in order.
    unique.resize(natoms);
    for(int i=0; i<natoms; i++) {
      unique[i]=AtomNumber::index(i);
    }
  }
  atomsNeeded=true;
  share(unique);
}
253 :
// Gather positions (and, on the first pass, masses/charges) for the atoms in
// `unique` from the MD code, and under domain decomposition start the MPI
// exchange so that every rank ends up with the data it needs. The matching
// receive side of the async protocol lives in wait().
void Atoms::share(const std::vector<AtomNumber>& unique) {
  plumed_assert( positionsHaveBeenSet==3 && massesHaveBeenSet );

  virial.zero();
  // Zero the force buffers: either wholesale, or only the entries we will
  // actually touch (the unique atoms plus all virtual atoms).
  if(zeroallforces || (int(gatindex.size())==natoms && !unique_serial)) {
    Tools::set_to_zero(forces);
  } else {
    for(const auto & p : unique) {
      forces[p.index()].zero();
    }
    for(unsigned i=getNatoms(); i<positions.size(); i++) {
      forces[i].zero(); // virtual atoms
    }
  }
  forceOnEnergy=0.0;
  mdatoms->getBox(box);

  if(!atomsNeeded) {
    return;
  }
  atomsNeeded=false;

  // Pull the positions from the MD code. Three cases:
  // 1) shuffled/distributed atoms: translate global->local via g2l;
  // 2) serial run restricted to the unique list: indices are global;
  // 3) everything local and unshuffled: bulk copy of all atoms.
  if(!(int(gatindex.size())==natoms && shuffledAtoms==0)) {
    uniq_index.resize(unique.size());
    for(unsigned i=0; i<unique.size(); i++) {
      uniq_index[i]=g2l[unique[i].index()];
    }
    mdatoms->getPositions(unique,uniq_index,positions);
  } else if(unique_serial) {
    uniq_index.resize(unique.size());
    for(unsigned i=0; i<unique.size(); i++) {
      uniq_index[i]=unique[i].index();
    }
    mdatoms->getPositions(unique,uniq_index,positions);
  } else {
// faster version, which retrieves all atoms
    mdatoms->getPositions(0,natoms,positions);
  }


// how many double per atom should be scattered:
  int ndata=3;
  if(!massAndChargeOK) {
    // First pass: also ship masses and charges (5 doubles per atom).
    // NaN-fill first so that any atom never received is clearly invalid.
    ndata=5;
    masses.assign(masses.size(),std::numeric_limits<double>::quiet_NaN());
    charges.assign(charges.size(),std::numeric_limits<double>::quiet_NaN());
    mdatoms->getCharges(gatindex,charges);
    mdatoms->getMasses(gatindex,masses);
  }

  if(dd && shuffledAtoms>0) {
    // Make sure the previous step's asynchronous sends have completed
    // before reusing the send buffers.
    if(dd.async) {
      for(unsigned i=0; i<dd.mpi_request_positions.size(); i++) {
        dd.mpi_request_positions[i].wait();
      }
      for(unsigned i=0; i<dd.mpi_request_index.size(); i++) {
        dd.mpi_request_index[i].wait();
      }
    }
    // Pack index + position (+ mass/charge on first pass) of each local
    // unique atom into flat send buffers.
    int count=0;
    for(const auto & p : unique) {
      dd.indexToBeSent[count]=p.index();
      dd.positionsToBeSent[ndata*count+0]=positions[p.index()][0];
      dd.positionsToBeSent[ndata*count+1]=positions[p.index()][1];
      dd.positionsToBeSent[ndata*count+2]=positions[p.index()][2];
      if(!massAndChargeOK) {
        dd.positionsToBeSent[ndata*count+3]=masses[p.index()];
        dd.positionsToBeSent[ndata*count+4]=charges[p.index()];
      }
      count++;
    }
    if(dd.async) {
      // Asynchronous path: Isend to every rank (tags 666=indices, 667=data);
      // the matching receives are posted in wait().
      asyncSent=true;
      dd.mpi_request_positions.resize(dd.Get_size());
      dd.mpi_request_index.resize(dd.Get_size());
      for(int i=0; i<dd.Get_size(); i++) {
        dd.mpi_request_index[i]=dd.Isend(&dd.indexToBeSent[0],count,i,666);
        dd.mpi_request_positions[i]=dd.Isend(&dd.positionsToBeSent[0],ndata*count,i,667);
      }
    } else {
      // Synchronous path: Allgatherv of indices and data, then scatter the
      // received values back into the global arrays.
      const int n=(dd.Get_size());
      std::vector<int> counts(n);
      std::vector<int> displ(n);
      std::vector<int> counts5(n);
      std::vector<int> displ5(n);
      dd.Allgather(count,counts);
      displ[0]=0;
      for(int i=1; i<n; ++i) {
        displ[i]=displ[i-1]+counts[i-1];
      }
      for(int i=0; i<n; ++i) {
        counts5[i]=counts[i]*ndata;
      }
      for(int i=0; i<n; ++i) {
        displ5[i]=displ[i]*ndata;
      }
      dd.Allgatherv(&dd.indexToBeSent[0],count,&dd.indexToBeReceived[0],&counts[0],&displ[0]);
      dd.Allgatherv(&dd.positionsToBeSent[0],ndata*count,&dd.positionsToBeReceived[0],&counts5[0],&displ5[0]);
      int tot=displ[n-1]+counts[n-1];
      for(int i=0; i<tot; i++) {
        positions[dd.indexToBeReceived[i]][0]=dd.positionsToBeReceived[ndata*i+0];
        positions[dd.indexToBeReceived[i]][1]=dd.positionsToBeReceived[ndata*i+1];
        positions[dd.indexToBeReceived[i]][2]=dd.positionsToBeReceived[ndata*i+2];
        if(!massAndChargeOK) {
          masses[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+3];
          charges[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+4];
        }
      }
    }
  }
}
365 :
366 56648 : void Atoms::wait() {
367 56648 : dataCanBeSet=false; // Everything should be set by this stage
368 : // How many double per atom should be scattered
369 : std::size_t ndata=3;
370 56648 : if(!massAndChargeOK) {
371 : ndata=5;
372 : }
373 :
374 56648 : if(dd) {
375 23624 : dd.Bcast(box,0);
376 : }
377 56648 : pbc.setBox(box);
378 :
379 56648 : if(collectEnergy) {
380 3989 : energy=md_energy;
381 : }
382 :
383 56648 : if(dd && shuffledAtoms>0) {
384 : // receive toBeReceived
385 2142 : if(asyncSent) {
386 : Communicator::Status status;
387 : std::size_t count=0;
388 9710 : for(int i=0; i<dd.Get_size(); i++) {
389 7588 : dd.Recv(&dd.indexToBeReceived[count],dd.indexToBeReceived.size()-count,i,666,status);
390 7588 : int c=status.Get_count<int>();
391 7588 : dd.Recv(&dd.positionsToBeReceived[ndata*count],dd.positionsToBeReceived.size()-ndata*count,i,667);
392 7588 : count+=c;
393 : }
394 64820 : for(int i=0; i<count; i++) {
395 62698 : positions[dd.indexToBeReceived[i]][0]=dd.positionsToBeReceived[ndata*i+0];
396 62698 : positions[dd.indexToBeReceived[i]][1]=dd.positionsToBeReceived[ndata*i+1];
397 62698 : positions[dd.indexToBeReceived[i]][2]=dd.positionsToBeReceived[ndata*i+2];
398 62698 : if(!massAndChargeOK) {
399 5946 : masses[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+3];
400 5946 : charges[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+4];
401 : }
402 : }
403 2122 : asyncSent=false;
404 : }
405 2142 : if(collectEnergy) {
406 0 : dd.Sum(energy);
407 : }
408 : }
409 : // I take note that masses and charges have been set once for all
410 : // at the beginning of the simulation.
411 : if(shareMassAndChargeOnlyAtFirstStep) {
412 56648 : massAndChargeOK=true;
413 : }
414 56648 : }
415 :
// Push the forces accumulated by PLUMED back to the MD code, and add the
// virial contribution on rank 0 (unless virial handling is disabled).
void Atoms::updateForces() {
  plumed_assert( forcesHaveBeenSet==3 );
  // A non-negligible force on the energy rescales all MD forces by
  // (1-forceOnEnergy) before adding the PLUMED forces.
  if(forceOnEnergy*forceOnEnergy>epsilon) {
    double alpha=1.0-forceOnEnergy;
    mdatoms->rescaleForces(gatindex,alpha);
    mdatoms->updateForces(gatindex,forces);
  } else {
    // Fast path when all atoms are local and unshuffled; otherwise update
    // only the unique atoms through the local-index map.
    if(!unique_serial && int(gatindex.size())==natoms && shuffledAtoms==0) {
      mdatoms->updateForces(gatindex,forces);
    } else {
      mdatoms->updateForces(unique,uniq_index,forces);
    }
  }
  if( !plumed.novirial && dd.Get_rank()==0 ) {
    plumed_assert( virialHasBeenSet );
    mdatoms->updateVirial(virial);
  }
}
434 :
435 1015 : void Atoms::setNatoms(int n) {
436 1015 : natoms=n;
437 1015 : positions.resize(n);
438 1015 : forces.resize(n);
439 1015 : masses.resize(n);
440 1015 : charges.resize(n);
441 1015 : gatindex.resize(n);
442 855301 : for(unsigned i=0; i<gatindex.size(); i++) {
443 854286 : gatindex[i]=i;
444 : }
445 1015 : }
446 :
447 :
// Register an ActionAtomistic so that its atom requests are considered
// when sharing data. Must be balanced by a call to remove().
void Atoms::add(ActionAtomistic*a) {
  actions.push_back(a);
}
451 :
452 10380 : void Atoms::remove(ActionAtomistic*a) {
453 10380 : auto f=find(actions.begin(),actions.end(),a);
454 10380 : plumed_massert(f!=actions.end(),"cannot remove an action registered to atoms");
455 10380 : actions.erase(f);
456 10380 : }
457 :
458 :
// Enable domain decomposition on the given communicator.
// Asynchronous sharing is chosen automatically for fewer than 10 ranks,
// and can be forced either way with PLUMED_ASYNC_SHARE=yes|no.
void Atoms::DomainDecomposition::enable(Communicator& c) {
  on=true;
  Set_comm(c.Get_comm());
  async=Get_size()<10;
  if(std::getenv("PLUMED_ASYNC_SHARE")) {
    std::string s(std::getenv("PLUMED_ASYNC_SHARE"));
    if(s=="yes") {
      async=true;
    } else if(s=="no") {
      async=false;
    } else {
      plumed_merror("PLUMED_ASYNC_SHARE variable is set to " + s + "; should be yes or no");
    }
  }
}
474 :
// Declare how many atoms are local to this rank, sizing the local index
// array and the MPI exchange buffers (5 doubles per atom: x,y,z,mass,charge).
void Atoms::setAtomsNlocal(int n) {
  gatindex.resize(n);
  g2l.resize(natoms,-1);
  if(dd) {
// Since these vectors are sent with MPI by using e.g.
// &dd.positionsToBeSent[0]
// we make sure they are non-zero-sized so as to
// avoid errors when doing boundary check
    if(n==0) {
      n++;
    }
    dd.positionsToBeSent.resize(n*5,0.0);
    dd.positionsToBeReceived.resize(natoms*5,0.0);
    dd.indexToBeSent.resize(n,0);
    dd.indexToBeReceived.resize(natoms,0);
  }
}
492 :
// Import the local->global atom index map from the MD code (1-based when
// fortran=true), rebuild the global->local map, and detect whether atoms
// are shuffled (i.e. not an identity mapping on any rank).
void Atoms::setAtomsGatindex(const TypesafePtr & g,bool fortran) {
  plumed_massert( g || gatindex.size()==0, "NULL gatindex pointer with non-zero local atoms");
  auto gg=g.get<const int*>(gatindex.size());
  ddStep=plumed.getStep();
  if(fortran) {
    // Fortran indices are 1-based: shift down by one.
    for(unsigned i=0; i<gatindex.size(); i++) {
      gatindex[i]=gg[i]-1;
    }
  } else {
    for(unsigned i=0; i<gatindex.size(); i++) {
      gatindex[i]=gg[i];
    }
  }
  // Invalidate the global->local map before rebuilding it below.
  for(unsigned i=0; i<g2l.size(); i++) {
    g2l[i]=-1;
  }
  if( gatindex.size()==natoms ) {
    shuffledAtoms=0;
    for(unsigned i=0; i<gatindex.size(); i++) {
      if( gatindex[i]!=i ) {
        shuffledAtoms=1;
        break;
      }
    }
  } else {
    shuffledAtoms=1;
  }
  // Any rank seeing shuffled atoms makes the whole run "shuffled".
  if(dd) {
    dd.Sum(shuffledAtoms);
  }
  for(unsigned i=0; i<gatindex.size(); i++) {
    g2l[gatindex[i]]=i;
  }

  for(unsigned i=0; i<actions.size(); i++) {
// keep in unique only those atoms that are local
    actions[i]->updateUniqueLocal();
  }
  unique.clear();
}
533 :
// Declare that the local atoms are the contiguous range [start, start+nlocal),
// rebuilding the global->local map accordingly. With fewer local atoms than
// the total, the run is flagged as shuffled (distributed).
void Atoms::setAtomsContiguous(int start) {
  ddStep=plumed.getStep();
  for(unsigned i=0; i<gatindex.size(); i++) {
    gatindex[i]=start+i;
  }
  // Invalidate then rebuild the global->local map.
  for(unsigned i=0; i<g2l.size(); i++) {
    g2l[i]=-1;
  }
  for(unsigned i=0; i<gatindex.size(); i++) {
    g2l[gatindex[i]]=i;
  }
  if(gatindex.size()<natoms) {
    shuffledAtoms=1;
  }
  for(unsigned i=0; i<actions.size(); i++) {
// keep in unique only those atoms that are local
    actions[i]->updateUniqueLocal();
  }
  unique.clear();
}
554 :
// Recreate the MD-atoms adapter for the given real precision
// (size in bytes of the MD code's floating-point type).
void Atoms::setRealPrecision(int p) {
  mdatoms=MDAtomsBase::create(p);
}
558 :
// Return the real precision (bytes) currently used by the MD-atoms adapter.
int Atoms::getRealPrecision()const {
  return mdatoms->getRealPrecision();
}
562 :
// Convert a scalar from the MD code's real type to double.
void Atoms::MD2double(const TypesafePtr & m,double&d)const {
  plumed_assert(mdatoms);
  mdatoms->MD2double(m,d);
}
// Convert a double into the MD code's real type, writing through m.
void Atoms::double2MD(const double&d,const TypesafePtr & m)const {
  plumed_assert(mdatoms);
  mdatoms->double2MD(d,m);
}
571 :
// Propagate the current internal and MD unit systems to the adapter.
void Atoms::updateUnits() {
  mdatoms->setUnits(units,MDUnits);
}
575 :
// Read the MD timestep. The stored value is in MD units; conversion to
// internal units happens in getTimeStep().
void Atoms::setTimeStep(const TypesafePtr & p) {
  MD2double(p,timestep);
// The following is to avoid extra digits in case the MD code uses floats
// e.g.: float f=0.002 when converted to double becomes 0.002000000094995
// To avoid this, we keep only up to 6 significant digits after first one
  // NOTE(review): log10 assumes a strictly positive timestep here — confirm
  // that single-precision codes never pass 0.
  if(getRealPrecision()<=4) {
    double magnitude=std::pow(10,std::floor(std::log10(timestep)));
    timestep=std::round(timestep/magnitude*1e6)/1e6*magnitude;
  }
}
586 :
// Return the timestep converted from MD units to PLUMED internal units.
double Atoms::getTimeStep()const {
  return timestep/units.getTime()*MDUnits.getTime();
}
590 :
// Read kB*T from the MD code (stored in MD units; see getKbT()).
void Atoms::setKbT(const TypesafePtr & p) {
  MD2double(p,kbT);
}
594 :
// Return kB*T converted from MD units to PLUMED internal units.
double Atoms::getKbT()const {
  return kbT/units.getEnergy()*MDUnits.getEnergy();
}
598 :
599 :
600 116 : void Atoms::createFullList(const TypesafePtr & n) {
601 116 : if(!massAndChargeOK && shareMassAndChargeOnlyAtFirstStep) {
602 7 : n.set(int(natoms));
603 7 : fullList.resize(natoms);
604 803 : for(unsigned i=0; i<natoms; i++) {
605 796 : fullList[i]=i;
606 : }
607 : } else {
608 : // We update here the unique list defined at Atoms::unique.
609 : // This is not very clear, and probably should be coded differently.
610 : // Hopefully this fix the longstanding issue with NAMD.
611 109 : unique.clear();
612 : std::vector<const std::vector<AtomNumber>*> vectors;
613 892 : for(unsigned i=0; i<actions.size(); i++) {
614 783 : if(actions[i]->isActive()) {
615 625 : if(!actions[i]->getUnique().empty()) {
616 546 : atomsNeeded=true;
617 : // unique are the local atoms
618 546 : vectors.push_back(&actions[i]->getUnique());
619 : }
620 : }
621 : }
622 : unique.clear();
623 109 : Tools::mergeSortedVectors(vectors,unique,getenvMergeVectorsPriorityQueue());
624 109 : fullList.clear();
625 109 : fullList.reserve(unique.size());
626 5012 : for(const auto & p : unique) {
627 4903 : fullList.push_back(p.index());
628 : }
629 109 : n.set(int(fullList.size()));
630 : }
631 116 : }
632 :
633 116 : void Atoms::getFullList(const TypesafePtr & x) {
634 : auto xx=x.template get<const int**>();
635 116 : if(!fullList.empty()) {
636 110 : *xx=&fullList[0];
637 : } else {
638 6 : *xx=NULL;
639 : }
640 116 : }
641 :
642 116 : void Atoms::clearFullList() {
643 116 : fullList.resize(0);
644 116 : }
645 :
// Initialize the atom bookkeeping once natoms is known.
void Atoms::init() {
// Default: set domain decomposition to NO-decomposition, waiting for
// further instruction
  if(dd) {
    setAtomsNlocal(natoms);
    setAtomsContiguous(0);
  }
}
654 :
// Turn on domain decomposition over the given communicator.
void Atoms::setDomainDecomposition(Communicator& comm) {
  dd.enable(comm);
}
658 :
// Resize all per-atom arrays to n entries (physical + virtual atoms).
void Atoms::resizeVectors(unsigned n) {
  positions.resize(n);
  forces.resize(n);
  masses.resize(n);
  charges.resize(n);
}
665 :
666 7178 : AtomNumber Atoms::addVirtualAtom(ActionWithVirtualAtom*a) {
667 7178 : unsigned n=positions.size();
668 7178 : resizeVectors(n+1);
669 7178 : virtualAtomsActions.push_back(a);
670 7178 : return AtomNumber::index(n);
671 : }
672 :
673 7178 : void Atoms::removeVirtualAtom(ActionWithVirtualAtom*a) {
674 7178 : unsigned n=positions.size();
675 7178 : plumed_massert(a==virtualAtomsActions[virtualAtomsActions.size()-1],"virtual atoms should be destroyed in reverse creation order");
676 7178 : resizeVectors(n-1);
677 : virtualAtomsActions.pop_back();
678 7178 : }
679 :
// Register a named group of atoms; the name must not already exist.
void Atoms::insertGroup(const std::string&name,const std::vector<AtomNumber>&a) {
  plumed_massert(groups.count(name)==0,"group named "+name+" already exists");
  groups[name]=a;
}
684 :
// Remove a previously registered named group; the name must exist.
void Atoms::removeGroup(const std::string&name) {
  plumed_massert(groups.count(name)==1,"cannot remove group named "+name);
  groups.erase(name);
}
689 :
// Serialize positions, box and energy as raw doubles (checkpoint format).
// NOTE(review): raw native-endian layout — presumably checkpoints are only
// read back on the same architecture; confirm before relying on portability.
void Atoms::writeBinary(std::ostream&o)const {
  o.write(reinterpret_cast<const char*>(&positions[0][0]),natoms*3*sizeof(double));
  o.write(reinterpret_cast<const char*>(&box(0,0)),9*sizeof(double));
  o.write(reinterpret_cast<const char*>(&energy),sizeof(double));
}
695 :
// Deserialize positions, box and energy written by writeBinary(), and
// refresh the periodic-boundary object from the restored box.
void Atoms::readBinary(std::istream&i) {
  i.read(reinterpret_cast<char*>(&positions[0][0]),natoms*3*sizeof(double));
  i.read(reinterpret_cast<char*>(&box(0,0)),9*sizeof(double));
  i.read(reinterpret_cast<char*>(&energy),sizeof(double));
  pbc.setBox(box);
}
702 :
703 633 : double Atoms::getKBoltzmann()const {
704 633 : if(naturalUnits || MDnaturalUnits) {
705 : return 1.0;
706 : } else {
707 627 : return kBoltzmann/units.getEnergy();
708 : }
709 : }
710 :
711 0 : double Atoms::getMDKBoltzmann()const {
712 0 : if(naturalUnits || MDnaturalUnits) {
713 : return 1.0;
714 : } else {
715 0 : return kBoltzmann/MDUnits.getEnergy();
716 : }
717 : }
718 :
719 0 : void Atoms::getLocalMasses(std::vector<double>& localMasses) {
720 0 : localMasses.resize(gatindex.size());
721 0 : for(unsigned i=0; i<gatindex.size(); i++) {
722 0 : localMasses[i] = masses[gatindex[i]];
723 : }
724 0 : }
725 :
// Collect the positions of the atoms local to this rank, straight from
// the MD-atoms adapter.
void Atoms::getLocalPositions(std::vector<Vector>& localPositions) {
  localPositions.resize(gatindex.size());
  mdatoms->getLocalPositions(localPositions);
}
730 :
731 450 : void Atoms::getLocalForces(std::vector<Vector>& localForces) {
732 450 : localForces.resize(gatindex.size());
733 16650 : for(unsigned i=0; i<gatindex.size(); i++) {
734 16200 : localForces[i] = forces[gatindex[i]];
735 : }
736 450 : }
737 :
// Collect the MD code's own forces on the atoms local to this rank.
void Atoms::getLocalMDForces(std::vector<Vector>& localForces) {
  localForces.resize(gatindex.size());
  for(unsigned i=0; i<gatindex.size(); i++) {
    localForces[i] = mdatoms->getMDforces(i);
  }
}
744 :
// Forward the pointer to a named extra CV value to the MD-atoms adapter.
void Atoms::setExtraCV(const std::string &name,const TypesafePtr & p) {
  mdatoms->setExtraCV(name,p);
}
748 :
// Forward the pointer to a named extra CV force to the MD-atoms adapter.
void Atoms::setExtraCVForce(const std::string &name,const TypesafePtr & p) {
  mdatoms->setExtraCVForce(name,p);
}
752 :
// Read the current value of a named extra CV from the MD-atoms adapter.
double Atoms::getExtraCV(const std::string &name) {
  return mdatoms->getExtraCV(name);
}
756 :
// Add force f to a named extra CV through the MD-atoms adapter.
void Atoms::updateExtraCVForce(const std::string &name,double f) {
  mdatoms->updateExtraCVForce(name,f);
}
760 :
// Mark whether a named extra CV will be needed this step.
void Atoms::setExtraCVNeeded(const std::string &name,bool needed) {
  mdatoms->setExtraCVNeeded(name,needed);
}
764 :
// Query whether a named extra CV has been flagged as needed this step.
bool Atoms::isExtraCVNeeded(const std::string &name) const {
  return mdatoms->isExtraCVNeeded(name);
}
768 :
// Clear all extra-CV "needed" flags; called at the start of every step.
void Atoms::resetExtraCVNeeded() {
  mdatoms->resetExtraCVNeeded();
}
772 :
773 :
774 : }
|