Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2011-2023 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 : #include "MDAtoms.h"
23 : #include "tools/Tools.h"
24 : #include "tools/OpenMP.h"
25 : #include "tools/Exception.h"
26 : #include "tools/Units.h"
27 : #include <algorithm>
28 : #include <string>
29 : #include <map>
30 :
31 : namespace PLMD {
32 :
33 : template<typename T>
34 112430 : static void getPointers(const TypesafePtr & p,const TypesafePtr & px,const TypesafePtr & py,const TypesafePtr & pz,unsigned maxel,T*&ppx,T*&ppy,T*&ppz,unsigned & stride) {
35 112430 : if(p) {
36 112392 : auto p_=p.get<T*>({maxel,3});
37 112392 : ppx=p_;
38 112392 : ppy=p_+1;
39 112392 : ppz=p_+2;
40 112392 : stride=3;
41 38 : } else if(px && py && pz) {
42 2 : ppx=px.get<T*>(maxel);
43 2 : ppy=py.get<T*>(maxel);
44 2 : ppz=pz.get<T*>(maxel);
45 2 : stride=1;
46 : } else {
47 36 : ppx=nullptr;
48 36 : ppy=nullptr;
49 36 : ppz=nullptr;
50 36 : stride=0;
51 : }
52 112430 : }
53 :
/// Class containing the pointers to the MD data
/// It is templated so that single and double precision versions coexist.
/// All pointers are stored as TypesafePtr, which validates type and size
/// at assignment/access time. Scale factors (set in setUnits) convert
/// between PLUMED internal units and the MD code's units.
/// IT IS STILL UNDOCUMENTED. IT PROBABLY NEEDS A STRONG CLEANUP
template <class T>
class MDAtomsTyped:
  public MDAtomsBase {
  // Unit-conversion factors, computed in setUnits().
  T scalep=1.0; // factor to scale positions
  T scalef=1.0; // factor to scale forces
  T scaleb=1.0; // factor to scale box
  T scalev=1.0; // factor to scale virial
  T scalec=1.0; // factor to scale charges
  T scalem=1.0; // factor to scale masses
  // Pointers into arrays owned by the MD code:
  TypesafePtr m;        // masses
  TypesafePtr c;        // charges
  TypesafePtr p;        // positions, interleaved x/y/z (alternative to px/py/pz)
  TypesafePtr px,py,pz; // positions, one array per component
  TypesafePtr f;        // forces, interleaved x/y/z (alternative to fx/fy/fz)
  TypesafePtr fx,fy,fz; // forces, one array per component
  TypesafePtr box;      // 3x3 cell matrix
  TypesafePtr virial;   // 3x3 virial matrix
  // Extra collective variables computed by the MD code, keyed by name.
  std::map<std::string,TypesafePtr> extraCV;
  std::map<std::string,TypesafePtr> extraCVForce;
  std::map<std::string,bool> extraCVNeeded;
public:
  void setm(const TypesafePtr & m) override;
  void setc(const TypesafePtr & m) override;
  void setBox(const TypesafePtr & ) override;
  void setp(const TypesafePtr & p) override;
  void setVirial(const TypesafePtr & ) override;
  void setf(const TypesafePtr & f) override;
  void setp(const TypesafePtr & p,int i) override;
  void setf(const TypesafePtr & f,int i) override;
  void setUnits(const Units&,const Units&) override;
  /// Register the pointer to an extra CV value computed by the MD code.
  void setExtraCV(const std::string &name,const TypesafePtr & p) override {
    p.get<T>(); // just check type and discard pointer
    extraCV[name]=p.copy();
  }
  /// Register the pointer where the force on an extra CV should be accumulated.
  void setExtraCVForce(const std::string &name,const TypesafePtr & p) override {
    p.get<T*>(); // just check type and discard pointer
    extraCVForce[name]=p.copy();
  }
  /// Read the current value of a registered extra CV, converted to double.
  double getExtraCV(const std::string &name) override {
    auto search=extraCV.find(name);
    if(search != extraCV.end()) {
      return static_cast<double>(search->second.template get<T>());
    } else {
      // plumed_error is expected not to return (it should throw), so no
      // value is needed after this branch.
      plumed_error() << "Unable to access extra cv named '" << name << "'.\nNotice that extra cvs need to be calculated in the MD code.";
    }
  }

  /// Accumulate force f onto the MD-side buffer registered for this extra CV.
  void updateExtraCVForce(const std::string &name,double f) override {
    *extraCVForce[name].template get<T*>()+=static_cast<T>(f);
  }

  /// Mark whether PLUMED needs this extra CV at the current step.
  void setExtraCVNeeded(const std::string &name,bool needed=true) override {
    extraCVNeeded[name]=needed;
  }

  /// Query whether an extra CV was marked as needed; unknown names count as not needed.
  bool isExtraCVNeeded(const std::string &name) const override {
    auto search=extraCVNeeded.find(name);
    if(search != extraCVNeeded.end()) {
      return search->second;
    }
    return false;
  }

  /// Clear the "needed" flag of every registered extra CV.
  void resetExtraCVNeeded() override {
    for(auto & i : extraCVNeeded) {
      i.second=false;
    }
  }

  /// Read a scalar of the MD code's precision and widen it to double.
  void MD2double(const TypesafePtr & m,double&d)const override {
    d=double(m.template get<T>());
  }
  /// Narrow a double to the MD code's precision and store it through the pointer.
  void double2MD(const double&d,const TypesafePtr & m)const override {
    m.set(T(d));
  }
  /// Return the force on one atom, converted from MD units back to PLUMED units.
  Vector getMDforces(const unsigned index)const override {
    unsigned stride;
    const T* ffx;
    const T* ffy;
    const T* ffz;
    // note: index+1 because we are supposed to pass here the size of the array, which should be at least the element we are asking for + 1
    getPointers(f,fx,fy,fz,index+1,ffx,ffy,ffz,stride);
    Vector force(ffx[stride*index],ffy[stride*index],ffz[stride*index]);
    return force/scalef;
  }
  void getBox(Tensor &) const override;
  void getPositions(const std::vector<int>&index,std::vector<Vector>&positions) const override;
  void getPositions(const std::vector<AtomNumber>&index,const std::vector<unsigned>&i,std::vector<Vector>&positions) const override;
  void getPositions(unsigned j,unsigned k,std::vector<Vector>&positions) const override;
  void getLocalPositions(std::vector<Vector>&p) const override;
  void getMasses(const std::vector<int>&index,std::vector<double>&) const override;
  void getCharges(const std::vector<int>&index,std::vector<double>&) const override;
  void updateVirial(const Tensor&) const override;
  void updateForces(const std::vector<int>&index,const std::vector<Vector>&) override;
  void updateForces(const std::vector<AtomNumber>&index,const std::vector<unsigned>&i,const std::vector<Vector>&forces) override;
  void rescaleForces(const std::vector<int>&index,double factor) override;
  unsigned getRealPrecision()const override;
};
155 :
156 : template <class T>
157 1056 : void MDAtomsTyped<T>::setUnits(const Units& units,const Units& MDUnits) {
158 1056 : double lscale=units.getLength()/MDUnits.getLength();
159 1056 : double escale=units.getEnergy()/MDUnits.getEnergy();
160 1056 : double cscale=units.getCharge()/MDUnits.getCharge();
161 1056 : double mscale=units.getMass()/MDUnits.getMass();
162 : // scalep and scaleb are used to convert MD to plumed
163 1056 : scalep=1.0/lscale;
164 1056 : scaleb=1.0/lscale;
165 : // scalef and scalev are used to convert plumed to MD
166 1056 : scalef=escale/lscale;
167 1056 : scalev=escale;
168 1056 : scalec=1.0/cscale;
169 1056 : scalem=1.0/mscale;
170 1056 : }
171 :
172 : template <class T>
173 110634 : void MDAtomsTyped<T>::getBox(Tensor&box)const {
174 110634 : auto b=this->box.template get<const T*>({3,3});
175 110634 : if(b)
176 426368 : for(int i=0; i<3; i++)
177 1279104 : for(int j=0; j<3; j++) {
178 959328 : box(i,j)=b[3*i+j]*scaleb;
179 : } else {
180 4042 : box.zero();
181 : }
182 110634 : }
183 :
184 : template <class T>
185 0 : void MDAtomsTyped<T>::getPositions(const std::vector<int>&index,std::vector<Vector>&positions)const {
186 : unsigned stride;
187 : const T* ppx;
188 : const T* ppy;
189 : const T* ppz;
190 0 : getPointers(p,px,py,pz,index.size(),ppx,ppy,ppz,stride);
191 0 : plumed_assert(index.size()==0 || (ppx && ppy && ppz));
192 : // cannot be parallelized with omp because access to positions is not ordered
193 0 : for(unsigned i=0; i<index.size(); ++i) {
194 0 : positions[index[i]][0]=ppx[stride*i]*scalep;
195 0 : positions[index[i]][1]=ppy[stride*i]*scalep;
196 0 : positions[index[i]][2]=ppz[stride*i]*scalep;
197 : }
198 0 : }
199 :
200 : template <class T>
201 26215 : void MDAtomsTyped<T>::getPositions(const std::vector<AtomNumber>&index,const std::vector<unsigned>&i, std::vector<Vector>&positions)const {
202 : unsigned stride;
203 : const T* ppx;
204 : const T* ppy;
205 : const T* ppz;
206 : #ifndef NDEBUG
207 : // bounds are only checked in debug mode since they require this extra step that is potentially expensive
208 : const unsigned maxel=(i.size()>0?*std::max_element(i.begin(),i.end())+1:0);
209 : #else
210 : const unsigned maxel=0;
211 : #endif
212 26215 : getPointers(p,px,py,pz,maxel,ppx,ppy,ppz,stride);
213 26215 : plumed_assert(index.size()==0 || (ppx && ppy && ppz));
214 : // cannot be parallelized with omp because access to positions is not ordered
215 : unsigned k=0;
216 426360 : for(const auto & p : index) {
217 400145 : positions[p.index()][0]=ppx[stride*i[k]]*scalep;
218 400145 : positions[p.index()][1]=ppy[stride*i[k]]*scalep;
219 400145 : positions[p.index()][2]=ppz[stride*i[k]]*scalep;
220 400145 : k++;
221 : }
222 26215 : }
223 :
224 : template <class T>
225 29191 : void MDAtomsTyped<T>::getPositions(unsigned j,unsigned k,std::vector<Vector>&positions)const {
226 : unsigned stride;
227 : const T* ppx;
228 : const T* ppy;
229 : const T* ppz;
230 29191 : getPointers(p,px,py,pz,k,ppx,ppy,ppz,stride);
231 29191 : plumed_assert(k==j || (ppx && ppy && ppz));
232 29191 : #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(&positions[j],(k-j)))
233 : for(unsigned i=j; i<k; ++i) {
234 : positions[i][0]=ppx[stride*i]*scalep;
235 : positions[i][1]=ppy[stride*i]*scalep;
236 : positions[i][2]=ppz[stride*i]*scalep;
237 : }
238 29191 : }
239 :
240 :
241 : template <class T>
242 450 : void MDAtomsTyped<T>::getLocalPositions(std::vector<Vector>&positions)const {
243 : unsigned stride;
244 : const T* ppx;
245 : const T* ppy;
246 : const T* ppz;
247 450 : getPointers(p,px,py,pz,positions.size(),ppx,ppy,ppz,stride);
248 450 : plumed_assert(positions.size()==0 || (ppx && ppy && ppz));
249 450 : #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(positions))
250 : for(unsigned i=0; i<positions.size(); ++i) {
251 : positions[i][0]=ppx[stride*i]*scalep;
252 : positions[i][1]=ppy[stride*i]*scalep;
253 : positions[i][2]=ppz[stride*i]*scalep;
254 : }
255 450 : }
256 :
257 :
258 : template <class T>
259 905 : void MDAtomsTyped<T>::getMasses(const std::vector<int>&index,std::vector<double>&masses)const {
260 905 : auto mm=m.get<const T*>(index.size());
261 905 : if(mm)
262 850092 : for(unsigned i=0; i<index.size(); ++i) {
263 849187 : masses[index[i]]=scalem*mm[i];
264 : } else
265 0 : for(unsigned i=0; i<index.size(); ++i) {
266 0 : masses[index[i]]=0.0;
267 : }
268 905 : }
269 :
270 : template <class T>
271 905 : void MDAtomsTyped<T>::getCharges(const std::vector<int>&index,std::vector<double>&charges)const {
272 905 : auto cc=c.get<const T*>(index.size());
273 905 : if(cc)
274 848483 : for(unsigned i=0; i<index.size(); ++i) {
275 847697 : charges[index[i]]=scalec*cc[i];
276 : } else
277 1609 : for(unsigned i=0; i<index.size(); ++i) {
278 1490 : charges[index[i]]=0.0;
279 : }
280 905 : }
281 :
282 : template <class T>
283 41942 : void MDAtomsTyped<T>::updateVirial(const Tensor&virial)const {
284 41942 : auto v=this->virial.template get<T*>({3,3});
285 41942 : if(v)
286 167768 : for(int i=0; i<3; i++)
287 503304 : for(int j=0; j<3; j++) {
288 377478 : v[3*i+j]+=T(virial(i,j)*scalev);
289 : }
290 41942 : }
291 :
292 : template <class T>
293 27284 : void MDAtomsTyped<T>::updateForces(const std::vector<AtomNumber>&index,const std::vector<unsigned>&i,const std::vector<Vector>&forces) {
294 : unsigned stride;
295 : T* ffx;
296 : T* ffy;
297 : T* ffz;
298 : #ifndef NDEBUG
299 : // bounds are only checked in debug mode since they require this extra step that is potentially expensive
300 : const unsigned maxel=(i.size()>0?*std::max_element(i.begin(),i.end())+1:0);
301 : #else
302 : const unsigned maxel=0;
303 : #endif
304 27284 : getPointers(f,fx,fy,fz,maxel,ffx,ffy,ffz,stride);
305 27284 : plumed_assert(index.size()==0 || (ffx && ffy && ffz));
306 : unsigned k=0;
307 421775 : for(const auto & p : index) {
308 394491 : ffx[stride*i[k]]+=scalef*T(forces[p.index()][0]);
309 394491 : ffy[stride*i[k]]+=scalef*T(forces[p.index()][1]);
310 394491 : ffz[stride*i[k]]+=scalef*T(forces[p.index()][2]);
311 394491 : k++;
312 : }
313 27284 : }
314 :
315 : template <class T>
316 29240 : void MDAtomsTyped<T>::updateForces(const std::vector<int>&index,const std::vector<Vector>&forces) {
317 : unsigned stride;
318 : T* ffx;
319 : T* ffy;
320 : T* ffz;
321 29240 : getPointers(f,fx,fy,fz,index.size(),ffx,ffy,ffz,stride);
322 29240 : plumed_assert(index.size()==0 || (ffx && ffy && ffz));
323 29240 : #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(ffx,stride*index.size()))
324 : for(unsigned i=0; i<index.size(); ++i) {
325 : ffx[stride*i]+=scalef*T(forces[index[i]][0]);
326 : ffy[stride*i]+=scalef*T(forces[index[i]][1]);
327 : ffz[stride*i]+=scalef*T(forces[index[i]][2]);
328 : }
329 29240 : }
330 :
331 : template <class T>
332 50 : void MDAtomsTyped<T>::rescaleForces(const std::vector<int>&index,double factor) {
333 : unsigned stride;
334 : T* ffx;
335 : T* ffy;
336 : T* ffz;
337 50 : getPointers(f,fx,fy,fz,index.size(),ffx,ffy,ffz,stride);
338 50 : plumed_assert(index.size()==0 || (ffx && ffy && ffz));
339 50 : auto v=virial.get<T*>({3,3});
340 50 : if(v)
341 0 : for(unsigned i=0; i<3; i++)
342 0 : for(unsigned j=0; j<3; j++) {
343 0 : v[3*i+j]*=T(factor);
344 : }
345 50 : #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(ffx,stride*index.size()))
346 : for(unsigned i=0; i<index.size(); ++i) {
347 : ffx[stride*i]*=T(factor);
348 : ffy[stride*i]*=T(factor);
349 : ffz[stride*i]*=T(factor);
350 : }
351 50 : }
352 :
353 : template <class T>
354 1929 : unsigned MDAtomsTyped<T>::getRealPrecision()const {
355 1929 : return sizeof(T);
356 : }
357 :
358 : template <class T>
359 58127 : void MDAtomsTyped<T>::setp(const TypesafePtr & pp) {
360 58127 : pp.get<const T*>(); // just check type and discard pointer
361 58127 : p=pp.copy();
362 58127 : px=TypesafePtr();
363 58127 : py=TypesafePtr();
364 58127 : pz=TypesafePtr();
365 58127 : }
366 :
367 : template <class T>
368 53986 : void MDAtomsTyped<T>::setBox(const TypesafePtr & pp) {
369 53986 : pp.get<const T*>({3,3}); // just check type and size and discard pointer
370 53986 : box=pp.copy();
371 53986 : }
372 :
373 :
374 : template <class T>
375 58127 : void MDAtomsTyped<T>::setf(const TypesafePtr & ff) {
376 58127 : ff.get<T*>(); // just check type and discard pointer
377 58127 : f=ff.copy();
378 58127 : fx=TypesafePtr();
379 58127 : fy=TypesafePtr();
380 58127 : fz=TypesafePtr();
381 58127 : }
382 :
383 : template <class T>
384 3 : void MDAtomsTyped<T>::setp(const TypesafePtr & pp,int i) {
385 3 : p=TypesafePtr();
386 3 : pp.get<const T*>(); // just check type and discard pointer
387 3 : if(i==0) {
388 1 : px=pp.copy();
389 : }
390 3 : if(i==1) {
391 1 : py=pp.copy();
392 : }
393 3 : if(i==2) {
394 1 : pz=pp.copy();
395 : }
396 3 : }
397 :
398 : template <class T>
399 48328 : void MDAtomsTyped<T>::setVirial(const TypesafePtr & pp) {
400 48328 : pp.get<T*>({3,3}); // just check type and size and discard pointer
401 48326 : virial=pp.copy();
402 48326 : }
403 :
404 :
405 : template <class T>
406 3 : void MDAtomsTyped<T>::setf(const TypesafePtr & ff,int i) {
407 3 : f=TypesafePtr();;
408 3 : ff.get<T*>(); // just check type and discard pointer
409 3 : if(i==0) {
410 1 : fx=ff.copy();
411 : }
412 3 : if(i==1) {
413 1 : fy=ff.copy();
414 : }
415 3 : if(i==2) {
416 1 : fz=ff.copy();
417 : }
418 3 : }
419 :
420 : template <class T>
421 58128 : void MDAtomsTyped<T>::setm(const TypesafePtr & m) {
422 58128 : m.get<const T*>(); // just check type and discard pointer
423 58128 : this->m=m.copy();
424 58128 : }
425 :
426 : template <class T>
427 48220 : void MDAtomsTyped<T>::setc(const TypesafePtr & c) {
428 48220 : c.get<const T*>(); // just check type and discard pointer
429 48220 : this->c=c.copy();
430 48220 : }
431 :
432 806627 : std::unique_ptr<MDAtomsBase> MDAtomsBase::create(unsigned p) {
433 806627 : if(p==sizeof(double)) {
434 806626 : return Tools::make_unique<MDAtomsTyped<double>>();
435 1 : } else if (p==sizeof(float)) {
436 1 : return Tools::make_unique<MDAtomsTyped<float>>();
437 : }
438 0 : plumed_error() << "Cannot create an MD interface with sizeof(real)==" << p;
439 : }
440 :
441 : }
442 :
|