Simpatico v1.10
Exchanger.cpp
/*
* Simpatico - Simulation Package for Polymeric and Molecular Liquids
*
* Copyright 2010 - 2017, The Regents of the University of Minnesota
* Distributed under the terms of the GNU General Public License.
*/

#include "Exchanger.h"
#include "Domain.h"
#include "Buffer.h"
#include <ddMd/chemistry/Atom.h>
#include <ddMd/chemistry/Group.h>
#include <ddMd/storage/AtomStorage.h>
#include <ddMd/storage/AtomIterator.h>
#include <ddMd/storage/GhostIterator.h>
#include <ddMd/storage/GroupExchanger.h>
#include <util/format/Dbl.h>
#include <util/global.h>

#include <algorithm>
#include <string>

#ifdef UTIL_DEBUG
//#define DDMD_EXCHANGER_DEBUG
#endif

namespace DdMd
{
   using namespace Util;

   /*
   * Constructor.
   */
   Exchanger::Exchanger()
    : sendArray_(),
      recvArray_(),
      bound_(),
      inner_(),
      outer_(),
      gridFlags_(),
      boundaryPtr_(0),
      domainPtr_(0),
      atomStoragePtr_(0),
      groupExchangers_(),
      bufferPtr_(0),
      pairCutoff_(-1.0),
      timer_(Exchanger::NTime)
   {  groupExchangers_.reserve(8); }

   /*
   * Destructor.
   */
   Exchanger::~Exchanger()
   {}

   /*
   * Set pointers to associated objects.
   */
   void Exchanger::associate(const Domain& domain, const Boundary& boundary,
                             AtomStorage& atomStorage, Buffer& buffer)
   {
      domainPtr_ = &domain;
      boundaryPtr_ = &boundary;
      atomStoragePtr_ = &atomStorage;
      bufferPtr_ = &buffer;
   }
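
   /*
   * Usage sketch (illustrative): one plausible way a caller wires and
   * drives an Exchanger, inferred from the documentation of exchange(),
   * update(), and reverseUpdate() below. The driver objects and the
   * reneighboring test shown here are assumptions, not part of this class.
   *
   *    Exchanger exchanger;
   *    exchanger.associate(domain, boundary, atomStorage, buffer);
   *    exchanger.allocate();               // requires an allocated Buffer
   *    exchanger.setPairCutoff(cutoff);    // slab width for ghosts
   *
   *    for (int iStep = 0; iStep < nStep; ++iStep) {
   *       if (needsReneighboring) {
   *          exchanger.exchange();      // migrate atoms, rebuild ghosts
   *       } else {
   *          exchanger.update();        // update ghost positions only
   *       }
   *       computeForces();
   *       if (reverseUpdateFlag) {
   *          exchanger.reverseUpdate(); // accumulate ghost forces
   *       }
   *    }
   */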

   /*
   * Add a GroupExchanger to an internal list.
   */
   void Exchanger::addGroupExchanger(GroupExchanger& groupExchanger)
   {  groupExchangers_.append(groupExchanger); }

   /*
   * Allocate memory.
   */
   void Exchanger::allocate()
   {
      // Preconditions
      if (!bufferPtr_->isInitialized()) {
         UTIL_THROW("Buffer must be allocated before Exchanger");
      }

      int sendRecvCapacity = bufferPtr_->ghostCapacity()/2;

      // Reserve space for all ghost send and recv arrays
      int i, j;
      for (i = 0; i < Dimension; ++i) {
         for (j = 0; j < 2; ++j) {
            sendArray_(i, j).reserve(sendRecvCapacity);
            recvArray_(i, j).reserve(sendRecvCapacity);
         }
      }
   }

   /*
   * Set slab width used for ghosts.
   */
   void Exchanger::setPairCutoff(double pairCutoff)
   {  pairCutoff_ = pairCutoff; }
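
   /*
   * Note (illustrative assumption, not taken from this file): the slab
   * width set here is typically the nonbonded pair cutoff plus any
   * Verlet-list skin used by the caller, e.g.
   *
   *    exchanger.setPairCutoff(potentialCutoff + skin);
   *
   * so that every pair within the cutoff can be evaluated from local and
   * ghost atoms between reneighboring steps.
   */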

   #ifdef UTIL_MPI

   /*
   * Exchange local atoms and ghosts.
   */
   void Exchanger::exchange()
   {
      if (atomStoragePtr_->isCartesian()) {
         UTIL_THROW("Error: Coordinates are Cartesian on entry to exchange");
      }
      exchangeAtoms();
      exchangeGhosts();
   }

   /*
   * void Exchanger::exchangeAtoms()
   *
   * Exchange ownership of local atoms and groups, set ghost plan.
   *
   * ------------------------------------------------------------------
   * Precondition:
   *
   * Atomic coordinates must be in scaled [0,1] form on entry.
   *
   * ------------------------------------------------------------------
   * Algorithm:
   *
   *  - Loop over local atoms, create exchange and ghost communication
   *    plans for those beyond or near processor domain boundaries.
   *
   *  - Add local atoms that will be retained by this processor but
   *    sent as ghosts to appropriate ghost send arrays.
   *
   *  - Call GroupExchanger::markSpanningGroups for each registered
   *    GroupExchanger (e.g., bond, angle, dihedral). In this function:
   *
   *    For each Group<N> {
   *       - Set ghost communication flags for groups that span
   *         or may span boundaries.
   *       - Clear pointers to ghost atoms in the group.
   *    }
   *
   *  - Clear all ghosts from the AtomStorage.
   *
   *  - Main loop over transfer directions:
   *
   *    For each transfer direction (i and j) {
   *
   *       For each local atom {
   *          if marked for exchange(i, j) {
   *             if gridDimension[i] > 1 {
   *                - add to sendAtoms array for removal
   *                - pack into send buffer
   *             } else {
   *                - shift position to apply periodic b.c.
   *             }
   *          }
   *       }
   *
   *       if gridDimension[i] > 1 {
   *          - Call packGroups for each group type.
   *            This packs groups containing atoms that are sent.
   *          - Remove exchanged atoms and empty groups.
   *          - Send and receive data buffers.
   *          for each atom in the receive buffer {
   *             - Unpack atom into AtomStorage.
   *             - Shift position to apply periodic b.c.
   *             - Determine whether this processor is the atom's new
   *               home (or a way station).
   *             - If atom is home, add to appropriate ghost arrays.
   *          }
   *          - Call unpackGroups for each group type.
   *       }
   *
   *    } // end loop over transfer directions
   *
   *  - Call GroupExchanger::markGhosts for each type of group, to create
   *    ghost plans for local atoms that must be transferred because
   *    they belong to incomplete groups. Within this function:
   *
   *    for each Group<N> {
   *       if group is incomplete {
   *          for each direction (i and j) {
   *             if group is marked for ghost communication {
   *                set ghost flags in plan for local atoms in group
   *             }
   *          }
   *       }
   *    }
   *
   * ------------------------------------------------------------------
   * Postconditions: Upon return:
   *    Each processor owns all atoms in its domain, and no others.
   *    Each processor owns all groups containing one or more local atoms.
   *    Ghost plans are set for all local atoms.
   *    Send arrays contain local atoms marked for sending as ghosts.
   *    The AtomStorage contains no ghost atoms.
   *
   * ------------------------------------------------------------------
   */
   void Exchanger::exchangeAtoms()
   {
      stamp(START);
      Vector lengths = boundaryPtr_->lengths();
      double bound, slabWidth;
      double coordinate, rshift;
      AtomIterator atomIter;
      Atom* atomPtr;
      Plan* planPtr;
      int i, j, jc, ip, jp, k, source, dest, nSend;
      int shift;
      bool isHome;
      bool isGhost;

      // Set domain and slab boundaries
      for (i = 0; i < Dimension; ++i) {
         slabWidth = pairCutoff_/lengths[i];
         for (j = 0; j < 2; ++j) {
            // j = 0 sends to lower coordinate i, bound is minimum
            // j = 1 sends to higher coordinate i, bound is maximum
            bound = domainPtr_->domainBound(i, j);
            bound_(i, j) = bound;
            if (j == 0) {  // Communicate with lower index
               inner_(i, j) = bound + slabWidth;
               outer_(i, j) = bound - slabWidth;
            } else {  // j == 1, communicate with upper index
               inner_(i, j) = bound - slabWidth;
               outer_(i, j) = bound + slabWidth;
            }
            sendArray_(i, j).clear();
         }
         if (domainPtr_->grid().dimension(i) > 1) {
            gridFlags_[i] = 1;
         } else {
            gridFlags_[i] = 0;
         }
      }
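
      /*
      * Worked example (illustrative numbers): with scaled coordinates,
      * suppose this processor owns [0.25, 0.50] along axis i, with
      * lengths[i] = 40.0 and pairCutoff_ = 1.0, so that
      * slabWidth = 1.0/40.0 = 0.025. For the lower face (j = 0),
      * bound_(i, 0) = 0.25, inner_(i, 0) = 0.275, outer_(i, 0) = 0.225:
      * atoms with 0.25 <= x < 0.275 stay here but are sent as ghosts to
      * the lower neighbor, while departing atoms with 0.225 < x < 0.25
      * are also flagged as ghosts in the reverse direction. The j = 1
      * case is symmetric about the upper bound at 0.50.
      */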

      // Compute communication plan for every local atom
      atomStoragePtr_->begin(atomIter);
      for ( ; atomIter.notEnd(); ++atomIter) {

         planPtr = &atomIter->plan();
         planPtr->clearFlags();
         isHome = true;
         isGhost = false;

         // Cartesian directions
         for (i = 0; i < Dimension; ++i) {

            coordinate = atomIter->position()[i];

            // Transmission direction
            for (j = 0; j < 2; ++j) {

               // j = 0 sends to lower coordinate i
               // j = 1 sends to higher coordinate i

               // Index for conjugate (reverse) direction
               if (j == 0) jc = 1;
               if (j == 1) jc = 0;

               if (j == 0) {  // Communicate with lower index
                  if (coordinate < bound_(i, j)) {
                     planPtr->setExchange(i, j);
                     if (gridFlags_[i]) {
                        isHome = false;
                     }
                     if (coordinate > outer_(i, j)) {
                        planPtr->setGhost(i, jc);
                        isGhost = true;
                     }
                  } else {
                     if (coordinate < inner_(i, j)) {
                        planPtr->setGhost(i, j);
                        isGhost = true;
                     }
                  }
               } else {  // j == 1, communicate with upper index
                  if (coordinate > bound_(i, j)) {
                     planPtr->setExchange(i, j);
                     if (gridFlags_[i]) {
                        isHome = false;
                     }
                     if (coordinate < outer_(i, j)) {
                        planPtr->setGhost(i, jc);
                        isGhost = true;
                     }
                  } else {
                     if (coordinate > inner_(i, j)) {
                        planPtr->setGhost(i, j);
                        isGhost = true;
                     }
                  }
               }

            } // end for j
         } // end for i

         // Add atoms that will be retained by this processor,
         // but will be communicated as ghosts to sendArray_
         if (isGhost && isHome) {
            for (i = 0; i < Dimension; ++i) {
               for (j = 0; j < 2; ++j) {
                  if (planPtr->ghost(i, j)) {
                     sendArray_(i, j).append(*atomIter);
                  }
               }
            }
         }

      } // end atom loop, end compute plan
      stamp(ATOM_PLAN);
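
      /*
      * Example plan outcome (continuing the illustrative numbers above):
      * an atom at x = 0.26 lies inside the domain but within the lower
      * ghost slab, so the loop above leaves its exchange flags clear,
      * calls setGhost(i, 0), and appends the atom to sendArray_(i, 0).
      * An atom at x = 0.24 has left the domain: setExchange(i, 0) is set
      * and, because 0.24 > outer_(i, 0) = 0.225, setGhost(i, 1) marks it
      * to be sent back as a ghost by its new owner.
      */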

      /*
      * Set ghost communication flags for groups that span boundaries and
      * clear pointers to ghosts in each Group after inspecting the Group.
      * This function uses old coordinate information about ghost atoms,
      * and so must be called before the ghosts are cleared.
      *
      * If a group has atoms both "inside" and "outside" boundary (i, j),
      * the group is said to "span" the boundary, and ghost communication
      * flag (i, j) is set for the Group. Otherwise, this ghost
      * communication flag for the Group is cleared. After finishing
      * inspection of a Group, all pointers to ghost atoms within the
      * Group are cleared.
      *
      * The function interface is defined in GroupExchanger, and is
      * implemented by the GroupStorage<N> class template.
      */

      // Set ghost communication flags for groups (see above)
      for (k = 0; k < groupExchangers_.size(); ++k) {
         groupExchangers_[k].markSpanningGroups(bound_, inner_, outer_,
                                                gridFlags_);
      }
      stamp(INIT_GROUP_PLAN);
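
      /*
      * Sketch of the span test (an illustration of the rule stated in
      * the comment above, not the actual GroupStorage<N> code):
      *
      *    for each Group<N> group {
      *       for each boundary (i, j) {
      *          inside = false; outside = false;
      *          for each atom in group {
      *             if atom is inside boundary (i, j) { inside = true; }
      *             else { outside = true; }
      *          }
      *          if (inside && outside) { set group ghost flag (i, j); }
      *          else { clear group ghost flag (i, j); }
      *       }
      *       clear all ghost atom pointers in group;
      *    }
      */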

      // Clear all ghost atoms from AtomStorage
      atomStoragePtr_->clearGhosts();
      stamp(CLEAR_GHOSTS);

      #ifdef UTIL_DEBUG
      #ifdef DDMD_EXCHANGER_DEBUG
      int nAtomTotal;
      atomStoragePtr_->computeNAtomTotal(domainPtr_->communicator());
      int myRank = domainPtr_->gridRank();
      if (myRank == 0) {
         nAtomTotal = atomStoragePtr_->nAtomTotal();
      }
      for (k = 0; k < groupExchangers_.size(); ++k) {
         groupExchangers_[k].isValid(*atomStoragePtr_,
                                     domainPtr_->communicator(), false);
      }
      #endif
      #endif

      // Cartesian directions for exchange (0=x, 1=y, 2=z)
      for (i = 0; i < Dimension; ++i) {

         // Transmission direction
         // j = 0 sends down to processor with lower grid coordinate i
         // j = 1 sends up to processor with higher grid coordinate i
         for (j = 0; j < 2; ++j) {

            // Index for conjugate (reverse) direction
            if (j == 0) jc = 1;
            if (j == 1) jc = 0;

            source = domainPtr_->sourceRank(i, j); // rank to receive from
            dest = domainPtr_->destRank(i, j);     // rank to send to
            bound = domainPtr_->domainBound(i, j); // bound for send
            shift = domainPtr_->shift(i, j);       // shift for periodic b.c.
            rshift = double(shift);

            #ifdef UTIL_MPI
            if (gridFlags_[i]) {
               bufferPtr_->clearSendBuffer();
               bufferPtr_->beginSendBlock(Buffer::ATOM);
            }
            #endif

            // Choose atoms for sending, pack and mark for removal.
            sentAtoms_.clear();
            atomStoragePtr_->begin(atomIter);
            for ( ; atomIter.notEnd(); ++atomIter) {

               #ifdef UTIL_DEBUG
               coordinate = atomIter->position()[i];
               #ifdef DDMD_EXCHANGER_DEBUG
               {
                  bool choose;
                  if (j == 0) {
                     choose = (coordinate < bound);
                  } else {
                     choose = (coordinate > bound);
                  }
                  assert(choose == atomIter->plan().exchange(i, j));
               }
               #endif
               #endif

               if (atomIter->plan().exchange(i, j)) {

                  #ifdef UTIL_MPI
                  if (gridFlags_[i]) {
                     sentAtoms_.append(*atomIter);
                     atomIter->packAtom(*bufferPtr_);
                  } else
                  #endif
                  {

                     #ifdef UTIL_DEBUG
                     #ifdef DDMD_EXCHANGER_DEBUG
                     assert(shift);
                     assert(coordinate > -1.0*fabs(rshift));
                     assert(coordinate < 2.0*fabs(rshift));
                     #endif
                     #endif

                     // Shift position if required by periodic b.c.
                     if (shift) {
                        atomIter->position()[i] += rshift;
                     }

                     #ifdef UTIL_DEBUG
                     coordinate = atomIter->position()[i];
                     assert(coordinate >= domainPtr_->domainBound(i, 0));
                     assert(coordinate < domainPtr_->domainBound(i, 1));
                     #endif

                     // For gridDimension == 1, only nonbonded ghosts exist.
                     // The following assertion applies to these.
                     assert(!atomIter->plan().ghost(i, j));

                     #ifdef UTIL_DEBUG
                     // Check ghost communication plan
                     if (j == 0 && atomIter->position()[i] > inner_(i, jc)) {
                        assert(atomIter->plan().ghost(i, 1));
                     } else
                     if (j == 1 && atomIter->position()[i] < inner_(i, jc)) {
                        assert(atomIter->plan().ghost(i, 0));
                     }
                     #endif

                  }
               }

            } // end atom loop
            stamp(PACK_ATOMS);

            /*
            * Notes:
            *
            * (1) Removal of atoms cannot be done within the atom packing
            *     loop because element removal would invalidate the atom
            *     iterator.
            *
            * (2) Groups must be packed for sending before atoms are removed
            *     because the algorithm for identifying which groups to send
            *     references pointers to associated atoms.
            */

            #ifdef UTIL_MPI
            // Send & receive buffers iff processor grid dimension(i) > 1
            if (gridFlags_[i]) {

               // End atom send block
               bufferPtr_->endSendBlock();

               // Pack groups that contain atoms marked for exchange.
               // Remove empty groups from each GroupStorage.
               for (k = 0; k < groupExchangers_.size(); ++k) {
                  groupExchangers_[k].pack(i, j, *bufferPtr_);
               }
               stamp(PACK_GROUPS);

               // Remove chosen atoms (from sentAtoms) from atomStorage
               nSend = sentAtoms_.size();
               for (k = 0; k < nSend; ++k) {
                  atomStoragePtr_->removeAtom(&sentAtoms_[k]);
               }
               stamp(REMOVE_ATOMS);

               // Send to processor dest and receive from processor source
               bufferPtr_->sendRecv(domainPtr_->communicator(),
                                    source, dest);
               stamp(SEND_RECV_ATOMS);

               // Unpack atoms into atomStorage
               bufferPtr_->beginRecvBlock();
               while (bufferPtr_->recvSize() > 0) {

                  atomPtr = atomStoragePtr_->newAtomPtr();
                  planPtr = &atomPtr->plan();
                  atomPtr->unpackAtom(*bufferPtr_);
                  atomStoragePtr_->addNewAtom();

                  if (shift) {
                     atomPtr->position()[i] += rshift;
                  }

                  #ifdef UTIL_DEBUG
                  // Check bounds on atom coordinate
                  coordinate = atomPtr->position()[i];
                  assert(coordinate > domainPtr_->domainBound(i, 0));
                  assert(coordinate < domainPtr_->domainBound(i, 1));

                  // Check ghost plan
                  assert(!planPtr->ghost(i, j));
                  if (j == 0) {
                     assert(planPtr->ghost(i, 1)
                            == (coordinate > inner_(i, 1)));
                  } else {
                     assert(planPtr->ghost(i, 0)
                            == (coordinate < inner_(i, 0)));
                  }
                  #endif

                  // Determine if new atom will stay on this processor.
                  isHome = true;
                  if (i < Dimension - 1) {
                     for (ip = i + 1; ip < Dimension; ++ip) {
                        if (gridFlags_[ip]) {
                           for (jp = 0; jp < 2; ++jp) {
                              if (planPtr->exchange(ip, jp)) {
                                 isHome = false;
                              }
                           }
                        }
                     }
                  }

                  // If atom will stay, add to sendArrays for ghosts
                  if (isHome) {
                     for (ip = 0; ip < Dimension; ++ip) {
                        for (jp = 0; jp < 2; ++jp) {
                           if (planPtr->ghost(ip, jp)) {
                              sendArray_(ip, jp).append(*atomPtr);
                           }
                        }
                     }
                  }

               }
               bufferPtr_->endRecvBlock();
               assert(bufferPtr_->recvSize() == 0);
               stamp(UNPACK_ATOMS);

               // Unpack groups
               for (k = 0; k < groupExchangers_.size(); ++k) {
                  groupExchangers_[k].unpack(*bufferPtr_, *atomStoragePtr_);
               }
               stamp(UNPACK_GROUPS);

            } // end if gridDimension > 1
            #endif // ifdef UTIL_MPI

         } // end for j (direction 0, 1)
      } // end for i (Cartesian index)

      /*
      * At this point:
      *    No ghost atoms exist in AtomStorage.
      *    All local atoms are on correct processor.
      *    No Groups are empty.
      *    All pointers to local atoms in Groups are set correctly.
      *    All pointers to ghost atoms in Groups are null.
      */

      #ifdef UTIL_DEBUG
      #ifdef DDMD_EXCHANGER_DEBUG
      // Validity checks
      atomStoragePtr_->computeNAtomTotal(domainPtr_->communicator());
      if (myRank == 0) {
         assert(nAtomTotal == atomStoragePtr_->nAtomTotal());
      }
      atomStoragePtr_->isValid();
      for (k = 0; k < groupExchangers_.size(); ++k) {
         groupExchangers_[k].isValid(*atomStoragePtr_,
                                     domainPtr_->communicator(), false);
      }
      #endif // ifdef DDMD_EXCHANGER_DEBUG
      #endif // ifdef UTIL_DEBUG

      // Set ghost communication flags for atoms in incomplete groups
      for (k = 0; k < groupExchangers_.size(); ++k) {
         groupExchangers_[k].markGhosts(*atomStoragePtr_, sendArray_,
                                        gridFlags_);
      }
      stamp(MARK_GROUP_GHOSTS);
   }

   /*
   * Exchange ghost atoms.
   *
   * Call immediately after exchangeAtoms and before rebuilding the
   * neighbor list on time steps that require reneighboring. Uses
   * ghost communication plans computed in exchangeAtoms.
   */
   void Exchanger::exchangeGhosts()
   {
      stamp(START);

      // Preconditions
      assert(bufferPtr_->isInitialized());
      assert(domainPtr_->isInitialized());
      assert(domainPtr_->hasBoundary());
      if (atomStoragePtr_->nGhost() != 0) {
         UTIL_THROW("atomStoragePtr_->nGhost() != 0");
      }

      double rshift;
      Atom* atomPtr;
      Atom* sendPtr;
      int i, j, ip, jp, k, source, dest, shift, size;

      // Clear all receive arrays
      for (i = 0; i < Dimension; ++i) {
         for (j = 0; j < 2; ++j) {
            recvArray_(i, j).clear();
         }
      }

      #ifdef UTIL_DEBUG
      double coordinate;
      #ifdef DDMD_EXCHANGER_DEBUG
      // Check send arrays
      {
         // Count local atoms marked for sending as ghosts.
         FMatrix<int, Dimension, 2> sendCounter;
         for (i = 0; i < Dimension; ++i) {
            for (j = 0; j < 2; ++j) {
               sendCounter(i, j) = 0;
            }
         }
         AtomIterator localIter;
         Plan* planPtr;
         atomStoragePtr_->begin(localIter);
         for ( ; localIter.notEnd(); ++localIter) {
            planPtr = &localIter->plan();
            for (i = 0; i < Dimension; ++i) {
               for (j = 0; j < 2; ++j) {
                  if (planPtr->ghost(i, j)) {
                     ++sendCounter(i, j);
                  }
               }
            }
         }
         // Check consistency of counts with sendArray sizes.
         for (i = 0; i < Dimension; ++i) {
            for (j = 0; j < 2; ++j) {
               if (sendCounter(i, j) != sendArray_(i, j).size()) {
                  UTIL_THROW("Incorrect sendArray size");
               }
            }
         }
      }
      #endif // ifdef DDMD_EXCHANGER_DEBUG
      #endif // ifdef UTIL_DEBUG
      stamp(INIT_SEND_ARRAYS);

      // Cartesian directions
      for (i = 0; i < Dimension; ++i) {

         // Transmit directions
         for (j = 0; j < 2; ++j) {

            // j = 0: Send ghosts near minimum bound to lower coordinate
            // j = 1: Send ghosts near maximum bound to higher coordinate

            // Shift on receiving node for periodic b.c.s
            shift = domainPtr_->shift(i, j);
            rshift = 1.0*shift;

            #ifdef UTIL_MPI
            if (gridFlags_[i]) {
               bufferPtr_->clearSendBuffer();
               bufferPtr_->beginSendBlock(Buffer::GHOST);
            }
            #endif

            // Pack atoms in sendArray_(i, j)
            size = sendArray_(i, j).size();
            for (k = 0; k < size; ++k) {

               sendPtr = &sendArray_(i, j)[k];

               #ifdef UTIL_MPI
               if (gridFlags_[i]) {
                  // If grid dimension > 1, pack atom for sending
                  sendPtr->packGhost(*bufferPtr_);
               } else
               #endif
               {  // if grid dimension == 1

                  // Make a ghost copy of local atom on this processor
                  atomPtr = atomStoragePtr_->newGhostPtr();
                  recvArray_(i, j).append(*atomPtr);
                  atomPtr->copyLocalGhost(*sendPtr);
                  #if 0
                  atomPtr->setId(sendPtr->id());
                  atomPtr->setTypeId(sendPtr->typeId());
                  atomPtr->plan().setFlags(sendPtr->plan().flags());
                  atomPtr->position() = sendPtr->position();
                  #endif
                  if (shift) {
                     atomPtr->position()[i] += rshift;
                  }
                  atomStoragePtr_->addNewGhost();

                  #ifdef UTIL_DEBUG
                  // Validate shifted ghost coordinate
                  coordinate = atomPtr->position()[i];
                  if (j == 0) {
                     assert(coordinate > bound_(i, 1));
                  } else {
                     assert(coordinate < bound_(i, 0));
                  }
                  #endif

                  // Add to send arrays for any remaining directions
                  if (i < Dimension - 1) {
                     for (ip = i + 1; ip < Dimension; ++ip) {
                        for (jp = 0; jp < 2; ++jp) {
                           if (atomPtr->plan().ghost(ip, jp)) {
                              sendArray_(ip, jp).append(*atomPtr);
                           }
                        }
                     }
                  }

               }

            }
            stamp(PACK_GHOSTS);

            #ifdef UTIL_MPI
            // Send and receive buffers
            if (gridFlags_[i]) {

               bufferPtr_->endSendBlock();

               source = domainPtr_->sourceRank(i, j);
               dest = domainPtr_->destRank(i, j);
               bufferPtr_->sendRecv(domainPtr_->communicator(), source, dest);
               stamp(SEND_RECV_GHOSTS);

               // Unpack ghosts and add to recvArray
               bufferPtr_->beginRecvBlock();
               while (bufferPtr_->recvSize() > 0) {

                  atomPtr = atomStoragePtr_->newGhostPtr();
                  atomPtr->unpackGhost(*bufferPtr_);
                  if (shift) {
                     atomPtr->position()[i] += rshift;
                  }
                  recvArray_(i, j).append(*atomPtr);
                  atomStoragePtr_->addNewGhost();

                  // Prohibit sending back ghost in reverse direction
                  if (j == 0) {
                     atomPtr->plan().clearGhost(i, 1);
                  }

                  // Add to send arrays for remaining directions
                  if (i < Dimension - 1) {
                     for (ip = i + 1; ip < Dimension; ++ip) {
                        for (jp = 0; jp < 2; ++jp) {
                           if (atomPtr->plan().ghost(ip, jp)) {
                              sendArray_(ip, jp).append(*atomPtr);
                           }
                        }
                     }
                  }

                  #ifdef UTIL_DEBUG
                  // Validate ghost coordinate on the receiving processor.
                  coordinate = atomPtr->position()[i];
                  if (j == 0) {
                     assert(coordinate > bound_(i, 1));
                  } else {
                     assert(coordinate < bound_(i, 0));
                  }
                  #endif

               }
               bufferPtr_->endRecvBlock();
               stamp(UNPACK_GHOSTS);

            }
            #endif // ifdef UTIL_MPI

         } // end for transmit direction j = 0, 1

      } // end for Cartesian index i

      // Find ghost atoms for all incomplete groups
      for (k = 0; k < groupExchangers_.size(); ++k) {
         groupExchangers_[k].findGhosts(*atomStoragePtr_);
      }

      #ifdef UTIL_DEBUG
      #ifdef DDMD_EXCHANGER_DEBUG
      atomStoragePtr_->isValid();
      for (k = 0; k < groupExchangers_.size(); ++k) {
         groupExchangers_[k].isValid(*atomStoragePtr_,
                                     domainPtr_->communicator(), true);
      }
      #endif // ifdef DDMD_EXCHANGER_DEBUG
      #endif // ifdef UTIL_DEBUG

      stamp(FIND_GROUP_GHOSTS);
   }

   /*
   * Update ghost atom coordinates.
   *
   * Call on time steps for which no reneighboring is required.
   */
   void Exchanger::update()
   {
      stamp(START);
      if (!atomStoragePtr_->isCartesian()) {
         UTIL_THROW("Error: Coordinates not Cartesian on entry to update");
      }

      Atom* atomPtr;
      int i, j, k, source, dest, size, shift;

      for (i = 0; i < Dimension; ++i) {
         for (j = 0; j < 2; ++j) {

            // Shift on receiving processor for periodic boundary conditions
            shift = domainPtr_->shift(i, j);

            if (gridFlags_[i]) {

               // Pack ghost positions for sending
               bufferPtr_->clearSendBuffer();
               bufferPtr_->beginSendBlock(Buffer::UPDATE);
               size = sendArray_(i, j).size();
               for (k = 0; k < size; ++k) {
                  atomPtr = &sendArray_(i, j)[k];
                  atomPtr->packUpdate(*bufferPtr_);
               }
               bufferPtr_->endSendBlock();
               stamp(PACK_UPDATE);

               // Send and receive buffers
               source = domainPtr_->sourceRank(i, j);
               dest = domainPtr_->destRank(i, j);
               bufferPtr_->sendRecv(domainPtr_->communicator(), source, dest);
               stamp(SEND_RECV_UPDATE);

               // Unpack ghost positions
               bufferPtr_->beginRecvBlock();
               size = recvArray_(i, j).size();
               for (k = 0; k < size; ++k) {
                  atomPtr = &recvArray_(i, j)[k];
                  atomPtr->unpackUpdate(*bufferPtr_);
                  if (shift) {
                     boundaryPtr_->applyShift(atomPtr->position(), i, shift);
                  }
               }
               bufferPtr_->endRecvBlock();
               stamp(UNPACK_UPDATE);

            } else {

               // If grid().dimension(i) == 1, then copy positions of atoms
               // listed in sendArray to those listed in the recvArray.

               size = sendArray_(i, j).size();
               assert(size == recvArray_(i, j).size());
               for (k = 0; k < size; ++k) {
                  atomPtr = &recvArray_(i, j)[k];
                  atomPtr->copyLocalUpdate(sendArray_(i, j)[k]);
                  #if 0
                  atomPtr->position() = sendArray_(i, j)[k].position();
                  #endif
                  if (shift) {
                     boundaryPtr_->applyShift(atomPtr->position(), i, shift);
                  }
               }
               stamp(LOCAL_UPDATE);

            }

         } // transmit direction j = 0, 1

      } // Cartesian direction i = 0, ..., Dimension - 1

   }

   /*
   * Update ghost atom forces.
   *
   * Call on time steps for which no reneighboring is required,
   * if reverse communication is enabled.
   */
   void Exchanger::reverseUpdate()
   {
      stamp(START);
      Atom* atomPtr;
      int i, j, k, source, dest, size;

      for (i = Dimension - 1; i >= 0; --i) {
         for (j = 1; j >= 0; --j) {

            if (gridFlags_[i]) {

               // Pack ghost forces for sending
               bufferPtr_->clearSendBuffer();
               bufferPtr_->beginSendBlock(Buffer::FORCE);
               size = recvArray_(i, j).size();
               for (k = 0; k < size; ++k) {
                  atomPtr = &recvArray_(i, j)[k];
                  atomPtr->packForce(*bufferPtr_);
               }
               bufferPtr_->endSendBlock();
               stamp(PACK_FORCE);

               // Send and receive buffers (reverse direction)
               source = domainPtr_->destRank(i, j);
               dest = domainPtr_->sourceRank(i, j);
               bufferPtr_->sendRecv(domainPtr_->communicator(),
                                    source, dest);
               stamp(SEND_RECV_FORCE);

               // Unpack ghost forces
               bufferPtr_->beginRecvBlock();
               size = sendArray_(i, j).size();
               for (k = 0; k < size; ++k) {
                  atomPtr = &sendArray_(i, j)[k];
                  atomPtr->unpackForce(*bufferPtr_);
               }
               bufferPtr_->endRecvBlock();
               stamp(UNPACK_FORCE);

            } else {

               // If grid().dimension(i) == 1, then add forces of atoms
               // listed in recvArray to those listed in the sendArray.

               size = recvArray_(i, j).size();
               assert(size == sendArray_(i, j).size());
               for (k = 0; k < size; ++k) {
                  atomPtr = &sendArray_(i, j)[k];
                  atomPtr->force() += recvArray_(i, j)[k].force();
               }
               stamp(LOCAL_FORCE);

            }

         } // transmit direction j = 1 or 0

      } // Cartesian direction i

   }

   /*
   * Output statistics.
   */
   void Exchanger::outputStatistics(std::ostream& out, double time, int nStep)
   {
      // Precondition
      if (!domainPtr_->isMaster()) {
         UTIL_THROW("May be called only on domain master");
      }

      /*
      * Just before calling this function on the master processor, the
      * invoking function must call the following functions on all
      * processors:
      *
      *  - Integrator::computeStatistics(MPI::IntraComm&)
      *  - Exchanger::timer().reduce(MPI::IntraComm&)
      */
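
      /*
      * Illustrative calling sequence (a sketch of the requirement stated
      * above; the integrator, exchanger, and communicator names are
      * assumptions):
      *
      *    // On all processors:
      *    integrator.computeStatistics(communicator);
      *    exchanger.timer().reduce(communicator);
      *
      *    // On the domain master only:
      *    if (domain.isMaster()) {
      *       exchanger.outputStatistics(out, time, nStep);
      *    }
      */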

      int nAtomTot = atomStoragePtr_->nAtomTotal();
      int nProc = 1;
      #ifdef UTIL_MPI
      nProc = domainPtr_->communicator().Get_size();
      #endif

      double factor1 = 1.0/double(nStep);
      double factor2 = double(nProc)/(double(nStep)*double(nAtomTot));
      double factor3 = 100.0/time;

      out << std::endl;
      out << "                     "
          << " T/M [sec]  "
          << " T*P/(N*M)  "
          << " Percent (%)" << std::endl;

      double atomExchangeT = 0.0;
      double ghostExchangeT = 0.0;
      double updateT = 0.0;

      // Exchanger::exchangeAtoms()
      double AtomPlanT = timer_.time(Exchanger::ATOM_PLAN);
      atomExchangeT += AtomPlanT;
      out << "AtomPlan             "
          << Dbl(AtomPlanT*factor1, 12, 6) << " "
          << Dbl(AtomPlanT*factor2, 12, 6) << " "
          << Dbl(AtomPlanT*factor3, 12, 6, true) << std::endl;
      double InitGroupPlanT = timer_.time(Exchanger::INIT_GROUP_PLAN);
      atomExchangeT += InitGroupPlanT;
      out << "InitGroupPlan        "
          << Dbl(InitGroupPlanT*factor1, 12, 6) << " "
          << Dbl(InitGroupPlanT*factor2, 12, 6) << " "
          << Dbl(InitGroupPlanT*factor3, 12, 6, true) << std::endl;
      double ClearGhostsT = timer_.time(Exchanger::CLEAR_GHOSTS);
      atomExchangeT += ClearGhostsT;
      out << "ClearGhosts          "
          << Dbl(ClearGhostsT*factor1, 12, 6) << " "
          << Dbl(ClearGhostsT*factor2, 12, 6) << " "
          << Dbl(ClearGhostsT*factor3, 12, 6, true) << std::endl;
      double PackAtomsT = timer_.time(Exchanger::PACK_ATOMS);
      atomExchangeT += PackAtomsT;
      out << "PackAtoms            "
          << Dbl(PackAtomsT*factor1, 12, 6) << " "
          << Dbl(PackAtomsT*factor2, 12, 6) << " "
          << Dbl(PackAtomsT*factor3, 12, 6, true) << std::endl;
      double PackGroupsT = timer_.time(Exchanger::PACK_GROUPS);
      atomExchangeT += PackGroupsT;
      out << "PackGroups           "
          << Dbl(PackGroupsT*factor1, 12, 6) << " "
          << Dbl(PackGroupsT*factor2, 12, 6) << " "
          << Dbl(PackGroupsT*factor3, 12, 6, true) << std::endl;
      double RemoveAtomsT = timer_.time(Exchanger::REMOVE_ATOMS);
      atomExchangeT += RemoveAtomsT;
      out << "RemoveAtoms          "
          << Dbl(RemoveAtomsT*factor1, 12, 6) << " "
          << Dbl(RemoveAtomsT*factor2, 12, 6) << " "
          << Dbl(RemoveAtomsT*factor3, 12, 6, true) << std::endl;
      double SendRecvAtomsT = timer_.time(Exchanger::SEND_RECV_ATOMS);
      atomExchangeT += SendRecvAtomsT;
      out << "SendRecvAtoms        "
          << Dbl(SendRecvAtomsT*factor1, 12, 6) << " "
          << Dbl(SendRecvAtomsT*factor2, 12, 6) << " "
          << Dbl(SendRecvAtomsT*factor3, 12, 6, true) << std::endl;
      double UnpackAtomsT = timer_.time(Exchanger::UNPACK_ATOMS);
      atomExchangeT += UnpackAtomsT;
      out << "UnpackAtoms          "
          << Dbl(UnpackAtomsT*factor1, 12, 6) << " "
          << Dbl(UnpackAtomsT*factor2, 12, 6) << " "
          << Dbl(UnpackAtomsT*factor3, 12, 6, true) << std::endl;
      double UnpackGroupsT = timer_.time(Exchanger::UNPACK_GROUPS);
      atomExchangeT += UnpackGroupsT;
      out << "UnpackGroups         "
          << Dbl(UnpackGroupsT*factor1, 12, 6) << " "
          << Dbl(UnpackGroupsT*factor2, 12, 6) << " "
          << Dbl(UnpackGroupsT*factor3, 12, 6, true) << std::endl;
      double MarkGroupGhostsT = timer_.time(Exchanger::MARK_GROUP_GHOSTS);
      atomExchangeT += MarkGroupGhostsT;
      out << "MarkGroupGhosts      "
          << Dbl(MarkGroupGhostsT*factor1, 12, 6) << " "
          << Dbl(MarkGroupGhostsT*factor2, 12, 6) << " "
          << Dbl(MarkGroupGhostsT*factor3, 12, 6, true) << std::endl;
      double SendArraysT = timer_.time(Exchanger::INIT_SEND_ARRAYS);
      atomExchangeT += SendArraysT;
      out << "SendArrays           "
          << Dbl(SendArraysT*factor1, 12, 6) << " "
          << Dbl(SendArraysT*factor2, 12, 6) << " "
          << Dbl(SendArraysT*factor3, 12, 6, true) << std::endl;
      out << "Atom Exchange (Tot)  "
          << Dbl(atomExchangeT*factor1, 12, 6) << " "
          << Dbl(atomExchangeT*factor2, 12, 6) << " "
          << Dbl(atomExchangeT*factor3, 12, 6, true) << std::endl;
      out << std::endl;

      // Exchanger::exchangeGhosts()
      double PackGhostsT = timer_.time(Exchanger::PACK_GHOSTS);
      ghostExchangeT += PackGhostsT;
      out << "PackGhosts           "
          << Dbl(PackGhostsT*factor1, 12, 6) << " "
          << Dbl(PackGhostsT*factor2, 12, 6) << " "
          << Dbl(PackGhostsT*factor3, 12, 6, true) << std::endl;
      double SendRecvGhostsT = timer_.time(Exchanger::SEND_RECV_GHOSTS);
      ghostExchangeT += SendRecvGhostsT;
      out << "SendRecvGhosts       "
          << Dbl(SendRecvGhostsT*factor1, 12, 6) << " "
          << Dbl(SendRecvGhostsT*factor2, 12, 6) << " "
          << Dbl(SendRecvGhostsT*factor3, 12, 6, true) << std::endl;
      double UnpackGhostsT = timer_.time(Exchanger::UNPACK_GHOSTS);
      ghostExchangeT += UnpackGhostsT;
      out << "UnpackGhosts         "
          << Dbl(UnpackGhostsT*factor1, 12, 6) << " "
          << Dbl(UnpackGhostsT*factor2, 12, 6) << " "
          << Dbl(UnpackGhostsT*factor3, 12, 6, true) << std::endl;
      double FindGroupGhostsT = timer_.time(Exchanger::FIND_GROUP_GHOSTS);
      ghostExchangeT += FindGroupGhostsT;
      out << "FindGroupGhosts      "
          << Dbl(FindGroupGhostsT*factor1, 12, 6) << " "
          << Dbl(FindGroupGhostsT*factor2, 12, 6) << " "
          << Dbl(FindGroupGhostsT*factor3, 12, 6, true) << std::endl;
      out << "Ghost Exchange (Tot) "
          << Dbl(ghostExchangeT*factor1, 12, 6) << " "
          << Dbl(ghostExchangeT*factor2, 12, 6) << " "
          << Dbl(ghostExchangeT*factor3, 12, 6, true) << std::endl;
      out << std::endl;

      // Exchanger::update()
      double PackUpdateT = timer_.time(Exchanger::PACK_UPDATE);
      updateT += PackUpdateT;
      out << "PackUpdate           "
          << Dbl(PackUpdateT*factor1, 12, 6) << " "
          << Dbl(PackUpdateT*factor2, 12, 6) << " "
          << Dbl(PackUpdateT*factor3, 12, 6, true) << std::endl;
      double SendRecvUpdateT = timer_.time(Exchanger::SEND_RECV_UPDATE);
      updateT += SendRecvUpdateT;
      out << "SendRecvUpdate       "
          << Dbl(SendRecvUpdateT*factor1, 12, 6) << " "
          << Dbl(SendRecvUpdateT*factor2, 12, 6) << " "
          << Dbl(SendRecvUpdateT*factor3, 12, 6, true) << std::endl;
      double UnpackUpdateT = timer_.time(Exchanger::UNPACK_UPDATE);
      updateT += UnpackUpdateT;
      out << "UnpackUpdate         "
          << Dbl(UnpackUpdateT*factor1, 12, 6) << " "
          << Dbl(UnpackUpdateT*factor2, 12, 6) << " "
          << Dbl(UnpackUpdateT*factor3, 12, 6, true) << std::endl;
      double LocalUpdateT = timer_.time(Exchanger::LOCAL_UPDATE);
      updateT += LocalUpdateT;
      out << "LocalUpdate          "
          << Dbl(LocalUpdateT*factor1, 12, 6) << " "
          << Dbl(LocalUpdateT*factor2, 12, 6) << " "
          << Dbl(LocalUpdateT*factor3, 12, 6, true) << std::endl;
      out << "Update (Tot)         "
          << Dbl(updateT*factor1, 12, 6) << " "
          << Dbl(updateT*factor2, 12, 6) << " "
          << Dbl(updateT*factor3, 12, 6, true) << std::endl;
      out << std::endl;

   }
   #endif // ifdef UTIL_MPI

}