Simpatico  v1.10
GroupStorage.tpp
1 #ifndef DDMD_GROUP_STORAGE_TPP
2 #define DDMD_GROUP_STORAGE_TPP
3 
4 /*
5 * Simpatico - Simulation Package for Polymeric and Molecular Liquids
6 *
7 * Copyright 2010 - 2017, The Regents of the University of Minnesota
8 * Distributed under the terms of the GNU General Public License.
9 */
10 
11 #include "GroupStorage.h"
12 #include "AtomStorage.h"
13 #include <util/format/Int.h>
14 #include <util/mpi/MpiLoader.h>
15 #include <ddMd/communicate/GroupDistributor.tpp> // member
16 #include <ddMd/communicate/GroupCollector.tpp> // member
17 
18 //#define DDMD_GROUP_STORAGE_DEBUG
19 
20 namespace DdMd
21 {
22 
23  using namespace Util;
24 
25  /*
26  * Default constructor.
27  */
28  template <int N>
29  GroupStorage<N>::GroupStorage()
30   : groups_(),
31  groupSet_(),
32  reservoir_(),
33  newPtr_(0),
34  capacity_(0),
35  totalCapacity_(0),
36  maxNGroupLocal_(0),
37  maxNGroup_(0),
38  nTotal_(0)
39  { emptyGroups_.reserve(128); }
40 
41  /*
42  * Destructor.
43  */
44  template <int N>
45  GroupStorage<N>::~GroupStorage()
46  {}
47 
48  /*
49  * Create associations for distributor and collector.
50  */
51  template <int N>
52  void GroupStorage<N>::associate(Domain& domain, AtomStorage& atomStorage,
53  Buffer& buffer)
54  {
55  distributor_.associate(domain, atomStorage, *this, buffer);
56  collector_.associate(domain, *this, buffer);
57  }
58 
59  /*
60  * Set parameters and allocate memory.
61  */
62  template <int N>
63  void GroupStorage<N>::initialize(int capacity, int totalCapacity)
64  {
65  capacity_ = capacity;
66  totalCapacity_ = totalCapacity;
67  allocate();
68  }
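
  /*
  * Editorial sketch (not in the original source): a plausible setup
  * sequence for a GroupStorage, assuming a caller that owns a Domain,
  * an AtomStorage, and a Buffer (all names below are illustrative):
  *
  *    DdMd::GroupStorage<2> bondStorage;
  *    bondStorage.associate(domain, atomStorage, buffer);
  *    bondStorage.initialize(4096, 8192);  // capacity, totalCapacity
  *
  * In Simpatico, association and parameter input are normally driven by
  * higher-level simulation classes rather than written out like this.
  */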
69 
70  /*
71  * Read parameters and allocate memory.
72  */
73  template <int N>
74  void GroupStorage<N>::readParameters(std::istream& in)
75  {
76  read<int>(in, "capacity", capacity_);
77  read<int>(in, "totalCapacity", totalCapacity_);
78  allocate();
79  }
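
  /*
  * Editorial sketch (not in the original source): readParameters() reads
  * two labeled values, so the corresponding fragment of a parameter-file
  * block would look roughly like this (values are placeholders):
  *
  *    capacity        4096
  *    totalCapacity   8192
  */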
80 
81  /*
82  * Load parameters from input archive and allocate memory.
83  */
84  template <int N>
85  void GroupStorage<N>::loadParameters(Serializable::IArchive& ar)
86  {
87  loadParameter<int>(ar, "capacity", capacity_);
88  loadParameter<int>(ar, "totalCapacity", totalCapacity_);
89  allocate();
90 
91  MpiLoader<Serializable::IArchive> loader(*this, ar);
92  loader.load(maxNGroupLocal_);
93  maxNGroup_.set(maxNGroupLocal_);
94  }
95 
96  /*
97  * Save parameters to output archive.
98  */
99  template <int N>
100  void GroupStorage<N>::save(Serializable::OArchive& ar)
101  {
102  ar & capacity_;
103  ar & totalCapacity_;
104  int max = maxNGroup_.value();
105  ar & max;
106  }
107 
108  /*
109  * Allocate and initialize all containers (private).
110  */
111  template <int N>
112  void GroupStorage<N>::allocate()
113  {
114  groups_.allocate(capacity_);
115  reservoir_.allocate(capacity_);
116  groupSet_.allocate(groups_);
117  groupPtrs_.allocate(totalCapacity_);
118 
119  // Push all groups onto reservoir stack, in reverse order.
120  for (int i = capacity_ - 1; i >=0; --i) {
121  reservoir_.push(groups_[i]);
122  }
123 
124  // Nullify all pointers in groupPtrs_ array.
125  for (int i = 0; i < totalCapacity_; ++i) {
126  groupPtrs_[i] = 0;
127  }
128 
129  }
130 
131  // Local group mutators
132 
133  /*
134  * Returns address for a new local Group.
135  */
136  template <int N>
137  Group<N>* GroupStorage<N>::newPtr()
138  {
139  // Precondition
140  if (newPtr_ != 0)
141  UTIL_THROW("Unregistered newPtr_ still active");
142  newPtr_ = &reservoir_.pop();
143  newPtr_->clear();
144  return newPtr_;
145  }
146 
147  /*
148  * Pushes unused pointer back onto reservoir.
149  */
150  template <int N>
151  void GroupStorage<N>::returnPtr()
152  {
153  // Preconditions
154  if (newPtr_ == 0)
155  UTIL_THROW("No active newPtr_");
156  newPtr_->setId(-1);
157  reservoir_.push(*newPtr_);
158  newPtr_ = 0;
159  }
160 
161  /*
162  * Register new local Group in internal data structures.
163  */
164  template <int N>
165  void GroupStorage<N>::add()
166  {
167 
168  // Preconditions
169  if (newPtr_ == 0) {
170  UTIL_THROW("No active newPtr_");
171  }
172  int groupId = newPtr_->id();
173  if (groupId < 0 || groupId >= totalCapacity_) {
174  Log::file() << "groupId = " << groupId << std::endl;
175  UTIL_THROW("Invalid group id");
176  }
177  if (groupPtrs_[groupId] != 0) {
178  UTIL_THROW("Group with specified id is already present");
179  }
180 
181  // Add Group<N> object to container
182  groupSet_.append(*newPtr_);
183  groupPtrs_[groupId] = newPtr_;
184 
185  // Release newPtr_ for reuse.
186  newPtr_ = 0;
187 
188  // Check maximum.
189  if (groupSet_.size() > maxNGroupLocal_) {
190  maxNGroupLocal_ = groupSet_.size();
191  }
192  }
193 
194  /*
195  * Add a new Group with a specified id, return pointer to new Group.
196  */
197  template <int N>
198  Group<N>* GroupStorage<N>::add(int id)
199  {
200  Group<N>* ptr = newPtr();
201  ptr->setId(id);
202  add();
203  return ptr;
204  }
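
  /*
  * Editorial sketch (not in the original source): from a caller's point of
  * view, the newPtr()/add() transaction used above looks like this
  * (hypothetical caller code):
  *
  *    Group<N>* ptr = storage.newPtr(); // pop an unused Group from reservoir_
  *    ptr->setId(groupId);              // assign the global group id
  *    storage.add();                    // register in groupSet_ and groupPtrs_
  *
  * add(int id) is a convenience wrapper that performs these three steps
  * and returns the new pointer.
  */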
205 
206  /*
207  * Remove a specific local Group.
208  */
209  template <int N>
210  void GroupStorage<N>::remove(Group<N>* groupPtr)
211  {
212  int groupId = groupPtr->id();
213  if (groupId < 0 || groupId >= totalCapacity_) {
214  Log::file() << "Group id = " << groupId << std::endl;
215  UTIL_THROW("Invalid group id, out of range");
216  } else if (groupPtrs_[groupId] == 0) {
217  UTIL_THROW("Group does not exist on this processor");
218  }
219  reservoir_.push(*groupPtr);
220  groupSet_.remove(*groupPtr);
221  groupPtrs_[groupId] = 0;
222  groupPtr->setId(-1);
223  }
224 
225  /*
226  * Remove all groups.
227  */
228  template <int N>
229  void GroupStorage<N>::clearGroups()
230  {
231  Group<N>* groupPtr;
232  int groupId;
233  while (groupSet_.size() > 0) {
234  groupPtr = &groupSet_.pop();
235  groupId = groupPtr->id();
236  groupPtrs_[groupId] = 0;
237  groupPtr->setId(-1);
238  reservoir_.push(*groupPtr);
239  }
240 
241  if (groupSet_.size() != 0) {
242  UTIL_THROW("Nonzero ghostSet size at end of clearGhosts");
243  }
244  }
245 
246  // Accessors
247 
248  /*
249  * Check validity of this GroupStorage.
250  *
251  * Returns true if all is ok, or throws an Exception.
252  */
253  template <int N>
254  bool GroupStorage<N>::isValid()
255  {
256 
257  if (size() + reservoir_.size() != capacity_)
258  UTIL_THROW("nGroup + reservoir size != local capacity");
259 
260  // Check consistency of pointers to atoms and atom ids
261  Group<N>* ptr;
262  int i, j;
263  j = 0;
264  for (i = 0; i < totalCapacity_ ; ++i) {
265  ptr = groupPtrs_[i];
266  if (ptr != 0) {
267  ++j;
268  if (ptr->id() != i) {
269  UTIL_THROW("ptr->id() != i");
270  }
271  }
272  }
273 
274  // Count local groups
275  GroupIterator<N> iter;
276  j = 0;
277  for (begin(iter); iter.notEnd(); ++iter) {
278  ++j;
279  ptr = find(iter->id());
280  if (ptr == 0)
281  UTIL_THROW("Unable to find local group returned by iterator");
282  if (ptr != iter.get())
283  UTIL_THROW("Inconsistent find(iter->id()");
284  }
285  if (j != size())
286  UTIL_THROW("Number from iterator != size()");
287 
288  return true;
289  }
290 
291  /*
292  * Compute and store the number of distinct groups on all processors.
293  */
294  template <int N>
295  #ifdef UTIL_MPI
296  void GroupStorage<N>::computeNTotal(MPI::Intracomm& communicator)
297  #else
298  void GroupStorage<N>::computeNTotal()
299  #endif
300  {
301  // If nTotal is already known, return and do nothing.
302  if (nTotal_.isSet()) return;
303 
304  // Loop over groups on this processor.
305  // Increment nLocal only if atom 0 is owned by this processor
306  GroupIterator<N> iterator;
307  Atom* atomPtr;
308  int nLocal = 0;
309  begin(iterator);
310  for ( ; iterator.notEnd(); ++iterator) {
311  atomPtr = iterator->atomPtr(0);
312  if (atomPtr) {
313  if (!atomPtr->isGhost()) {
314  ++nLocal;
315  }
316  }
317  }
318 
319  // Reduce data on all processors and set nTotal_ on master.
320  int nTot;
321  #ifdef UTIL_MPI
322  communicator.Reduce(&nLocal, &nTot, 1,
323  MPI::INT, MPI::SUM, 0);
324  if (communicator.Get_rank() !=0) {
325  nTot = -1;
326  }
327  nTotal_.set(nTot);
328  #else
329  nTotal_.set(nLocal);
330  #endif
331  }
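
  /*
  * Editorial sketch (not in the original source): the reduction above sends
  * the sum only to rank 0, so nTotal_ is meaningful only on the master after
  * this call. A hypothetical caller (the isMaster() and communicator() names
  * are assumptions) might use it as:
  *
  *    storage.computeNTotal(domain.communicator());
  *    if (domain.isMaster()) {
  *       int nGroupTotal = storage.nTotal();
  *    }
  */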
332 
333  /*
334  * Compute memory usage statistics (call on all processors).
335  */
336  template <int N>
337  #ifdef UTIL_MPI
338  void GroupStorage<N>::computeStatistics(MPI::Intracomm& communicator)
339  #else
340  void GroupStorage<N>::computeStatistics()
341  #endif
342  {
343  #ifdef UTIL_MPI
344  int maxNGroupGlobal;
345  communicator.Allreduce(&maxNGroupLocal_, &maxNGroupGlobal, 1,
346  MPI::INT, MPI::MAX);
347  maxNGroup_.set(maxNGroupGlobal);
348  maxNGroupLocal_ = maxNGroupGlobal;
349  #else
350  maxNGroup_.set(maxNGroupLocal_);
351  #endif
352  }
353 
354  /*
355  * Clear all statistics.
356  */
357  template <int N>
358  void GroupStorage<N>::clearStatistics()
359  {
360  maxNGroupLocal_ = 0;
361  maxNGroup_.unset();
362  }
363 
364  /*
365  * Output statistics.
366  */
367  template <int N>
368  void GroupStorage<N>::outputStatistics(std::ostream& out)
369  {
370 
371  out << std::endl;
372  out << "GroupStorage<" << N << ">" << std::endl;
373  out << "NGroup: max, capacity "
374  << Int(maxNGroup_.value(), 10)
375  << Int(capacity_, 10)
376  << std::endl;
377  }
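
  /*
  * Editorial sketch (not in the original source): with the Int(value, 10)
  * field formatting used above, the output looks roughly like this
  * (numbers are placeholders):
  *
  *    GroupStorage<2>
  *    NGroup: max, capacity        123      4096
  */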
378 
379  /*
380  * Check validity of all groups on this processor.
381  */
382  template <int N>
383  #ifdef UTIL_MPI
384  bool GroupStorage<N>::isValid(AtomStorage& atomStorage,
385  MPI::Intracomm& communicator,
386  bool hasGhosts)
387  #else
388  bool GroupStorage<N>::isValid(AtomStorage& atomStorage, bool hasGhosts)
389  #endif
390  {
391  int i;
392  int atomId;
393  int nAtom; // Number of local atoms in particular group.
394  int nGhost; // Number of ghost atoms in particular group.
395  int nAtomGroup = 0; // Number of local atoms in all groups on processor
396  Atom* atomPtr;
397  ConstGroupIterator<N> groupIter;
398 
399  // Call simpler function that only checks storage data structures.
400  isValid();
401 
402  // Loop over groups.
403  const AtomMap& atomMap = atomStorage.map();
404  for (begin(groupIter); groupIter.notEnd(); ++groupIter) {
405  nAtom = 0;
406  nGhost = 0;
407  for (i = 0; i < N; ++i) {
408  atomId = groupIter->atomId(i);
409  if (atomId < 0 || atomId >= atomStorage.totalAtomCapacity()) {
410  UTIL_THROW("Invalid atom id in Group");
411  }
412  atomPtr = groupIter->atomPtr(i);
413  if (atomPtr) {
414  if (atomPtr != atomMap.find(atomId)) {
415  UTIL_THROW("Inconsistent non-null atom pointer in Group");
416  }
417  if (atomPtr->isGhost()) {
418  ++nGhost;
419  } else {
420  ++nAtom;
421  }
422  } else {
423  atomPtr = atomMap.find(atomId);
424  if (atomPtr != 0) {
425  if (atomPtr->isGhost()) {
426  if (hasGhosts) {
427  UTIL_THROW("Missing ghost atom");
428  }
429  } else {
430  UTIL_THROW("Missing local atom");
431  }
432  }
433  }
434  }
435  if (nAtom == 0) {
436  UTIL_THROW("Empty group");
437  }
438  if (hasGhosts && (nAtom + nGhost) < N) {
439  UTIL_THROW("Incomplete group");
440  }
441  nAtomGroup += nAtom;
442  }
443 
444  // Count number of distinct groups.
445  #ifdef UTIL_MPI
446  unsetNTotal();
447  computeNTotal(communicator);
448  #endif
449 
450  #ifdef UTIL_MPI
451  // Count & return number of local atoms in groups on all processors.
452  int nAtomGroupTotal;
453  const int source = 0;
454  communicator.Reduce(&nAtomGroup, &nAtomGroupTotal, 1,
455  MPI::INT, MPI::SUM, source);
456  if (communicator.Get_rank() == source) {
457  if (!nTotal_.isSet()) {
458  UTIL_THROW("nTotal not set");
459  }
460  if (nAtomGroupTotal != N*nTotal()) {
461  Log::file() << "nAtomGroupTotal = " << nAtomGroupTotal << std::endl;
462  Log::file() << "nTotal*N = " << N*nTotal() << std::endl;
463  UTIL_THROW("Discrepancy in number of local atoms in Group objects");
464  }
465  }
466  #endif
467 
468  return true;
469  }
470 
471  /*
472  * Mark nTotal as unknown.
473  */
474  template <int N>
475  void GroupStorage<N>::unsetNTotal()
476  { nTotal_.unset(); }
477 
478  /*
479  * Identify groups that span boundaries.
480  *
481  * This function is called by exchangeAtoms, after computing plans for
482  * exchanging atoms, based on their position, but before exchanging
483  * atoms, and before clearing ghosts from any previous exchange.
484  *
485  * Algorithm: Loop over all Group<N> objects in the storage, identify
486  * groups that span boundaries of the processor domain associated with
487  * each of 6 transfer directions (3 Cartesian directions, and transfer
488  * "down" (j=0) and "up" (j=1) in each direction). This requires information
489  * about positions of ghost as well as local atoms. For each boundary of
490  * the domain, identify atoms whose positions are "inside" and "outside".
491  * Count ghost atoms very near the boundary as both inside and outside,
492  * for safety. If a group has atoms both inside and outside a domain
493  * boundary, it is marked for sending in the associated communication
494  * step.
495  *
496  * After calculating a ghost communication plan for each group, clear
497  * the pointers to all ghost atoms in the group. The exchangeAtoms
498  * function will clear the actual ghost atoms from the AtomStorage.
499  */
500  template <int N> void
501  GroupStorage<N>::markSpanningGroups(FMatrix<double, Dimension, 2>& bound,
502  FMatrix<double, Dimension, 2>& inner,
503  FMatrix<double, Dimension, 2>& outer,
504  IntVector& gridFlags)
505  {
506  double coordinate;
507  GroupIterator<N> groupIter;
508  Atom* atomPtr;
509  int nIn, nOut, i, j, k;
510  bool isComplete;
511  bool choose;
512 
513  // Loop over groups
514  begin(groupIter);
515  for ( ; groupIter.notEnd(); ++groupIter) {
516  groupIter->plan().clearFlags();
517 
518  isComplete = (groupIter->nPtr() == N); // Is this group complete?
519 
520  if (isComplete) {
521 
522  for (i = 0; i < Dimension; ++i) {
523  if (gridFlags[i]) {
524  for (j = 0; j < 2; ++j) {
525 
526  // Determine if Group may span boundary (i, j)
527  choose = false;
528  nIn = 0;
529  nOut = 0;
530  // Loop over atoms in group
531  for (k = 0; k < N; ++k) {
532  atomPtr = groupIter->atomPtr(k);
533  assert(atomPtr);
534  coordinate = atomPtr->position()[i];
535  if (atomPtr->isGhost()) {
536  if (j == 0) {
537  assert(inner(i, j) > bound(i, j));
538  if (coordinate < inner(i, j)) {
539  ++nOut;
540  }
541  if (coordinate > outer(i, j)) {
542  ++nIn;
543  }
544  } else { // if j = 1
545  assert(inner(i, j) < bound(i, j));
546  if (coordinate > inner(i, j)) {
547  ++nOut;
548  }
549  if (coordinate < outer(i, j)) {
550  ++nIn;
551  }
552  }
553  } else { // if atomPtr points to local atom
554  if (atomPtr->plan().exchange(i, j)) {
555  ++nOut;
556  } else {
557  ++nIn;
558  }
559  }
560  } // end for k (atoms in group)
561  if (nOut > 0 && nIn > 0) {
562  choose = true;
563  }
564  if (choose) {
565  groupIter->plan().setGhost(i, j);
566  } else {
567  groupIter->plan().clearGhost(i, j);
568  }
569  } // end for j = 0, 1
570 
571  #if 0
572  // A complete group may not span both lower (j=0) and upper (j=1) boundaries
573  if (groupIter->plan().ghost(i, 0) && groupIter->plan().ghost(i, 1)) {
574  Log::file() << "Direction " << i << std::endl;
575  Log::file() << "Inner / outer (j=0) = " << inner(i,0)
576  << " " << outer(i, 0) << std::endl;
577  Log::file() << "Inner / outer (j=1) = " << inner(i,1)
578  << " " << outer(i, 1) << std::endl;
579  for (k = 0; k < N; ++k) {
580  atomPtr = groupIter->atomPtr(k);
581  assert(atomPtr);
582  coordinate = atomPtr->position()[i];
583  Log::file() << k << " " << coordinate;
584  if (atomPtr->isGhost()) {
585  Log::file() << " ghost ";
586  } else {
587  Log::file() << " local "
588  << atomPtr->plan().exchange(i, 0) << " "
589  << atomPtr->plan().exchange(i, 1);
590  }
591  Log::file() << std::endl;
592  Log::file() << std::endl;
593  }
594  UTIL_THROW("Group spans both upper and lower boundaries");
595  }
596  #endif
597 
598  } // end if gridFlags[i]
599  } // end for i (Cartesian axes)
600 
601  } else { // if group is not complete
602 
603  // If not complete, mark ghost flag for all multi-processor directions
604  for (i = 0; i < Dimension; ++i) {
605  if (gridFlags[i]) {
606  for (j = 0; j < 2; ++j) {
607  groupIter->plan().setGhost(i, j);
608  }
609  }
610  }
611 
612  } // if-else (isComplete)
613 
614  // Clear pointers to all ghost atoms in this group
615  for (k = 0; k < N; ++k) {
616  atomPtr = groupIter->atomPtr(k);
617  if (atomPtr) {
618  if (atomPtr->isGhost()) {
619  groupIter->clearAtomPtr(k);
620  }
621  }
622  }
623 
624  }
625  }
626 
627  #ifdef UTIL_MPI
628  /*
629  * Pack groups for exchange.
630  *
631  * Pack groups that contain atoms marked for exchange in this
632  * direction (direction i, j), and remove empty groups.
633  *
634  * Algorithm: Loop over groups. If the group contains one or
635  * more atoms that are marked for exchange in direction i, j,
636  * pack the group for sending in that direction. Remove empty groups in
637  * a separate loop.
638  */
639  template <int N>
640  void GroupStorage<N>::pack(int i, int j, Buffer& buffer)
641  {
642  GroupIterator<N> groupIter;
643  Atom* atomPtr;
644  int k, nAtom;
645  bool choose;
646  emptyGroups_.clear();
647 
648  // Pack Groups
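  // Editorial note (assumption): Buffer::GROUP2 + N - 2 presumably selects
  // the send-block type for a group of size N, which relies on the block
  // type ids for sizes 2, 3, 4, ... being consecutive in Buffer.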
649  buffer.beginSendBlock(Buffer::GROUP2 + N - 2);
650  begin(groupIter);
651  for ( ; groupIter.notEnd(); ++groupIter) {
652  choose = false;
653  nAtom = 0;
654  for (k = 0; k < N; ++k) {
655  atomPtr = groupIter->atomPtr(k);
656  if (atomPtr) {
657  if (atomPtr->plan().exchange(i, j)) {
658  choose = true;
659  groupIter->clearAtomPtr(k);
660  } else {
661  ++nAtom;
662  }
663  }
664  }
665  if (nAtom == 0) {
666  emptyGroups_.append(*groupIter);
667  }
668  if (choose) {
669  groupIter->pack(buffer);
670  }
671  }
672  buffer.endSendBlock();
673 
674  // Remove empty groups
675  int nEmpty = emptyGroups_.size();
676  for (int k = 0; k < nEmpty; ++k) {
677  remove(&(emptyGroups_[k]));
678  }
679  }
680 
681  /*
682  * Unpack groups into GroupStorage.
683  */
684  template <int N>
685  void GroupStorage<N>::unpack(Buffer& buffer, AtomStorage& atomStorage)
686  {
687  Group<N>* newGroupPtr;
688  Group<N>* oldGroupPtr;
689  const AtomMap& atomMap = atomStorage.map();
690  int groupId;
691 
692  buffer.beginRecvBlock();
693  while (buffer.recvSize() > 0) {
694  newGroupPtr = newPtr();
695  newGroupPtr->unpack(buffer);
696  groupId = newGroupPtr->id();
697  oldGroupPtr = find(groupId);
698  if (oldGroupPtr) {
699  returnPtr();
700  atomMap.findGroupLocalAtoms(*oldGroupPtr);
701  } else {
702  add();
703  atomMap.findGroupLocalAtoms(*newGroupPtr);
704  }
705  }
706  buffer.endRecvBlock();
707  assert(buffer.recvSize() == 0);
708  }
709  #endif // endif ifdef UTIL_MPI
710 
711  /*
712  * Set ghost communication flags for all atoms in incomplete groups.
713  *
714  * Precondition: This is called by exchangeAtoms after exchanging atoms
715  * and groups between neighboring processors. At this point, there are
716  * no ghost atoms.
717  *
718  * Algorithm: Loop over all Group<N> objects in the group storage.
719  * For each group, check if the group is incomplete, implying that one or
720  * more atoms in the group are owned by another processor. If the group
721  * is incomplete, loop over 6 transfer directions. For each direction,
722  * if the group is marked for sending in that direction, set the
723  * ghost communication flag for transfer in that direction for every
724  * local atom in the group. Also add each such atom to sendArray(i, j).
725  *
726  * Note: If a group is incomplete on this processor, and thus
727  * contains atoms owned by other processors, the algorithm assumes
728  * that the ghost communication flag for each atom will be set by
729  * the processor that owns the atom.
730  */
731  template <int N> void
732  GroupStorage<N>::markGhosts(AtomStorage& atomStorage,
733  FMatrix<GPArray<Atom>, Dimension, 2>& sendArray,
734  IntVector& gridFlags)
735  {
736  GroupIterator<N> groupIter;
737  Atom* atomPtr;
738  Plan* planPtr;
739  int i, j, k, nAtom;
740 
741  // Loop over groups
742  begin(groupIter);
743  for ( ; groupIter.notEnd(); ++groupIter) {
744 
745  #ifdef UTIL_DEBUG
746  #ifdef DDMD_GROUP_STORAGE_DEBUG
747  // Validate group
748  const AtomMap& atomMap = atomStorage.map();
749  int atomId;
750  nAtom = 0;
751  for (k = 0; k < N; ++k) {
752  atomPtr = groupIter->atomPtr(k);
753  atomId = groupIter->atomId(k);
754  if (atomPtr != 0) {
755  if (atomPtr != atomMap.find(atomId)) {
756  UTIL_THROW("Error in atom pointer in group");
757  }
758  if (atomPtr->isGhost()) {
759  UTIL_THROW("Pointer to ghost atom in group");
760  } else {
761  ++nAtom;
762  }
763  } else { // if atomPtr == 0
764  atomPtr = atomMap.find(atomId);
765  if (atomPtr) {
766  if (!atomPtr->isGhost()) {
767  UTIL_THROW("Missing pointer to local atom in group");
768  }
769  }
770  }
771  }
772  assert(nAtom == groupIter->nPtr());
773  if (nAtom == 0) {
774  UTIL_THROW("Empty group");
775  }
776  #endif // ifdef DDMD_GROUP_STORAGE_DEBUG
777  #endif // ifdef UTIL_DEBUG
778 
779  // If this group is incomplete, set ghost flags for atoms
780  nAtom = groupIter->nPtr();
781  if (nAtom < N) {
782  for (i = 0; i < Dimension; ++i) {
783  if (gridFlags[i]) {
784  for (j = 0; j < 2; ++j) {
785  if (groupIter->plan().ghost(i, j)) {
786  for (k = 0; k < N; ++k) {
787  atomPtr = groupIter->atomPtr(k);
788  if (atomPtr) {
789  assert(!atomPtr->isGhost());
790  planPtr = &atomPtr->plan();
791  if (!planPtr->ghost(i, j)) {
792  planPtr->setGhost(i, j);
793  sendArray(i, j).append(*atomPtr);
794  }
795  }
796  }
797  }
798  }
799  }
800  }
801  }
802 
803  }
804  }
805 
806  /*
807  * Find ghost members of groups after exchanging all ghosts.
808  */
809  template <int N>
810  void GroupStorage<N>::findGhosts(AtomStorage& atomStorage)
811  {
812  GroupIterator<N> groupIter;
813  const AtomMap& atomMap = atomStorage.map();
814  int nAtom;
815  for (begin(groupIter); groupIter.notEnd(); ++groupIter) {
816  nAtom = groupIter->nPtr();
817  if (nAtom < N) {
818  nAtom = atomMap.findGroupGhostAtoms(*groupIter);
819  if (nAtom < N) {
820  UTIL_THROW("Incomplete group after search for ghosts");
821  }
822  }
823  }
824  }
825 
826 } // namespace DdMd
827 #endif