1#ifndef PSPG_AM_ITERATOR_GRID_TPP
2#define PSPG_AM_ITERATOR_GRID_TPP
11#include "AmIteratorGrid.h"
12#include <pspg/System.h>
13#include <pscf/inter/Interaction.h>
14#include <pspg/field/RDField.h>
// NOTE(review): this file is a line-numbered documentation extraction; the
// leading integers (42, 49, ...) are fused original line numbers and many
// intervening lines were dropped. The text below is fragmentary and is not
// compilable as-is.
// Fragment of readParameters(): size the local interaction object to the
// mixture's monomer count, then read two optional parameters.
42 interaction_.setNMonomer(system().mixture().nMonomer());
// Optional flag: iterate unit-cell parameters (flexible cell) when true.
49 readOptional(in,
"isFlexible", isFlexible_);
// Optional scale factor applied to stress residual entries —
// TODO confirm its default in the full source.
50 readOptional(in,
"scaleStress", scaleStress_);
// Fragment of setup(): refresh the local chi-interaction copy from system.
60 interaction_.update(system().interaction());
// GPU launch-configuration locals (assigned on dropped lines).
69 int nBlocks, nThreads;
// Fragment of dotProduct(): inner product of two device fields via
// gpuInnerProduct over n = a.capacity() elements, widened to double on the
// host. The return statement and any checks are on dropped lines.
77 double AmIteratorGrid<D>::dotProduct(FieldCUDA
const & a,
80 const int n = a.capacity();
82 double product = (double)gpuInnerProduct(a.cDField(), b.cDField(), n);
// Fragment of maxAbs(): maximum absolute value of a device field via
// gpuMaxAbs. NOTE(review): the line defining n was dropped from this
// excerpt — presumably n = a.capacity(); confirm against the full source.
87 double AmIteratorGrid<D>::maxAbs(FieldCUDA
const & a)
90 cudaReal max = gpuMaxAbs(a.cDField(), n);
// Fragment of basis construction: the newest basis vector is the element-wise
// difference of the two most recent history fields (hists[0] - hists[1]),
// computed on the GPU into newbasis.
109 pointWiseBinarySubtract<<<nBlocks,nThreads>>>
110 (hists[0].cDField(),hists[1].cDField(),newbasis.cDField(),n);
// Fragment of addHistories(): for each of the nHist stored basis vectors,
// subtract coeffs[i] * basis[i] from the trial field (note the -1 factor
// passed to the add-scale kernel).
116 void AmIteratorGrid<D>::addHistories(FieldCUDA& trial,
124 for (
int i = 0; i < nHist; i++) {
125 pointWiseAddScale<<<nBlocks, nThreads>>>
126 (trial.cDField(), basis[i].cDField(), -1*coeffs[i], trial.
capacity());
// Fragment of addPredictedError(): fieldTrial += lambda * resTrial,
// element-wise on the GPU over the full field capacity.
131 void AmIteratorGrid<D>::addPredictedError(FieldCUDA& fieldTrial,
132 FieldCUDA
const & resTrial,
double lambda)
138 pointWiseAddScale<<<nBlocks, nThreads>>>
139 (fieldTrial.cDField(), resTrial.cDField(), lambda,
140 fieldTrial.capacity());
// hasInitialGuess(): true when the system already holds w-field data.
144 bool AmIteratorGrid<D>::hasInitialGuess()
145 {
return system().w().hasData(); }
// Fragment of nElements(): total residual/state vector length is
// nMonomer * nMesh field values, plus one entry per unit-cell parameter.
// NOTE(review): the guard that presumably conditions the += on a flexible
// cell (isFlexible_) is on a dropped line — confirm in the full source.
148 int AmIteratorGrid<D>::nElements()
150 const int nMonomer = system().mixture().nMonomer();
151 const int nMesh = system().mesh().size();
153 int nEle = nMonomer*nMesh;
156 nEle += system().unitCell().nParameter();
// Fragment of getCurrent(): pack the current iterator state into curr —
// one nMesh-sized device block per monomer field, then (for flexible cells)
// the scaled unit-cell parameters appended at the tail.
163 void AmIteratorGrid<D>::getCurrent(FieldCUDA& curr)
165 const int nMonomer = system().mixture().nMonomer();
166 const int nMesh = system().mesh().size();
167 const int n = nElements();
// Device-to-device copy of each monomer field into its slot of curr.
177 for (
int i = 0; i < nMonomer; i++) {
178 assignReal<<<nBlocks,nThreads>>>(curr.cDField() + i*nMesh,
179 (*currSys)[i].cDField(), nMesh);
// Stage scaled unit-cell parameters in a host buffer before the H2D copy.
184 const int nParam = system().unitCell().nParameter();
187 cudaReal* temp =
new cudaReal[nParam];
188 for (
int k = 0; k < nParam; k++)
189 temp[k] = (cudaReal)scaleStress_*currParam[k];
// NOTE(review): no delete[] temp is visible in this excerpt — confirm the
// full source frees the buffer after the copy.
192 cudaMemcpy(curr.cDField() + nMonomer*nMesh, temp,
193 nParam*
sizeof(cudaReal), cudaMemcpyHostToDevice);
// Fragment of evaluate(): compute stress from the current solution.
// NOTE(review): the SCF solve and any flexible-cell guard presumably
// precede this call on dropped lines — confirm in the full source.
199 void AmIteratorGrid<D>::evaluate()
205 system().mixture().computeStress(system().domain().waveList());
// Fragment of getResidual(): build the SCF residual vector. Layout mirrors
// getCurrent(): nMonomer blocks of nMesh entries, then (flexible cell)
// nParam scaled stress entries at the tail.
210 void AmIteratorGrid<D>::getResidual(FieldCUDA& resid)
212 const int n = nElements();
213 const int nMonomer = system().mixture().nMonomer();
214 const int nMesh = system().mesh().size();
// Zero the whole residual before accumulation.
222 assignUniformReal<<<nBlocks, nThreads>>>(resid.cDField(), 0, n);
// Accumulate, per monomer i: sum over j of chi(i,j)*c_j - p(i,j)*w_j
// (kernel tail arguments are on dropped lines).
225 for (
int i = 0; i < nMonomer; i++) {
226 int startIdx = i*nMesh;
227 for (
int j = 0; j < nMonomer; j++) {
228 pointWiseAddScale<<<nBlocks, nThreads>>>
229 (resid.cDField() + startIdx,
230 system().c().rgrid(j).cDField(),
231 interaction_.chi(i, j),
233 pointWiseAddScale<<<nBlocks, nThreads>>>
234 (resid.cDField() + startIdx,
235 system().w().rgrid(j).cDField(),
236 -interaction_.p(i, j),
// Non-canonical (open) ensemble: shift each block by a uniform value
// derived from factor (the subtracted operand is on dropped lines).
242 if (!system().mixture().isCanonical()) {
243 cudaReal factor = 1/(cudaReal)interaction_.sumChiInverse();
244 for (
int i = 0; i < nMonomer; ++i) {
245 subtractUniform<<<nBlocks, nThreads>>>(resid.cDField() + i*nMesh,
// Otherwise (presumably the else branch — confirm): remove each block's
// spatial average so residuals have zero mean.
249 for (
int i = 0; i < nMonomer; i++) {
251 cudaReal average = findAverage(resid.cDField()+i*nMesh, nMesh);
253 subtractUniform<<<nBlocks, nThreads>>>(resid.cDField() + i*nMesh,
// Flexible-cell tail: append negated, scaled stress components (H2D copy).
260 const int nParam = system().unitCell().nParameter();
261 cudaReal* stress =
new cudaReal[nParam];
263 for (
int i = 0; i < nParam; i++) {
264 stress[i] = (cudaReal)(-1*scaleStress_*system().mixture().stress(i));
// NOTE(review): no delete[] stress is visible in this excerpt — confirm it
// is freed in the full source.
267 cudaMemcpy(resid.cDField()+nMonomer*nMesh, stress,
268 nParam*
sizeof(cudaReal), cudaMemcpyHostToDevice);
// Fragment of update(): install the accepted guess back into the system.
273 void AmIteratorGrid<D>::update(FieldCUDA& newGuess)
275 const int nMonomer = system().mixture().nMonomer();
276 const int nMesh = system().mesh().size();
// Canonical ensemble: field averages are indeterminate, so re-center each
// w field — subtract its own average, then add back sum_j chi(i,j)*<c_j>.
283 if (system().mixture().isCanonical()) {
284 cudaReal average, wAverage, cAverage;
285 for (
int i = 0; i < nMonomer; i++) {
287 average = findAverage(newGuess.cDField() + i*nMesh, nMesh);
290 subtractUniform<<<nBlocks, nThreads>>>(newGuess.cDField() + i*nMesh,
// NOTE(review): the wAverage reset (wAverage = 0) is on a dropped line —
// confirm in the full source before reasoning about this accumulation.
295 for (
int j = 0; j < nMonomer; j++) {
297 cAverage = findAverage(system().c().rgrid(j).cDField(), nMesh);
298 wAverage += interaction_.chi(i,j) * cAverage;
300 addUniform<<<nBlocks, nThreads>>>(newGuess.cDField() + i*nMesh,
// Install the fields and re-impose space-group symmetry.
305 system().setWRGrid(newGuess);
306 system().symmetrizeWFields();
// Flexible cell: copy the trailing parameter entries device-to-host,
// unscale them (divide by scaleStress_), and set the new unit cell.
311 const int nParam = system().unitCell().nParameter();
312 cudaReal* temp =
new cudaReal[nParam];
314 cudaMemcpy(temp, newGuess.cDField() + nMonomer*nMesh,
315 nParam*
sizeof(cudaReal), cudaMemcpyDeviceToHost);
316 for (
int i = 0; i < nParam; i++) {
317 parameters.
append(1/scaleStress_ * (
double)temp[i]);
// NOTE(review): no delete[] temp is visible in this excerpt — confirm.
319 system().setUnitCell(parameters);
// Fragment of outputToLog(): write each unit-cell parameter to the log.
// NOTE(review): any flexible-cell guard and the stream tail (newline/width)
// are on dropped lines — confirm in the full source.
327 void AmIteratorGrid<D>::outputToLog()
330 const int nParam = system().unitCell().nParameter();
331 for (
int i = 0; i < nParam; i++) {
332 Log::file() <<
"Parameter " << i <<
" = "
333 <<
Dbl(system().unitCell().parameters()[i])
// Fragment of findAverage(): mean of a device array — GPU reduction via
// gpuSum divided by the element count n (return statement is on a
// dropped line).
342 cudaReal AmIteratorGrid<D>::findAverage(cudaReal
const * field,
int n)
344 cudaReal average = gpuSum(field, n)/n;
Template for the Anderson mixing iterator algorithm.
Pspg implementation of the Anderson mixing iterator.
~AmIteratorGrid()
Destructor.
void readParameters(std::istream &in)
Read all parameters and initialize.
AmIteratorGrid(System< D > &system)
Constructor.
void setup(bool isContinuation)
Setup iterator just before entering iteration loop.
Data * cDField()
Return pointer to underlying C array.
void allocate(int capacity)
Allocate the underlying C array on the device.
int capacity() const
Return allocated size.
Base class for iterative solvers for SCF equations.
Main class in SCFT simulation of one system.
int capacity() const
Return allocated size.
Dynamically allocatable contiguous array template.
Wrapper for a double precision number, for formatted ostream output.
A fixed capacity (static) contiguous array with a variable logical size.
void append(Data const &data)
Append data to the end of the array.
static std::ostream & file()
Get log ostream by reference.
Class for storing history of previous values in an array.
int capacity() const
Return the capacity of the buffer.
int size() const
Return number of values currently in the buffer.
void append(Data const &value)
Add a new value to the buffer.
File containing preprocessor macros for error handling.
#define UTIL_CHECK(condition)
Assertion macro suitable for serial or parallel production code.
int nThreads()
Get the number of threads per block for execution.
int nBlocks()
Get the current number of blocks for execution.
void setThreadsLogical(int nThreadsLogical)
Set the total number of threads required for execution.
C++ namespace for polymer self-consistent field theory (PSCF).
Utility classes for scientific computation.
float product(float a, float b)
Product for float Data.