Commit

Alloc gpu_particleCharge just once and reuse
LSchwiebert committed Jul 3, 2024
1 parent aa0788c commit 61814cc
Showing 5 changed files with 38 additions and 76 deletions.
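In outline, the commit replaces a per-call allocate/copy/free cycle for the particle-charge array with a single device buffer owned by VariablesCUDA: it is allocated once in InitCoordinatesCUDA (sized for maxAtomNumber), reused by every caller through vars->gpu_particleCharge, and released in DestroyCUDAVars. A minimal sketch of the before/after pattern, with CallKernelBefore/CallKernelAfter as hypothetical stand-ins for the real call sites shown in the diff below (the real code uses the CUMALLOC/CUFREE wrappers):

#include <vector>
#include <cuda_runtime.h>
#include "VariablesCUDA.cuh" // defines VariablesCUDA with gpu_particleCharge

// Before: each call allocated and freed its own device copy of the charges.
void CallKernelBefore(const std::vector<double> &particleCharge) {
  double *gpu_particleCharge;
  cudaMalloc((void **)&gpu_particleCharge,
             particleCharge.size() * sizeof(double));
  cudaMemcpy(gpu_particleCharge, particleCharge.data(),
             particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);
  // ... launch a kernel that reads gpu_particleCharge ...
  cudaFree(gpu_particleCharge);
}

// After: the buffer is allocated once (InitCoordinatesCUDA, maxAtomNumber
// doubles), reused here, and freed once in DestroyCUDAVars.
void CallKernelAfter(VariablesCUDA *vars,
                     const std::vector<double> &particleCharge) {
  cudaMemcpy(vars->gpu_particleCharge, particleCharge.data(),
             particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);
  // ... launch a kernel that reads vars->gpu_particleCharge ...
}

The trade-off is a slightly larger resident allocation in exchange for removing repeated cudaMalloc/cudaFree calls from frequently executed code paths.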
8 changes: 2 additions & 6 deletions src/GPU/CalculateEnergyCUDAKernel.cu
@@ -38,7 +38,6 @@ void CallBoxInterGPU(VariablesCUDA *vars, const std::vector<int> &cellVector,
int numberOfCells = neighborList.size();
int *gpu_particleKind, *gpu_particleMol;
int *gpu_neighborList, *gpu_cellStartIndex;
double *gpu_particleCharge;
double *gpu_REn, *gpu_LJEn;

// Run the kernel
@@ -56,8 +55,6 @@ void CallBoxInterGPU(VariablesCUDA *vars, const std::vector<int> &cellVector,

CUMALLOC((void **)&gpu_neighborList, neighborListCount * sizeof(int));
CUMALLOC((void **)&gpu_cellStartIndex, cellStartIndex.size() * sizeof(int));
CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));
CUMALLOC((void **)&gpu_particleKind, particleKind.size() * sizeof(int));
CUMALLOC((void **)&gpu_particleMol, particleMol.size() * sizeof(int));
CUMALLOC((void **)&gpu_LJEn, energyVectorLen * sizeof(double));
@@ -72,7 +69,7 @@ void CallBoxInterGPU(VariablesCUDA *vars, const std::vector<int> &cellVector,
cellStartIndex.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(vars->gpu_cellVector, &cellVector[0], atomNumber * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_particleKind, &particleKind[0],
particleKind.size() * sizeof(int), cudaMemcpyHostToDevice);
@@ -95,7 +92,7 @@ void CallBoxInterGPU(VariablesCUDA *vars, const std::vector<int> &cellVector,
BoxInterGPU<<<blocksPerGrid, threadsPerBlock>>>(
gpu_cellStartIndex, vars->gpu_cellVector, gpu_neighborList, numberOfCells,
vars->gpu_x, vars->gpu_y, vars->gpu_z, axis, halfAx, electrostatic,
gpu_particleCharge, gpu_particleKind, gpu_particleMol, gpu_REn, gpu_LJEn,
vars->gpu_particleCharge, gpu_particleKind, gpu_particleMol, gpu_REn, gpu_LJEn,
vars->gpu_sigmaSq, vars->gpu_epsilon_Cn, vars->gpu_n, vars->gpu_VDW_Kind,
vars->gpu_isMartini, vars->gpu_count, vars->gpu_rCut,
vars->gpu_rCutCoulomb, vars->gpu_rCutLow, vars->gpu_rOn, vars->gpu_alpha,
@@ -134,7 +131,6 @@ void CallBoxInterGPU(VariablesCUDA *vars, const std::vector<int> &cellVector,
}

CUFREE(d_temp_storage);
CUFREE(gpu_particleCharge);
CUFREE(gpu_particleKind);
CUFREE(gpu_particleMol);
CUFREE(gpu_LJEn);
67 changes: 14 additions & 53 deletions src/GPU/CalculateEwaldCUDAKernel.cu
@@ -35,13 +35,9 @@ void CallBoxReciprocalSetupGPU(VariablesCUDA *vars, XYZArray const &coords,
const std::vector<double> &particleCharge,
uint imageSize, double *prefact, double *hsqr,
double &energyRecip, uint box) {
double *gpu_particleCharge;
int atomNumber = particleCharge.size();

CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));

cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(vars->gpu_x, coords.x, atomNumber * sizeof(double),
cudaMemcpyHostToDevice);
@@ -70,7 +66,7 @@ void CallBoxReciprocalSetupGPU(VariablesCUDA *vars, XYZArray const &coords,
(int)(atomNumber / PARTICLES_PER_BLOCK) + 1, 1);
BoxReciprocalSumsGPU<<<blocksPerGrid, threadsPerBlock>>>(
vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_kx[box],
vars->gpu_ky[box], vars->gpu_kz[box], atomNumber, gpu_particleCharge,
vars->gpu_ky[box], vars->gpu_kz[box], atomNumber, vars->gpu_particleCharge,
vars->gpu_sumRnew[box], vars->gpu_sumInew[box], imageSize);
#ifndef NDEBUG
cudaDeviceSynchronize();
@@ -92,22 +88,16 @@ void CallBoxReciprocalSetupGPU(VariablesCUDA *vars, XYZArray const &coords,
vars->gpu_finalVal, imageSize);
cudaMemcpy(&energyRecip, vars->gpu_finalVal, sizeof(double),
cudaMemcpyDeviceToHost);

CUFREE(gpu_particleCharge);
}

// Use this function when calculating the reciprocal terms
// with the current volume.
void CallBoxReciprocalSumsGPU(VariablesCUDA *vars, XYZArray const &coords,
const std::vector<double> &particleCharge,
uint imageSize, double &energyRecip, uint box) {
double *gpu_particleCharge;
int atomNumber = particleCharge.size();

CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));

cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(vars->gpu_x, coords.x, atomNumber * sizeof(double),
cudaMemcpyHostToDevice);
@@ -127,7 +117,7 @@ void CallBoxReciprocalSumsGPU(VariablesCUDA *vars, XYZArray const &coords,
BoxReciprocalSumsGPU<<<blocksPerGrid, threadsPerBlock>>>(
vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_kxRef[box],
vars->gpu_kyRef[box], vars->gpu_kzRef[box], atomNumber,
gpu_particleCharge, vars->gpu_sumRnew[box], vars->gpu_sumInew[box],
vars->gpu_particleCharge, vars->gpu_sumRnew[box], vars->gpu_sumInew[box],
imageSize);
#ifndef NDEBUG
cudaDeviceSynchronize();
@@ -149,8 +139,6 @@ void CallBoxReciprocalSumsGPU(VariablesCUDA *vars, XYZArray const &coords,
vars->gpu_finalVal, imageSize);
cudaMemcpy(&energyRecip, vars->gpu_finalVal, sizeof(double),
cudaMemcpyDeviceToHost);

CUFREE(gpu_particleCharge);
}

__global__ void BoxReciprocalSumsGPU(double *gpu_x, double *gpu_y,
@@ -213,13 +201,9 @@ void CallMolReciprocalGPU(VariablesCUDA *vars, XYZArray const &currentCoords,
// Calculate atom number -- exclude uncharged particles
int atomNumber = particleCharge.size();
int newCoordsNumber = particleCharge.size();
double *gpu_particleCharge;
int blocksPerGrid, threadsPerBlock;

CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));

cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);

cudaMemcpy(vars->gpu_x, currentCoords.x, atomNumber * sizeof(double),
@@ -241,7 +225,7 @@ void CallMolReciprocalGPU(VariablesCUDA *vars, XYZArray const &currentCoords,
MolReciprocalGPU<<<blocksPerGrid, threadsPerBlock>>>(
vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_nx, vars->gpu_ny,
vars->gpu_nz, vars->gpu_kxRef[box], vars->gpu_kyRef[box],
vars->gpu_kzRef[box], atomNumber, gpu_particleCharge,
vars->gpu_kzRef[box], atomNumber, vars->gpu_particleCharge,
vars->gpu_sumRnew[box], vars->gpu_sumInew[box], vars->gpu_sumRref[box],
vars->gpu_sumIref[box], vars->gpu_prefactRef[box], vars->gpu_recipEnergies,
imageSize);
@@ -255,8 +239,6 @@ void CallMolReciprocalGPU(VariablesCUDA *vars, XYZArray const &currentCoords,
vars->gpu_finalVal, imageSize);
cudaMemcpy(&energyRecipNew, vars->gpu_finalVal, sizeof(double),
cudaMemcpyDeviceToHost);

CUFREE(gpu_particleCharge);
}

// Calculate reciprocal term for lambdaNew and Old with same coordinates
Expand All @@ -267,13 +249,9 @@ void CallChangeLambdaMolReciprocalGPU(VariablesCUDA *vars,
const double lambdaCoef, uint box) {
// Calculate atom number -- exclude uncharged particles
int atomNumber = particleCharge.size();
double *gpu_particleCharge;
int blocksPerGrid, threadsPerBlock;

CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));

cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);

cudaMemcpy(vars->gpu_x, coords.x, atomNumber * sizeof(double),
@@ -288,7 +266,7 @@ void CallChangeLambdaMolReciprocalGPU(VariablesCUDA *vars,
ChangeLambdaMolReciprocalGPU<<<blocksPerGrid, threadsPerBlock>>>(
vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_kxRef[box],
vars->gpu_kyRef[box], vars->gpu_kzRef[box], atomNumber,
gpu_particleCharge, vars->gpu_sumRnew[box], vars->gpu_sumInew[box],
vars->gpu_particleCharge, vars->gpu_sumRnew[box], vars->gpu_sumInew[box],
vars->gpu_sumRref[box], vars->gpu_sumIref[box], vars->gpu_prefactRef[box],
vars->gpu_recipEnergies, lambdaCoef, imageSize);
#ifndef NDEBUG
Expand All @@ -301,8 +279,6 @@ void CallChangeLambdaMolReciprocalGPU(VariablesCUDA *vars,
vars->gpu_finalVal, imageSize);
cudaMemcpy(&energyRecipNew, vars->gpu_finalVal, sizeof(double),
cudaMemcpyDeviceToHost);

CUFREE(gpu_particleCharge);
}

void CallSwapReciprocalGPU(VariablesCUDA *vars, XYZArray const &coords,
Expand All @@ -312,13 +288,8 @@ void CallSwapReciprocalGPU(VariablesCUDA *vars, XYZArray const &coords,
{
// Calculate atom number -- exclude uncharged particles
int atomNumber = particleCharge.size();
// given coordinates
double *gpu_particleCharge;

CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));

cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);

cudaMemcpy(vars->gpu_x, coords.x, atomNumber * sizeof(double),
@@ -333,7 +304,7 @@ void CallSwapReciprocalGPU(VariablesCUDA *vars, XYZArray const &coords,
SwapReciprocalGPU<<<blocksPerGrid, threadsPerBlock>>>(
vars->gpu_x, vars->gpu_y, vars->gpu_z, vars->gpu_kxRef[box],
vars->gpu_kyRef[box], vars->gpu_kzRef[box], particleCharge.size(),
gpu_particleCharge, vars->gpu_sumRnew[box], vars->gpu_sumInew[box],
vars->gpu_particleCharge, vars->gpu_sumRnew[box], vars->gpu_sumInew[box],
vars->gpu_sumRref[box], vars->gpu_sumIref[box], vars->gpu_prefactRef[box],
insert, vars->gpu_recipEnergies, imageSize);
#ifndef NDEBUG
Expand All @@ -346,8 +317,6 @@ void CallSwapReciprocalGPU(VariablesCUDA *vars, XYZArray const &coords,
vars->gpu_finalVal, imageSize);
cudaMemcpy(&energyRecipNew, vars->gpu_finalVal, sizeof(double),
cudaMemcpyDeviceToHost);

CUFREE(gpu_particleCharge);
}

void CallMolExchangeReciprocalGPU(VariablesCUDA *vars,
Expand All @@ -361,17 +330,15 @@ void CallMolExchangeReciprocalGPU(VariablesCUDA *vars,
// Calculate atom number -- exclude uncharged particles
int atomNumber = particleCharge.size();

double *gpu_particleCharge;
double *gpu_MolX;
double *gpu_MolY;
double *gpu_MolZ;

CUMALLOC((void**) &gpu_particleCharge, particleCharge.size() * sizeof(double));
CUMALLOC((void**) &gpu_MolX, atomNumber * sizeof(double));
CUMALLOC((void**) &gpu_MolY, atomNumber * sizeof(double));
CUMALLOC((void**) &gpu_MolZ, atomNumber * sizeof(double));

cudaMemcpy(gpu_particleCharge, &particleCharge[0], particleCharge.size() * sizeof(double),
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0], particleCharge.size() * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(gpu_MolX, &molCoords.x[0], atomNumber * sizeof(double),
cudaMemcpyHostToDevice);
@@ -390,7 +357,7 @@ void CallMolExchangeReciprocalGPU(VariablesCUDA *vars,
vars->gpu_prefactRef[box],
vars->gpu_sumRnew[box],
vars->gpu_sumInew[box],
gpu_particleCharge,
vars->gpu_particleCharge,
numChargedParticles,
gpu_MolX,
gpu_MolY,
@@ -406,7 +373,6 @@ void CallMolExchangeReciprocalGPU(VariablesCUDA *vars,
vars->gpu_recipEnergies, vars->gpu_finalVal, imageSize);
cudaMemcpy(&energyRecipNew, vars->gpu_finalVal, sizeof(double), cudaMemcpyDeviceToHost);

CUFREE(gpu_particleCharge);
CUFREE(gpu_MolX);
CUFREE(gpu_MolY);
CUFREE(gpu_MolZ);
Expand All @@ -423,7 +389,6 @@ void CallBoxForceReciprocalGPU(
XYZArray const &molCoords, BoxDimensions const &boxAxes, int box) {
int atomCount = atomForceRec.Count();
int molCount = molForceRec.Count();
double *gpu_particleCharge;
int *gpu_particleMol;
bool *gpu_particleHasNoCharge, *gpu_particleUsed;
bool *arr_particleHasNoCharge = new bool[particleHasNoCharge.size()];
@@ -442,8 +407,6 @@ void CallBoxForceReciprocalGPU(
int blocksPerGridY = (int)(imageSize / IMAGES_PER_BLOCK) + 1;
dim3 blocksPerGrid(blocksPerGridX, blocksPerGridY, 1);

CUMALLOC((void **)&gpu_particleCharge,
particleCharge.size() * sizeof(double));
CUMALLOC((void **)&gpu_particleHasNoCharge,
particleHasNoCharge.size() * sizeof(bool));
CUMALLOC((void **)&gpu_particleUsed, atomCount * sizeof(bool));
@@ -463,7 +426,7 @@ void CallBoxForceReciprocalGPU(
cudaMemcpyHostToDevice);
cudaMemcpy(vars->gpu_mForceRecz, molForceRec.z, sizeof(double) * molCount,
cudaMemcpyHostToDevice);
cudaMemcpy(gpu_particleCharge, &particleCharge[0],
cudaMemcpy(vars->gpu_particleCharge, &particleCharge[0],
sizeof(double) * particleCharge.size(), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_particleMol, &particleMol[0], sizeof(int) * particleMol.size(),
cudaMemcpyHostToDevice);
@@ -488,7 +451,7 @@ void CallBoxForceReciprocalGPU(
BoxForceReciprocalGPU<<<blocksPerGrid, threadsPerBlock>>>(
vars->gpu_aForceRecx, vars->gpu_aForceRecy, vars->gpu_aForceRecz,
vars->gpu_mForceRecx, vars->gpu_mForceRecy, vars->gpu_mForceRecz,
gpu_particleCharge, gpu_particleMol, gpu_particleHasNoCharge,
vars->gpu_particleCharge, gpu_particleMol, gpu_particleHasNoCharge,
gpu_particleUsed, gpu_startMol, gpu_lengthMol, alpha, alphaSq, constValue,
imageSize, vars->gpu_kxRef[box], vars->gpu_kyRef[box],
vars->gpu_kzRef[box], vars->gpu_x, vars->gpu_y, vars->gpu_z,
@@ -520,7 +483,6 @@ void CallBoxForceReciprocalGPU(
cudaDeviceSynchronize();
#endif
delete[] arr_particleHasNoCharge;
CUFREE(gpu_particleCharge);
CUFREE(gpu_particleHasNoCharge);
CUFREE(gpu_particleUsed);
CUFREE(gpu_startMol);
@@ -777,5 +739,4 @@ __global__ void ChangeLambdaMolReciprocalGPU(
gpu_sumInew[threadID] * gpu_sumInew[threadID]) *
gpu_prefactRef[threadID]);
}

#endif
35 changes: 20 additions & 15 deletions src/GPU/ConstantDefinitionsCUDAKernel.cu
@@ -82,15 +82,19 @@ void InitGPUForceField(VariablesCUDA &vars, double const *sigmaSq,
#endif
}

void InitCoordinatesCUDA(VariablesCUDA *vars, uint atomNumber,
// maxAtomNumber is the maximum number of atoms in the system
// maxAtomsInMol is the maximum number of atoms in one molecule
// maxMolNumber is the maximum number of molecules in the system
void InitCoordinatesCUDA(VariablesCUDA *vars, uint maxAtomNumber,
uint maxAtomsInMol, uint maxMolNumber) {
CUMALLOC((void **)&vars->gpu_x, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_y, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_z, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_x, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_y, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_z, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_particleCharge, maxAtomNumber * sizeof(double));

CUMALLOC((void **)&vars->gpu_dx, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_dy, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_dz, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_dx, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_dy, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_dz, maxAtomNumber * sizeof(double));

CUMALLOC((void **)&vars->gpu_nx, maxAtomsInMol * sizeof(double));
CUMALLOC((void **)&vars->gpu_ny, maxAtomsInMol * sizeof(double));
@@ -124,24 +128,24 @@ void InitCoordinatesCUDA(VariablesCUDA *vars, uint atomNumber,
CUMALLOC((void **)&vars->gpu_Invcell_z[b], 3 * sizeof(double));
}

CUMALLOC((void **)&vars->gpu_aForcex, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForcey, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForcez, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForcex, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForcey, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForcez, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mForcex, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mForcey, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mForcez, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mTorquex, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mTorquey, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mTorquez, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_inForceRange, maxMolNumber * sizeof(int));
CUMALLOC((void **)&vars->gpu_aForceRecx, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForceRecy, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForceRecz, atomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForceRecx, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForceRecy, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_aForceRecz, maxAtomNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mForceRecx, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mForceRecy, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_mForceRecz, maxMolNumber * sizeof(double));
CUMALLOC((void **)&vars->gpu_cellVector, atomNumber * sizeof(int));
CUMALLOC((void **)&vars->gpu_mapParticleToCell, atomNumber * sizeof(int));
CUMALLOC((void **)&vars->gpu_cellVector, maxAtomNumber * sizeof(int));
CUMALLOC((void **)&vars->gpu_mapParticleToCell, maxAtomNumber * sizeof(int));
#ifndef NDEBUG
checkLastErrorCUDA(__FILE__, __LINE__);
#endif
@@ -360,6 +364,7 @@ void DestroyCUDAVars(VariablesCUDA *vars) {
CUFREE(vars->gpu_x);
CUFREE(vars->gpu_y);
CUFREE(vars->gpu_z);
CUFREE(vars->gpu_particleCharge);
CUFREE(vars->gpu_dx);
CUFREE(vars->gpu_dy);
CUFREE(vars->gpu_dz);
2 changes: 1 addition & 1 deletion src/GPU/ConstantDefinitionsCUDAKernel.cuh
@@ -28,7 +28,7 @@ void InitGPUForceField(VariablesCUDA &vars, double const *sigmaSq,
double Rcut, double const *rCutCoulomb,
double RcutLow, double Ron, double const *alpha,
int ewald, double diElectric_1);
void InitCoordinatesCUDA(VariablesCUDA *vars, uint atomNumber,
void InitCoordinatesCUDA(VariablesCUDA *vars, uint maxAtomNumber,
uint maxAtomsInMol, uint maxMolNumber);
void InitEwaldVariablesCUDA(VariablesCUDA *vars, uint imageTotal);
void InitExp6VariablesCUDA(VariablesCUDA *vars, double *rMin, double *expConst,
2 changes: 1 addition & 1 deletion src/GPU/VariablesCUDA.cuh
@@ -109,7 +109,7 @@ public:
int *gpu_ewald;
double *gpu_diElectric_1;
double *gpu_finalVal;
double *gpu_x, *gpu_y, *gpu_z;
double *gpu_x, *gpu_y, *gpu_z, *gpu_particleCharge;
double *gpu_nx, *gpu_ny, *gpu_nz;
double *gpu_dx, *gpu_dy, *gpu_dz;
double **gpu_kx, **gpu_ky, **gpu_kz;
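Because every call path now copies into a buffer that was sized once, the per-call data must fit within the maxAtomNumber doubles reserved in InitCoordinatesCUDA; that presumably holds by construction, since particleCharge never exceeds the system's atom count. When extending the code, a defensive wrapper is a cheap safeguard; a sketch under that assumption (CheckedChargeCopy is a hypothetical helper, not part of this commit):

#include <cassert>
#include <vector>
#include <cuda_runtime.h>
#include "VariablesCUDA.cuh"

// Hypothetical helper: copy host charges into the shared device buffer,
// asserting the data still fits in the one-time allocation.
inline void CheckedChargeCopy(VariablesCUDA *vars, size_t maxAtomNumber,
                              const std::vector<double> &particleCharge) {
  assert(particleCharge.size() <= maxAtomNumber &&
         "particleCharge exceeds the preallocated gpu_particleCharge buffer");
  cudaMemcpy(vars->gpu_particleCharge, particleCharge.data(),
             particleCharge.size() * sizeof(double), cudaMemcpyHostToDevice);
}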
