Development #14

Merged · 16 commits · May 6, 2024
Changes from all commits
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:12.1.1-devel-ubuntu22.04
+FROM nvidia/cuda:12.4.1-devel-ubuntu22.04
ENV PATH /usr/local/cuda/bin${PATH:+:${PATH}}
ENV LD_LIBRARY_PATH /usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}

4 changes: 1 addition & 3 deletions Dockerfile.prod
@@ -3,9 +3,7 @@ ENV BUILD_DIR "build"
ENV CONFIG "Release"

RUN echo "Installing GPUVMEM"
-RUN git clone https://github.com/miguelcarcamov/gpuvmem.git && \
-cd gpuvmem && \
-cmake . -B $BUILD_DIR -DCMAKE_BUILD_TYPE=$CONFIG && \
+RUN cmake . -B $BUILD_DIR -DCMAKE_BUILD_TYPE=$CONFIG && \
cd $BUILD_DIR && \
cmake --build . --target install --verbose -j `nproc` && \
cd .. && \
27 changes: 14 additions & 13 deletions environment.yml
@@ -8,18 +8,19 @@ dependencies:
   - cfitsio
   - openmpi
   - mpi4py
-  - python=3.8
-  - numpy=1.23
+  - python=3.10
+  - numpy=1.24
   - pip
   - pip:
-      - almatasks==1.6.1
-      - casadata==2023.4.10
-      - casafeather==0.0.18
-      - casampi==0.5.3
-      - casaplotms==2.0.1
-      - casaplotserver==1.5.1
-      - casashell==6.5.5.21
-      - casatasks==6.5.5.21
-      - casatestutils==6.5.5.21
-      - casatools==6.5.5.21
-      - casaviewer==1.7.1
+      - casadata==2023.9.18
+      - casafeather==0.0.20
+      - casalogger==1.0.17
+      - casampi==0.5.4
+      - casaplotms==2.2.4
+      - casaplotserver==1.7.1
+      - casashell==6.6.0.20
+      - casatablebrowser==0.0.33
+      - casatasks==6.6.0.20
+      - casatestutils==6.6.0.20
+      - casatools==6.6.0.20
+      - casaviewer==1.9.1
27 changes: 14 additions & 13 deletions environment_cudatoolkit.yml
@@ -11,18 +11,19 @@ dependencies:
   - cuda-samples
   - openmpi
   - mpi4py
-  - python=3.8
-  - numpy=1.23
+  - python=3.10
+  - numpy=1.24
   - pip
   - pip:
-      - almatasks==1.6.1
-      - casadata==2023.4.10
-      - casafeather==0.0.18
-      - casampi==0.5.3
-      - casaplotms==2.0.1
-      - casaplotserver==1.5.1
-      - casashell==6.5.5.21
-      - casatasks==6.5.5.21
-      - casatestutils==6.5.5.21
-      - casatools==6.5.5.21
-      - casaviewer==1.7.1
+      - casadata==2023.9.18
+      - casafeather==0.0.20
+      - casalogger==1.0.17
+      - casampi==0.5.4
+      - casaplotms==2.2.4
+      - casaplotserver==1.7.1
+      - casashell==6.6.0.20
+      - casatablebrowser==0.0.33
+      - casatasks==6.6.0.20
+      - casatestutils==6.6.0.20
+      - casatools==6.6.0.20
+      - casaviewer==1.9.1
1 change: 0 additions & 1 deletion include/chi2.cuh
@@ -9,7 +9,6 @@ class Chi2 : public Fi {
Chi2();
float calcFi(float* p);
void calcGi(float* p, float* xi);
-float simulateModel(float* p) override;
void restartDGi();
void addToDphi(float* device_dphi);
void configure(int penalizatorIndex, int imageIndex, int imageToAdd) override;
1 change: 0 additions & 1 deletion include/classes/fi.cuh
@@ -21,7 +21,6 @@ class Fi {

virtual float calcFi(float* p) = 0;
virtual void calcGi(float* p, float* xi) = 0;
-virtual float simulateModel(float* p){};
virtual void restartDGi() = 0;
virtual void addToDphi(float* device_dphi) = 0;
virtual void setPrior(float prior){};
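
A note on this removal: the deleted declaration paired a float return type with an empty inline body, so any call that actually dispatched to Fi::simulateModel was undefined behavior (control falls off the end of a non-void function), and the Chi2 override removed below only forwarded to the free simulate() helper anyway, so dropping both tightens the interface. A minimal standalone illustration of the hazard, with hypothetical names not taken from this PR:

```cpp
struct Base {
  // Compiles (typically with a -Wreturn-type warning), but calling it is
  // undefined behavior: control reaches the end of a non-void function
  // without returning a value. This mirrors the removed declaration.
  virtual float value() {}

  // Safer alternatives: pure virtual, or a real default body.
  virtual float valuePure() = 0;
  virtual float valueDefault() { return 0.0f; }

  virtual ~Base() = default;
};
```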
23 changes: 12 additions & 11 deletions requirements.txt
@@ -1,11 +1,12 @@
-almatasks==1.6.1
-casadata==2023.4.10
-casafeather==0.0.18
-casampi==0.5.3
-casaplotms==2.0.1
-casaplotserver==1.5.1
-casashell==6.5.5.21
-casatasks==6.5.5.21
-casatestutils==6.5.5.21
-casatools==6.5.5.21
-casaviewer==1.7.1
+casadata==2023.9.18
+casafeather==0.0.20
+casalogger==1.0.17
+casampi==0.5.4
+casaplotms==2.2.4
+casaplotserver==1.7.1
+casashell==6.6.0.20
+casatablebrowser==0.0.33
+casatasks==6.6.0.20
+casatestutils==6.6.0.20
+casatools==6.6.0.20
+casaviewer==1.9.1
6 changes: 0 additions & 6 deletions src/chi2.cu
@@ -47,12 +47,6 @@ void Chi2::calcGi(float* p, float* xi) {
dchi2(p, xi, result_dchi2, ip);
};

-float Chi2::simulateModel(float* p) {
-float result = 0.0f;
-result = simulate(p, ip);
-return result;
-};
-
void Chi2::restartDGi() {
checkCudaErrors(
cudaMemset(result_dchi2, 0, sizeof(float) * M * N * image_count));
137 changes: 57 additions & 80 deletions src/functions.cu
@@ -1364,10 +1364,10 @@ __host__ void do_gridding(std::vector<Field>& fields,
fields[f].numVisibilitiesPerFreqPerStoke[i][s]);
fields[f].backup_visibilities[i][s].weight.resize(
fields[f].numVisibilitiesPerFreqPerStoke[i][s]);
-#pragma omp parallel for schedule(static, 1) num_threads(gridding) \
-shared(g_weights, g_weights_aux, g_Vo) private( \
-j, k, grid_pos_x, grid_pos_y, uvw, w, Vo, shifted_j, shifted_k, \
-kernel_i, kernel_j, herm_j, herm_k, ckernel_result) ordered
+#pragma omp parallel for schedule(static, 1) num_threads(gridding) \
+shared(g_weights, g_weights_aux, g_Vo) private( \
+j, k, grid_pos_x, grid_pos_y, uvw, w, Vo, shifted_j, shifted_k, \
+kernel_i, kernel_j, herm_j, herm_k, ckernel_result) ordered
for (int z = 0; z < fields[f].numVisibilitiesPerFreqPerStoke[i][s];
z++) {
uvw = fields[f].visibilities[i][s].uvw[z];
@@ -1509,6 +1509,7 @@ __host__ void do_gridding(std::vector<Field>& fields,
if (weight > 0.0f) {
fields[f].visibilities[i][s].uvw[l].x = g_uvw[N * k + j].x;
fields[f].visibilities[i][s].uvw[l].y = g_uvw[N * k + j].y;
+fields[f].visibilities[i][s].uvw[l].z = 0.0;
fields[f].visibilities[i][s].Vo[l] =
make_cuFloatComplex(g_Vo[N * k + j].x, g_Vo[N * k + j].y);
fields[f].visibilities[i][s].weight[l] = g_weights[N * k + j];
@@ -1575,9 +1576,9 @@ __host__ double3 calc_beamSize(double s_uu, double s_vv, double s_uv) {
double uu_minus_vv = s_uu - s_vv;
double uu_plus_vv = s_uu + s_vv;
double sqrt_in = sqrt((uu_minus_vv * uu_minus_vv) + 4.0 * uv_square);
-beam_size.x = 2.0 * sqrt(log(2.0)) / PI_D /
+beam_size.x = 1.0 / sqrt(2.0) / PI_D /
sqrt(uu_plus_vv - sqrt_in); // Major axis in radians
-beam_size.y = 2.0 * sqrt(log(2.0)) / PI_D /
+beam_size.y = 1.0 / sqrt(2.0) / PI_D /
sqrt(uu_plus_vv + sqrt_in); // Minor axis in radians
beam_size.z = -0.5 * atan2(2.0 * s_uv, uu_minus_vv); // Angle in radians
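
A note on the new constants: the old and new prefactors differ by exactly the Gaussian FWHM-to-sigma ratio, so this change plausibly switches the reported major/minor axes from full width at half maximum to standard deviation. That reading is ours, not stated in the diff, but the identity is exact:

```latex
\mathrm{FWHM} = 2\sqrt{2\ln 2}\,\sigma \approx 2.3548\,\sigma,
\qquad
\frac{2\sqrt{\ln 2}}{\pi} \;=\; 2\sqrt{2\ln 2}\cdot\frac{1}{\sqrt{2}\,\pi}
```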

@@ -2417,8 +2418,8 @@ __global__ void getGriddedVisFromPix(cufftComplex* Vm,
if (uv.y < 0.0)
uv.y = uv.y + N;

-j1 = round(uv.x);
-i1 = round(uv.y);
+j1 = uv.x;
+i1 = uv.y;

if (i1 >= 0 && i1 < N && j1 >= 0 && j1 < N)
Vm[i] = V[N * i1 + j1];
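
Note that dropping round() changes the grid lookup from nearest-neighbor to truncation: the implicit float-to-int conversion truncates toward zero, which floors the in-range positive pixel coordinates used here. A standalone sketch of the behavioral difference, with hypothetical values not from the PR:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  float u = 2.7f;
  int nearest = static_cast<int>(std::lround(u));  // 3: old behavior via round()
  int truncated = static_cast<int>(u);             // 2: new behavior, implicit cast
  std::printf("nearest=%d truncated=%d\n", nearest, truncated);
  return 0;
}
```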
@@ -3678,7 +3679,8 @@ __global__ void DChi2(float* noise,
int y0 = phs_yobs;
double x = (j - x0) * DELTAX * RPDEG_D;
double y = (i - y0) * DELTAY * RPDEG_D;
-// double z = sqrt(1-x*x-y*y)-1;
+double z = sqrtf(1 - x * x - y * y);
+double z_minus_one = z - 1.0;

float Ukv, Vkv, Wkv, cosk, sink, atten;

@@ -3690,12 +3692,13 @@
for (int v = 0; v < numVisibilities; v++) {
Ukv = x * UVW[v].x;
Vkv = y * UVW[v].y;
-// Wkv = z * UVW[v].z;
+Wkv = z_minus_one * UVW[v].z;

#if (__CUDA_ARCH__ >= 300)
-sincospif(2.0 * (Ukv + Vkv), &sink, &cosk);
+sincospif(2.0 * (Ukv + Vkv + Wkv), &sink, &cosk);
#else
-cosk = cospif(2.0 * (Ukv + Vkv));
-sink = sinpif(2.0 * (Ukv + Vkv));
+cosk = cospif(2.0 * (Ukv + Vkv + Wkv));
+sink = sinpif(2.0 * (Ukv + Vkv + Wkv));
#endif
dchi2 += w[v] * ((Vr[v].x * cosk) - (Vr[v].y * sink));
}
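
These DChi2 changes, repeated verbatim in the second overload below, activate the previously commented-out w-term of the interferometric phase: with direction cosines x, y and n = sqrt(1 - x^2 - y^2), the non-coplanar phase is 2*pi*(ux + vy + w(n - 1)), which is exactly what the new Wkv = z_minus_one * UVW[v].z contributes inside sincospif. Consistently, do_gridding above now zeroes uvw.z, so gridded visibilities sit on the w = 0 plane where the extra term vanishes. The standard full-sky measurement equation, for reference:

```latex
V(u,v,w) \;=\; \iint \frac{A(l,m)\, I(l,m)}{\sqrt{1-l^{2}-m^{2}}}\,
  e^{-2\pi i \left[\, u l + v m + w\left(\sqrt{1-l^{2}-m^{2}}-1\right) \right]}\, dl\, dm
```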
@@ -3733,7 +3736,8 @@ __global__ void DChi2(float* noise,
int y0 = phs_yobs;
double x = (j - x0) * DELTAX * RPDEG_D;
double y = (i - y0) * DELTAY * RPDEG_D;
-// double z = sqrt(1-x*x-y*y)-1;
+double z = sqrtf(1 - x * x - y * y);
+double z_minus_one = z - 1.0;

float Ukv, Vkv, Wkv, cosk, sink, atten, gcf_i;

@@ -3745,12 +3749,12 @@
for (int v = 0; v < numVisibilities; v++) {
Ukv = x * UVW[v].x;
Vkv = y * UVW[v].y;
-// Wkv = z * UVW[v].z;
+Wkv = z_minus_one * UVW[v].z;
#if (__CUDA_ARCH__ >= 300)
-sincospif(2.0 * (Ukv + Vkv), &sink, &cosk);
+sincospif(2.0 * (Ukv + Vkv + Wkv), &sink, &cosk);
#else
-cosk = cospif(2.0 * (Ukv + Vkv));
-sink = sinpif(2.0 * (Ukv + Vkv));
+cosk = cospif(2.0 * (Ukv + Vkv + Wkv));
+sink = sinpif(2.0 * (Ukv + Vkv + Wkv));
#endif
dchi2 += w[v] * ((Vr[v].x * cosk) - (Vr[v].y * sink));
}
@@ -4076,7 +4080,8 @@ __host__ float simulate(float* I, VirtualImageProcessor* ip) {

for (int d = 0; d < nMeasurementSets; d++) {
for (int f = 0; f < datasets[d].data.nfields; f++) {
-#pragma omp parallel for schedule(static,1) num_threads(num_gpus) reduction(+: resultchi2)
+#pragma omp parallel for schedule(static, 1) num_threads(num_gpus) \
+reduction(+ : resultchi2)
for (int i = 0; i < datasets[d].data.total_frequencies; i++) {
float result = 0.0;
unsigned int j = omp_get_thread_num();
@@ -4122,36 +4127,22 @@
checkCudaErrors(cudaMemset(vars_gpu[gpu_idx].device_chi2, 0,
sizeof(float) * max_number_vis));

-if (NULL != ip->getCKernel()) {
-getGriddedVisFromPix<<<
-datasets[d].fields[f].device_visibilities[i][s].numBlocksUV,
-datasets[d]
-.fields[f]
-.device_visibilities[i][s]
-.threadsPerBlockUV>>>(
-datasets[d].fields[f].device_visibilities[i][s].Vm,
-vars_gpu[gpu_idx].device_V,
-datasets[d].fields[f].device_visibilities[i][s].uvw,
-datasets[d].fields[f].device_visibilities[i][s].weight,
-deltau, deltav,
-datasets[d].fields[f].numVisibilitiesPerFreqPerStoke[i][s],
-N);
-} else {
-// BILINEAR INTERPOLATION
-vis_mod<<<
-datasets[d].fields[f].device_visibilities[i][s].numBlocksUV,
-datasets[d]
-.fields[f]
-.device_visibilities[i][s]
-.threadsPerBlockUV>>>(
-datasets[d].fields[f].device_visibilities[i][s].Vm,
-vars_gpu[gpu_idx].device_V,
-datasets[d].fields[f].device_visibilities[i][s].uvw,
-datasets[d].fields[f].device_visibilities[i][s].weight,
-deltau, deltav,
-datasets[d].fields[f].numVisibilitiesPerFreqPerStoke[i][s],
-N);
-}
+// TODO: Here we could just use vis_mod and see what happens
+// Use always bilinear interpolation since we don't have
+// degridding yet
+vis_mod<<<
+datasets[d].fields[f].device_visibilities[i][s].numBlocksUV,
+datasets[d]
+.fields[f]
+.device_visibilities[i][s]
+.threadsPerBlockUV>>>(
+datasets[d].fields[f].device_visibilities[i][s].Vm,
+vars_gpu[gpu_idx].device_V,
+datasets[d].fields[f].device_visibilities[i][s].uvw,
+datasets[d].fields[f].device_visibilities[i][s].weight,
+deltau, deltav,
+datasets[d].fields[f].numVisibilitiesPerFreqPerStoke[i][s],
+N);
checkCudaErrors(cudaDeviceSynchronize());

// RESIDUAL CALCULATION
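
With this change, simulate() here and chi2() below always take the interpolation path; the added comments note that proper degridding (the getGriddedVisFromPix path keyed on the convolution kernel) is not available yet. vis_mod itself is outside this diff, but bilinear sampling of a gridded visibility plane looks roughly like the sketch below, with hypothetical names and the assumption that (u, v) lands where the 2x2 neighborhood is in bounds:

```cuda
#include <cufft.h>

// Hypothetical sketch, not the project's vis_mod: bilinearly sample an
// N x N gridded model-visibility plane V at fractional position (u, v).
__device__ cufftComplex bilinearModelVis(const cufftComplex* V, int N,
                                         float u, float v) {
  const int j0 = static_cast<int>(floorf(u));  // left column of the 2x2 patch
  const int i0 = static_cast<int>(floorf(v));  // bottom row of the 2x2 patch
  const float du = u - j0;                     // fractional offset in u
  const float dv = v - i0;                     // fractional offset in v

  const cufftComplex v00 = V[N * i0 + j0];
  const cufftComplex v01 = V[N * i0 + j0 + 1];
  const cufftComplex v10 = V[N * (i0 + 1) + j0];
  const cufftComplex v11 = V[N * (i0 + 1) + j0 + 1];

  cufftComplex r;
  r.x = (1.0f - du) * (1.0f - dv) * v00.x + du * (1.0f - dv) * v01.x +
        (1.0f - du) * dv * v10.x + du * dv * v11.x;
  r.y = (1.0f - du) * (1.0f - dv) * v00.y + du * (1.0f - dv) * v01.y +
        (1.0f - du) * dv * v10.y + du * dv * v11.y;
  return r;
}
```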
@@ -4211,7 +4202,8 @@ __host__ float chi2(float* I, VirtualImageProcessor* ip) {

for (int d = 0; d < nMeasurementSets; d++) {
for (int f = 0; f < datasets[d].data.nfields; f++) {
-#pragma omp parallel for schedule(static,1) num_threads(num_gpus) reduction(+: resultchi2)
+#pragma omp parallel for schedule(static, 1) num_threads(num_gpus) \
+reduction(+ : resultchi2)
for (int i = 0; i < datasets[d].data.total_frequencies; i++) {
float result = 0.0;
unsigned int j = omp_get_thread_num();
@@ -4257,36 +4249,21 @@ __host__ float chi2(float* I, VirtualImageProcessor* ip) {
checkCudaErrors(cudaMemset(vars_gpu[gpu_idx].device_chi2, 0,
sizeof(float) * max_number_vis));

-if (NULL != ip->getCKernel()) {
-getGriddedVisFromPix<<<
-datasets[d].fields[f].device_visibilities[i][s].numBlocksUV,
-datasets[d]
-.fields[f]
-.device_visibilities[i][s]
-.threadsPerBlockUV>>>(
-datasets[d].fields[f].device_visibilities[i][s].Vm,
-vars_gpu[gpu_idx].device_V,
-datasets[d].fields[f].device_visibilities[i][s].uvw,
-datasets[d].fields[f].device_visibilities[i][s].weight,
-deltau, deltav,
-datasets[d].fields[f].numVisibilitiesPerFreqPerStoke[i][s],
-N);
-} else {
-// BILINEAR INTERPOLATION
-vis_mod<<<
-datasets[d].fields[f].device_visibilities[i][s].numBlocksUV,
-datasets[d]
-.fields[f]
-.device_visibilities[i][s]
-.threadsPerBlockUV>>>(
-datasets[d].fields[f].device_visibilities[i][s].Vm,
-vars_gpu[gpu_idx].device_V,
-datasets[d].fields[f].device_visibilities[i][s].uvw,
-datasets[d].fields[f].device_visibilities[i][s].weight,
-deltau, deltav,
-datasets[d].fields[f].numVisibilitiesPerFreqPerStoke[i][s],
-N);
-}
+// Use always bilinear interpolation since we don't have
+// degridding yet
+vis_mod<<<
+datasets[d].fields[f].device_visibilities[i][s].numBlocksUV,
+datasets[d]
+.fields[f]
+.device_visibilities[i][s]
+.threadsPerBlockUV>>>(
+datasets[d].fields[f].device_visibilities[i][s].Vm,
+vars_gpu[gpu_idx].device_V,
+datasets[d].fields[f].device_visibilities[i][s].uvw,
+datasets[d].fields[f].device_visibilities[i][s].weight,
+deltau, deltav,
+datasets[d].fields[f].numVisibilitiesPerFreqPerStoke[i][s],
+N);
checkCudaErrors(cudaDeviceSynchronize());

// RESIDUAL CALCULATION
2 changes: 1 addition & 1 deletion src/linmin.cu
@@ -82,7 +82,7 @@ __host__ void linmin(float* p,

*fret = brent(ax, xx, bx, TOL, &xmin, f1dim);
if (verbose_flag) {
printf("xmin = %f\n\n", xmin);
printf("Alpha for linear minimization = %f\n\n", xmin);
}

// GPU MUL AND ADD
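
The reworded message matches what xmin is in this routine: assuming the usual Numerical Recipes linmin structure that the surrounding code follows (brent() over f1dim), the returned value is the scalar step length along the search direction, which the GPU multiply-and-add below then applies:

```latex
\alpha^{\star} \;=\; \arg\min_{\alpha} f(p + \alpha\,\xi),
\qquad
p \;\leftarrow\; p + \alpha^{\star}\,\xi
```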