diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
index 7e095dc0061..ff4411db6a7 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
@@ -201,6 +201,7 @@ public:
     amrex::GpuArray<int, 3> Ez_IndexType;

     bool m_solve_electron_energy_equation;
+
 };

 /**
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
index ab7cb6a81d3..06f574e7ec0 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
@@ -235,26 +235,14 @@ void HybridPICModel::InitData ()
     }

     const auto elec_temp = m_elec_temp;
-    // Initialize electron temperature multifab
-    // This assumes an uniform electron temperature in the domain
-    // ...
-// Loop through the grids, and over the tiles within each grid
-#ifdef AMREX_USE_OMP
-#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
-#endif
-    for ( MFIter mfi(*warpx.m_fields.get("fluid_temperature_electrons_hybrid", warpx.finestLevel()), TilingIfNotGPU()); mfi.isValid(); ++mfi )
-    {
-        Array4<Real> const& Te = warpx.m_fields.get("fluid_temperature_electrons_hybrid", warpx.finestLevel())->array(mfi);
-        const Box& tilebox = mfi.tilebox();
-        ParallelFor(tilebox, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
-            Te(i, j, k) = elec_temp;
-        });
-    }
+    // Initialize electron temperature multifab
+    // This assumes a uniform electron temperature in the domain
+    warpx.m_fields.get("fluid_temperature_electrons_hybrid", warpx.finestLevel())->setVal(elec_temp);

     // Fill Boundaries in electron temperature multifab
     warpx.m_fields.get("fluid_temperature_electrons_hybrid", warpx.finestLevel())->FillBoundary(warpx.Geom(warpx.finestLevel()).periodicity());
-
+
 }

 void HybridPICModel::GetCurrentExternal ()
diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp
index eb02ac12417..78a48ae15c7 100644
--- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp
+++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp
@@ -171,36 +171,35 @@ void WarpX::HybridPICEvolveFields ()
         // Initialize Ke = ne^(1-gamma)*Te
        // Move up, shouldn't Ke be defined with both ne and Te at n instead of ne at n+1 ?
-        hybrid_electron_fl->HybridInitializeKe(m_fields, m_hybrid_pic_model->m_gamma , finest_level);
+        hybrid_electron_fl->HybridInitializeKe(m_fields, m_hybrid_pic_model->m_gamma, m_hybrid_pic_model->m_n_floor, finest_level);

         // Update Ue in electron fluid container
         // check at what step this should be calculated, here Ue is at n+1
         // maybe move up to be consistent with Ke initialization
         hybrid_electron_fl->HybridInitializeUe(m_fields,
-                                               *current_fp_temp[finest_level][0],
-                                               *current_fp_temp[finest_level][1],
-                                               *current_fp_temp[finest_level][2],
-                                               m_hybrid_pic_model.get(), finest_level);
+                                               current_fp_temp[finest_level],
+                                               m_hybrid_pic_model.get(),
+                                               finest_level);

         // Set fictitious electron particles velocities
-        qdsmc_hybrid_electron_pc->SetV(finest_level,
-                                       *m_fields.get(hybrid_electron_fl->name_mf_NU, Direction{0}, finest_level),
-                                       *m_fields.get(hybrid_electron_fl->name_mf_NU, Direction{1}, finest_level),
-                                       *m_fields.get(hybrid_electron_fl->name_mf_NU, Direction{2}, finest_level));
+        //qdsmc_hybrid_electron_pc->SetV(finest_level,
+        //                               *m_fields.get(hybrid_electron_fl->name_mf_NU, Direction{0}, finest_level),
+        //                               *m_fields.get(hybrid_electron_fl->name_mf_NU, Direction{1}, finest_level),
+        //                               *m_fields.get(hybrid_electron_fl->name_mf_NU, Direction{2}, finest_level));

         // Set fictitious electron particles entropy
-        qdsmc_hybrid_electron_pc->SetK(finest_level,
-                                       *m_fields.get(hybrid_electron_fl->name_mf_K, finest_level),
-                                       *m_fields.get(FieldType::rho_fp, finest_level));
+        //qdsmc_hybrid_electron_pc->SetK(finest_level,
+        //                               *m_fields.get(hybrid_electron_fl->name_mf_K, finest_level),
+        //                               *m_fields.get(FieldType::rho_fp, finest_level));

         // Push fictitious electron particles
-        qdsmc_hybrid_electron_pc->PushX(finest_level, dt[0]);
+        //qdsmc_hybrid_electron_pc->PushX(finest_level, dt[0]);

         // Deposit entropy from qdsmc
-        qdsmc_hybrid_electron_pc->DepositK(finest_level, *m_fields.get(hybrid_electron_fl->name_mf_K, finest_level));
+        //qdsmc_hybrid_electron_pc->DepositK(finest_level, *m_fields.get(hybrid_electron_fl->name_mf_K, finest_level));

         // Update Te after QDSMC solver:
-        hybrid_electron_fl->HybridUpdateTe(m_fields, m_hybrid_pic_model->m_gamma, finest_level);
+        //hybrid_electron_fl->HybridUpdateTe(m_fields, m_hybrid_pic_model->m_gamma, m_hybrid_pic_model->m_n_floor, finest_level);
     }

     // Calculate the electron pressure at t=n+1
diff --git a/Source/Fluids/QdsmcParticleContainer.H b/Source/Fluids/QdsmcParticleContainer.H
index 46ee1b76cf5..839d609b828 100644
--- a/Source/Fluids/QdsmcParticleContainer.H
+++ b/Source/Fluids/QdsmcParticleContainer.H
@@ -15,10 +15,10 @@
 #include
 #include

-#include "Particles/ParticleBoundaries.H"
-#include
+//#include "Particles/ParticleBoundaries.H"
+//#include

-#include "QdsmcParticleContainer_fwd.H"
+//#include "QdsmcParticleContainer_fwd.H"

 /**
  * This enumerated struct is used to index the qdsmc fictitious
@@ -38,10 +38,6 @@ struct QdsmcPIdx
 {
     z, z_node,
     vx, vy, vz,
     entropy, np_real,
-
-#ifdef WARPX_DIM_RZ
-    theta,   ///< RZ needs all three position components
-#endif
     nattribs
 };
 };
@@ -74,7 +70,7 @@ public:
     //! similar to WarpXParticleContainer::AddNParticles
-    void AddNParticles (int lev, amrex::Long n,
+    void AddNParticles (int lev, amrex::Long np,
                         amrex::Vector<amrex::ParticleReal> const & x,
                         amrex::Vector<amrex::ParticleReal> const & y,
                         amrex::Vector<amrex::ParticleReal> const & z);
diff --git a/Source/Fluids/QdsmcParticleContainer.cpp b/Source/Fluids/QdsmcParticleContainer.cpp
index 9344abbde80..afb96519dca 100644
--- a/Source/Fluids/QdsmcParticleContainer.cpp
+++ b/Source/Fluids/QdsmcParticleContainer.cpp
@@ -53,6 +53,7 @@
 #include
 #include
+#include
 #include
 #include

@@ -63,7 +64,6 @@ QdsmcParticleContainer::QdsmcParticleContainer (AmrCore* amr_core)
     : ParticleContainerPureSoA<QdsmcPIdx::nattribs, 0>(amr_core->GetParGDB())
 {
     SetParticleSize();
-    //InitParticles(0); // only level 0 is used in HybridSolver, commented since this is already called in WarpX.cpp
 }

@@ -78,68 +78,121 @@ QdsmcParticleContainer::AddNParticles (int lev, amrex::Long n,
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE(y.size() == n,"y.size() != # of qdsmc particles to add");
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE(z.size() == n,"z.size() != # of qdsmc particles to add");

-    if (n <= 0){
-        Redistribute();
-        return;
-    }

     // have to resize here, not in the constructor because grids have not
     // been built when constructor was called.
     reserveData();
     resizeData();

+    long ibegin = 0;
+    long iend = n;
+
+    const int myproc = amrex::ParallelDescriptor::MyProc();
+    const int nprocs = amrex::ParallelDescriptor::NProcs();
+    const auto navg = n/nprocs;
+    const auto nleft = n - navg * nprocs;
+    if (myproc < nleft) {
+        ibegin = myproc*(navg+1);
+        iend = ibegin + navg+1;
+    } else {
+        ibegin = myproc*navg + nleft;
+        iend = ibegin + navg;
+    }
+
+
+    //if (np <= 0){
+    //    Redistribute();
+    //    return;
+    //}
+
+    //std::ofstream ofs("xyz_InitParticles.txt", std::ofstream::out); // REMOVE
+    //for(int i=0; i

     using PinnedTile = typename ContainerLike<amrex::PinnedArenaAllocator>::ParticleTileType;
-
     PinnedTile pinned_tile;
     pinned_tile.define(NumRuntimeRealComps(), NumRuntimeIntComps());

-    for (int i = 0; i < n; i++)
+    const std::size_t np = iend-ibegin;
+
+    std::ofstream ofsID("IDs.txt", std::ofstream::out); // REMOVE
+    for (auto i = ibegin; i < iend; ++i)
     {
         auto & idcpu_data = pinned_tile.GetStructOfArrays().GetIdCPUData();
-        idcpu_data.push_back(amrex::SetParticleIDandCPU(ParticleType::NextID(), ParallelDescriptor::MyProc()));
+        amrex::Long current_id = ParticleType::NextID();
+        idcpu_data.push_back(amrex::SetParticleIDandCPU(current_id, ParallelDescriptor::MyProc()));
+        Print(ofsID) << " i " << i <<" current_id " << current_id << std::endl; // REMOVE
     }
+    ofsID.close(); // REMOVE

-    // write Real attributes (SoA) to particle initialized zero
-    DefineAndReturnParticleTile(0, 0, 0);
+    //
+    // Check here if the idcpu_data is correct and compare with physical particle container
+    // ...
+    // ...
+ + std::ofstream ofs0("some_params.txt", std::ofstream::out); // REMOVE + Print(ofs0) << " ibegin " << ibegin << " iend " << iend << " np " << np << " NReal " << NReal << std::endl; // REMOVE + Print(ofs0) << " NumRuntimeRealComps " << NumRuntimeRealComps() << " NumRuntimeIntComps " << NumRuntimeIntComps() << std::endl; // REMOVE + ofs0.close(); // REMOVE + + //std::ofstream ofs("xyz_InitParticles.txt", std::ofstream::out); // REMOVE + //for(int i=0; i0) || (NumRuntimeIntComps()>0) ){ + DefineAndReturnParticleTile(0, 0, 0); + } + + pinned_tile.resize(np); + + auto old_np = particle_tile.numParticles(); + auto new_np = old_np + pinned_tile.numParticles(); particle_tile.resize(new_np); amrex::copyParticles( - particle_tile, pinned_tile, 0, old_np, pinned_tile.numParticles()); + particle_tile, pinned_tile, 0, old_np, pinned_tile.numParticles() + ); + // Move particles to their appropriate tiles Redistribute(); - // Remove particles that are inside the embedded boundaries here - /** - * - * - * - */ + // Remove particles that are inside the embedded boundaries +//#ifdef AMREX_USE_EB +// if (EB::enabled()) { +// auto & warpx = WarpX::GetInstance(); +// scrapeParticlesAtEB( +// *this, +// warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), +// ParticleBoundaryProcess::Absorb()); +// deleteInvalidParticles(); +// } +//#endif } @@ -152,7 +205,7 @@ QdsmcParticleContainer::InitParticles (int lev) const amrex::Real* dx = warpx.Geom(lev).CellSize(); - // Read this from domain ? + // Number of cells in each direction int nx = (probhi[0] - problo[0])/dx[0]; int ny = (probhi[1] - problo[1])/dx[1]; int nz = (probhi[2] - problo[2])/dx[2]; @@ -164,6 +217,13 @@ QdsmcParticleContainer::InitParticles (int lev) amrex::Vector ypos; amrex::Vector zpos; + //std::ofstream ofs1("variables_InitParticles.txt", std::ofstream::out); // REMOVE + //Print(ofs1) << " problo[0] " << problo[0] << " problo[1] " << problo[1] << " problo[2] " << problo[2] << std::endl; // REMOVE + //Print(ofs1) << " probhi[0] " << probhi[0] << " probhi[1] " << probhi[1] << " probhi[2] " << probhi[2] << std::endl; // REMOVE + //Print(ofs1) << " dx[0] " << dx[0] << " dx[1] " << dx[1] << " dx[2] " << dx[2] << std::endl; // REMOVE + //Print(ofs1) << " nx " << nx << " ny " << ny << " nz " << nz << std::endl; // REMOVE + //ofs1.close(); // REMOVE + // for now, only one MPI rank adds fictitious particles // this is done only once in an entire simulation if (ParallelDescriptor::IOProcessor()) @@ -174,30 +234,21 @@ QdsmcParticleContainer::InitParticles (int lev) { for ( int k = 0; k <= nz; k++) { - // cell centered - //amrex::Real x = problo[0] + (i+0.5)*dx[0]; - //amrex::Real y = problo[1] + (j+0.5)*dx[1]; - //amrex::Real z = problo[2] + (k+0.5)*dx[2]; - // nodal - amrex::Real x = problo[0] + i*dx[0]; - amrex::Real y = problo[1] + j*dx[1]; - amrex::Real z = problo[2] + k*dx[2]; - - if( x <= probhi[0] && y <= probhi[1] && z <= probhi[2]) - { - xpos.push_back(x); - ypos.push_back(y); - zpos.push_back(z); - - n_to_add++; - } + amrex::ParticleReal x = problo[0] + i*dx[0]; + amrex::ParticleReal y = problo[1] + j*dx[1]; + amrex::ParticleReal z = problo[2] + k*dx[2]; + + xpos.push_back(x); + ypos.push_back(y); + zpos.push_back(z); + + n_to_add++; } } } } - - AddNParticles (0, n_to_add, xpos, ypos, zpos); + AddNParticles(0, n_to_add, xpos, ypos, zpos); } @@ -338,7 +389,7 @@ QdsmcParticleContainer::PushX (int lev, amrex::Real dt) }); } - Redistribute(); + //Redistribute(); // search for maximum part_dx/part_dy/part_dz and assert if 
larger than dx/dy/dz // ... // ... diff --git a/Source/Fluids/WarpXFluidContainer.H b/Source/Fluids/WarpXFluidContainer.H index 28646fb73e3..8f8296e6365 100644 --- a/Source/Fluids/WarpXFluidContainer.H +++ b/Source/Fluids/WarpXFluidContainer.H @@ -134,16 +134,16 @@ public: void DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev, int icomp = 0); // HybridUpdateUe calculates Ue from Je = J_ampere - Ji and saves in NU for hybrid solver - void HybridInitializeUe (ablastr::fields::MultiFabRegister& fields, - amrex::MultiFab &ji_x, amrex::MultiFab &ji_y, amrex::MultiFab &ji_z, + void HybridInitializeUe (ablastr::fields::MultiFabRegister& m_fields, + ablastr::fields::VectorField const& Ji, HybridPICModel const* hybrid_model, int lev); // Sets Ke in fluid container MF to be used by qdsmc particles - void HybridInitializeKe (ablastr::fields::MultiFabRegister& m_fields, amrex::Real gamma, int lev); + void HybridInitializeKe (ablastr::fields::MultiFabRegister& m_fields, amrex::Real gamma, amrex::Real n_floor, int lev); // UpdateTe takes the already interpolated Ke values from the grid after pushing the fictitious particles // and calculates Te at n+1 - void HybridUpdateTe (ablastr::fields::MultiFabRegister& m_fields, amrex::Real gamma, int lev); + void HybridUpdateTe (ablastr::fields::MultiFabRegister& m_fields, amrex::Real gamma, amrex::Real n_floor, int lev); [[nodiscard]] amrex::Real getCharge () const {return charge;} [[nodiscard]] amrex::Real getMass () const {return mass;} diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp index 1751a73f9be..a615c301f3d 100644 --- a/Source/Fluids/WarpXFluidContainer.cpp +++ b/Source/Fluids/WarpXFluidContainer.cpp @@ -1409,16 +1409,25 @@ void WarpXFluidContainer::DepositCurrent( } - -void WarpXFluidContainer::HybridInitializeUe (ablastr::fields::MultiFabRegister& fields, - amrex::MultiFab &ji_x, amrex::MultiFab &ji_y, amrex::MultiFab &ji_z, - HybridPICModel const* hybrid_model, int lev) +void WarpXFluidContainer::HybridInitializeUe ( + ablastr::fields::MultiFabRegister& m_fields, + ablastr::fields::VectorField const& Ji, + HybridPICModel const* hybrid_model, + int lev) { using ablastr::fields::Direction; using warpx::fields::FieldType; - ablastr::fields::ScalarField rho_fp = fields.get(FieldType::rho_fp, lev); - ablastr::fields::VectorField current_fp_ampere = fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); + // For safety condition (divition by rho) + amrex::Real rho_floor = PhysConst::q_e*hybrid_model->m_n_floor; + + // Set Ue to 0 + m_fields.get(name_mf_NU, Direction{0}, lev)->setVal(0.0_rt); + m_fields.get(name_mf_NU, Direction{1}, lev)->setVal(0.0_rt); + m_fields.get(name_mf_NU, Direction{2}, lev)->setVal(0.0_rt); + + ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev); + ablastr::fields::VectorField current_fp_ampere = m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); // Index type required for interpolating fields from their respective // staggering to nodal grid @@ -1436,24 +1445,31 @@ void WarpXFluidContainer::HybridInitializeUe (ablastr::fields::MultiFabRegister& #endif for ( MFIter mfi(*rho_fp, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - Array4 const& rho = rho_fp->array(mfi); + amrex::Array4 const& rho = rho_fp->array(mfi); - amrex::Array4 const & Jx = current_fp_ampere[0]->array(mfi); - amrex::Array4 const & Jy = current_fp_ampere[1]->array(mfi); - amrex::Array4 const & Jz = current_fp_ampere[2]->array(mfi); + 
amrex::Array4 const& Jx = current_fp_ampere[0]->array(mfi); + amrex::Array4 const& Jy = current_fp_ampere[1]->array(mfi); + amrex::Array4 const& Jz = current_fp_ampere[2]->array(mfi); - amrex::Array4 const & Jix = ji_x.array(mfi); - amrex::Array4 const & Jiy = ji_y.array(mfi); - amrex::Array4 const & Jiz = ji_z.array(mfi); + amrex::Array4 const& Jix = Ji[0]->array(mfi); + amrex::Array4 const& Jiy = Ji[1]->array(mfi); + amrex::Array4 const& Jiz = Ji[2]->array(mfi); - const amrex::Array4 Uex = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); - const amrex::Array4 Uey = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); - const amrex::Array4 Uez = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); + amrex::Array4 const& Uex = m_fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + amrex::Array4 const& Uey = m_fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + amrex::Array4 const& Uez = m_fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); const Box& tilebox = mfi.tilebox(); ParallelFor(tilebox, [=] AMREX_GPU_DEVICE (int i, int j, int k) { if( rho(i, j, k) > 0.0_rt ){ + + // safety condition since we divide by rho_val later + amrex::Real rho_val = rho_floor; + if(rho(i, j, k) > rho_floor){ + rho_val = rho(i, j, k); + } + // Interpolate the total plasma current to a nodal grid auto const jx_interp = ablastr::coarsen::sample::Interp(Jx, Jx_stag, nodal, coarsen, i, j, k, 0); auto const jy_interp = ablastr::coarsen::sample::Interp(Jy, Jy_stag, nodal, coarsen, i, j, k, 0); @@ -1464,9 +1480,11 @@ void WarpXFluidContainer::HybridInitializeUe (ablastr::fields::MultiFabRegister& auto const jiy_interp = ablastr::coarsen::sample::Interp(Jiy, Jy_stag, nodal, coarsen, i, j, k, 0); auto const jiz_interp = ablastr::coarsen::sample::Interp(Jiz, Jz_stag, nodal, coarsen, i, j, k, 0); - Uex(i, j, k) = -(jx_interp - jix_interp)/rho(i, j, k); - Uey(i, j, k) = -(jy_interp - jiy_interp)/rho(i, j, k); - Uez(i, j, k) = -(jz_interp - jiz_interp)/rho(i, j, k); + // this needs to be rewritten for multiple ion species when implemented : + + Uex(i, j, k) = -(jx_interp - jix_interp)/rho_val; + Uey(i, j, k) = -(jy_interp - jiy_interp)/rho_val; + Uez(i, j, k) = -(jz_interp - jiz_interp)/rho_val; } }); @@ -1474,36 +1492,49 @@ void WarpXFluidContainer::HybridInitializeUe (ablastr::fields::MultiFabRegister& } -void WarpXFluidContainer::HybridInitializeKe (ablastr::fields::MultiFabRegister& fields, amrex::Real gamma, int lev) +void WarpXFluidContainer::HybridInitializeKe (ablastr::fields::MultiFabRegister& m_fields, amrex::Real gamma, amrex::Real n_floor, int lev) { using warpx::fields::FieldType; - ablastr::fields::ScalarField rho_fp = fields.get(FieldType::rho_fp, lev); + ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev); + + // Set Ke to 0 + m_fields.get(name_mf_K, lev)->setVal(0.0_rt); + + // For safety condition (divition by rho) + amrex::Real rho_floor = PhysConst::q_e*n_floor; #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif for ( MFIter mfi(*rho_fp, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - amrex::Array4 const & rho = rho_fp->array(mfi); - amrex::Array4 const & Te = fields.get(name_mf_T, lev)->array(mfi); - const amrex::Array4 Ke = fields.get(name_mf_K, lev)->array(mfi); + amrex::Array4 const& rho = rho_fp->array(mfi); + amrex::Array4 const& Te = m_fields.get(name_mf_T, lev)->array(mfi); + amrex::Array4 const& Ke = m_fields.get(name_mf_K, lev)->array(mfi); const Box& tilebox = mfi.tilebox(); ParallelFor(tilebox, [=] 
AMREX_GPU_DEVICE (int i, int j, int k) { if( rho(i, j, k) > 0.0_rt ){ - amrex::Real ne = rho(i, j, k)/PhysConst::q_e; - Ke(i, j, k) = Te(i, j, k)*std::pow(ne, 1-gamma); + + // safety condition since we divide by rho_val later + amrex::Real rho_val = rho_floor; + if(rho(i, j, k) > rho_floor){ + rho_val = rho(i, j, k); + } + + amrex::Real ne = rho_val/PhysConst::q_e; + Ke(i, j, k) = Te(i, j, k)*std::pow(ne, 1-gamma)/PhysConst::q_e; // Ke with Te in eV to avoid small numbers } }); } } -void WarpXFluidContainer::HybridUpdateTe (ablastr::fields::MultiFabRegister& fields, amrex::Real gamma, int lev) +void WarpXFluidContainer::HybridUpdateTe (ablastr::fields::MultiFabRegister& m_fields, amrex::Real gamma, amrex::Real n_floor, int lev) { using warpx::fields::FieldType; - ablastr::fields::ScalarField rho_fp = fields.get(FieldType::rho_fp, lev); + ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev); WarpX &warpx = WarpX::GetInstance(); const amrex::Geometry &geom = warpx.Geom(lev); @@ -1515,18 +1546,23 @@ void WarpXFluidContainer::HybridUpdateTe (ablastr::fields::MultiFabRegister& fie #endif for ( MFIter mfi(*rho_fp, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - amrex::Array4 const & rho = rho_fp->array(mfi); - amrex::Array4 const & Ke = fields.get(name_mf_K, lev)->array(mfi); - const amrex::Array4 Te = fields.get(name_mf_T, lev)->array(mfi); + amrex::Array4 const& rho = rho_fp->array(mfi); + amrex::Array4 const& Ke = m_fields.get(name_mf_K, lev)->array(mfi); + amrex::Array4 const& Te = m_fields.get(name_mf_T, lev)->array(mfi); const Box& tilebox = mfi.tilebox(); ParallelFor(tilebox, [=] AMREX_GPU_DEVICE (int i, int j, int k) { Te(i, j, k) = 0; if( rho(i, j, k) > 0.0_rt ){ + amrex::Real ne = rho(i, j, k)/PhysConst::q_e; + if(neAllocateLevelMFs(m_fields, ba, dm, lev); // Initialize qdsmc particle container (add fictitious particles in nodal grid) - qdsmc_hybrid_electron_pc->InitParticles(lev); + if(m_hybrid_pic_model->m_solve_electron_energy_equation){ + qdsmc_hybrid_electron_pc->InitParticles(lev); + } + } // Allocate extra multifabs needed for fluids
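
For reference, the closure these hunks implement in HybridInitializeKe and HybridUpdateTe is the polytropic entropy function Ke = Te * ne^(1-gamma), inverted after the fictitious-particle push as Te = Ke * ne^(gamma-1). Below is a minimal standalone sketch of that relation, assuming Te is stored in joules and that the /q_e and *q_e factors are only the eV conversion mentioned in the patch comment; the function names are illustrative, not WarpX API.

    // Illustrative sketch only; gamma is the adiabatic index, n_floor a density floor.
    #include <algorithm>
    #include <cmath>

    constexpr double q_e = 1.602176634e-19; // elementary charge [C]

    // Ke = Te * ne^(1-gamma), with Te converted to eV to keep the values O(1).
    double initialize_ke (double rho, double Te, double gamma, double n_floor)
    {
        const double ne = std::max(rho / q_e, n_floor); // clamp, mirroring the rho_floor logic above
        return (Te / q_e) * std::pow(ne, 1.0 - gamma);
    }

    // Inverse relation applied after the qdsmc push: Te = Ke * ne^(gamma-1).
    double update_te (double rho, double Ke, double gamma, double n_floor)
    {
        const double ne = std::max(rho / q_e, n_floor);
        return Ke * std::pow(ne, gamma - 1.0) * q_e; // convert back from eV to joules
    }

A round trip of initialize_ke followed by update_te at the same ne returns the original Te, which is the property the QDSMC step relies on when it advects Ke instead of Te.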