diff --git a/GNUmakefile b/GNUmakefile index b9f7462a0..3828f4290 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -41,6 +41,7 @@ USE_PSATD = FALSE USE_PSATD_PICSAR = FALSE USE_RZ = FALSE USE_LLG = FALSE +USE_FERROE = TRUE USE_EB = FALSE diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index 55ab056fd..a98e3c3c8 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -173,6 +173,24 @@ Diagnostics::BaseReadParameters () #endif +#ifdef WARPX_FERROE + // polarization can be written to file only if WarpX::em_solver_medium == MediumForEM::Macroscopic + if (utils::algorithms::is_in(m_varnames, "px") || utils::algorithms::is_in(m_varnames, "py") || utils::algorithms::is_in(m_varnames, "pz")) + { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( + WarpX::em_solver_medium == MediumForEM::Macroscopic, + "polarization in plotfiles only works with algo.em_solver_medium=macroscopic"); + } + + // ferroelectric can be written to file only if WarpX::em_solver_medium == MediumForEM::Macroscopic + if (utils::algorithms::is_in(m_varnames, "ferroelectric")) + { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( + WarpX::em_solver_medium == MediumForEM::Macroscopic, + "ferroelectric in plotfiles only works with algo.em_solver_medium=macroscopic"); + } +#endif + // If user requests to plot proc_number for a serial run, // delete proc_number from fields_to_plot if (amrex::ParallelDescriptor::NProcs() == 1){ diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index a148e58d6..01f203eb7 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -96,6 +96,15 @@ FlushFormatCheckpoint::WriteToFile ( amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Hzbias_fp")); #endif +#ifdef WARPX_FERROE + VisMF::Write(warpx.getpolarization_fp(lev, 0), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "px_fp")); + VisMF::Write(warpx.getpolarization_fp(lev, 1), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "py_fp")); + VisMF::Write(warpx.getpolarization_fp(lev, 2), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "pz_fp")); +#endif + if (WarpX::fft_do_time_averaging) { VisMF::Write(warpx.getEfield_avg_fp(lev, 0), @@ -123,6 +132,16 @@ FlushFormatCheckpoint::WriteToFile ( amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_fp")); } + if (warpx.getis_synchronized() || WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) { + // Need to save j if synchronized because after restart we need j to evolve E by dt/2. 
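+            // With the MaxwellFerroE solver the polarization current dP/dt has already been
+            // accumulated into current_fp by EvolveFerroEJ, so J is also checkpointed here
+            // (and read back in WarpX::InitFromCheckpoint under the same condition).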
+ VisMF::Write(warpx.getcurrent_fp(lev, 0), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jx_fp")); + VisMF::Write(warpx.getcurrent_fp(lev, 1), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jy_fp")); + VisMF::Write(warpx.getcurrent_fp(lev, 2), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_fp")); + } + if (lev > 0) { VisMF::Write(warpx.getEfield_cp(lev, 0), @@ -159,6 +178,15 @@ FlushFormatCheckpoint::WriteToFile ( amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Hzbias_fp")); #endif +#ifdef WARPX_FERROE + VisMF::Write(warpx.getpolarization_cp(lev, 0), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "px_cp")); + VisMF::Write(warpx.getpolarization_cp(lev, 1), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "py_cp")); + VisMF::Write(warpx.getpolarization_cp(lev, 2), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "pz_cp")); +#endif + if (WarpX::fft_do_time_averaging) { VisMF::Write(warpx.getEfield_avg_cp(lev, 0), @@ -185,6 +213,16 @@ FlushFormatCheckpoint::WriteToFile ( VisMF::Write(warpx.getcurrent_cp(lev, 2), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_cp")); } + + if (warpx.getis_synchronized() || WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) { + // Need to save j if synchronized because after restart we need j to evolve E by dt/2. + VisMF::Write(warpx.getcurrent_cp(lev, 0), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jx_cp")); + VisMF::Write(warpx.getcurrent_cp(lev, 1), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jy_cp")); + VisMF::Write(warpx.getcurrent_cp(lev, 2), + amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_cp")); + } } if (warpx.DoPML()) { diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 3f7bf80f8..c740a5a64 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -573,6 +573,11 @@ FlushFormatPlotfile::WriteAllRawFields( WriteRawMF( warpx.getMfield_fp(lev, 0), dm, raw_pltname, default_level_prefix, "M_xface_fp", lev, plot_raw_fields_guards); WriteRawMF( warpx.getMfield_fp(lev, 1), dm, raw_pltname, default_level_prefix, "M_yface_fp", lev, plot_raw_fields_guards); WriteRawMF( warpx.getMfield_fp(lev, 2), dm, raw_pltname, default_level_prefix, "M_zface_fp", lev, plot_raw_fields_guards); +#endif +#ifdef WARPX_FERROE + WriteRawMF( warpx.getpolarization_fp(lev, 0), dm, raw_pltname, default_level_prefix, "px_fp", lev, plot_raw_fields_guards); + WriteRawMF( warpx.getpolarization_fp(lev, 1), dm, raw_pltname, default_level_prefix, "py_fp", lev, plot_raw_fields_guards); + WriteRawMF( warpx.getpolarization_fp(lev, 2), dm, raw_pltname, default_level_prefix, "pz_fp", lev, plot_raw_fields_guards); #endif if (warpx.get_pointer_F_fp(lev)) { diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index e233102ef..518a6479a 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -16,6 +16,7 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/London/London.H" +#include "FieldSolver/FerroE/FerroE.H" #include "WarpX.H" 
@@ -701,6 +702,22 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if (m_varnames[comp] == "mag_anisotropy_zface" ){ MacroscopicProperties& macroscopic = warpx.GetMacroscopicProperties(); m_all_field_functors[lev][comp] = std::make_unique(macroscopic.getmag_pointer_anisotropy(2), lev, m_crse_ratio); +#endif +#ifdef WARPX_FERROE + } else if ( m_varnames[comp] == "ferroelectric") { + m_all_field_functors[lev][comp] = std::make_unique(warpx.getFerroE().m_ferroelectric_mf.get(), lev, m_crse_ratio); + } else if ( m_varnames[comp] == "px" ){ + m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_polarization_fp(lev, 0), lev, m_crse_ratio, true, 1, 0); + } else if ( m_varnames[comp] == "py" ){ + m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_polarization_fp(lev, 1), lev, m_crse_ratio, true, 1, 0); + } else if ( m_varnames[comp] == "pz" ){ + m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_polarization_fp(lev, 2), lev, m_crse_ratio, true, 1, 0); + } else if ( m_varnames[comp] == "dpxdt" ){ + m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_polarization_fp(lev, 0), lev, m_crse_ratio, true, 1, 1); + } else if ( m_varnames[comp] == "dpydt" ){ + m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_polarization_fp(lev, 1), lev, m_crse_ratio, true, 1, 1); + } else if ( m_varnames[comp] == "dpzdt" ){ + m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_polarization_fp(lev, 2), lev, m_crse_ratio, true, 1, 1); #endif } else if ( m_varnames[comp] == "superconductor") { m_all_field_functors[lev][comp] = std::make_unique(warpx.getLondon().m_superconductor_mf.get(), lev, m_crse_ratio); diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 8960818b0..6110d68d9 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -287,6 +287,9 @@ WarpX::InitFromCheckpoint () Bfield_fp[lev][i]->setVal(0.0); #ifdef WARPX_MAG_LLG Mfield_fp[lev][i]->setVal(0.0); +#endif +#ifdef WARPX_FERROE + polarization_fp[lev][i]->setVal(0.0); #endif } @@ -302,6 +305,9 @@ WarpX::InitFromCheckpoint () Bfield_cp[lev][i]->setVal(0.0); #ifdef WARPX_MAG_LLG Mfield_cp[lev][i]->setVal(0.0); +#endif +#ifdef WARPX_FERROE + polarization_cp[lev][i]->setVal(0.0); #endif } } @@ -340,6 +346,16 @@ WarpX::InitFromCheckpoint () VisMF::Read(*H_biasfield_fp[lev][2], amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Hzbias_fp")); #endif + +#ifdef WARPX_FERROE + VisMF::Read(*polarization_fp[lev][0], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "px_fp")); + VisMF::Read(*polarization_fp[lev][1], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "py_fp")); + VisMF::Read(*polarization_fp[lev][2], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pz_fp")); +#endif + if (WarpX::fft_do_time_averaging) { VisMF::Read(*Efield_avg_fp[lev][0], @@ -366,6 +382,15 @@ WarpX::InitFromCheckpoint () amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_fp")); } + if (is_synchronized || WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) { + VisMF::Read(*current_fp[lev][0], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jx_fp")); + VisMF::Read(*current_fp[lev][1], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jy_fp")); + VisMF::Read(*current_fp[lev][2], + amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_fp")); + } + if (lev > 0) { 
                VisMF::Read(*Efield_cp[lev][0],
@@ -404,6 +429,15 @@ WarpX::InitFromCheckpoint ()
            VisMF::Read(*H_biasfield_cp[lev][2],
                        amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Hzbias_cp"));
 #endif
+
+#ifdef WARPX_FERROE
+            VisMF::Read(*polarization_cp[lev][0],
+                        amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "px_cp"));
+            VisMF::Read(*polarization_cp[lev][1],
+                        amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "py_cp"));
+            VisMF::Read(*polarization_cp[lev][2],
+                        amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pz_cp"));
+#endif
            if (WarpX::fft_do_time_averaging)
            {
                VisMF::Read(*Efield_avg_cp[lev][0],
@@ -429,6 +463,15 @@ WarpX::InitFromCheckpoint ()
                VisMF::Read(*current_cp[lev][2],
                            amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_cp"));
            }
+
+            if (is_synchronized || WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) {
+                VisMF::Read(*current_cp[lev][0],
+                            amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jx_cp"));
+                VisMF::Read(*current_cp[lev][1],
+                            amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jy_cp"));
+                VisMF::Read(*current_cp[lev][2],
+                            amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_cp"));
+            }
        }
    }
diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp
index 32a9d896e..31241551c 100644
--- a/Source/Evolve/WarpXEvolve.cpp
+++ b/Source/Evolve/WarpXEvolve.cpp
@@ -32,6 +32,7 @@
 #include "Utils/WarpXProfilerWrapper.H"
 #include "Utils/WarpXUtil.H"
 #include "FieldSolver/London/London.H"
+#include "FieldSolver/FerroE/FerroE.H"
 #include
 #include
@@ -150,6 +151,14 @@ WarpX::Evolve (int numsteps)
            m_london->EvolveLondonJ(-0.5_rt*dt[0]); // J^(n) to J^(n-1/2) using E^(n)
            FillBoundaryJ(guard_cells.ng_alloc_EB);
        }
+#ifdef WARPX_FERROE
+        if (WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) {
+            m_ferroe->EvolveP(-0.5_rt*dt[0]); // P^(n) to P^(n-1/2) using E^(n)
+            FillBoundaryP(guard_cells.ng_alloc_EB);
+            m_ferroe->EvolveFerroEJ(-0.5_rt*dt[0]); // J^(n) to J^(n-1/2) using E^(n)
+            FillBoundaryJ(guard_cells.ng_alloc_EB);
+        }
+#endif
        is_synchronized = false;
    } else {
        if (electrostatic_solver_id == ElectrostaticSolverAlgo::None) {
@@ -203,6 +212,9 @@ WarpX::Evolve (int numsteps)
-            if (WarpX::yee_coupled_solver_algo != CoupledYeeSolver::MaxwellLondon) {
+            // Skip the particle push/deposition for both coupled Yee solvers; a second,
+            // separate guard here would push and deposit twice for the plain Yee case.
+            if (WarpX::yee_coupled_solver_algo != CoupledYeeSolver::MaxwellLondon &&
+                WarpX::yee_coupled_solver_algo != CoupledYeeSolver::MaxwellFerroE) {
                PushParticlesandDepose(cur_time, skip_deposition);
            }
        }
        // Electromagnetic case: multi-J algorithm
        else if (do_multi_J)
@@ -429,6 +441,9 @@ WarpX::OneStep_nosub (Real cur_time)
-    if (WarpX::yee_coupled_solver_algo != CoupledYeeSolver::MaxwellLondon) {
+    if (WarpX::yee_coupled_solver_algo != CoupledYeeSolver::MaxwellLondon &&
+        WarpX::yee_coupled_solver_algo != CoupledYeeSolver::MaxwellFerroE) {
        PushParticlesandDepose(cur_time);
    }
 #ifndef WARPX_MAG_LLG
    if (WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellLondon) {
        amrex::Print() << " in evolve london j\n";
@@ -438,6 +453,16 @@
        // fill boundary here
    }
 #endif
+#ifdef WARPX_FERROE
+    if (WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) {
+        amrex::Print() << " in evolve ferroelectric j\n";
+        m_ferroe->EvolveP(dt[0]); // P^(n-1/2) to P^(n+1/2) using E^(n)
+        FillBoundaryP(guard_cells.ng_alloc_EB);
+        m_ferroe->EvolveFerroEJ(dt[0]); // J^(n-1/2) to J^(n+1/2) using E^(n)
+        FillBoundaryJ(guard_cells.ng_alloc_EB);
+        // fill boundary here
+    }
+#endif
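+    // Staggering note: as with the London current, once the run is synchronized P and dP/dt
+    // live at half-integer time levels (n-1/2 -> n+1/2) while E lives at integer levels;
+    // EvolveP/EvolveFerroEJ above therefore advance them by a full dt using E^(n), and the
+    // -dt/2 shift applied in WarpX::Evolve establishes that staggering.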
        ExecutePythonCallback("afterdeposition");
diff --git a/Source/FieldSolver/FerroE/CMakeLists.txt b/Source/FieldSolver/FerroE/CMakeLists.txt
new file mode 100644
index 000000000..412f8b81f
--- /dev/null
+++ b/Source/FieldSolver/FerroE/CMakeLists.txt
@@ -0,0 +1,4 @@
+target_sources(WarpX
+  PRIVATE
+    FerroE.cpp
+)
diff --git a/Source/FieldSolver/FerroE/Eff_Field_Grad.H b/Source/FieldSolver/FerroE/Eff_Field_Grad.H
new file mode 100644
index 000000000..1ce93115d
--- /dev/null
+++ b/Source/FieldSolver/FerroE/Eff_Field_Grad.H
@@ -0,0 +1,157 @@
+/*
+  Derivative algorithms for calculating the gradient energy terms
+*/
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real UpwardDx (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx) {
+    return (F(i+1,j,k,0) - F(i,j,k,0))/(dx[0]);
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real DownwardDx (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx) {
+    return (F(i,j,k,0) - F(i-1,j,k,0))/(dx[0]);
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real UpwardDy (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx) {
+    return (F(i,j+1,k,0) - F(i,j,k,0))/(dx[1]);
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real DownwardDy (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx) {
+    return (F(i,j,k,0) - F(i,j-1,k,0))/(dx[1]);
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real UpwardDz (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx) {
+    return (F(i,j,k+1,0) - F(i,j,k,0))/(dx[2]);
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real DownwardDz (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx) {
+    return (F(i,j,k,0) - F(i,j,k-1,0))/(dx[2]);
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real DoubleDx (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx,
+    amrex::Array4<amrex::Real> const& fe, int const ncomp = 0) {
+
+    if (ncomp == 0){ //x-component
+        if (fe(i,j,k) == 1 && fe(i+1,j,k) == 0) {//upper end of fe
+
+            return (0.0 - DownwardDx(F, i, j, k, dx))/dx[0];
+
+        } else if (fe(i,j,k) == 1 && fe(i-1,j,k) == 0) { //lower end of fe
+
+            return (UpwardDx(F, i, j, k, dx) - 0.0)/dx[0];
+
+        } else {//bulk
+
+            return (UpwardDx(F, i, j, k, dx) - DownwardDx(F, i, j, k, dx))/dx[0];
+
+        }
+    } else { //y and z component
+        if (fe(i,j,k) == 1 && fe(i+1,j,k) == 0) {//upper end of fe
+
+            return 0.5*(8.*F(i-1,j,k,0) - F(i-2,j,k,0) - 7.*F(i,j,k,0))/dx[0]/dx[0];
+
+        } else if (fe(i,j,k) == 1 && fe(i-1,j,k) == 0) { //lower end of fe
+
+            return 0.5*(8.*F(i+1,j,k,0) - F(i+2,j,k,0) - 7.*F(i,j,k,0))/dx[0]/dx[0];
+
+        } else {//bulk
+
+            return (UpwardDx(F, i, j, k, dx) - DownwardDx(F, i, j, k, dx))/(dx[0]);
+
+        }
+
+    }
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real DoubleDy (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx,
+    amrex::Array4<amrex::Real> const& fe, int const ncomp = 0) {
+
+    if (ncomp == 1){ //y-component
+        if (fe(i,j,k) == 1 && fe(i,j+1,k) == 0) {//upper end of fe
+
+            return (0.0 - DownwardDy(F, i, j, k, dx))/dx[1];
+
+        } else if (fe(i,j,k) == 1 && fe(i,j-1,k) == 0) { //lower end of fe
+
+            return (UpwardDy(F, i, j, k, dx) - 0.0)/dx[1];
+
+        } else {//bulk
+
+            return (UpwardDy(F, i, j, k, dx) - DownwardDy(F, i, j, k, dx))/(dx[1]);
+
+        }
+    } else { //x and z component
+        if (fe(i,j,k) == 1 && fe(i,j+1,k) == 0) {//upper end of fe
+
+            return 0.5*(8.*F(i,j-1,k,0) - F(i,j-2,k,0) - 7.*F(i,j,k,0))/dx[1]/dx[1];
+
+        } else if (fe(i,j,k) == 1 && fe(i,j-1,k) == 0) { //lower end of fe
+
+            return 0.5*(8.*F(i,j+1,k,0) - F(i,j+2,k,0) - 7.*F(i,j,k,0))/dx[1]/dx[1];
+
+        } else {//bulk
+
+            return (UpwardDy(F, i, j, k, dx) - DownwardDy(F, i, j, k, dx))/dx[1];
+
+        }
+
+    }
+}
+
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+static amrex::Real DoubleDz (
+    amrex::Array4<amrex::Real> const& F,
+    int const i, int const j, int const k, amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx,
+    amrex::Array4<amrex::Real> const& fe, int const ncomp = 0) {
+
+    if (ncomp == 2){ //z-component
+        if (fe(i,j,k) == 1 && fe(i,j,k+1) == 0) {//upper end of fe
+
+            return (0.0 - DownwardDz(F, i, j, k, dx))/dx[2];
+
+        } else if (fe(i,j,k) == 1 && fe(i,j,k-1) == 0) { //lower end of fe
+
+            return (UpwardDz(F, i, j, k, dx) - 0.0)/dx[2];
+
+        } else {//bulk
+
+            return (UpwardDz(F, i, j, k, dx) - DownwardDz(F, i, j, k, dx))/dx[2];
+
+        }
+    } else { //x and y component
+        if (fe(i,j,k) == 1 && fe(i,j,k+1) == 0) {//upper end of fe
+
+            return 0.5*(8.*F(i,j,k-1,0) - F(i,j,k-2,0) - 7.*F(i,j,k,0))/dx[2]/dx[2];
+
+        } else if (fe(i,j,k) == 1 && fe(i,j,k-1) == 0) { //lower end of fe
+
+            return 0.5*(8.*F(i,j,k+1,0) - F(i,j,k+2,0) - 7.*F(i,j,k,0))/dx[2]/dx[2];
+
+        } else {//bulk
+
+            return (UpwardDz(F, i, j, k, dx) - DownwardDz(F, i, j, k, dx))/dx[2];
+
+        }
+
+    }
+}
+
diff --git a/Source/FieldSolver/FerroE/Eff_Field_Landau.H b/Source/FieldSolver/FerroE/Eff_Field_Landau.H
new file mode 100644
index 000000000..6d18a768e
--- /dev/null
+++ b/Source/FieldSolver/FerroE/Eff_Field_Landau.H
@@ -0,0 +1,70 @@
+#include "FerroE.H"
+
+namespace landau_coefficient{
+
+    // Landau free energy expansion coefficients
+    constexpr amrex::Real alpha_1 = -3.7116e7;
+    constexpr amrex::Real alpha_11 = -2.097e8;
+    constexpr amrex::Real alpha_12 = 7.974e8;
+    constexpr amrex::Real alpha_111 = 1.294e9;
+    constexpr amrex::Real alpha_112 = -1.95e9;
+    constexpr amrex::Real alpha_123 = -2.5e9;
+    constexpr amrex::Real alpha_1111 = 3.863e10;
+    constexpr amrex::Real alpha_1112 = 2.529e10;
+    constexpr amrex::Real alpha_1122 = 1.637e10;
+    constexpr amrex::Real alpha_1123 = 1.367e10;
+}
+
+//Compute x-component of electric field corresponding to the Landau free energy contribution
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+amrex::Real compute_ex_Landau(const amrex::Real Px, const amrex::Real Py, const amrex::Real Pz)
+{
+    using namespace landau_coefficient;
+    return -2.*alpha_1 * Px
+           -4.*alpha_11 * std::pow(Px,3.) -2.*alpha_12 * Px * (std::pow(Py,2.) + std::pow(Pz,2.))
+           -6.*alpha_111 * std::pow(Px,5.)
+           -4.*alpha_112 * std::pow(Px,3.) * (std::pow(Py,2.) + std::pow(Pz,2.))
+           -2.*alpha_112 * Px * (std::pow(Py,4.) + std::pow(Pz,4.))
+           -2.*alpha_123 * Px * std::pow(Py,2.) * std::pow(Pz,2.)
+           -8.*alpha_1111 * std::pow(Px,7.)
+           -2.*alpha_1112 * Px * ( std::pow(Py,6.) + std::pow(Pz,6.) + 3.*std::pow(Px,4.)*(std::pow(Py,2) + std::pow(Pz,2)))
+           -4.*alpha_1122*std::pow(Px,3.)*(std::pow(Py,4.) + std::pow(Pz,4))
+           -2.*alpha_1123*Px*std::pow(Py,2.)*std::pow(Pz,2.)*(2.*std::pow(Px,2.) + std::pow(Py,2.) + std::pow(Pz,2));
+}
+
+
+//Compute y-component of electric field corresponding to the Landau free energy contribution
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+amrex::Real compute_ey_Landau(const amrex::Real Px, const amrex::Real Py, const amrex::Real Pz)
+{
+    using namespace landau_coefficient;
+    return -2.*alpha_1 * Py
+           -4.*alpha_11 * std::pow(Py,3.) -2.*alpha_12 * Py * (std::pow(Pz,2.) + std::pow(Px,2.))
+           -6.*alpha_111 * std::pow(Py,5.)
+           -4.*alpha_112 * std::pow(Py,3.) * (std::pow(Pz,2.) + std::pow(Px,2.))
+           -2.*alpha_112 * Py * (std::pow(Pz,4.) + std::pow(Px,4.))
+           -2.*alpha_123 * Py * std::pow(Pz,2.) * std::pow(Px,2.)
+           -8.*alpha_1111 * std::pow(Py,7.)
+           -2.*alpha_1112 * Py * ( std::pow(Pz,6.) + std::pow(Px,6.) + 3.*std::pow(Py,4.)*(std::pow(Pz,2) + std::pow(Px,2)))
+           -4.*alpha_1122*std::pow(Py,3.)*(std::pow(Pz,4.) + std::pow(Px,4))
+           -2.*alpha_1123*Py*std::pow(Pz,2.)*std::pow(Px,2.)*(2.*std::pow(Py,2.) + std::pow(Pz,2.) + std::pow(Px,2));
+}
+
+
+//Compute z-component of electric field corresponding to the Landau free energy contribution
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+amrex::Real compute_ez_Landau(const amrex::Real Px, const amrex::Real Py, const amrex::Real Pz)
+{
+    using namespace landau_coefficient;
+    return -2.*alpha_1 * Pz
+           -4.*alpha_11 * std::pow(Pz,3.) -2.*alpha_12 * Pz * (std::pow(Px,2.) + std::pow(Py,2.))
+           -6.*alpha_111 * std::pow(Pz,5.)
+           -4.*alpha_112 * std::pow(Pz,3.) * (std::pow(Px,2.) + std::pow(Py,2.))
+           -2.*alpha_112 * Pz * (std::pow(Px,4.) + std::pow(Py,4.))
+           -2.*alpha_123 * Pz * std::pow(Px,2.) * std::pow(Py,2.)
+           -8.*alpha_1111 * std::pow(Pz,7.)
+           -2.*alpha_1112 * Pz * ( std::pow(Px,6.) + std::pow(Py,6.) + 3.*std::pow(Pz,4.)*(std::pow(Px,2) + std::pow(Py,2)))
+           -4.*alpha_1122*std::pow(Pz,3.)*(std::pow(Px,4.) + std::pow(Py,4))
+           -2.*alpha_1123*Pz*std::pow(Px,2.)*std::pow(Py,2.)*(2.*std::pow(Pz,2.) + std::pow(Px,2.) + std::pow(Py,2));
+}
+
diff --git a/Source/FieldSolver/FerroE/FerroE.H b/Source/FieldSolver/FerroE/FerroE.H
new file mode 100644
index 000000000..6ffa76fe9
--- /dev/null
+++ b/Source/FieldSolver/FerroE/FerroE.H
@@ -0,0 +1,46 @@
+/* Copyright 2024 Prabhat Kumar
+ *
+ * This file is part of WarpX.
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+
+#ifndef FERROE_H
+#define FERROE_H
+
+#include <AMReX_REAL.H>
+#include <AMReX_IntVect.H>
+#include <AMReX_Vector.H>
+#include <AMReX_MultiFab.H>
+#include <AMReX_Parser.H>
+#include <AMReX_GpuContainers.H>
+#include <memory>
+#include <string>
+
+
+class FerroE {
+
+public:
+    //Constructor
+    FerroE ();
+
+    void ReadParameters ();
+    void InitData ();
+    void EvolveFerroEJ (amrex::Real dt);
+    void EvolveP (amrex::Real dt);
+
+    void InitializeFerroelectricMultiFabUsingParser ( amrex::MultiFab *sc_mf,
+        amrex::ParserExecutor<3> const& sc_parser, const int lev);
+
+    std::string m_str_ferroelectric_function;
+    std::unique_ptr<amrex::Parser> m_ferroelectric_parser;
+    std::unique_ptr<amrex::MultiFab> m_ferroelectric_mf;
+
+    int include_Landau = 1;
+    int include_grad = 0;
+
+
+};
+
+#endif
diff --git a/Source/FieldSolver/FerroE/FerroE.cpp b/Source/FieldSolver/FerroE/FerroE.cpp
new file mode 100644
index 000000000..886294ef2
--- /dev/null
+++ b/Source/FieldSolver/FerroE/FerroE.cpp
@@ -0,0 +1,313 @@
+#include "FerroE.H"
+#include "Eff_Field_Landau.H"
+#include "Eff_Field_Grad.H"
+#include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H"
+#include "Utils/WarpXUtil.H"
+#include "WarpX.H"
+#include <AMReX_ParmParse.H>
+#include "Utils/Parser/IntervalsParser.H"
+#include "Utils/Parser/ParserUtils.H"
+#include <AMReX.H>
+#include <AMReX_Array4.H>
+#include <AMReX_BoxArray.H>
+#include <AMReX_DistributionMapping.H>
+#include <AMReX_GpuLaunch.H>
+#include <AMReX_IntVect.H>
+#include <AMReX_MFIter.H>
+
+#include <AMReX_MultiFab.H>
+
+#include <memory>
+#include <string>
+
+FerroE::FerroE ()
+{
+    amrex::Print() << " FerroE class is constructed\n";
+    ReadParameters();
+}
+
+void
+FerroE::ReadParameters ()
+{
+    amrex::ParmParse pp_ferroe("ferroe");
+    pp_ferroe.query("include_Landau", include_Landau);
+    pp_ferroe.query("include_grad", include_grad);
+
+    utils::parser::Store_parserString(pp_ferroe, "ferroelectric_function(x,y,z)", m_str_ferroelectric_function);
+    m_ferroelectric_parser = std::make_unique<amrex::Parser>(
+        utils::parser::makeParser(m_str_ferroelectric_function, {"x", "y", "z"}));
+}
+
+void
+FerroE::InitData()
+{
+    auto& warpx = WarpX::GetInstance();
+
+    const int lev = 0;
+    amrex::BoxArray ba = warpx.boxArray(lev);
+    amrex::DistributionMapping dmap = warpx.DistributionMap(lev);
+    // number of guard cells used in EB solver
+    const amrex::IntVect ng_EB_alloc = warpx.getngEB();
+    // Define a nodal multifab to store whether a region is ferroelectric (1) or not (0)
+    const amrex::IntVect nodal_flag = amrex::IntVect::TheNodeVector();
+    const int ncomps = 1;
+    m_ferroelectric_mf = std::make_unique<amrex::MultiFab>(amrex::convert(ba,nodal_flag), dmap, ncomps, ng_EB_alloc);
+
+    InitializeFerroelectricMultiFabUsingParser(m_ferroelectric_mf.get(), m_ferroelectric_parser->compile<3>(), lev);
+
+}
+
+void
+FerroE::EvolveFerroEJ (amrex::Real dt)
+{
+    amrex::Print() << " evolve Ferroelectric J using P\n";
+
+    auto & warpx = WarpX::GetInstance();
+    const int lev = 0;
+    const amrex::IntVect ng_EB_alloc = warpx.getngEB();
+
+    amrex::MultiFab * jx = warpx.get_pointer_current_fp(lev, 0);
+    amrex::MultiFab * jy = warpx.get_pointer_current_fp(lev, 1);
+    amrex::MultiFab * jz = warpx.get_pointer_current_fp(lev, 2);
+
+    //Px, Py, and Pz have 2 components each. Px(i,j,k,0) is Px and Px(i,j,k,1) is dPx/dt and so on..
+    amrex::MultiFab * Px = warpx.get_pointer_polarization_fp(lev, 0);
+    amrex::MultiFab * Py = warpx.get_pointer_polarization_fp(lev, 1);
+    amrex::MultiFab * Pz = warpx.get_pointer_polarization_fp(lev, 2);
+
+    // J_tot = free electric current + polarization current (dP/dt)
+
+    for (amrex::MFIter mfi(*jx, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
+        //Extract field data
+        amrex::Array4<amrex::Real> const& jx_arr = jx->array(mfi);
+        amrex::Array4<amrex::Real> const& jy_arr = jy->array(mfi);
+        amrex::Array4<amrex::Real> const& jz_arr = jz->array(mfi);
+        amrex::Array4<amrex::Real> const& dPx_dt_arr = Px->array(mfi);
+        amrex::Array4<amrex::Real> const& dPy_dt_arr = Py->array(mfi);
+        amrex::Array4<amrex::Real> const& dPz_dt_arr = Pz->array(mfi);
+        amrex::Array4<amrex::Real> const& fe_arr = m_ferroelectric_mf->array(mfi);
+        amrex::Box const& tjx = mfi.tilebox(jx->ixType().toIntVect());
+        amrex::Box const& tjy = mfi.tilebox(jy->ixType().toIntVect());
+        amrex::Box const& tjz = mfi.tilebox(jz->ixType().toIntVect());
+
+
+        amrex::ParallelFor(tjx, tjy, tjz,
+        [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+            if (fe_arr(i,j,k)==1 and fe_arr(i+1,j,k)==1) {
+                jx_arr(i,j,k) += dPx_dt_arr(i,j,k,1);
+            }
+        },
+        [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+            if (fe_arr(i,j,k)==1 and fe_arr(i,j+1,k)==1) {
+                jy_arr(i,j,k) += dPy_dt_arr(i,j,k,1);
+            }
+        },
+        [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+            if (fe_arr(i,j,k)==1 and fe_arr(i,j,k+1)==1) {
+                jz_arr(i,j,k) += dPz_dt_arr(i,j,k,1);
+            }
+        }
+        );
+    }
+
+}
+
+void
+FerroE::InitializeFerroelectricMultiFabUsingParser (
+    amrex::MultiFab *sc_mf,
+    amrex::ParserExecutor<3> const& sc_parser,
+    const int lev)
+{
+    using namespace amrex::literals;
+
+    WarpX& warpx = WarpX::GetInstance();
+    const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx_lev = warpx.Geom(lev).CellSizeArray();
+    const amrex::RealBox& real_box = warpx.Geom(lev).ProbDomain();
+    amrex::IntVect iv = sc_mf->ixType().toIntVect();
+    for ( amrex::MFIter mfi(*sc_mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi ) {
+        // Initialize ghost cells in addition to valid cells
+
+        const amrex::Box& tb = mfi.tilebox( iv, sc_mf->nGrowVect());
+        amrex::Array4<amrex::Real> const& sc_fab = sc_mf->array(mfi);
+        amrex::ParallelFor (tb,
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                // Shift x, y, z position based on index type (only 3D supported for now)
+                amrex::Real fac_x =
(1._rt - iv[0]) * dx_lev[0] * 0.5_rt; + amrex::Real x = i * dx_lev[0] + real_box.lo(0) + fac_x; + amrex::Real fac_y = (1._rt - iv[1]) * dx_lev[1] * 0.5_rt; + amrex::Real y = j * dx_lev[1] + real_box.lo(1) + fac_y; + amrex::Real fac_z = (1._rt - iv[2]) * dx_lev[2] * 0.5_rt; + amrex::Real z = k * dx_lev[2] + real_box.lo(2) + fac_z; + // initialize the macroparameter + sc_fab(i,j,k) = sc_parser(x,y,z); + }); + + } +} + + +/*Evolution of the polarization P is governed by mu*d^2P/dt^2 + gamma*dP/dt = E_eff + *Let dP/dt = v, then we need to solve the system of following two first-order ODEs + *dv/dt = (E_eff - gamma*v)/mu + *dP/dt = v + */ +//AMREX_GPU_DEVICE +//amrex::Real func(amrex::Real E_eff, amrex::Real dp_dt, amrex::Real gamma, amrex::Real mu) +//{ +// amrex::Real d2p_dt2 = (E_eff - gamma*dp_dt) / mu; +// return d2p_dt2; +//} +// +//AMREX_GPU_DEVICE +//std::pair func(amrex::Real E_eff, amrex::Real dp_dt, amrex::Real gamma, amrex::Real mu) +//{ +// amrex::Real d2p_dt2 = (E_eff - gamma*dp_dt) / mu; +// return std::make_pair(dp_dt, d2p_dt2); +//} +// +// RK4 time integrator +AMREX_GPU_DEVICE AMREX_FORCE_INLINE +void update_p(amrex::Real& p, amrex::Real& dp_dt, amrex::Real dt, amrex::Real E_eff, amrex::Real gamma, amrex::Real mu) +{ + int use_forward_Euler = 0; +// +// //amrex::Real k1, k2, k3, k4; +// +// // Calculate k1 +// auto k1 = func(E_eff, dp_dt, gamma, mu); +// +// amrex::Real p_1 = p + 0.5*dt*k1.first; +// amrex::Real dp_dt_1 = dp_dt + 0.5*dt*k1.second; +// +// // Calculate k2 +// k2 = func(E_eff, dp_dt_1, gamma, mu); +// +// amrex::Real p_1 = p + 0.5*dt*k1.first; +// amrex::Real dp_dt_1 = dp_dt + 0.5*dt*k1.second; +// +// // Calculate k3 +// k3 = dt * func(E_eff, result + 0.5*k2, gamma, mu); +// +// // Calculate k4 +// k4 = dt * func(E_eff, result + k3, gamma, mu); +// +// if (use_RK4){ +// // Update result using weighted sum of ks +// result += (1.0 / 6.0) * (k1 + 2.0*k2 + 2.0*k3 + k4); +// } else { +// result += k1; +// } + //auto k1 = func(E_eff, dp_dt, gamma, mu); + //p += dt*k1.first; + //dp_dt += dt*k1.second; + + if (use_forward_Euler) { + //Forward Euler + amrex::Real rhs = (E_eff - gamma*dp_dt) / mu; + dp_dt += dt*rhs; + p += dt*dp_dt; + } else { + //2nd Order + amrex::Real v_old = dp_dt; + amrex::Real fac = 0.5*dt*gamma/mu; + dp_dt = 1./(1. + fac) * (dt/mu*E_eff + (1. - fac)*v_old); + p += 0.5*dt*(v_old + dp_dt); + } +} + +void +FerroE::EvolveP (amrex::Real dt) +{ + amrex::Print() << " evolve P \n"; + auto & warpx = WarpX::GetInstance(); + const int lev = 0; + const amrex::GpuArray dx = warpx.Geom(lev).CellSizeArray(); + + //Px, Py, and Pz have 2 components each. Px(i,j,k,0) is Px and Px(i,j,k,1) is dPx/dt and so on.. 
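+    // update_p (defined above) advances each polarization component with a semi-implicit step.
+    // A sketch of the algebra it implements, assuming E_eff is frozen over the step:
+    //   discretizing  mu*dv/dt + gamma*v = E_eff  with v time-averaged over the step,
+    //     mu*(v_new - v_old)/dt + gamma*(v_new + v_old)/2 = E_eff
+    //   => v_new = [ (dt/mu)*E_eff + (1 - fac)*v_old ] / (1 + fac),  with fac = 0.5*dt*gamma/mu,
+    //   and P is then advanced with the trapezoidal rule: P_new = P_old + 0.5*dt*(v_old + v_new).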
+ amrex::MultiFab * Px = warpx.get_pointer_polarization_fp(lev, 0); + amrex::MultiFab * Py = warpx.get_pointer_polarization_fp(lev, 1); + amrex::MultiFab * Pz = warpx.get_pointer_polarization_fp(lev, 2); + + amrex::MultiFab * Ex = warpx.get_pointer_Efield_fp(lev, 0); + amrex::MultiFab * Ey = warpx.get_pointer_Efield_fp(lev, 1); + amrex::MultiFab * Ez = warpx.get_pointer_Efield_fp(lev, 2); + + for (amrex::MFIter mfi(*Px, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + //Extract field data + amrex::Array4 const& Px_arr = Px->array(mfi); + amrex::Array4 const& Py_arr = Py->array(mfi); + amrex::Array4 const& Pz_arr = Pz->array(mfi); + amrex::Array4 const& Ex_arr = Ex->array(mfi); + amrex::Array4 const& Ey_arr = Ey->array(mfi); + amrex::Array4 const& Ez_arr = Ez->array(mfi); + amrex::Array4 const& fe_arr = m_ferroelectric_mf->array(mfi); + amrex::Box const& tpx = mfi.tilebox(Px->ixType().toIntVect()); + amrex::Box const& tpy = mfi.tilebox(Py->ixType().toIntVect()); + amrex::Box const& tpz = mfi.tilebox(Pz->ixType().toIntVect()); + + auto L_include_Landau = include_Landau; + auto L_include_grad = include_grad; + + // Coefficients in the evolution of the polarization equation + constexpr amrex::Real mu = 1.35e-18; + constexpr amrex::Real gamma = 2.0e-7; + // Gradient energy coefficient + constexpr amrex::Real G_11 = 5.1e-10; + + amrex::ParallelFor(tpx, tpy, tpz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + if (fe_arr(i,j,k)==1 and fe_arr(i+1,j,k)==1) { + + amrex::Real Ex_eff = Ex_arr(i,j,k); + + if (L_include_Landau == 1){ + Ex_eff += compute_ex_Landau(Px_arr(i,j,k,0), Py_arr(i,j,k,0), Pz_arr(i,j,k,0)); + } + + if (L_include_grad == 1){ + Ex_eff += G_11*DoubleDx(Px_arr, i, j, k, dx, fe_arr, 0) + G_11*DoubleDy(Px_arr, i, j, k, dx, fe_arr, 0) + G_11*DoubleDz(Px_arr, i, j, k, dx, fe_arr, 0); + } + + //get Px and dPx/dt using numerical integration + update_p(Px_arr(i,j,k,0), Px_arr(i,j,k,1), dt, Ex_eff, gamma, mu); + } + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + if (fe_arr(i,j,k)==1 and fe_arr(i,j+1,k)==1) { + + amrex::Real Ey_eff = Ey_arr(i,j,k); + + if (L_include_Landau == 1){ + Ey_eff += compute_ey_Landau(Px_arr(i,j,k,0), Py_arr(i,j,k,0), Pz_arr(i,j,k,0)); + } + + if (L_include_grad == 1){ + Ey_eff += G_11*DoubleDx(Py_arr, i, j, k, dx, fe_arr, 1) + G_11*DoubleDy(Py_arr, i, j, k, dx, fe_arr, 1) + G_11*DoubleDz(Py_arr, i, j, k, dx, fe_arr, 1); + } + + //get Py and dPy/dt using numerical integration + update_p(Py_arr(i,j,k,0), Py_arr(i,j,k,1), dt, Ey_eff, gamma, mu); + } + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + if (fe_arr(i,j,k)==1 and fe_arr(i,j,k+1)==1) { + + amrex::Real Ez_eff = Ez_arr(i,j,k); + + if (L_include_Landau == 1){ + Ez_eff += compute_ez_Landau(Px_arr(i,j,k,0), Py_arr(i,j,k,0), Pz_arr(i,j,k,0)); + } + + if (L_include_grad == 1){ + Ez_eff += G_11*DoubleDx(Pz_arr, i, j, k, dx, fe_arr, 2) + G_11*DoubleDy(Pz_arr, i, j, k, dx, fe_arr, 2) + G_11*DoubleDz(Pz_arr, i, j, k, dx, fe_arr, 2); + } + + //get Pz and dPz/dt using numerical integration + update_p(Pz_arr(i,j,k,0), Pz_arr(i,j,k,1), dt, Ez_eff, gamma, mu); + } + } + ); + } +} + diff --git a/Source/FieldSolver/FerroE/Make.package b/Source/FieldSolver/FerroE/Make.package new file mode 100644 index 000000000..b22c2ef0c --- /dev/null +++ b/Source/FieldSolver/FerroE/Make.package @@ -0,0 +1,3 @@ +CEXE_sources += FerroE.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/FerroE diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveP.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveP.cpp new file 
mode 100644 index 000000000..99481b76e --- /dev/null +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveP.cpp @@ -0,0 +1,33 @@ +/*Evolution of the polarization P is governed by mu*d^2P/dt^2 + gamma*dP/dt = E_eff + *Let dP/dt = v, then we need to solve the system of following two first-order ODEs + *dP/dt = v + *dv/dt = (E - gamma*v)/mu + */ + + +// Define the function representing the system of first-order ODEs +AMREX_GPU_DEVICE +void dPdt(const Real t, const Real* P, Real* dPdt_result, const Real mu, const Real gamma, const Real E) +{ + dPdt_result[0] = P[1]; + dPdt_result[1] = E / mu - gamma * P[1] / mu; +} + +// Forward Euler time integrator +void forwardEuler(const Real t, MultiFab& P, const Real dt, const Real mu, const Real gamma, const Real E) +{ + Real k[2]; + + for (MFIter mfi(P); mfi.isValid(); ++mfi) + { + const Box& bx = mfi.tilebox(); + const auto& P_arr = P.array(mfi); + + // Calculate derivatives + dPdt(t, &P_arr(bx.loVect(), 0), k, mu, gamma, E); + + // Update P using forward Euler method + for (int i = 0; i < 2; ++i) + P_arr(bx.loVect(), i) += dt * k[i]; + } +} diff --git a/Source/FieldSolver/Make.package b/Source/FieldSolver/Make.package index 223d2f7ac..74ff4aa68 100644 --- a/Source/FieldSolver/Make.package +++ b/Source/FieldSolver/Make.package @@ -7,6 +7,9 @@ ifeq ($(USE_PSATD),TRUE) endif include $(WARPX_HOME)/Source/FieldSolver/FiniteDifferenceSolver/Make.package include $(WARPX_HOME)/Source/FieldSolver/London/Make.package +ifeq ($(USE_FERROE),TRUE) +include $(WARPX_HOME)/Source/FieldSolver/FerroE/Make.package +endif include $(WARPX_HOME)/Source/FieldSolver/MagnetostaticSolver/Make.package VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 000082fca..99734b87f 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -426,6 +426,13 @@ WarpX::InitData () m_london->InitData(); } +#ifdef WARPX_FERROE + if (WarpX::yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) { + amrex::Print() << " calling ferroe \n"; + m_ferroe->InitData(); + } +#endif + if (ParallelDescriptor::IOProcessor()) { std::cout << "\nGrids Summary:\n"; printGridSummary(std::cout, 0, finestLevel()); @@ -745,6 +752,15 @@ WarpX::InitLevelData (int lev, Real /*time*/) ::tolower); #endif +#ifdef WARPX_FERROE + pp_warpx.query("P_ext_grid_init_style", P_ext_grid_s); // user-defined initial M + std::transform(P_ext_grid_s.begin(), + P_ext_grid_s.end(), + P_ext_grid_s.begin(), + ::tolower); + +#endif + // * Functions with the string "arr" in their names get an Array of // values from the given entry in the table. The array argument is // resized (if necessary) to hold all the values requested. 
@@ -773,6 +789,12 @@ WarpX::InitLevelData (int lev, Real /*time*/)
    if (H_bias_ext_grid_s == "constant")
        utils::parser::getArrWithParser(pp_warpx,"H_bias_external_grid", H_bias_external_grid);
 #endif
+
+#ifdef WARPX_FERROE
+    if (P_ext_grid_s == "constant")
+        utils::parser::getArrWithParser(pp_warpx, "P_external_grid", P_external_grid);
+#endif
+
    // initialize the averaged fields only if the averaged algorithm
    // is activated ('psatd.do_time_averaging=1')
    ParmParse pp_psatd("psatd");
@@ -849,6 +871,15 @@ WarpX::InitLevelData (int lev, Real /*time*/)
            }
        }
+#endif
+#ifdef WARPX_FERROE
+       if (P_ext_grid_s == "constant" || P_ext_grid_s == "default"){
+
+           int nghost = 1;
+           // polarization_fp[lev][i] has two components: P (component 0) and dP/dt (component 1).
+           // Initialize P to the requested constant for this direction and leave dP/dt at zero.
+           polarization_fp[lev][i]->setVal(P_external_grid[i], 0, 1, nghost);
+       }
 #endif
    }
@@ -1161,6 +1192,52 @@ WarpX::InitLevelData (int lev, Real /*time*/)
 #endif //closes #ifdef WARPX_MAG_LLG
+#ifdef WARPX_FERROE
+    if (P_ext_grid_s == "parse_p_ext_grid_function") {
+#ifdef WARPX_DIM_RZ
+       amrex::Abort("P-field parser for external fields does not work with RZ");
+#endif
+       utils::parser::Store_parserString(pp_warpx, "Px_external_grid_function(x,y,z)",
+          str_Px_ext_grid_function);
+       utils::parser::Store_parserString(pp_warpx, "Py_external_grid_function(x,y,z)",
+          str_Py_ext_grid_function);
+       utils::parser::Store_parserString(pp_warpx, "Pz_external_grid_function(x,y,z)",
+          str_Pz_ext_grid_function);
+
+       Pxfield_parser = std::make_unique<amrex::Parser>(
+          utils::parser::makeParser(str_Px_ext_grid_function,{"x","y","z"}));
+       Pyfield_parser = std::make_unique<amrex::Parser>(
+          utils::parser::makeParser(str_Py_ext_grid_function,{"x","y","z"}));
+       Pzfield_parser = std::make_unique<amrex::Parser>(
+          utils::parser::makeParser(str_Pz_ext_grid_function,{"x","y","z"}));
+
+       // Initialize polarization_fp with the external function directly on the faces
+       InitializeExternalFieldsOnGridUsingParser(polarization_fp[lev][0].get(),
+                                                 polarization_fp[lev][1].get(),
+                                                 polarization_fp[lev][2].get(),
+                                                 Pxfield_parser->compile<3>(),
+                                                 Pyfield_parser->compile<3>(),
+                                                 Pzfield_parser->compile<3>(),
+                                                 m_edge_lengths[lev],
+                                                 m_face_areas[lev],
+                                                 'P',
+                                                 lev, PatchType::fine);
+       if (lev > 0) {
+          InitializeExternalFieldsOnGridUsingParser(polarization_cp[lev][0].get(),
+                                                    polarization_cp[lev][1].get(),
+                                                    polarization_cp[lev][2].get(),
+                                                    Pxfield_parser->compile<3>(),
+                                                    Pyfield_parser->compile<3>(),
+                                                    Pzfield_parser->compile<3>(),
+                                                    m_edge_lengths[lev],
+                                                    m_face_areas[lev],
+                                                    'P',
+                                                    lev, PatchType::coarse);
+       }
+    }
+
+#endif //closes #ifdef WARPX_FERROE
+
    // Reading external fields from data file
    if (add_external_B_field) {
        std::string read_fields_from_path="./";
diff --git a/Source/Make.WarpX b/Source/Make.WarpX
index 57c1e0252..c4ac76bbc 100644
--- a/Source/Make.WarpX
+++ b/Source/Make.WarpX
@@ -73,6 +73,11 @@ ifeq ($(USE_LLG),TRUE)
   DEFINES += -DWARPX_MAG_LLG
 endif
+ifeq ($(USE_FERROE),TRUE)
+  USERSuffix := $(USERSuffix).FERROE
+  DEFINES += -DWARPX_FERROE
+endif
+
 -include Make.package
 include $(WARPX_HOME)/Source/Make.package
 include $(WARPX_HOME)/Source/ablastr/Make.package
diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp
index 9f08a4b90..44bdd2421 100644
--- a/Source/Parallelization/WarpXComm.cpp
+++ b/Source/Parallelization/WarpXComm.cpp
@@ -696,6 +696,58 @@ WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::In
    }
 }
+#ifdef WARPX_FERROE
+void
+WarpX::FillBoundaryP (IntVect ng)
+{
+    for (int lev = 0; lev <= finest_level; ++lev)
+    {
+        FillBoundaryP(lev, ng);
+    }
+}
+
+void
+WarpX::FillBoundaryP (int lev,
IntVect ng) +{ + FillBoundaryP(lev, PatchType::fine, ng); + if (lev > 0) FillBoundaryP(lev, PatchType::coarse, ng); +} + +void +WarpX::FillBoundaryP (int lev, PatchType patch_type, IntVect ng) +{ + std::array mf; + amrex::Periodicity period; + + if (patch_type == PatchType::fine) + { + mf = {polarization_fp[lev][0].get(), polarization_fp[lev][1].get(), polarization_fp[lev][2].get()}; + period = Geom(lev).periodicity(); + } + else if (patch_type == PatchType::coarse) + { + amrex::Abort("EvolveP does not come with coarse patch yet"); + } + + if (do_pml) + { + // ExchangeP not needed for PML algorithm + } + + // Fill guard cells in valid domain + for (int i = 0; i < 3; ++i) + { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( + ng <= mf[i]->nGrowVect(), + "Error: in FillBoundaryP, requested more guard cells than allocated"); + + const amrex::IntVect nghost = (safe_guard_cells) ? mf[i]->nGrowVect() : ng; + ablastr::utils::communication::FillBoundary(*mf[i], nghost, WarpX::do_single_precision_comms, period); + } + +} +#endif + #ifdef WARPX_MAG_LLG void WarpX::FillBoundaryM (IntVect ng) diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index 36ea28051..e1fef1874 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -26,7 +26,8 @@ struct MediumForEM { struct CoupledYeeSolver { enum { MaxwellLondon = 0, - None = 1 + MaxwellFerroE = 1, + None = 2 }; }; diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp index 3e4f7f560..218c05ee5 100644 --- a/Source/Utils/WarpXAlgorithmSelection.cpp +++ b/Source/Utils/WarpXAlgorithmSelection.cpp @@ -142,6 +142,7 @@ const std::map IntegrationType_algo_to_int = { const std::map CoupledYeeSolver_algo_to_int = { {"maxwelllondon", CoupledYeeSolver::MaxwellLondon}, + {"maxwellferroe", CoupledYeeSolver::MaxwellFerroE}, {"default", CoupledYeeSolver::None} }; diff --git a/Source/WarpX.H b/Source/WarpX.H index a9a511817..a27b36b0d 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -39,6 +39,9 @@ #include "Utils/Parser/IntervalsParser.H" #include "Utils/WarpXAlgorithmSelection.H" #include "FieldSolver/London/London.H" +#ifdef WARPX_FERROE +#include "FieldSolver/FerroE/FerroE.H" +#endif #include #include @@ -100,6 +103,9 @@ public: MultiParticleContainer& GetPartContainer () { return *mypc; } MacroscopicProperties& GetMacroscopicProperties () { return *m_macroscopic_properties; } London& getLondon () { return *m_london; } +#ifdef WARPX_FERROE + FerroE& getFerroE () { return *m_ferroe; } +#endif MultiDiagnostics& GetMultiDiags () {return *multi_diags;} ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } @@ -125,6 +131,10 @@ public: static amrex::Vector H_bias_external_grid; #endif +#ifdef WARPX_FERROE + static amrex::Vector P_external_grid; +#endif + //! Initialization type for external magnetic field on the grid static std::string B_ext_grid_s; //! Initialization type for external electric field on the grid @@ -143,6 +153,11 @@ public: static std::string M_ext_grid_s; // added for M; static std::string H_bias_ext_grid_s; #endif + +#ifdef WARPX_FERROE + static std::string P_ext_grid_s; // added for P; +#endif + static int ApplyExcitationInPML; //! 
Whether to apply the effect of an externally-defined electric field static bool add_external_E_field; @@ -211,6 +226,13 @@ public: static std::string str_Hz_bias_ext_grid_function; #endif +#ifdef WARPX_FERROE + // Parser for P_external on the grid + static std::string str_Px_ext_grid_function; + static std::string str_Py_ext_grid_function; + static std::string str_Pz_ext_grid_function; +#endif + //! User-defined parser to initialize x-component of the magnetic field on the grid std::unique_ptr Bxfield_parser; //! User-defined parser to initialize y-component of the magnetic field on the grid @@ -273,6 +295,13 @@ public: std::unique_ptr Hz_biasfield_parser; #endif +#ifdef WARPX_FERROE + // Parser for M_external on the grid + std::unique_ptr Pxfield_parser; + std::unique_ptr Pyfield_parser; + std::unique_ptr Pzfield_parser; +#endif + // Algorithms //! Integer that corresponds to the current deposition algorithm (Esirkepov, direct, Vay) static short current_deposition_algo; @@ -637,6 +666,9 @@ public: // note "direction" of M means face. For M, each face stores all 3 vector components of M amrex::MultiFab * get_pointer_Mfield_fp (int lev, int direction) const { return Mfield_fp[lev][direction].get();} amrex::MultiFab * get_pointer_H_biasfield_fp (int lev, int direction) const { return H_biasfield_fp[lev][direction].get();} +#endif +#ifdef WARPX_FERROE + amrex::MultiFab * get_pointer_polarization_fp (int lev, int direction) const { return polarization_fp[lev][direction].get(); } #endif amrex::MultiFab * get_pointer_current_fp (int lev, int direction) const { return current_fp[lev][direction].get(); } amrex::MultiFab * get_pointer_current_fp_nodal (int lev, int direction) const { return current_fp_nodal[lev][direction].get(); } @@ -652,6 +684,9 @@ public: amrex::MultiFab * get_pointer_Hfield_cp (int lev, int direction) const { return Hfield_cp[lev][direction].get(); } amrex::MultiFab * get_pointer_Mfield_cp (int lev, int direction) const { return Mfield_cp[lev][direction].get(); } amrex::MultiFab * get_pointer_H_biasfield_cp (int lev, int direction) const { return H_biasfield_cp[lev][direction].get(); } +#endif +#ifdef WARPX_FERROE + amrex::MultiFab * get_pointer_polarization_cp (int lev, int direction) const { return polarization_cp[lev][direction].get(); } #endif amrex::MultiFab * get_pointer_current_cp (int lev, int direction) const { return current_cp[lev][direction].get(); } amrex::MultiFab * get_pointer_rho_cp (int lev) const { return rho_cp[lev].get(); } @@ -667,6 +702,9 @@ public: const amrex::MultiFab& getHfield (int lev, int direction) {return *Hfield_aux[lev][direction];} const amrex::MultiFab& getMfield (int lev, int direction) {return *Mfield_aux[lev][direction];} const amrex::MultiFab& getH_biasfield (int lev, int direction) {return *H_biasfield_aux[lev][direction];} +#endif +#ifdef WARPX_FERROE + const amrex::MultiFab& getpolarization_cp (int lev, int direction) {return *polarization_cp[lev][direction];} #endif const amrex::MultiFab& getcurrent_cp (int lev, int direction) {return *current_cp[lev][direction];} const amrex::MultiFab& getEfield_cp (int lev, int direction) {return *Efield_cp[lev][direction];} @@ -684,6 +722,9 @@ public: const amrex::MultiFab& getEfield_fp (int lev, int direction) {return *Efield_fp[lev][direction];} const amrex::MultiFab& getBfield_fp (int lev, int direction) {return *Bfield_fp[lev][direction];} const amrex::MultiFab& getBfield_sc_fp (int lev, int direction) {return *Bfield_sc_fp[lev][direction];} +#ifdef WARPX_FERROE + const amrex::MultiFab& 
getpolarization_fp (int lev, int direction) {return *polarization_fp[lev][direction];} +#endif #ifdef WARPX_MAG_LLG const amrex::MultiFab& getHfield_fp (int lev, int direction) {return *Hfield_fp[lev][direction];} const amrex::MultiFab& getMfield_fp (int lev, int direction) {return *Mfield_fp[lev][direction];} @@ -804,6 +845,7 @@ public: void EvolveBLondon (int lev, amrex::Real dt, DtType dt_type); void EvolveBLondon (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type); + void MacroscopicEvolveE ( amrex::Real dt); void MacroscopicEvolveE (int lev, amrex::Real dt); void MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real dt); @@ -981,6 +1023,10 @@ public: void FillBoundaryM (amrex::IntVect ng); void FillBoundaryH (amrex::IntVect ng); #endif +#ifdef WARPX_FERROE + void FillBoundaryP (amrex::IntVect ng); +#endif + void FillBoundaryF (amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryG (amrex::IntVect ng, std::optional nodal_sync = std::nullopt); @@ -994,6 +1040,11 @@ public: void FillBoundaryH (int lev, amrex::IntVect ng); #endif +#ifdef WARPX_FERROE + void FillBoundaryP (int lev, amrex::IntVect ng); +#endif + + void FillBoundaryF (int lev, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryG (int lev, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryAux (int lev, amrex::IntVect ng); @@ -1376,6 +1427,9 @@ private: #ifdef WARPX_MAG_LLG void FillBoundaryM (const int lev, const PatchType patch_type, const amrex::IntVect ng); void FillBoundaryH (const int lev, const PatchType patch_type, const amrex::IntVect ng); +#endif +#ifdef WARPX_FERROE + void FillBoundaryP (const int lev, const PatchType patch_type, const amrex::IntVect ng); #endif void FillBoundaryB (const int lev, const PatchType patch_type, const amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryE (const int lev, const PatchType patch_type, const amrex::IntVect ng, std::optional nodal_sync = std::nullopt); @@ -1600,6 +1654,9 @@ private: amrex::Vector, 3 > > current_fp_vay; amrex::Vector, 3 > > Efield_fp; amrex::Vector, 3 > > Bfield_fp; +#ifdef WARPX_FERROE + amrex::Vector, 3 > > polarization_fp; +#endif #ifdef WARPX_MAG_LLG amrex::Vector, 3 > > Mfield_fp; amrex::Vector, 3 > > Hfield_fp; @@ -1676,6 +1733,9 @@ private: amrex::Vector, 3 > > current_cp; amrex::Vector, 3 > > Efield_cp; amrex::Vector, 3 > > Bfield_cp; +#ifdef WARPX_FERROE + amrex::Vector, 3 > > polarization_cp; +#endif #ifdef WARPX_MAG_LLG amrex::Vector, 3 > > Mfield_cp; amrex::Vector, 3 > > Hfield_cp; @@ -1732,6 +1792,11 @@ private: std::unique_ptr m_macroscopic_properties; // London solver std::unique_ptr m_london; +#ifdef WARPX_FERROE + // Ferroelectric solver + std::unique_ptr m_ferroe; +#endif + #ifdef WARPX_MAG_LLG diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index ae93680ec..5aee1c116 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -92,6 +92,10 @@ Vector WarpX::H_bias_external_grid(3, 0.0); // M could be one 9-comp vector or a vector of vectors #endif +#ifdef WARPX_FERROE +Vector WarpX::P_external_grid(3, 0.0); +#endif + std::string WarpX::authors = ""; std::string WarpX::B_ext_grid_s = "default"; std::string WarpX::E_ext_grid_s = "default"; @@ -111,6 +115,12 @@ std::string WarpX::H_ext_grid_s = "default"; std::string WarpX::H_bias_ext_grid_s = "default"; // "default" sets M to zero but will be overwritten by user defined input file #endif + +#ifdef WARPX_FERROE +std::string WarpX::P_ext_grid_s = "default"; +// "default" sets 
P to zero but will be overwritten by user defined input file +#endif + bool WarpX::add_external_E_field = false; bool WarpX::add_external_B_field = false; @@ -171,6 +181,13 @@ std::string WarpX::str_Hy_bias_ext_grid_function; std::string WarpX::str_Hz_bias_ext_grid_function; #endif +#ifdef WARPX_FERROE +// Parser for P_external on the grid +std::string WarpX::str_Px_ext_grid_function; +std::string WarpX::str_Py_ext_grid_function; +std::string WarpX::str_Pz_ext_grid_function; +#endif + int WarpX::do_moving_window = 0; int WarpX::start_moving_window_step = 0; int WarpX::end_moving_window_step = -1; @@ -388,6 +405,9 @@ WarpX::WarpX () Efield_fp.resize(nlevs_max); Bfield_fp.resize(nlevs_max); Bfield_sc_fp.resize(nlevs_max); +#ifdef WARPX_FERROE + polarization_fp.resize(nlevs_max); +#endif #ifdef WARPX_MAG_LLG Mfield_fp.resize(nlevs_max); Hfield_fp.resize(nlevs_max); @@ -441,6 +461,9 @@ WarpX::WarpX () current_cp.resize(nlevs_max); Efield_cp.resize(nlevs_max); Bfield_cp.resize(nlevs_max); +#ifdef WARPX_FERROE + polarization_cp.resize(nlevs_max); +#endif #ifdef WARPX_MAG_LLG Mfield_cp.resize(nlevs_max); Hfield_cp.resize(nlevs_max); @@ -487,6 +510,12 @@ WarpX::WarpX () m_london = std::make_unique(); } +#ifdef WARPX_FERROE + if (yee_coupled_solver_algo == CoupledYeeSolver::MaxwellFerroE) { + m_ferroe = std::make_unique(); + } +#endif + // Set default values for particle and cell weights for costs update; // Default values listed here for the case AMREX_USE_GPU are determined // from single-GPU tests on Summit. @@ -1991,6 +2020,9 @@ WarpX::ClearLevel (int lev) Efield_fp [lev][i].reset(); Bfield_fp [lev][i].reset(); Bfield_sc_fp [lev][i].reset(); +#ifdef WARPX_FERROE + polarization_fp [lev][i].reset(); +#endif #ifdef WARPX_MAG_LLG Mfield_fp [lev][i].reset(); Hfield_fp [lev][i].reset(); @@ -2018,6 +2050,9 @@ WarpX::ClearLevel (int lev) current_cp[lev][i].reset(); Efield_cp [lev][i].reset(); Bfield_cp [lev][i].reset(); +#ifdef WARPX_FERROE + polarization_cp [lev][i].reset(); +#endif #ifdef WARPX_MAG_LLG Mfield_cp [lev][i].reset(); Hfield_cp [lev][i].reset(); @@ -2318,6 +2353,12 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm AllocInitMultiFab(current_fp[lev][1], amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, tag("current_fp[y]"), 0.0_rt); AllocInitMultiFab(current_fp[lev][2], amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, tag("current_fp[z]"), 0.0_rt); +#ifdef WARPX_FERROE + AllocInitMultiFab(polarization_fp[lev][0], amrex::convert(ba, jx_nodal_flag), dm, 2, ngEB, tag("polarization_fp[x]"), 0.0_rt); + AllocInitMultiFab(polarization_fp[lev][1], amrex::convert(ba, jy_nodal_flag), dm, 2, ngEB, tag("polarization_fp[y]"), 0.0_rt); + AllocInitMultiFab(polarization_fp[lev][2], amrex::convert(ba, jz_nodal_flag), dm, 2, ngEB, tag("polarization_fp[z]"), 0.0_rt); +#endif + // Match external field MultiFabs to fine patch if (add_external_B_field) { AllocInitMultiFab(Bfield_fp_external[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, tag("Bfield_fp_external[x]")); @@ -2656,6 +2697,13 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm AllocInitMultiFab(current_cp[lev][1], amrex::convert(cba, jy_nodal_flag), dm, ncomps, ngJ, tag("current_cp[y]"), 0.0_rt); AllocInitMultiFab(current_cp[lev][2], amrex::convert(cba, jz_nodal_flag), dm, ncomps, ngJ, tag("current_cp[z]"), 0.0_rt); +#ifdef WARPX_FERROE + // Create the MultiFabs for the current + AllocInitMultiFab(polarization_cp[lev][0], amrex::convert(cba, jx_nodal_flag), dm, 2, 
ngEB, tag("polarization_cp[x]"), 0.0_rt); + AllocInitMultiFab(polarization_cp[lev][1], amrex::convert(cba, jy_nodal_flag), dm, 2, ngEB, tag("polarization_cp[y]"), 0.0_rt); + AllocInitMultiFab(polarization_cp[lev][2], amrex::convert(cba, jz_nodal_flag), dm, 2, ngEB, tag("polarization_cp[z]"), 0.0_rt); +#endif + if (deposit_charge) { // For the multi-J algorithm we can allocate only one rho component (no distinction between old and new) const int rho_ncomps = (WarpX::do_multi_J) ? ncomps : 2*ncomps;
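Usage sketch (not part of the patch): a minimal input-file fragment exercising the new solver.
The ferroe.*, warpx.P_*, and fields_to_plot names below are taken from the parsers added in this
diff; the name of the coupled-solver selector and the numerical values are illustrative
assumptions only.

    algo.em_solver_medium = macroscopic
    algo.yee_coupled_solver = maxwellferroe        # assumed ParmParse key for CoupledYeeSolver
    ferroe.ferroelectric_function(x,y,z) = "(z>=0.)*(z<=20.e-9)"
    ferroe.include_Landau = 1
    ferroe.include_grad = 0
    warpx.P_ext_grid_init_style = constant
    warpx.P_external_grid = 0. 0. 0.26             # illustrative spontaneous polarization (C/m^2)
    diag1.fields_to_plot = Ex Ey Ez px py pz dpxdt dpydt dpzdt ferroelectric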