24 #define USE_HOMDYN_SC_MODEL
42 #define BETA_MIN1 0.30 // minimum beta-value for space-charge calculations: start
44 #ifndef USE_HOMDYN_SC_MODEL
45 #define BETA_MIN2 0.45 // minimum beta-value for space-charge calculations: full impact
50 static void Gderivs(double t, double Y[], double dYdt[]) { thisBunch->derivs(t, Y, dYdt); }
52 static double rootValue = 0.0;
55 static void erfRoot(double x, double *fn, double *df) {
60 *df = (erfc(fabs(x) + eps) - v) / eps;
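// Note: the elided body evidently stores v = erfc(fabs(x)) and returns
// *fn = v - rootValue, so that findRoot() (see line 592) solves erfc(x) = rootValue;
// *df is then a one-sided finite-difference approximation of the derivative
// of erfc(|x|) with step eps.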
88 Inform msg("calcBeamParameters");
90 double ex, ey, nex, ney, b0, bRms, bMax, bMin, g0, dgdt, gInc;
91 double RxRms = 0.0, RyRms = 0.0, Px = 0.0, PxRms = 0.0, PxMax = 0.0, PxMin = 0.0, Py = 0.0, PyRms = 0.0, PyMax = 0.0, PyMin = 0.0;
92 double Pz, PzMax, PzMin, PzRms;
93 double x0Rms, y0Rms, zRms, zMax, zMin, I0, IRms, IMax, IMin;
118 E_m = mc2e * (g0 - 1.0);
119 dEdt_m = 1.0e-12 * mc2e * dgdt;
154 dEdt_m = PzRms * mc2e * b0;
171 for(i = 1; i < numMySlices_m - 1; i++) {
176 for(i = 0; i < numMySlices_m; i++) {
181 for(i = 1; i < numMySlices_m - 1; i++) {
187 for(i = 0; i < numMySlices_m; i++) {
192 for(i = 0; i < numMySlices_m; i++) {
197 for(i = 0; i < numMySlices_m; i++) {
202 for(i = 0; i < numMySlices_m; i++) {
207 for(i = 1; i < numMySlices_m - 1; i++) {
212 for(i = 1; i < numMySlices_m - 1; i++) {
217 for(i = 1; i < numMySlices_m - 1; i++) {
222 for(i = 1; i < numMySlices_m - 1; i++) {
227 for(i = 1; i < numMySlices_m - 1; i++) {
232 throw OpalException("EnvelopeBunch", "EnvelopeBunch::runStats() Undefined label (programming error)");
237 MPI_Allreduce(MPI_IN_PLACE, &nVTot, 1, MPI_INT, MPI_SUM, Ippl::getComm());
254 for(i = 1; i < nV; i++) {
262 MPI_Allreduce(MPI_IN_PLACE, &M1, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
263 MPI_Allreduce(MPI_IN_PLACE, &M2, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
264 MPI_Allreduce(MPI_IN_PLACE, &maxv, 1, MPI_DOUBLE, MPI_MAX, Ippl::getComm());
265 MPI_Allreduce(MPI_IN_PLACE, &minv, 1, MPI_DOUBLE, MPI_MIN, Ippl::getComm());
288 *rms = sqrt(M2 / nVTot);
290 *rms = sqrt(M2 / nVTot - M1 * M1 / (nVTot * nVTot));
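// Note: with M1 = sum(v) and M2 = sum(v*v) accumulated over the local slices
// (line 254 ff.) and reduced across ranks (lines 262-265), line 288 gives the
// RMS about zero, sqrt(<v^2>), while line 290 gives the RMS about the mean,
// sqrt(<v^2> - <v>^2).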
301 double betagamma = 0.0;
314 for(int i = 0; i < i1; i++) {
319 assert(i < numMySlices_m);
321 bg = sp->p[SLI_beta] * sp->computeGamma();
326 sxxp += sp->p[SLI_x] * pbc;
331 syyp += sp->p[SLI_y] * pbc;
366 betagamma *= sqrt(1.0 - (1 / betagamma) * (1 / betagamma));
367 *emtx = *emtnx / betagamma;
368 *emty = *emtny / betagamma;
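// Note: up to line 366 betagamma holds the mean gamma; since
// gamma * sqrt(1 - 1/gamma^2) = beta*gamma, line 366 converts it in place to
// beta*gamma, and lines 367-368 then obtain the geometric (trace-space)
// emittances from the normalized ones via emt = emtn / (beta*gamma).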
387 std::shared_ptr<EnvelopeSlice> cs = slices_m[i];
389 zAvg += cs->p[SLI_z];
390 dtl[i-j] = cs->p[SLI_z];
392 g[i-j] = cs->computeGamma();
397 int nV = numMySlices_m - j;
412 std::vector<double> dtG(nVTot, 0.0);
413 std::vector<double> bG(nVTot, 0.0);
414 std::vector<double> gG(nVTot, 0.0);
417 std::vector<int> numsend(numproc, nV);
418 std::vector<int> offsets(numproc);
419 std::vector<int> offsetsG(numproc);
420 std::vector<int> zeros(numproc, 0);
422 MPI_Allgather(&nV, 1, MPI_INT, &offsets.front(), 1, MPI_INT, Ippl::getComm());
424 for(int i = 1; i < numproc; i++) {
425 if(offsets[i-1] == 0)
428 offsetsG[i] = offsetsG[i-1] + offsets[i-1];
431 MPI_Alltoallv(&dtl.front(), &numsend.front(), &zeros.front(), MPI_DOUBLE,
432 &dtG.front(), &offsets.front(), &offsetsG.front(), MPI_DOUBLE, Ippl::getComm());
435 MPI_Alltoallv(&b.front(), &numsend.front(), &zeros.front(), MPI_DOUBLE,
436 &bG.front(), &offsets.front(), &offsetsG.front(), MPI_DOUBLE, Ippl::getComm());
439 MPI_Alltoallv(&g.front(), &numsend.front(), &zeros.front(), MPI_DOUBLE,
440 &gG.front(), &offsets.front(), &offsetsG.front(), MPI_DOUBLE, Ippl::getComm());
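// Note: the three MPI_Alltoallv calls above implement an allgather: every rank
// sends its nV local values to all ranks (numsend holds nV for each destination,
// the send displacements are all zero) and receives rank r's block at the
// prefix-sum offset offsetsG[r] built in lines 424-428, so dtG, bG and gG end
// up holding all nVTot values on every rank.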
443 double dum2, dum3, dum4, rms, gZero, gt;
446 for(int i = 0; i < nVTot; i++) {
451 linfit(&dtG[0], &gG[0], nVTot, &gZero, &gt, &dum2, &dum3, &dum4);
455 for(int i = 0; i < nVTot; i++) {
456 rms += pow(gG[i] - gZero - gt * dtG[i], 2);
459 *gInc = sqrt(rms / nVTot);
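// Note: linfit (line 451) fits the straight line g(t) = gZero + gt*t through
// the slice gammas; the slope gt is the correlated energy chirp, and the RMS
// of the fit residuals (lines 455-459) is returned in gInc as the uncorrelated
// energy spread.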
474 if(rank - 1 < (nSlice - 14) % (numproc - 1))
478 if(rank < nSlice % numproc)
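// Note: slice distribution, illustrated with hypothetical numbers nSlice = 101,
// numproc = 4: each rank starts with 101/4 = 25 slices and line 478 gives one
// extra slice to the first 101 % 4 = 1 rank(s), i.e. 26 + 25 + 25 + 25. The
// branch at line 474 evidently covers the case where this would leave ranks
// below the required minimum of 14 slices (cf. line 520): rank 0 then takes 14
// and the remaining nSlice - 14 are spread over the other numproc - 1 ranks.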
500 KR = std::unique_ptr<Vector_t[]>(new Vector_t[nSlices]);
501 KT = std::unique_ptr<Vector_t[]>(new Vector_t[nSlices]);
502 EF = std::unique_ptr<Vector_t[]>(new Vector_t[nSlices]);
503 BF = std::unique_ptr<Vector_t[]>(new Vector_t[nSlices]);
508 for(unsigned int i = 0; i < nSlices; i++) {
520 throw OpalException("EnvelopeBunch::createSlices", "use more than 13 slices");
547 Esct_m.resize(nSlices, 0.0);
548 G_m.resize(nSlices, 0.0);
549 Exw_m.resize(nSlices, 0.0);
550 Eyw_m.resize(nSlices, 0.0);
551 Ezw_m.resize(nSlices, 0.0);
553 for(unsigned int i = 0; i < nSlices; i++) {
572 double sqr2 = sqrt(2.0), v;
573 double bunch_width = 0.0;
591 rootValue = 1.0 - 2.0 * i * frac / (numSlices_m + 1);
592 v = fabs(emission_time_s) * sqr2 * findRoot(erfRoot, 0.0, 5.0, 1.0e-5) * (emission_time_s < 0.0 ? Physics::c : 1.0);
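// Note: for a Gaussian shape the slice boundaries are evidently placed at
// equal-charge quantiles: line 591 chooses rootValue so that findRoot solves
// erf(x) = 2*i*frac/(numSlices_m + 1), and line 592 scales the root by
// sqrt(2)*|emission_time_s|, i.e. by the Gaussian sigma. A negative
// emission_time_s apparently flags a width given in seconds, hence the extra
// factor Physics::c to convert it to metres.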
605 double gz0 = 0.0, gzN = 0.0;
616 for(int i = 0; i < nebin_m; i++) {
617 std::vector<int> tmp;
618 this->bins_m.push_back(tmp);
622 int bin_i = 0, slice_i = 0;
627 this->bins_m[bin_i].push_back(slice_i);
710 MPI_Allreduce(MPI_IN_PLACE, &(z_m[0]), numSlices_m, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
711 MPI_Allreduce(MPI_IN_PLACE, &(b_m[0]), numSlices_m, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
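// Note: a gather-by-reduction idiom: each rank fills only the entries of z_m
// and b_m belonging to its own slices (all other entries are zero), so the
// global MPI_SUM leaves every rank with the complete synchronized position and
// beta arrays used by the parallel tracker.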
717 static int already_called = 0;
727 int my_start = 0, my_end = 0;
739 dz2Sum += ((z1[n1] - z1[n1-1]) * (z1[n1] - z1[n1-1]));
748 msg << "n1 (= " << n1 << ") < 2" << endl;
754 double sigma_dz = sqrt(dz2Sum / (n1 - 1));
755 double beta = bSum / n1;
758 std::vector<std::pair<double,double>> z1_b(numSlices_m);
760 z1_b[i] = std::make_pair(z1[i],b[i]);
763 std::sort(z1_b.begin(), z1_b.end(),
764 [](const std::pair<double,double> &left, const std::pair<double,double> &right)
765 { return left.first < right.first; });
768 z1[i] = z1_b[i].first;
769 b [i] = z1_b[i].second;
776 std::vector<double> I1(n1, 0.0);
779 double dzMin = 0.2 * sigma_dz;
792 #pragma omp parallel for private(i,j,dz)
794 for(i = my_start; i <= vend; i++) {
797 dz = fabs(z1[i+j] - z1[i-j]);
799 } while((dz < dzMin * (j - 1)) && ((i + j) < n1) && ((i - j) >= 0));
801 if((dz >= dzMin * (j - 1)) && ((i + j) < n1) && ((i - j) >= 0)) {
802 I1[i] = 0.25 * q * Physics::c * (b[i+j] + b[i-j]) / (dz * (j - 1));
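// Note: the half-width j is grown in the do-while above until the span
// dz = |z1[i+j] - z1[i-j]| reaches at least dzMin*(j-1), which keeps the
// estimate stable where slices cluster; line 802 then estimates the local
// current from the slice charge q and the mean velocity
// 0.5*(b[i+j] + b[i-j])*Physics::c over that span.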
808 MPI_Allreduce(MPI_IN_PLACE, &(I1[0]), n1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
809 for(int i = 1; i < n1 - 1; i++) {
818 double zMin = zTail();
819 double zMax = zHead();
820 dz = (zMax - zMin) / numSlices_m;
821 std::vector<double> z2(n1, 0.0);
822 std::vector<double> I2(n1, 0.0);
830 while((j < n1) && ((z1[j] - z1[0]) <= dz)) {
841 for(int i = 1; i < n1; i++) {
845 while(((i + j) < n1) && ((z1[i+j] - z1[i]) <= dz)) {
846 if((z1[i+j] - z1[i-1]) > dz) {
856 while(((i - j) >= 0) && ((z1[i-1] - z1[i-j]) < dz)) {
857 if((z1[i] - z1[i-j]) > dz) {
870 if(z2[i-k] <= z2[i-k-1]) {
871 I2[i-k-1] = 0.5 * (I2[i-k] + I2[i-k-1]);
896 msg << "Insufficient points to calculate the current (n2 = " << n2 << ")" << endl;
901 sgSmooth(&I2.front(), n2, n2 / 20, n2 / 20, 0, 1);
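// Note: assuming sgSmooth follows the usual Savitzky-Golay conventions
// (cf. Numerical Recipes' savgol), the rebinned profile I2 is smoothed with
// n2/20 points on either side of each sample, ld = 0 (smooth the value itself
// rather than a derivative) and polynomial degree m = 1.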
915 dz = (zMax - zMin) / 99;
916 for(int i = 1; i < 100; i++) {
928 Inform msg("cSpaceCharge");
930 #ifdef USE_HOMDYN_SC_MODEL
932 double zhead = zHead();
933 double ztail = zTail();
934 double L = zhead - ztail;
942 double R = 2 * sigma_av;
945 double H1 = sqrt((1 - zeta / L) * (1 - zeta / L) + A * A) - sqrt((zeta / L) * (zeta / L) + A * A) - fabs(1 - zeta / L) + fabs(zeta / L);
946 double H2 = sqrt((1 - xi / L) * (1 - xi / L) + A * A) - sqrt((xi / L) * (xi / L) + A * A) - fabs(1 - xi / L) + fabs(xi / L);
951 double G1 = (1 - zeta / L) / sqrt((1 - zeta / L) * (1 - zeta / L) + A * A) + (zeta / L) / sqrt((zeta / L) * (zeta / L) + A * A);
952 double G2 = (1 - xi / L) / sqrt((1 - xi / L) * (1 - xi / L) + A * A) + (xi / L) / sqrt((xi / L) * (xi / L) + A * A);
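// Note: H1/H2 and G1/G2 are the longitudinal and transverse form factors of
// the HOMDYN-style slice space-charge model: zeta and xi are slice positions
// within the bunch of length L (lines 932-934), and A is evidently an
// aspect-ratio parameter built from the bunch radius R = 2*sigma_av (line 942)
// and the length L. H enters the longitudinal field Esct_m, G the transverse
// space-charge term G_m.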
959 msg << "called with insufficient slices (" << numSlices_m << ")" << endl;
964 msg << "Q or I_max <= 0.0, aborting calculation" << endl;
989 MPI_Allreduce(MPI_IN_PLACE, &nVTot, 1, MPI_INT, MPI_SUM, Ippl::getComm());
991 msg << "Exiting, too few valid slices (nV)" << endl;
996 MPI_Allreduce(MPI_IN_PLACE, &sm, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
1000 for(int localIdx = 0; localIdx < numMySlices_m; localIdx++) {
1008 double dz = fabs(zj - z0);
1009 if((dz > dzMin) && (zj > zCat)) {
1010 double Aj = xi[j] / (dz * dz);
1011 double v = 1.0 - (1.0 / sqrt(1.0 + Aj));
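// Note: with Aj = xi[j]/dz^2, the factor v = 1 - 1/sqrt(1 + Aj) has the form
// of the on-axis field of a uniformly charged disk, 1 - dz/sqrt(dz^2 + R^2),
// if xi[j] is read as the squared radius of slice j, so each slice's
// contribution falls off correctly at both small and large separations dz.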
1023 Esct[localIdx] = (bt < BETA_MIN1 ? 0.0 : bt < BETA_MIN2 ? btsq : 1.0);
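// Note: the longitudinal space-charge field is switched on gradually with
// beta, matching the BETA_MIN1/BETA_MIN2 definitions at the top of the file:
// zero below BETA_MIN1, scaled by btsq between BETA_MIN1 and BETA_MIN2 (the
// transverse term G is scaled the same way at line 1032), and applied in full
// above BETA_MIN2.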
1028 if(bt < BETA_MIN2) {
1032 G[localIdx] *= btsq;
1043 double dz = zC - zHead();
1049 *gmsg << "EnvelopeBunch::moveZ0(): bunch moved by " << dz << " m to " << zCat_m << " m" << endl;
1060 *gmsg << "EnvelopeBunch time reset at z = " << zAvg() << " m with: " << t_m << " s, new offset: " << t_m + t_offset_m << " s";
1094 double g = sqrt(g2);
1117 #ifdef USE_HOMDYN_SC_MODEL
1172 #ifndef USE_HOMDYN_SC_MODEL
1200 static int msgParsed = 0;
1203 double eps = 1.0e-4;
1204 double time_step_s = tStep;
1250 std::shared_ptr<EnvelopeSlice> sp = slices_m[i];
1267 double epsLocal = eps;
1271 while(ode_result == 1) {
1279 ode_result = odeint(&(sp->p[SLI_z]), SLNPAR, t_m, t_m + time_step_s, epsLocal, 0.1 * time_step_s, 0.0, &nok, &nbad, Gderivs);
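// Note: odeint integrates the SLNPAR parameters of slice i from t_m to
// t_m + time_step_s with relative accuracy epsLocal and initial step
// 0.1*time_step_s; the enclosing while-loop (line 1271) evidently retries with
// a relaxed epsLocal when the adaptive solver fails (message at line 1298),
// and if that is not enough falls back to the fixed-step RK routine
// (message at line 1294).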
1282 if(ode_result != 0) {
1289 if(ode_result == 1) {
1294 msg << "EnvelopeBunch::run() Switched to fixed-step RK routine for solving the DE at slice " << i << endl;
1297 } else if((epsLocal != eps) && (msgParsed == 0)) {
1298 msg << "EnvelopeBunch::run() integration accuracy relaxed to " << epsLocal << " for slice " << i << " (ONLY FIRST OCCURRENCE MARKED!)" << endl;
1319 double ga = 0.0, x0a = 0.0, px0a = 0.0, y0a = 0.0, py0a = 0.0;
1320 double beta, fi_x, fi_y;
1325 std::shared_ptr<EnvelopeSlice> sp = slices_m[i];
1328 ga += sp->computeGamma();
1348 px0a = px0a / nVTot;
1350 py0a = py0a / nVTot;
1352 msg << "EnvelopeBunch::run() No valid slices to subtract average" << endl;
1354 beta = sqrt(1.0 - (1.0 / pow(ga, 2)));
1363 std::shared_ptr<EnvelopeSlice> sp = slices_m[i];
1374 void EnvelopeBunch::initialize(int num_slices, double Q, double energy, double width, double emission_time, double frac, double current, double bunch_center, double bX, double bY, double mX, double mY, double Bz0, int nbin) {
1376 #ifdef USE_HOMDYN_SC_MODEL
1377 *gmsg << "* Using HOMDYN space-charge model" << endl;
1379 *gmsg << "* Using BET space-charge model" << endl;
1388 bunch_center = -1 * emission_time / 2.0;
1408 for(int i = 0; i < 3; i++) {
1413 MPI_Allreduce(MPI_IN_PLACE, &bf, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
1420 for(int i = 0; i < 3; i++) {
1425 MPI_Allreduce(MPI_IN_PLACE, &ef, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
1434 sum += slices_m[i]->computeGamma();
1438 MPI_Allreduce(MPI_IN_PLACE, &nValid, 1, MPI_INT, MPI_SUM, Ippl::getComm());
1439 MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
1446 double refpos = 0.0;
1455 MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPI_LONG, MPI_SUM, Ippl::getComm());
1456 MPI_Allreduce(MPI_IN_PLACE, &refpos, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
1457 return refpos / count;
1470 MPI_Allreduce(MPI_IN_PLACE, &nV, 1, MPI_INT, MPI_SUM, Ippl::getComm());
1477 MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
1498 MPI_Allreduce(MPI_IN_PLACE, &min, 1, MPI_DOUBLE, MPI_MIN, Ippl::getComm());
1518 MPI_Allreduce(MPI_IN_PLACE, &max, 1, MPI_DOUBLE, MPI_MAX, Ippl::getComm());
1525 os << "* ************** S L B U N C H ***************************************************** " << endl;
1528 os << "* dT= " << this->getdT() << " [s]" << endl;
1529 os << "* spos= " << this->zAvg() << " [m]" << endl;
1540 os << "* ********************************************************************************** " << endl;