-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.cpp
More file actions
284 lines (217 loc) · 9.7 KB
/
main.cpp
File metadata and controls
284 lines (217 loc) · 9.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <math.h>
#include <random>

#include "cuda_wrapper.h"
#include "input_file.h" //Get inputs from here
using namespace std::chrono;
// An electrostatic PIC (particle-in-cell) code implemented in C++ for the two-stream instability case.
// The code is accelerated using CUDA programming.
// The Jacobi iterative method is used to solve the potential Poisson equation.
// All of the code's equations and methodology are taken from the following blog:
// https://medium.com/swlh/create-your-own-plasma-pic-simulation-with-python-39145c66578b
// The original Python code can be found here:
// https://github.com/pmocz/pic-python
#define PI 3.14159265
double getAcc( float*, double*, double*, int, int, double, int, double**);
// Entry point: sets up the two-stream-instability initial conditions, runs the
// leap-frog PIC time loop (field solve delegated to getAcc), and writes the
// final phase space (position, velocity) to PICresult.txt.
int main()
{
#ifdef device_GPU
    std::cout<<"Running this code at a god speed on GPU " <<std::endl;
#endif
#ifdef device_CPU
    std::cout<<"Running this code with a snail pace on CPU " <<std::endl;
#endif
    // ---- Generate initial conditions -----------------------------------
    srand(42);  // legacy seed, kept for reproducibility (rand() itself is unused below)
    std::default_random_engine generator;
    std::uniform_real_distribution<double> uniform_distribution(0.0, 1.0); // particle positions
    std::normal_distribution<double>       norm_distribution(0.0, 1.0);    // particle velocities

    // Simulation parameters (all supplied by input_file.h)
    int    N       = No_Of_Particles;     // number of particles
    int    Nx      = No_Of_Mesh_Cell;     // number of mesh cells
    int    tEnd    = sim_end_time;        // time at which simulation ends
    // dt is double: the old `int dt` truncated fractional timesteps and made
    // ceil(tEnd/dt) integer division (divide-by-zero for timestep < 1).
    double dt      = timestep;            // timestep
    int    n0      = electron_no_density; // electron number density (charge q = 1)
    int    vb      = beam_vel;            // beam drift velocity
    int    vth     = beam_width;          // beam thermal width
    double A       = pertub;              // perturbation amplitude
    double boxsize = box_size;            // periodic domain [0, boxsize]
    double t       = 0;                   // current time of the simulation
    double dx      = boxsize / Nx;        // mesh cell size

    int i, j, Nh, Nt;
    // Value-initialize (trailing "()") so every array starts at zero.  This
    // matters for phi_grid in particular: getAcc reads it as the Jacobi
    // initial guess, and a plain new[] would leave it indeterminate (UB).
    double *pos      = new double[N]();
    double *vel      = new double[N]();
    float  *E        = new float[N]();
    double *bvec     = new double[Nx]();  // unused here; getAcc builds its own RHS
    double *phi_grid = new double[Nx]();  // electrostatic potential on the mesh
    double **Gmtx    = new double*[Nx];   // gradient matrix: one row pointer per cell
    for (i = 0; i < Nx; ++i)
        Gmtx[i] = new double[Nx + 1]();   // each row owns its column storage

    // Construct 2 opposite-moving Gaussian beams
    Nh = (int) N/2;
    for (i = 0; i < N; i++)
    {
        pos[i] = uniform_distribution(generator) * boxsize;  // uniform positions in the box
        vel[i] = vth * norm_distribution(generator) + vb;    // normally distributed velocities
    }
    for (i = Nh; i < N; i++)
        vel[i] *= -1;                     // second half of the particles streams in -x

    // Add a sinusoidal perturbation to seed the instability
    for (i = 0; i < N; i++)
        vel[i] *= (1 + A * sin(2 * PI * pos[i] / boxsize));

    // Central-difference gradient matrix G (E = -grad phi) with periodic wrap
    for (i = 0; i < Nx; i++)
    {
        for (j = 0; j < Nx; j++)
        {
            Gmtx[i][j] = 0;
            if (i == j + 1) Gmtx[i][j] =  1 / (2 * dx);
            if (i + 1 == j) Gmtx[i][j] = -1 / (2 * dx);
        }
    }
    Gmtx[0][Nx - 1]  =  1 / (2 * dx);     // periodic corner terms
    Gmtx[Nx - 1][0]  = -1 / (2 * dx);

    // Initialize fields before the first kick
    getAcc( E, phi_grid, pos, N, Nx, boxsize, n0, Gmtx );

    // number of timesteps (real division now that dt is double)
    Nt = (int)(ceil(tEnd / dt));

    // ---- Simulation main loop (leap-frog: kick-drift-kick) -------------
    auto start = high_resolution_clock::now();   // starting wall clock
    for (int niter = 0; niter < Nt; niter++)
    {
        for (j = 0; j < N; j++)
        {
            vel[j] += 0.5 * dt * (-E[j]);        // (1/2) kick
            pos[j] += vel[j] * dt;               // drift
            // Periodic boundary: wrap position into [0, boxsize).
            // fmod keeps the sign of its argument, so negative results
            // need one extra shift by the box length.
            pos[j] = fmod(pos[j], boxsize);
            if (pos[j] < 0)
                pos[j] += boxsize;               // BUG FIX: was a hard-coded "+ 50.0",
                                                 // wrong whenever boxsize != 50 and a
                                                 // source of out-of-range grid indices
        }
        // update accelerations from the new charge distribution
        getAcc( E, phi_grid, pos, N, Nx, boxsize, n0, Gmtx );
        for (j = 0; j < N; j++)
            vel[j] += 0.5 * dt * (-E[j]);        // second (1/2) kick
        t += dt;  // BUG FIX: advance time once per step (was once per particle)
    }
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<milliseconds>(stop - start);  // stopping wall clock
#ifdef device_GPU
    std::cout<<"Time taken by GPU "<<duration.count() <<" ms" <<std::endl;
#endif
#ifdef device_CPU
    std::cout<<"Time taken by CPU "<<duration.count() <<" ms" <<std::endl;
#endif
    // Dump final phase space, one "pos \t vel" pair per line
    std::ofstream MyFile("PICresult.txt");
    std::cout << "Writing data"<<std::endl;
    for (j = 0; j < N; j++)
        MyFile << pos[j] << "\t" << vel[j] << std::endl;
    MyFile.close();

    // cleanup
    for (i = 0; i < Nx; ++i)
        delete[] Gmtx[i];
    delete[] Gmtx;
    delete[] pos;
    delete[] vel;
    delete[] E;
    delete[] bvec;
    delete[] phi_grid;
}
// Compute the electric field felt by each particle (written into E[0..N-1]).
// Steps: (1) deposit particle charge onto the mesh with linear (CIC) weights,
// (2) solve the Poisson equation laplacian(phi) = n - n0 for the potential,
// (3) take the gradient of phi on the mesh via Gmtx, (4) interpolate the mesh
// field back to the particles with the same weights.
// The return value is always 0 and is ignored by the caller.
double getAcc( float* E, double* phi_grid, double* pos, int N, int Nx, double boxsize, int n0, double** Gmtx )
{
/*
E         out: per-particle field, length N
phi_grid  in/out: mesh potential, length Nx; incoming values seed the Jacobi
          iteration (warm start from the previous timestep)
pos       in: particle positions, length N, assumed to lie in [0, boxsize)
Nx        number of mesh cells
boxsize   periodic domain [0, boxsize]
n0        electron number density
Gmtx      Nx x Nx matrix applying the periodic central-difference gradient
*/
// NOTE(review): the arrays below are variable-length arrays — a GCC/Clang
// extension, not standard C++ — and large N/Nx risks stack overflow.
// E_grid, phi_grid_old and Ep_temp are only used in the device_CPU path.
int i, j, pidx[N], pidx1[N];
double n[Nx], E_grid[Nx], phi_grid_old[Nx], error, Eg_temp[Nx], weight_j[N], weight_jp1[N], bvec[Nx],Ep_temp[N];
double dx = (double)boxsize / Nx; // mesh cell dim
// zero the mesh density accumulator before deposition
for (i = 0 ; i < Nx ; i++)
n[i] = 0;
// Charge deposition: each particle is shared between its two nearest nodes
// (cloud-in-cell), weighted by distance.  Assumes 0 <= pos[i] < boxsize so
// that pidx[i] stays in [0, Nx-1].
for (i = 0 ; i < N ; i++)
{
pidx[i] = (int)floor(pos[i]/dx); //Getting left node id for each particle
pidx1[i] = pidx[i] + 1; //Getting right node id for each particle
weight_j[i] = ( pidx1[i]*dx - pos[i] )/dx; //Getting weights for left node based on position of particle
weight_jp1[i] = ( pos[i] - pidx[i]*dx )/dx; //Getting weights for Right node based on position of particle
pidx1[i] = pidx1[i] % Nx; //right-most node wraps to zero (periodic)
n[pidx[i]] += weight_j[i]; // accumulate the two weights on the mesh; after this
n[pidx1[i]] += weight_jp1[i]; // loop the particle charge lives on the grid nodes
}
// Build the Poisson right-hand side: laplacian(phi) = n - n0 (net charge
// density), pre-multiplied by dx^2 for the finite-difference stencil.
for (i = 0 ; i < Nx ; i++)
{
n[i] *= (double)n0 * boxsize / N / dx; // normalize counts to physical density
bvec[i] = (n[i] - n0)*dx*dx;
phi_grid_old[i]=phi_grid[i]; // Jacobi reads old values while writing new ones
}
// Solve Poisson's Equation: laplacian(phi) = n-n0
#ifdef device_GPU
// GPU path: field solve and particle interpolation happen in cuda_wrapper.
get_grid_Efield(E, phi_grid ,bvec, Gmtx, weight_j, weight_jp1, pidx, pidx1);
#endif
#ifdef device_CPU // preprocessor won't compile this part if device_CPU is not defined in the input file
// CPU path: damped Jacobi iteration, capped at 10000 sweeps.
for (j = 0 ; j < 10000 ; j++)
//Performing Jacobi iteratively to find the potential from the 2nd-order stencil
{
error = 0;
// interior nodes: standard 1-D three-point Jacobi update
for (i = 1 ; i < Nx-1 ; i++)
{
phi_grid[i] = 0.5*(phi_grid_old[i-1] + phi_grid_old[i+1] - bvec[i]);
error += abs(phi_grid_old[i] - phi_grid[i]);
}
// boundary nodes use periodic neighbours; error terms added before the writes
error += abs(phi_grid_old[0] - 0.5*(phi_grid_old[Nx-1] + phi_grid_old[1] - bvec[0])) + abs(phi_grid_old[Nx-1] - 0.5*(phi_grid_old[Nx-2] + phi_grid_old[0] - bvec[Nx-1]));
phi_grid[0] = 0.5*(phi_grid_old[Nx-1] + phi_grid_old[1] - bvec[0]);
phi_grid[Nx-1] = 0.5*(phi_grid_old[Nx-2] + phi_grid_old[0] - bvec[Nx-1]);
// cout << phi_grid[1] << " " << error << endl;
// under-relaxation: blend 70% new iterate with 30% old for stability
for (i = 0 ; i < Nx ; i++)
phi_grid_old[i] = 0.7*phi_grid[i] + (1-0.7)*phi_grid_old[i];
if(error < 1e-6) break; // converged
}
cout << "Jacobi_iter_stopped_at" << j << endl;
// E on the mesh = Gmtx * phi (central-difference gradient; sign convention
// matches the caller, which applies the field as -E)
for (i = 0 ; i < Nx ; i++)
{
Eg_temp[i] = 0;
for (j = 0 ; j < Nx ; j++)
{
Eg_temp[i] += Gmtx[i][j]*phi_grid[j];
}
// cout<< Eg_temp[i] <<" " << E_grid[i]<<endl;
E_grid[i] = Eg_temp[i];
}
// cin.get();
// Gather: interpolate the mesh field back to each particle with the same
// CIC weights used during deposition (momentum-conserving pairing)
for (i = 0 ; i < N ; i++)
{
Ep_temp[i] = weight_j[i] * E_grid[pidx[i]] + weight_jp1[i] * E_grid[pidx1[i]];
// cout<< Ep_temp[i] <<" " << E[i]<<endl;
E[i] = Ep_temp[i];
}
// cin.get();
#endif
return 0;
}