# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:07:07 2018
@author: Piotr Traczykowski
"""
import tables
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
from math import log, floor, ceil, fmod
from scipy import interpolate
import sys
import datetime
import multiprocessing
import time
### The below call to os.nice(20) reduces the process priority on your
### system, so that the system behaves more smoothly while JDF still
### utilizes all of its resources
import os
os.nice(20)
##################################################################
##################################################################
##################################################################
# Core procedure for JDF routine
def JDF_CORE(f_Z,X_inp,Y_inp,dist_in,smoothing,rand_x,rand_y,DDDz,NoOfElec):
init_column_dist=np.sum(dist_in,0)
Xh=np.linspace(np.min(X_inp),np.max(X_inp),int(np.rint(smoothing*len(X_inp))))
Yh=np.linspace(np.min(Y_inp),np.max(Y_inp),int(np.rint(smoothing*len(Y_inp))))
f_col_dist=interpolate.interp1d(X_inp,init_column_dist)
intp_col_dist=f_col_dist(Xh)
# Make sure interpolated values are non-negative
intp_col_dist=intp_col_dist.clip(min=0.0)
intp_col_dist=intp_col_dist/np.sum(intp_col_dist)
x0=GenerateParticleX(intp_col_dist,rand_x,Xh)
# Find corresponding indices and weights in the other dimension
indx_temp = np.argsort(np.square(x0-X_inp))
indx_temp = indx_temp[:2]
min_val = np.min(indx_temp)
max_val = np.max(indx_temp)
X_min=X_inp[min_val]
X_max=X_inp[max_val]
tw1=1.0-(x0-X_min)/(X_max-X_min)
tw2=1.0-(X_max-x0)/(X_max-X_min)
init_row_dist=tw1*dist_in[:,min_val] + tw2*dist_in[:,max_val]
# Interpolate the row distribution onto the finer grid
f_row_dist=interpolate.interp1d(Y_inp,init_row_dist)
intp_row_dist=f_row_dist(Yh)
# Make sure interpolated values are non-negative
intp_row_dist=intp_row_dist.clip(min=0.0)
intp_row_dist=intp_row_dist/np.sum(intp_row_dist)
y0=GenerateParticleY(intp_row_dist,rand_y,Yh)
return np.vstack((x0,y0,DDDz,NoOfElec))
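# A note on the sampling scheme above: JDF_CORE draws one (x,y) pair by
# inverse-transform sampling - x from the column-marginal of dist_in,
# then y from the row distribution blended (weights tw1, tw2) between the
# two columns nearest to x. A minimal illustrative sketch of the same
# idea on a toy density (not part of JDF; the values are hypothetical):
#==============================================================================
#toy_dist = np.array([[1.0, 3.0], [2.0, 4.0]])
#col_marginal = np.sum(toy_dist, 0)                # -> [3., 7.]
#cdf = np.cumsum(col_marginal / np.sum(col_marginal))
#x0 = np.interp(0.7, cdf, np.array([0.0, 1.0]))    # inverse-CDF lookup
#==============================================================================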
# Generate particle X coordinate
def GenerateParticleX(PDF_x,ranDx,Xhin):
Px_CDF=np.cumsum(PDF_x)
Px_CDF=np.sort(Px_CDF)
f_X = interpolate.interp1d(Px_CDF, Xhin,fill_value='extrapolate')
x0=f_X(ranDx)
return x0
# Generate particle Y coordinate
def GenerateParticleY(PDF_y,ranDy,Yhin):
Py_CDF=np.cumsum(PDF_y)
Py_CDF=np.sort(Py_CDF)
f_Y = interpolate.interp1d(Py_CDF, Yhin,fill_value='extrapolate')
y0=f_Y(ranDy)
return y0
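# Illustrative use of the inverse-CDF samplers above (hypothetical values):
#==============================================================================
#Xh_demo = np.linspace(0.0, 1.0, 100)
#pdf_demo = Xh_demo / np.sum(Xh_demo)          # triangular PDF, CDF ~ x**2
#x_demo = GenerateParticleX(pdf_demo, 0.5, Xh_demo)
## the median of a PDF ~ x on [0,1] is sqrt(0.5) ~ 0.707
#==============================================================================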
# Generate Halton sequences
def HaltonRandomNumber(dims, nb_pts):
hArr = np.empty(nb_pts * dims)
hArr.fill(np.nan)
pArr = np.empty(nb_pts)
pArr.fill(np.nan)
Primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,\
83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163]
log_nb_pts = log(nb_pts + 1)
for i in range(dims):
b = Primes[i]
n = int(np.ceil(log_nb_pts / np.log(b)))
for t in range(n):
pArr[t] = np.float_power(b, -(t + 1) )
for j in range(nb_pts):
d = j + 1
sum_ = np.fmod(d, b) * pArr[0]
for t in range(1, n):
d = np.floor(d / b)
sum_ += np.fmod(d, b) * pArr[t]
hArr[j*dims + i] = sum_
return hArr.reshape(nb_pts, dims)
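# Example: the first 2-D Halton points (bases 2 and 3) produced by the
# routine above are
#   HaltonRandomNumber(2,4) -> [[0.5  , 0.333...],
#                               [0.25 , 0.666...],
#                               [0.75 , 0.111...],
#                               [0.125, 0.444...]]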
# Routine that calculates new microparticle positions and weights for a single selected slice
def SliceCalculate(bin_x_in,bin_y_in,z_hlt,i,StepZ,NumberOfSlices,interpolator,f_Z,new_x,new_y,Non_Zero_Z,Num_Of_Slice_Particles,minz,JDFSmoothing,RandomHaltonSequence):
print 'Slice ',i,' of ',NumberOfSlices
new_z=np.full((1),(minz+(StepZ*i)))
xx,yy,zz=np.meshgrid(new_x,new_y,new_z)
positionsin = np.column_stack((xx.ravel('F'), yy.ravel('F'), zz.ravel('F'), interpolator(xx,yy,zz).ravel('F') ))
xy_flat=np.vstack((positionsin[:,0], positionsin[:,1])).T
hist2d_flat,edges=np.histogramdd(xy_flat,bins=(bin_x_in,bin_y_in), weights=positionsin[:,3])
#if np.sum(hist2d_flat)>0:
Dist=hist2d_flat
Xin=np.linspace(np.min(positionsin[:,0]),np.max(positionsin[:,0]),bin_x_in)
Yin=np.linspace(np.min(positionsin[:,1]),np.max(positionsin[:,1]),bin_y_in)
dots=[]
ZZZ=minz+(i*StepZ)
ZZZ_in=np.full((Num_Of_Slice_Particles),minz+(i*StepZ)+(StepZ*z_hlt))
NoOfElec=(Non_Zero_Z)*(f_Z(ZZZ)/(NumberOfSlices))/(Num_Of_Slice_Particles)
for j in range (0,Num_Of_Slice_Particles):
result=JDF_CORE(f_Z,Xin,Yin,Dist,JDFSmoothing,RandomHaltonSequence[j,0],RandomHaltonSequence[j,1],ZZZ_in[j],NoOfElec)
dots.append(result[:,0])
return dots
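# Each element of 'dots' returned above is a 4-vector [x0, y0, z, Ne]:
# the sampled transverse position, the (Halton-jittered) longitudinal
# position within the slice, and the number of real electrons that the
# new macroparticle represents.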
##################################################################
##################################################################
##################################################################
# Main routine for JDF
if __name__ == '__main__':
if len(sys.argv)==2:
file_name_in=sys.argv[1]
print 'Processing file:', file_name_in
else:
print 'Usage: JDF_NLIST <Input File Name>\n'
sys.exit(1)
file_name_base = (file_name_in.split('.')[0]).strip()
import PARAMS_JDF
# Read user parameters from PARAMS_JDF, falling back to defaults for any missing entry
try:
k_u=PARAMS_JDF.k_u
except AttributeError:
k_u = 228.4727
try:
a_u=PARAMS_JDF.a_u
except AttributeError:
a_u = 1.0121809
try:
SlicesMultiplyFactor=PARAMS_JDF.SlicesMultiplyFactor
except AttributeError:
SlicesMultiplyFactor = 10
try:
Num_Of_Slice_Particles=PARAMS_JDF.NumOfSliceParticles
except AttributeError:
Num_Of_Slice_Particles = 800
try:
binnumber_X=PARAMS_JDF.X_DensitySampling
except AttributeError:
binnumber_X = 40
try:
binnumber_Y=PARAMS_JDF.Y_DensitySampling
except AttributeError:
binnumber_Y = 40
try:
binnumber_Z=PARAMS_JDF.Z_DensitySampling
except AttributeError:
binnumber_Z = 40
try:
S_factor=PARAMS_JDF.BeamStretchFactor
except AttributeError:
S_factor = 0.0
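# A minimal example PARAMS_JDF.py (every entry is optional - the
# defaults above are used for any missing name):
#==============================================================================
#k_u = 228.4727              # undulator wavenumber
#a_u = 1.0121809             # undulator parameter
#SlicesMultiplyFactor = 10   # slices per resonant wavelength
#NumOfSliceParticles = 800   # macroparticles generated per slice
#X_DensitySampling = 40      # density histogram bins in X
#Y_DensitySampling = 40      # density histogram bins in Y
#Z_DensitySampling = 40      # current/density histogram bins in Z
#BeamStretchFactor = 0.0     # extra margin added to the beam length in Z
#==============================================================================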
# Print the parameters used for the calculations to screen.
#==============================================================================
print 'User defined parameters:'
print 'k_u = ',k_u
print 'a_u = ',a_u
print 'Slices per wavelength = ',SlicesMultiplyFactor
#print 'Particle density samples = ',binnumber
print 'Density sampling in X = ',binnumber_X
print 'Density sampling in Y = ',binnumber_Y
print 'Current / Density sampling in Z = ',binnumber_Z
print 'Stretching factor in Z = ',S_factor
#print 'Shape sampling number = ',NumShapeSlices
#==============================================================================
f=tables.open_file(file_name_in,'r')
Particles=f.root.Particles.read()
#*************************************************************
# Physical constants used throughout the calculations
# USER DATA - MODIFY ACCORDING TO REQUIREMENTS
Pi=np.pi # Pi taken from numpy at full floating-point precision
c=299792458.0 # Speed of light
m=9.11e-31 # Mass of the electron
e_0=8.854E-12 # Vacuum permittivity
e_ch=1.602e-19 # Charge of one electron
#*************************************************************
JDFSmoothing=1.0
# Optional: select only a central slice of the beam (debug; commented out)
#print len(Particles)
#midZ=np.mean(Particles[:,4])
#zlen=0.20*(np.max(Particles[:,4])-np.min(Particles[:,4]))
#Particles=Particles[(Particles[:,4] < (midZ+zlen)) & (Particles[:,4] > (midZ-zlen))]
#print len(Particles)
mA_X = Particles[:,0]
mA_Y = Particles[:,2]
mA_Z = Particles[:,4]
# Convert the momentum units from p/mc back to SI, for compatibility with the rest of the calculations
mA_PX = Particles[:,1]*(m*c)
mA_PY = Particles[:,3]*(m*c)
mA_PZ = Particles[:,5]*(m*c)
mA_WGHT = Particles[:,6]
# The below section calculates some initial beam data (sizes, momenta, gamma)
xyz = np.vstack([mA_X,mA_Y,mA_Z]).T
size_x=max(mA_X)-min(mA_X)
size_y=max(mA_Y)-min(mA_Y)
size_z=max(mA_Z)-min(mA_Z)
minx=np.min(mA_X)
maxx=np.max(mA_X)
miny=np.min(mA_Y)
maxy=np.max(mA_Y)
print 'Size of sample X,Y,Z = ', size_x,size_y,size_z
# Calculate some needed values: gamma_0, lambda_u, lambda_r
p_tot=np.sqrt((mA_PX[:]**2)+(mA_PY[:]**2.)+(mA_PZ[:]**2.))
gamma=(np.sqrt(1+(p_tot/(m*c))**2.))
gamma_0=np.mean(gamma)
RandomHaltonSequence=HaltonRandomNumber(2,Num_Of_Slice_Particles)
z_hlt=0.5-HaltonRandomNumber(2,Num_Of_Slice_Particles)
#RandomHaltonSequence=np.random.rand(Num_Of_Slice_Particles,2)
## Linear smoothing between bins in JDF_CORE
lambda_u=(2.0*Pi)/k_u
# Use plane-pole undulator
lambda_r=(lambda_u/(2.0*gamma_0**2.0))*(1+(a_u**2.0)/2.0)
print 'Using plane-pole undulator configuration!'
# Use helical undulator
#lambda_r=(lambda_u/(2*gamma_0**2))*(1+a_u**2)
#print 'Using helical undulator configuration !'
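# The planar (active) and helical (commented) options implement the
# standard FEL resonance condition:
#   lambda_r = lambda_u/(2*gamma_0**2) * (1 + a_u**2/2)   (planar)
#   lambda_r = lambda_u/(2*gamma_0**2) * (1 + a_u**2)     (helical)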
print 'lambda_r = ',lambda_r
NumberOfSlices=int(SlicesMultiplyFactor*((max(mA_Z)+S_factor*size_z)-(min(mA_Z)-S_factor*size_z))/(lambda_r))
TotalNumberOfElectrons=np.sum(mA_WGHT)
InitialParticleCharge=TotalNumberOfElectrons*e_ch
Hz, edges_Z = np.histogram(mA_Z, bins = binnumber_Z,density=False,weights=mA_WGHT,range=((min(mA_Z)-S_factor*size_z,max(mA_Z)+S_factor*size_z)))
Hz=ndimage.gaussian_filter(Hz,1.0)
Non_Zero_Z=float(np.count_nonzero(Hz))
#x0_Z = np.linspace(0.5*(edges_Z[0][0]+edges_Z[0][1]),0.5*(edges_Z[0][binnumber_Z]+edges_Z[0][binnumber_Z-1]),binnumber_Z)
x0_Z = np.linspace(0.5*(edges_Z[0]+edges_Z[1]),0.5*(edges_Z[binnumber_Z]+edges_Z[binnumber_Z-1]),binnumber_Z)
y0_Z = Hz
f_Z = interpolate.PchipInterpolator(x0_Z, y0_Z)
m_Z_plt=np.linspace(min(mA_Z)-S_factor*size_z,max(mA_Z)+S_factor*size_z,100)
plt.plot(m_Z_plt,f_Z(m_Z_plt))
plt.show()
start = time.time()
x0y0z0=np.vstack((mA_X,mA_Y,mA_Z)).T
# Create 3D particles density map
HxHyHz,edges_XYZ=np.histogramdd(x0y0z0,bins=(binnumber_X, binnumber_Y,binnumber_Z),weights=mA_WGHT)
print 'Histogram done...'
# Apply a Gaussian filter to smooth density-map artifacts (1.0 is the default width)
# If you wish to use the raw data, comment out the line below
HxHyHz=ndimage.gaussian_filter(HxHyHz,1.0)
# Map the 3D density map onto equispaced meshgrid
new_x=np.linspace(np.min(mA_X),np.max(mA_X),binnumber_X)
new_y=np.linspace(np.min(mA_Y),np.max(mA_Y),binnumber_Y)
new_z=np.linspace(np.min(mA_Z),np.max(mA_Z),binnumber_Z)
xxh,yyh,zzh=np.meshgrid(new_x,new_y,new_z)
positions = np.column_stack((xxh.ravel('F'), yyh.ravel('F'), zzh.ravel('F'), HxHyHz.ravel('F') ))
## The below line removes all histogram entries <= 0, which improves performance
positions = positions[positions[:,3]>0.0]
xyzpoints=np.vstack((positions[:,0],positions[:,1],positions[:,2])).T
# Interpolate the 3D histogram (meshgrid) into continuous distribution (nearest neighbor or linear interpolation allowed)
### Linear interpolation - slow but more accurate
#interpolator=interpolate.LinearNDInterpolator(xyzpoints,positions[:,3],rescale=True,fill_value=0)
### Nearest neighbour interpolation - fast and less accurate
interpolator=interpolate.NearestNDInterpolator(xyzpoints,positions[:,3],rescale=True)
### Below is an option to plot the density map of the beam - uncomment if you want to see it.
#==============================================================================
#from mpl_toolkits.mplot3d import Axes3D
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(xyzpoints[:,0],xyzpoints[:,1],xyzpoints[:,2],c=positions[:,3])
#plt.show()
#==============================================================================
print 'Interpolation map created...'
minz=np.min(x0y0z0[:,2])
maxz=np.max(x0y0z0[:,2])
StepZ=(maxz-minz)/NumberOfSlices
# Sweep over all slices along Z-axis (longitudinal direction) and generate new microparticles
# Routine is parallel and uses ALL available cores
pool = multiprocessing.Pool()
print 'Executing main JDF loop...'
result2 = []
# Create the list of slices to calculate - only those with a positive current, which avoids further checks in the algorithm
slice_list = []
for slice_number in range(0,NumberOfSlices):
ZZZ=minz+(slice_number*StepZ)
NoOfElec=(Non_Zero_Z)*(f_Z(ZZZ)/(NumberOfSlices))/(Num_Of_Slice_Particles)
if NoOfElec>0:
slice_list.append(slice_number)
for slice_number in slice_list:
pool.apply_async(SliceCalculate, args=(binnumber_X,binnumber_Y,z_hlt[:,1],slice_number,StepZ,NumberOfSlices,interpolator,f_Z,new_x,new_y,Non_Zero_Z,Num_Of_Slice_Particles,minz,JDFSmoothing,RandomHaltonSequence), callback=result2.append)
pool.close()
pool.join()
#==============================================================================
# ## SERIAL VERSION DEBUG ONLY !!!
# for slice_number in range(0,NumberOfSlices):
# ZZZ=minz+(slice_number*StepZ)
# NoOfElec=(Non_Zero_Z)*(f_Z(ZZZ)/(NumberOfSlices))/(Num_Of_Slice_Particles)
# if NoOfElec>0:
# results=SliceCalculate(binnumber_X,binnumber_Y,z_hlt[:,1],slice_number,StepZ,NumberOfSlices,interpolator,f_Z,new_x,new_y,Non_Zero_Z,Num_Of_Slice_Particles,minz,JDFSmoothing,RandomHaltonSequence)
# result2.append(results)
#==============================================================================
# Rearrange the output from parallel loop above
print 'Output array shape is: ',np.shape(result2)
Total_Number_Of_Particles=len(result2)*Num_Of_Slice_Particles
print 'Total number of particles = ',Total_Number_Of_Particles
Full_X=np.zeros(Total_Number_Of_Particles)
Full_PX=np.zeros(0)
Full_Y=np.zeros(Total_Number_Of_Particles)
Full_PY=np.zeros(0)
Full_Z=np.zeros(Total_Number_Of_Particles)
Full_PZ=np.zeros(0)
Full_Ne=np.zeros(Total_Number_Of_Particles)
print 'Starting rearranging array...'
counter=0
for j in range(0,Num_Of_Slice_Particles):
for i in range(0,len(result2)):
Full_X[counter]=result2[i][j][0]
Full_Y[counter]=result2[i][j][1]
Full_Z[counter]=result2[i][j][2]
Full_Ne[counter]=result2[i][j][3]
counter=counter+1
# Add shot noise to the longitudinal positions
print 'Adding noise...'
Rand_Z=(StepZ*(np.random.random(len(Full_Z)) - 0.50))/np.sqrt(Full_Ne)
Full_Z=Full_Z+Rand_Z
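# The jitter above is uniform within one slice width (StepZ) and scaled
# by 1/sqrt(Ne), which mimics the shot-noise statistics of the Ne real
# electrons represented by each macroparticle.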
# Interpolate momentum data onto new microparticles (griddata used)
print 'Starting to interpolate momentum data... (this takes time)'
def Calculate_PX():
Full_PX = interpolate.griddata((mA_X.ravel(), mA_Y.ravel(), mA_Z.ravel()),mA_PX.ravel(),(Full_X, Full_Y, Full_Z), method='linear',rescale=True)
return Full_PX
def Calculate_PY():
Full_PY = interpolate.griddata((mA_X.ravel(), mA_Y.ravel(), mA_Z.ravel()),mA_PY.ravel(),(Full_X, Full_Y, Full_Z), method='linear',rescale=True)
return Full_PY
def Calculate_PZ():
Full_PZ = interpolate.griddata((mA_X.ravel(), mA_Y.ravel(), mA_Z.ravel()),mA_PZ.ravel(),(Full_X, Full_Y, Full_Z), method='linear',rescale=True)
return Full_PZ
# Parallel momentum data mapping
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=3)
async_result_PX = pool.apply_async(Calculate_PX, ())
async_result_PY = pool.apply_async(Calculate_PY, ())
async_result_PZ = pool.apply_async(Calculate_PZ, ())
Full_PX = async_result_PX.get()
Full_PY = async_result_PY.get()
Full_PZ = async_result_PZ.get()
# Scale units from SI to p = p/mc
Full_PX=Full_PX/(m*c)
Full_PY=Full_PY/(m*c)
Full_PZ=Full_PZ/(m*c)
# Merge all data into one array
x_px_y_py_z_pz_NE = np.vstack([Full_X.flat,Full_PX.flat,Full_Y.flat,Full_PY.flat,Full_Z.flat,Full_PZ.flat,Full_Ne.flat]).T
# Add Poisson noise to particle weights
x_px_y_py_z_pz_NE[:,6]=np.random.poisson(x_px_y_py_z_pz_NE[:,6])
# Remove all particles with weights <= zero
x_px_y_py_z_pz_NE=x_px_y_py_z_pz_NE[x_px_y_py_z_pz_NE[:,6] > 0]
# Check for NaN values in the data - these sometimes appear when using the 'linear' method in the momentum interpolations (griddata parameter)
NaN_Mask=~np.any(np.isnan(x_px_y_py_z_pz_NE), axis=1)
x_px_y_py_z_pz_NE=x_px_y_py_z_pz_NE[NaN_Mask]
# Rescale the charge of the new particle set (needed due to S_factor usage)
ChargeFactor=InitialParticleCharge/np.sum(x_px_y_py_z_pz_NE[:,6]*e_ch)
print 'Charge scaling factor = ',ChargeFactor
x_px_y_py_z_pz_NE[:,6]=x_px_y_py_z_pz_NE[:,6]*ChargeFactor
print 'Final charge of particles = ',np.sum(x_px_y_py_z_pz_NE[:,6]*e_ch)
print 'Saving the output to files...'
# Create groups in HDF5 and add metadata for VisIt
# Open output file
output_file=tables.open_file(file_name_base+'_JDF.h5','w')
ParticleGroup=output_file.create_array('/','Particles',x_px_y_py_z_pz_NE)
boundsGroup=output_file.create_group('/','globalGridGlobalLimits','')
boundsGroup._v_attrs.vsType='limits'
boundsGroup._v_attrs.vsKind='Cartesian'
timeGroup=output_file.create_group('/','time','time')
timeGroup._v_attrs.vsType='time'
ParticleGroup._v_attrs.vsType='variableWithMesh'
ParticleGroup._v_attrs.vsTimeGroup='time'
ParticleGroup._v_attrs.vsNumSpatialDims = 3
ParticleGroup._v_attrs.vsLimits='globalGridGlobalLimits'
ParticleGroup._v_attrs.vsLabels='x,px,y,py,z,pz,NE'
now = datetime.datetime.now()
ParticleGroup._v_attrs.FXFELConversionTime=now.strftime("%Y-%m-%d %H:%M:%S")
ParticleGroup._v_attrs.FXFELSourceFileName=file_name_in
#Close the file
output_file.close()
end = time.time()
print 'Elapsed time: ',end - start,'s'
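# Example usage (assuming an input HDF5 file whose /Particles array holds
# columns x, px/mc, y, py/mc, z, pz/mc, weight):
#   python JDF_NLIST.py beam.h5
# The up-sampled particle set is written to beam_JDF.h5.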