From 13c4148baea215d48b0ceb4736bb7458e4094910 Mon Sep 17 00:00:00 2001 From: natj Date: Tue, 3 Mar 2020 16:47:33 -0500 Subject: [PATCH] sync shock and pic folders --- projects/shocks/adv.py | 821 ----------------- projects/shocks/init_problem.py | 102 -- projects/shocks/jobs/y8x512s0.rusty | 21 + projects/shocks/{ => misc}/filters.py | 0 projects/shocks/{ => misc}/moving_wall.py | 0 projects/shocks/{ => old}/plot2d_var.py | 114 ++- projects/shocks/{ => old}/prtcl_path.py | 0 projects/shocks/{ => old}/shock_RH.py | 0 projects/shocks/pic.py | 1024 ++++++++------------- projects/shocks/plot3d.py | 221 +++++ projects/shocks/problem.py | 59 ++ projects/shocks/weibel.py | 788 ---------------- projects/shocks/weibel_mini.ini | 63 -- 13 files changed, 758 insertions(+), 2455 deletions(-) delete mode 100644 projects/shocks/adv.py delete mode 100644 projects/shocks/init_problem.py create mode 100644 projects/shocks/jobs/y8x512s0.rusty rename projects/shocks/{ => misc}/filters.py (100%) rename projects/shocks/{ => misc}/moving_wall.py (100%) rename projects/shocks/{ => old}/plot2d_var.py (87%) rename projects/shocks/{ => old}/prtcl_path.py (100%) rename projects/shocks/{ => old}/shock_RH.py (100%) create mode 100644 projects/shocks/plot3d.py create mode 100644 projects/shocks/problem.py delete mode 100644 projects/shocks/weibel.py delete mode 100644 projects/shocks/weibel_mini.ini diff --git a/projects/shocks/adv.py b/projects/shocks/adv.py deleted file mode 100644 index 5b7e8737..00000000 --- a/projects/shocks/adv.py +++ /dev/null @@ -1,821 +0,0 @@ -from __future__ import print_function -from mpi4py import MPI - -import numpy as np - -import sys, os -import h5py - -import pycorgi.twoD as corgi -import pyrunko.tools.twoD as pytools -import pyrunko.vlv.twoD as pyvlv -import pyrunko.pic.twoD as pypic -import pyrunko.fields.twoD as pyfld - - -from configSetup import Configuration -import argparse -import initialize as init -from initialize_pic import loadTiles -from initialize_pic import initialize_virtuals -from initialize_pic import globalIndx -from sampling import boosted_maxwellian -from initialize_pic import spatialLoc -from injector_pic import inject -from injector_pic import insert_em -from time import sleep -from visualize import get_yee - -#global simulation seed -np.random.seed(1) - -try: - import matplotlib.pyplot as plt - from visualize import plotNode - from visualize import plotJ, plotE, plotDens - from visualize import saveVisz - - from visualize import getYee2D - from visualize import plot2dYee - from visualize_pic import plot2dParticles - -except: - pass - -from timer import Timer - -from init_problem import Configuration_Shocks as Configuration - -debug = False - -def debug_print(n, msg): - if debug: - print("{}: {}".format(n.rank(), msg)) - sys.stdout.flush() - - -def filler(xloc, ispcs, conf): - # perturb position between x0 + RUnif[0,1) - - #electrons - if ispcs == 0: - delgam = conf.delgam #* np.abs(conf.mi / conf.me) * conf.temp_ratio - - xx = xloc[0] + np.random.rand(1) - yy = xloc[1] + np.random.rand(1) - #zz = xloc[2] + np.random.rand(1) - zz = 0.5 - - #positrons/ions/second species - if ispcs == 1: - delgam = conf.delgam - - #on top of electrons - xx = xloc[0] - yy = xloc[1] - zz = 0.5 - - gamma = conf.gamma - direction = -1 - ux, uy, uz, uu = boosted_maxwellian(delgam, gamma, direction=direction, dims=3) - - x0 = [xx, yy, zz] - u0 = [ux, uy, uz] - return x0, u0 - - - -# Field initialization (guide field) -def insert_em(grid, conf): - - #into radians - btheta = 
conf.btheta/180.*np.pi - bphi = conf.bphi/180.*np.pi - beta = conf.beta - - kk = 0 - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - yee = tile.get_yee(0) - - ii,jj = tile.index - - for n in range(conf.NzMesh): - for m in range(-3, conf.NyMesh+3): - for l in range(-3, conf.NxMesh+3): - # get global coordinates - iglob, jglob, kglob = globalIndx( (ii,jj), (l,m,n), conf) - - yee.bx[l,m,n] = conf.binit*np.cos(btheta) - yee.by[l,m,n] = conf.binit*np.sin(btheta)*np.sin(bphi) - yee.bz[l,m,n] = conf.binit*np.sin(btheta)*np.cos(bphi) - - yee.ex[l,m,n] = 0.0 - yee.ey[l,m,n] =-beta*yee.bz[l,m,n] - yee.ez[l,m,n] = beta*yee.by[l,m,n] - - -if __name__ == "__main__": - - do_plots = True - do_print = False - - if MPI.COMM_WORLD.Get_rank() == 0: - do_print =True - - if do_print: - print("Running with {} MPI processes.".format(MPI.COMM_WORLD.Get_size())) - - ################################################## - # set up plotting and figure - try: - if do_plots: - plt.fig = plt.figure(1, figsize=(6,9)) - plt.rc('font', family='serif', size=8) - plt.rc('xtick') - plt.rc('ytick') - - gs = plt.GridSpec(11, 1) - gs.update(hspace = 0.0) - - axs = [] - for ai in range(11): - axs.append( plt.subplot(gs[ai]) ) - except: - #print() - pass - - - # Timer for profiling - timer = Timer() - timer.start("total") - timer.start("init") - - timer.do_print = do_print - - - # parse command line arguments - parser = argparse.ArgumentParser(description='Simple PIC-Maxwell simulations') - parser.add_argument('--conf', dest='conf_filename', default=None, - help='Name of the configuration file (default: None)') - args = parser.parse_args() - if args.conf_filename == None: - conf = Configuration('shock_mini.ini', do_print=do_print) - else: - if do_print: - print("Reading configuration setup from ", args.conf_filename) - conf = Configuration(args.conf_filename, do_print=do_print) - - - grid = corgi.Grid(conf.Nx, conf.Ny, conf.Nz) - - xmin = 0.0 - xmax = conf.Nx*conf.NxMesh #XXX scaled length - ymin = 0.0 - ymax = conf.Ny*conf.NyMesh - grid.set_grid_lims(xmin, xmax, ymin, ymax) - - #init.loadMpiRandomly(grid) - #init.loadMpiXStrides(grid) - debug_print(grid, "load mpi 2d") - init.loadMpi2D(grid) - debug_print(grid, "load tiles") - loadTiles(grid, conf) - - ################################################## - # Path to be created - if grid.master(): - if not os.path.exists( conf.outdir ): - os.makedirs(conf.outdir) - if not os.path.exists( conf.outdir+"/restart" ): - os.makedirs(conf.outdir+"/restart") - if not os.path.exists( conf.outdir+"/full_output" ): - os.makedirs(conf.outdir+"/full_output") - - do_initialization = True - - #check if this is the first time and we do not have any restart files - if not os.path.exists( conf.outdir+'/restart/laps.txt'): - conf.laprestart = -1 #to avoid next if statement - - # restart from latest file - deep_io_switch = 0 - if conf.laprestart >= 0: - do_initialization = False - - #switch between automatic restart and user-defined lap - if conf.laprestart == 0: - - #get latest restart file from housekeeping file - with open(conf.outdir+"/restart/laps.txt", "r") as lapfile: - #lapfile.write("{},{}\n".format(lap, deep_io_switch)) - lines = lapfile.readlines() - slap, sdeep_io_switch = lines[-1].strip().split(',') - lap = int(slap) - deep_io_switch = int(sdeep_io_switch) - - read_lap = deep_io_switch - odir = conf.outdir + '/restart' - - elif conf.laprestart > 0: - lap = conf.laprestart - read_lap = lap - odir = conf.outdir + '/full_output' - - debug_print(grid, "read") - if do_print: - 
print("...reading Yee lattices (lap {}) from {}".format(read_lap, odir)) - pyvlv.read_yee(grid, read_lap, odir) - - if do_print: - print("...reading particles (lap {}) from {}".format(read_lap, odir)) - pyvlv.read_particles(grid, read_lap, odir) - - lap += 1 #step one step ahead - - # initialize - if do_initialization: - debug_print(grid, "inject") - lap = 0 - np.random.seed(1) - inject(grid, filler, conf) #injecting plasma particles - insert_em(grid, conf) - - #static load balancing setup; communicate neighbor info once - debug_print(grid, "analyze bcs") - grid.analyze_boundaries() - debug_print(grid, "send tiles") - grid.send_tiles() - debug_print(grid, "recv tiles") - grid.recv_tiles() - MPI.COMM_WORLD.barrier() - - #sys.exit() - - debug_print(grid, "init virs") - initialize_virtuals(grid, conf) - - - timer.stop("init") - timer.stats("init") - - - # end of initialization - ################################################## - debug_print(grid, "solvers") - - - # visualize initial condition - if do_plots: - try: - plotNode( axs[0], grid, conf) - #plotXmesh(axs[1], grid, conf, 0, "x") - saveVisz(-1, grid, conf) - except: - pass - - - Nsamples = conf.Nt - #pusher = pypic.BorisPusher() - pusher = pypic.VayPusher() - - - #fldprop = pyfld.FDTD2() - fldprop = pyfld.FDTD4() - fintp = pypic.LinearInterpolator() - currint = pypic.ZigZag() - analyzer = pypic.Analyzator() - #flt = pyfld.Binomial2(conf.NxMesh, conf.NyMesh, conf.NzMesh) - - - do_compensator = False - - #binomial + compensator filter - if not(do_compensator): - flt = pyfld.General3p(conf.NxMesh, conf.NyMesh, conf.NzMesh) - fltC = pyfld.General3p(conf.NxMesh, conf.NyMesh, conf.NzMesh) - flt.alpha = 0.5 #make binomial - fltC.alpha = conf.npasses/2 + 1 #binomial compensation is n/2 + 1 - else: - #strided filters - flt = pyfld.General3pStrided(conf.NxMesh, conf.NyMesh, conf.NzMesh) - flt.alpha = 0.5 #make binomial - flt.stride = 2 #make binomial - - - #enhance numerical speed of light slightly to suppress numerical Cherenkov instability - fldprop.corr = 1.02 - - - - # quick field snapshots - debug_print(grid, "fld_writer") - fld_writer = pyfld.FieldsWriter(conf.outdir, - conf.Nx, conf.NxMesh, - conf.Ny, conf.NyMesh, - conf.Nz, conf.NzMesh, - conf.stride) - - # test particles - debug_print(grid, "prtcl_writer") - prtcl_writer = pypic.TestPrtclWriter( - conf.outdir, - conf.Nx, conf.NxMesh, - conf.Ny, conf.NyMesh, - conf.Nz, conf.NzMesh, - conf.ppc, len(grid.get_local_tiles()), - conf.n_test_prtcls) - - - - - debug_print(grid, "mpi_e") - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - debug_print(grid, "mpi_b") - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - ################################################## - sys.stdout.flush() - - #simulation loop - time = lap*(conf.cfl/conf.c_omp) - for lap in range(lap, conf.Nt+1): - debug_print(grid, "lap_start") - - ################################################## - # advance Half B - - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_b1") - debug_print(grid, "mpi_b1") - - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - timer.stop_comp("mpi_b1") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc0") - debug_print(grid, "upd_bc0") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - timer.stop_comp("upd_bc0") - - #-------------------------------------------------- - #push B half - timer.start_comp("push_half_b1") - 
debug_print(grid, "push_half_b1") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - fldprop.push_half_b(tile) - - timer.stop_comp("push_half_b1") - - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_b2") - debug_print(grid, "mpi_b2") - - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - timer.stop_comp("mpi_b2") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc1") - debug_print(grid, "upd_bc1") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - timer.stop_comp("upd_bc1") - - - ################################################## - # move particles (only locals tiles) - - #-------------------------------------------------- - #interpolate fields (can move to next asap) - timer.start_comp("interp_em") - debug_print(grid, "interp_em") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - fintp.solve(tile) - - timer.stop_comp("interp_em") - #-------------------------------------------------- - - #-------------------------------------------------- - #push particles in x and u - timer.start_comp("push") - debug_print(grid, "push") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - pusher.solve(tile) - - timer.stop_comp("push") - - #-------------------------------------------------- - # apply moving walls - timer.start_comp("walls") - debug_print(grid, "walls") - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - - timer.stop_comp("walls") - ################################################## - # advance B half - - #-------------------------------------------------- - #push B half - timer.start_comp("push_half_b2") - debug_print(grid, "push_half_b2") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - fldprop.push_half_b(tile) - - timer.stop_comp("push_half_b2") - - - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_e1") - debug_print(grid, "mpi_e1") - - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - timer.stop_comp("mpi_e1") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc2") - debug_print(grid, "upd_bc2") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - timer.stop_comp("upd_bc2") - - - ################################################## - # advance E - - #-------------------------------------------------- - #push E - timer.start_comp("push_e") - debug_print(grid, "push_e") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - fldprop.push_e(tile) - - timer.stop_comp("push_e") - - #-------------------------------------------------- - #current calculation; charge conserving current deposition - timer.start_comp("comp_curr") - debug_print(grid, "comp_cur") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - currint.solve(tile) - - timer.stop_comp("comp_curr") - - #-------------------------------------------------- - timer.start_comp("clear_vir_cur") - debug_print(grid, "clear_vir_cur") - - #clear virtual current arrays for easier addition after mpi - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) - tile.clear_current() - - timer.stop_comp("clear_vir_cur") - - - ################################################## - # current solving is also taking place in nbor ranks - # that is why we update virtuals here with MPI - # - # This is the most expensive task so we do not double it - 
# here. - - #-------------------------------------------------- - #mpi send currents - timer.start_comp("mpi_cur") - debug_print(grid, "mpi_cur") - - grid.send_data(0) - grid.recv_data(0) - grid.wait_data(0) - - timer.stop_comp("mpi_cur") - - - #-------------------------------------------------- - #exchange currents - timer.start_comp("cur_exchange") - debug_print(grid, "cur_exchange") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.exchange_currents(grid) - - timer.stop_comp("cur_exchange") - - - - ################################################## - # particle communication (only local/boundary tiles) - - #-------------------------------------------------- - #local particle exchange (independent) - timer.start_comp("check_outg_prtcls") - debug_print(grid, "check_outg_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.check_outgoing_particles() - - timer.stop_comp("check_outg_prtcls") - - #-------------------------------------------------- - # global mpi exchange (independent) - timer.start_comp("pack_outg_prtcls") - debug_print(grid, "pack_outg_prtcls") - - for cid in grid.get_boundary_tiles(): - tile = grid.get_tile(cid) - tile.pack_outgoing_particles() - - timer.stop_comp("pack_outg_prtcls") - - #-------------------------------------------------- - # MPI global particle exchange - # transfer primary and extra data - timer.start_comp("mpi_prtcls") - debug_print(grid, "mpi_prtcls") - - debug_print(grid, "mpi_prtcls: send3") - grid.send_data(3) - - debug_print(grid, "mpi_prtcls: recv3") - grid.recv_data(3) - - debug_print(grid, "mpi_prtcls: wait3") - grid.wait_data(3) - - # orig just after send3 - debug_print(grid, "mpi_prtcls: send4") - grid.send_data(4) - - debug_print(grid, "mpi_prtcls: recv4") - grid.recv_data(4) - - debug_print(grid, "mpi_prtcls: wait4") - grid.wait_data(4) - - timer.stop_comp("mpi_prtcls") - - #-------------------------------------------------- - # global unpacking (independent) - timer.start_comp("unpack_vir_prtcls") - debug_print(grid, "unpack_vir_prtcls") - - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) - tile.unpack_incoming_particles() - tile.check_outgoing_particles() - - timer.stop_comp("unpack_vir_prtcls") - - #-------------------------------------------------- - # transfer local + global - timer.start_comp("get_inc_prtcls") - debug_print(grid, "get_inc_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.get_incoming_particles(grid) - - timer.stop_comp("get_inc_prtcls") - - #-------------------------------------------------- - # delete local transferred particles - timer.start_comp("del_trnsfrd_prtcls") - debug_print(grid, "del_trnsfrd_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.delete_transferred_particles() - - timer.stop_comp("del_trnsfrd_prtcls") - - #-------------------------------------------------- - # delete all virtual particles (because new prtcls will come) - timer.start_comp("del_vir_prtcls") - debug_print(grid, "del_vir_prtcls") - - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) - tile.delete_all_particles() - - timer.stop_comp("del_vir_prtcls") - - - - ################################################## - #filter - timer.start_comp("filter") - - #sweep over npasses times - for fj in range(conf.npasses): - - #update global neighbors (mpi) - grid.send_data(0) - grid.recv_data(0) - grid.wait_data(0) - - #get halo boundaries - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) 
- tile.update_boundaries(grid) - - #filter each tile - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - flt.solve(tile) - - MPI.COMM_WORLD.barrier() # sync everybody - - if do_compensator: - - #update global neighbors (mpi) - grid.send_data(0) - grid.recv_data(0) - grid.wait_data(0) - - #get halo boundaries - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - #do one full compensation pass lastly - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - fltC.solve(tile) - - #-------------------------------------------------- - timer.stop_comp("filter") - - - #-------------------------------------------------- - #add current to E - timer.start_comp("add_cur") - debug_print(grid, "add_cur") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.deposit_current() - - timer.stop_comp("add_cur") - - #comm E - timer.start_comp("mpi_e2") - debug_print(grid, "mpi_e2") - - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - timer.stop_comp("mpi_e2") - - - ################################################## - # data reduction and I/O - - timer.lap("step") - if (lap % conf.interval == 0): - debug_print(grid, "io") - if do_print: - print("--------------------------------------------------") - print("------ lap: {} / t: {}".format(lap, time)) - - #for cid in grid.get_tile_ids(): - # tile = grid.get_tile(cid) - # tile.erase_temporary_arrays() - - timer.stats("step") - timer.comp_stats() - timer.purge_comps() - - #analyze (independent) - timer.start("io") - - #shrink particle arrays - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.shrink_to_fit_all_particles() - - #analyze - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - analyzer.analyze2d(tile) - - # barrier for quick writers - MPI.COMM_WORLD.barrier() - - debug_print(grid, "fld_writer") - #shallow IO - fld_writer.write(grid, lap) #quick field snapshots - - debug_print(grid, "prtcl_writer") - prtcl_writer.write(grid, lap) #test particles - - #deep IO - if (conf.full_interval > 0 and (lap % conf.full_interval == 0) and (lap > 0)): - debug_print(grid, "deep io") - - debug_print(grid, "deep write_yee") - pyvlv.write_yee(grid, lap, conf.outdir + "/full_output/" ) - debug_print(grid, "deep write_analysis") - pyvlv.write_analysis(grid, lap, conf.outdir + "/full_output/" ) - debug_print(grid, "deep write_prtcls") - pyvlv.write_particles(grid,lap, conf.outdir + "/full_output/" ) - - - #restart IO (overwrites) - if ((lap % conf.restart == 0) and (lap > 0)): - debug_print(grid, "restart io") - #flip between two sets of files - deep_io_switch = 1 if deep_io_switch == 0 else 0 - - debug_print(grid, "write_yee") - pyvlv.write_yee(grid, deep_io_switch, conf.outdir + "/restart/" ) - debug_print(grid, "write_prtcls") - pyvlv.write_particles(grid,deep_io_switch, conf.outdir + "/restart/" ) - - #if successful adjust info file - MPI.COMM_WORLD.barrier() # sync everybody in case of failure before write - if grid.rank() == 0: - with open(conf.outdir+"/restart/laps.txt", "a") as lapfile: - lapfile.write("{},{}\n".format(lap, deep_io_switch)) - - - #-------------------------------------------------- - #2D plots - if do_plots: - try: - plotNode(axs[0], grid, conf) - - yee = getYee2D(grid, conf) - plot2dYee(axs[1], yee, grid, conf, 'rho', label_title=True) - plot2dYee(axs[2], yee, grid, conf, 'jx' , label_title=True) - plot2dYee(axs[3], yee, grid, conf, 'jy' , label_title=True) - plot2dYee(axs[4], yee, grid, conf, 'jz' , label_title=True) - 
plot2dYee(axs[5], yee, grid, conf, 'ex' , label_title=True) - plot2dYee(axs[6], yee, grid, conf, 'ey' , label_title=True) - plot2dYee(axs[7], yee, grid, conf, 'ez' , label_title=True) - plot2dYee(axs[8], yee, grid, conf, 'bx' , label_title=True) - plot2dYee(axs[9], yee, grid, conf, 'by' , label_title=True) - plot2dYee(axs[10], yee, grid, conf, 'bz' , label_title=True) - saveVisz(lap, grid, conf) - except: - #print() - pass - timer.stop("io") - - - timer.stats("io") - timer.start("step") #refresh lap counter (avoids IO profiling) - - sys.stdout.flush() - - #next step - time += conf.cfl/conf.c_omp - #end of loop - - timer.stop("total") - timer.stats("total") diff --git a/projects/shocks/init_problem.py b/projects/shocks/init_problem.py deleted file mode 100644 index 72eb2189..00000000 --- a/projects/shocks/init_problem.py +++ /dev/null @@ -1,102 +0,0 @@ -from __future__ import print_function -from configSetup import Configuration - -from numpy import sqrt, pi -import numpy as np - - -class Configuration_Shocks(Configuration): - - def __init__(self, *file_names, do_print=False): - Configuration.__init__(self, *file_names) - - #-------------------------------------------------- - # problem specific initializations - if do_print: - print("Initializing shock setup...") - - #-------------------------------------------------- - # particle initialization - - #local variables just for easier/cleaner syntax - me = np.abs(self.me) - mi = np.abs(self.mi) - c = self.cfl - ppc = self.ppc*2.0 #multiply x2 to account for 2 species/pair plasma - - # if gammas < 1, we interpret it as beta/c - if self.gamma == 0: - self.gamma = 1.0 - self.beta = 0.0 - else: - if(self.gamma < 1.): - self.gamma = sqrt(1./(1.-self.gamma**2.)) - self.beta = sqrt(1.-1./self.gamma**2.) - - #plasma reaction & subsequent normalization - self.omp=c/self.c_omp - self.qe = -(self.omp**2.*self.gamma)/((ppc*.5)*(1.+me/mi)) - self.qi = -self.qe - - me *= abs(self.qi) - mi *= abs(self.qi) - - #temperatures - self.delgam_e = self.delgam - self.delgam_i = self.temp_ratio*self.delgam_e - - #-------------------------------------------------- - # printing - - if do_print: - print("init: Positron thermal spread: ", self.delgam_i) - print("init: Electron thermal spread: ", self.delgam_e) - - sigmaeff = self.sigma #* temperature corrections - if do_print: - print("init: Upstream gamma: ",self.gamma) - print("init: Alfven (outflow) three-velocity: ",sqrt(sigmaeff/(1.+sigmaeff))) - print("init: Ion beta: ", 2.*self.delgam_i/(self.sigma*(mi/me+1.)/(mi/me)) ) - print("init: Electron beta: ",2.*self.delgam_e/(self.sigma*(mi/me+1.)) ) - - #-------------------------------------------------- - # field initialization - - #---------cold plasma----------- - # parse external magnetic field strength from sigma_ext - #self.bz_ext = sqrt( (self.gamma-1.0)*.5*ppc*c**2*(mi+me)*self.sigma_ext) - - #determine initial magnetic field based on magnetization sigma which - #is magnetic energy density/ kinetic energy density - #this definition works even for nonrelativistic flows. 
- self.binit = sqrt((self.gamma)*ppc*.5*c**2.*(me*(1.+me/mi))*self.sigma) - - - #----------hot plasma---------- - #corrdelgam_qe = 1.0 - #corrdelgam_sig = 1.0 - - #delgam_i = self.delgam_i - #delgam_e = self.delgam_e - - #zeta=delgam_i/(0.24 + delgam_i) - #gad_i=1./3.*(5 - 1.21937*zeta + 0.18203*zeta**2 - 0.96583*zeta**3 + 2.32513*zeta**4 - 2.39332*zeta**5 + 1.07136*zeta**6) - #delgam_e=self.delgam*mi/me*self.temp_ratio - #zeta=delgam_e/(0.24 + delgam_e) - #gad_e=1./3.*(5 - 1.21937*zeta + 0.18203*zeta**2 - 0.96583*zeta**3 + 2.32513*zeta**4 - 2.39332*zeta**5 + 1.07136*zeta**6) - - #self.binit=1.*sqrt(ppc*.5*c**2.* \ - # (mi*(1.+corrdelgam_sig*gad_i/(gad_i-1.)*self.delgam_i)+me*(1.+ \ - # corrdelgam_sig*gad_e/(gad_e-1.)*self.delgam_e))*self.sigma) - - if do_print: - print("init: sigma: ", self.sigma) - print("init: B_guide: ", self.binit) - - - #possibly moving piston wall - if(self.wallgamma < 1.): - self.wallgamma = sqrt(1./(1.-self.wallgamma**2.)) - self.beta = sqrt(1.-1./self.gamma**2.) - - diff --git a/projects/shocks/jobs/y8x512s0.rusty b/projects/shocks/jobs/y8x512s0.rusty new file mode 100644 index 00000000..56967514 --- /dev/null +++ b/projects/shocks/jobs/y8x512s0.rusty @@ -0,0 +1,21 @@ +#!/bin/bash +#SBATCH -p cca +#SBATCH -J y8x512s0 +#SBATCH --output=%J.out +#SBATCH --error=%J.err +#SBATCH -t 2-00:00:00 +#SBATCH --nodes 21 +#SBATCH --ntasks-per-node=40 +#SBATCH --exclusive +#SBATCH --constraint=skylake + +# activate threading +export OMP_NUM_THREADS=1 +export PYTHONDONTWRITEBYTECODE=true +export HDF5_USE_FILE_LOCKING=FALSE + +# go to working directory +cd $RUNKODIR/projects/shocks/ + +mpirun python3 pic.py --conf y8x512s0.ini + diff --git a/projects/shocks/filters.py b/projects/shocks/misc/filters.py similarity index 100% rename from projects/shocks/filters.py rename to projects/shocks/misc/filters.py diff --git a/projects/shocks/moving_wall.py b/projects/shocks/misc/moving_wall.py similarity index 100% rename from projects/shocks/moving_wall.py rename to projects/shocks/misc/moving_wall.py diff --git a/projects/shocks/plot2d_var.py b/projects/shocks/old/plot2d_var.py similarity index 87% rename from projects/shocks/plot2d_var.py rename to projects/shocks/old/plot2d_var.py index 01898c3b..c2fef96f 100644 --- a/projects/shocks/plot2d_var.py +++ b/projects/shocks/old/plot2d_var.py @@ -7,19 +7,16 @@ from scipy.stats import mstats from scipy.optimize import curve_fit -from visualize import imshow +from pytools.visualize import imshow +from pytools.conf import Configuration -from configSetup import Configuration -from combine_files import get_file_list -from combine_files import combine_tiles +#from combine_files import get_file_list -from scipy.ndimage.filters import gaussian_filter - -from parser import parse_input +from pytools import read_h5_array +from pytools.cli import parse_args import argparse - # trick to make nice colorbars # see http://joseph-long.com/writing/colorbars/ def colorbar(mappable, @@ -71,40 +68,36 @@ def colorbar(mappable, 'cmap': "RdBu", 'vsymmetric':True, }, + 'Gammae': {'title': r"$\Gamma_e$", + 'vmin': 0.0, + 'vmax': 15.0, + 'derived':True + }, } - -def read_var(f5F, var): - try: - val = f5F[var][:,:,0] - except: - nx = f5F['Nx'].value - ny = f5F['Ny'].value - nz = f5F['Nz'].value - print("reshaping 1D array into multiD with {} {} {}".format(nx,ny,nz)) - - val = f5F[var][:] - val = np.reshape(val, (ny, nx, nz)) - val = np.transpose(val[:,:,0]) - - return val - - def build_bdens(f5F): - bx = read_var(f5F, "bx") - by = read_var(f5F, "by") - bz = 
read_var(f5F, "bz")
+    bx = read_h5_array(f5F, "bx")
+    by = read_h5_array(f5F, "by")
+    bz = read_h5_array(f5F, "bz")
 
     return bx*bx + by*by + bz*bz
 
 
 def build_edens(f5F):
-    ex = read_var(f5F, "ex")
-    ey = read_var(f5F, "ey")
-    ez = read_var(f5F, "ez")
+    ex = read_h5_array(f5F, "ex")
+    ey = read_h5_array(f5F, "ey")
+    ez = read_h5_array(f5F, "ez")
 
     return ex*ex + ey*ey + ez*ez
 
+def build_gamma(f5F):
+    vx = read_h5_array(f5F, "Vxp")
+    vy = read_h5_array(f5F, "Vyp")
+    vz = read_h5_array(f5F, "Vzp")
+
+    # Lorentz factor from the three-velocity; v**2 terms summed inside the root
+    gam = 1.0/np.sqrt(1.0 - (vx**2 + vy**2 + vz**2))
+    return gam
+
 
 def nandiv(x, y):
     return x/y
 
@@ -154,7 +147,7 @@
 
     # normal singular variables
     if not(args['derived']):
-        val = read_var(f5F, var)
+        val = read_h5_array(f5F, var)
 
     # composite quantities
     else:
@@ -164,11 +157,25 @@
             val = build_bdens(f5F)
         if var == "edens":
             val = build_edens(f5F)
+        if var == "Gammae":
+            val = build_gamma(f5F)
+
+
+
+    #drop z direction
+    val = val[:,:,0]
+
+    print("corner000", val[0,0])
+    print("cornerNNN", val[-1,-1])
+    print("min val", np.min(val))
+    print("max val", np.max(val))
+    print("avg val", np.average(val))
+
 
     #--------------------------------------------------
     # get shape
     nx, ny = np.shape(val)
-    #print("nx={} ny={}".format(nx, ny))
+    print("nx={} ny={}".format(nx, ny))
 
     walloc = 5.0
     xmin =-(walloc)/info['skindepth']
@@ -229,8 +236,8 @@
     #--------------------------------------------------
     # colorbar
     #ax.set_xlim((200.0, 600.0))
-    ax.set_xlim((0.0, 400.0))
-    ax.set_ylim((0.0, 64.0))
+    #ax.set_xlim((0.0, 400.0))
+    #ax.set_ylim((0.0, 64.0))
 
 
     if do_dark:
@@ -453,18 +460,19 @@ def plot2d_particles(ax, info, args):
 
 
 #--------------------------------------------------
-def build_info(fdir, lap):
-    info = {}
-    info['lap'] = lap
-    info['fields_file'] = fdir + 'fields_'+str(args.lap)+'.h5'
-    info['analysis_file'] = fdir + 'analysis'+str(args.lap)+'.h5'
-
-    return info
+#def build_info(fdir, lap):
+#    info = {}
+#    info['lap'] = lap
+#    info['fields_file'] = fdir + 'fields_'+str(args.lap)+'.h5'
+#    info['analysis_file'] = fdir + 'analysis'+str(args.lap)+'.h5'
+#
+#    return info
 
 def quick_build_info(fdir, lap):
     info = {}
     info['lap'] = lap
-    info['fields_file'] = fdir + 'flds_'+str(args.lap)+'.h5'
+    #info['fields_file'] = fdir + 'flds_'+str(args.lap)+'.h5'
+    info['fields_file'] = fdir + 'full_moms_'+str(args.lap)+'.h5'
     info['analysis_file'] = fdir + 'analysis_'+str(args.lap)+'.h5'
     info['particle_file'] = fdir + 'test-prtcls'
 
@@ -507,14 +515,18 @@ def quick_build_info(fdir, lap):
 
     #--------------------------------------------------
     # command line driven version
-    conf, fdir, args = parse_input()
+    #conf, fdir, args = parse_input()
+    args = parse_args()
+    conf = Configuration(args.conf_filename, do_print=True)
+    fdir = conf.outdir
 
     fdir += '/'
     print("plotting {}".format(fdir))
 
-    fname_F = "flds"
-    fname_A = "analysis"
-    fname_P = "test-prtcls"
+    #fname_F = "flds"
+    #fname_F = "moments"
+    #fname_A = "analysis"
+    #fname_P = "test-prtcls"
 
     if do_dark:
         plt.style.use('dark_background')
@@ -528,6 +540,7 @@ def quick_build_info(fdir, lap):
 
     #info = build_info(fdir, args.lap)
     info = quick_build_info(fdir, args.lap)
+    info['skindepth'] = conf.c_omp/conf.stride
 
     if do_prtcls:
        for hlaps in range(args.lap-5*conf.interval, args.lap+1, conf.interval):
@@ -541,9 +554,10 @@ def quick_build_info(fdir, lap):
 
     # FIXME
     else:
-        files_F = get_file_list(fdir, fname_F)
-        files_A = get_file_list(fdir, fname_A)
-        files_P = get_file_list(fdir, fname_P)
+        #files_F = get_file_list(fdir, fname_F)
+        
#files_A = get_file_list(fdir, fname_A) + #files_P = get_file_list(fdir, fname_P) + # FIXME and use loop instead for lap, f in enumerate(files_F): diff --git a/projects/shocks/prtcl_path.py b/projects/shocks/old/prtcl_path.py similarity index 100% rename from projects/shocks/prtcl_path.py rename to projects/shocks/old/prtcl_path.py diff --git a/projects/shocks/shock_RH.py b/projects/shocks/old/shock_RH.py similarity index 100% rename from projects/shocks/shock_RH.py rename to projects/shocks/old/shock_RH.py diff --git a/projects/shocks/pic.py b/projects/shocks/pic.py index 0ff5515c..84613de5 100644 --- a/projects/shocks/pic.py +++ b/projects/shocks/pic.py @@ -1,838 +1,600 @@ +# -*- coding: utf-8 -*- + +# system libraries from __future__ import print_function from mpi4py import MPI - import numpy as np - import sys, os -import h5py - -import pycorgi.twoD as corgi -import pyrunko.tools.twoD as pytools -import pyrunko.vlv.twoD as pyvlv -import pyrunko.pic.twoD as pypic -import pyrunko.fields.twoD as pyfld - -from configSetup import Configuration -import argparse -import initialize as init -from initialize_pic import loadTiles -from initialize_pic import initialize_virtuals -from initialize_pic import globalIndx -from sampling import boosted_maxwellian -from initialize_pic import spatialLoc -from injector_pic import inject -from injector_pic import insert_em -from time import sleep -from visualize import get_yee +# runko + auxiliary modules +import pytools # runko python tools -#global simulation seed -np.random.seed(1) -try: - import matplotlib.pyplot as plt - from visualize import plotNode - from visualize import plotJ, plotE, plotDens - from visualize import saveVisz - - from visualize import getYee2D - from visualize import plot2dYee - from visualize_pic import plot2dParticles +# problem specific modules +from problem import Configuration_Problem as Configuration -except: - pass +np.random.seed(1) # global simulation seed -from timer import Timer -from init_problem import Configuration_Shocks as Configuration +# Prtcl velocity (and location modulation inside cell) +# +# NOTE: Cell extents are from xloc to xloc + 1, i.e., dx = 1 in all dims. +# Therefore, typically we use position as x0 + RUnif[0,1). +# +# NOTE: Default injector changes odd ispcs's loc to (ispcs-1)'s prtcl loc. +# This means that positrons/ions are on top of electrons to guarantee +# charge conservation (because zero charge density initially). 
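+#
+# As a hedged illustration of that convention (hypothetical driver loop, not
+# the actual pytools injector code), the injector effectively does, per cell:
+#
+#   x0_e, u0_e = velocity_profile(xloc, 0, conf)  # e-: x0 + RUnif[0,1)
+#   x0_p, u0_p = velocity_profile(xloc, 1, conf)  # e+: fresh velocity sample
+#   x0_p = x0_e  # odd species inherits the electron location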
+#
+def velocity_profile(xloc, ispcs, conf):
 
-    #electrons
+    # electrons
     if ispcs == 0:
-        delgam = conf.delgam #* np.abs(conf.mi / conf.me) * conf.temp_ratio
-
-        xx = xloc[0] + np.random.rand(1)
-        yy = xloc[1] + np.random.rand(1)
-        #zz = xloc[2] + np.random.rand(1)
-        zz = 0.5
+        delgam = conf.delgam  # * np.abs(conf.mi / conf.me) * conf.temp_ratio
 
-    #positrons/ions/second species
-    if ispcs == 1:
-        delgam = conf.delgam
+    # positrons/ions/second species
+    elif ispcs == 1:
+        delgam = conf.delgam
 
-        #on top of electrons
-        xx = xloc[0]
-        yy = xloc[1]
-        zz = 0.5
+    # perturb position between x0 + RUnif[0,1)
+    xx = xloc[0] + np.random.rand(1)
+    yy = xloc[1] + np.random.rand(1)
+    zz = xloc[2] + np.random.rand(1)
 
+    # velocity sampling
     gamma = conf.gamma
     direction = -1
-    ux, uy, uz, uu = boosted_maxwellian(delgam, gamma, direction=direction, dims=3)
+    ux, uy, uz, uu = pytools.sample_boosted_maxwellian(
+        delgam, gamma, direction=direction, dims=3
+    )
 
     x0 = [xx, yy, zz]
     u0 = [ux, uy, uz]
     return x0, u0
 
-def initialize_piston(grid, piston, conf):
-
-    gam = conf.wallgamma
-    beta = np.sqrt(1.-1./gam**2.)
-
-    piston.gammawall = gam
-    piston.betawall = beta
-    piston.walloc = 5.0
-
-    #print("wall gamma:", piston.gammawall)
-    #print("wall beta:", piston.betawall)
-
-    for cid in grid.get_local_tiles():
-        tile = grid.get_tile(cid)
-
-        #TODO: remove prtcls inside the wall
+# number of prtcls of species 'ispcs' to be added to cell at location 'xloc'
+#
+# NOTE: Plasma frequency is adjusted based on conf.ppc (and prtcl charge conf.qe/qi
+#       and mass conf.me/mi are computed accordingly) so modulate density with respect
+#       to conf.ppc only.
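+#
+# For illustration, a hedged sketch of a non-uniform variant (hypothetical;
+# the default below is uniform): halving the density past the box midpoint
+# would read roughly
+#
+#   def density_profile(xloc, ispcs, conf):
+#       xmid = 0.5 * conf.Nx * conf.NxMesh  # box midpoint in cell units (dx = 1)
+#       return conf.ppc if xloc[0] < xmid else conf.ppc // 2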
+# +def density_profile(xloc, ispcs, conf): + # uniform plasma with default n_0 number density + return conf.ppc +# Field initialization +def insert_em_fields(grid, conf): -# Field initialization (guide field) -def insert_em(grid, conf): + # into radians + btheta = conf.btheta / 180.0 * np.pi + bphi = conf.bphi / 180.0 * np.pi + beta = conf.beta - #into radians - btheta = conf.btheta/180.*np.pi - bphi = conf.bphi/180.*np.pi - beta = conf.beta - - kk = 0 - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + for tile in pytools.tiles_all(grid): yee = tile.get_yee(0) - ii,jj = tile.index + ii,jj,kk = tile.index if conf.threeD else (*tile.index, 0) - for n in range(conf.NzMesh): - for m in range(-3, conf.NyMesh+3): - for l in range(-3, conf.NxMesh+3): + # insert values into Yee lattices; includes halos from -3 to n+3 + for n in range(-3, conf.NzMesh + 3): + for m in range(-3, conf.NyMesh + 3): + for l in range(-3, conf.NxMesh + 3): # get global coordinates - iglob, jglob, kglob = globalIndx( (ii,jj), (l,m,n), conf) + iglob, jglob, kglob = pytools.ind2loc((ii, jj, kk), (l, m, n), conf) + r = np.sqrt(iglob ** 2 + jglob ** 2 + kglob ** 2) - yee.bx[l,m,n] = conf.binit*np.cos(btheta) - yee.by[l,m,n] = conf.binit*np.sin(btheta)*np.sin(bphi) - yee.bz[l,m,n] = conf.binit*np.sin(btheta)*np.cos(bphi) + yee.bx[l, m, n] = conf.binit * np.cos(bphi) + yee.by[l, m, n] = conf.binit * np.sin(bphi) * np.sin(btheta) + yee.bz[l, m, n] = conf.binit * np.sin(bphi) * np.cos(btheta) - yee.ex[l,m,n] = 0.0 - yee.ey[l,m,n] =-beta*yee.bz[l,m,n] - yee.ez[l,m,n] = beta*yee.by[l,m,n] + yee.ex[l, m, n] = 0.0 + yee.ey[l, m, n] = -beta * yee.bz[l, m, n] + yee.ez[l, m, n] = beta * yee.by[l, m, n] + return if __name__ == "__main__": - do_plots = False + # -------------------------------------------------- + # initial setup do_print = False - if MPI.COMM_WORLD.Get_rank() == 0: - do_print =True + do_print = True if do_print: - print("Running with {} MPI processes.".format(MPI.COMM_WORLD.Get_size())) - - ################################################## - # set up plotting and figure - try: - if do_plots: - plt.fig = plt.figure(1, figsize=(6,9)) - plt.rc('font', family='serif', size=8) - plt.rc('xtick') - plt.rc('ytick') - - gs = plt.GridSpec(11, 1) - gs.update(hspace = 0.0) - - axs = [] - for ai in range(11): - axs.append( plt.subplot(gs[ai]) ) - except: - #print() - pass - + print("Running pic.py with {} MPI processes.".format(MPI.COMM_WORLD.Get_size())) + # -------------------------------------------------- # Timer for profiling - timer = Timer() + timer = pytools.Timer() + timer.start("total") timer.start("init") - timer.do_print = do_print - + # -------------------------------------------------- # parse command line arguments - parser = argparse.ArgumentParser(description='Simple PIC-Maxwell simulations') - parser.add_argument('--conf', dest='conf_filename', default=None, - help='Name of the configuration file (default: None)') - args = parser.parse_args() - if args.conf_filename == None: - conf = Configuration('shock_mini.ini', do_print=do_print) - else: - if do_print: - print("Reading configuration setup from ", args.conf_filename) - conf = Configuration(args.conf_filename, do_print=do_print) + args = pytools.parse_args() + # create conf object with simulation parameters based on them + conf = Configuration(args.conf_filename, do_print=do_print) - grid = corgi.Grid(conf.Nx, conf.Ny, conf.Nz) + # -------------------------------------------------- + # load runko - xmin = 0.0 - xmax = conf.Nx*conf.NxMesh #XXX scaled 
length - ymin = 0.0 - ymax = conf.Ny*conf.NyMesh - grid.set_grid_lims(xmin, xmax, ymin, ymax) + if conf.threeD: + # 3D modules + import pycorgi.threeD as pycorgi # corgi ++ bindings + import pyrunko.pic.threeD as pypic # runko pic c++ bindings + import pyrunko.fields.threeD as pyfld # runko fld c++ bindings - #init.loadMpiRandomly(grid) - #init.loadMpiXStrides(grid) - debug_print(grid, "load mpi 2d") - init.loadMpi2D(grid) - debug_print(grid, "load tiles") - loadTiles(grid, conf) + elif conf.twoD: + # 2D modules + import pycorgi.twoD as pycorgi # corgi ++ bindings + import pyrunko.pic.twoD as pypic # runko pic c++ bindings + import pyrunko.fields.twoD as pyfld # runko fld c++ bindings - ################################################## - # Path to be created - if grid.master(): - if not os.path.exists( conf.outdir ): - os.makedirs(conf.outdir) - if not os.path.exists( conf.outdir+"/restart" ): - os.makedirs(conf.outdir+"/restart") - if not os.path.exists( conf.outdir+"/full_output" ): - os.makedirs(conf.outdir+"/full_output") - - do_initialization = True - - #check if this is the first time and we do not have any restart files - if not os.path.exists( conf.outdir+'/restart/laps.txt'): - conf.laprestart = -1 #to avoid next if statement - - # restart from latest file - deep_io_switch = 0 - if conf.laprestart >= 0: - do_initialization = False - - #switch between automatic restart and user-defined lap - if conf.laprestart == 0: - - #get latest restart file from housekeeping file - with open(conf.outdir+"/restart/laps.txt", "r") as lapfile: - #lapfile.write("{},{}\n".format(lap, deep_io_switch)) - lines = lapfile.readlines() - slap, sdeep_io_switch = lines[-1].strip().split(',') - lap = int(slap) - deep_io_switch = int(sdeep_io_switch) - - read_lap = deep_io_switch - odir = conf.outdir + '/restart' - - elif conf.laprestart > 0: - lap = conf.laprestart - read_lap = lap - odir = conf.outdir + '/full_output' - - debug_print(grid, "read") - if do_print: - print("...reading Yee lattices (lap {}) from {}".format(read_lap, odir)) - pyvlv.read_yee(grid, read_lap, odir) + # -------------------------------------------------- + # setup grid + grid = pycorgi.Grid(conf.Nx, conf.Ny, conf.Nz) + grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax, conf.zmin, conf.zmax) - if do_print: - print("...reading particles (lap {}) from {}".format(read_lap, odir)) - pyvlv.read_particles(grid, read_lap, odir) + # compute initial mpi ranks using Hilbert's curve partitioning + pytools.balance_mpi(grid, conf) - lap += 1 #step one step ahead + # load pic tiles into grid + pytools.pic.load_tiles(grid, conf) - # initialize - if do_initialization: - debug_print(grid, "inject") - lap = 0 - np.random.seed(1) - inject(grid, filler, conf) #injecting plasma particles - insert_em(grid, conf) + # -------------------------------------------------- + # simulation restart - #static load balancing setup; communicate neighbor info once - debug_print(grid, "analyze bcs") - grid.analyze_boundaries() - debug_print(grid, "send tiles") - grid.send_tiles() - debug_print(grid, "recv tiles") - grid.recv_tiles() - MPI.COMM_WORLD.barrier() + # create output folders + if grid.master(): + pytools.create_output_folders(conf) - #sys.exit() + # get current restart file status + io_stat = pytools.check_for_restart(conf) - debug_print(grid, "init virs") - initialize_virtuals(grid, conf) + # no restart file; initialize simulation + if io_stat["do_initialization"]: + if do_print: + print("initializing simulation...") + lap = 0 + np.random.seed(1) 
# sync rnd generator seed for different mpi ranks - timer.stop("init") - timer.stats("init") + # injecting plasma particles + prtcl_stat = pytools.pic.inject(grid, velocity_profile, density_profile, conf) + if do_print: + print("injected:") + print(" e- prtcls: {}".format(prtcl_stat[0])) + print(" e+ prtcls: {}".format(prtcl_stat[1])) + # inserting em grid + insert_em_fields(grid, conf) - # end of initialization - ################################################## - debug_print(grid, "solvers") + else: + if do_print: + print("restarting simulation from lap {}...".format(io_stat["lap"])) + # read restart files + pyfld.read_yee(grid, io_stat["read_lap"], io_stat["read_dir"]) + pypic.read_particles(grid, io_stat["read_lap"], io_stat["read_dir"]) - # visualize initial condition - if do_plots: - try: - plotNode( axs[0], grid, conf) - #plotXmesh(axs[1], grid, conf, 0, "x") - saveVisz(-1, grid, conf) - except: - pass + # step one step ahead + lap = io_stat["lap"] + 1 + # -------------------------------------------------- + # static load balancing setup; communicate neighborhood info once - Nsamples = conf.Nt - #pusher = pypic.BorisPusher() - pusher = pypic.VayPusher() + grid.analyze_boundaries() + grid.send_tiles() + grid.recv_tiles() + MPI.COMM_WORLD.barrier() + # load virtual mpi halo tiles + pytools.pic.load_virtual_tiles(grid, conf) - #fldprop = pyfld.FDTD2() - fldprop = pyfld.FDTD4() - fintp = pypic.LinearInterpolator() - currint = pypic.ZigZag() - analyzer = pypic.Analyzator() - flt = pyfld.Binomial2(conf.NxMesh, conf.NyMesh, conf.NzMesh) + # -------------------------------------------------- + # load physics solvers - #enhance numerical speed of light slightly to suppress numerical Cherenkov instability - fldprop.corr = 1.02 + # pusher = pypic.BorisPusher() + pusher = pypic.VayPusher() + # fldprop = pyfld.FDTD2() + fldprop = pyfld.FDTD4() - #moving walls - piston = pypic.Piston() - initialize_piston(grid, piston, conf) + fintp = pypic.LinearInterpolator() + currint = pypic.ZigZag() + flt = pyfld.Binomial2(conf.NxMesh, conf.NyMesh, conf.NzMesh) + + # enhance numerical speed of light slightly to suppress numerical Cherenkov instability + fldprop.corr = 1.02 + # -------------------------------------------------- + # I/O objects # quick field snapshots - debug_print(grid, "fld_writer") - fld_writer = pyfld.FieldsWriter(conf.outdir, - conf.Nx, conf.NxMesh, - conf.Ny, conf.NyMesh, - conf.Nz, conf.NzMesh, - conf.stride) + fld_writer = pyfld.FieldsWriter( + conf.outdir, + conf.Nx, + conf.NxMesh, + conf.Ny, + conf.NyMesh, + conf.Nz, + conf.NzMesh, + conf.stride, + ) # test particles - debug_print(grid, "prtcl_writer") prtcl_writer = pypic.TestPrtclWriter( - conf.outdir, - conf.Nx, conf.NxMesh, - conf.Ny, conf.NyMesh, - conf.Nz, conf.NzMesh, - conf.ppc, len(grid.get_local_tiles()), - conf.n_test_prtcls) - - - - - debug_print(grid, "mpi_e") - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - debug_print(grid, "mpi_b") - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - ################################################## - sys.stdout.flush() - - #simulation loop - time = lap*(conf.cfl/conf.c_omp) - for lap in range(lap, conf.Nt+1): - debug_print(grid, "lap_start") + conf.outdir, + conf.Nx, + conf.NxMesh, + conf.Ny, + conf.NyMesh, + conf.Nz, + conf.NzMesh, + conf.ppc, + len(grid.get_local_tiles()), + conf.n_test_prtcls, + ) + + mom_writer = pypic.PicMomentsWriter( + conf.outdir, + conf.Nx, + conf.NxMesh, + conf.Ny, + conf.NyMesh, + conf.Nz, + conf.NzMesh, + conf.stride, + ) + + # 
-------------------------------------------------- + # reflecting leftmost wall + piston = pypic.Piston() + + # set piston wall speed (for standard reflector it is non-moving so gam = 0) + piston.gammawall = conf.wallgamma + piston.betawall = np.sqrt(1.0 - 1.0 / conf.wallgamma ** 2.0) + piston.walloc = 5.0 # leave 5 cell spacing between the wall for boundary conditions + + # -------------------------------------------------- + # -------------------------------------------------- + # -------------------------------------------------- + # end of initialization - ################################################## - # advance Half B + timer.stop("init") + timer.stats("init") + # timer.verbose = 1 # 0 normal; 1 - debug mode - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_b1") - debug_print(grid, "mpi_b1") + # -------------------------------------------------- + # sync e and b fields - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) + # mpi e + grid.send_data(1) + grid.recv_data(1) + grid.wait_data(1) - timer.stop_comp("mpi_b1") + # mpi b + grid.send_data(2) + grid.recv_data(2) + grid.wait_data(2) - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc0") - debug_print(grid, "upd_bc0") + for tile in pytools.tiles_all(grid): + tile.update_boundaries(grid) - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) + ################################################## + # simulation time step loop - timer.stop_comp("upd_bc0") + sys.stdout.flush() - #-------------------------------------------------- - #push B half - timer.start_comp("push_half_b1") - debug_print(grid, "push_half_b1") + # simulation loop + time = lap * (conf.cfl / conf.c_omp) + for lap in range(lap, conf.Nt + 1): - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # push B half + t1 = timer.start_comp("push_half_b1") + for tile in pytools.tiles_all(grid): fldprop.push_half_b(tile) piston.field_bc(tile) + timer.stop_comp(t1) - timer.stop_comp("push_half_b1") - - #-------------------------------------------------- + # -------------------------------------------------- # comm B - timer.start_comp("mpi_b2") - debug_print(grid, "mpi_b2") - - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - timer.stop_comp("mpi_b2") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc1") - debug_print(grid, "upd_bc1") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("mpi_b2") + grid.send_data(2) + grid.recv_data(2) + grid.wait_data(2) + timer.stop_comp(t1) + + # -------------------------------------------------- + # update boundaries + t1 = timer.start_comp("upd_bc1") + for tile in pytools.tiles_all(grid): tile.update_boundaries(grid) + timer.stop_comp(t1) - timer.stop_comp("upd_bc1") - - - ################################################## + ################################################## # move particles (only locals tiles) - #-------------------------------------------------- - #interpolate fields (can move to next asap) - timer.start_comp("interp_em") - debug_print(grid, "interp_em") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # interpolate fields + t1 = timer.start_comp("interp_em") + for tile in pytools.tiles_local(grid): fintp.solve(tile) + timer.stop_comp(t1) - 
timer.stop_comp("interp_em") - #-------------------------------------------------- - - #-------------------------------------------------- - #push particles in x and u - timer.start_comp("push") - debug_print(grid, "push") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # push particles in x and u + t1 = timer.start_comp("push") + for tile in pytools.tiles_local(grid): pusher.solve(tile) + timer.stop_comp(t1) - timer.stop_comp("push") - - #-------------------------------------------------- - # apply moving walls - timer.start_comp("walls") - debug_print(grid, "walls") - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # apply moving/reflecting walls + t1 = timer.start_comp("walls") + for tile in pytools.tiles_local(grid): piston.solve(tile) + timer.stop_comp(t1) - timer.stop_comp("walls") ################################################## # advance B half - #-------------------------------------------------- - #push B half - timer.start_comp("push_half_b2") - debug_print(grid, "push_half_b2") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # push B half + t1 = timer.start_comp("push_half_b2") + for tile in pytools.tiles_all(grid): fldprop.push_half_b(tile) piston.field_bc(tile) + timer.stop_comp(t1) - timer.stop_comp("push_half_b2") - - - #-------------------------------------------------- + # -------------------------------------------------- # comm B - timer.start_comp("mpi_e1") - debug_print(grid, "mpi_e1") - - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - timer.stop_comp("mpi_e1") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc2") - debug_print(grid, "upd_bc2") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("mpi_e1") + grid.send_data(1) + grid.recv_data(1) + grid.wait_data(1) + timer.stop_comp(t1) + + # -------------------------------------------------- + # update boundaries + t1 = timer.start_comp("upd_bc2") + for tile in pytools.tiles_all(grid): tile.update_boundaries(grid) - - timer.stop_comp("upd_bc2") - + timer.stop_comp(t1) ################################################## - # advance E + # advance E - #-------------------------------------------------- - #push E - timer.start_comp("push_e") - debug_print(grid, "push_e") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # push E + t1 = timer.start_comp("push_e") + for tile in pytools.tiles_all(grid): fldprop.push_e(tile) piston.field_bc(tile) + timer.stop_comp(t1) - timer.stop_comp("push_e") - - #-------------------------------------------------- - #current calculation; charge conserving current deposition - timer.start_comp("comp_curr") - debug_print(grid, "comp_cur") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # current calculation; charge conserving current deposition + t1 = timer.start_comp("comp_curr") + for tile in pytools.tiles_local(grid): currint.solve(tile) + timer.stop_comp(t1) - timer.stop_comp("comp_curr") - - #-------------------------------------------------- - timer.start_comp("clear_vir_cur") - debug_print(grid, "clear_vir_cur") - - #clear virtual current arrays for easier addition after mpi - for cid in grid.get_virtual_tiles(): - tile 
= grid.get_tile(cid) + # -------------------------------------------------- + # clear virtual current arrays for boundary addition after mpi + t1 = timer.start_comp("clear_vir_cur") + for tile in pytools.tiles_virtual(grid): tile.clear_current() + timer.stop_comp(t1) - timer.stop_comp("clear_vir_cur") - - - ################################################## - # current solving is also taking place in nbor ranks - # that is why we update virtuals here with MPI - # - # This is the most expensive task so we do not double it - # here. - - #-------------------------------------------------- - #mpi send currents - timer.start_comp("mpi_cur") - debug_print(grid, "mpi_cur") - + # -------------------------------------------------- + # mpi send currents + t1 = timer.start_comp("mpi_cur") grid.send_data(0) - grid.recv_data(0) + grid.recv_data(0) grid.wait_data(0) + timer.stop_comp(t1) - timer.stop_comp("mpi_cur") - - - #-------------------------------------------------- - #exchange currents - timer.start_comp("cur_exchange") - debug_print(grid, "cur_exchange") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # exchange currents + t1 = timer.start_comp("cur_exchange") + for tile in pytools.tiles_all(grid): tile.exchange_currents(grid) - - timer.stop_comp("cur_exchange") - - + timer.stop_comp(t1) ################################################## # particle communication (only local/boundary tiles) - #-------------------------------------------------- - #local particle exchange (independent) - timer.start_comp("check_outg_prtcls") - debug_print(grid, "check_outg_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # local particle exchange (independent) + t1 = timer.start_comp("check_outg_prtcls") + for tile in pytools.tiles_local(grid): tile.check_outgoing_particles() - timer.stop_comp("check_outg_prtcls") - #-------------------------------------------------- + # -------------------------------------------------- # global mpi exchange (independent) - timer.start_comp("pack_outg_prtcls") - debug_print(grid, "pack_outg_prtcls") - - for cid in grid.get_boundary_tiles(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("pack_outg_prtcls") + for tile in pytools.tiles_boundary(grid): tile.pack_outgoing_particles() + timer.stop_comp(t1) - timer.stop_comp("pack_outg_prtcls") - - #-------------------------------------------------- + # -------------------------------------------------- # MPI global particle exchange # transfer primary and extra data - timer.start_comp("mpi_prtcls") - debug_print(grid, "mpi_prtcls") - - debug_print(grid, "mpi_prtcls: send3") - grid.send_data(3) - - debug_print(grid, "mpi_prtcls: recv3") - grid.recv_data(3) - - debug_print(grid, "mpi_prtcls: wait3") - grid.wait_data(3) + t1 = timer.start_comp("mpi_prtcls") + grid.send_data(3) + grid.recv_data(3) + grid.wait_data(3) # orig just after send3 - debug_print(grid, "mpi_prtcls: send4") - grid.send_data(4) - - debug_print(grid, "mpi_prtcls: recv4") - grid.recv_data(4) - - debug_print(grid, "mpi_prtcls: wait4") - grid.wait_data(4) - - timer.stop_comp("mpi_prtcls") + grid.send_data(4) + grid.recv_data(4) + grid.wait_data(4) + timer.stop_comp(t1) - #-------------------------------------------------- + # -------------------------------------------------- # global unpacking (independent) - timer.start_comp("unpack_vir_prtcls") - debug_print(grid, "unpack_vir_prtcls") - - for cid in 
grid.get_virtual_tiles(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("unpack_vir_prtcls") + for tile in pytools.tiles_virtual(grid): tile.unpack_incoming_particles() tile.check_outgoing_particles() + timer.stop_comp(t1) - timer.stop_comp("unpack_vir_prtcls") - - #-------------------------------------------------- + # -------------------------------------------------- # transfer local + global - timer.start_comp("get_inc_prtcls") - debug_print(grid, "get_inc_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("get_inc_prtcls") + for tile in pytools.tiles_local(grid): tile.get_incoming_particles(grid) + timer.stop_comp(t1) - timer.stop_comp("get_inc_prtcls") - - #-------------------------------------------------- + # -------------------------------------------------- # delete local transferred particles - timer.start_comp("del_trnsfrd_prtcls") - debug_print(grid, "del_trnsfrd_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("del_trnsfrd_prtcls") + for tile in pytools.tiles_local(grid): tile.delete_transferred_particles() + timer.stop_comp(t1) - timer.stop_comp("del_trnsfrd_prtcls") - - #-------------------------------------------------- + # -------------------------------------------------- # delete all virtual particles (because new prtcls will come) - timer.start_comp("del_vir_prtcls") - debug_print(grid, "del_vir_prtcls") - - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) + t1 = timer.start_comp("del_vir_prtcls") + for tile in pytools.tiles_virtual(grid): tile.delete_all_particles() - - timer.stop_comp("del_vir_prtcls") - - + timer.stop_comp(t1) ################################################## - #filter + # filter timer.start_comp("filter") - #sweep over npasses times + # sweep over npasses times for fj in range(conf.npasses): - #update global neighbors (mpi) + # update global neighbors (mpi) grid.send_data(0) - grid.recv_data(0) + grid.recv_data(0) grid.wait_data(0) - #get halo boundaries - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + # get halo boundaries and filter + for tile in pytools.tiles_local(grid): tile.update_boundaries(grid) - - #filter each tile - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + for tile in pytools.tiles_local(grid): flt.solve(tile) - MPI.COMM_WORLD.barrier() # sync everybody + MPI.COMM_WORLD.barrier() # sync everybody - #clean current behind piston + # clean current behind piston if conf.npasses > 0: - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) + for tile in pytools.tiles_local(grid): piston.field_bc(tile) - #-------------------------------------------------- + # -------------------------------------------------- timer.stop_comp("filter") - - #-------------------------------------------------- - #add current to E - timer.start_comp("add_cur") - debug_print(grid, "add_cur") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) + # -------------------------------------------------- + # add current to E + t1 = timer.start_comp("add_cur") + for tile in pytools.tiles_all(grid): tile.deposit_current() + timer.stop_comp(t1) - timer.stop_comp("add_cur") - - #comm E - timer.start_comp("mpi_e2") - debug_print(grid, "mpi_e2") - - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - timer.stop_comp("mpi_e2") + # comm E + t1 = timer.start_comp("mpi_e2") + grid.send_data(1) + grid.recv_data(1) + grid.wait_data(1) + timer.stop_comp(t1) + # 
--------------------------------------------------
+        # comm B
+        t1 = timer.start_comp("mpi_b1")
+        grid.send_data(2)
+        grid.recv_data(2)
+        grid.wait_data(2)
+        timer.stop_comp(t1)
+
+        # --------------------------------------------------
+        # update boundaries
+        t1 = timer.start_comp("upd_bc0")
+        for tile in pytools.tiles_all(grid):
+            tile.update_boundaries(grid)
+        timer.stop_comp(t1)

        ##################################################
        # data reduction and I/O

        timer.lap("step")
-        if (lap % conf.interval == 0):
-            debug_print(grid, "io")
+        if lap % conf.interval == 0:
            if do_print:
                print("--------------------------------------------------")
-                print("------ lap: {} / t: {}".format(lap, time))
-
-            #for cid in grid.get_tile_ids():
-            #    tile = grid.get_tile(cid)
-            #    tile.erase_temporary_arrays()
+                print("------ lap: {} / t: {}".format(lap, time))

            timer.stats("step")
            timer.comp_stats()
            timer.purge_comps()
-
-            #analyze (independent)
+
+            # analyze (independent)
            timer.start("io")

-            #shrink particle arrays
-            for cid in grid.get_tile_ids():
-                tile = grid.get_tile(cid)
+            # shrink particle arrays
+            for tile in pytools.tiles_all(grid):
                tile.shrink_to_fit_all_particles()

-            #analyze
-            for cid in grid.get_local_tiles():
-                tile = grid.get_tile(cid)
-                analyzer.analyze2d(tile)
+            # analyze
+            # for tile in pytools.tiles_local(grid): analyzer.analyze2d(tile)

            # barrier for quick writers
            MPI.COMM_WORLD.barrier()

-            debug_print(grid, "fld_writer")
-            #shallow IO
-            fld_writer.write(grid, lap) #quick field snapshots
+            # shallow IO
+            fld_writer.write(grid, lap)    # quick field snapshots
+            prtcl_writer.write(grid, lap)  # test particles
+            mom_writer.write(grid, lap)    # moments of the particle distribution

-            debug_print(grid, "prtcl_writer")
-            prtcl_writer.write(grid, lap) #test particles
+            # deep IO
+            if conf.full_interval > 0 and (lap % conf.full_interval == 0) and (lap > 0):
+                pyfld.write_yee(grid, lap, conf.outdir + "/full_output/")
+                pypic.write_particles(grid, lap, conf.outdir + "/full_output/")
+                # pypic.write_analysis(grid, lap, conf.outdir + "/full_output/")

-            #deep IO
-            if (conf.full_interval > 0 and (lap % conf.full_interval == 0) and (lap > 0)):
-                debug_print(grid, "deep io")
+            # restart IO (overwrites)
+            if (lap % conf.restart == 0) and (lap > 0):

-                debug_print(grid, "deep write_yee")
-                pyvlv.write_yee(grid, lap, conf.outdir + "/full_output/" )
-                debug_print(grid, "deep write_analysis")
-                pyvlv.write_analysis(grid, lap, conf.outdir + "/full_output/" )
-                debug_print(grid, "deep write_prtcls")
-                pyvlv.write_particles(grid,lap, conf.outdir + "/full_output/" )
+                # flip between two sets of files
+                io_stat["deep_io_switch"] = 1 if io_stat["deep_io_switch"] == 0 else 0
+                pyfld.write_yee(
+                    grid, io_stat["deep_io_switch"], conf.outdir + "/restart/"
+                )
+                pypic.write_particles(
+                    grid, io_stat["deep_io_switch"], conf.outdir + "/restart/"
+                )

-            #restart IO (overwrites)
-            if ((lap % conf.restart == 0) and (lap > 0)):
-                debug_print(grid, "restart io")
-                #flip between two sets of files
-                deep_io_switch = 1 if deep_io_switch == 0 else 0
-
-                debug_print(grid, "write_yee")
-                pyvlv.write_yee(grid, deep_io_switch, conf.outdir + "/restart/" )
-                debug_print(grid, "write_prtcls")
-                pyvlv.write_particles(grid,deep_io_switch, conf.outdir + "/restart/" )
-
-                #if successful adjust info file
-                MPI.COMM_WORLD.barrier() # sync everybody in case of failure before write
+                # if successful adjust info file
+                MPI.COMM_WORLD.barrier()  # sync everybody in case of failure before write
                if grid.rank() == 0:
-                    with open(conf.outdir+"/restart/laps.txt", "a") as lapfile:
-                        lapfile.write("{},{}\n".format(lap, deep_io_switch))
-
-
-            #--------------------------------------------------
-            #2D plots
-            if do_plots:
-                try:
-                    plotNode(axs[0], grid, conf)
-
-                    yee = getYee2D(grid, conf)
-                    plot2dYee(axs[1], yee, grid, conf, 'rho', label_title=True)
-                    plot2dYee(axs[2], yee, grid, conf, 'jx' , label_title=True)
-                    plot2dYee(axs[3], yee, grid, conf, 'jy' , label_title=True)
-                    plot2dYee(axs[4], yee, grid, conf, 'jz' , label_title=True)
-                    plot2dYee(axs[5], yee, grid, conf, 'ex' , label_title=True)
-                    plot2dYee(axs[6], yee, grid, conf, 'ey' , label_title=True)
-                    plot2dYee(axs[7], yee, grid, conf, 'ez' , label_title=True)
-                    plot2dYee(axs[8], yee, grid, conf, 'bx' , label_title=True)
-                    plot2dYee(axs[9], yee, grid, conf, 'by' , label_title=True)
-                    plot2dYee(axs[10], yee, grid, conf, 'bz' , label_title=True)
-                    saveVisz(lap, grid, conf)
-                except:
-                    #print()
-                    pass
-            timer.stop("io")
+                    with open(conf.outdir + "/restart/laps.txt", "a") as lapfile:
+                        lapfile.write("{},{}\n".format(lap, io_stat["deep_io_switch"]))
+            timer.stop("io")

            timer.stats("io")
-            timer.start("step") #refresh lap counter (avoids IO profiling)
+            timer.start("step")  # refresh lap counter (avoids IO profiling)
            sys.stdout.flush()

-        #move grid
-        #for cid in grid.get_tile_ids():
-        #    tile = grid.get_tile(cid)
-
-        #    #get current loc and advance
-        #    (i,j) = tile.index
-        #    mins = spatialLoc(grid, [i,j], [0,0,0], conf)
-        #    maxs = spatialLoc(grid, [i,j], [conf.NxMesh, conf.NyMesh, conf.NzMesh], conf)
-
-        #    #increase x
-        #    mins[0] = +1.0
-        #    maxs[0] = +1.0
-
-        #    tile.set_tile_mins(mins[0:2])
-        #    tile.set_tile_maxs(maxs[0:2])
-
+        # next step
+        time += conf.cfl / conf.c_omp
+    # end of loop

-        #next step
-        time += conf.cfl/conf.c_omp
-    #end of loop
+    # --------------------------------------------------
+    # end of simulation

    timer.stop("total")
    timer.stats("total")
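# ----------------------------------------------------------------------
# [editor's note, not part of the original patch] The refactor above
# replaces the repeated runko idiom
#
#     for cid in grid.get_local_tiles():
#         tile = grid.get_tile(cid)
#         ...
#
# with pytools generator helpers. A minimal sketch of what these helpers
# are assumed to look like (hypothetical reconstruction; the actual
# implementations ship with pytools):
#
#     def tiles_local(grid):
#         # concrete tiles owned by this MPI rank
#         for cid in grid.get_local_tiles():
#             yield grid.get_tile(cid)
#
#     def tiles_all(grid):
#         # every tile known to this rank, virtuals included
#         for cid in grid.get_tile_ids():
#             yield grid.get_tile(cid)
#
#     def tiles_virtual(grid):
#         # ghost tiles mirrored from neighboring ranks
#         for cid in grid.get_virtual_tiles():
#             yield grid.get_tile(cid)
#
#     def tiles_boundary(grid):
#         # local tiles that border another rank's domain
#         for cid in grid.get_boundary_tiles():
#             yield grid.get_tile(cid)
# ----------------------------------------------------------------------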
diff --git a/projects/shocks/plot3d.py b/projects/shocks/plot3d.py
new file mode 100644
index 00000000..0b87ef5b
--- /dev/null
+++ b/projects/shocks/plot3d.py
@@ -0,0 +1,221 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+import h5py as h5
+import sys, os
+import matplotlib.ticker as ticker
+from matplotlib import cm
+
+#3D
+from mpl_toolkits.mplot3d import Axes3D
+
+# pytools bindings
+import pytools
+from problem import Configuration_Problem as Configuration
+
+
+if __name__ == "__main__":
+
+    do_print = True
+    do_dark = False
+
+    if do_dark:
+        fig = plt.figure(1, figsize=(8,4.0), dpi=300)
+
+        plt.rc('font', family='serif', size=7)
+        plt.rc('xtick')
+        plt.rc('ytick')
+
+        plt.style.use('dark_background')
+    else:
+        fig = plt.figure(1, figsize=(4,2.5), dpi=200)
+        plt.rc('font', family='sans-serif')
+        #plt.rc('text', usetex=True)
+        plt.rc('xtick', labelsize=8)
+        plt.rc('ytick', labelsize=8)
+        plt.rc('axes', labelsize=8)
+        #plt.style.use('dark_background')
+
+
+    gs = plt.GridSpec(1, 1)
+    gs.update(hspace = 0.0)
+    gs.update(wspace = 0.0)
+
+    axs = []
+    #axs.append( plt.subplot(gs[0,0]) )
+    axs.append( fig.add_subplot(111, projection='3d') )
+
+
+    #--------------------------------------------------
+    # prepare data
+
+    args = pytools.parse_args()
+    conf = Configuration(args.conf_filename, do_print=do_print)
+
+    print(conf.outdir)
+
+    fname_fld = "flds"
+    fname_prtcls = "test-prtcls"
+
+    #args.lap
+    lap = 1000
+
+    box_offset = 0
+    for lap in [100, 1000, 2000]:
+
+        fields_file = conf.outdir + '/'+fname_fld+'_'+str(lap)+'.h5'
+
+        f5 = h5.File(fields_file,'r')
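+        # [editor's note, not in the original patch] the flds_<lap>.h5 file is
+        # assumed to store each field component as a named dataset; a minimal
+        # stand-in for pytools.read_h5_array under that assumption would be:
+        #
+        #     def read_h5_array(f5, var):
+        #         return np.array(f5[var])  # dataset -> ndarray of shape (nx, ny, nz)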
+        data = pytools.read_h5_array(f5, 'jz')
+
+        #cut reflector out
+        data = data[6:,:,:]
+
+        #limit box length
+        xlen = 512
+        if lap <= 100:
+            xlen = 128
+        elif lap <= 1000:
+            xlen = 256+64
+
+        #data = data[0:256,:,:]
+        #data = data[0:512,:,:]
+        data = data[0:xlen,:,:]
+
+        print(np.shape(data))
+
+        nx, ny, nz = np.shape(data)
+
+        #--------------------------------------------------
+        # create box
+
+        box = pytools.pybox3d.Box(axs[0])
+
+        box.z0 -= box_offset
+
+        aspect_ratio = nx/ny
+        print("box aspect ratio: {}".format(aspect_ratio))
+        box.dx = aspect_ratio
+        box.dy = 1.0
+        box.dz = 1.5
+
+        #box.set_data(np.log10(data))
+        box.set_data(data)
+        #box.vmin = -0.02
+        #box.vmax = +0.02
+        cmap = cm.get_cmap('RdBu')
+
+
+        #surface rendering
+        box.draw_left( cmap=cmap)
+        box.draw_front(cmap=cmap)
+        box.draw_top(  cmap=cmap)
+        box.draw_outline()
+
+
+
+        #back exploded panels
+        if False:
+            data = pytools.read_h5_array(f5, 'bz')
+            data = data[6:,:,:]      #cut off reflector
+            data = data[0:xlen,:,:]  #limit box length
+            bz2 = data**2
+
+            box.set_data(bz2)
+            box.vmin = 0.0
+            #box.vmax = 0.20
+            cmap = cm.get_cmap('viridis')
+
+            off_bot = 1.7
+            off_back = 1.7
+            off_left = 0.7
+            off_right = 0.7
+
+            box.draw_exploded_panels_outline("bottom", off=off_bot)
+            box.draw_exploded_panels_outline("left", off=off_left)
+            #box.draw_exploded_panels_outline("right", off=off_right)
+            box.draw_exploded_panels_outline("back", off=off_back)
+
+            box.draw_exploded_bottom(off=off_bot, cmap=cmap)
+            box.draw_exploded_back( off=off_back, cmap=cmap)
+
+            box.draw_exploded_left( off=off_left, cmap=cmap)
+            #box.draw_exploded_right( off=off_right, cmap=cmap)
+
+        if False:
+            #front exploded panels
+            off = -1.95  #this puts them in front of the box
+            cmap = plt.cm.RdBu
+            box.draw_exploded_panels_outline("bottom", off=off)
+            box.draw_exploded_panels_outline("left", off=off)
+            box.draw_exploded_panels_outline("right", off=off)
+
+            box.draw_exploded_bottom(off=off, cmap=cmap)
+            box.draw_exploded_left( off=off, cmap=cmap)
+            box.draw_exploded_right( off=off, cmap=cmap)
+
+
+        if True:
+
+            #last lap exploded slices
+            if lap == 2000:
+                for loc in np.linspace(0.2, 0.8, 8):
+
+                    locx = loc*aspect_ratio
+                    print("slice at:", locx)
+                    box.draw_exploded_slice("left-front", locx, 2.2, cmap=cmap)
+
+
+
+        axs[0].set_axis_off()
+        #axs[0].view_init(45.0, -110.0)
+
+        axs[0].view_init(45.0, -130.0)
+
+        box_offset += 3.1
+
+    #end of lap loop
+
+    if False:
+        #colorbars
+        m = plt.cm.ScalarMappable(cmap=plt.cm.viridis)
+        m.set_array([0.0, 1.0])
+        m.set_clim( vmin=0.0, vmax=1.0 )
+        cbaxes = fig.add_axes([0.2, 0.91, 0.6, 0.02])  #[left, bottom, width, height]
+        cb = plt.colorbar(m, cax = cbaxes, orientation="horizontal", ticklocation="top")
+        fig.text(0.15, 0.91, r'$n_{\pm}$')
+
+        m = plt.cm.ScalarMappable(cmap=plt.cm.inferno)
+        m.set_array([0.0, 1.0])
+        m.set_clim( vmin=0.0, vmax=1.0 )
+        cbaxes = fig.add_axes([0.2, 0.09, 0.6, 0.02])  #[left, bottom, width, height]
+        cb = plt.colorbar(m, cax = cbaxes, orientation="horizontal", ticklocation="top")
+        fig.text(0.15, 0.10, r'$n_{\nu}$')
+
+        m = plt.cm.ScalarMappable(cmap=plt.cm.RdBu)
+        m.set_array([-1.0, 1.0])
+        m.set_clim( vmin=-1.0, vmax=1.0 )
+        cbaxes = fig.add_axes([0.2, 0.06, 0.6, 0.02])  #[left, bottom, width, height]
+        cb = plt.colorbar(m, cax = cbaxes, orientation="horizontal", ticklocation="bottom")
+        #cb.set_label(r'$J$', rotation=0)
+        fig.text(0.15, 0.05, r'$J$')
+
+
+    pytools.pybox3d.axisEqual3D(axs[0])
+
+    #axs[0].set_title('Step {}'.format(lap+1))
+
+    slap = str(lap).rjust(4, '0')
+    fname = conf.outdir + '/3d_long_'+slap
+    plt.subplots_adjust(left=-0.45, bottom=-0.45, right=1.35, top=1.45)
+
+    axs[0].set_xlabel('x')
+    axs[0].set_ylabel('y')
+    axs[0].set_zlabel('z')
+
+    #plt.savefig(fname+'.pdf')
+    plt.savefig(fname+'.png')
+
+
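# ----------------------------------------------------------------------
# [editor's note, not part of the original patch] a usage sketch for
# plot3d.py, assuming pytools.parse_args() exposes the same --conf flag
# as the simulation scripts (hypothetical invocation and config name):
#
#     python3 plot3d.py --conf shock.ini
#
# which renders the jz surface boxes for laps 100, 1000, and 2000 into
# <outdir>/3d_long_<lap>.png
# ----------------------------------------------------------------------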
diff --git a/projects/shocks/problem.py b/projects/shocks/problem.py
new file mode 100644
index 00000000..608b895f
--- /dev/null
+++ b/projects/shocks/problem.py
@@ -0,0 +1,59 @@
+from __future__ import print_function
+
+from pytools import Configuration
+
+import numpy as np
+from numpy import sqrt, pi
+
+
+# extend default conf class with problem specific parameters
+class Configuration_Problem(Configuration):
+    def __init__(self, *file_names, do_print=False):
+        Configuration.__init__(self, *file_names)
+
+        # problem specific initializations
+        if do_print:
+            print("Initializing problem setup...")
+
+        # local variables just for easier/cleaner syntax
+        me = np.abs(self.me)
+        mi = np.abs(self.mi)
+        c = self.cfl
+        ppc = self.ppc * 2.0  # multiply by 2 to account for the 2 species of a pair plasma
+
+        # if gamma < 1, we interpret it as beta (= v/c)
+        if self.gamma == 0:
+            self.gamma = 1.0
+            self.beta = 0.0
+        else:
+            if self.gamma < 1.0:
+                self.gamma = sqrt(1.0 / (1.0 - self.gamma ** 2.0))
+            self.beta = sqrt(1.0 - 1.0 / self.gamma ** 2.0)
+
+        # plasma frequency & subsequent normalization
+        self.omp = c / self.c_omp
+        self.qe = -(self.omp ** 2.0 * self.gamma) / ((ppc * 0.5) * (1.0 + me / mi))
+        self.qi = -self.qe
+
+        me *= abs(self.qi)
+        mi *= abs(self.qi)
+
+        # temperatures
+        self.delgam_e = self.delgam
+        self.delgam_i = self.temp_ratio * self.delgam_e
+
+        # ---------cold plasma-----------
+        # parse external magnetic field strength from sigma_ext
+
+        # determine initial magnetic field based on magnetization sigma, which
+        # is magnetic energy density / kinetic energy density;
+        # this definition works even for nonrelativistic flows.
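+        # [editor's note, not in the original patch] reading the expression
+        # below backwards: sigma = binit**2 / (gamma * (ppc/2) * c**2 * me * (1 + me/mi)),
+        # i.e. binit is chosen so that the magnetic energy density equals
+        # sigma times the (relativistic) kinetic energy density of the
+        # drifting pair plasma, in code units.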
+ self.binit = sqrt( + (self.gamma) * ppc * 0.5 * c ** 2.0 * (me * (1.0 + me / mi)) * self.sigma + ) + + #possibly moving piston wall + #if(self.wallgamma < 1.): + # self.wallgamma = sqrt(1./(1.-self.wallgamma**2.)) + + diff --git a/projects/shocks/weibel.py b/projects/shocks/weibel.py deleted file mode 100644 index 98d01b0e..00000000 --- a/projects/shocks/weibel.py +++ /dev/null @@ -1,788 +0,0 @@ -from __future__ import print_function -from mpi4py import MPI - -import numpy as np - -import sys, os -import h5py - -import pycorgi.twoD as corgi -import pyrunko.tools.twoD as pytools -import pyrunko.vlv.twoD as pyvlv -import pyrunko.pic.twoD as pypic -import pyrunko.fields.twoD as pyfld - - -from configSetup import Configuration -import argparse -import initialize as init -from initialize_pic import loadTiles -from initialize_pic import initialize_virtuals -from initialize_pic import globalIndx -from sampling import boosted_maxwellian -from initialize_pic import spatialLoc -from injector_pic import inject -from injector_pic import insert_em -from time import sleep -from visualize import get_yee - -#global simulation seed -np.random.seed(1) - -try: - import matplotlib.pyplot as plt - from visualize import plotNode - from visualize import plotJ, plotE, plotDens - from visualize import saveVisz - - from visualize import getYee2D - from visualize import plot2dYee - from visualize_pic import plot2dParticles - -except: - pass - -from timer import Timer - -from init_problem import Configuration_Shocks as Configuration - -debug = False - -def debug_print(n, msg): - if debug: - print("{}: {}".format(n.rank(), msg)) - sys.stdout.flush() - - -def filler(xloc, ispcs, conf): - # perturb position between x0 + RUnif[0,1) - - #electrons - if ispcs == 0: - delgam = conf.delgam #* np.abs(conf.mi / conf.me) * conf.temp_ratio - - xx = xloc[0] + np.random.rand(1) - yy = xloc[1] + np.random.rand(1) - #zz = xloc[2] + np.random.rand(1) - zz = 0.5 - - #positrons/ions/second species - if ispcs == 1: - delgam = conf.delgam - - #on top of electrons - xx = xloc[0] - yy = xloc[1] - zz = 0.5 - - gamma = conf.gamma - direction = -1 - ux, uy, uz, uu = boosted_maxwellian(delgam, gamma, direction=direction, dims=3) - - x0 = [xx, yy, zz] - u0 = [ux, uy, uz] - return x0, u0 - - - -# Field initialization (guide field) -def insert_em(grid, conf): - - #into radians - btheta = conf.btheta/180.*np.pi - bphi = conf.bphi/180.*np.pi - beta = conf.beta - - kk = 0 - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - yee = tile.get_yee(0) - - ii,jj = tile.index - - for n in range(conf.NzMesh): - for m in range(-3, conf.NyMesh+3): - for l in range(-3, conf.NxMesh+3): - # get global coordinates - iglob, jglob, kglob = globalIndx( (ii,jj), (l,m,n), conf) - - yee.bx[l,m,n] = conf.binit*np.cos(btheta) - yee.by[l,m,n] = conf.binit*np.sin(btheta)*np.sin(bphi) - yee.bz[l,m,n] = conf.binit*np.sin(btheta)*np.cos(bphi) - - yee.ex[l,m,n] = 0.0 - yee.ey[l,m,n] =-beta*yee.bz[l,m,n] - yee.ez[l,m,n] = beta*yee.by[l,m,n] - - -if __name__ == "__main__": - - do_plots = True - do_print = False - - if MPI.COMM_WORLD.Get_rank() == 0: - do_print =True - - if do_print: - print("Running with {} MPI processes.".format(MPI.COMM_WORLD.Get_size())) - - ################################################## - # set up plotting and figure - try: - if do_plots: - plt.fig = plt.figure(1, figsize=(6,9)) - plt.rc('font', family='serif', size=8) - plt.rc('xtick') - plt.rc('ytick') - - gs = plt.GridSpec(11, 1) - gs.update(hspace = 0.0) - - axs = [] - for ai in 
range(11): - axs.append( plt.subplot(gs[ai]) ) - except: - #print() - pass - - - # Timer for profiling - timer = Timer() - timer.start("total") - timer.start("init") - - timer.do_print = do_print - - - # parse command line arguments - parser = argparse.ArgumentParser(description='Simple PIC-Maxwell simulations') - parser.add_argument('--conf', dest='conf_filename', default=None, - help='Name of the configuration file (default: None)') - args = parser.parse_args() - if args.conf_filename == None: - conf = Configuration('shock_mini.ini', do_print=do_print) - else: - if do_print: - print("Reading configuration setup from ", args.conf_filename) - conf = Configuration(args.conf_filename, do_print=do_print) - - - grid = corgi.Grid(conf.Nx, conf.Ny, conf.Nz) - - xmin = 0.0 - xmax = conf.Nx*conf.NxMesh #XXX scaled length - ymin = 0.0 - ymax = conf.Ny*conf.NyMesh - grid.set_grid_lims(xmin, xmax, ymin, ymax) - - #init.loadMpiRandomly(grid) - #init.loadMpiXStrides(grid) - debug_print(grid, "load mpi 2d") - init.loadMpi2D(grid) - debug_print(grid, "load tiles") - loadTiles(grid, conf) - - ################################################## - # Path to be created - if grid.master(): - if not os.path.exists( conf.outdir ): - os.makedirs(conf.outdir) - if not os.path.exists( conf.outdir+"/restart" ): - os.makedirs(conf.outdir+"/restart") - if not os.path.exists( conf.outdir+"/full_output" ): - os.makedirs(conf.outdir+"/full_output") - - do_initialization = True - - #check if this is the first time and we do not have any restart files - if not os.path.exists( conf.outdir+'/restart/laps.txt'): - conf.laprestart = -1 #to avoid next if statement - - # restart from latest file - deep_io_switch = 0 - if conf.laprestart >= 0: - do_initialization = False - - #switch between automatic restart and user-defined lap - if conf.laprestart == 0: - - #get latest restart file from housekeeping file - with open(conf.outdir+"/restart/laps.txt", "r") as lapfile: - #lapfile.write("{},{}\n".format(lap, deep_io_switch)) - lines = lapfile.readlines() - slap, sdeep_io_switch = lines[-1].strip().split(',') - lap = int(slap) - deep_io_switch = int(sdeep_io_switch) - - read_lap = deep_io_switch - odir = conf.outdir + '/restart' - - elif conf.laprestart > 0: - lap = conf.laprestart - read_lap = lap - odir = conf.outdir + '/full_output' - - debug_print(grid, "read") - if do_print: - print("...reading Yee lattices (lap {}) from {}".format(read_lap, odir)) - pyvlv.read_yee(grid, read_lap, odir) - - if do_print: - print("...reading particles (lap {}) from {}".format(read_lap, odir)) - pyvlv.read_particles(grid, read_lap, odir) - - lap += 1 #step one step ahead - - # initialize - if do_initialization: - debug_print(grid, "inject") - lap = 0 - np.random.seed(1) - inject(grid, filler, conf) #injecting plasma particles - insert_em(grid, conf) - - #static load balancing setup; communicate neighbor info once - debug_print(grid, "analyze bcs") - grid.analyze_boundaries() - debug_print(grid, "send tiles") - grid.send_tiles() - debug_print(grid, "recv tiles") - grid.recv_tiles() - MPI.COMM_WORLD.barrier() - - #sys.exit() - - debug_print(grid, "init virs") - initialize_virtuals(grid, conf) - - - timer.stop("init") - timer.stats("init") - - - # end of initialization - ################################################## - debug_print(grid, "solvers") - - - # visualize initial condition - if do_plots: - try: - plotNode( axs[0], grid, conf) - #plotXmesh(axs[1], grid, conf, 0, "x") - saveVisz(-1, grid, conf) - except: - pass - - - Nsamples = conf.Nt - 
#pusher = pypic.BorisPusher() - pusher = pypic.VayPusher() - - - #fldprop = pyfld.FDTD2() - fldprop = pyfld.FDTD4() - fintp = pypic.LinearInterpolator() - currint = pypic.ZigZag() - analyzer = pypic.Analyzator() - flt = pyfld.Binomial2(conf.NxMesh, conf.NyMesh, conf.NzMesh) - - #enhance numerical speed of light slightly to suppress numerical Cherenkov instability - fldprop.corr = 1.02 - - - # quick field snapshots - debug_print(grid, "fld_writer") - fld_writer = pyfld.FieldsWriter(conf.outdir, - conf.Nx, conf.NxMesh, - conf.Ny, conf.NyMesh, - conf.Nz, conf.NzMesh, - conf.stride) - - # test particles - debug_print(grid, "prtcl_writer") - prtcl_writer = pypic.TestPrtclWriter( - conf.outdir, - conf.Nx, conf.NxMesh, - conf.Ny, conf.NyMesh, - conf.Nz, conf.NzMesh, - conf.ppc, len(grid.get_local_tiles()), - conf.n_test_prtcls) - - - - - debug_print(grid, "mpi_e") - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - debug_print(grid, "mpi_b") - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - ################################################## - sys.stdout.flush() - - #simulation loop - time = lap*(conf.cfl/conf.c_omp) - for lap in range(lap, conf.Nt+1): - debug_print(grid, "lap_start") - - ################################################## - # advance Half B - - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_b1") - debug_print(grid, "mpi_b1") - - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - timer.stop_comp("mpi_b1") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc0") - debug_print(grid, "upd_bc0") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - timer.stop_comp("upd_bc0") - - #-------------------------------------------------- - #push B half - timer.start_comp("push_half_b1") - debug_print(grid, "push_half_b1") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - fldprop.push_half_b(tile) - - timer.stop_comp("push_half_b1") - - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_b2") - debug_print(grid, "mpi_b2") - - grid.send_data(2) - grid.recv_data(2) - grid.wait_data(2) - - timer.stop_comp("mpi_b2") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc1") - debug_print(grid, "upd_bc1") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - timer.stop_comp("upd_bc1") - - - ################################################## - # move particles (only locals tiles) - - #-------------------------------------------------- - #interpolate fields (can move to next asap) - timer.start_comp("interp_em") - debug_print(grid, "interp_em") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - fintp.solve(tile) - - timer.stop_comp("interp_em") - #-------------------------------------------------- - - #-------------------------------------------------- - #push particles in x and u - timer.start_comp("push") - debug_print(grid, "push") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - pusher.solve(tile) - - timer.stop_comp("push") - - #-------------------------------------------------- - # apply moving walls - timer.start_comp("walls") - debug_print(grid, "walls") - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - - timer.stop_comp("walls") - ################################################## - # advance B half - - 
#-------------------------------------------------- - #push B half - timer.start_comp("push_half_b2") - debug_print(grid, "push_half_b2") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - fldprop.push_half_b(tile) - - timer.stop_comp("push_half_b2") - - - #-------------------------------------------------- - # comm B - timer.start_comp("mpi_e1") - debug_print(grid, "mpi_e1") - - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - timer.stop_comp("mpi_e1") - - #-------------------------------------------------- - #update boundaries - timer.start_comp("upd_bc2") - debug_print(grid, "upd_bc2") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.update_boundaries(grid) - - timer.stop_comp("upd_bc2") - - - ################################################## - # advance E - - #-------------------------------------------------- - #push E - timer.start_comp("push_e") - debug_print(grid, "push_e") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - fldprop.push_e(tile) - - timer.stop_comp("push_e") - - #-------------------------------------------------- - #current calculation; charge conserving current deposition - timer.start_comp("comp_curr") - debug_print(grid, "comp_cur") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - currint.solve(tile) - - timer.stop_comp("comp_curr") - - #-------------------------------------------------- - timer.start_comp("clear_vir_cur") - debug_print(grid, "clear_vir_cur") - - #clear virtual current arrays for easier addition after mpi - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) - tile.clear_current() - - timer.stop_comp("clear_vir_cur") - - - ################################################## - # current solving is also taking place in nbor ranks - # that is why we update virtuals here with MPI - # - # This is the most expensive task so we do not double it - # here. 
- - #-------------------------------------------------- - #mpi send currents - timer.start_comp("mpi_cur") - debug_print(grid, "mpi_cur") - - grid.send_data(0) - grid.recv_data(0) - grid.wait_data(0) - - timer.stop_comp("mpi_cur") - - - #-------------------------------------------------- - #exchange currents - timer.start_comp("cur_exchange") - debug_print(grid, "cur_exchange") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.exchange_currents(grid) - - timer.stop_comp("cur_exchange") - - - - ################################################## - # particle communication (only local/boundary tiles) - - #-------------------------------------------------- - #local particle exchange (independent) - timer.start_comp("check_outg_prtcls") - debug_print(grid, "check_outg_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.check_outgoing_particles() - - timer.stop_comp("check_outg_prtcls") - - #-------------------------------------------------- - # global mpi exchange (independent) - timer.start_comp("pack_outg_prtcls") - debug_print(grid, "pack_outg_prtcls") - - for cid in grid.get_boundary_tiles(): - tile = grid.get_tile(cid) - tile.pack_outgoing_particles() - - timer.stop_comp("pack_outg_prtcls") - - #-------------------------------------------------- - # MPI global particle exchange - # transfer primary and extra data - timer.start_comp("mpi_prtcls") - debug_print(grid, "mpi_prtcls") - - debug_print(grid, "mpi_prtcls: send3") - grid.send_data(3) - - debug_print(grid, "mpi_prtcls: recv3") - grid.recv_data(3) - - debug_print(grid, "mpi_prtcls: wait3") - grid.wait_data(3) - - # orig just after send3 - debug_print(grid, "mpi_prtcls: send4") - grid.send_data(4) - - debug_print(grid, "mpi_prtcls: recv4") - grid.recv_data(4) - - debug_print(grid, "mpi_prtcls: wait4") - grid.wait_data(4) - - timer.stop_comp("mpi_prtcls") - - #-------------------------------------------------- - # global unpacking (independent) - timer.start_comp("unpack_vir_prtcls") - debug_print(grid, "unpack_vir_prtcls") - - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) - tile.unpack_incoming_particles() - tile.check_outgoing_particles() - - timer.stop_comp("unpack_vir_prtcls") - - #-------------------------------------------------- - # transfer local + global - timer.start_comp("get_inc_prtcls") - debug_print(grid, "get_inc_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.get_incoming_particles(grid) - - timer.stop_comp("get_inc_prtcls") - - #-------------------------------------------------- - # delete local transferred particles - timer.start_comp("del_trnsfrd_prtcls") - debug_print(grid, "del_trnsfrd_prtcls") - - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - tile.delete_transferred_particles() - - timer.stop_comp("del_trnsfrd_prtcls") - - #-------------------------------------------------- - # delete all virtual particles (because new prtcls will come) - timer.start_comp("del_vir_prtcls") - debug_print(grid, "del_vir_prtcls") - - for cid in grid.get_virtual_tiles(): - tile = grid.get_tile(cid) - tile.delete_all_particles() - - timer.stop_comp("del_vir_prtcls") - - - - ################################################## - #filter - timer.start_comp("filter") - - #sweep over npasses times - for fj in range(conf.npasses): - - #update global neighbors (mpi) - grid.send_data(0) - grid.recv_data(0) - grid.wait_data(0) - - #get halo boundaries - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - 
tile.update_boundaries(grid) - - #filter each tile - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - flt.solve(tile) - - MPI.COMM_WORLD.barrier() # sync everybody - - - #-------------------------------------------------- - timer.stop_comp("filter") - - - #-------------------------------------------------- - #add current to E - timer.start_comp("add_cur") - debug_print(grid, "add_cur") - - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.deposit_current() - - timer.stop_comp("add_cur") - - #comm E - timer.start_comp("mpi_e2") - debug_print(grid, "mpi_e2") - - grid.send_data(1) - grid.recv_data(1) - grid.wait_data(1) - - timer.stop_comp("mpi_e2") - - - ################################################## - # data reduction and I/O - - timer.lap("step") - if (lap % conf.interval == 0): - debug_print(grid, "io") - if do_print: - print("--------------------------------------------------") - print("------ lap: {} / t: {}".format(lap, time)) - - #for cid in grid.get_tile_ids(): - # tile = grid.get_tile(cid) - # tile.erase_temporary_arrays() - - timer.stats("step") - timer.comp_stats() - timer.purge_comps() - - #analyze (independent) - timer.start("io") - - #shrink particle arrays - for cid in grid.get_tile_ids(): - tile = grid.get_tile(cid) - tile.shrink_to_fit_all_particles() - - #analyze - for cid in grid.get_local_tiles(): - tile = grid.get_tile(cid) - analyzer.analyze2d(tile) - - # barrier for quick writers - MPI.COMM_WORLD.barrier() - - debug_print(grid, "fld_writer") - #shallow IO - fld_writer.write(grid, lap) #quick field snapshots - - debug_print(grid, "prtcl_writer") - prtcl_writer.write(grid, lap) #test particles - - #deep IO - if (conf.full_interval > 0 and (lap % conf.full_interval == 0) and (lap > 0)): - debug_print(grid, "deep io") - - debug_print(grid, "deep write_yee") - pyvlv.write_yee(grid, lap, conf.outdir + "/full_output/" ) - debug_print(grid, "deep write_analysis") - pyvlv.write_analysis(grid, lap, conf.outdir + "/full_output/" ) - debug_print(grid, "deep write_prtcls") - pyvlv.write_particles(grid,lap, conf.outdir + "/full_output/" ) - - - #restart IO (overwrites) - if ((lap % conf.restart == 0) and (lap > 0)): - debug_print(grid, "restart io") - #flip between two sets of files - deep_io_switch = 1 if deep_io_switch == 0 else 0 - - debug_print(grid, "write_yee") - pyvlv.write_yee(grid, deep_io_switch, conf.outdir + "/restart/" ) - debug_print(grid, "write_prtcls") - pyvlv.write_particles(grid,deep_io_switch, conf.outdir + "/restart/" ) - - #if successful adjust info file - MPI.COMM_WORLD.barrier() # sync everybody in case of failure before write - if grid.rank() == 0: - with open(conf.outdir+"/restart/laps.txt", "a") as lapfile: - lapfile.write("{},{}\n".format(lap, deep_io_switch)) - - - #-------------------------------------------------- - #2D plots - if do_plots: - try: - plotNode(axs[0], grid, conf) - - yee = getYee2D(grid, conf) - plot2dYee(axs[1], yee, grid, conf, 'rho', label_title=True) - plot2dYee(axs[2], yee, grid, conf, 'jx' , label_title=True) - plot2dYee(axs[3], yee, grid, conf, 'jy' , label_title=True) - plot2dYee(axs[4], yee, grid, conf, 'jz' , label_title=True) - plot2dYee(axs[5], yee, grid, conf, 'ex' , label_title=True) - plot2dYee(axs[6], yee, grid, conf, 'ey' , label_title=True) - plot2dYee(axs[7], yee, grid, conf, 'ez' , label_title=True) - plot2dYee(axs[8], yee, grid, conf, 'bx' , label_title=True) - plot2dYee(axs[9], yee, grid, conf, 'by' , label_title=True) - plot2dYee(axs[10], yee, grid, conf, 'bz' , 
label_title=True) - saveVisz(lap, grid, conf) - except: - #print() - pass - timer.stop("io") - - - timer.stats("io") - timer.start("step") #refresh lap counter (avoids IO profiling) - - sys.stdout.flush() - - #next step - time += conf.cfl/conf.c_omp - #end of loop - - timer.stop("total") - timer.stats("total") diff --git a/projects/shocks/weibel_mini.ini b/projects/shocks/weibel_mini.ini deleted file mode 100644 index 237c37d9..00000000 --- a/projects/shocks/weibel_mini.ini +++ /dev/null @@ -1,63 +0,0 @@ - -[io] -outdir: "weibel_mini" -interval: 10 #output frequency in units of simulation steps for analysis files -full_interval: 20 #output frequency to write full simulation snapshots -restart: 20 #frequency to write restart files (these overwrite previous files) -stride: 1 #output reduce factor; NxMesh/stride must be int -laprestart: -1 #restart switch (-1 no restart; 0 automatic; X lap to restart) - - -#simulation parameters -[simulation] -cfl: 0.45 #time step in units of CFL -Nt: 100 - - -[problem] -Nspecies: 2 #number of species (typically 2) -delgam: 3.0e-1 #temperature -temp_ratio: 1.0 #T_i/T_e - -gamma: 10.0 #bulk flow speed - -me: -1.0 #electron mass-to-charge -mi: +1.0 #ion mass-to-charge - -#if non-zero add external non-evolving field -sigma: 0.0 # magnetization number (omega_ce/omega_pe)^2 - -npasses: 8 #number of current filter passes - - -bphi: 90.0 #Bfield z angle (bphi=0 bz, bphi=90 -> x-y plane) -btheta: 0.0 #Bfield x-y angle: btheta=0 -> parallel - -wallgamma: 1.0 - - -#spatial grid parameters -[grid] -Nx: 16 -Ny: 16 -Nz: 1 -NxMesh: 10 -NyMesh: 10 -NzMesh: 1 - -dx: 1.0 -dy: 1.0 -dz: 1.0 - - -#individual velocity mesh parameters -[vmesh] -Nvx: 1 -Nvy: 1 -Nvz: 1 - -[particles] -ppc: 4 #particle per cell per species -c_omp: 10 - -n_test_prtcls: 10000 #number of test particles used
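# ----------------------------------------------------------------------
# [editor's note, not part of the original patch] a hypothetical launch
# line for the refactored simulation script whose main loop appears
# above, assuming it accepts the same --conf flag as the other scripts
# and runs under MPI via mpi4py (adjust rank count and config file to
# your setup):
#
#     mpirun -np 4 python3 pic.py --conf shock.ini
# ----------------------------------------------------------------------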