# Ian_functions.py
import dirfile_functions as df
import numpy as np
import matplotlib.pyplot as plt
import pygetdata as gd
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
from scipy.stats import binned_statistic_2d
import pandas as pd
from scipy import signal
# default data directories (relative paths)
roach_path = 'roach_data/'
ancillary_path = 'xy_stage/'
old_path = '2012_data/'
bolo_path = '2012_data/bolo_data/'
def avg_arrays(how_long, array):
    # boxcar-average consecutive blocks of how_long samples
    counter = 0
    current_sum = 0.
    out_array = []
    for i in range(len(array)):
        current_sum += array[i]
        counter += 1
        if counter == how_long:
            counter = 0
            out_array.append(float(current_sum/how_long))
            current_sum = 0.
    return out_array
# NOTE: this redefinition overrides avg_arrays above; it is the same boxcar
# average with debugging printouts of the running sums.
def avg_arrays(how_long, array):
    counter = 0
    current_sum = 0.
    out_array = []
    for i in range(len(array)):
        current_sum += array[i]
        counter += 1
        print(counter, current_sum)
        if counter == how_long:
            counter = 0
            print(current_sum/how_long)
            out_array.append(float(current_sum/how_long))
            current_sum = 0.
    return out_array
def IQtoMag(i_vals, q_vals):
    mag = []
    for i in range(len(i_vals)):
        mag.append(np.sqrt(np.square(i_vals[i])+np.square(q_vals[i])))
    return mag
def data_generator(x_array, y_array, value_array):
    out_array = []
    for i in range(len(x_array)):
        out_array.append([x_array[i], y_array[i], value_array[i]])
    return out_array
def fancier_avg(array, length):
    nsamples = float(len(array))/float(length)
    counter = 0.
    counter_limit = np.floor(nsamples)
    run_sum = 0.
    offset_check = float(np.mod(nsamples, 1.))
    i_offset = 0.
    f_offset = float(np.mod(nsamples, 1.))
    avg_array = []
    for i in range(len(array)):
        if counter < counter_limit:
            run_sum += array[i]
            #print counter, i
            counter += 1.
        else:
            #print counter, i, 'split value'
            run_sum += f_offset*array[i]
            avg_array.append(run_sum/nsamples)
            i_offset = 1.-f_offset
            run_sum = i_offset*array[i]
            if i_offset > offset_check:
                f_offset = 1.+offset_check-i_offset
                counter_limit = np.floor(nsamples)-1
                #print i_offset, f_offset, counter_limit, 'was bigger'
            else:
                f_offset = offset_check-i_offset
                counter_limit = np.floor(nsamples)
                #print i_offset, f_offset, counter_limit, 'was smaller'
            counter = 0.
    return avg_array
def IQtoPhase(Iarray, Qarray):
    # note: np.arctan only returns values in (-pi/2, pi/2); use np.arctan2(Q, I)
    # if full quadrant information is needed
    out_array = []
    for i in range(len(Iarray)):
        if Iarray[i] != 0:
            out_array.append(np.arctan(Qarray[i]/Iarray[i]))
        else:
            out_array.append(np.pi/2.)
    return out_array
# stolen from stack overflow to test
def butter_highpass(cutoff, fs, order=5):
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
    return b, a
def butter_highpass_filter(data, cutoff, fs, order=5):
    b, a = butter_highpass(cutoff, fs, order=order)
    y = signal.filtfilt(b, a, data)
    return y
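# Minimal usage sketch for the high-pass filter above. The sample rate (488 Hz),
# cutoff (0.1 Hz), and the synthetic timestream are illustrative assumptions,
# not values taken from the real data.
def example_highpass_usage():
    fs = 488.0        # assumed detector sample rate in Hz
    cutoff = 0.1      # assumed cutoff frequency in Hz
    t = np.arange(0, 10, 1.0/fs)
    # slow drift plus noise stands in for a real bolometer timestream
    timestream = np.sin(2*np.pi*0.02*t) + 0.01*np.random.randn(len(t))
    filtered = butter_highpass_filter(timestream, cutoff, fs, order=3)
    return filtered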
#### below here are newer useful functions, above is preserved for records
def loadArbData(dirfile, file, file_type):
    # read a single named field out of a dirfile and return it as a numpy array
    d = gd.dirfile(dirfile, gd.RDWR|gd.UNENCODED)
    vectors = d.field_list()
    #print vectors
    values = None
    for i in range(len(vectors)):
        if vectors[i] == file:
            if file_type == 'u16':
                values = d.getdata(file, gd.UINT16, num_frames=d.nframes)
            if file_type == 'u32':
                values = d.getdata(file, gd.UINT32, num_frames=d.nframes)
            if file_type == 's32':
                values = d.getdata(file, gd.INT32, num_frames=d.nframes)
    if values is None:
        raise ValueError('field %s not found or unsupported file_type %s' % (file, file_type))
    return np.asarray(values)
def arb_binning(ra, dec, bolo_data, pixel_size):
    #pixel size is in degrees so pass ra and dec in degrees
    # returns bolo data binned for map processing
    n_ra = int(np.ceil((ra.max()-ra.min())/pixel_size))+1
    n_dec = int(np.ceil((dec.max()-dec.min())/pixel_size))+1
    ra_bins = np.amin(ra)-pixel_size/2.+np.arange(n_ra+1)*pixel_size
    dec_bins = np.amin(dec)-pixel_size/2.+np.arange(n_dec+1)*pixel_size
    i_ind = np.digitize(ra, ra_bins)
    j_ind = np.digitize(dec, dec_bins)
    bolo_array = np.zeros((len(bolo_data), 3))
    bolo_array[:,0] = bolo_data
    bolo_array[:,1] = i_ind
    bolo_array[:,2] = j_ind
    size = [n_ra, n_dec]
    return bolo_array, size
def ra_to_deg(ra):
    # convert RA from hours to degrees
    ra = ra*15
    return ra
def ra_dec_to_array(ra, dec):
    data_out = np.zeros((len(ra), 2))
    data_out[:,0] = ra
    data_out[:,1] = dec
    return data_out
def despike_naive(bolo_data):
    # crude despiking: compare each sample against the samples 60 points to
    # either side and replace outliers with the mean of those neighbours
    out_data = np.array(bolo_data, dtype=float)   # copy so the input is not modified
    for i in range(60, len(bolo_data)-60):
        val = bolo_data[i]
        l_val = bolo_data[i-60]
        r_val = bolo_data[i+60]
        if np.abs(l_val - val) > 0.0005 or np.abs(r_val - val) > 0.0005:
            val = np.mean([l_val, r_val])
        out_data[i] = val
    return out_data
def clean_up_world(world):
    ra_vals = world[:,0]
    dec_vals = world[:,1]
    ra_vals = np.round(ra_vals)
    dec_vals = np.round(dec_vals)
    out_world = np.zeros((len(world), 2))
    out_world[:,0] = ra_vals
    out_world[:,1] = dec_vals
    ra_size = ra_vals.max()
    dec_size = dec_vals.max()
    size = [int(ra_size), int(dec_size)]
    return out_world.astype(int), size
def sort_bolo(bolo_data, ra_dec_bins, size, splash_flag=0):
    data_array = np.zeros((size[1]+2, size[0]+2))
    for i in range(len(bolo_data)):
        ra_bin = ra_dec_bins[i][0]
        dec_bin = ra_dec_bins[i][1]
        data_array[dec_bin][ra_bin] += bolo_data[i]
        if splash_flag == 1:
            data_array[dec_bin-1][ra_bin] += bolo_data[i]
            data_array[dec_bin+1][ra_bin] += bolo_data[i]
            data_array[dec_bin][ra_bin-1] += bolo_data[i]
            data_array[dec_bin][ra_bin+1] += bolo_data[i]
            data_array[dec_bin-1][ra_bin-1] += bolo_data[i]
            data_array[dec_bin+1][ra_bin-1] += bolo_data[i]
            data_array[dec_bin-1][ra_bin+1] += bolo_data[i]
            data_array[dec_bin+1][ra_bin+1] += bolo_data[i]
    return data_array
# Could be rewritten for speed, but it is relatively fast already. With no splash
# the beam size is ~0.0333 deg; with splash set it to 0.0116667.
# For posterity, the original processing function; use arb_binning now.
def arb_map_gen(ra, dec, bolo_data, pixel_size):
    #note that the bolo_data, ra, and dec arrays passed in must all be the same length
    # process the RA and DEC data into degrees first and interpolate.
    #pixel size is in degrees so pass ra and dec in degrees
    n_ra = int(np.ceil((ra.max()-ra.min())/pixel_size))
    n_dec = int(np.ceil((dec.max()-dec.min())/pixel_size))
    ra_bins, dec_bins = [], []
    ijbol_list = []
    size = [n_ra, n_dec]
    print(n_ra)
    print(n_dec)
    for i in range(0, n_ra):
        ra_bins.append(ra.min()+i*pixel_size)
    for j in range(0, n_dec):
        dec_bins.append(dec.min()+j*pixel_size)
    ra_bins = np.asarray(ra_bins)
    dec_bins = np.asarray(dec_bins)
    for n in range(0, len(bolo_data)):
        ra_test = np.abs(ra_bins-ra[n])
        dec_test = np.abs(dec_bins-dec[n])
        i_ind = np.where(ra_test == np.min(ra_test))
        j_ind = np.where(dec_test == np.min(dec_test))
        ijbol_list.append([bolo_data[n], i_ind[0][0], j_ind[0][0]])
    return ijbol_list, size
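# Minimal end-to-end sketch of the newer map-making path (loadArbData ->
# ra_to_deg -> despike_naive -> arb_binning -> sort_bolo). The dirfile field
# names ('RA', 'DEC', 'bolo_ch0'), their types, and the 0.0333 deg pixel size
# are illustrative assumptions, not values confirmed by the data set.
def example_map_pipeline(dirfile_path):
    ra = loadArbData(dirfile_path, 'RA', 'u32').astype(float)       # hypothetical field name
    dec = loadArbData(dirfile_path, 'DEC', 's32').astype(float)     # hypothetical field name
    bolo = loadArbData(dirfile_path, 'bolo_ch0', 'u32').astype(float)  # hypothetical field name
    ra = ra_to_deg(ra)             # convert RA hours to degrees
    bolo = despike_naive(bolo)     # crude spike removal
    # bin pointing onto a grid; pixel size chosen to match the no-splash beam size
    bolo_array, size = arb_binning(ra, dec, bolo, 0.0333)
    # arb_binning returns (value, i, j) rows; sort_bolo expects integer [i, j] bin pairs
    ra_dec_bins = bolo_array[:, 1:3].astype(int)
    map_2d = sort_bolo(bolo, ra_dec_bins, size, splash_flag=0)
    return map_2d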