utilities.py
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 11:44:03 2020
@author: Danish
"""
import time
import sys
import tensorflow as tf
import pandas as pd
from tqdm import tqdm


class Timer:
    """Simple stopwatch for timing batches and estimating the time remaining."""
    def __init__(self):
        self.begin = 0
        self.total_time = 0

    def restart(self):
        self.begin = time.time()

    def start(self):
        self.begin = time.time()

    def time_converter(self, seconds):
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return "%02d:%02d:%02d" % (h, m, s)

    def get_time_hhmmss(self, rem_batches):
        end = time.time()
        time_taken = end - self.begin
        # Estimate the time remaining from the duration of the last timed interval.
        remaining_time = time_taken * rem_batches
        time_str = self.time_converter(remaining_time)
        self.total_time += time_taken
        return time_str, time_taken

    def get_total_time(self):
        return self.time_converter(self.total_time)


# Some utility methods
def PrintInline(string):
    sys.stdout.write('\r' + string)
    sys.stdout.flush()
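

# Illustrative usage sketch (not part of the original module): one way Timer and
# PrintInline might be combined to print a single-line progress readout with an
# estimated time remaining. The batch count and the `time.sleep` stand-in for
# real work are hypothetical.
def _demo_timer_progress(n_batches=5):
    timer = Timer()
    for batch in range(n_batches):
        timer.restart()
        time.sleep(0.1)  # stand-in for one batch of real work
        rem_batches = n_batches - (batch + 1)
        eta, taken = timer.get_time_hhmmss(rem_batches)
        PrintInline('Batch {0}/{1} | last batch: {2:.2f}s | ETA: {3}'.format(
            batch + 1, n_batches, taken, eta))
    print('\nTotal time: ' + timer.get_total_time())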


def TF_GPUsetup(GB=4):
    """
    Restrict TensorFlow to allocating only `GB` gigabytes of memory on the first
    GPU. Often needed when the GPU runs out of memory; it is one possible fix for
    the error: "Failed to get convolution algorithm. This is probably because
    cuDNN failed to initialize".

    Parameters
    ----------
    GB : int, optional
        The amount of GPU memory (in GB) to use. It is recommended to use 1 GB
        less than your total GPU memory. The default is 4.

    Returns
    -------
    None.
    """
    if not isinstance(GB, int):
        raise TypeError('Parameter `GB` must be an `int`, ideally 1 GB less than your total GPU memory.')
    gpus = tf.config.experimental.list_physical_devices('GPU')
    config = [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024 * GB)]
    if gpus:
        # Restrict TensorFlow to only allocate `GB` gigabytes of memory on the first GPU.
        try:
            tf.config.experimental.set_virtual_device_configuration(gpus[0], config)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Virtual devices must be set before GPUs have been initialized.
            print(e)
    print('\nTensorflow GPU installed: ' + str(tf.test.is_built_with_cuda()))
    print('Is Tensorflow using GPU: ' + str(tf.test.is_gpu_available()))
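

# Illustrative usage sketch (not part of the original module): TF_GPUsetup is
# meant to be called once, before any tensors or models are created, because
# virtual devices must be configured before the GPU is initialized. The 3 GB
# cap below is an arbitrary example value.
def _demo_gpu_setup():
    TF_GPUsetup(GB=3)  # cap the first GPU at roughly 3 GB
    # ...build and train models after this point.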


def to_weeks(obj, format_='%m%d%Y', splitter='/', convert=True, year_index=2, keep=None):
    """
    Convert a sequence of date strings into week-of-year numbers.

    Parameters
    ----------
    obj : list or pandas.Series
        The date strings to convert, e.g. values like '6/14/2020'.
    format_ : str, optional
        Datetime format passed to `pd.to_datetime` after the strings are
        normalized. The default is '%m%d%Y'.
    splitter : str, optional
        Character separating the date components. The default is '/'.
    convert : bool, optional
        Whether to normalize the strings (zero-pad components and expand
        two-digit years) before parsing. The default is True.
    year_index : int, optional
        Position of the year among the split components. The default is 2.
    keep : int, optional
        If you want to keep only a certain number of characters from the given
        string and remove the rest, specify how many characters to keep.
        The default is None.

    Returns
    -------
    pandas.Series
        The week number of each date.
    """
    if convert:
        for j in tqdm(range(len(obj))):
            if keep:
                split = obj[j][0:keep].split(splitter)
            else:
                split = obj[j].split(splitter)
            # Zero-pad single-digit day/month components.
            new = ['0' + i if len(i) < 2 else i for i in split]
            # Expand two-digit years by prepending the century, e.g. '19' -> '2019'.
            if len(new[year_index]) < 4:
                new[year_index] = '20' + new[year_index]
            obj[j] = ''.join(new)
    obj = pd.to_datetime(obj, format=format_)
    # Note: newer pandas versions removed `Series.dt.week`; use
    # `obj.dt.isocalendar().week` there.
    return obj.dt.week
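

# Illustrative usage sketch (not part of the original module): to_weeks expects
# a mutable sequence (e.g. a list) of date strings in month/day/year order when
# the default format '%m%d%Y' is used. The sample dates below are made up.
def _demo_to_weeks():
    dates = ['6/14/2020', '12/1/20', '1/5/2021']
    weeks = to_weeks(dates, format_='%m%d%Y', splitter='/', year_index=2)
    print(weeks)  # week-of-year numbers as a pandas Series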


def extract_sub_df(df, idx):
    """Return df.loc[idx] as a DataFrame, even when it matches a single row."""
    if isinstance(df.loc[idx], pd.Series):
        # A single matching row comes back as a Series; wrap the label in a list
        return df.loc[[idx]]
    else:
        return df.loc[idx]
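

# Illustrative usage sketch (not part of the original module): extract_sub_df
# always hands back a DataFrame, even when the label matches only one row
# (where df.loc[idx] alone would return a Series). The toy frame is made up.
if __name__ == '__main__':
    _df = pd.DataFrame({'store': [1, 1, 2], 'sales': [10, 20, 30]}).set_index('store')
    print(extract_sub_df(_df, 1))  # two rows for store 1 -> DataFrame
    print(extract_sub_df(_df, 2))  # one row for store 2 -> still a DataFrame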