# LogKeyModel_train.py (forked from wuyifan18/DeepLog)
import argparse
import os

import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.utils.data import TensorDataset, DataLoader

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
window_size = 10
input_size = 1
hidden_size = 64
num_layers = 2
num_classes = 28
num_epochs = 300
batch_size = 2048
model_dir = 'model'
log = 'Adam_batch_size=' + str(batch_size) + ';epoch=' + str(num_epochs)


def generate(name):
    """Turn each session in data/<name> into sliding windows of length `window_size`,
    each paired with the log key that immediately follows the window."""
    num_sessions = 0
    inputs = []
    outputs = []
    with open('data/' + name, 'r') as f:
        for line in f.readlines():
            num_sessions += 1
            # Shift the space-separated key ids down by one so they are 0-based class indices.
            line = tuple(map(lambda n: n - 1, map(int, line.strip().split())))
            for i in range(len(line) - window_size):
                inputs.append(line[i:i + window_size])
                outputs.append(line[i + window_size])
    print('Number of sessions({}): {}'.format(name, num_sessions))
    print('Number of seqs({}): {}'.format(name, len(inputs)))
    dataset = TensorDataset(torch.tensor(inputs, dtype=torch.float), torch.tensor(outputs))
    return dataset
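

# Worked example of what generate() produces (the data format is assumed from the
# `n - 1` shift above: each line of data/hdfs_train is one session of space-separated,
# 1-based log key ids). With window_size = 3, the line "5 5 22 11 9" becomes the
# 0-based session (4, 4, 21, 10, 8) and yields:
#   inputs:  (4, 4, 21), (4, 21, 10)
#   outputs: 10,         8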


class Model(nn.Module):
    """Stacked LSTM over a window of log keys, followed by a linear layer that scores
    the `num_keys` candidates for the next log key."""

    def __init__(self, input_size, hidden_size, num_layers, num_keys):
        super(Model, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_keys)

    def forward(self, x):
        # Zero-initialise the hidden and cell states for each batch.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        out, _ = self.lstm(x, (h0, c0))
        # Only the last time step is used to predict the next log key.
        out = self.fc(out[:, -1, :])
        return out
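

# The training script below only saves a state_dict, so anything that consumes the
# checkpoint has to rebuild Model with matching hyperparameters before loading it.
# The two helpers here are an illustrative sketch of that (they are hypothetical and
# are not called anywhere in this script); the candidate-set test follows DeepLog's
# rule that a log key counts as normal if it appears among the top-k predicted next keys.
def load_trained_model(path, num_keys=num_classes):
    """Hypothetical helper: rebuild the LSTM and load saved weights from `path`."""
    m = Model(input_size, hidden_size, num_layers, num_keys).to(device)
    m.load_state_dict(torch.load(path, map_location=device))
    m.eval()
    return m


def is_next_key_expected(m, window, next_key, num_candidates=9):
    """Hypothetical helper: True if `next_key` is among the top-`num_candidates`
    predicted next keys for the 0-based key sequence `window`."""
    seq = torch.tensor(window, dtype=torch.float).view(-1, len(window), input_size).to(device)
    with torch.no_grad():
        output = m(seq)
    top_candidates = torch.argsort(output, 1)[0][-num_candidates:]
    return next_key in top_candidates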


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-num_layers', default=2, type=int)
    parser.add_argument('-hidden_size', default=64, type=int)
    parser.add_argument('-window_size', default=10, type=int)
    args = parser.parse_args()
    num_layers = args.num_layers
    hidden_size = args.hidden_size
    window_size = args.window_size

    model = Model(input_size, hidden_size, num_layers, num_classes).to(device)
    seq_dataset = generate('hdfs_train')
    dataloader = DataLoader(seq_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
    writer = SummaryWriter(logdir='log/' + log)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())

    # Train the model
    total_step = len(dataloader)
    for epoch in range(num_epochs):  # Loop over the dataset multiple times
        train_loss = 0
        for step, (seq, label) in enumerate(dataloader):
            # Forward pass
            seq = seq.clone().detach().view(-1, window_size, input_size).to(device)
            output = model(seq)
            loss = criterion(output, label.to(device))

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            train_loss += loss.item()
            optimizer.step()
        print('Epoch [{}/{}], Train_loss: {:.4f}'.format(epoch + 1, num_epochs, train_loss / len(dataloader.dataset)))
        writer.add_scalar('train_loss', train_loss / len(dataloader.dataset), epoch + 1)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    torch.save(model.state_dict(), model_dir + '/' + log + '.pt')
    writer.close()
    print('Finished Training')
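
# Usage note: with the defaults above, an assumed invocation such as
#   python LogKeyModel_train.py -num_layers 2 -hidden_size 64 -window_size 10
# reads the training sessions from data/hdfs_train, writes TensorBoard summaries under
# log/Adam_batch_size=2048;epoch=300, and saves the weights to
# model/Adam_batch_size=2048;epoch=300.pt. The loss curve can be viewed with
# `tensorboard --logdir log`.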