#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File : already_seen.py
# Description : Here we evaluate how well a reservoir can tell whether it has already seen
#               an input or not. We send a one into the reservoir and later check whether
#               the readout reports that this input was already presented.
# Author : Nils Schaetti <nils.schaetti@unine.ch>
# Date : 01.02.2017 17:59:05
# Place : Nyon, Switzerland
#
# This file is part of the Reservoir Computing Memory Project.
# The Reservoir Computing Memory Project is a set of free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Reservoir Computing Memory Project is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the Reservoir Computing Memory Project. If not, see <http://www.gnu.org/licenses/>.
#
#
import Oger
from tools.already_seen_language import AlreadySeenLanguage
from tools.metrics import remembering_rate, equal_output, lucidity
import mdp
import os
import cPickle
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import numpy as np
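# NOTE: this script targets Python 2 (cPickle, list-returning zip) and the Oger / mdp
# reservoir computing toolkits.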
#########################################################################
#
# Experiment settings
#
#########################################################################
# Reservoir Properties
rc_LeakRate = 0.15 # Leak rate
rc_InputScaling = 0.25 # Input scaling
rc_Size = 50 # Reservoir size
rc_Sparsity = 0 # Reservoir sparsity
rc_SpectralRadius = 0.99 # Spectral radius
# Data set properties
rc_DatasetSize = 40  # Dataset size (number of samples)
rc_MemoryLength = 100  # How long the entry must be remembered
rc_TrainingLength = 30  # Training set length (number of samples)
rc_TestLength = rc_DatasetSize - rc_TrainingLength  # Test set length (number of samples)
rc_Threshold = 0.3  # Threshold above which the input is considered already seen
rc_SampleLength = 1000  # Length of a sample
rc_SloppingMemory = False  # Is the memory slowly fading away?
####################################################
# Main function
####################################################
if __name__ == "__main__":
    # Generate the dataset
    generator = AlreadySeenLanguage([0, 0, 1], [[1, 0, 0], [0, 1, 0]], memory_length=rc_MemoryLength, slopping_memory=rc_SloppingMemory)
    inputs, outputs = generator.generateDataset(sample_length=rc_SampleLength, n_samples=rc_DatasetSize, sparsity=rc_Sparsity)
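    # Assumed from the generator's parameters and the slicing below: inputs and outputs are
    # lists of rc_DatasetSize sequences of length rc_SampleLength, one 3-dimensional input
    # sequence and its matching target sequence per sample.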
    # Reservoir
    # reservoir = Oger.nodes.ReservoirNode(input_dim=3, output_dim=rc_Size, input_scaling=rc_InputScaling, set_initial_state=False, my_initial_state=np.random.rand(rc_Size))
    reservoir = Oger.nodes.LeakyReservoirNode(input_dim=3, output_dim=rc_Size, input_scaling=rc_InputScaling, leak_rate=rc_LeakRate)
    readout = Oger.nodes.RidgeRegressionNode()
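    # Together these two nodes form an echo state network: the leaky reservoir provides a fixed
    # random recurrent expansion of the input, and only the ridge-regression readout is trained.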
    # Training and test sets
    training_in, training_out = inputs[0:rc_TrainingLength], outputs[0:rc_TrainingLength]
    test_in, test_out = inputs[rc_TrainingLength:], outputs[rc_TrainingLength:]

    # Create the flow
    flow = mdp.Flow([reservoir, readout], verbose=1)

    # Reservoir input data
    data = [training_in, zip(training_in, training_out)]
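    # mdp.Flow.train expects one data entry per node: the reservoir (untrained) only receives
    # the input sequences, while the ridge readout receives (input, target) pairs built with zip.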
    # Train
    flow.train(data)

    # Accumulators for the test loop
    sample_pos = 0
    remembering_rates = 0.0
    lucidities = 0.0
    esn_outs = []
    final_outs = []
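    # Evaluation: each test sequence is pushed through the trained flow, the absolute readout
    # value is thresholded at rc_Threshold to get a binary "already seen" decision, and the
    # remembering rate and lucidity metrics are averaged over the whole test set.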
    for sample_test in test_in:
        # Evaluate the trained flow on this test sequence
        esn_out = flow(sample_test)
        esn_outs += [esn_out]

        # Threshold the analog output to {0, 1}
        final_out = []
        for out in esn_out:
            if np.abs(out) > rc_Threshold:
                final_out = final_out + [1.0]
            else:
                final_out = final_out + [0.0]
        final_outs = final_outs + [final_out]

        # Accumulate the metrics for this sample
        remembering_rates += remembering_rate(final_out, test_out[sample_pos])
        lucidities += lucidity(final_out, test_out[sample_pos])
        sample_pos += 1
print("Rembering rate : " + str(remembering_rates / float(len(test_in))))
print("Lucidity : " + str(lucidities / float(len(test_in))))
    f, graph = plt.subplots(4)
    for p in range(0, 4):
        average = np.average(esn_outs[sample_pos - 1 - p])
        a_average = np.full((len(esn_outs[sample_pos - 1 - p])), average)
        diff = np.abs(esn_outs[sample_pos - 1 - p] - a_average) / np.max(np.abs(esn_outs[sample_pos - 1 - p] - a_average))
        graph[p].plot(np.abs(esn_outs[sample_pos - 1 - p]))
        # graph[p].plot(diff)
        graph[p].plot(test_out[sample_pos - 1 - p])
        graph[p].plot(final_outs[sample_pos - 1 - p])
        # graph[p].plot(a_average)
    plt.show()