Adding JET tests #252

Open · wants to merge 2 commits into base: master

6 changes: 4 additions & 2 deletions Project.toml
@@ -26,9 +26,10 @@ RCSparseArraysExt = "SparseArrays"
[compat]
Adapt = "4.1.1"
Aqua = "0.8"
CellularAutomata = "0.0.2"
CellularAutomata = "0.0.6"
Compat = "4.16.0"
DifferentialEquations = "7.15.0"
JET = "0.9.18"
LIBSVM = "0.8"
LinearAlgebra = "1.10"
MLJLinearModels = "0.9.2, 0.10"
@@ -46,11 +47,12 @@ julia = "1.10"
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b"
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Aqua", "Test", "SafeTestsets", "DifferentialEquations", "MLJLinearModels", "LIBSVM", "Statistics", "SparseArrays"]
test = ["Aqua", "Test", "SafeTestsets", "DifferentialEquations", "MLJLinearModels", "LIBSVM", "Statistics", "SparseArrays", "JET"]
6 changes: 3 additions & 3 deletions src/ReservoirComputing.jl
@@ -1,11 +1,11 @@
module ReservoirComputing

using Adapt: adapt
using CellularAutomata: CellularAutomaton
using CellularAutomata: CellularAutomaton, AbstractCA
using Compat: @compat
using LinearAlgebra: eigvals, mul!, I, qr, Diagonal
using NNlib: fast_act, sigmoid
using Random: Random, AbstractRNG, randperm
using Random: Random, AbstractRNG, randperm, rand
using Reexport: Reexport, @reexport
using WeightInitializers: DeviceAgnostic, PartialFunction, Utils
@reexport using WeightInitializers
@@ -40,7 +40,7 @@ export scaled_rand, weighted_init, informed_init, minimal_init, chebyshev_mapping,
logistic_mapping, modified_lm
export rand_sparse, delay_line, delay_line_backward, cycle_jumps,
simple_cycle, pseudo_svd, chaotic_init
export RNN, MRNN, GRU, GRUParams, FullyGated, Minimal
export RNN, MRNN, GRU, GRUParams, FullyGated
export train
export ESN, HybridESN, KnowledgeModel, DeepESN
export RECA
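
Dropping `Minimal` from the export list does not delete the type; while its driver is being reworked it can still be reached fully qualified. A sketch, assuming the struct definition itself stays in place and that `GRU` keeps its documented `variant` keyword (default `FullyGated()`):

```julia
using ReservoirComputing

# `Minimal` is no longer exported, so it must be qualified explicitly.
gru_driver = GRU(; variant = ReservoirComputing.Minimal())
```
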
57 changes: 25 additions & 32 deletions src/esn/esn_inits.jl
@@ -146,7 +146,7 @@ Create an input layer for informed echo state networks [^Pathak2018].
Chaos: An Interdisciplinary Journal of Nonlinear Science 28.4 (2018).
"""
function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
scaling=T(0.1), model_in_size, gamma=T(0.5)) where {T <: Number}
scaling=T(0.1), model_in_size::Int, gamma=T(0.5)) where {T <: Number}
res_size, in_size = dims
state_size = in_size - model_in_size

@@ -162,18 +162,17 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
for i in 1:num_for_state
idxs = findall(Bool[zero_connections .== input_matrix[i, :]
for i in 1:size(input_matrix, 1)])
random_row_idx = idxs[DeviceAgnostic.rand(rng, T, 1:end)]
random_clm_idx = range(1, state_size; step=1)[DeviceAgnostic.rand(rng, T, 1:end)]
random_row_idx = idxs[rand(rng, 1:length(idxs))]
random_clm_idx = rand(rng, 1:state_size)
input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) -
T(0.5)) .* (T(2) * scaling)
end

for i in 1:num_for_model
idxs = findall(Bool[zero_connections .== input_matrix[i, :]
for i in 1:size(input_matrix, 1)])
random_row_idx = idxs[DeviceAgnostic.rand(rng, T, 1:end)]
random_clm_idx = range(state_size + 1, in_size; step=1)[DeviceAgnostic.rand(
rng, T, 1:end)]
random_row_idx = idxs[rand(rng, 1:length(idxs))]
random_clm_idx = rand(rng, (state_size + 1):in_size)
input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) -
T(0.5)) .* (T(2) * scaling)
end
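
The rewritten loops above draw integer indices directly from ranges, rather than routing index selection through `DeviceAgnostic.rand` with a range argument. A standalone sketch of the sampling pattern with made-up sizes (`rng` kept explicit so results stay reproducible):

```julia
using Random

rng = Random.default_rng()
state_size, in_size = 3, 5
idxs = [2, 5, 9]   # hypothetical rows that are still all-zero

row       = idxs[rand(rng, 1:length(idxs))]      # uniform pick among candidate rows
state_col = rand(rng, 1:state_size)              # column inside the state block
model_col = rand(rng, (state_size + 1):in_size)  # column inside the model block
```
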
@@ -298,7 +297,7 @@ function irrational(rng::AbstractRNG, ::Type{T}, res_size::Int, in_size::Int;
end
end

return T.(input_matrix)
return map(T, input_matrix)
end
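
`map(T, input_matrix)` returns the same `Matrix{T}` as the broadcast `T.(input_matrix)`; the swap presumably reads better under JET's analysis (an assumption — the diff gives no rationale). A quick equivalence check:

```julia
using Test: @inferred

A = rand(Float64, 3, 3)
B = @inferred map(Float32, A)   # Matrix{Float32}, concrete and inferable
@assert B == Float32.(A)        # same values as the broadcast form
```
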

@doc raw"""
@@ -888,8 +887,8 @@ end

"""
pseudo_svd([rng], [T], dims...;
max_value=1.0, sparsity=0.1, sorted=true, reverse_sort=false,
return_sparse=false)
max_value=1.0, sparsity=0.1, sorted=true,
return_sparse=false, return_diag=false)

Returns an initializer to build a sparse reservoir matrix with the given
`sparsity` by using a pseudo-SVD approach as described in [^yang].
@@ -910,8 +909,6 @@ Returns an initializer to build a sparse reservoir matrix with the given
Default is 0.1
- `sorted`: A boolean indicating whether to sort the singular values before
creating the diagonal matrix. Default is `true`.
- `reverse_sort`: A boolean indicating whether to reverse the sorted
singular values. Default is `false`.
- `return_sparse`: flag for returning a `sparse` matrix.
Default is `false`.
- `return_diag`: flag for returning a `Diagonal` matrix. If both `return_diag`
@@ -936,13 +933,12 @@ julia> res_matrix = pseudo_svd(5, 5)
"""
function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...;
max_value::Number=T(1.0), sparsity::Number=0.1, sorted::Bool=true,
reverse_sort::Bool=false, return_sparse::Bool=false,
return_sparse::Bool=false,
return_diag::Bool=false) where {T <: Number}
throw_sparse_error(return_sparse)
reservoir_matrix = create_diag(rng, T, dims[1],
max_value;
sorted=sorted,
reverse_sort=reverse_sort)
sorted=sorted)
tmp_sparsity = get_sparsity(reservoir_matrix, dims[1])

while tmp_sparsity <= sparsity
@@ -960,25 +956,17 @@ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end
end

#hacky workaround for the moment
function rand_range(rng, T, n::Int)
return Int(1 + floor(DeviceAgnostic.rand(rng, T) * n))
end

function create_diag(rng::AbstractRNG, ::Type{T}, dim::Number, max_value::Number;
sorted::Bool=true, reverse_sort::Bool=false) where {T <: Number}
function create_diag(rng::AbstractRNG, ::Type{T}, dim::Integer, max_value::Number;
sorted::Bool=true) where {T <: Number}
diagonal_matrix = DeviceAgnostic.zeros(rng, T, dim, dim)
if sorted == true
if reverse_sort == true
diagonal_values = sort(
DeviceAgnostic.rand(rng, T, dim) .* max_value; rev=true)
diagonal_values[1] = max_value
else
diagonal_values = sort(DeviceAgnostic.rand(rng, T, dim) .* max_value)
diagonal_values[end] = max_value
end
else
diagonal_values = DeviceAgnostic.rand(rng, T, dim) .* max_value
diagonal_values = Array(DeviceAgnostic.rand(rng, T, dim) .* T(max_value))
if sorted
#if reverse_sort
# Base.sort!(diagonal_values; rev=true)
# diagonal_values[1] = T(max_value)
#else
Base.sort!(diagonal_values)
diagonal_values[end] = T(max_value)
end

for i in 1:dim
@@ -1003,6 +991,11 @@ function create_qmatrix(rng::AbstractRNG, ::Type{T}, dim::Number,
return qmatrix
end

#hacky workaround for the moment
function rand_range(rng, T, n::Int)
return Int(1 + floor(DeviceAgnostic.rand(rng, T) * n))
end

function get_sparsity(M, dim)
return size(M[M .!= 0], 1) / (dim * dim - size(M[M .!= 0], 1)) #nonzero/zero elements
end
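
With `reverse_sort` removed, `create_diag` always sorts ascending and pins the largest singular value to `max_value`. A standalone re-creation of that logic, with toy names and plain `rand` standing in for `DeviceAgnostic.rand`:

```julia
using LinearAlgebra, Random

function toy_create_diag(rng, ::Type{T}, dim::Integer, max_value) where {T <: Number}
    vals = rand(rng, T, dim) .* T(max_value)  # values in [0, max_value)
    sort!(vals)                               # ascending, as `sorted=true` now implies
    vals[end] = T(max_value)                  # largest singular value pinned
    return Diagonal(vals)
end

toy_create_diag(Xoshiro(42), Float32, 5, 2.0)  # 5×5 Diagonal, last entry exactly 2.0f0
```
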
34 changes: 20 additions & 14 deletions src/esn/esn_reservoir_drivers.jl
@@ -22,8 +22,8 @@ specified reservoir driver.
update.
"""
function create_states(reservoir_driver::AbstractReservoirDriver,
train_data::AbstractArray, washout::Int, reservoir_matrix::AbstractMatrix,
input_matrix::AbstractMatrix, bias_vector::AbstractArray)
train_data::AbstractArray{T,2}, washout::Int, reservoir_matrix::AbstractMatrix,
input_matrix::AbstractMatrix, bias_vector::AbstractArray) where {T<:Number}
train_len = size(train_data, 2) - washout
res_size = size(reservoir_matrix, 1)
states = adapt(typeof(train_data), zeros(res_size, train_len))
@@ -32,6 +32,7 @@ function create_states(reservoir_driver::AbstractReservoirDriver,

for i in 1:washout
yv = @view train_data[:, i]
_state = next_state!(_state, reservoir_driver, _state, yv, reservoir_matrix,
input_matrix, bias_vector, tmp_array)
end
@@ -47,8 +48,8 @@ function create_states(reservoir_driver::AbstractReservoirDriver,
end

function create_states(reservoir_driver::AbstractReservoirDriver,
train_data::AbstractArray, washout::Int, reservoir_matrix::Vector,
input_matrix::AbstractArray, bias_vector::AbstractArray)
train_data::AbstractArray{T,2}, washout::Int, reservoir_matrix::Vector,
input_matrix::AbstractArray, bias_vector::AbstractArray) where {T<:Number}
train_len = size(train_data, 2) - washout
res_size = sum([size(reservoir_matrix[i], 1) for i in 1:length(reservoir_matrix)])
states = adapt(typeof(train_data), zeros(res_size, train_len))
@@ -313,13 +314,13 @@ end

#check this one, not sure
function create_gru_layers(gru, variant::Minimal, res_size, in_size)
Wz_in = gru.inner_layer(res_size, in_size)
Wz = gru.reservoir(res_size, res_size)
bz = gru.bias(res_size, 1)
Wz_in = gru.inner_layer[1](res_size, in_size)
Wz = gru.reservoir[1](res_size, res_size)
bz = gru.bias[1](res_size, 1)

Wr_in = nothing
Wr = nothing
br = nothing
Wr_in = gru.inner_layer[2](res_size, in_size)
Wr = gru.reservoir[2](res_size, res_size)
br = gru.bias[2](res_size, 1)

return GRUParams(gru.activation_function, variant, Wz_in, Wz, bz, Wr_in, Wr, br)
end
@@ -357,14 +358,19 @@ function obtain_gru_state!(out, variant::FullyGated, gru, x, y, W, W_in, b, tmp_array)
end

#minimal
#=
function obtain_gru_state!(out, variant::Minimal, gru, x, y, W, W_in, b, tmp_array)
mul!(tmp_array[1], gru.Wz_in, y)
mul!(tmp_array[2], gru.Wz, x)
@. tmp_array[3] = gru.activation_function[1](tmp_array[1] + tmp_array[2] + gru.bz)

mul!(tmp_array[4], W_in, y)
mul!(tmp_array[5], W, tmp_array[3] .* x)
@. tmp_array[6] = gru.activation_function[2](tmp_array[4] + tmp_array[5] + b)
mul!(tmp_array[4], gru.Wr_in, y)
mul!(tmp_array[5], gru.Wr, x)
@. tmp_array[6] = gru.activation_function[2](tmp_array[4] + tmp_array[5] + gru.br)

return @. out = (1 - tmp_array[3]) * x + tmp_array[3] * tmp_array[6]
mul!(tmp_array[7], W_in, y)
mul!(tmp_array[8], W, tmp_array[6] .* x)
@. tmp_array[9] = gru.activation_function[3](tmp_array[7] + tmp_array[8] + b)
return @. out = (1 - tmp_array[3]) * x + tmp_array[3] * tmp_array[9]
end
=#
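
For reference, the commented-out rewrite above computes a fully gated update (separate update and reset gates) rather than the single-gate minimal form, which is presumably why it is parked behind `#= =#` until the `Minimal` variant is settled. The same computation in out-of-place form, with a hypothetical parameter container `p` (the package version writes into `tmp_array` buffers via `mul!`):

```julia
# z: update gate, r: reset gate, hcand: candidate state; returns the next state.
function gru_state(x, y, W, W_in, b, p)
    z     = p.σ[1].(p.Wz_in * y .+ p.Wz * x .+ p.bz)
    r     = p.σ[2].(p.Wr_in * y .+ p.Wr * x .+ p.br)
    hcand = p.σ[3].(W_in * y .+ W * (r .* x) .+ b)
    return (1 .- z) .* x .+ z .* hcand
end
```
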
4 changes: 2 additions & 2 deletions src/reca/reca.jl
@@ -25,8 +25,8 @@ arXiv preprint arXiv:1410.0162 (2014).
automata._” arXiv preprint arXiv:1703.02806 (2017).
"""
function RECA(train_data,
automata;
generations=8,
automata::AbstractCA;
generations::Int=8,
input_encoding=RandomMapping(),
nla_type=NLADefault(),
states_type=StandardStates())
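
With `automata::AbstractCA`, non-automaton arguments are now rejected at dispatch time. A usage sketch with made-up data; `DCA` is CellularAutomata.jl's discrete automaton wrapper, and the `RandomMapping(16, 40)` encoding follows the package docs:

```julia
using ReservoirComputing, CellularAutomata

train_data = rand(Float32, 2, 100)  # 2 features × 100 timesteps, made up
ca = DCA(90)                        # elementary cellular automaton, rule 90

reca = RECA(train_data, ca;
    generations = 16,
    input_encoding = RandomMapping(16, 40))
```
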
2 changes: 1 addition & 1 deletion src/reca/reca_input_encodings.jl
@@ -36,7 +36,7 @@ struct RandomMaps{T, E, G, M, S} <: AbstractEncodingData
ca_size::S
end

function create_encoding(rm::RandomMapping, input_data, generations)
function create_encoding(rm::RandomMapping, input_data::AbstractArray, generations::Int)
maps = init_maps(size(input_data, 1), rm.permutations, rm.expansion_size)
states_size = generations * rm.expansion_size * rm.permutations
ca_size = rm.expansion_size * rm.permutations
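
The first argument has to stay an array type because integers quietly answer `size` like zero-dimensional arrays: a scalar passed here would not error, it would just report a feature count of 1.

```julia
size(3, 1)            # == 1: numbers behave like 0-dim arrays under `size`
size(rand(4, 10), 1)  # == 4: the actual number of input features
```
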
4 changes: 2 additions & 2 deletions src/states.jl
@@ -199,7 +199,7 @@ function PaddedStates(; padding=1.0)
end

function (states_type::PaddedStates)(mat::AbstractMatrix)
results = states_type.(eachcol(mat))
results = map(states_type, eachcol(mat))
return hcat(results...)
end

@@ -294,7 +294,7 @@ nla(nlat::NonLinearAlgorithm, x_old::AbstractVecOrMat) = nlat(x_old)

# dispatch over matrices for all nonlin algorithms
function (nlat::NonLinearAlgorithm)(x_old::AbstractMatrix)
results = nlat.(eachcol(x_old))
results = map(nlat, eachcol(x_old))
return hcat(results...)
end

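
Swapping the `states_type.(eachcol(mat))` broadcast for `map` keeps the per-column application explicit and avoids broadcasting over a callable struct (an assumption about the motivation; the PR states none). The pattern in isolation, with a toy padding closure:

```julia
mat = rand(3, 4)
padding = 1.0

# Apply a column-wise transform, then stitch the columns back together.
results = map(col -> vcat(col, padding), eachcol(mat))
padded = hcat(results...)   # 4×4; `reduce(hcat, results)` would avoid the splat
```
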
6 changes: 5 additions & 1 deletion test/qa.jl
@@ -1,4 +1,4 @@
using ReservoirComputing, Aqua
using ReservoirComputing, Aqua, JET
@testset "Aqua" begin
Aqua.find_persistent_tasks_deps(ReservoirComputing)
Aqua.test_ambiguities(ReservoirComputing; recursive=false)
@@ -9,3 +9,7 @@ using ReservoirComputing, Aqua
Aqua.test_unbound_args(ReservoirComputing)
Aqua.test_undefined_exports(ReservoirComputing)
end

@testset "JET" begin
JET.test_package(ReservoirComputing)
end
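
`JET.test_package` turns JET's reports into test failures for the whole package. If reports surfacing from dependencies prove noisy, the analysis can be scoped to the package's own modules; a sketch using JET's documented keyword:

```julia
using Test, ReservoirComputing, JET

@testset "JET (scoped)" begin
    # Only report problems in code defined by ReservoirComputing itself.
    JET.test_package(ReservoirComputing; target_defined_modules = true)
end
```
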