diff --git a/src/abstract.jl b/src/abstract.jl
index a08bd63..c3940a3 100644
--- a/src/abstract.jl
+++ b/src/abstract.jl
@@ -174,6 +174,11 @@ storage_type(op::AbstractLinearOperator) = error("please implement storage_type
 storage_type(op::LinearOperator) = typeof(op.Mv5)
 storage_type(M::AbstractMatrix{T}) where {T} = Vector{T}
 
+# Lazy wrappers
+storage_type(op::Adjoint) = storage_type(parent(op))
+storage_type(op::Transpose) = storage_type(parent(op))
+storage_type(op::Diagonal) = typeof(parent(op))
+
 """
     reset!(op)
 
diff --git a/test/gpu/amdgpu.jl b/test/gpu/amdgpu.jl
index bda6b90..d12f3b9 100644
--- a/test/gpu/amdgpu.jl
+++ b/test/gpu/amdgpu.jl
@@ -11,5 +11,9 @@ using LinearOperators, AMDGPU
   y = M * v
   @test y isa ROCArray{Float32}
 
+  @test LinearOperators.storage_type(A) == LinearOperators.storage_type(adjoint(A))
+  @test LinearOperators.storage_type(A) == LinearOperators.storage_type(transpose(A))
+  @test LinearOperators.storage_type(Diagonal(v)) == typeof(v)
+
   @testset "AMDGPU S kwarg" test_S_kwarg(arrayType = ROCArray)
 end
diff --git a/test/gpu/nvidia.jl b/test/gpu/nvidia.jl
index c707dee..63bf732 100644
--- a/test/gpu/nvidia.jl
+++ b/test/gpu/nvidia.jl
@@ -13,5 +13,10 @@ using LinearOperators, CUDA, CUDA.CUSPARSE, CUDA.CUSOLVER
   v = CUDA.rand(35)
   y = M * v
   @test y isa CuVector{Float32}
+
+  @test LinearOperators.storage_type(A) == LinearOperators.storage_type(adjoint(A))
+  @test LinearOperators.storage_type(A) == LinearOperators.storage_type(transpose(A))
+  @test LinearOperators.storage_type(Diagonal(v)) == typeof(v)
+
   @testset "Nvidia S kwarg" test_S_kwarg(arrayType = CuArray)
 end