diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index c5d46e7592..ce0ee0231c 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -5,6 +5,7 @@ version = "0.2.11"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
+BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
 Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
 Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 FLoops = "cc61a311-1640-44b5-9fba-1b764f453329"
@@ -35,6 +36,7 @@ NDTensorsTBLISExt = "TBLIS"
 
 [compat]
 Adapt = "3.5"
+BlockArrays = "0.16"
 Compat = "4.9"
 Dictionaries = "0.3.5"
 FLoops = "0.2.1"
diff --git a/NDTensors/src/BlockSparseArrays/README.md b/NDTensors/src/BlockSparseArrays/README.md
new file mode 100644
index 0000000000..ae3a119fbe
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/README.md
@@ -0,0 +1,59 @@
+# BlockSparseArrays.jl
+
+A Julia `BlockSparseArray` type based on the `BlockArrays.jl` interface.
+
+It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys
+(DOK) format to store non-zero values, specifically a `Dictionary` from
+`Dictionaries.jl`. `BlockArrays` reinterprets the `SparseArray` as a blocked
+data structure.
+
+````julia
+using NDTensors.BlockSparseArrays
+using BlockArrays: BlockArrays, blockedrange
+
+# Block dimensions
+i1 = [2, 3]
+i2 = [2, 3]
+
+i_axes = (blockedrange(i1), blockedrange(i2))
+
+function block_size(axes, block)
+  return length.(getindex.(axes, BlockArrays.Block.(block.n)))
+end
+
+# Data
+nz_blocks = BlockArrays.Block.([(1, 1), (2, 2)])
+nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
+nz_block_lengths = prod.(nz_block_sizes)
+
+# Blocks with discontiguous underlying data
+d_blocks = randn.(nz_block_sizes)
+
+# Blocks with contiguous underlying data
+# d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths)
+# d_blocks = [reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for i in 1:length(nz_blocks)]
+
+B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
+
+# Access a stored block
+B[BlockArrays.Block(1, 1)]
+
+# Access an unstored block, which returns a zero matrix
+B[BlockArrays.Block(1, 2)]
+
+# Set a block that was previously zero
+B[BlockArrays.Block(1, 2)] = randn(2, 3)
+
+# Matrix multiplication (not optimized for sparsity yet)
+B * B
+````
+
+You can generate this README with:
+```julia
+using Literate
+Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor())
+```
+
+---
+
+*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
+
diff --git a/NDTensors/src/BlockSparseArrays/examples/README.jl b/NDTensors/src/BlockSparseArrays/examples/README.jl
new file mode 100644
index 0000000000..a55c39e2d0
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/examples/README.jl
@@ -0,0 +1,52 @@
+# # BlockSparseArrays.jl
+#
+# A Julia `BlockSparseArray` type based on the `BlockArrays.jl` interface.
+#
+# It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys
+# (DOK) format to store non-zero values, specifically a `Dictionary` from
+# `Dictionaries.jl`. `BlockArrays` reinterprets the `SparseArray` as a
+# blocked data structure.
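+#
+# As a sketch of the underlying storage (a hypothetical snippet using the
+# `SparseArray` constructor from `src/sparsearray.jl`; it is not run as part
+# of this example), explicitly set entries live in a `Dictionary` keyed by
+# `CartesianIndex`, and unstored entries are materialized on access by the
+# array's `zero` function:
+#
+# ```julia
+# using NDTensors.BlockSparseArrays: SparseArray
+# using Dictionaries: Dictionary
+# zero_elt(T, I) = zero(T)
+# a = SparseArray(Dictionary{CartesianIndex{2},Float64}(), (2, 2), zero_elt)
+# a[1, 2] = 3.0  # stored in the dictionary
+# a[2, 1]        # unstored: returns zero_elt(Float64, CartesianIndex(2, 1)) == 0.0
+# ```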
+
+using NDTensors.BlockSparseArrays
+using BlockArrays: BlockArrays, blockedrange
+
+## Block dimensions
+i1 = [2, 3]
+i2 = [2, 3]
+
+i_axes = (blockedrange(i1), blockedrange(i2))
+
+function block_size(axes, block)
+  return length.(getindex.(axes, BlockArrays.Block.(block.n)))
+end
+
+## Data
+nz_blocks = BlockArrays.Block.([(1, 1), (2, 2)])
+nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
+nz_block_lengths = prod.(nz_block_sizes)
+
+## Blocks with discontiguous underlying data
+d_blocks = randn.(nz_block_sizes)
+
+## Blocks with contiguous underlying data
+## d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths)
+## d_blocks = [reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for i in 1:length(nz_blocks)]
+
+B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
+
+## Access a stored block
+B[BlockArrays.Block(1, 1)]
+
+## Access an unstored block, which returns a zero matrix
+B[BlockArrays.Block(1, 2)]
+
+## Set a block that was previously zero
+B[BlockArrays.Block(1, 2)] = randn(2, 3)
+
+## Matrix multiplication (not optimized for sparsity yet)
+B * B
+
+# You can generate this README with:
+# ```julia
+# using Literate
+# Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor())
+# ```
diff --git a/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl b/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
new file mode 100644
index 0000000000..ddc40bf6d0
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
@@ -0,0 +1,12 @@
+module BlockSparseArrays
+using BlockArrays
+using Dictionaries
+
+using BlockArrays: block
+
+export BlockSparseArray, SparseArray
+
+include("sparsearray.jl")
+include("blocksparsearray.jl")
+
+end
diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
new file mode 100644
index 0000000000..440c1d2e6a
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -0,0 +1,121 @@
+using BlockArrays: block
+
+# TODO: Also add a version with contiguous underlying data.
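+#
+# `BlockSparseArray` stores its data as a `SparseArray` of blocks: `blocks`
+# holds the stored (structurally non-zero) blocks, each an
+# `AbstractArray{T,N}`, keyed by block position, while `axes` are blocked
+# axes (e.g. built with `blockedrange`) that determine the size of every
+# block, stored or unstored.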
+struct BlockSparseArray{
+  T,N,Blocks<:SparseArray{<:AbstractArray{T,N},N},Axes<:NTuple{N,AbstractUnitRange{Int}}
+} <: AbstractBlockArray{T,N}
+  blocks::Blocks
+  axes::Axes
+end
+
+# The size of a block
+function block_size(axes::Tuple, block::Block)
+  return length.(getindex.(axes, Block.(block.n)))
+end
+
+struct BlockZero{Axes}
+  axes::Axes
+end
+
+function (f::BlockZero)(T::Type, I::CartesianIndex)
+  return fill!(T(undef, block_size(f.axes, Block(Tuple(I)))), false)
+end
+
+function BlockSparseArray(
+  blocks::AbstractVector{<:Block{N}}, blockdata::AbstractVector, axes::NTuple{N}
+) where {N}
+  return BlockSparseArray(Dictionary(blocks, blockdata), axes)
+end
+
+function BlockSparseArray(
+  blockdata::Dictionary{<:Block{N}}, axes::NTuple{N,AbstractUnitRange{Int}}
+) where {N}
+  blocks = keys(blockdata)
+  cartesianblocks = map(block -> CartesianIndex(block.n), blocks)
+  cartesiandata = Dictionary(cartesianblocks, blockdata)
+  block_storage = SparseArray(cartesiandata, blocklength.(axes), BlockZero(axes))
+  return BlockSparseArray(block_storage, axes)
+end
+
+function BlockSparseArray(
+  blockdata::Dictionary{<:Block{N}}, blockinds::NTuple{N,AbstractVector}
+) where {N}
+  return BlockSparseArray(blockdata, blockedrange.(blockinds))
+end
+
+Base.axes(block_arr::BlockSparseArray) = block_arr.axes
+
+function Base.copy(block_arr::BlockSparseArray)
+  return BlockSparseArray(deepcopy(block_arr.blocks), copy.(block_arr.axes))
+end
+
+function BlockArrays.viewblock(block_arr::BlockSparseArray, block)
+  blks = block.n
+  @boundscheck blockcheckbounds(block_arr, blks...)
+  ## block_size = length.(getindex.(axes(block_arr), Block.(blks)))
+  # TODO: Make this `Zeros`?
+  ## zero = zeros(eltype(block_arr), block_size)
+  # If the block isn't stored, the `SparseArray` constructs a new zero
+  # block of the right size with its `BlockZero`.
+  return block_arr.blocks[blks...]
+  ## return get_nonzero(block_arr.blocks, blks, zero)
+end
+
+function Base.getindex(block_arr::BlockSparseArray{T,N}, bi::BlockIndex{N}) where {T,N}
+  @boundscheck blockcheckbounds(block_arr, Block(bi.I))
+  bl = view(block_arr, block(bi))
+  inds = bi.α
+  @boundscheck checkbounds(bl, inds...)
+  v = bl[inds...]
+  return v
+end
+
+function Base.setindex!(
+  block_arr::BlockSparseArray{T,N}, v, i::Vararg{Integer,N}
+) where {T,N}
+  @boundscheck checkbounds(block_arr, i...)
+  block_indices = findblockindex.(axes(block_arr), i)
+  block = map(block_index -> Block(block_index.I), block_indices)
+  offsets = map(block_index -> only(block_index.α), block_indices)
+  block_view = @view block_arr[block...]
+  block_view[offsets...] = v
+  block_arr[block...] = block_view
+  return block_arr
+end
+
+function BlockArrays._check_setblock!(
+  block_arr::BlockSparseArray{T,N}, v, block::NTuple{N,Integer}
+) where {T,N}
+  for i in 1:N
+    bsz = length(axes(block_arr, i)[Block(block[i])])
+    if size(v, i) != bsz
+      throw(
+        DimensionMismatch(
+          string(
+            "tried to assign $(size(v)) array to ",
+            length.(getindex.(axes(block_arr), block)),
+            " block",
+          ),
+        ),
+      )
+    end
+  end
+end
+
+function Base.setindex!(
+  block_arr::BlockSparseArray{T,N}, v, block::Vararg{Block{1},N}
+) where {T,N}
+  blks = Int.(block)
+  @boundscheck blockcheckbounds(block_arr, blks...)
+  @boundscheck BlockArrays._check_setblock!(block_arr, v, blks)
+  # `SparseArray.setindex!` uses `set!`, so this inserts the block if it
+  # isn't stored yet and replaces it otherwise.
+  block_arr.blocks[blks...] = v
+  # Alternatively, broadcast with `.=` through a block view to overwrite
+  # the existing block's data in place:
+  ## block_view = @view block_arr[Block(blks)]
+  ## block_view .= v
+  return block_arr
+end
+
+function Base.getindex(block_arr::BlockSparseArray{T,N}, i::Vararg{Integer,N}) where {T,N}
+  @boundscheck checkbounds(block_arr, i...)
+  v = block_arr[findblockindex.(axes(block_arr), i)...]
+  return v
+end
diff --git a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
new file mode 100644
index 0000000000..a3f5b6dcbd
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
@@ -0,0 +1,32 @@
+struct SparseArray{T,N,Zero} <: AbstractArray{T,N}
+  data::Dictionary{CartesianIndex{N},T}
+  dims::NTuple{N,Int64}
+  zero::Zero
+end
+
+Base.size(a::SparseArray) = a.dims
+
+function Base.setindex!(a::SparseArray{T,N}, v, I::CartesianIndex{N}) where {T,N}
+  set!(a.data, I, v)
+  return a
+end
+function Base.setindex!(a::SparseArray{T,N}, v, I::Vararg{Int,N}) where {T,N}
+  return setindex!(a, v, CartesianIndex(I))
+end
+
+function Base.getindex(a::SparseArray{T,N}, I::CartesianIndex{N}) where {T,N}
+  return get(a.data, I, a.zero(T, I))
+end
+function Base.getindex(a::SparseArray{T,N}, I::Vararg{Int,N}) where {T,N}
+  return getindex(a, CartesianIndex(I))
+end
+
+## # `getindex` but uses a default if the value is
+## # structurally zero.
+## function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N}
+##   @boundscheck checkbounds(a, I)
+##   return get(a.data, I, zero)
+## end
+## function get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N}
+##   return get_nonzero(a, CartesianIndex(I), zero)
+## end
diff --git a/NDTensors/src/BlockSparseArrays/test/runtests.jl b/NDTensors/src/BlockSparseArrays/test/runtests.jl
new file mode 100644
index 0000000000..a048b0ac32
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/test/runtests.jl
@@ -0,0 +1,12 @@
+using Test
+using NDTensors.BlockSparseArrays
+
+@testset "Test NDTensors.BlockSparseArrays" begin
+  @testset "README" begin
+    @test include(
+      joinpath(
+        pkgdir(BlockSparseArrays), "src", "BlockSparseArrays", "examples", "README.jl"
+      ),
+    ) isa Any
+  end
+end
diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl
index 38d5f53a05..7df0990789 100644
--- a/NDTensors/src/NDTensors.jl
+++ b/NDTensors/src/NDTensors.jl
@@ -19,6 +19,8 @@ using TupleTools
 
 include("SetParameters/src/SetParameters.jl")
 using .SetParameters
+include("BlockSparseArrays/src/BlockSparseArrays.jl")
+using .BlockSparseArrays
 
 include("SmallVectors/src/SmallVectors.jl")
 using .SmallVectors
@@ -117,6 +119,7 @@ include("empty/adapt.jl")
 #
 include("arraytensor/arraytensor.jl")
 include("arraytensor/array.jl")
+include("arraytensor/blocksparsearray.jl")
 
 #####################################
 # Deprecations
diff --git a/NDTensors/src/arraytensor/arraytensor.jl b/NDTensors/src/arraytensor/arraytensor.jl
index d91b4a6d01..13607d240e 100644
--- a/NDTensors/src/arraytensor/arraytensor.jl
+++ b/NDTensors/src/arraytensor/arraytensor.jl
@@ -1,7 +1,12 @@
 # Used for dispatch to distinguish from Tensors wrapping TensorStorage.
 # Remove once TensorStorage is removed.
 const ArrayStorage{T,N} = Union{
-  Array{T,N},ReshapedArray{T,N},SubArray{T,N},PermutedDimsArray{T,N},StridedView{T,N}
+  Array{T,N},
+  ReshapedArray{T,N},
+  SubArray{T,N},
+  PermutedDimsArray{T,N},
+  StridedView{T,N},
+  BlockSparseArray{T,N},
 }
 const MatrixStorage{T} = Union{
   ArrayStorage{T,2},
@@ -41,6 +46,7 @@ function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...)
   return tensor
 end
 
+# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)`
 function contraction_output(
   tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR
 )
diff --git a/NDTensors/src/arraytensor/blocksparsearray.jl b/NDTensors/src/arraytensor/blocksparsearray.jl
new file mode 100644
index 0000000000..53da75cbf6
--- /dev/null
+++ b/NDTensors/src/arraytensor/blocksparsearray.jl
@@ -0,0 +1,16 @@
+# TODO: Implement.
+function contraction_output(tensor1::BlockSparseArray, tensor2::BlockSparseArray, indsR)
+  return error("Not implemented")
+end
+
+# TODO: Implement.
+function contract!(
+  tensorR::BlockSparseArray,
+  labelsR,
+  tensor1::BlockSparseArray,
+  labels1,
+  tensor2::BlockSparseArray,
+  labels2,
+)
+  return error("Not implemented")
+end
diff --git a/NDTensors/test/BlockSparseArrays.jl b/NDTensors/test/BlockSparseArrays.jl
new file mode 100644
index 0000000000..5d1345d0e7
--- /dev/null
+++ b/NDTensors/test/BlockSparseArrays.jl
@@ -0,0 +1,4 @@
+using Test
+using NDTensors
+
+include(joinpath(pkgdir(NDTensors), "src", "BlockSparseArrays", "test", "runtests.jl"))
diff --git a/NDTensors/test/Project.toml b/NDTensors/test/Project.toml
index 131682dd8c..c1c309317a 100644
--- a/NDTensors/test/Project.toml
+++ b/NDTensors/test/Project.toml
@@ -1,4 +1,5 @@
 [deps]
+BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
diff --git a/NDTensors/test/arraytensor/array.jl b/NDTensors/test/arraytensor/array.jl
new file mode 100644
index 0000000000..39809d0db3
--- /dev/null
+++ b/NDTensors/test/arraytensor/array.jl
@@ -0,0 +1,45 @@
+using NDTensors
+using LinearAlgebra
+using Test
+
+using NDTensors: storage, storagetype
+
+@testset "Tensor wrapping Array" begin
+  is1 = (2, 3)
+  D1 = randn(is1)
+
+  is2 = (3, 4)
+  D2 = randn(is2)
+
+  T1 = tensor(D1, is1)
+  T2 = tensor(D2, is2)
+
+  @test T1[1, 1] == D1[1, 1]
+
+  x = rand()
+  T1[1, 1] = x
+
+  @test T1[1, 1] == x
+  @test array(T1) == D1
+  @test storagetype(T1) <: Matrix{Float64}
+  @test storage(T1) == D1
+  @test eltype(T1) == eltype(D1)
+  @test inds(T1) == is1
+
+  R = T1 * T2
+  @test storagetype(R) <: Matrix{Float64}
+  @test Array(R) ≈ Array(T1) * Array(T2)
+
+  T1r = randn!(similar(T1))
+  @test Array(T1r + T1) ≈ Array(T1r) + Array(T1)
+  @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
+
+  U, S, V = svd(T1)
+  @test U * S * V ≈ T1
+
+  T12 = contract(T1, (1, -1), T2, (-1, 2))
+  @test T12 ≈ T1 * T2
+
+  D12 = contract(D1, (1, -1), D2, (-1, 2))
+  @test D12 ≈ Array(T12)
+end
diff --git a/NDTensors/test/arraytensor/arraytensor.jl b/NDTensors/test/arraytensor/arraytensor.jl
index 39809d0db3..a71944d01c 100644
--- a/NDTensors/test/arraytensor/arraytensor.jl
+++ b/NDTensors/test/arraytensor/arraytensor.jl
@@ -2,44 +2,7 @@ using NDTensors
 using LinearAlgebra
 using Test
 
-using NDTensors: storage, storagetype
-
-@testset "Tensor wrapping Array" begin
-  is1 = (2, 3)
-  D1 = randn(is1)
-
-  is2 = (3, 4)
-  D2 = randn(is2)
-
-  T1 = tensor(D1, is1)
-  T2 = tensor(D2, is2)
-
-  @test T1[1, 1] == D1[1, 1]
-
-  x = rand()
-  T1[1, 1] = x
-
-  @test T1[1, 1] == x
-  @test array(T1) == D1
-  @test storagetype(T1) <: Matrix{Float64}
-  @test storage(T1) == D1
-  @test eltype(T1) == eltype(D1)
-  @test inds(T1) == is1
-
-  R = T1 * T2
-  @test storagetype(R) <: Matrix{Float64}
-  @test Array(R) ≈ Array(T1) * Array(T2)
-
-  T1r = randn!(similar(T1))
-  @test Array(T1r + T1) ≈ Array(T1r) + Array(T1)
-  @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
-
-  U, S, V = svd(T1)
-  @test U * S * V ≈ T1
-
-  T12 = contract(T1, (1, -1), T2, (-1, 2))
-  @test T12 ≈ T1 * T2
-
-  D12 = contract(D1, (1, -1), D2, (-1, 2))
-  @test D12 ≈ Array(T12)
+@testset "Tensor wrapping AbstractArrays" begin
+  include("array.jl")
+  include("blocksparsearray.jl")
 end
diff --git a/NDTensors/test/arraytensor/blocksparsearray.jl b/NDTensors/test/arraytensor/blocksparsearray.jl
new file mode 100644
index 0000000000..5d508608a5
--- /dev/null
+++ b/NDTensors/test/arraytensor/blocksparsearray.jl
@@ -0,0 +1,52 @@
+using NDTensors
+using NDTensors.BlockSparseArrays
+using BlockArrays: BlockArrays
+using LinearAlgebra
+using Test
+
+using NDTensors: storage, storagetype
+
+@testset "Tensor wrapping BlockSparseArray" begin
+  is1 = ([1, 1], [1, 2])
+  D1 = BlockSparseArray(
+    [BlockArrays.Block(1, 1), BlockArrays.Block(2, 2)], [randn(1, 1), randn(1, 2)], is1
+  )
+
+  is2 = ([1, 2], [2, 2])
+  D2 = BlockSparseArray(
+    [BlockArrays.Block(1, 1), BlockArrays.Block(2, 2)], [randn(1, 2), randn(2, 2)], is2
+  )
+
+  T1 = tensor(D1, is1)
+  T2 = tensor(D2, is2)
+
+  @test T1[1, 1] == D1[1, 1]
+
+  x = rand()
+  T1[1, 1] = x
+
+  @test T1[1, 1] == x
+  @test array(T1) == D1
+  @test storagetype(T1) <: BlockSparseArray{Float64,2}
+  @test storage(T1) == D1
+  @test eltype(T1) == eltype(D1)
+  @test inds(T1) == is1
+
+  @test_broken R = T1 * T2
+  @test_broken storagetype(R) <: Matrix{Float64}
+  @test_broken Array(R) ≈ Array(T1) * Array(T2)
+
+  @test_broken T1r = randn!(similar(T1))
+  @test_broken Array(T1r + T1) ≈ Array(T1r) + Array(T1)
+  @test_broken Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
+
+  # TODO: Not implemented yet.
+  ## U, S, V = svd(T1)
+  ## @test U * S * V ≈ T1
+
+  @test_broken T12 = contract(T1, (1, -1), T2, (-1, 2))
+  @test_broken T12 ≈ T1 * T2
+
+  @test_broken D12 = contract(D1, (1, -1), D2, (-1, 2))
+  @test_broken D12 ≈ Array(T12)
+end
diff --git a/NDTensors/test/runtests.jl b/NDTensors/test/runtests.jl
index 8e4f1733f8..222353618a 100644
--- a/NDTensors/test/runtests.jl
+++ b/NDTensors/test/runtests.jl
@@ -19,6 +19,7 @@ end
 
 @safetestset "NDTensors" begin
   @testset "$filename" for filename in [
+    "BlockSparseArrays.jl",
     "SetParameters.jl",
     "SmallVectors.jl",
     "linearalgebra.jl",
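
A rough sketch (not part of the diff above) of how the block-sparse
`contract!`/`contraction_output` stubs could eventually exploit sparsity in
the matrix-multiplication case, by looping only over stored blocks. The
direct field accesses (`A.blocks.data`) and the `Matrix{Float64}` block type
are illustration-only assumptions, not an API this diff defines:

```julia
using BlockArrays: Block
using Dictionaries: Dictionary, set!
using NDTensors.BlockSparseArrays: BlockSparseArray

# Blockwise sparse matrix multiplication, C[I, K] += A[I, J] * B[J, K],
# visiting only pairs of blocks that are actually stored.
function sketch_blocksparse_mul(A::BlockSparseArray, B::BlockSparseArray)
  out = Dictionary{Block{2,Int},Matrix{Float64}}()
  for (IA, blockA) in pairs(A.blocks.data), (IB, blockB) in pairs(B.blocks.data)
    # A contribution requires a shared inner block index.
    IA[2] == IB[1] || continue
    I = Block(IA[1], IB[2])
    if haskey(out, I)
      out[I] = out[I] + blockA * blockB
    else
      set!(out, I, blockA * blockB)
    end
  end
  return BlockSparseArray(out, (axes(A, 1), axes(B, 2)))
end
```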