From 0d64da8b8fa62dfe381324392aed8ad52ed79d3a Mon Sep 17 00:00:00 2001 From: Lander Burgelman <39218680+leburgel@users.noreply.github.com> Date: Mon, 9 Jan 2023 23:59:26 +0100 Subject: [PATCH] Additions needed for sweeping algorithms on trees --- Project.toml | 3 + src/ITensorNetworks.jl | 15 +- src/abstractindsnetwork.jl | 2 +- src/abstractitensornetwork.jl | 181 +++++- src/expect.jl | 7 +- src/exports.jl | 33 +- src/imports.jl | 33 +- src/indsnetwork.jl | 2 +- src/itensornetwork.jl | 6 +- src/models.jl | 78 ++- src/requires/omeinsumcontractionorders.jl | 2 +- src/treetensornetworks/abstractprojttno.jl | 145 +++++ .../abstracttreetensornetwork.jl | 370 +++++++++++ src/treetensornetworks/opsum_to_ttno.jl | 589 ++++++++++++++++++ src/treetensornetworks/projttno.jl | 57 ++ src/treetensornetworks/projttno_apply.jl | 55 ++ src/treetensornetworks/projttnosum.jl | 59 ++ src/treetensornetworks/ttno.jl | 152 +++++ src/treetensornetworks/ttns.jl | 112 ++++ src/utility.jl | 17 + test/test_indsnetwork.jl | 2 +- test/test_itensornetwork.jl | 74 ++- test/test_opsum_to_ttno.jl | 73 +++ test/test_sitetype.jl | 1 + test/test_ttno.jl | 51 ++ test/test_ttns.jl | 49 ++ 26 files changed, 2138 insertions(+), 30 deletions(-) create mode 100644 src/treetensornetworks/abstractprojttno.jl create mode 100644 src/treetensornetworks/abstracttreetensornetwork.jl create mode 100644 src/treetensornetworks/opsum_to_ttno.jl create mode 100644 src/treetensornetworks/projttno.jl create mode 100644 src/treetensornetworks/projttno_apply.jl create mode 100644 src/treetensornetworks/projttnosum.jl create mode 100644 src/treetensornetworks/ttno.jl create mode 100644 src/treetensornetworks/ttns.jl create mode 100644 src/utility.jl create mode 100644 test/test_opsum_to_ttno.jl create mode 100644 test/test_ttno.jl create mode 100644 test/test_ttns.jl diff --git a/Project.toml b/Project.toml index aac720a9..a6874a21 100644 --- a/Project.toml +++ b/Project.toml @@ -10,6 +10,7 @@ Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" @@ -17,7 +18,9 @@ Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Requires = "ae029012-a4dd-5104-9daa-d747884805df" SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" +SparseArrayKit = "a9a3c162-d163-4c15-8926-b8794fbefed2" SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66" +StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" diff --git a/src/ITensorNetworks.jl b/src/ITensorNetworks.jl index 92020987..28654953 100644 --- a/src/ITensorNetworks.jl +++ b/src/ITensorNetworks.jl @@ -6,6 +6,7 @@ using Dictionaries using DocStringExtensions using Graphs using Graphs.SimpleGraphs # AbstractSimpleGraph +using IsApprox using ITensors using ITensors.ContractionSequenceOptimization using ITensors.ITensorVisualizationCore @@ -15,7 +16,9 @@ using Observers using Printf using Requires using SimpleTraits +using SparseArrayKit using SplitApplyCombine +using StaticArrays using Suppressor using TimerOutputs @@ -27,6 +30,7 @@ using ITensors: @timeit_debug, AbstractMPS, Algorithm, + OneITensor, 
check_hascommoninds, commontags, orthocenter, @@ -69,11 +73,20 @@ include("expect.jl") include("models.jl") include("tebd.jl") include("itensornetwork.jl") +include("utility.jl") include("specialitensornetworks.jl") include("renameitensornetwork.jl") include("boundarymps.jl") include("beliefpropagation.jl") -include(joinpath("treetensornetworks", "treetensornetwork.jl")) +include(joinpath("treetensornetworks", "abstracttreetensornetwork.jl")) +# include(joinpath("treetensornetworks", "treetensornetwork.jl")) +include(joinpath("treetensornetworks", "ttns.jl")) +include(joinpath("treetensornetworks", "ttno.jl")) +include(joinpath("treetensornetworks", "opsum_to_ttno.jl")) +include(joinpath("treetensornetworks", "abstractprojttno.jl")) +include(joinpath("treetensornetworks", "projttno.jl")) +include(joinpath("treetensornetworks", "projttnosum.jl")) +include(joinpath("treetensornetworks", "projttno_apply.jl")) # Compatibility of ITensor observer and Observers # TODO: Delete this include(joinpath("treetensornetworks", "solvers", "update_observer.jl")) diff --git a/src/abstractindsnetwork.jl b/src/abstractindsnetwork.jl index 70de7710..fb27c363 100644 --- a/src/abstractindsnetwork.jl +++ b/src/abstractindsnetwork.jl @@ -19,7 +19,7 @@ edge_data_type(::Type{<:AbstractIndsNetwork{V,I}}) where {V,I} = Vector{I} function uniqueinds(is::AbstractIndsNetwork, edge::AbstractEdge) inds = IndexSet(get(is, src(edge), Index[])) - for ei in setdiff(incident_edges(is, src(edge)...), [edge]) + for ei in setdiff(incident_edges(is, src(edge)), [edge]) inds = unioninds(inds, get(is, ei, Index[])) end return inds diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl index 7fb24856..0e097bdb 100644 --- a/src/abstractitensornetwork.jl +++ b/src/abstractitensornetwork.jl @@ -45,6 +45,12 @@ end # Iteration # +# TODO: iteration + +# TODO: different `map` functionalities as defined for ITensors.AbstractMPS + +# TODO: broadcasting + function union(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; kwargs...) tn = ITensorNetwork(union(data_graph(tn1), data_graph(tn2)); kwargs...) # Add any new edges that are introduced during the union @@ -104,6 +110,36 @@ end # Convenience wrapper itensors(tn::AbstractITensorNetwork) = Vector{ITensor}(tn) +# +# Promotion and conversion +# + +function LinearAlgebra.promote_leaf_eltypes(tn::AbstractITensorNetwork) + return LinearAlgebra.promote_leaf_eltypes(itensors(tn)) +end + +function ITensors.promote_itensor_eltype(tn::AbstractITensorNetwork) + return LinearAlgebra.promote_leaf_eltypes(tn) +end + +ITensors.scalartype(tn::AbstractITensorNetwork) = LinearAlgebra.promote_leaf_eltypes(tn) + +# TODO: eltype(::AbstractITensorNetwork) (cannot behave the same as eltype(::ITensors.AbstractMPS)) + +# TODO: mimic ITensors.AbstractMPS implementation using map +function ITensors.convert_leaf_eltype(eltype::Type, tn::AbstractITensorNetwork) + tn = copy(tn) + vertex_data(tn) .= convert_eltype.(Ref(eltype), vertex_data(tn)) + return tn +end + +# TODO: mimic ITensors.AbstractMPS implementation using map +function NDTensors.convert_scalartype(eltype::Type{<:Number}, tn::AbstractITensorNetwork) + tn = copy(tn) + vertex_data(tn) .= ITensors.adapt.(Ref(eltype), vertex_data(tn)) + return tn +end + # # Conversion to Graphs # @@ -185,11 +221,13 @@ end function replaceinds(tn::AbstractITensorNetwork, is_is′::Pair{<:IndsNetwork,<:IndsNetwork}) tn = copy(tn) is, is′ = is_is′ - # TODO: Check that `is` and `is′` have the same vertices and edges. 
+ @assert underlying_graph(is) == underlying_graph(is′) for v in vertices(is) + isassigned(is, v) || continue setindex_preserve_graph!(tn, replaceinds(tn[v], is[v] => is′[v]), v) end for e in edges(is) + isassigned(is, e) || continue for v in (src(e), dst(e)) setindex_preserve_graph!(tn, replaceinds(tn[v], is[e] => is′[e]), v) end @@ -208,7 +246,7 @@ const map_inds_label_functions = [ :setprime, :noprime, :replaceprime, - :swapprime, + # :swapprime, # TODO: add @test_broken as a reminder :addtags, :removetags, :replacetags, @@ -227,6 +265,24 @@ for f in map_inds_label_functions function $f(n::Union{IndsNetwork,AbstractITensorNetwork}, args...; kwargs...) return map_inds($f, n, args...; kwargs...) end + + function $f( + ffilter::typeof(linkinds), + n::Union{IndsNetwork,AbstractITensorNetwork}, + args...; + kwargs..., + ) + return map_inds($f, n, args...; sites=[], kwargs...) + end + + function $f( + ffilter::typeof(siteinds), + n::Union{IndsNetwork,AbstractITensorNetwork}, + args...; + kwargs..., + ) + return map_inds($f, n, args...; links=[], kwargs...) + end end end @@ -402,12 +458,19 @@ function factorize( return factorize(tn, edgetype(tn)(edge); kwargs...) end -# For ambiguity error +# For ambiguity error; TODO: decide whether to use graph mutating methods when resulting graph is unchanged? function _orthogonalize_edge(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...) - tn = factorize(tn, edge; kwargs...) - # TODO: Implement as `only(common_neighbors(tn, src(edge), dst(edge)))` - new_vertex = only(neighbors(tn, src(edge)) ∩ neighbors(tn, dst(edge))) - return contract(tn, new_vertex => dst(edge)) + # tn = factorize(tn, edge; kwargs...) + # # TODO: Implement as `only(common_neighbors(tn, src(edge), dst(edge)))` + # new_vertex = only(neighbors(tn, src(edge)) ∩ neighbors(tn, dst(edge))) + # return contract(tn, new_vertex => dst(edge)) + tn = copy(tn) + left_inds = uniqueinds(tn, edge) + ltags = tags(tn, edge) + X, Y = factorize(tn[src(edge)], left_inds; tags=ltags, ortho="left", kwargs...) + tn[src(edge)] = X + tn[dst(edge)] *= Y + return tn end function orthogonalize(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...) @@ -429,6 +492,25 @@ function orthogonalize(ψ::AbstractITensorNetwork, source_vertex) return ψ end +# TODO: decide whether to use graph mutating methods when resulting graph is unchanged? +function _truncate_edge(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...) + tn = copy(tn) + left_inds = uniqueinds(tn, edge) + ltags = tags(tn, edge) + U, S, V = svd(tn[src(edge)], left_inds; lefttags=ltags, ortho="left", kwargs...) + tn[src(edge)] = U + tn[dst(edge)] *= (S * V) + return tn +end + +function truncate(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...) + return _truncate_edge(tn, edge; kwargs...) +end + +function truncate(tn::AbstractITensorNetwork, edge::Pair; kwargs...) + return truncate(tn, edgetype(tn)(edge); kwargs...) +end + function Base.:*(c::Number, ψ::AbstractITensorNetwork) v₁ = first(vertices(ψ)) cψ = copy(ψ) @@ -572,6 +654,91 @@ function visualize( return visualize(Vector{ITensor}(tn), args...; vertex_labels, kwargs...) end +# +# Link dimensions +# + +function maxlinkdim(tn::AbstractITensorNetwork) + md = 1 + for e in edges(tn) + md = max(md, linkdim(tn, e)) + end + return md +end + +function linkdim(tn::AbstractITensorNetwork, edge::Pair) + return linkdim(tn, edgetype(tn)(edge)) +end + +function linkdim(tn::AbstractITensorNetwork{V}, edge::AbstractEdge{V}) where {V} + ls = linkinds(tn, edge) + return prod([isnothing(l) ? 
1 : dim(l) for l in ls]) +end + +function linkdims(tn::AbstractITensorNetwork{V}) where {V} + ld = DataGraph{V,Any,Int}(copy(underlying_graph(tn))) + for e in edges(ld) + ld[e] = linkdim(tn, e) + end + return ld +end + +# +# Common index checking +# + +function hascommoninds( + ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V} +) where {V} + for v in vertices(A) + !hascommoninds(siteinds(A, v), siteinds(B, v)) && return false + end + return true +end + +function check_hascommoninds( + ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V} +) where {V} + N = nv(A) + if nv(B) ≠ N + throw( + DimensionMismatch( + "$(typeof(A)) and $(typeof(B)) have mismatched number of vertices $N and $(nv(B))." + ), + ) + end + for v in vertices(A) + !hascommoninds(siteinds(A, v), siteinds(B, v)) && error( + "$(typeof(A)) A and $(typeof(B)) B must share site indices. On vertex $v, A has site indices $(siteinds(A, v)) while B has site indices $(siteinds(B, v)).", + ) + end + return nothing +end + +function hassameinds( + ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V} +) where {V} + nv(A) ≠ nv(B) && return false + for v in vertices(A) + !ITensors.hassameinds(siteinds(A, v), siteinds(B, v)) && return false + end + return true +end + +# +# Site combiners +# + +# TODO: will be broken, fix this +function site_combiners(tn::AbstractITensorNetwork{V}) where {V} + Cs = DataGraph{V,ITensor}(copy(underlying_graph(tn))) + for v in vertices(tn) + s = siteinds(tn, v) + Cs[v] = combiner(s; tags=commontags(s)) + end + return Cs +end + ## # TODO: should this make sure that internal indices ## # don't clash? ## function hvncat( diff --git a/src/expect.jl b/src/expect.jl index 2a0e9e72..b25fc937 100644 --- a/src/expect.jl +++ b/src/expect.jl @@ -5,14 +5,17 @@ function expect( maxdim=nothing, ortho=false, sequence=nothing, + sites=vertices(ψ), ) s = siteinds(ψ) - res = Dictionary(vertices(ψ), Vector{Float64}(undef, nv(ψ))) + ElT = promote_itensor_eltype(ψ) + # ElT = ishermitian(ITensors.op(op, s[sites[1]])) ? 
real(ElT) : ElT + res = Dictionary(sites, Vector{ElT}(undef, length(sites))) if isnothing(sequence) sequence = contraction_sequence(inner_network(ψ, ψ; flatten=true)) end normψ² = norm_sqr(ψ; sequence) - for v in vertices(ψ) + for v in sites O = ITensor(Op(op, v), s) Oψ = apply(O, ψ; cutoff, maxdim, ortho) res[v] = contract_inner(ψ, Oψ; sequence) / normψ² diff --git a/src/exports.jl b/src/exports.jl index 15f355a5..ba2a64ea 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -17,7 +17,10 @@ export grid, edgetype, is_directed, rem_vertex!, - vertices + vertices, + post_order_dfs_vertices, + edge_path, + vertex_path # NamedGraphs export named_binary_tree, @@ -45,17 +48,33 @@ export AbstractITensorNetwork, randomITensorNetwork, ⊗, itensors, - tensor_product, - TreeTensorNetworkState, - TTNS, + reverse_bfs_edges, data_graph, flatten_networks, inner_network, + norm_network, + factorize!, norm_sqr_network, linkinds_combiners, combine_linkinds, subgraphs, reverse_bfs_edges, + # treetensornetwork + default_root_vertex, + ortho_center, + set_ortho_center, + TreeTensorNetworkState, + TTNS, + randomTTNS, + productTTNS, + TreeTensorNetworkOperator, + TTNO, + ProjTTNO, + ProjTTNOSum, + ProjTTNOApply, + set_nsite, + position, + finite_state_machine, # contraction_sequences.jl contraction_sequence, # utils.jl @@ -63,6 +82,7 @@ export AbstractITensorNetwork, # namedgraphs.jl rename_vertices, # models.jl + heisenberg, ising, # opsum.jl group_terms, @@ -81,5 +101,8 @@ export hypercubic_lattice_graph, square_lattice_graph, chain_lattice_graph # ITensorNetworks: partition.jl export partition, partition_vertices, subgraphs, subgraph_vertices +# ITensorNetworks: utility.jl +export relabel_sites + # KrylovKit -export eigsolve, linsolve +export eigsolve, linsolve \ No newline at end of file diff --git a/src/imports.jl b/src/imports.jl index c2dfab70..441794ff 100644 --- a/src/imports.jl +++ b/src/imports.jl @@ -13,7 +13,12 @@ import Base: union import NamedGraphs: - vertextype, convert_vertextype, vertex_to_parent_vertex, rename_vertices, disjoint_union + vertextype, + convert_vertextype, + vertex_to_parent_vertex, + rename_vertices, + disjoint_union, + incident_edges import .DataGraphs: underlying_graph, @@ -27,15 +32,22 @@ import Graphs: SimpleGraph, is_directed, weights import KrylovKit: eigsolve, linsolve -import LinearAlgebra: svd, factorize, qr +import LinearAlgebra: factorize, normalize, normalize!, qr, svd import ITensors: # contraction contract, orthogonalize, + isortho, inner, + loginner, norm, + lognorm, expect, + # truncation + truncate, + replacebond!, + replacebond, # site and link indices siteind, siteinds, @@ -58,7 +70,22 @@ import ITensors: settags, tags, # dag - dag + dag, + # permute + permute, + #commoninds + check_hascommoninds, + hascommoninds, + # linkdims + linkdim, + linkdims, + maxlinkdim, + # projected operators + product, + nsite, + # promotion and conversion + promote_itensor_eltype, + scalartype using ITensors.ContractionSequenceOptimization: deepmap diff --git a/src/indsnetwork.jl b/src/indsnetwork.jl index 2341a4ce..251e3c23 100644 --- a/src/indsnetwork.jl +++ b/src/indsnetwork.jl @@ -19,7 +19,7 @@ is_directed(::Type{<:IndsNetwork}) = false # When setting an edge with collections of `Index`, set the reverse direction # edge with the `dag`. 
function reverse_data_direction( - inds_network::IndsNetwork, is::Union{Index,Tuple{Vararg{<:Index}},Vector{<:Index}} + inds_network::IndsNetwork, is::Union{Index,Tuple{Vararg{Index}},Vector{<:Index}} ) return dag(is) end diff --git a/src/itensornetwork.jl b/src/itensornetwork.jl index d79c5230..356e430c 100644 --- a/src/itensornetwork.jl +++ b/src/itensornetwork.jl @@ -234,7 +234,11 @@ function ITensorNetwork(eltype::Type, is::IndsNetwork, initstate::Function) return ψ end -function ITensorNetwork(is::IndsNetwork, initstate::Function) +function ITensorNetwork(eltype::Type, is::IndsNetwork, initstate::Union{String,Integer}) + return ITensorNetwork(eltype, is, v -> initstate) +end + +function ITensorNetwork(is::IndsNetwork, initstate::Union{String,Integer,Function}) return ITensorNetwork(Number, is, initstate) end diff --git a/src/models.jl b/src/models.jl index 88380832..ed342487 100644 --- a/src/models.jl +++ b/src/models.jl @@ -1,10 +1,78 @@ -function ising(g::AbstractGraph; h) +_maybe_fill(x, n) = x +_maybe_fill(x::Number, n) = fill(x, n) + +""" +Random field J1-J2 Heisenberg model on a general graph +""" +function heisenberg( + g::AbstractGraph; J1=1.0, J2=0.0, h::Union{<:Real,Vector{<:Real}}=0 +) + h = _maybe_fill(h, nv(g)) ℋ = OpSum() - for e in edges(g) - ℋ -= "Z", maybe_only(src(e)), "Z", maybe_only(dst(e)) + if !iszero(J1) + for e in edges(g) + ℋ += J1 / 2, "S+", maybe_only(src(e)), "S-", maybe_only(dst(e)) + ℋ += J1 / 2, "S-", maybe_only(src(e)), "S+", maybe_only(dst(e)) + ℋ += J1, "Sz", maybe_only(src(e)), "Sz", maybe_only(dst(e)) + end end - for v in vertices(g) - ℋ += h, "X", maybe_only(v) + if !iszero(J2) + # TODO, more clever way of looping over next to nearest neighbors? + for (i, v) in enumerate(vertices(g)) + nnn = [neighbors(g, n) for n in neighbors(g, v)] + nnn = setdiff(Base.Iterators.flatten(nnn), neighbors(g, v)) + nnn = setdiff(nnn, vertices(g)[1:i]) + for nn in nnn + ℋ += J2 / 2, "S+", maybe_only(v), "S-", maybe_only(nn) + ℋ += J2 / 2, "S-", maybe_only(v), "S+", maybe_only(nn) + ℋ += J2, "Sz", maybe_only(v), "Sz", maybe_only(nn) + end + end + end + for (i, v) in enumerate(vertices(g)) + if !iszero(h[i]) + ℋ -= h[i], "Sz", maybe_only(v) + end + end + return ℋ +end + +""" +Next-to-nearest-neighbor Ising model (ZZX) on a general graph +""" +function ising(g::AbstractGraph; J1=-1.0, J2=0.0, h::Union{<:Real,Vector{<:Real}}=0) + h = _maybe_fill(h, nv(g)) + ℋ = OpSum() + if !iszero(J1) + for e in edges(g) + ℋ += J1, "Z", maybe_only(src(e)), "Z", maybe_only(dst(e)) + end + end + if !iszero(J2) + # TODO, more clever way of looping over next to nearest neighbors? + for (i, v) in enumerate(vertices(g)) + nnn = [neighbors(g, n) for n in neighbors(g, v)] + nnn = setdiff(Base.Iterators.flatten(nnn), neighbors(g, v)) + nnn = setdiff(nnn, vertices(g)[1:i]) + for nn in nnn + ℋ += J2, "Z", maybe_only(v), "Z", maybe_only(nn) + end + end + end + for (i, v) in enumerate(vertices(g)) + if !iszero(h[i]) + ℋ += h[i], "X", maybe_only(v) + end end return ℋ end + +""" +Random field J1-J2 Heisenberg model on a chain of length N +""" +heisenberg(N::Integer; kwargs...) = heisenberg(grid((N,)); kwargs...) + +""" +Next-to-nearest-neighbor Ising model (ZZX) on a chain of length N +""" +ising(N::Integer; kwargs...) = ising(grid((N,)); kwargs...) 
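+
+# Usage sketch (illustrative, not exercised by this patch): build model OpSums
+# on a tree or chain. `named_binary_tree` comes from NamedGraphs (re-exported
+# by this package); the parameter values here are arbitrary.
+#
+#   g = named_binary_tree(3)
+#   ℋ_tree = heisenberg(g; J1=1.0, J2=0.5, h=0.1)
+#   ℋ_chain = ising(8; J1=-1.0, h=0.2)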
diff --git a/src/requires/omeinsumcontractionorders.jl b/src/requires/omeinsumcontractionorders.jl index f9a96af1..c62fdabc 100644 --- a/src/requires/omeinsumcontractionorders.jl +++ b/src/requires/omeinsumcontractionorders.jl @@ -1,7 +1,7 @@ # OMEinsumContractionOrders wrapper for ITensors # Slicing is not supported, because it might require extra work to slice an `ITensor` correctly. -const ITensorList = Union{Vector{<:ITensor},Tuple{Vararg{<:ITensor}}} +const ITensorList = Union{Vector{ITensor},Tuple{Vararg{ITensor}}} # TODO: Replace with `inds(A::ITensor)` or `collect(inds(A::ITensor))` getid(index::Index) = index diff --git a/src/treetensornetworks/abstractprojttno.jl b/src/treetensornetworks/abstractprojttno.jl new file mode 100644 index 00000000..bb5622a4 --- /dev/null +++ b/src/treetensornetworks/abstractprojttno.jl @@ -0,0 +1,145 @@ +abstract type AbstractProjTTNO{V} end + +copy(::AbstractProjTTNO) = error("Not implemented") + +set_nsite(::AbstractProjTTNO, nsite) = error("Not implemented") + +# silly constructor wrapper +shift_position(::AbstractProjTTNO, pos) = error("Not implemented") + +make_environment!(::AbstractProjTTNO, psi, e) = error("Not implemented") + +underlying_graph(P::AbstractProjTTNO) = underlying_graph(P.H) + +pos(P::AbstractProjTTNO) = P.pos + +Graphs.edgetype(P::AbstractProjTTNO) = edgetype(underlying_graph(P)) + +on_edge(P::AbstractProjTTNO) = isa(pos(P), edgetype(P)) + +nsite(P::AbstractProjTTNO) = on_edge(P) ? 0 : length(pos(P)) + +function sites(P::AbstractProjTTNO{V}) where {V} + on_edge(P) && return V[] + return pos(P) +end + +function incident_edges(P::AbstractProjTTNO{V})::Vector{NamedEdge{V}} where {V} + on_edge(P) && return [pos(P), reverse(pos(P))] + edges = [ + [edgetype(P)(n => v) for n in setdiff(neighbors(underlying_graph(P), v), sites(P))] for + v in sites(P) + ] + return collect(Base.Iterators.flatten(edges)) +end + +function internal_edges(P::AbstractProjTTNO{V})::Vector{NamedEdge{V}} where {V} + on_edge(P) && return edgetype(P)[] + edges = [ + [edgetype(P)(v => n) for n in neighbors(underlying_graph(P), v) ∩ sites(P)] for + v in sites(P) + ] + return collect(Base.Iterators.flatten(edges)) +end + +environment(P::AbstractProjTTNO, edge::Pair) = environment(P, edgetype(P)(edge)) +function environment(P::AbstractProjTTNO{V}, edge::NamedEdge{V})::ITensor where {V} + return P.environments[edge] +end + +# there has to be a better way to do this... +function _separate_first(V::Vector) + sep = Base.Iterators.peel(V) + isnothing(sep) && return eltype(V)[], eltype(V)[] + return sep[1], collect(sep[2]) +end + +function _separate_first_two(V::Vector) + frst, rst = _separate_first(V) + scnd, rst = _separate_first(rst) + return frst, scnd, rst +end + +function contract(P::AbstractProjTTNO, v::ITensor)::ITensor + environments = ITensor[environment(P, edge) for edge in incident_edges(P)] + # manual heuristic for contraction order fixing: for each site in ProjTTNO, apply up to + # two environments, then TTNO tensor, then other environments + if on_edge(P) + itensor_map = environments + else + itensor_map = Union{ITensor,OneITensor}[] # TODO: will a Hamiltonian TTNO tensor ever be a OneITensor? 
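+    # contracting small environments before the high-rank TTNO tensor keeps
+    # the intermediate tensors from blowing up in rank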
+    for s in sites(P)
+      site_envs = filter(hascommoninds(P.H[s]), environments)
+      frst, scnd, rst = _separate_first_two(site_envs)
+      site_tensors = vcat(frst, scnd, P.H[s], rst)
+      append!(itensor_map, site_tensors)
+    end
+  end
+  # TODO: actually use optimal contraction sequence here
+  Hv = v
+  for it in itensor_map
+    Hv *= it
+  end
+  return Hv
+end
+
+function product(P::AbstractProjTTNO, v::ITensor)::ITensor
+  Pv = contract(P, v)
+  if order(Pv) != order(v)
+    error(
+      string(
+        "The order of the ProjTTNO-ITensor product P*v is not equal to the order of the ITensor v, ",
+        "this is probably due to an index mismatch.\nCommon reasons for this error: \n",
+        "(1) You are trying to multiply the ProjTTNO with the $(nsite(P))-site wave-function at the wrong position.\n",
+        "(2) `orthogonalize!` was called, changing the TTNS without updating the ProjTTNO.\n\n",
+        "P*v inds: $(inds(Pv)) \n\n",
+        "v inds: $(inds(v))",
+      ),
+    )
+  end
+  return noprime(Pv)
+end
+
+(P::AbstractProjTTNO)(v::ITensor) = product(P, v)
+
+function Base.eltype(P::AbstractProjTTNO)::Type
+  ElType = eltype(P.H[first(sites(P))])
+  for v in sites(P)
+    ElType = promote_type(ElType, eltype(P.H[v]))
+  end
+  for e in incident_edges(P)
+    ElType = promote_type(ElType, eltype(environment(P, e)))
+  end
+  return ElType
+end
+
+function Base.size(P::AbstractProjTTNO)::Tuple{Int,Int}
+  d = 1
+  for e in incident_edges(P)
+    for i in inds(environment(P, e))
+      plev(i) > 0 && (d *= dim(i))
+    end
+  end
+  for j in sites(P)
+    for i in inds(P.H[j])
+      plev(i) > 0 && (d *= dim(i))
+    end
+  end
+  return (d, d)
+end
+
+function position(
+  P::AbstractProjTTNO{V}, psi::TTNS{V}, pos::Union{Vector{<:V},NamedEdge{V}}
+) where {V}
+  # shift position
+  P = shift_position(P, pos)
+  # invalidate environments corresponding to internal edges
+  for e in internal_edges(P)
+    unset!(P.environments, e)
+  end
+  # make all environments surrounding new position
+  for e in incident_edges(P)
+    make_environment!(P, psi, e)
+  end
+  return P
+end
diff --git a/src/treetensornetworks/abstracttreetensornetwork.jl b/src/treetensornetworks/abstracttreetensornetwork.jl
new file mode 100644
index 00000000..963d4b5f
--- /dev/null
+++ b/src/treetensornetworks/abstracttreetensornetwork.jl
@@ -0,0 +1,370 @@
+# TODO: Replace `AbstractITensorNetwork` with a trait `IsTree`.
+abstract type AbstractTreeTensorNetwork{V} <: AbstractITensorNetwork{V} end
+
+function underlying_graph_type(G::Type{<:AbstractTreeTensorNetwork})
+  return underlying_graph_type(data_graph_type(G))
+end
+
+#
+# Field access
+#
+
+ITensorNetwork(ψ::AbstractTreeTensorNetwork) = ψ.itensor_network
+ortho_center(ψ::AbstractTreeTensorNetwork) = ψ.ortho_center
+
+function default_root_vertex(gs::AbstractGraph...)
+  # @assert all(is_tree.(gs))
+  return first(leaf_vertices(gs[end]))
+end
+
+#
+# Orthogonality center
+#
+
+isortho(ψ::AbstractTreeTensorNetwork) = isone(length(ortho_center(ψ)))
+
+function set_ortho_center(
+  ψ::AbstractTreeTensorNetwork{V}, new_center::Vector{<:V}
+) where {V}
+  return typeof(ψ)(itensor_network(ψ), new_center)
+end
+
+reset_ortho_center(ψ::AbstractTreeTensorNetwork) = set_ortho_center(ψ, vertices(ψ))
+
+#
+# Dense constructors
+#
+
+# construct from dense ITensor, using IndsNetwork of site indices
+function (::Type{TTNT})(
+  A::ITensor, is::IndsNetwork; ortho_center=default_root_vertex(is), kwargs...
+) where {TTNT<:AbstractTreeTensorNetwork} + for v in vertices(is) + @assert hasinds(A, is[v]) + end + @assert ortho_center ∈ vertices(is) + ψ = ITensorNetwork(is) + Ã = A + for e in post_order_dfs_edges(ψ, ortho_center) + left_inds = uniqueinds(is, e) + L, R = factorize(Ã, left_inds; tags=edge_tag(e), ortho="left", kwargs...) + l = commonind(L, R) + ψ[src(e)] = L + is[e] = [l] + Ã = R + end + ψ[ortho_center] = Ã + T = TTNT(ψ) + T = orthogonalize(T, ortho_center) + return T +end + +# construct from dense ITensor, using AbstractNamedGraph and vector of site indices +# TODO: remove if it doesn't turn out to be useful +function (::Type{TTNT})( + A::ITensor, sites::Vector, g::AbstractNamedGraph; vertex_order=vertices(g), kwargs... +) where {TTNT<:AbstractTreeTensorNetwork} + is = IndsNetwork(g; site_space=Dictionary(vertex_order, sites)) + return TTNT(A, is; kwargs...) +end + +# construct from dense array, using IndsNetwork +# TODO: probably remove this one, doesn't seem very useful +function (::Type{TTNT})( + A::AbstractArray{<:Number}, is::IndsNetwork; vertex_order=vertices(is), kwargs... +) where {TTNT<:AbstractTreeTensorNetwork} + sites = [is[v] for v in vertex_order] + return TTNT(itensor(A, sites...), is; kwargs...) +end + +# construct from dense array, using NamedDimGraph and vector of site indices +function (::Type{TTNT})( + A::AbstractArray{<:Number}, sites::Vector, args...; kwargs... +) where {TTNT<:AbstractTreeTensorNetwork} + return TTNT(itensor(A, sites...), sites, args...; kwargs...) +end + +# +# Orthogonalization +# + +function orthogonalize(ψ::AbstractTreeTensorNetwork{V}, root_vertex::V; kwargs...) where {V} + (isortho(ψ) && only(ortho_center(ψ)) == root_vertex) && return ψ + if isortho(ψ) + edge_list = edge_path(ψ, only(ortho_center(ψ)), root_vertex) + else + edge_list = post_order_dfs_edges(ψ, root_vertex) + end + for e in edge_list + ψ = orthogonalize(ψ, e) + end + return set_ortho_center(ψ, [root_vertex]) +end + +# For ambiguity error + +function orthogonalize(tn::AbstractTreeTensorNetwork, edge::AbstractEdge; kwargs...) + return typeof(tn)(orthogonalize(ITensorNetwork(tn), edge; kwargs...)) +end + +# +# Truncation +# + +function truncate( + ψ::AbstractTreeTensorNetwork; root_vertex::Tuple=default_root_vertex(ψ), kwargs... +) + for e in post_order_dfs_edges(ψ, root_vertex) + # always orthogonalize towards source first to make truncations controlled + ψ = orthogonalize(ψ, src(e)) + ψ = truncate(ψ, e; kwargs...) + ψ = set_ortho_center(ψ, [dst(e)]) + end + return ψ +end + +# For ambiguity error +function truncate(tn::AbstractTreeTensorNetwork, edge::AbstractEdge; kwargs...) + return typeof(tn)(truncate(ITensorNetwork(tn), edge; kwargs...)) +end + +# +# Contraction +# + +# TODO: decide on contraction order: reverse dfs vertices or forward dfs edges? +function contract( + ψ::AbstractTreeTensorNetwork{V}, root_vertex::V=default_root_vertex(ψ); kwargs... +) where {V} + ψ = copy(ψ) + # reverse post order vertices + traversal_order = reverse(post_order_dfs_vertices(ψ, root_vertex)) + return contract(ITensorNetwork(ψ); sequence=traversal_order, kwargs...) + # # forward post order edges + # ψ = copy(ψ) + # for e in post_order_dfs_edges(ψ, root_vertex) + # ψ = contract(ψ, e) + # end + # return ψ[root_vertex] +end + +function inner( + ϕ::AbstractTreeTensorNetwork, + ψ::AbstractTreeTensorNetwork; + root_vertex=default_root_vertex(ϕ, ψ), +) + ϕᴴ = sim(dag(ψ); sites=[]) + ψ = sim(ψ; sites=[]) + ϕψ = ϕᴴ ⊗ ψ + # TODO: find the largest tensor and use it as + # the `root_vertex`. 
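+  # sweep edges in post-order, merging the ψ layer (v, 2) into the ϕᴴ layer
+  # (v, 1) at each vertex so the doubled network collapses onto the root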
+  for e in post_order_dfs_edges(ψ, root_vertex)
+    if has_vertex(ϕψ, (src(e), 2))
+      ϕψ = contract(ϕψ, (src(e), 2) => (src(e), 1))
+    end
+    ϕψ = contract(ϕψ, (src(e), 1) => (dst(e), 1))
+    if has_vertex(ϕψ, (dst(e), 2))
+      ϕψ = contract(ϕψ, (dst(e), 2) => (dst(e), 1))
+    end
+  end
+  return ϕψ[root_vertex, 1][]
+end
+
+function norm(ψ::AbstractTreeTensorNetwork)
+  if isortho(ψ)
+    return norm(ψ[only(ortho_center(ψ))])
+  end
+  return √(abs(real(inner(ψ, ψ))))
+end
+
+#
+# Utility
+#
+
+function normalize!(ψ::AbstractTreeTensorNetwork)
+  c = ortho_center(ψ)
+  lognorm_ψ = lognorm(ψ)
+  if lognorm_ψ == -Inf
+    return ψ
+  end
+  z = exp(lognorm_ψ / length(c))
+  for v in c
+    ψ[v] ./= z
+  end
+  return ψ
+end
+
+function normalize(ψ::AbstractTreeTensorNetwork)
+  return normalize!(copy(ψ))
+end
+
+function _apply_to_orthocenter!(f, ψ::AbstractTreeTensorNetwork, x)
+  v = first(ortho_center(ψ))
+  ψ[v] = f(ψ[v], x)
+  return ψ
+end
+
+function _apply_to_orthocenter(f, ψ::AbstractTreeTensorNetwork, x)
+  return _apply_to_orthocenter!(f, copy(ψ), x)
+end
+
+Base.:*(ψ::AbstractTreeTensorNetwork, α::Number) = _apply_to_orthocenter(*, ψ, α)
+
+Base.:*(α::Number, ψ::AbstractTreeTensorNetwork) = ψ * α
+
+Base.:/(ψ::AbstractTreeTensorNetwork, α::Number) = _apply_to_orthocenter(/, ψ, α)
+
+Base.:-(ψ::AbstractTreeTensorNetwork) = -1 * ψ
+
+function LinearAlgebra.rmul!(ψ::AbstractTreeTensorNetwork, α::Number)
+  return _apply_to_orthocenter!(*, ψ, α)
+end
+
+function lognorm(ψ::AbstractTreeTensorNetwork)
+  if isortho(ψ)
+    return log(norm(ψ[only(ortho_center(ψ))]))
+  end
+  lognorm2_ψ = loginner(ψ, ψ)
+  rtol = eps(real(scalartype(ψ))) * 10
+  atol = rtol
+  if !IsApprox.isreal(lognorm2_ψ, Approx(; rtol=rtol, atol=atol))
+    @warn "log(norm²) is $lognorm2_ψ, which is not real up to a relative tolerance of $rtol and an absolute tolerance of $atol. Taking the real part, which may not be accurate."
+  end
+  return 0.5 * real(lognorm2_ψ)
+end
+
+function logdot(ψ1::TTNT, ψ2::TTNT; kwargs...) where {TTNT<:AbstractTreeTensorNetwork}
+  return loginner(ψ1, ψ2; kwargs...)
+end
+
+# TODO: stick with this traversal or find optimal contraction sequence?
+function loginner(
+  ψ1::TTNT, ψ2::TTNT; root_vertex=default_root_vertex(ψ1, ψ2)
+)::Number where {TTNT<:AbstractTreeTensorNetwork}
+  N = nv(ψ1)
+  if nv(ψ2) != N
+    throw(DimensionMismatch("inner: mismatched number of vertices $N and $(nv(ψ2))"))
+  end
+  ψ1dag = sim(dag(ψ1); sites=[])
+  traversal_order = reverse(post_order_dfs_vertices(ψ1, root_vertex))
+  check_hascommoninds(siteinds, ψ1dag, ψ2)
+
+  O = ψ1dag[root_vertex] * ψ2[root_vertex]
+
+  normO = norm(O)
+  log_inner_tot = log(normO)
+  O ./= normO
+
+  for v in traversal_order[2:end]
+    O = (O * ψ1dag[v]) * ψ2[v]
+    normO = norm(O)
+    log_inner_tot += log(normO)
+    O ./= normO
+  end
+
+  if !isreal(O[]) || real(O[]) < 0
+    log_inner_tot += log(complex(O[]))
+  end
+  return log_inner_tot
+end
+
+function _add_maxlinkdims(ψs::AbstractTreeTensorNetwork...)
+  maxdims = Dictionary{edgetype(ψs[1]),Int}()
+  for e in edges(ψs[1])
+    maxdims[e] = sum(ψ -> linkdim(ψ, e), ψs)
+    maxdims[reverse(e)] = maxdims[e]
+  end
+  return maxdims
+end
+
+# TODO: actually implement this?
+function Base.:+(
+  ::ITensors.Algorithm"densitymatrix",
+  ψs::TTNT...;
+  cutoff=1e-15,
+  root_vertex=default_root_vertex(ψs...),
+  kwargs...,
+) where {TTNT<:AbstractTreeTensorNetwork}
+  return error("Not implemented (yet) for trees.")
+end
+
+function Base.:+(
+  ::ITensors.Algorithm"directsum", ψs::TTNT...; root_vertex=default_root_vertex(ψs...)
+) where {TTNT<:AbstractTreeTensorNetwork} + @assert all(ψ -> nv(first(ψs)) == nv(ψ), ψs) + + # Output state + ϕ = TTNS(siteinds(ψs[1])) + + vs = post_order_dfs_vertices(ϕ, root_vertex) + es = post_order_dfs_edges(ϕ, root_vertex) + link_space = Dict{edgetype(ϕ),Index}() + + for v in reverse(vs) + edges = filter(e -> dst(e) == v || src(e) == v, es) + dims_in = findall(e -> dst(e) == v, edges) + dim_out = findfirst(e -> src(e) == v, edges) + + ls = [Tuple(only(linkinds(ψ, e)) for e in edges) for ψ in ψs] + ϕv, lv = directsum((ψs[i][v] => ls[i] for i in 1:length(ψs))...; tags=tags.(first(ls))) + for din in dims_in + link_space[edges[din]] = lv[din] + end + if !isnothing(dim_out) + ϕv = replaceind(ϕv, lv[dim_out] => dag(link_space[edges[dim_out]])) + end + + ϕ[v] = ϕv + end + return convert(TTNT, ϕ) +end + +# TODO: switch default algorithm once more are implemented +function Base.:+( + ψs::AbstractTreeTensorNetwork...; alg=ITensors.Algorithm"directsum"(), kwargs... +) + return +(ITensors.Algorithm(alg), ψs...; kwargs...) +end + +Base.:+(ψ::AbstractTreeTensorNetwork) = ψ + +ITensors.add(ψs::AbstractTreeTensorNetwork...; kwargs...) = +(ψs...; kwargs...) + +function Base.:-(ψ1::AbstractTreeTensorNetwork, ψ2::AbstractTreeTensorNetwork; kwargs...) + return +(ψ1, -ψ2; kwargs...) +end + +function ITensors.add(A::T, B::T; kwargs...) where {T<:AbstractTreeTensorNetwork} + return +(A, B; kwargs...) +end + +function permute( + ψ::TTNT, ::Tuple{typeof(linkind),typeof(siteinds),typeof(linkind)} +)::TTNT where {TTNT<:AbstractTreeTensorNetwork} + ψ̃ = copy(ψ) + for v in vertices(ψ) + ls = [only(linkinds(ψ, n => v)) for n in neighbors(ψ, v)] # TODO: won't work for multiple indices per link... + ss = sort(Tuple(siteinds(ψ, v)); by=plev) + setindex_preserve_graph!( + ψ̃, permute(ψ[v], filter(!isnothing, (ls[1], ss..., ls[2:end]...))), v + ) + end + ψ̃ = set_ortho_center(ψ̃, ortho_center(ψ)) + return ψ̃ +end + +function Base.isapprox( + x::AbstractTreeTensorNetwork, + y::AbstractTreeTensorNetwork; + atol::Real=0, + rtol::Real=Base.rtoldefault( + LinearAlgebra.promote_leaf_eltypes(x), LinearAlgebra.promote_leaf_eltypes(y), atol + ), +) + d = norm(x - y) + if isfinite(d) + return d <= max(atol, rtol * max(norm(x), norm(y))) + else + error("In `isapprox(x::TTNS, y::TTNS)`, `norm(x - y)` is not finite") + end +end diff --git a/src/treetensornetworks/opsum_to_ttno.jl b/src/treetensornetworks/opsum_to_ttno.jl new file mode 100644 index 00000000..0d4bc9ad --- /dev/null +++ b/src/treetensornetworks/opsum_to_ttno.jl @@ -0,0 +1,589 @@ +# convert ITensors.OpSum to TreeTensorNetworkOperator + +# TODO: fix symbolic SVD compression for certain combinations of long-range interactions! 
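+
+# Usage sketch (illustrative): convert an OpSum on a tree to a TTNO. Assumes
+# the `siteinds(::String, ::AbstractGraph)` convenience constructor; the root
+# defaults to a leaf vertex, as required by the constructor below.
+#
+#   g = named_binary_tree(3)
+#   s = siteinds("S=1/2", g)
+#   H = TTNO(heisenberg(g; J1=1.0, J2=0.5), s) # defaults to the :fsm backend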
+ +# +# Utility methods +# + +# linear ordering of vertices in tree graph relative to chosen root +function find_index_in_tree(site, g::AbstractGraph, root_vertex) + ordering = post_order_dfs_vertices(g, root_vertex) + return findfirst(x -> x == site, ordering) +end + +function find_index_in_tree(o::Op, g::AbstractGraph, root_vertex) + return find_index_in_tree(ITensors.site(o), g::AbstractGraph, root_vertex) +end + +# determine 'support' of product operator on tree graph +function span(t::Scaled{C,Prod{Op}}, g::AbstractGraph) where {C} + spn = eltype(g)[] + nterms = length(t) + for i in 1:nterms, j in i:nterms + path = vertex_path(g, ITensors.site(t[i]), ITensors.site(t[j])) + spn = union(spn, path) + end + return spn +end + +# determine whether an operator string crosses a given graph vertex +function crosses_vertex(t::Scaled{C,Prod{Op}}, g::AbstractGraph, v) where {C} + return v ∈ span(t, g) +end + +# annoying thing to allow sparse arrays with ITensors.Op entries +# TODO: get rid of this +function Base.zero(::Type{Scaled{C,Prod{Op}}}) where {C} + return zero(C) * Prod([Op("0")]) +end +Base.zero(t::Scaled) = zero(typeof(t)) + +# +# Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo.jl +# + +""" + finite_state_machine(os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V) where {C,V} + +Finite state machine generator for ITensors.OpSum Hamiltonian defined on a tree graph. The +site Index graph must be a tree graph, and the chosen root must be a leaf vertex of this +tree. Returns a DataGraph of SparseArrayKit.SparseArrays +""" +function finite_state_machine( + os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V +) where {C,V} + os = deepcopy(os) + os = sorteachterm(os, sites, root_vertex) + os = ITensors.sortmergeterms(os) + + ValType = ITensors.determineValType(ITensors.terms(os)) + + # sparse symbolic representation of the TTNO Hamiltonian as a DataGraph of SparseArrays + sparseTTNO = DataGraph{V,SparseArray{Scaled{ValType,Prod{Op}}}}(underlying_graph(sites)) + + # some things to keep track of + vs = post_order_dfs_vertices(sites, root_vertex) # store vertices in fixed ordering relative to root + es = post_order_dfs_edges(sites, root_vertex) # store edges in fixed ordering relative to root + ranks = Dict(v => degree(sites, v) for v in vs) # rank of every TTNO tensor in network + linkmaps = Dict(e => Dict{Prod{Op},Int}() for e in es) # map from term in Hamiltonian to edge channel index for every edge + site_coef_done = Prod{Op}[] # list of Hamiltonian terms for which the coefficient has been added to a site factor + edge_orders = DataGraph{V,Vector{edgetype(sites)}}(underlying_graph(sites)) # relate indices of sparse TTNO tensor to incident graph edges for each site + + for v in vs + # collect all nontrivial entries of the TTNO tensor at vertex v + entries = Tuple{MVector{ranks[v],Int},Scaled{C,Prod{Op}}}[] # MVector might be overkill... + + # for every vertex, find all edges that contain this vertex + edges = filter(e -> dst(e) == v || src(e) == v, es) + # use the corresponding ordering as index order for tensor elements at this site + edge_orders[v] = edges + dims_in = findall(e -> dst(e) == v, edges) + edges_in = edges[dims_in] + dim_out = findfirst(e -> src(e) == v, edges) + edge_out = (isnothing(dim_out) ? 
[] : edges[dim_out]) + + # sanity check, leaves only have single incoming or outgoing edge + @assert !isempty(dims_in) || !isnothing(dim_out) + (isempty(dims_in) || isnothing(dim_out)) && @assert is_leaf(sites, v) + + for term in os + # loop over OpSum and pick out terms that act on current vertex + crosses_vertex(term, sites, v) || continue + + # for every incoming edge, filter out factors that come in from the direction of + # that edge + incoming = Dict( + e => filter(t -> e ∈ edge_path(sites, ITensors.site(t), v), ITensors.terms(term)) + for e in edges_in + ) + # filter out factor that acts on current vertex + onsite = filter(t -> (ITensors.site(t) == v), ITensors.terms(term)) + # filter out factors that go out along the outgoing edge + outgoing = filter( + t -> edge_out ∈ edge_path(sites, v, ITensors.site(t)), ITensors.terms(term) + ) + + # translate into tensor entry + T_inds = fill(-1, ranks[v]) + for din in dims_in + if !isempty(incoming[edges[din]]) + T_inds[din] = ITensors.posInLink!(linkmaps[edges[din]], ITensors.argument(term)) # get incoming channel + end + end + if !isnothing(dim_out) && !isempty(outgoing) + T_inds[dim_out] = ITensors.posInLink!(linkmaps[edge_out], ITensors.argument(term)) # add outgoing channel + end + # if term starts at this site, add its coefficient as a site factor + site_coef = one(C) + if (isempty(dims_in) || all(T_inds[dims_in] .== -1)) && + ITensors.argument(term) ∉ site_coef_done + site_coef = ITensors.coefficient(term) + push!(site_coef_done, ITensors.argument(term)) + end + # add onsite identity for interactions passing through vertex + if isempty(onsite) + if !ITensors.using_auto_fermion() && isfermionic(outgoing, sites) # TODO: check if fermions are actually supported here! + push!(onsite, Op("F", v)) + else + push!(onsite, Op("Id", v)) + end + end + # save indices and value of sparse tensor entry + el = (MVector{ranks[v]}(T_inds), site_coef * Prod(onsite)) + push!(entries, el) + end + + # handle start and end of operator terms and convert to sparse array + linkdims = Tuple([ + (isempty(linkmaps[e]) ? 0 : maximum(values(linkmaps[e]))) + 2 for e in edges + ]) + T = SparseArray{Scaled{ValType,Prod{Op}},ranks[v]}(undef, linkdims) + for (T_inds, t) in entries + if !isempty(dims_in) + start_dims = filter(d -> T_inds[d] == -1, dims_in) + normal_dims = filter(d -> T_inds[d] != -1, dims_in) + T_inds[start_dims] .= 1 # always start in first channel + T_inds[normal_dims] .+= 1 # shift regular channels + end + if !isnothing(dim_out) + if T_inds[dim_out] == -1 + T_inds[dim_out] = linkdims[dim_out] # always end in last channel + else + T_inds[dim_out] += 1 # shift regular channel + end + end + T[T_inds...] = t + end + # add starting and ending identity operators + if !isnothing(dim_out) + T[ones(Int, ranks[v])...] = 1 * Prod([Op("Id", v)]) # starting identity is easy + end + # ending identities not so much + idT_end_inds = ones(Int, ranks[v]) + if !isnothing(dim_out) + idT_end_inds[dim_out] = linkdims[dim_out] + end + for din in dims_in + idT_end_inds[din] = linkdims[din] + T[idT_end_inds...] = 1 * Prod([Op("Id", v)]) + idT_end_inds[din] = 1 # reset + end + sparseTTNO[v] = T + end + return sparseTTNO, edge_orders +end + +""" + fsmTTNO(os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V, kwargs...) where {C,V} + +Construct a dense TreeTensorNetworkOperator from sparse finite state machine +represenatation, without compression. 
+""" +function fsmTTNO( + os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V +)::TTNO where {C,V} + ValType = ITensors.determineValType(ITensors.terms(os)) + # start from finite state machine + fsm, edge_orders = finite_state_machine(os, sites, root_vertex) + # some trickery to get link dimension for every edge + link_space = Dict{edgetype(sites),Index}() + function get_linkind!(link_space, e) + if !haskey(link_space, e) + d = findfirst(x -> (x == e || x == reverse(e)), edge_orders[src(e)]) + link_space[e] = Index(size(fsm[src(e)], d), edge_tag(e)) + end + return link_space[e] + end + # compress finite state machine into dense form + H = TTNO(sites) + for v in vertices(sites) + linkinds = [get_linkind!(link_space, e) for e in edge_orders[v]] + linkdims = dim.(linkinds) + H[v] = ITensor() + for (T_ind, t) in nonzero_pairs(fsm[v]) + (abs(coefficient(t)) > eps()) || continue + T = zeros(ValType, linkdims...) + ct = convert(ValType, coefficient(t)) + T[T_ind] += ct + T = itensor(T, linkinds) + H[v] += T * computeSiteProd(sites, ITensors.argument(t)) + end + end + return H +end + +# this is broken for certain combinations of longer-range interactions, no idea why... +""" + svdTTNO(os::OpSum{C}, sites::IndsNetwork{V<:Index}, root_vertex::V, kwargs...) where {C,V} + +Construct a dense TreeTensorNetworkOperator from a symbolic OpSum representation of a +Hamiltonian, compressing shared interaction channels. +""" +function svdTTNO( + os::OpSum{C}, sites::IndsNetwork{VT,<:Index}, root_vertex::VT; kwargs... +)::TTNO where {C,VT} + mindim::Int = get(kwargs, :mindim, 1) + maxdim::Int = get(kwargs, :maxdim, 10000) + cutoff::Float64 = get(kwargs, :cutoff, 1E-15) + + ValType = ITensors.determineValType(ITensors.terms(os)) + + # some things to keep track of + vs = post_order_dfs_vertices(sites, root_vertex) # store vertices in fixed ordering relative to root + es = post_order_dfs_edges(sites, root_vertex) # store edges in fixed ordering relative to root + ranks = Dict(v => degree(sites, v) for v in vs) # rank of every TTNO tensor in network + Vs = Dict(e => Matrix{ValType}(undef, 1, 1) for e in es) # link isometries for SVD compression of TTNO + leftmaps = Dict(e => Dict{Vector{Op},Int}() for e in es) # map from term in Hamiltonian to edge left channel index for every edge + rightmaps = Dict(e => Dict{Vector{Op},Int}() for e in es) # map from term in Hamiltonian to edge right channel index for every edge + leftbond_coefs = Dict(e => ITensors.MatElem{ValType}[] for e in es) # bond coefficients for left edge channels + site_coef_done = Prod{Op}[] # list of terms for which the coefficient has been added to a site factor + bond_coef_done = Dict(v => Prod{Op}[] for v in vs) # list of terms for which the coefficient has been added to a bond matrix for each vertex + + # temporary symbolic representation of TTNO Hamiltonian + tempTTNO = Dict(v => Tuple{MVector{ranks[v],Int},Scaled{C,Prod{Op}}}[] for v in vs) + + # build compressed finite state machine representation + for v in vs + # for every vertex, find all edges that contain this vertex + edges = filter(e -> dst(e) == v || src(e) == v, es) + # use the corresponding ordering as index order for tensor elements at this site + dims_in = findall(e -> dst(e) == v, edges) + edges_in = edges[dims_in] + dim_out = findfirst(e -> src(e) == v, edges) + edge_out = (isnothing(dim_out) ? 
[] : edges[dim_out]) + + # sanity check, leaves only have single incoming or outgoing edge + @assert !isempty(dims_in) || !isnothing(dim_out) + (isempty(dims_in) || isnothing(dim_out)) && @assert is_leaf(sites, v) + + for term in os + # loop over OpSum and pick out terms that act on current vertex + crosses_vertex(term, sites, v) || continue + + # for every incoming edge, filter out factors that come in from the direction of + # that edge + incoming = Dict( + e => filter(t -> e ∈ edge_path(sites, ITensors.site(t), v), ITensors.terms(term)) + for e in edges_in + ) + # filter out factor that acts on current vertex + onsite = filter(t -> (ITensors.site(t) == v), ITensors.terms(term)) + # filter out factors that go out along the outgoing edge + outgoing = filter( + t -> edge_out ∈ edge_path(sites, v, ITensors.site(t)), ITensors.terms(term) + ) + + # translate into tensor entry + T_inds = fill(-1, ranks[v]) + # channel merging still not working properly somehow! + for din in dims_in + bond_row = -1 + bond_col = -1 + # treat factors coming in along current edge as 'left' + left = incoming[edges[din]] + other_dims_in = filter(dd -> dd != din, dims_in) + other_incoming = [incoming[edges[dd]] for dd in other_dims_in] + # treat all other factors as 'right' + right = vcat(other_incoming..., outgoing) + if !isempty(left) + bond_row = ITensors.posInLink!(leftmaps[edges[din]], left) + bond_col = ITensors.posInLink!(rightmaps[edges[din]], vcat(onsite, right)) # get incoming channel + bond_coef = one(ValType) + if ITensors.argument(term) ∉ bond_coef_done[v] + bond_coef *= convert(ValType, ITensors.coefficient(term)) + push!(bond_coef_done[v], ITensors.argument(term)) + end + push!(leftbond_coefs[edges[din]], ITensors.MatElem(bond_row, bond_col, bond_coef)) + end + T_inds[din] = bond_col + end + if !isnothing(dim_out) && !isempty(outgoing) + T_inds[dim_out] = ITensors.posInLink!(rightmaps[edge_out], outgoing) # add outgoing channel + end + # if term starts at this site, add its coefficient as a site factor + site_coef = one(C) + if (isempty(dims_in) || all(T_inds[dims_in] .== -1)) && + ITensors.argument(term) ∉ site_coef_done + site_coef = ITensors.coefficient(term) + push!(site_coef_done, ITensors.argument(term)) + end + # add onsite identity for interactions passing through vertex + if isempty(onsite) + if !ITensors.using_auto_fermion() && isfermionic(outgoing, sites) # TODO: check if fermions are actually supported here! + push!(onsite, Op("F", v)) + else + push!(onsite, Op("Id", v)) + end + end + # save indices and value of symbolic tensor entry + el = (MVector{ranks[v]}(T_inds), site_coef * Prod(onsite)) + push!(tempTTNO[v], el) + end + ITensors.remove_dups!(tempTTNO[v]) + # handle symbolic truncation (still something wrong with this) + for din in dims_in + if !isempty(leftbond_coefs[edges[din]]) + M = ITensors.toMatrix(leftbond_coefs[edges[din]]) + U, S, V = svd(M) + P = S .^ 2 + truncate!(P; maxdim=maxdim, cutoff=cutoff, mindim=mindim) + tdim = length(P) + nc = size(M, 2) + Vs[edges[din]] = Matrix{ValType}(V[1:nc, 1:tdim]) + end + end + end + + # compress this tempTTNO representation into dense form + + link_space = dictionary( + [e => Index((isempty(rightmaps[e]) ? 0 : size(Vs[e], 2)) + 2, edge_tag(e)) for e in es] + ) + + H = TTNO(sites) + + for v in vs # can I merge this with previous loop? no, need all need the Vs... 
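+    # second pass: assemble the dense ITensor at each vertex, rotating the
+    # symbolic channel indices through the per-edge isometries collected in `Vs`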
+ + # redo the whole thing like before + edges = filter(e -> dst(e) == v || src(e) == v, es) + dims_in = findall(e -> dst(e) == v, edges) + dim_out = findfirst(e -> src(e) == v, edges) + + # slice isometries at this vertex + Vv = [Vs[e] for e in edges] + + linkinds = [link_space[e] for e in edges] + linkdims = dim.(linkinds) + + H[v] = ITensor() + + for (T_inds, t) in tempTTNO[v] + (abs(coefficient(t)) > eps()) || continue + T = zeros(ValType, linkdims...) + ct = convert(ValType, coefficient(t)) + terminal_dims = findall(d -> T_inds[d] == -1, 1:ranks[v]) # directions in which term starts or ends + normal_dims = findall(d -> T_inds[d] ≠ -1, 1:ranks[v]) # normal dimensions, do truncation thingies + T_inds[terminal_dims] .= 1 # start in channel 1 + if !isnothing(dim_out) && dim_out ∈ terminal_dims + T_inds[dim_out] = linkdims[dim_out] # end in channel linkdims[d] for each dimension d + end + if isempty(normal_dims) + T[T_inds...] += ct # on-site term + else + # abracadabra? + dim_ranges = Tuple(size(Vv[d], 2) for d in normal_dims) + for c in CartesianIndices(dim_ranges) + z = ct + temp_inds = copy(T_inds) + for (i, d) in enumerate(normal_dims) + V_factor = Vv[d][T_inds[d], c[i]] + z *= (d ∈ dims_in ? conj(V_factor) : V_factor) + temp_inds[d] = 1 + c[i] + end + T[temp_inds...] += z + end + end + T = itensor(T, linkinds) + H[v] += T * computeSiteProd(sites, ITensors.argument(t)) + end + + # add starting and ending identity operators + idT = zeros(ValType, linkdims...) + if !isnothing(dim_out) + idT[ones(Int, ranks[v])...] = 1.0 # starting identity is easy + end + # ending identities not so much + idT_end_inds = ones(Int, ranks[v]) + if !isnothing(dim_out) + idT_end_inds[dim_out] = linkdims[dim_out] + end + for din in dims_in + idT_end_inds[din] = linkdims[din] + idT[idT_end_inds...] = 1 + idT_end_inds[din] = 1 # reset + end + T = itensor(idT, linkinds) + H[v] += T * computeSiteProd(sites, Prod([Op("Id", v)])) + end + + return H +end + +# +# Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo_generic.jl +# + +# TODO: fix quantum number and fermion support, definitely broken + +# needed an extra `only` compared to ITensors version since IndsNetwork has Vector{<:Index} +# as vertex data +function isfermionic(t::Vector{Op}, sites::IndsNetwork{V,<:Index}) where {V} + p = +1 + for op in t + if has_fermion_string(ITensors.name(op), only(sites[ITensors.site(op)])) + p *= -1 + end + end + return (p == -1) +end + +# only(site(ops[1])) in ITensors breaks for Tuple site labels, had to drop the only +function computeSiteProd(sites::IndsNetwork{V,<:Index}, ops::Prod{Op})::ITensor where {V} + v = ITensors.site(ops[1]) + T = op(sites[v], ITensors.which_op(ops[1]); ITensors.params(ops[1])...) + for j in 2:length(ops) + (ITensors.site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd") + opj = op(sites[v], ITensors.which_op(ops[j]); ITensors.params(ops[j])...) + T = product(T, opj) + end + return T +end + +# changed `isless_site` to use tree vertex ordering relative to root +function sorteachterm( + os::OpSum, sites::IndsNetwork{V,<:Index}, root_vertex::V +) where {V} + os = copy(os) + findpos(op::Op) = find_index_in_tree(op, sites, root_vertex) + isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) + N = nv(sites) + for n in eachindex(os) + t = os[n] + Nt = length(t) + + if !all(map(v -> has_vertex(sites, v), ITensors.sites(t))) + error( + "The OpSum contains a term $t that does not have support on the underlying graph." 
+ ) + end + + prevsite = N + 1 #keep track of whether we are switching + #to a new site to make sure F string + #is only placed at most once for each site + + # Sort operators in t by site order, + # and keep the permutation used, perm, for analysis below + perm = Vector{Int}(undef, Nt) + sortperm!(perm, ITensors.terms(t); alg=InsertionSort, lt=isless_site) + + t = coefficient(t) * Prod(ITensors.terms(t)[perm]) + + # Identify fermionic operators, + # zeroing perm for bosonic operators, + # and inserting string "F" operators + parity = +1 + for n in Nt:-1:1 + currsite = ITensors.site(t[n]) + fermionic = has_fermion_string( + ITensors.which_op(t[n]), only(sites[ITensors.site(t[n])]) + ) + if !ITensors.using_auto_fermion() && (parity == -1) && (currsite < prevsite) + error("No verified fermion support for automatic TTNO constructor!") # no verified support, just throw error + # Put local piece of Jordan-Wigner string emanating + # from fermionic operators to the right + # (Remaining F operators will be put in by svdMPO) + terms(t)[n] = Op("$(ITensors.which_op(t[n])) * F", only(ITensors.site(t[n]))) + end + prevsite = currsite + + if fermionic + error("No verified fermion support for automatic TTNO constructor!") # no verified support, just throw error + parity = -parity + else + # Ignore bosonic operators in perm + # by zeroing corresponding entries + perm[n] = 0 + end + end + if parity == -1 + error("Parity-odd fermionic terms not yet supported by AutoTTNO") + end + + # Keep only fermionic op positions (non-zero entries) + filter!(!iszero, perm) + # and account for anti-commuting, fermionic operators + # during above sort; put resulting sign into coef + t *= ITensors.parity_sign(perm) + ITensors.terms(os)[n] = t + end + return os +end + +""" + TTNO(os::OpSum, sites::IndsNetwork{<:Index}; kwargs...) + TTNO(eltype::Type{<:Number}, os::OpSum, sites::IndsNetwork{<:Index}; kwargs...) + +Convert an OpSum object `os` to a TreeTensorNetworkOperator, with indices given by `sites`. +""" +function TTNO( + os::OpSum, + sites::IndsNetwork{V,<:Index}; + root_vertex::V=default_root_vertex(sites), + splitblocks=false, + method::Symbol=:fsm, # default to construction from finite state machine until svdTTNO is fixed + trunc=false, + kwargs..., +)::TTNO where {V} + length(ITensors.terms(os)) == 0 && error("OpSum has no terms") + is_tree(sites) || error("Site index graph must be a tree.") + is_leaf(sites, root_vertex) || error("Tree root must be a leaf vertex.") + + os = deepcopy(os) + os = sorteachterm(os, sites, root_vertex) + os = ITensors.sortmergeterms(os) # not exported + + if hasqns(first(first(vertex_data(sites)))) + error("No verified quantum number support for automatic TTNO constructor!") # no verified support, just throw error + end + if method == :svd + @warn "Symbolic SVD compression not working for long-range interactions." # add warning until this is fixed + T = svdTTNO(os, sites, root_vertex; kwargs...) + elseif method == :fsm + T = fsmTTNO(os, sites, root_vertex) + end + # add option for numerical truncation, but throw warning as this can fail sometimes + if trunc + @warn "Naive numerical truncation of TTNO Hamiltonian may fail for larger systems." 
+ # see https://github.com/ITensor/ITensors.jl/issues/526 + lognormT = lognorm(T) + T /= exp(lognormT / nv(T)) # TODO: fix broadcasting for in-place assignment + truncate!(T; root_vertex, cutoff=1e-15) + T *= exp(lognormT / nv(T)) + end + if splitblocks + error("splitblocks not yet implemented for AbstractTreeTensorNetwork.") + T = ITensors.splitblocks(linkinds, T) # TODO: make this work + end + return T +end + +# Conversion from other formats +function TTNO(o::Op, s::IndsNetwork; kwargs...) + return TTNO(OpSum{Float64}() + o, s; kwargs...) +end + +function TTNO(o::Scaled{C,Op}, s::IndsNetwork; kwargs...) where {C} + return TTNO(OpSum{C}() + o, s; kwargs...) +end + +function TTNO(o::Sum{Op}, s::IndsNetwork; kwargs...) + return TTNO(OpSum{Float64}() + o, s; kwargs...) +end + +function TTNO(o::Prod{Op}, s::IndsNetwork; kwargs...) + return TTNO(OpSum{Float64}() + o, s; kwargs...) +end + +function TTNO(o::Scaled{C,Prod{Op}}, s::IndsNetwork; kwargs...) where {C} + return TTNO(OpSum{C}() + o, s; kwargs...) +end + +function TTNO(o::Sum{Scaled{C,Op}}, s::IndsNetwork; kwargs...) where {C} + return TTNO(OpSum{C}() + o, s; kwargs...) +end + +# Catch-all for leaf eltype specification +function TTNO(eltype::Type{<:Number}, os, sites::IndsNetwork; kwargs...) + return NDTensors.convert_scalartype(eltype, TTNO(os, sites; kwargs...)) +end diff --git a/src/treetensornetworks/projttno.jl b/src/treetensornetworks/projttno.jl new file mode 100644 index 00000000..c9a4d876 --- /dev/null +++ b/src/treetensornetworks/projttno.jl @@ -0,0 +1,57 @@ +""" +ProjTTNO +""" +struct ProjTTNO{V} <: AbstractProjTTNO{V} + pos::Union{Vector{<:V},NamedEdge{V}} # TODO: cleanest way to specify effective Hamiltonian position? + H::TTNO{V} + environments::Dictionary{NamedEdge{V},ITensor} +end + +function ProjTTNO(H::TTNO) + return ProjTTNO(vertices(H), H, Dictionary{edgetype(H),ITensor}()) +end + +copy(P::ProjTTNO) = ProjTTNO(P.pos, copy(P.H), copy(P.environments)) + +# trivial if we choose to specify position as above; only kept to allow using alongside +# ProjMPO +function set_nsite(P::ProjTTNO, nsite) + return P +end + +function shift_position(P::ProjTTNO, pos) + return ProjTTNO(pos, P.H, P.environments) +end + +function make_environment!(P::ProjTTNO{V}, psi::TTNS{V}, e::NamedEdge{V})::ITensor where {V} + # invalidate environment for opposite edge direction if necessary + reverse(e) ∈ incident_edges(P) || unset!(P.environments, reverse(e)) + # do nothing if valid environment already present + if haskey(P.environments, e) + env = environment(P, e) + else + if is_leaf(underlying_graph(P), src(e)) + # leaves are easy + env = psi[src(e)] * P.H[src(e)] * dag(prime(psi[src(e)])) + else + # construct by contracting neighbors + neighbor_envs = ITensor[] + for n in setdiff(neighbors(underlying_graph(P), src(e)), [dst(e)]) + push!(neighbor_envs, make_environment!(P, psi, edgetype(P)(n, src(e)))) + end + # manually heuristic for contraction order: two environments, site tensors, then + # other environments + frst, scnd, rst = _separate_first_two(neighbor_envs) + itensor_map = vcat(psi[src(e)], frst, scnd, P.H[src(e)], dag(prime(psi[src(e)])), rst) + # TODO: actually use optimal contraction sequence here + env = reduce(*, itensor_map) + end + # cache + set!(P.environments, e, env) + end + @assert( + hascommoninds(environment(P, e), psi[src(e)]), + "Something went wrong, probably re-orthogonalized this edge in the same direction twice!" 
+
+function make_environment!(P::ProjTTNO{V}, psi::TTNS{V}, e::NamedEdge{V})::ITensor where {V}
+  # invalidate the environment for the opposite edge direction if necessary
+  reverse(e) ∈ incident_edges(P) || unset!(P.environments, reverse(e))
+  # do nothing if a valid environment is already present
+  if haskey(P.environments, e)
+    env = environment(P, e)
+  else
+    if is_leaf(underlying_graph(P), src(e))
+      # leaves are easy
+      env = psi[src(e)] * P.H[src(e)] * dag(prime(psi[src(e)]))
+    else
+      # construct by contracting neighbors
+      neighbor_envs = ITensor[]
+      for n in setdiff(neighbors(underlying_graph(P), src(e)), [dst(e)])
+        push!(neighbor_envs, make_environment!(P, psi, edgetype(P)(n, src(e))))
+      end
+      # manual heuristic for contraction order: two environments, site tensors, then
+      # the remaining environments
+      frst, scnd, rst = _separate_first_two(neighbor_envs)
+      itensor_map = vcat(psi[src(e)], frst, scnd, P.H[src(e)], dag(prime(psi[src(e)])), rst)
+      # TODO: actually use optimal contraction sequence here
+      env = reduce(*, itensor_map)
+    end
+    # cache
+    set!(P.environments, e, env)
+  end
+  @assert(
+    hascommoninds(environment(P, e), psi[src(e)]),
+    "Something went wrong, probably re-orthogonalized this edge in the same direction twice!"
+  )
+  return env
+end
diff --git a/src/treetensornetworks/projttno_apply.jl b/src/treetensornetworks/projttno_apply.jl
new file mode 100644
index 00000000..82d99a9d
--- /dev/null
+++ b/src/treetensornetworks/projttno_apply.jl
@@ -0,0 +1,55 @@
+struct ProjTTNOApply{V} <: AbstractProjTTNO{V}
+  pos::Union{Vector{<:V},NamedEdge{V}}
+  psi0::TTNS{V}
+  H::TTNO{V}
+  environments::Dictionary{NamedEdge{V},ITensor}
+end
+
+function ProjTTNOApply(psi0::TTNS, H::TTNO)
+  return ProjTTNOApply(vertextype(H)[], psi0, H, Dictionary{edgetype(H),ITensor}())
+end
+
+function copy(P::ProjTTNOApply)
+  return ProjTTNOApply(P.pos, copy(P.psi0), copy(P.H), copy(P.environments))
+end
+
+function set_nsite(P::ProjTTNOApply, nsite)
+  return P
+end
+
+function shift_position(P::ProjTTNOApply, pos)
+  return ProjTTNOApply(pos, P.psi0, P.H, P.environments)
+end
+
+function make_environment!(
+  P::ProjTTNOApply{V}, psi::TTNS{V}, e::NamedEdge{V}
+)::ITensor where {V}
+  # invalidate the environment for the opposite edge direction if necessary
+  reverse(e) ∈ incident_edges(P) || unset!(P.environments, reverse(e))
+  # do nothing if a valid environment is already present
+  if haskey(P.environments, e)
+    env = environment(P, e)
+  else
+    if is_leaf(underlying_graph(P), src(e))
+      # leaves are easy
+      env = P.psi0[src(e)] * P.H[src(e)] * dag(psi[src(e)])
+    else
+      # construct by contracting neighbors
+      neighbor_envs = ITensor[]
+      for n in setdiff(neighbors(underlying_graph(P), src(e)), [dst(e)])
+        push!(neighbor_envs, make_environment!(P, psi, edgetype(P)(n, src(e))))
+      end
+      # manual heuristic for contraction order: two environments, site tensors, then
+      # the remaining environments
+      frst, scnd, rst = _separate_first_two(neighbor_envs)
+      itensor_map = vcat(P.psi0[src(e)], frst, scnd, P.H[src(e)], dag(psi[src(e)]), rst)
+      # TODO: actually use optimal contraction sequence here
+      env = reduce(*, itensor_map)
+    end
+    # cache
+    set!(P.environments, e, env)
+  end
+  @assert(
+    hascommoninds(environment(P, e), psi[src(e)]),
+    "Something went wrong, probably re-orthogonalized this edge in the same direction twice!"
+  )
+  return env
+end
diff --git a/src/treetensornetworks/projttnosum.jl b/src/treetensornetworks/projttnosum.jl
new file mode 100644
index 00000000..c06f318e
--- /dev/null
+++ b/src/treetensornetworks/projttnosum.jl
@@ -0,0 +1,59 @@
+"""
+ProjTTNOSum
+
+A sum of `ProjTTNO` effective Hamiltonians, all sharing the same position.
+"""
+struct ProjTTNOSum{V}
+  pm::Vector{ProjTTNO{V}}
+  function ProjTTNOSum(pm::Vector{ProjTTNO{V}}) where {V}
+    return new{V}(pm)
+  end
+end
+
+copy(P::ProjTTNOSum) = ProjTTNOSum(copy.(P.pm))
+
+ProjTTNOSum(ttnos::Vector{<:TTNO}) = ProjTTNOSum([ProjTTNO(M) for M in ttnos])
+
+ProjTTNOSum(Ms::TTNO{V}...) where {V} = ProjTTNOSum([Ms...])
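# A small sketch of assembling a `ProjTTNOSum` from several Hamiltonian
# terms, using the constructors above (H1 and H2 are assumed to be TTNOs on
# the same site network):
#
# PH = ProjTTNOSum(H1, H2)    # from a splatted list of TTNOs
# PH = ProjTTNOSum([H1, H2])  # equivalently, from a vector of TTNOs
# Applying it, product(PH, v), sums the applications of the individual terms.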
+
+on_edge(P::ProjTTNOSum) = on_edge(P.pm[1])
+
+nsite(P::ProjTTNOSum) = nsite(P.pm[1])
+
+function set_nsite(Ps::ProjTTNOSum, nsite)
+  return ProjTTNOSum(map(M -> set_nsite(M, nsite), Ps.pm))
+end
+
+underlying_graph(P::ProjTTNOSum) = underlying_graph(P.pm[1])
+
+Base.length(P::ProjTTNOSum) = length(P.pm[1])
+
+sites(P::ProjTTNOSum) = sites(P.pm[1])
+
+incident_edges(P::ProjTTNOSum) = incident_edges(P.pm[1])
+
+internal_edges(P::ProjTTNOSum) = internal_edges(P.pm[1])
+
+function product(P::ProjTTNOSum, v::ITensor)::ITensor
+  Pv = product(P.pm[1], v)
+  for n in 2:length(P.pm)
+    Pv += product(P.pm[n], v)
+  end
+  return Pv
+end
+
+function Base.eltype(P::ProjTTNOSum)
+  elT = eltype(P.pm[1])
+  for n in 2:length(P.pm)
+    elT = promote_type(elT, eltype(P.pm[n]))
+  end
+  return elT
+end
+
+(P::ProjTTNOSum)(v::ITensor) = product(P, v)
+
+Base.size(P::ProjTTNOSum) = size(P.pm[1])
+
+function position(
+  P::ProjTTNOSum{V}, psi::TTNS{V}, pos::Union{Vector{<:V},NamedEdge{V}}
+) where {V}
+  return ProjTTNOSum(map(M -> position(M, psi, pos), P.pm))
+end
diff --git a/src/treetensornetworks/ttno.jl b/src/treetensornetworks/ttno.jl
new file mode 100644
index 00000000..29f5b498
--- /dev/null
+++ b/src/treetensornetworks/ttno.jl
@@ -0,0 +1,152 @@
+"""
+    TreeTensorNetworkOperator{V} <: AbstractTreeTensorNetwork{V}
+
+A finite size tree tensor network operator type.
+Keeps track of the orthogonality center.
+
+# Fields
+
+- itensor_network::ITensorNetwork{V}
+- ortho_center::Vector{V}: A vector of vertices defining the orthogonality center.
+"""
+struct TreeTensorNetworkOperator{V} <: AbstractTreeTensorNetwork{V}
+  itensor_network::ITensorNetwork{V}
+  ortho_center::Vector{V}
+  function TreeTensorNetworkOperator{V}(
+    itensor_network::ITensorNetwork, ortho_center::Vector=vertices(itensor_network)
+  ) where {V}
+    @assert is_tree(itensor_network)
+    return new{V}(itensor_network, ortho_center)
+  end
+end
+
+function data_graph_type(G::Type{<:TreeTensorNetworkOperator})
+  return data_graph_type(fieldtype(G, :itensor_network))
+end
+
+function copy(ψ::TreeTensorNetworkOperator)
+  return TreeTensorNetworkOperator(copy(ψ.itensor_network), copy(ψ.ortho_center))
+end
+
+const TTNO = TreeTensorNetworkOperator
+
+# Field access
+itensor_network(ψ::TreeTensorNetworkOperator) = getfield(ψ, :itensor_network)
+
+# Required for `AbstractITensorNetwork` interface
+data_graph(ψ::TreeTensorNetworkOperator) = data_graph(itensor_network(ψ))
+
+#
+# Constructor
+#
+
+function TreeTensorNetworkOperator(tn::ITensorNetwork, args...)
+  return TreeTensorNetworkOperator{vertextype(tn)}(tn, args...)
+end
+
+# catch-all for default ElType
+function (::Type{TTNT})(g::AbstractGraph, args...; kwargs...) where {TTNT<:TTNO}
+  return TTNT(Float64, g, args...; kwargs...)
+end
+
+function TreeTensorNetworkOperator(
+  ::Type{ElT}, graph::AbstractGraph, args...; kwargs...
+) where {ElT<:Number}
+  itensor_network = ITensorNetwork(ElT, graph; kwargs...)
+  return TreeTensorNetworkOperator(itensor_network, args...)
+end
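# The catch-all above lets any graph-based constructor default to Float64
# tensors when no element type is given; a hedged sketch (the `link_space`
# keyword is assumed to be forwarded to the underlying ITensorNetwork
# constructor):
#
# c = named_comb_tree(fill(2, 3))
# T64 = TTNO(c; link_space=2)              # Float64 by default
# Tc64 = TTNO(ComplexF64, c; link_space=2) # explicitly complex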
+
+#
+# Inner products
+#
+
+# TODO: implement using multi-graph disjoint union
+function inner(y::TTNS, A::TTNO, x::TTNS; root_vertex=default_root_vertex(x, A, y))
+  traversal_order = reverse(post_order_dfs_vertices(x, root_vertex))
+  check_hascommoninds(siteinds, A, x)
+  check_hascommoninds(siteinds, A, y)
+  ydag = sim(dag(y); sites=[])
+  x = sim(x; sites=[])
+  O = ydag[root_vertex] * A[root_vertex] * x[root_vertex]
+  for v in traversal_order[2:end]
+    O = O * ydag[v] * A[v] * x[v]
+  end
+  return O[]
+end
+
+# TODO: implement using multi-graph disjoint union
+function inner(
+  B::TTNO, y::TTNS, A::TTNO, x::TTNS; root_vertex=default_root_vertex(B, y, A, x)
+)
+  N = nv(B)
+  if nv(y) != N || nv(x) != N || nv(A) != N
+    throw(
+      DimensionMismatch(
+        "inner: mismatched number of vertices $N and $(nv(x)) or $(nv(y)) or $(nv(A))"
+      ),
+    )
+  end
+  check_hascommoninds(siteinds, A, x)
+  check_hascommoninds(siteinds, B, y)
+  for v in vertices(B)
+    !hascommoninds(
+      uniqueinds(siteinds(A, v), siteinds(x, v)), uniqueinds(siteinds(B, v), siteinds(y, v))
+    ) && error(
+      "$(typeof(x)) Ax and $(typeof(y)) By must share site indices. On site $v, Ax has site indices $(uniqueinds(siteinds(A, v), siteinds(x, v))) while By has site indices $(uniqueinds(siteinds(B, v), siteinds(y, v))).",
+    )
+  end
+  ydag = sim(linkinds, dag(y))
+  Bdag = sim(linkinds, dag(B))
+  traversal_order = reverse(post_order_dfs_vertices(x, root_vertex))
+  yB = ydag[root_vertex] * Bdag[root_vertex]
+  Ax = A[root_vertex] * x[root_vertex]
+  O = yB * Ax
+  for v in traversal_order[2:end]
+    yB = ydag[v] * Bdag[v]
+    Ax = A[v] * x[v]
+    yB *= O
+    O = yB * Ax
+  end
+  return O[]
+end
+
+#
+# Construction from operator (map)
+#
+
+function TTNO(
+  ::Type{ElT}, sites::IndsNetwork, ops::Dictionary; kwargs...
+) where {ElT<:Number}
+  N = nv(sites)
+  os = Prod{Op}()
+  for v in vertices(sites)
+    os *= Op(ops[v], v)
+  end
+  T = TTNO(ElT, os, sites; kwargs...)
+  # see https://github.com/ITensor/ITensors.jl/issues/526
+  lognormT = lognorm(T)
+  T /= exp(lognormT / N) # TODO: fix broadcasting for in-place assignment
+  truncate!(T; cutoff=1e-15)
+  T *= exp(lognormT / N)
+  return T
+end
+
+function TTNO(
+  ::Type{ElT}, sites::IndsNetwork, fops::Function; kwargs...
+) where {ElT<:Number}
+  ops = Dictionary(vertices(sites), map(v -> fops(v), vertices(sites)))
+  return TTNO(ElT, sites, ops; kwargs...)
+end
+
+function TTNO(::Type{ElT}, sites::IndsNetwork, op::String; kwargs...) where {ElT<:Number}
+  ops = Dictionary(vertices(sites), fill(op, nv(sites)))
+  return TTNO(ElT, sites, ops; kwargs...)
+end
+
+#
+# Conversion
+#
+
+function convert(::Type{<:TTNS}, T::TTNO)
+  return TTNS(itensor_network(T), ortho_center(T))
+end
+
+function convert(::Type{<:TTNO}, T::TTNS)
+  return TTNO(itensor_network(T), ortho_center(T))
+end
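# Since the conversions above just rewrap the same ITensorNetwork and
# orthogonality center, moving between state and operator views is cheap; a
# short sketch:
#
# psi_view = convert(TTNS, T)       # view a TTNO `T` as a TTNS
# T_back = convert(TTNO, psi_view)  # and back, without copying tensor data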
diff --git a/src/treetensornetworks/ttns.jl b/src/treetensornetworks/ttns.jl
new file mode 100644
index 00000000..5d62b27e
--- /dev/null
+++ b/src/treetensornetworks/ttns.jl
@@ -0,0 +1,112 @@
+"""
+    TreeTensorNetworkState{V} <: AbstractTreeTensorNetwork{V}
+
+A finite size tree tensor network state type.
+Keeps track of the orthogonality center.
+
+# Fields
+
+- itensor_network::ITensorNetwork{V}
+- ortho_center::Vector{V}: A vector of vertices defining the orthogonality center.
+"""
+struct TreeTensorNetworkState{V} <: AbstractTreeTensorNetwork{V}
+  itensor_network::ITensorNetwork{V}
+  ortho_center::Vector{V}
+  function TreeTensorNetworkState{V}(
+    itensor_network::ITensorNetwork, ortho_center::Vector=vertices(itensor_network)
+  ) where {V}
+    @assert is_tree(itensor_network)
+    return new{V}(itensor_network, ortho_center)
+  end
+end
+
+function data_graph_type(G::Type{<:TreeTensorNetworkState})
+  return data_graph_type(fieldtype(G, :itensor_network))
+end
+
+function copy(ψ::TreeTensorNetworkState)
+  return TreeTensorNetworkState(copy(ψ.itensor_network), copy(ψ.ortho_center))
+end
+
+const TTNS = TreeTensorNetworkState
+
+# Field access
+itensor_network(ψ::TreeTensorNetworkState) = getfield(ψ, :itensor_network)
+
+# Required for `AbstractITensorNetwork` interface
+data_graph(ψ::TreeTensorNetworkState) = data_graph(itensor_network(ψ))
+
+#
+# Constructor
+#
+
+function TreeTensorNetworkState(tn::ITensorNetwork, args...)
+  return TreeTensorNetworkState{vertextype(tn)}(tn, args...)
+end
+
+# catch-all for default ElType
+function (::Type{TTNT})(g::AbstractGraph, args...; kwargs...) where {TTNT<:TTNS}
+  return TTNT(Float64, g, args...; kwargs...)
+end
+
+function TreeTensorNetworkState(
+  ::Type{ElT}, graph::AbstractGraph, args...; kwargs...
+) where {ElT<:Number}
+  itensor_network = ITensorNetwork(ElT, graph; kwargs...)
+  return TreeTensorNetworkState(itensor_network, args...)
+end
+
+# construct from given state (map)
+function TreeTensorNetworkState(
+  ::Type{ElT}, is::IndsNetwork, initstate, args...
+) where {ElT<:Number}
+  itensor_network = ITensorNetwork(ElT, is, initstate)
+  return TreeTensorNetworkState(itensor_network, args...)
+end
+
+# TODO: randomcircuitTTNS?
+function randomTTNS(args...; kwargs...)
+  T = TTNS(args...; kwargs...)
+  randn!.(vertex_data(T))
+  normalize!.(vertex_data(T))
+  return T
+end
+
+function productTTNS(args...; kwargs...)
+  return TTNS(args...; link_space=1, kwargs...)
+end
+
+#
+# Utility
+#
+
+function replacebond!(T::TTNS, edge::AbstractEdge, phi::ITensor; kwargs...)
+  ortho::String = get(kwargs, :ortho, "left")
+  swapsites::Bool = get(kwargs, :swapsites, false)
+  which_decomp::Union{String,Nothing} = get(kwargs, :which_decomp, nothing)
+  normalize::Bool = get(kwargs, :normalize, false)
+
+  indsTe = inds(T[src(edge)])
+  if swapsites
+    sb = siteinds(T, src(edge))
+    sbp1 = siteinds(T, dst(edge))
+    indsTe = replaceinds(indsTe, sb, sbp1)
+  end
+
+  L, R, spec = factorize(
+    phi, indsTe; which_decomp=which_decomp, tags=tags(T, edge), kwargs...
+  )
+
+  T[src(edge)] = L
+  T[dst(edge)] = R
+  if ortho == "left"
+    normalize && (T[dst(edge)] ./= norm(T[dst(edge)]))
+    isortho(T) && (T = set_ortho_center(T, [dst(edge)]))
+  elseif ortho == "right"
+    normalize && (T[src(edge)] ./= norm(T[src(edge)]))
+    isortho(T) && (T = set_ortho_center(T, [src(edge)]))
+  end
+  return spec
+end
+
+function replacebond!(T::TTNS, edge::Pair, phi::ITensor; kwargs...)
+  return replacebond!(T, edgetype(T)(edge), phi; kwargs...)
+end
+
+function replacebond(T0::TTNS, args...; kwargs...)
+  return replacebond!(copy(T0), args...; kwargs...)
+end
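# A short sketch of the random-state constructor above, assuming spin-1/2
# site indices on a tree graph (as in test/test_ttns.jl below):
#
# c = named_comb_tree(fill(2, 3))
# is = siteinds("S=1/2", c)
# psi = randomTTNS(is; link_space=2)  # each vertex tensor randomized, then normalized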
diff --git a/src/utility.jl b/src/utility.jl
new file mode 100644
index 00000000..5d155f64
--- /dev/null
+++ b/src/utility.jl
@@ -0,0 +1,17 @@
+"""
+Relabel the sites in an `OpSum` according to the given site map.
+"""
+function relabel_sites(O::OpSum, vmap::AbstractDictionary)
+  Oout = OpSum()
+  for term in Ops.terms(O)
+    c = Ops.coefficient(term)
+    p = Ops.argument(term)
+    # swap the sites for every Op in the product and multiply the resulting Ops
+    pout = prod([
+      Op(Ops.which_op(o), map(v -> vmap[v], Ops.sites(o))...; Ops.params(o)...) for o in p
+    ])
+    # add to the new OpSum
+    Oout += c * pout
+  end
+  return Oout
+end
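# A brief usage sketch for `relabel_sites`, matching its use in
# test/test_opsum_to_ttno.jl below: map tree-vertex labels to the integers
# 1:N so the same OpSum can be fed to the linear MPO constructor.
#
# linear_order = [4, 1, 2, 5, 3, 6]
# vmap = Dictionary(vertices(is)[linear_order], 1:length(linear_order))
# Hline = MPO(relabel_sites(H, vmap), sites)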
diff --git a/test/test_indsnetwork.jl b/test/test_indsnetwork.jl
index e5567f60..1a9bc27f 100644
--- a/test/test_indsnetwork.jl
+++ b/test/test_indsnetwork.jl
@@ -1,6 +1,6 @@
+using Dictionaries
 using ITensors
 using ITensorNetworks
-using Dictionaries
 using Random
 using Test
diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl
index 8d9cd064..71faec7b 100644
--- a/test/test_itensornetwork.jl
+++ b/test/test_itensornetwork.jl
@@ -67,7 +67,7 @@ using Test
     @test !has_vertex(tn_2, ((1, 2), 2))
     @test tn_2[((1, 2), 1)] ≈ tn[((1, 2), 2)] * tn[((1, 2), 1)]
   end
-  
+
   @testset "Remove edge (regression test for issue #5)" begin
     dims = (2, 2)
     g = named_grid(dims)
@@ -84,7 +84,7 @@ using Test
     @test has_vertex(tn, ((2, 2), 1))
     @test has_vertex(tn, ((2, 2), 2))
   end
-  
+
   @testset "Custom element type" for eltype in (Float32, Float64, ComplexF32, ComplexF64),
     link_space in (nothing, 3),
     g in (
@@ -163,4 +163,74 @@ using Test
     @test issetequal(p2, [3, 4])
     @test wc ≈ log2(3)
   end
+
+  @testset "Index access" begin
+    dims = (2, 2)
+    g = named_grid(dims)
+    s = siteinds("S=1/2", g)
+    ψ = ITensorNetwork(s; link_space=2)
+
+    nt = ITensorNetworks.neighbor_itensors(ψ, (1, 1))
+    @test length(nt) == 2
+    @test all(map(hascommoninds(ψ[1, 1]), nt))
+
+    @test all(map(t -> isempty(commoninds(inds(t), uniqueinds(ψ, (1, 1)))), nt))
+
+    e = (1, 1) => (2, 1)
+    uie = uniqueinds(ψ, e)
+    @test isempty(commoninds(uie, inds(ψ[2, 1])))
+    @test issetequal(uie, union(commoninds(ψ[1, 1], ψ[1, 2]), uniqueinds(ψ, (1, 1))))
+
+    @test siteinds(ψ, (1, 1)) == s[1, 1]
+
+    cie = commoninds(ψ, e)
+    @test hasinds(ψ[1, 1], cie) && hasinds(ψ[2, 1], cie)
+    @test isempty(commoninds(uie, cie))
+
+    @test linkinds(ψ, e) == commoninds(ψ[1, 1], ψ[2, 1])
+  end
+
+  @testset "ElType conversion, $new_eltype" for new_eltype in (Float32, ComplexF64)
+    dims = (2, 2)
+    g = named_grid(dims)
+    s = siteinds("S=1/2", g)
+    ψ = randomITensorNetwork(s; link_space=2)
+    @test ITensors.scalartype(ψ) == Float64
+
+    ϕ = ITensors.convert_leaf_eltype(new_eltype, ψ)
+    @test ITensors.scalartype(ϕ) == new_eltype
+  end
+
+  @testset "Construction from state map" for ElT in (Float32, ComplexF64)
+    dims = (2, 2)
+    g = named_grid(dims)
+    s = siteinds("S=1/2", g)
+    state_map(v::Tuple) = iseven(sum(isodd.(v))) ? "↑" : "↓"
+
+    ψ = ITensorNetwork(s, state_map)
+    t = ψ[2, 2]
+    si = only(siteinds(ψ, (2, 2)))
+    bi = map(e -> only(linkinds(ψ, e)), incident_edges(ψ, (2, 2)))
+    @test eltype(t) == Float64
+    @test abs(t[si => "↑", [b => end for b in bi]...]) == 1.0 # insert_links introduces extra signs through factorization...
+    @test t[si => "↓", [b => end for b in bi]...] == 0.0
+
+    ϕ = ITensorNetwork(ElT, s, state_map)
+    t = ϕ[2, 2]
+    si = only(siteinds(ϕ, (2, 2)))
+    bi = map(e -> only(linkinds(ϕ, e)), incident_edges(ϕ, (2, 2)))
+    @test eltype(t) == ElT
+    @test abs(t[si => "↑", [b => end for b in bi]...]) == convert(ElT, 1.0) # insert_links introduces extra signs through factorization...
+    @test t[si => "↓", [b => end for b in bi]...] == convert(ElT, 0.0)
+  end
+
+  @testset "Priming and tagging" begin
+    # TODO: add actual tests
+
+    tooth_lengths = fill(2, 3)
+    c = named_comb_tree(tooth_lengths)
+    is = siteinds("S=1/2", c)
+    tn = randomITensorNetwork(is; link_space=3)
+    @test_broken swapprime(tn, 0, 2)
+  end
 end
diff --git a/test/test_opsum_to_ttno.jl b/test/test_opsum_to_ttno.jl
new file mode 100644
index 00000000..34ffa488
--- /dev/null
+++ b/test/test_opsum_to_ttno.jl
@@ -0,0 +1,73 @@
+using Dictionaries
+using ITensors
+using ITensorNetworks
+using Random
+using Test
+
+@testset "OpSum to TTNO" begin
+  # small comb tree
+  tooth_lengths = fill(2, 3)
+  c = named_comb_tree(tooth_lengths)
+  root_vertex = (3, 2)
+  is = siteinds("S=1/2", c)
+
+  # linearized version
+  linear_order = [4, 1, 2, 5, 3, 6]
+  vmap = Dictionary(vertices(is)[linear_order], 1:length(linear_order))
+  sites = only.(collect(vertex_data(is)))[linear_order]
+
+  # test with next-to-nearest-neighbor Ising Hamiltonian
+  J1 = -1
+  J2 = 2
+  h = 0.5
+  H = ising(c; J1=J1, J2=J2, h=h)
+
+  # add combination of longer range interactions
+  Hlr = copy(H)
+  Hlr += 5, "Z", (1, 2), "Z", (2, 2)
+  Hlr += -4, "Z", (1, 1), "Z", (2, 2)
+
+  @testset "Finite state machine" begin
+    # get the TTNO Hamiltonian directly
+    Hfsm = TTNO(H, is; root_vertex=root_vertex, method=:fsm, cutoff=1e-10)
+    # get the corresponding MPO Hamiltonian
+    Hline = MPO(relabel_sites(H, vmap), sites)
+    # compare the resulting dense Hamiltonians
+    @disable_warn_order begin
+      Tmpo = prod(Hline)
+      Tttno = contract(Hfsm)
+    end
+    @test Tttno ≈ Tmpo rtol = 1e-6
+
+    # same thing for longer range interactions
+    Hfsm_lr = TTNO(Hlr, is; root_vertex=root_vertex, method=:fsm, cutoff=1e-10)
+    Hline_lr = MPO(relabel_sites(Hlr, vmap), sites)
+    @disable_warn_order begin
+      Tmpo_lr = prod(Hline_lr)
+      Tttno_lr = contract(Hfsm_lr)
+    end
+    @test Tttno_lr ≈ Tmpo_lr rtol = 1e-6
+  end
+
+  @testset "Svd approach" begin
+    # get the TTNO Hamiltonian directly
+    Hsvd = TTNO(H, is; root_vertex=root_vertex, method=:svd, cutoff=1e-10)
+    # get the corresponding MPO Hamiltonian
+    Hline = MPO(relabel_sites(H, vmap), sites)
+    # compare the resulting dense Hamiltonians
+    @disable_warn_order begin
+      Tmpo = prod(Hline)
+      Tttno = contract(Hsvd)
+    end
+    @test Tttno ≈ Tmpo rtol = 1e-6
+
+    # this breaks for longer range interactions
+    Hsvd_lr = TTNO(Hlr, is; root_vertex=root_vertex, method=:svd, cutoff=1e-10)
+    Hline_lr = MPO(relabel_sites(Hlr, vmap), sites)
+    @disable_warn_order begin
+      Tmpo_lr = prod(Hline_lr)
+      Tttno_lr = contract(Hsvd_lr)
+    end
+    @test_broken Tttno_lr ≈ Tmpo_lr rtol = 1e-6
+  end
+end
diff --git a/test/test_sitetype.jl b/test/test_sitetype.jl
index 6f90205b..a3515565 100644
--- a/test/test_sitetype.jl
+++ b/test/test_sitetype.jl
@@ -1,3 +1,4 @@
+using Dictionaries
 using ITensors
 using ITensorNetworks
 using Random
diff --git a/test/test_ttno.jl b/test/test_ttno.jl
new file mode 100644
index 00000000..bb65c6d1
--- /dev/null
+++ b/test/test_ttno.jl
@@ -0,0 +1,51 @@
+using Test
+using ITensorNetworks
+using ITensors
+using Random
+
+@testset "TTNO Basics" begin
+
+  # random comb tree
+  tooth_lengths = rand(1:3, rand(2:4))
+  c = named_comb_tree(tooth_lengths)
+  # specify random site dimension on every site
+  dmap = v -> rand(1:3)
+  is = siteinds(dmap, c)
+  # operator site inds
+  is_isp = union_all_inds(is, prime(is; links=[]))
+  # specify random linear vertex ordering of graph vertices
+  vertex_order = shuffle(vertices(c))
+
+  @testset "Construct TTNO from ITensor or Array" begin
+    cutoff = 1e-10
+    sites_o = [is_isp[v] for v in vertex_order]
+    # create random ITensor with these indices
+    O = randomITensor(sites_o...)
+    # dense TTNO constructor from IndsNetwork
+    @disable_warn_order o1 = TTNO(O, is_isp; cutoff)
+    # dense TTNO constructor from Vector{Vector{Index}} and NamedDimGraph
+    @disable_warn_order o2 = TTNO(O, sites_o, c; vertex_order, cutoff)
+    # convert to array with proper index order
+    AO = Array(O, sites_o...)
+    # dense array constructor from IndsNetwork
+    @disable_warn_order o3 = TTNO(AO, is_isp; vertex_order, cutoff)
+    # dense array constructor from Vector{Vector{Index}} and NamedDimGraph
+    @disable_warn_order o4 = TTNO(AO, sites_o, c; vertex_order, cutoff)
+    # see if this actually worked
+    root_vertex = only(ortho_center(o1))
+    @disable_warn_order begin
+      O1 = contract(o1, root_vertex)
+      O2 = contract(o2, root_vertex)
+      O3 = contract(o3, root_vertex)
+      O4 = contract(o4, root_vertex)
+    end
+    @test norm(O - O1) < 1e2 * cutoff
+    @test norm(O - O2) < 1e2 * cutoff
+    @test norm(O - O3) < 1e2 * cutoff
+    @test norm(O - O4) < 1e2 * cutoff
+  end
+
+  @testset "Ortho" begin
+    # TODO
+  end
+end
diff --git a/test/test_ttns.jl b/test/test_ttns.jl
new file mode 100644
index 00000000..25fe8b38
--- /dev/null
+++ b/test/test_ttns.jl
@@ -0,0 +1,49 @@
+using Test
+using ITensorNetworks
+using ITensors
+using Random
+
+@testset "TTNS Basics" begin
+
+  # random comb tree
+  tooth_lengths = rand(1:3, rand(2:4))
+  c = named_comb_tree(tooth_lengths)
+  # specify random site dimension on every site
+  dmap = v -> rand(1:3)
+  is = siteinds(dmap, c)
+  # specify random linear vertex ordering of graph vertices
+  vertex_order = shuffle(vertices(c))
+
+  @testset "Construct TTNS from ITensor or Array" begin
+    cutoff = 1e-10
+    sites_s = [only(is[v]) for v in vertex_order]
+    # create random ITensor with these indices
+    S = randomITensor(vertex_data(is)...)
+    # dense TTNS constructor from IndsNetwork
+    @disable_warn_order s1 = TTNS(S, is; cutoff)
+    # dense TTNS constructor from Vector{Index} and NamedDimGraph
+    @disable_warn_order s2 = TTNS(S, sites_s, c; vertex_order, cutoff)
+    # convert to array with proper index order
+    @disable_warn_order AS = Array(S, sites_s...)
+    # dense array constructor from IndsNetwork
+    @disable_warn_order s3 = TTNS(AS, is; vertex_order, cutoff)
+    # dense array constructor from Vector{Index} and NamedDimGraph
+    @disable_warn_order s4 = TTNS(AS, sites_s, c; vertex_order, cutoff)
+    # see if this actually worked
+    root_vertex = only(ortho_center(s1))
+    @disable_warn_order begin
+      S1 = contract(s1, root_vertex)
+      S2 = contract(s2, root_vertex)
+      S3 = contract(s3, root_vertex)
+      S4 = contract(s4, root_vertex)
+    end
+    @test norm(S - S1) < 1e2 * cutoff
+    @test norm(S - S2) < 1e2 * cutoff
+    @test norm(S - S3) < 1e2 * cutoff
+    @test norm(S - S4) < 1e2 * cutoff
+  end
+
+  @testset "Ortho" begin
+    # TODO
+  end
+end
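# A closing hedged sketch tying the pieces together: build a Hamiltonian TTNO
# from an OpSum, make a random TTNS, and evaluate an expectation value with
# the `inner` method from src/treetensornetworks/ttno.jl above (the `ising`
# helper is assumed from src/models.jl, and the bra is primed so its site
# indices match the operator's output indices):
#
# using ITensors, ITensorNetworks
# c = named_comb_tree(fill(2, 3))
# is = siteinds("S=1/2", c)
# H = TTNO(ising(c; J1=-1, J2=2, h=0.5), is; root_vertex=(3, 2))
# psi = randomTTNS(is; link_space=2)
# E = inner(psi', H, psi)  # ⟨psi|H|psi⟩, contracted leaf-to-root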