Commit

Merge de5f118 into 2567204
emstoudenmire authored Mar 25, 2024
2 parents 2567204 + de5f118 commit 361de4e
Showing 4 changed files with 11 additions and 11 deletions.
6 changes: 3 additions & 3 deletions ITensorGaussianMPS/src/gmps.jl
@@ -1,5 +1,5 @@
import Base: sortperm, size, length, eltype, conj, transpose, copy, *
-using ITensors: alias
+using ITensors: alias, ITensorMPS
abstract type AbstractSymmetry end
struct ConservesNfParity{T} <: AbstractSymmetry
  data::T
@@ -177,8 +177,8 @@ end

function quadratic_operator(os::OpSum)
  os = deepcopy(os)
-  #os = ITensors.sorteachterm(os, sites)
-  os = ITensors.sortmergeterms(os)
+  #os = ITensorMPS.sorteachterm(os, sites)
+  os = ITensorMPS.sortmergeterms(os)

  nterms = length(os)
  coefs = Vector{Number}(undef, nterms)
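
For context on this hunk: `sortmergeterms` is now reached through the `ITensorMPS` submodule rather than through `ITensors` directly. A minimal sketch of the relocated call, assuming only that the `ITensorMPS` submodule ships inside `ITensors` as imported above; the example terms are hypothetical and mirror the test file further down:

using ITensors
using ITensors: ITensorMPS

os = OpSum()
os += (4, "Z", 1)   # hypothetical terms
os += (2, "X", 2)
os += (1, "X", 2)

# sortmergeterms sorts the terms and combines duplicates, so the two "X" terms
# on site 2 are merged into a single term with coefficient 3.
os = ITensorMPS.sortmergeterms(os)
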
6 changes: 3 additions & 3 deletions src/ITensorMPS/abstractprojmpo/projmposum.jl
@@ -10,7 +10,7 @@ end
copy(P::AbstractSum) = typeof(P)(copy.(terms(P)))

function nsite(P::AbstractSum)
-  @assert allequal(nsite.(terms(P)))
+  @assert all(y -> y == nsite(first(terms(P))), nsite.(terms(P)))
  return nsite(first(terms(P)))
end

@@ -19,12 +19,12 @@ function set_nsite!(A::AbstractSum, nsite)
end

function length(A::AbstractSum)
-  @assert allequal(length.(terms(A)))
+  @assert all(y -> y == length(first(terms(A))), length.(terms(A)))
  return length(first(terms(A)))
end

function site_range(A::AbstractSum)
-  @assert allequal(Iterators.map(site_range, terms(A)))
+  @assert all(y -> y == site_range(first(terms(A))), Iterators.map(site_range, terms(A)))
  return site_range(first(terms(A)))
end

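
The three assertions above replace `allequal` with an explicit comparison against the first element. A minimal stand-alone sketch of that pattern in plain Julia (no ITensors types involved); one plausible motivation is that `Base.allequal` only exists on Julia 1.8 and newer, while the explicit `all` form also runs on older releases:

xs = [2, 2, 2]   # stands in for nsite.(terms(P)), length.(terms(A)), and so on

# Previous form (requires Base.allequal, Julia 1.8+):
# @assert allequal(xs)

# Replacement form: compare every element against the first.
@assert all(y -> y == first(xs), xs)
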
7 changes: 3 additions & 4 deletions src/ITensorMPS/dmrg.jl
@@ -249,12 +249,11 @@ function dmrg(
  ## into `DeviceMemory`. This conversion line is here temporarily to fix that problem when it arises
  ## Adapt is only called when using CUDA backend. CPU will work as implemented previously.
  ## TODO this might be the only place we really need iscu if its not fixed.
-  phi::ITensor = if NDTensors.iscu(phi) && NDTensors.iscu(vecs[1])
-    adapt(set_eltype(unwrap_array_type(phi), eltype(vecs[1])), vecs[1])
+  if NDTensors.iscu(phi) && NDTensors.iscu(vecs[1])
+    phi = adapt(ITensors.set_eltype(unwrap_array_type(phi), eltype(vecs[1])), vecs[1])
  else
-    vecs[1]
+    phi = vecs[1]
  end
-  #phi::ITensor = vecs[1]

  ortho = ha == 1 ? "left" : "right"

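
The hunk above reassigns `phi` inside a plain if/else and calls `adapt` to move `vecs[1]` back onto the device-backed storage of `phi` when both are CUDA-backed. A CUDA-free sketch of the `adapt(to, x)` call shape from Adapt.jl, purely to illustrate the conversion idea; `ITensors.set_eltype`, `unwrap_array_type`, and `NDTensors.iscu` are package internals and are not reproduced here:

using Adapt

v = Float32[1.0, 2.0, 3.0]

# adapt(to, x) rebuilds x with the requested storage type; with a plain Array
# target this is effectively a no-op on the CPU, whereas a GPU-resident array
# would be copied back to host memory.
w = adapt(Array, v)
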
3 changes: 2 additions & 1 deletion test/ITensorMPS/Ops/test_ops_mpo.jl
@@ -1,6 +1,7 @@
using Test
using ITensors
using ITensors.Ops
+using ITensors: ITensorMPS
using LinearAlgebra

@testset "Ops to MPO" begin
@@ -42,7 +43,7 @@ using LinearAlgebra
  ℋ_merged = OpSum()
  ℋ_merged += (4, "Z", 1)
  ℋ_merged += (3, "X", 2)
-  @test ITensors.sortmergeterms(ℋ) == ℋ_merged
+  @test ITensorMPS.sortmergeterms(ℋ) == ℋ_merged

  # Test with repeated terms
  s = siteinds("S=1/2", 1)
