
Commit

Merge branch 'ITensor:main' into main
markusschmitt authored Jul 13, 2022
2 parents 76f507e + 6a83a83 commit 62fa370
Showing 27 changed files with 371 additions and 203 deletions.
11 changes: 11 additions & 0 deletions NDTensors/NEWS.md
@@ -6,6 +6,17 @@ Note that as of Julia v1.5, in order to see deprecation warnings you will need t

After we release v1 of the package, we will start following [semantic versioning](https://semver.org).

NDTensors v0.1.42 Release Notes
===============================

Bugs:

Enhancements:

- Define `map` for Tensor and TensorStorage (b66d1b7)
- Define `real` and `imag` for Tensor (b66d1b7)
- Throw error when trying to do an eigendecomposition of Tensor with Infs or NaNs (b66d1b7)

NDTensors v0.1.41 Release Notes
===============================

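As a quick illustration of the v0.1.42 entries above: `real` and `imag` now return a `Tensor` over the same indices, with the element type made real. A minimal sketch, assuming the `tensor(Dense(...), dims)` construction from NDTensors' dense-storage API (check the package for the exact constructors):

```julia
using NDTensors

# A small 2x2 complex dense tensor; this constructor usage is an assumption.
T = tensor(Dense(randn(ComplexF64, 4)), (2, 2))

Tr = real(T)   # new in v0.1.42: elementwise real part, as a Tensor
Ti = imag(T)   # new in v0.1.42: elementwise imaginary part

# The decomposition round-trips (using `array` to compare as plain arrays):
array(T) ≈ array(Tr) .+ im .* array(Ti)
```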
2 changes: 1 addition & 1 deletion NDTensors/Project.toml
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <mfishman@flatironinstitute.org>"]
version = "0.1.41"
version = "0.1.42"

[deps]
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
3 changes: 3 additions & 0 deletions NDTensors/src/imports.jl
@@ -24,15 +24,18 @@ import Base:
fill!,
getindex,
hash,
imag,
isempty,
isless,
iterate,
length,
map,
ndims,
permutedims,
permutedims!,
promote_rule,
randn,
real,
reshape,
setindex,
setindex!,
22 changes: 20 additions & 2 deletions NDTensors/src/linearalgebra.jl
@@ -218,7 +218,16 @@ function LinearAlgebra.eigen(
use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff)
use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff)

DM, VM = eigen(matrix(T))
matrixT = matrix(T)
if any(!isfinite, matrixT)
throw(
ArgumentError(
"Trying to perform the eigendecomposition of a matrix containing NaNs or Infs"
),
)
end

DM, VM = eigen(matrixT)

# Sort by largest to smallest eigenvalues
p = sortperm(DM; rev=true, by=abs)
@@ -343,7 +352,16 @@ function LinearAlgebra.eigen(
use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff)
use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff)

DM, VM = eigen(matrix(T))
matrixT = matrix(T)
if any(!isfinite, matrixT)
throw(
ArgumentError(
"Trying to perform the eigendecomposition of a matrix containing NaNs or Infs"
),
)
end

DM, VM = eigen(matrixT)

# Sort by largest to smallest eigenvalues
#p = sortperm(DM; rev = true)
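The check added to both `eigen` methods above relies on `!isfinite` being true for `NaN` and `±Inf` alike, so a single `any` pass over the matrix catches every non-finite entry. A plain-Julia illustration:

```julia
any(!isfinite, [1.0 NaN; 0.0 2.0])   # true: NaN is caught
any(!isfinite, [1.0 Inf; 0.0 2.0])   # true: Inf is caught as well
any(!isfinite, [1.0 0.5; 0.0 2.0])   # false: all entries are finite

# Previously such input reached the LAPACK routine directly, which can
# return meaningless eigenvalues instead of failing loudly.
```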
13 changes: 11 additions & 2 deletions NDTensors/src/tensor.jl
@@ -124,13 +124,22 @@ copyto!(R::Tensor, T::Tensor) = (copyto!(storage(R), storage(T)); R)

complex(T::Tensor) = setstorage(T, complex(storage(T)))

Base.real(T::Tensor) = setstorage(T, real(storage(T)))
real(T::Tensor) = setstorage(T, real(storage(T)))

Base.imag(T::Tensor) = setstorage(T, imag(storage(T)))
imag(T::Tensor) = setstorage(T, imag(storage(T)))

# Define Base.similar in terms of NDTensors.similar
Base.similar(T::Tensor, args...) = similar(T, args...)

function map(f, x::Tensor{T}) where {T}
if !iszero(f(zero(T)))
error(
"map(f, ::Tensor) currently doesn't support functions that don't preserve zeros, while you passed a function such that f(0) = $(f(zero(T))). This isn't supported right now because it doesn't necessarily preserve the sparsity structure of the input tensor.",
)
end
return setstorage(x, map(f, storage(x)))
end

#
# Necessary to overload since the generic fallbacks are
# slow
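The zero-preservation check in `map` above exists because NDTensors storage types such as block-sparse only materialize structurally nonzero blocks; a function with `f(0) != 0` would assign unstored zeros a nonzero value, which the storage cannot represent without densifying. The `TensorStorage` method in the next file enforces the same rule. A sketch, reusing the hypothetical tensor `T` from the earlier example:

```julia
map(x -> 2x, T)      # ok: f(0) == 0, sparsity structure preserved
map(abs2, T)         # ok: abs2(0) == 0

map(x -> x + 1, T)   # throws: f(0) == 1 would implicitly densify the tensor
```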
9 changes: 9 additions & 0 deletions NDTensors/src/tensorstorage.jl
@@ -72,6 +72,15 @@ Base.copyto!(S1::TensorStorage, S2::TensorStorage) = (copyto!(data(S1), data(S2)

Random.randn!(S::TensorStorage) = (randn!(data(S)); S)

function map(f, x::TensorStorage{T}) where {T}
if !iszero(f(zero(T)))
error(
"map(f, ::TensorStorage) currently doesn't support functions that don't preserve zeros, while you passed a function such that f(0) = $(f(zero(T))). This isn't supported right now because it doesn't necessarily preserve the sparsity structure of the input tensor.",
)
end
return setdata(x, map(f, data(x)))
end

Base.fill!(S::TensorStorage, v) = (fill!(data(S), v); S)

LinearAlgebra.rmul!(S::TensorStorage, v::Number) = (rmul!(data(S), v); S)
46 changes: 46 additions & 0 deletions NEWS.md
@@ -6,6 +6,52 @@ Note that as of Julia v1.5, in order to see deprecation warnings you will need t

After we release v1 of the package, we will start following [semantic versioning](https://semver.org).

ITensors v0.3.19 Release Notes
==============================

Bugs:

Enhancements:

- Simplify the `rrule`s for priming and tagging MPS/MPO

ITensors v0.3.18 Release Notes
==============================

Bugs:

- Extend `apply(::MPO, ::MPO)` to `apply(::MPO, ::MPO, ::MPO...)` (#949)
- Fix AD for `apply(::MPO, ::MPO)` and `contract(::MPO, ::MPO)` (#949)
- Properly use element type in `randomMPS` in the 1-site case (b66d1b7)
- Fix bug in `tr(::MPO)` rrule where the derivative was being multiplied twice into the identity MPO (b66d1b7)
- Fix directsum when specifying a single `Index` (#930)
- Fix bug in loginner when inner is negative or complex (#945)
- Fix subtraction bug in `OpSum` (#945)

Enhancements:

- Define "I" for Qudit/Boson type (b66d1b7)
- Only warn in `inner` if the result is `Inf` or `NaN` (b66d1b7)
- Make sure `randomITensor(())` and `randomITensor(Float64, ())` returns a Dense storage type (b66d1b7)
- Define `isreal` and `iszero` for ITensors (b66d1b7)
- Project element type of ITensor in reverse pass of tensor-tensor or scalar-tensor contraction (b66d1b7)
- Define reverse rules for ITensor subtraction and negation (b66d1b7)
- Define `map` for ITensors (b66d1b7)
- Throw error when performing eigendecomposition of tensor with NaN or Inf elements (b66d1b7)
- Fix `rrule` for `MPO` constructor by generalizing the `rrule` for the `MPS` constructor (#946)
- Forward truncation arguments to more operations in `rrule` for `apply` (#945)
- Add rrules for addition and subtraction of MPOs (#935)

ITensors v0.3.17 Release Notes
==============================

Bugs:

Enhancements:

- Add Zp as alias for operator Z+, etc. (#942)
- Export diag (#942)

ITensors v0.3.16 Release Notes
==============================

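A few of the v0.3.18 enhancements above, exercised in one short sketch (standard ITensors API; the commented results are what the release notes imply):

```julia
using ITensors

i = Index(2, "i")
A = randomITensor(i, i')

isreal(A)   # newly defined: true, since the element type is Float64
iszero(A)   # newly defined: false for a random tensor

# Zero-index random ITensors are now guaranteed to use Dense storage:
r = randomITensor(Float64, ())
```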
4 changes: 2 additions & 2 deletions Project.toml
@@ -1,7 +1,7 @@
name = "ITensors"
uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
authors = ["Matthew Fishman <mfishman@flatironinstitute.org>", "Miles Stoudenmire <mstoudenmire@flatironinstitute.org>"]
version = "0.3.17"
version = "0.3.18"

[deps]
BitIntegers = "c3b6d118-76ef-56ca-8cc7-ebb389d030a1"
@@ -36,7 +36,7 @@ HDF5 = "0.14, 0.15, 0.16"
IsApprox = "0.1"
KrylovKit = "0.4.2, 0.5"
LinearMaps = "3"
NDTensors = "0.1.41"
NDTensors = "0.1.42"
PackageCompiler = "1.0.0, 2"
Requires = "1.1"
SerializedElementArrays = "0.1"
2 changes: 1 addition & 1 deletion docs/src/Observer.md
@@ -97,7 +97,7 @@ which include:
- psi: the current wavefunction MPS
- bond: the bond `b` that was just optimized, corresponding to sites `(b,b+1)` in the two-site DMRG algorithm
- sweep: the current sweep number
- sweep_is_done: true if at the end of the current sweep, otherwise false
- sweep\_is\_done: true if at the end of the current sweep, otherwise false
- half_sweep: the half-sweep number, equal to 1 for a left-to-right, first half sweep, or 2 for the second, right-to-left half sweep
- spec: the Spectrum object returned from factorizing the local superblock wavefunction tensor in two-site DMRG
- outputlevel: an integer specifying the amount of output to show
10 changes: 8 additions & 2 deletions src/ITensorChainRules/ITensorChainRules.jl
@@ -12,8 +12,13 @@ import ChainRulesCore: rrule

ITensors.dag(z::AbstractZero) = z

broadcast_notangent(a) = broadcast(_ -> NoTangent(), a)
if VERSION < v"1.7"
map_notangent(a) = map(_ -> NoTangent(), a)
else
map_notangent(a) = map(Returns(NoTangent()), a)
end

include("projection.jl")
include(joinpath("NDTensors", "tensor.jl"))
include(joinpath("NDTensors", "dense.jl"))
include("indexset.jl")
@@ -24,7 +29,7 @@ include(joinpath("mps", "mpo.jl"))
include(joinpath("LazyApply", "LazyApply.jl"))
include("zygoterules.jl")

@non_differentiable broadcast_notangent(::Any)
@non_differentiable map_notangent(::Any)
@non_differentiable Index(::Any...)
@non_differentiable delta(::Any...)
@non_differentiable dag(::Index)
@@ -39,5 +44,6 @@ include("zygoterules.jl")
@non_differentiable ITensors.filter_inds_set_function(::Function, ::Any...)
@non_differentiable ITensors.indpairs(::Any...)
@non_differentiable onehot(::Any...)
@non_differentiable Base.convert(::Type{TagSet}, str::String)

end
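For context on the version gate above: `Returns(val)`, added in Julia 1.7, builds a callable that ignores its arguments and always returns `val`, so both branches define the same helper. A quick standalone check:

```julia
using ChainRulesCore: NoTangent

map_notangent(a) = map(Returns(NoTangent()), a)   # the Julia >= 1.7 branch

map_notangent((1, "x", :y))   # (NoTangent(), NoTangent(), NoTangent())
```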
110 changes: 24 additions & 86 deletions src/ITensorChainRules/indexset.jl
@@ -1,85 +1,3 @@
function ChainRulesCore.rrule(::typeof(getindex), x::ITensor, I...)
y = getindex(x, I...)
function getindex_pullback(ȳ)
# TODO: add definition `ITensor(::Tuple{}) = ITensor()`
# to ITensors.jl so no splatting is needed here.
x̄ = ITensor(inds(x)...)
x̄[I...] = unthunk(ȳ)
Ī = broadcast_notangent(I)
return (NoTangent(), x̄, Ī...)
end
return y, getindex_pullback
end

# Specialized version in order to avoid call to `setindex!`
# within the pullback, should be better for taking higher order
# derivatives in Zygote.
function ChainRulesCore.rrule(::typeof(getindex), x::ITensor)
y = x[]
function getindex_pullback(ȳ)
x̄ = ITensor(unthunk(ȳ))
return (NoTangent(), x̄)
end
return y, getindex_pullback
end

function setinds_pullback(ȳ, x, a...)
x̄ = ITensors.setinds(ȳ, inds(x))
ā = broadcast_notangent(a)
return (NoTangent(), x̄, ā...)
end

function inv_op(f::Function, args...; kwargs...)
return error(
"Trying to differentiate `$f` but the inverse of the operation (`inv_op`) `$f` with arguments $args and keyword arguments $kwargs is not defined.",
)
end

function inv_op(::typeof(prime), x, n::Integer=1; kwargs...)
return prime(x, -n; kwargs...)
end

function inv_op(::typeof(replaceprime), x, n1n2::Pair; kwargs...)
return replaceprime(x, reverse(n1n2); kwargs...)
end

function inv_op(::typeof(addtags), x, args...; kwargs...)
return removetags(x, args...; kwargs...)
end

function inv_op(::typeof(removetags), x, args...; kwargs...)
return addtags(x, args...; kwargs...)
end

function inv_op(::typeof(replacetags), x, n1n2::Pair; kwargs...)
return replacetags(x, reverse(n1n2); kwargs...)
end

_check_inds(x::ITensor, y::ITensor) = hassameinds(x, y)
_check_inds(x::MPS, y::MPS) = hassameinds(siteinds, x, y)
_check_inds(x::MPO, y::MPO) = hassameinds(siteinds, x, y)

for fname in (
:prime, :setprime, :noprime, :replaceprime, :addtags, :removetags, :replacetags, :settags
)
@eval begin
function ChainRulesCore.rrule(f::typeof($fname), x::Union{MPS,MPO}, a...; kwargs...)
y = f(x, a...; kwargs...)
function f_pullback(ȳ)
x̄ = inv_op(f, unthunk(ȳ), a...; kwargs...)
if !_check_inds(x, x̄)
error(
"Trying to differentiate function `$f` with arguments $a and keyword arguments $kwargs. The forward pass indices $(inds(x)) do not match the reverse pass indices $(inds(x̄)). Likely this is because the priming/tagging operation you tried to perform is not invertible. Please write your code in a way where the index manipulation operation you are performing is invertible. For example, `prime(A::ITensor)` is invertible, with an inverse `prime(A, -1)`. However, `noprime(A)` is in general not invertible since the information about the prime levels of the original tensor are lost. Instead, you might try `prime(A, -1)` or `replaceprime(A, 1 => 0)` which are invertible.",
)
end
ā = broadcast_notangent(a)
return (NoTangent(), x̄, ā...)
end
return y, f_pullback
end
end
end

for fname in (
:prime,
:setprime,
@@ -97,17 +15,37 @@ for fname in (
:swapinds,
)
@eval begin
function ChainRulesCore.rrule(f::typeof($fname), x::ITensor, a...; kwargs...)
function rrule(f::typeof($fname), x::ITensor, a...; kwargs...)
y = f(x, a...; kwargs...)
function f_pullback(ȳ)
uȳ = unthunk(ȳ)
x̄ = replaceinds(uȳ, inds(y), inds(x))
ā = broadcast_notangent(a)
x̄ = replaceinds(unthunk(ȳ), inds(y) => inds(x))
ā = map_notangent(a)
return (NoTangent(), x̄, ā...)
end
return y, f_pullback
end
end
end

for fname in (
:prime, :setprime, :noprime, :replaceprime, :addtags, :removetags, :replacetags, :settags
)
@eval begin
function rrule(f::typeof($fname), x::Union{MPS,MPO}, a...; kwargs...)
y = f(x, a...; kwargs...)
function f_pullback(ȳ)
x̄ = copy(unthunk(ȳ))
for j in eachindex(x̄)
x̄[j] = replaceinds(ȳ[j], inds(y[j]) => inds(x[j]))
end
ā = map_notangent(a)
return (NoTangent(), x̄, ā...)
end
return y, f_pullback
end
end
end

rrule(::typeof(adjoint), x::Union{ITensor,MPS,MPO}) = rrule(prime, x)

@non_differentiable permute(::Indices, ::Indices)
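The rewritten pullbacks above replace the deleted `inv_op` machinery: rather than applying an inverse priming/tagging operation (which does not exist for calls like `noprime`), they map the cotangent's indices directly back onto the primal's with `replaceinds`. Roughly, for a single ITensor (a sketch using public ITensors calls):

```julia
using ITensors

i = Index(2, "i")
x = randomITensor(i, i')
y = prime(x)                              # forward pass: all indices primed

ȳ = randomITensor(inds(y)...)             # a cotangent carrying y's indices
x̄ = replaceinds(ȳ, inds(y) => inds(x))   # pullback: restore x's indices
```

Because no inverse operation is needed, this works uniformly even for non-invertible index manipulations, which is why the long invertibility error message could be dropped.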
