diff --git a/base/array.jl b/base/array.jl
index b8ad7e137f25e..baef032f96b3f 100644
--- a/base/array.jl
+++ b/base/array.jl
@@ -1833,6 +1833,11 @@ function reverseind(a::AbstractVector, i::Integer)
     first(li) + last(li) - i
 end
 
+# This implementation of `midpoint` is performance-optimized but safe
+# only if `lo <= hi`.
+midpoint(lo::T, hi::T) where T<:Integer = lo + ((hi - lo) >>> 0x01)
+midpoint(lo::Integer, hi::Integer) = midpoint(promote(lo, hi)...)
+
 """
     reverse!(v [, start=1 [, stop=length(v) ]]) -> v
 
@@ -1861,17 +1866,18 @@ julia> A
 """
 function reverse!(v::AbstractVector, start::Integer, stop::Integer=lastindex(v))
     s, n = Int(start), Int(stop)
-    liv = LinearIndices(v)
-    if n <= s # empty case; ok
-    elseif !(first(liv) ≤ s ≤ last(liv))
-        throw(BoundsError(v, s))
-    elseif !(first(liv) ≤ n ≤ last(liv))
-        throw(BoundsError(v, n))
-    end
-    r = n
-    @inbounds for i in s:div(s+n-1, 2)
-        v[i], v[r] = v[r], v[i]
-        r -= 1
+    if n > s # non-empty and non-trivial
+        liv = LinearIndices(v)
+        if !(first(liv) ≤ s ≤ last(liv))
+            throw(BoundsError(v, s))
+        elseif !(first(liv) ≤ n ≤ last(liv))
+            throw(BoundsError(v, n))
+        end
+        r = n
+        @inbounds for i in s:midpoint(s, n-1)
+            v[i], v[r] = v[r], v[i]
+            r -= 1
+        end
     end
     return v
 end
diff --git a/base/broadcast.jl b/base/broadcast.jl
index c9694d6645099..e381a225f66c5 100644
--- a/base/broadcast.jl
+++ b/base/broadcast.jl
@@ -1172,7 +1172,7 @@ Base.@propagate_inbounds dotview(B::BitArray, i::BitArray) = BitMaskedBitArray(B
 Base.show(io::IO, B::BitMaskedBitArray) = foreach(arg->show(io, arg), (typeof(B), (B.parent, B.mask)))
 # Override materialize! to prevent the BitMaskedBitArray from escaping to an overrideable method
 @inline materialize!(B::BitMaskedBitArray, bc::Broadcasted{<:Any,<:Any,typeof(identity),Tuple{Bool}}) = fill!(B, bc.args[1])
-@inline materialize!(B::BitMaskedBitArray, bc::Broadcasted{<:Any}) = materialize!(SubArray(B.parent, to_indices(B.parent, (B.mask,))), bc)
+@inline materialize!(B::BitMaskedBitArray, bc::Broadcasted{<:Any}) = materialize!(@inbounds(view(B.parent, B.mask)), bc)
 function Base.fill!(B::BitMaskedBitArray, b::Bool)
     Bc = B.parent.chunks
     Ic = B.mask.chunks
diff --git a/base/reinterpretarray.jl b/base/reinterpretarray.jl
index 9211a66a99cbe..7ca50f9e2859e 100644
--- a/base/reinterpretarray.jl
+++ b/base/reinterpretarray.jl
@@ -152,23 +152,15 @@ strides(a::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}) = siz
 stride(A::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}, k::Integer) =
     k ≤ ndims(A) ? strides(A)[k] : length(A)
 
-function strides(a::ReshapedReinterpretArray)
-    ap = parent(a)
-    els, elp = elsize(a), elsize(ap)
-    stp = strides(ap)
-    els == elp && return stp
-    els < elp && return (1, _checked_strides(stp, els, elp)...)
+function strides(a::ReinterpretArray{T,<:Any,S,<:AbstractArray{S},IsReshaped}) where {T,S,IsReshaped}
+    _checkcontiguous(Bool, a) && return size_to_strides(1, size(a))
+    stp = strides(parent(a))
+    els, elp = sizeof(T), sizeof(S)
+    els == elp && return stp # 0dim parent is also handled here.
+    IsReshaped && els < elp && return (1, _checked_strides(stp, els, elp)...)
     stp[1] == 1 || throw(ArgumentError("Parent must be contiguous in the 1st dimension!"))
-    return _checked_strides(tail(stp), els, elp)
-end
-
-function strides(a::NonReshapedReinterpretArray)
-    ap = parent(a)
-    els, elp = elsize(a), elsize(ap)
-    stp = strides(ap)
-    els == elp && return stp
-    stp[1] == 1 || throw(ArgumentError("Parent must be contiguous in the 1st dimension!"))
-    return (1, _checked_strides(tail(stp), els, elp)...)
+    st′ = _checked_strides(tail(stp), els, elp)
+    return IsReshaped ? st′ : (1, st′...)
 end
 
 @inline function _checked_strides(stp::Tuple, els::Integer, elp::Integer)
diff --git a/base/reshapedarray.jl b/base/reshapedarray.jl
index 82d293249afc6..367beaff7cc0e 100644
--- a/base/reshapedarray.jl
+++ b/base/reshapedarray.jl
@@ -294,14 +294,51 @@ unsafe_convert(::Type{Ptr{T}}, V::SubArray{T,N,P,<:Tuple{Vararg{Union{RangeIndex
     unsafe_convert(Ptr{T}, V.parent) + (first_index(V)-1)*sizeof(T)
 
-_checkcontiguous(::Type{Bool}, A::AbstractArray) = size_to_strides(1, size(A)...) == strides(A)
-_checkcontiguous(::Type{Bool}, A::Array) = true
+_checkcontiguous(::Type{Bool}, A::AbstractArray) = false
+# `strides(A::DenseArray)` calls `size_to_strides` by default.
+# Thus it's OK to assume all `DenseArray`s are contiguously stored.
+_checkcontiguous(::Type{Bool}, A::DenseArray) = true
 _checkcontiguous(::Type{Bool}, A::ReshapedArray) = _checkcontiguous(Bool, parent(A))
 _checkcontiguous(::Type{Bool}, A::FastContiguousSubArray) = _checkcontiguous(Bool, parent(A))
 
 function strides(a::ReshapedArray)
-    # We can handle non-contiguous parent if it's a StridedVector
-    ndims(parent(a)) == 1 && return size_to_strides(only(strides(parent(a))), size(a)...)
-    _checkcontiguous(Bool, a) || throw(ArgumentError("Parent must be contiguous."))
-    size_to_strides(1, size(a)...)
+    _checkcontiguous(Bool, a) && return size_to_strides(1, size(a)...)
+    apsz::Dims = size(a.parent)
+    apst::Dims = strides(a.parent)
+    msz, mst, n = merge_adjacent_dim(apsz, apst) # Try to perform "lazy" reshape
+    n == ndims(a.parent) && return size_to_strides(mst, size(a)...) # Parent is StridedVector-like
+    return _reshaped_strides(size(a), 1, msz, mst, n, apsz, apst)
+end
+
+function _reshaped_strides(::Dims{0}, reshaped::Int, msz::Int, ::Int, ::Int, ::Dims, ::Dims)
+    reshaped == msz && return ()
+    throw(ArgumentError("Input is not strided."))
+end
+function _reshaped_strides(sz::Dims, reshaped::Int, msz::Int, mst::Int, n::Int, apsz::Dims, apst::Dims)
+    st = reshaped * mst
+    reshaped = reshaped * sz[1]
+    if length(sz) > 1 && reshaped == msz && sz[2] != 1
+        msz, mst, n = merge_adjacent_dim(apsz, apst, n + 1)
+        reshaped = 1
+    end
+    sts = _reshaped_strides(tail(sz), reshaped, msz, mst, n, apsz, apst)
+    return (st, sts...)
+end + +merge_adjacent_dim(::Dims{0}, ::Dims{0}) = 1, 1, 0 +merge_adjacent_dim(apsz::Dims{1}, apst::Dims{1}) = apsz[1], apst[1], 1 +function merge_adjacent_dim(apsz::Dims{N}, apst::Dims{N}, n::Int = 1) where {N} + sz, st = apsz[n], apst[n] + while n < N + szₙ, stₙ = apsz[n+1], apst[n+1] + if sz == 1 + sz, st = szₙ, stₙ + elseif stₙ == st * sz || szₙ == 1 + sz *= szₙ + else + break + end + n += 1 + end + return sz, st, n end diff --git a/base/sort.jl b/base/sort.jl index d26e9a4b09332..307a515dad050 100644 --- a/base/sort.jl +++ b/base/sort.jl @@ -11,7 +11,7 @@ using .Base: copymutable, LinearIndices, length, (:), AbstractMatrix, AbstractUnitRange, isless, identity, eltype, >, <, <=, >=, |, +, -, *, !, extrema, sub_with_overflow, add_with_overflow, oneunit, div, getindex, setindex!, length, resize!, fill, Missing, require_one_based_indexing, keytype, - UnitRange, max, min + UnitRange, max, min, midpoint using .Base: >>>, !== @@ -166,11 +166,6 @@ same thing as `partialsort!` but leaving `v` unmodified. partialsort(v::AbstractVector, k::Union{Integer,OrdinalRange}; kws...) = partialsort!(copymutable(v), k; kws...) -# This implementation of `midpoint` is performance-optimized but safe -# only if `lo <= hi`. -midpoint(lo::T, hi::T) where T<:Integer = lo + ((hi - lo) >>> 0x01) -midpoint(lo::Integer, hi::Integer) = midpoint(promote(lo, hi)...) - # reference on sorted binary search: # http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary @@ -505,12 +500,12 @@ function sort!(v::AbstractVector, lo::Integer, hi::Integer, ::InsertionSortAlg, j = i x = v[i] while j > lo - if lt(o, x, v[j-1]) - v[j] = v[j-1] - j -= 1 - continue + y = v[j-1] + if !lt(o, x, y) + break end - break + v[j] = y + j -= 1 end v[j] = x end diff --git a/src/Makefile b/src/Makefile index e0ab9568fd242..57dd4482a1a34 100644 --- a/src/Makefile +++ b/src/Makefile @@ -115,6 +115,11 @@ PUBLIC_HEADER_TARGETS := $(addprefix $(build_includedir)/julia/,$(notdir $(PUBLI LLVM_LDFLAGS := $(shell $(LLVM_CONFIG_HOST) --ldflags) LLVM_CXXFLAGS := $(shell $(LLVM_CONFIG_HOST) --cxxflags) +# llvm-config --cxxflags does not return -DNDEBUG +ifeq ($(shell $(LLVM_CONFIG_HOST) --assertion-mode),OFF) +LLVM_CXXFLAGS += -DNDEBUG +endif + ifeq ($(JULIACODEGEN),LLVM) ifneq ($(USE_SYSTEM_LLVM),0) CG_LLVMLINK += $(LLVM_LDFLAGS) $(shell $(LLVM_CONFIG_HOST) --libs --system-libs) diff --git a/src/jl_uv.c b/src/jl_uv.c index d2a78c26bbc72..726290605ee32 100644 --- a/src/jl_uv.c +++ b/src/jl_uv.c @@ -60,6 +60,7 @@ void JL_UV_LOCK(void) } else { jl_atomic_fetch_add_relaxed(&jl_uv_n_waiters, 1); + jl_fence(); // [^store_buffering_2] jl_wake_libuv(); JL_LOCK(&jl_uv_mutex); jl_atomic_fetch_add_relaxed(&jl_uv_n_waiters, -1); diff --git a/src/jltypes.c b/src/jltypes.c index 72a9d257f140b..ef09f6f457fd1 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -420,10 +420,8 @@ static int datatype_name_cmp(jl_value_t *a, jl_value_t *b) JL_NOTSAFEPOINT // sort singletons first, then DataTypes, then UnionAlls, // ties broken alphabetically including module name & type parameters -static int union_sort_cmp(const void *ap, const void *bp) JL_NOTSAFEPOINT +static int union_sort_cmp(jl_value_t *a, jl_value_t *b) JL_NOTSAFEPOINT { - jl_value_t *a = *(jl_value_t**)ap; - jl_value_t *b = *(jl_value_t**)bp; if (a == NULL) return b == NULL ? 
0 : 1;
     if (b == NULL)
@@ -458,16 +456,33 @@ static int union_sort_cmp(const void *ap, const void *bp) JL_NOTSAFEPOINT
     }
 }
 
+static void isort_union(jl_value_t **a, size_t len) JL_NOTSAFEPOINT
+{
+    size_t i, j;
+    for (i = 1; i < len; i++) {
+        jl_value_t *x = a[i];
+        for (j = i; j > 0; j--) {
+            jl_value_t *y = a[j - 1];
+            if (!(union_sort_cmp(x, y) < 0))
+                break;
+            a[j] = y;
+        }
+        a[j] = x;
+    }
+}
+
 JL_DLLEXPORT jl_value_t *jl_type_union(jl_value_t **ts, size_t n)
 {
-    if (n == 0) return (jl_value_t*)jl_bottom_type;
+    if (n == 0)
+        return (jl_value_t*)jl_bottom_type;
     size_t i;
-    for(i=0; i < n; i++) {
+    for (i = 0; i < n; i++) {
         jl_value_t *pi = ts[i];
         if (!(jl_is_type(pi) || jl_is_typevar(pi)))
             jl_type_error("Union", (jl_value_t*)jl_type_type, pi);
     }
-    if (n == 1) return ts[0];
+    if (n == 1)
+        return ts[0];
     size_t nt = count_union_components(ts, n);
     jl_value_t **temp;
@@ -476,9 +491,9 @@ JL_DLLEXPORT jl_value_t *jl_type_union(jl_value_t **ts, size_t n)
     flatten_type_union(ts, n, temp, &count);
     assert(count == nt);
     size_t j;
-    for(i=0; i < nt; i++) {
-        int has_free = temp[i]!=NULL && jl_has_free_typevars(temp[i]);
-        for(j=0; j < nt; j++) {
+    for (i = 0; i < nt; i++) {
+        int has_free = temp[i] != NULL && jl_has_free_typevars(temp[i]);
+        for (j = 0; j < nt; j++) {
             if (j != i && temp[i] && temp[j]) {
                 if (temp[i] == jl_bottom_type ||
                     temp[j] == (jl_value_t*)jl_any_type ||
@@ -490,7 +505,7 @@ JL_DLLEXPORT jl_value_t *jl_type_union(jl_value_t **ts, size_t n)
             }
         }
     }
-    qsort(temp, nt, sizeof(jl_value_t*), union_sort_cmp);
+    isort_union(temp, nt);
     jl_value_t **ptu = &temp[nt];
     *ptu = jl_bottom_type;
     int k;
diff --git a/src/partr.c b/src/partr.c
index c8cc3245ebb4c..ddba203b92b1d 100644
--- a/src/partr.c
+++ b/src/partr.c
@@ -40,7 +40,14 @@ static const int16_t sleeping = 1;
 //     * 2a: `multiq_insert`
 //     * 2b: `jl_atomic_load_relaxed(&ptls->sleep_check_state)` in `jl_wakeup_thread` returns `not_sleeping`
 //   i.e., the dequeuer misses the enqueue and enqueuer misses the sleep state transition.
-
+// [^store_buffering_2]: and also
+//   * Enqueuer:
+//     * 1a: `jl_atomic_store_relaxed(jl_uv_n_waiters, 1)` in `JL_UV_LOCK`
+//     * 1b: "cheap read" of `handle->pending` in `uv_async_send` (via `JL_UV_LOCK`) loads `0`
+//   * Dequeuer:
+//     * 2a: store `2` to `handle->pending` in `uv_async_send` (via `JL_UV_LOCK` in `jl_task_get_next`)
+//     * 2b: `jl_atomic_load_relaxed(jl_uv_n_waiters)` in `jl_task_get_next` returns `0`
+//   i.e., the dequeuer misses that `n_waiters` is set and the enqueuer misses the `uv_stop` flag (in `signal_async`) being cleared
 
 JULIA_DEBUG_SLEEPWAKE(
 uint64_t wakeup_enter;
@@ -462,7 +469,7 @@ static int may_sleep(jl_ptls_t ptls) JL_NOTSAFEPOINT
     // by the thread itself. As a result, if this returns false, it will
     // continue returning false. If it returns true, we know the total
     // modification order of the fences.
-    jl_fence(); // [^store_buffering_1]
+    jl_fence(); // [^store_buffering_1] [^store_buffering_2]
     return jl_atomic_load_relaxed(&ptls->sleep_check_state) == sleeping;
 }
 
diff --git a/stdlib/LinearAlgebra/src/blas.jl b/stdlib/LinearAlgebra/src/blas.jl
index caa61cf94a52d..35d022e84bd17 100644
--- a/stdlib/LinearAlgebra/src/blas.jl
+++ b/stdlib/LinearAlgebra/src/blas.jl
@@ -169,18 +169,19 @@ end
 
 # Level 1
 # A help function to pick the pointer and inc for 1d like inputs.
 @inline function vec_pointer_stride(x::AbstractArray, stride0check = nothing)
-    isdense(x) && return pointer(x), 1 # simpify runtime check when possibe
-    ndims(x) == 1 || strides(x) == Base.size_to_strides(stride(x, 1), size(x)...) ||
-        throw(ArgumentError("only support vector like inputs"))
-    st = stride(x, 1)
+    Base._checkcontiguous(Bool, x) && return pointer(x), 1 # simplify runtime check when possible
+    st, ptr = checkedstride(x), pointer(x)
     isnothing(stride0check) || (st == 0 && throw(stride0check))
-    ptr = st > 0 ? pointer(x) : pointer(x, lastindex(x))
+    ptr += min(st, 0) * sizeof(eltype(x)) * (length(x) - 1)
     ptr, st
 end
-isdense(x) = x isa DenseArray
-isdense(x::Base.FastContiguousSubArray) = isdense(parent(x))
-isdense(x::Base.ReshapedArray) = isdense(parent(x))
-isdense(x::Base.ReinterpretArray) = isdense(parent(x))
+function checkedstride(x::AbstractArray)
+    szs::Dims = size(x)
+    sts::Dims = strides(x)
+    _, st, n = Base.merge_adjacent_dim(szs, sts)
+    n === ndims(x) && return st
+    throw(ArgumentError("only support vector like inputs"))
+end
 
 ## copy
 
 """
@@ -1565,11 +1566,27 @@ for (mfname, elty) in ((:dsymm_,:Float64),
             require_one_based_indexing(A, B, C)
             m, n = size(C)
             j = checksquare(A)
-            if j != (side == 'L' ? m : n)
-                throw(DimensionMismatch(lazy"A has size $(size(A)), C has size ($m,$n)"))
-            end
-            if size(B,2) != n
-                throw(DimensionMismatch(lazy"B has second dimension $(size(B,2)) but needs to match second dimension of C, $n"))
+            M, N = size(B)
+            if side == 'L'
+                if j != m
+                    throw(DimensionMismatch(lazy"A has first dimension $j but needs to match first dimension of C, $m"))
+                end
+                if N != n
+                    throw(DimensionMismatch(lazy"B has second dimension $N but needs to match second dimension of C, $n"))
+                end
+                if j != M
+                    throw(DimensionMismatch(lazy"A has second dimension $j but needs to match first dimension of B, $M"))
+                end
+            else
+                if j != n
+                    throw(DimensionMismatch(lazy"A has second dimension $j but needs to match second dimension of C, $n"))
+                end
+                if N != j
+                    throw(DimensionMismatch(lazy"B has second dimension $N but needs to match first dimension of A, $j"))
+                end
+                if M != m
+                    throw(DimensionMismatch(lazy"B has first dimension $M but needs to match first dimension of C, $m"))
+                end
             end
             chkstride1(A)
             chkstride1(B)
@@ -1639,11 +1656,27 @@ for (mfname, elty) in ((:zhemm_,:ComplexF64),
             require_one_based_indexing(A, B, C)
             m, n = size(C)
             j = checksquare(A)
-            if j != (side == 'L' ? m : n)
-                throw(DimensionMismatch(lazy"A has size $(size(A)), C has size ($m,$n)"))
-            end
-            if size(B,2) != n
-                throw(DimensionMismatch(lazy"B has second dimension $(size(B,2)) but needs to match second dimension of C, $n"))
+            M, N = size(B)
+            if side == 'L'
+                if j != m
+                    throw(DimensionMismatch(lazy"A has first dimension $j but needs to match first dimension of C, $m"))
+                end
+                if N != n
+                    throw(DimensionMismatch(lazy"B has second dimension $N but needs to match second dimension of C, $n"))
+                end
+                if j != M
+                    throw(DimensionMismatch(lazy"A has second dimension $j but needs to match first dimension of B, $M"))
+                end
+            else
+                if j != n
+                    throw(DimensionMismatch(lazy"A has second dimension $j but needs to match second dimension of C, $n"))
+                end
+                if N != j
+                    throw(DimensionMismatch(lazy"B has second dimension $N but needs to match first dimension of A, $j"))
+                end
+                if M != m
+                    throw(DimensionMismatch(lazy"B has first dimension $M but needs to match first dimension of C, $m"))
+                end
             end
             chkstride1(A)
             chkstride1(B)
diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl
index 33da0af79793c..8de71109082d0 100644
--- a/stdlib/LinearAlgebra/src/bunchkaufman.jl
+++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl
@@ -81,7 +81,7 @@ BunchKaufman(A::AbstractMatrix{T}, ipiv::AbstractVector{<:Integer}, uplo::Abstra
     BunchKaufman{T,typeof(A),typeof(ipiv)}(A, ipiv, uplo, symmetric, rook, info)
 # backwards-compatible constructors (remove with Julia 2.0)
 @deprecate(BunchKaufman(LD, ipiv, uplo, symmetric, rook, info) where {T,S},
-           BunchKaufman{T,S,typeof(ipiv)}(LD, ipiv, uplo, symmetric, rook, info))
+           BunchKaufman{T,S,typeof(ipiv)}(LD, ipiv, uplo, symmetric, rook, info), false)
 
 # iteration for destructuring into components
 Base.iterate(S::BunchKaufman) = (S.D, Val(:UL))
diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl
index bb831f8dca164..3e3e05757e962 100644
--- a/stdlib/LinearAlgebra/src/cholesky.jl
+++ b/stdlib/LinearAlgebra/src/cholesky.jl
@@ -168,7 +168,7 @@ CholeskyPivoted(A::AbstractMatrix{T}, uplo::AbstractChar, piv::AbstractVector{<:
     CholeskyPivoted{T,typeof(A),typeof(piv)}(A, uplo, piv, rank, tol, info)
 # backwards-compatible constructors (remove with Julia 2.0)
 @deprecate(CholeskyPivoted{T,S}(factors, uplo, piv, rank, tol, info) where {T,S<:AbstractMatrix},
-           CholeskyPivoted{T,S,typeof(piv)}(factors, uplo, piv, rank, tol, info))
+           CholeskyPivoted{T,S,typeof(piv)}(factors, uplo, piv, rank, tol, info), false)
 
 
 # iteration for destructuring into components
diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl
index f19df799bb4a7..9a3d77800f483 100644
--- a/stdlib/LinearAlgebra/src/lq.jl
+++ b/stdlib/LinearAlgebra/src/lq.jl
@@ -58,7 +58,7 @@ LQ{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} =
     LQ(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ))
 # backwards-compatible constructors (remove with Julia 2.0)
 @deprecate(LQ{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T,S},
-           LQ{T,S,typeof(τ)}(factors, τ))
+           LQ{T,S,typeof(τ)}(factors, τ), false)
 
 # iteration for destructuring into components
 Base.iterate(S::LQ) = (S.L, Val(:Q))
diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl
index eed82093af876..7a9fd97a76f5a 100644
--- a/stdlib/LinearAlgebra/src/lu.jl
+++ b/stdlib/LinearAlgebra/src/lu.jl
@@ -64,7 +64,7 @@ LU{T}(factors::AbstractMatrix, ipiv::AbstractVector{<:Integer}, info::Integer) w
 
 # backwards-compatible constructors (remove with
Julia 2.0) @deprecate(LU{T,S}(factors::AbstractMatrix{T}, ipiv::AbstractVector{<:Integer}, info::BlasInt) where {T,S}, - LU{T,S,typeof(ipiv)}(factors, ipiv, info)) + LU{T,S,typeof(ipiv)}(factors, ipiv, info), false) # iteration for destructuring into components Base.iterate(S::LU) = (S.L, Val(:U)) diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 4e1cc83b468f5..1d6fb0ecda0bc 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -49,7 +49,7 @@ QR{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} = QR(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ)) # backwards-compatible constructors (remove with Julia 2.0) @deprecate(QR{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T,S}, - QR{T,S,typeof(τ)}(factors, τ)) + QR{T,S,typeof(τ)}(factors, τ), false) # iteration for destructuring into components Base.iterate(S::QR) = (S.Q, Val(:R)) @@ -126,7 +126,7 @@ QRCompactWY{S}(factors::AbstractMatrix, T::AbstractMatrix) where {S} = QRCompactWY(convert(AbstractMatrix{S}, factors), convert(AbstractMatrix{S}, T)) # backwards-compatible constructors (remove with Julia 2.0) @deprecate(QRCompactWY{S,M}(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S,M}, - QRCompactWY{S,M,typeof(T)}(factors, T)) + QRCompactWY{S,M,typeof(T)}(factors, T), false) # iteration for destructuring into components Base.iterate(S::QRCompactWY) = (S.Q, Val(:R)) @@ -219,7 +219,7 @@ QRPivoted{T}(factors::AbstractMatrix, τ::AbstractVector, # backwards-compatible constructors (remove with Julia 2.0) @deprecate(QRPivoted{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}, jpvt::AbstractVector{<:Integer}) where {T,S}, - QRPivoted{T,S,typeof(τ),typeof(jpvt)}(factors, τ, jpvt)) + QRPivoted{T,S,typeof(τ),typeof(jpvt)}(factors, τ, jpvt), false) # iteration for destructuring into components Base.iterate(S::QRPivoted) = (S.Q, Val(:R)) @@ -314,9 +314,9 @@ julia> a = [1. 2.; 3. 4.] 
3.0 4.0 julia> qr!(a) -QRCompactWY{Float64, Matrix{Float64}, Matrix{Float64}} +LinearAlgebra.QRCompactWY{Float64, Matrix{Float64}, Matrix{Float64}} Q factor: -2×2 QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}}: +2×2 LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}}: -0.316228 -0.948683 -0.948683 0.316228 R factor: @@ -401,9 +401,9 @@ julia> A = [3.0 -6.0; 4.0 -8.0; 0.0 1.0] 0.0 1.0 julia> F = qr(A) -QRCompactWY{Float64, Matrix{Float64}, Matrix{Float64}} +LinearAlgebra.QRCompactWY{Float64, Matrix{Float64}, Matrix{Float64}} Q factor: -3×3 QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}}: +3×3 LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}}: -0.6 0.0 0.8 -0.8 0.0 -0.6 0.0 -1.0 0.0 @@ -541,7 +541,7 @@ QRPackedQ{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} = QRPackedQ(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ)) # backwards-compatible constructors (remove with Julia 2.0) @deprecate(QRPackedQ{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T,S}, - QRPackedQ{T,S,typeof(τ)}(factors, τ)) + QRPackedQ{T,S,typeof(τ)}(factors, τ), false) """ QRCompactWYQ <: AbstractMatrix @@ -564,7 +564,7 @@ QRCompactWYQ{S}(factors::AbstractMatrix, T::AbstractMatrix) where {S} = QRCompactWYQ(convert(AbstractMatrix{S}, factors), convert(AbstractMatrix{S}, T)) # backwards-compatible constructors (remove with Julia 2.0) @deprecate(QRCompactWYQ{S,M}(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S,M}, - QRCompactWYQ{S,M,typeof(T)}(factors, T)) + QRCompactWYQ{S,M,typeof(T)}(factors, T), false) QRPackedQ{T}(Q::QRPackedQ) where {T} = QRPackedQ(convert(AbstractMatrix{T}, Q.factors), convert(Vector{T}, Q.τ)) AbstractMatrix{T}(Q::QRPackedQ{T}) where {T} = Q diff --git a/stdlib/LinearAlgebra/test/blas.jl b/stdlib/LinearAlgebra/test/blas.jl index d39f7c45ba205..4c2e70b6f5042 100644 --- a/stdlib/LinearAlgebra/test/blas.jl +++ b/stdlib/LinearAlgebra/test/blas.jl @@ -18,9 +18,14 @@ function pack(A, uplo) end @testset "vec_pointer_stride" begin - a = zeros(4,4,4) - @test BLAS.asum(view(a,1:2:4,:,:)) == 0 # vector like + a = float(rand(1:20,4,4,4)) + @test BLAS.asum(a) == sum(a) # dense case + @test BLAS.asum(view(a,1:2:4,:,:)) == sum(view(a,1:2:4,:,:)) # vector like + @test BLAS.asum(view(a,1:3,2:2,3:3)) == sum(view(a,1:3,2:2,3:3)) + @test BLAS.asum(view(a,1:1,1:3,1:1)) == sum(view(a,1:1,1:3,1:1)) + @test BLAS.asum(view(a,1:1,1:1,1:3)) == sum(view(a,1:1,1:1,1:3)) @test_throws ArgumentError BLAS.asum(view(a,1:3:4,:,:)) # non-vector like + @test_throws ArgumentError BLAS.asum(view(a,1:2,1:1,1:3)) end Random.seed!(100) ## BLAS tests - testing the interface code to BLAS routines @@ -222,11 +227,19 @@ Random.seed!(100) @test_throws DimensionMismatch BLAS.symm('R','U',Cmn,Cnn) @test_throws DimensionMismatch BLAS.symm!('L','U',one(elty),Asymm,Cnn,one(elty),Cmn) @test_throws DimensionMismatch BLAS.symm!('L','U',one(elty),Asymm,Cnn,one(elty),Cnm) + @test_throws DimensionMismatch BLAS.symm!('L','U',one(elty),Asymm,Cmn,one(elty),Cnn) + @test_throws DimensionMismatch BLAS.symm!('R','U',one(elty),Asymm,Cnm,one(elty),Cmn) + @test_throws DimensionMismatch BLAS.symm!('R','U',one(elty),Asymm,Cnn,one(elty),Cnm) + @test_throws DimensionMismatch BLAS.symm!('R','U',one(elty),Asymm,Cmn,one(elty),Cnn) if elty <: BlasComplex @test_throws DimensionMismatch BLAS.hemm('L','U',Cnm,Cnn) @test_throws DimensionMismatch BLAS.hemm('R','U',Cmn,Cnn) @test_throws DimensionMismatch BLAS.hemm!('L','U',one(elty),Aherm,Cnn,one(elty),Cmn) @test_throws 
DimensionMismatch BLAS.hemm!('L','U',one(elty),Aherm,Cnn,one(elty),Cnm) + @test_throws DimensionMismatch BLAS.hemm!('L','U',one(elty),Aherm,Cmn,one(elty),Cnn) + @test_throws DimensionMismatch BLAS.hemm!('R','U',one(elty),Aherm,Cnm,one(elty),Cmn) + @test_throws DimensionMismatch BLAS.hemm!('R','U',one(elty),Aherm,Cnn,one(elty),Cnm) + @test_throws DimensionMismatch BLAS.hemm!('R','U',one(elty),Aherm,Cmn,one(elty),Cnn) end end end diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 47a36df5e7883..60b7f642b3b37 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -352,6 +352,9 @@ end C = zeros(eltya,n,n) @test Hermitian(aherm) * a ≈ aherm * a @test a * Hermitian(aherm) ≈ a * aherm + # rectangular multiplication + @test [a; a] * Hermitian(aherm) ≈ [a; a] * aherm + @test Hermitian(aherm) * [a a] ≈ aherm * [a a] @test Hermitian(aherm) * Hermitian(aherm) ≈ aherm*aherm @test_throws DimensionMismatch Hermitian(aherm) * Vector{eltya}(undef, n+1) LinearAlgebra.mul!(C,a,Hermitian(aherm)) @@ -360,6 +363,9 @@ end @test Symmetric(asym) * Symmetric(asym) ≈ asym*asym @test Symmetric(asym) * a ≈ asym * a @test a * Symmetric(asym) ≈ a * asym + # rectangular multiplication + @test Symmetric(asym) * [a a] ≈ asym * [a a] + @test [a; a] * Symmetric(asym) ≈ [a; a] * asym @test_throws DimensionMismatch Symmetric(asym) * Vector{eltya}(undef, n+1) LinearAlgebra.mul!(C,a,Symmetric(asym)) @test C ≈ a*asym diff --git a/test/abstractarray.jl b/test/abstractarray.jl index a2894c139eeee..43f4223affe62 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -1567,22 +1567,54 @@ end @test reshape(r, :) === reshape(r, (:,)) === r end +struct FakeZeroDimArray <: AbstractArray{Int, 0} end +Base.strides(::FakeZeroDimArray) = () +Base.size(::FakeZeroDimArray) = () @testset "strides for ReshapedArray" begin # Type-based contiguous check is tested in test/compiler/inline.jl + function check_strides(A::AbstractArray) + # Make sure stride(A, i) is equivalent with strides(A)[i] (if 1 <= i <= ndims(A)) + dims = ntuple(identity, ndims(A)) + map(i -> stride(A, i), dims) == @inferred(strides(A)) || return false + # Test strides via value check. + for i in eachindex(IndexLinear(), A) + A[i] === Base.unsafe_load(pointer(A, i)) || return false + end + return true + end # General contiguous check a = view(rand(10,10), 1:10, 1:10) - @test strides(vec(a)) == (1,) + @test check_strides(vec(a)) b = view(parent(a), 1:9, 1:10) - @test_throws "Parent must be contiguous." strides(vec(b)) + @test_throws "Input is not strided." 
strides(vec(b)) # StridedVector parent for n in 1:3 a = view(collect(1:60n), 1:n:60n) - @test strides(reshape(a, 3, 4, 5)) == (n, 3n, 12n) - @test strides(reshape(a, 5, 6, 2)) == (n, 5n, 30n) + @test check_strides(reshape(a, 3, 4, 5)) + @test check_strides(reshape(a, 5, 6, 2)) b = view(parent(a), 60n:-n:1) - @test strides(reshape(b, 3, 4, 5)) == (-n, -3n, -12n) - @test strides(reshape(b, 5, 6, 2)) == (-n, -5n, -30n) + @test check_strides(reshape(b, 3, 4, 5)) + @test check_strides(reshape(b, 5, 6, 2)) end + # StridedVector like parent + a = randn(10, 10, 10) + b = view(a, 1:10, 1:1, 5:5) + @test check_strides(reshape(b, 2, 5)) + # Other StridedArray parent + a = view(randn(10,10), 1:9, 1:10) + @test check_strides(reshape(a,3,3,2,5)) + @test check_strides(reshape(a,3,3,5,2)) + @test check_strides(reshape(a,9,5,2)) + @test check_strides(reshape(a,3,3,10)) + @test check_strides(reshape(a,1,3,1,3,1,5,1,2)) + @test check_strides(reshape(a,3,3,5,1,1,2,1,1)) + @test_throws "Input is not strided." strides(reshape(a,3,6,5)) + @test_throws "Input is not strided." strides(reshape(a,3,2,3,5)) + @test_throws "Input is not strided." strides(reshape(a,3,5,3,2)) + @test_throws "Input is not strided." strides(reshape(a,5,3,3,2)) + # Zero dimensional parent + a = reshape(FakeZeroDimArray(),1,1,1) + @test @inferred(strides(a)) == (1, 1, 1) end @testset "stride for 0 dims array #44087" begin diff --git a/test/broadcast.jl b/test/broadcast.jl index 5cddd0cb174f8..53f300e40292e 100644 --- a/test/broadcast.jl +++ b/test/broadcast.jl @@ -1079,3 +1079,11 @@ end y = randn(2) @inferred(test(x, y)) == [0, 0] end + +@testset "issue #45903, in place broadcast into a bit-masked bitmatrix" begin + A = BitArray(ones(3,3)) + pos = randn(3,3) + A[pos .< 0] .= false + @test all(>=(0), pos[A]) + @test count(A) == count(>=(0), pos) +end diff --git a/test/offsetarray.jl b/test/offsetarray.jl index 7621e14013627..b6aab384f2cd4 100644 --- a/test/offsetarray.jl +++ b/test/offsetarray.jl @@ -415,6 +415,23 @@ rv = reverse(v) cv = copy(v) @test reverse!(cv) == rv +@testset "reverse! (issue #45870)" begin + @testset for n in [4,5] + offset = typemax(Int)-n + vo = OffsetArray([1:n;], offset) + vo2 = OffsetArray([1:n;], offset) + @test reverse!(vo) == OffsetArray(n:-1:1, offset) + @test reverse!(vo) == vo2 + @test_throws BoundsError reverse!(vo, firstindex(vo)-1, firstindex(vo)) + @test reverse!(vo, firstindex(vo), firstindex(vo)-1) == vo2 + @test reverse!(vo, firstindex(vo), firstindex(vo)) == vo2 + @test reverse!(vo, lastindex(vo), lastindex(vo)) == vo2 + @test reverse!(vo, lastindex(vo), lastindex(vo)+1) == vo2 # overflow in stop + @test reverse!(vo, firstindex(vo)+1) == OffsetArray([1;n:-1:2], offset) + @test reverse!(vo2, firstindex(vo)+1, lastindex(vo)-1) == OffsetArray([1;n-1:-1:2;n], offset) + end +end + A = OffsetArray(rand(4,4), (-3,5)) @test lastindex(A) == 16 @test lastindex(A, 1) == 1