From f216cbb7d3eb5a070ae84e18662c9702b8ba7561 Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" Date: Fri, 22 Jul 2016 14:47:18 -0400 Subject: [PATCH 1/4] treat dot operators as dot calls, e.g. x .+ y --> (+).(x,y) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit syntax deprecation for .op method definitions, and removed these deprecations from Base use range + 1, not range .+ 1, to make sure we call the specialized + method that produces a range (not an array) add depwarn for using .+ etc as function objects support dotted pipe operators eliminate most broadcast(::typeof(func), ...) methods, since fusion makes them ~useless; make broadcast produce a BitArray if a Bool array is expected test for .op loop fusion docs for new broadcasting dot-operator behavior define broadcast! earlier (fixes #18462) use specialized code for non-fusing array ± scalar, for performance work around slight slowdown in Diagonal due to broadcast vs. broadcast_elwise_op closes #11053 --- NEWS.md | 15 +- base/LineEdit.jl | 2 +- base/abstractarraymath.jl | 9 - base/arraymath.jl | 28 ++- base/bitarray.jl | 97 ----------- base/broadcast.jl | 162 ++--------------- base/dates/Dates.jl | 1 + base/dates/arithmetic.jl | 21 +-- base/dates/periods.jl | 34 ++-- base/dates/ranges.jl | 9 +- base/deprecated.jl | 30 +++- base/docs/helpdb/Base.jl | 155 ----------------- base/exports.jl | 17 -- base/irrationals.jl | 4 - base/linalg/diagonal.jl | 10 +- base/linalg/linalg.jl | 3 +- base/linalg/uniformscaling.jl | 6 +- base/multidimensional.jl | 3 +- base/operators.jl | 76 +------- base/range.jl | 65 ++++--- base/rational.jl | 2 - base/sparse/cholmod.jl | 2 +- base/sparse/sparse.jl | 2 +- base/sparse/sparsematrix.jl | 46 +---- base/sparse/sparsevector.jl | 25 +-- base/strings/basic.jl | 2 - base/sysimg.jl | 28 +-- base/test.jl | 3 +- doc/src/manual/arrays.md | 39 +++-- doc/src/manual/functions.md | 13 +- doc/src/manual/mathematical-operations.md | 56 ++++-- doc/src/manual/performance-tips.md | 47 ++++- doc/src/stdlib/math.md | 12 -- doc/src/stdlib/punctuation.md | 2 +- src/ast.scm | 25 +++ src/julia-parser.scm | 2 +- src/julia-syntax.scm | 21 ++- test/arrayops.jl | 18 +- test/bitarray.jl | 202 +++++++++++----------- test/broadcast.jl | 41 +++-- test/dates/periods.jl | 2 + test/linalg/generic.jl | 10 +- test/parse.jl | 3 +- test/ranges.jl | 2 +- test/sparse/sparsevector.jl | 32 ++-- 45 files changed, 493 insertions(+), 891 deletions(-) diff --git a/NEWS.md b/NEWS.md index dcd4f9a178f5f..6f7c04aec3095 100644 --- a/NEWS.md +++ b/NEWS.md @@ -7,7 +7,7 @@ New language features Language changes ---------------- - * Multiline and singleline nonstandard command literals have been added. A + * Multi-line and single-line nonstandard command literals have been added. A nonstandard command literal is like a nonstandard string literal, but the syntax uses backquotes (``` ` ```) instead of double quotes, and the resulting macro called is suffixed with `_cmd`. For instance, the syntax @@ -17,6 +17,11 @@ Language changes module. For instance, `Base.r"x"` is now parsed as `Base.@r_str "x"`. Previously, this syntax parsed as an implicit multiplication. ([#18690]) + * For every binary operator `⨳`, `a .⨳ b` is now automatically equivalent to + the `broadcast` call `(⨳).(a, b)`. Hence, one no longer defines methods + for `.*` etcetera. This also means that "dot operations" automatically + fuse into a single loop, along with other dot calls `f.(x)`. 
([#17623]) + Breaking changes ---------------- @@ -34,6 +39,14 @@ This section lists changes that do not have deprecation warnings. * `broadcast` now handles tuples, and treats any argument that is not a tuple or an array as a "scalar" ([#16986]). + * `broadcast` now produces a `BitArray` instead of `Array{Bool}` for + functions yielding a boolean result. If you want `Array{Bool}`, use + `broadcast!` or `.=` ([#17623]). + + * Operations like `.+` and `.*` on `Range` objects are now generic + `broadcast` calls (see above) and produce an `Array`. If you want + a `Range` result, use `+` and `*`, etcetera ([#17623]). + Library improvements -------------------- diff --git a/base/LineEdit.jl b/base/LineEdit.jl index 5e476d5183564..27492222b8212 100644 --- a/base/LineEdit.jl +++ b/base/LineEdit.jl @@ -437,7 +437,7 @@ function splice_buffer!{T<:Integer}(buf::IOBuffer, r::UnitRange{T}, ins::Abstrac elseif pos > last(r) seek(buf, pos - length(r)) end - splice!(buf.data, r .+ 1, ins.data) # position(), etc, are 0-indexed + splice!(buf.data, r + 1, ins.data) # position(), etc, are 0-indexed buf.size = buf.size + sizeof(ins) - length(r) seek(buf, position(buf) + sizeof(ins)) end diff --git a/base/abstractarraymath.jl b/base/abstractarraymath.jl index 20864e69d5abc..17db951a945fd 100644 --- a/base/abstractarraymath.jl +++ b/base/abstractarraymath.jl @@ -91,15 +91,6 @@ imag{T<:Real}(x::AbstractArray{T}) = zero(x) +{T<:Number}(x::AbstractArray{T}) = x *{T<:Number}(x::AbstractArray{T,2}) = x -## Binary arithmetic operators ## - -*(A::Number, B::AbstractArray) = A .* B -*(A::AbstractArray, B::Number) = A .* B - -/(A::AbstractArray, B::Number) = A ./ B - -\(A::Number, B::AbstractArray) = B ./ A - # index A[:,:,...,i,:,:,...] where "i" is in dimension "d" """ diff --git a/base/arraymath.jl b/base/arraymath.jl index d8486bfd3a8aa..e23d8829ad871 100644 --- a/base/arraymath.jl +++ b/base/arraymath.jl @@ -60,10 +60,10 @@ end promote_array_type(F, ::Type, ::Type, T::Type) = T promote_array_type{S<:Real, A<:AbstractFloat}(F, ::Type{S}, ::Type{A}, ::Type) = A promote_array_type{S<:Integer, A<:Integer}(F, ::Type{S}, ::Type{A}, ::Type) = A -promote_array_type{S<:Integer, A<:Integer}(::typeof(./), ::Type{S}, ::Type{A}, T::Type) = T -promote_array_type{S<:Integer, A<:Integer}(::typeof(.\), ::Type{S}, ::Type{A}, T::Type) = T -promote_array_type{S<:Integer}(::typeof(./), ::Type{S}, ::Type{Bool}, T::Type) = T -promote_array_type{S<:Integer}(::typeof(.\), ::Type{S}, ::Type{Bool}, T::Type) = T +promote_array_type{S<:Integer, A<:Integer}(::typeof(/), ::Type{S}, ::Type{A}, T::Type) = T +promote_array_type{S<:Integer, A<:Integer}(::typeof(\), ::Type{S}, ::Type{A}, T::Type) = T +promote_array_type{S<:Integer}(::typeof(/), ::Type{S}, ::Type{Bool}, T::Type) = T +promote_array_type{S<:Integer}(::typeof(\), ::Type{S}, ::Type{Bool}, T::Type) = T promote_array_type{S<:Integer}(F, ::Type{S}, ::Type{Bool}, T::Type) = T for f in (:+, :-, :div, :mod, :&, :|, :xor) @@ -89,9 +89,9 @@ function _elementwise{T}(op, ::Type{T}, A::AbstractArray, B::AbstractArray) return F end -for f in (:.+, :.-, :.*, :./, :.\, :.^, :.÷, :.%, :.<<, :.>>, :div, :mod, :rem, :&, :|, :xor) - @eval begin - function ($f){T}(A::Number, B::AbstractArray{T}) +for f in (:div, :mod, :rem, :&, :|, :xor, :/, :\, :*, :+, :-) + if f != :/ + @eval function ($f){T}(A::Number, B::AbstractArray{T}) R = promote_op($f, typeof(A), T) S = promote_array_type($f, typeof(A), T, R) S === Any && return [($f)(A, b) for b in B] @@ -108,7 +108,9 @@ for f in (:.+, :.-, :.*, :./, :.\, :.^, :.÷, 
:.%, :.<<, :.>>, :div, :mod, :rem, end return F end - function ($f){T}(A::AbstractArray{T}, B::Number) + end + if f != :\ + @eval function ($f){T}(A::AbstractArray{T}, B::Number) R = promote_op($f, T, typeof(B)) S = promote_array_type($f, typeof(B), T, R) S === Any && return [($f)(a, B) for a in A] @@ -128,16 +130,6 @@ for f in (:.+, :.-, :.*, :./, :.\, :.^, :.÷, :.%, :.<<, :.>>, :div, :mod, :rem, end end -# familiar aliases for broadcasting operations of array ± scalar (#7226): -(+)(A::AbstractArray{Bool},x::Bool) = A .+ x -(+)(x::Bool,A::AbstractArray{Bool}) = x .+ A -(-)(A::AbstractArray{Bool},x::Bool) = A .- x -(-)(x::Bool,A::AbstractArray{Bool}) = x .- A -(+)(A::AbstractArray,x::Number) = A .+ x -(+)(x::Number,A::AbstractArray) = x .+ A -(-)(A::AbstractArray,x::Number) = A .- x -(-)(x::Number,A::AbstractArray) = x .- A - ## data movement ## function flipdim{T}(A::Array{T}, d::Integer) diff --git a/base/bitarray.jl b/base/bitarray.jl index 16e08b4852a5e..46a56ce9345fd 100644 --- a/base/bitarray.jl +++ b/base/bitarray.jl @@ -1136,9 +1136,6 @@ function empty!(B::BitVector) return B end -## Misc functions -broadcast(::typeof(abs), B::BitArray) = copy(B) - ## Unary operators ## function (-)(B::BitArray) @@ -1232,35 +1229,6 @@ for f in (:+, :-) end end -for (f) in (:.+, :.-) - for (arg1, arg2, T, t) in ((:(B::BitArray), :(x::Bool) , Int , (:b, :x)), - (:(B::BitArray), :(x::Number) , :(Bool, typeof(x)), (:b, :x)), - (:(x::Bool) , :(B::BitArray), Int , (:x, :b)), - (:(x::Number) , :(B::BitArray), :(typeof(x), Bool), (:x, :b))) - @eval function ($f)($arg1, $arg2) - $(if T === Int - quote - r = Array{Int}(size(B)) - end - else - quote - T = promote_op($f, $(T.args[1]), $(T.args[2])) - T === Any && return [($f)($(t[1]), $(t[2])) for b in B] - r = Array{T}(size(B)) - end - end) - bi = start(B) - ri = 1 - while !done(B, bi) - b, bi = next(B, bi) - @inbounds r[ri] = ($f)($(t[1]), $(t[2])) - ri += 1 - end - return r - end - end -end - for f in (:/, :\) @eval begin ($f)(A::BitArray, B::BitArray) = ($f)(Array(A), Array(B)) @@ -1359,71 +1327,6 @@ for f in (:&, :|, :xor) end end -function (.^)(B::BitArray, x::Bool) - x ? copy(B) : trues(size(B)) -end -function (.^)(x::Bool, B::BitArray) - x ? trues(size(B)) : ~B -end -function (.^)(x::Number, B::BitArray) - z = x ^ false - u = x ^ true - reshape([ B[i] ? u : z for i = 1:length(B) ], size(B)) -end -function (.^)(B::BitArray, x::Integer) - x == 0 && return trues(size(B)) - x < 0 && throw(DomainError()) - return copy(B) -end -function (.^){T<:Number}(B::BitArray, x::T) - x == 0 && return ones(typeof(true ^ x), size(B)) - T <: Real && x > 0 && return convert(Array{T}, B) - - z = nothing - u = nothing - zerr = nothing - uerr = nothing - try - z = false^x - catch err - zerr = err - end - try - u = true^x - catch err - uerr = err - end - if zerr === nothing && uerr === nothing - t = promote_type(typeof(z), typeof(u)) - elseif zerr === nothing - t = typeof(z) - else - t = typeof(u) - end - F = Array{t}(size(B)) - for i = 1:length(B) - if B[i] - if uerr === nothing - F[i] = u - else - throw(uerr) - end - else - if zerr === nothing - F[i] = z - else - throw(zerr) - end - end - end - return F -end - -(.*)(x::Bool, B::BitArray) = x & B -(.*)(B::BitArray, x::Bool) = B & x -(.*)(x::Number, B::BitArray) = x .* Array(B) -(.*)(B::BitArray, x::Number) = Array(B) .* x - ## promotion to complex ## # TODO? 
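As a concrete illustration of the semantics described in the commit message and NEWS entries above — a dotted operator is now sugar for a `broadcast` call and fuses with other dot calls — the following REPL-style sketch (not part of the patch) assumes a Julia build with this change applied:

```julia
# Sketch only: dotted operators lower to broadcast calls and fuse.
x = [1.0, 2.0, 3.0]
y = [10.0, 20.0, 30.0]

x .+ y              # parsed as (+).(x, y), i.e. broadcast(+, x, y) -> [11.0, 22.0, 33.0]
broadcast(+, x, y)  # equivalent call

# Nested dot calls and dot operators fuse into a single loop over x and y,
# allocating only the result array:
z = 2 .* x.^2 .+ sin.(y)
```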
diff --git a/base/broadcast.jl b/base/broadcast.jl index fbeb15b1a3542..4a4382643dbee 100644 --- a/base/broadcast.jl +++ b/base/broadcast.jl @@ -5,22 +5,19 @@ module Broadcast using Base.Cartesian using Base: @pure, promote_eltype_op, _promote_op, linearindices, tail, OneTo, to_shape, _msk_end, unsafe_bitgetindex, bitcache_chunks, bitcache_size, dumpbitcache -import Base: .+, .-, .*, ./, .\, .//, .==, .<, .!=, .<=, .÷, .%, .<<, .>>, .^ -import Base: broadcast -export broadcast!, bitbroadcast, dotview +import Base: broadcast, broadcast! +export bitbroadcast, dotview export broadcast_getindex, broadcast_setindex! ## Broadcasting utilities ## -# fallback for broadcasting with zero arguments and some special cases -broadcast(f) = f() +# fallbacks for some special cases @inline broadcast(f, x::Number...) = f(x...) @inline broadcast{N}(f, t::NTuple{N}, ts::Vararg{NTuple{N}}) = map(f, t, ts...) @inline broadcast(f, As::AbstractArray...) = broadcast_c(f, Array, As...) # special cases for "X .= ..." (broadcast!) assignments broadcast!(::typeof(identity), X::AbstractArray, x::Number) = fill!(X, x) -broadcast!(f, X::AbstractArray) = fill!(X, f()) broadcast!(f, X::AbstractArray, x::Number...) = fill!(X, f(x...)) function broadcast!{T,S,N}(::typeof(identity), x::AbstractArray{T,N}, y::AbstractArray{S,N}) check_broadcast_shape(broadcast_indices(x), broadcast_indices(y)) @@ -252,6 +249,18 @@ end return B end +# default to BitArray for broadcast operations producing Bool, to save 8x space +# in the common case where this is used for logical array indexing; in +# performance-critical cases where Array{Bool} is desired, one can always +# use broadcast! instead. +function broadcast_t(f, ::Type{Bool}, shape, iter, As...) + B = similar(BitArray, shape) + nargs = length(As) + keeps, Idefaults = map_newindexer(shape, As) + _broadcast!(f, B, keeps, Idefaults, As, Val{nargs}, iter) + return B +end + # broadcast method that uses inference to find the type, but preserves abstract # container types when possible (used by binary elementwise operators) @inline broadcast_elwise_op(f, As...) = @@ -459,147 +468,6 @@ position in `X` at the indices in `A` given by the same positions in `inds`. end end -## elementwise operators ## - -for op in (:÷, :%, :<<, :>>, :-, :/, :\, ://, :^) - @eval $(Symbol(:., op))(A::AbstractArray, B::AbstractArray) = broadcast_elwise_op($op, A, B) -end -.+(As::AbstractArray...) = broadcast_elwise_op(+, As...) -.*(As::AbstractArray...) = broadcast_elwise_op(*, As...) 
- -# ## element-wise comparison operators returning BitArray ## - -.==(A::AbstractArray, B::AbstractArray) = bitbroadcast(==, A, B) - .<(A::AbstractArray, B::AbstractArray) = bitbroadcast(<, A, B) -.!=(A::AbstractArray, B::AbstractArray) = bitbroadcast(!=, A, B) -.<=(A::AbstractArray, B::AbstractArray) = bitbroadcast(<=, A, B) - -function broadcast_bitarrays(scalarf, bitf, A::AbstractArray{Bool}, B::AbstractArray{Bool}) - local shape - try - shape = promote_shape(indices(A), indices(B)) - catch - return bitbroadcast(scalarf, A, B) - end - F = BitArray(to_shape(shape)) - Fc = F.chunks - Ac = BitArray(A).chunks - Bc = BitArray(B).chunks - if !isempty(Ac) && !isempty(Bc) - for i = 1:length(Fc) - 1 - Fc[i] = (bitf)(Ac[i], Bc[i]) - end - Fc[end] = (bitf)(Ac[end], Bc[end]) & _msk_end(F) - end - return F -end - -biteq(a::UInt64, b::UInt64) = ~a ⊻ b -bitlt(a::UInt64, b::UInt64) = ~a & b -bitneq(a::UInt64, b::UInt64) = a ⊻ b -bitle(a::UInt64, b::UInt64) = ~a | b - -.==(A::AbstractArray{Bool}, B::AbstractArray{Bool}) = broadcast_bitarrays(==, biteq, A, B) - .<(A::AbstractArray{Bool}, B::AbstractArray{Bool}) = broadcast_bitarrays(<, bitlt, A, B) -.!=(A::AbstractArray{Bool}, B::AbstractArray{Bool}) = broadcast_bitarrays(!=, bitneq, A, B) -.<=(A::AbstractArray{Bool}, B::AbstractArray{Bool}) = broadcast_bitarrays(<=, bitle, A, B) - -function bitcache(op, A, B, refA, refB, l::Int, ind::Int, C::Vector{Bool}) - left = l - ind + 1 - @inbounds begin - for j = 1:min(bitcache_size, left) - C[j] = (op)(refA(A, ind), refB(B, ind)) - ind += 1 - end - C[left+1:bitcache_size] = false - end - return ind -end - -# note: the following are not broadcasting, but need to be defined here to avoid -# ambiguity warnings - -for (f, scalarf) in ((:.==, :(==)), - (:.< , :< ), - (:.!=, :!= ), - (:.<=, :<= )) - for (sigA, sigB, active, refA, refB) in ((:Any, :AbstractArray, :B, - :((A,ind)->A), :((B,ind)->B[ind])), - (:AbstractArray, :Any, :A, - :((A,ind)->A[ind]), :((B,ind)->B))) - shape = :(indices($active)) - @eval begin - function ($f)(A::$sigA, B::$sigB) - P = similar(BitArray, $shape) - F = parent(P) - l = length(F) - l == 0 && return F - Fc = F.chunks - C = Array{Bool}(bitcache_size) - ind = first(linearindices($active)) - cind = 1 - for i = 1:div(l + bitcache_size - 1, bitcache_size) - ind = bitcache($scalarf, A, B, $refA, $refB, l, ind, C) - dumpbitcache(Fc, cind, C) - cind += bitcache_chunks - end - return P - end - end - end -end - -## specialized element-wise operators for BitArray - -(.^)(A::BitArray, B::AbstractArray{Bool}) = (B .<= A) -(.^)(A::AbstractArray{Bool}, B::AbstractArray{Bool}) = (B .<= A) - -function bitcache_pow{T}(Ac::Vector{UInt64}, B::Array{T}, l::Int, ind::Int, C::Vector{Bool}) - left = l - ind + 1 - @inbounds begin - for j = 1:min(bitcache_size, left) - C[j] = unsafe_bitgetindex(Ac, ind) ^ B[ind] - ind += 1 - end - C[left+1:bitcache_size] = false - end - return ind -end -function (.^){T<:Integer}(A::BitArray, B::Array{T}) - local shape - try - shape = promote_shape(indices(A), indices(B)) - catch - return bitbroadcast(^, A, B) - end - F = BitArray(to_shape(shape)) - l = length(F) - l == 0 && return F - Ac = A.chunks - Fc = F.chunks - C = Array{Bool}(bitcache_size) - ind = 1 - cind = 1 - for i = 1:div(l + bitcache_size - 1, bitcache_size) - ind = bitcache_pow(Ac, B, l, ind, C) - dumpbitcache(Fc, cind, C) - cind += bitcache_chunks - end - return F -end - -for (sigA, sigB) in ((BitArray, BitArray), - (AbstractArray{Bool}, BitArray), - (BitArray, AbstractArray{Bool})) - @eval function (.*)(A::$sigA, 
B::$sigB) - try - return BitArray(A) & BitArray(B) - catch - return bitbroadcast(&, A, B) - end - end -end - ############################################################ # x[...] .= f.(y...) ---> broadcast!(f, dotview(x, ...), y...). diff --git a/base/dates/Dates.jl b/base/dates/Dates.jl index ad620d10125d4..b15e204924bd7 100644 --- a/base/dates/Dates.jl +++ b/base/dates/Dates.jl @@ -3,6 +3,7 @@ module Dates importall ..Base.Operators +import ..Base.broadcast using Base.Iterators diff --git a/base/dates/arithmetic.jl b/base/dates/arithmetic.jl index 94752ffe2390a..72b5313cc004b 100644 --- a/base/dates/arithmetic.jl +++ b/base/dates/arithmetic.jl @@ -66,30 +66,21 @@ end (+)(y::Period,x::TimeType) = x + y (-)(y::Period,x::TimeType) = x - y -for op in (:.+, :.-) - op_ = Symbol(string(op)[2:end]) +for op in (:+, :-) @eval begin # GeneralPeriod, AbstractArray{TimeType} - ($op){T<:TimeType}(x::AbstractArray{T}, y::GeneralPeriod) = - reshape(T[($op_)(i,y) for i in x], size(x)) - ($op){T<:TimeType}(y::GeneralPeriod, x::AbstractArray{T}) = ($op)(x,y) - ($op_){T<:TimeType}(x::AbstractArray{T}, y::GeneralPeriod) = ($op)(x,y) - ($op_){T<:TimeType}(y::GeneralPeriod, x::AbstractArray{T}) = ($op)(x,y) + ($op){T<:TimeType}(x::AbstractArray{T}, y::GeneralPeriod) = broadcast($op,x,y) + ($op){T<:TimeType}(y::GeneralPeriod, x::AbstractArray{T}) = broadcast($op,x,y) # TimeType, StridedArray{GeneralPeriod} - ($op){T<:TimeType,P<:GeneralPeriod}(x::StridedArray{P}, y::T) = - reshape(T[($op_)(i,y) for i in x], size(x)) - ($op){P<:GeneralPeriod}(y::TimeType, x::StridedArray{P}) = ($op)(x,y) - ($op_){T<:TimeType,P<:GeneralPeriod}(x::StridedArray{P}, y::T) = ($op)(x,y) - ($op_){P<:GeneralPeriod}(y::TimeType, x::StridedArray{P}) = ($op)(x,y) + ($op){T<:TimeType,P<:GeneralPeriod}(x::StridedArray{P}, y::T) = broadcast($op,x,y) + ($op){P<:GeneralPeriod}(y::TimeType, x::StridedArray{P}) = broadcast($op,x,y) end end # TimeType, AbstractArray{TimeType} -(.-){T<:TimeType}(x::AbstractArray{T}, y::T) = reshape(Period[i - y for i in x], size(x)) -(.-){T<:TimeType}(y::T, x::AbstractArray{T}) = -(x .- y) (-){T<:TimeType}(x::AbstractArray{T}, y::T) = x .- y -(-){T<:TimeType}(y::T, x::AbstractArray{T}) = -(x .- y) +(-){T<:TimeType}(y::T, x::AbstractArray{T}) = y .- x # AbstractArray{TimeType}, AbstractArray{TimeType} (-){T<:TimeType}(x::OrdinalRange{T}, y::OrdinalRange{T}) = collect(x) - collect(y) diff --git a/base/dates/periods.jl b/base/dates/periods.jl index def492fc65e26..a37e8d48a7d6d 100644 --- a/base/dates/periods.jl +++ b/base/dates/periods.jl @@ -65,7 +65,7 @@ Base.isless{P<:Period}(x::P,y::P) = isless(value(x),value(y)) =={P<:Period}(x::P,y::P) = value(x) == value(y) # Period Arithmetic, grouped by dimensionality: -import Base: div, fld, mod, rem, gcd, lcm, +, -, *, /, %, .+, .-, .*, .% +import Base: div, fld, mod, rem, gcd, lcm, +, -, *, /, % for op in (:+,:-,:lcm,:gcd) @eval ($op){P<:Period}(x::P,y::P) = P(($op)(value(x),value(y))) end @@ -84,23 +84,18 @@ for op in (:rem,:mod) end end -/{P<:Period}(X::StridedArray{P}, y::P) = X ./ y -%{P<:Period}(X::StridedArray{P}, y::P) = X .% y *{P<:Period}(x::P,y::Real) = P(value(x) * Int64(y)) *(y::Real,x::Period) = x * y -.*{P<:Period}(y::Real, X::StridedArray{P}) = X .* y -for (op,Ty,Tz) in ((:.*,Real,:P), - (:./,:P,Float64), (:./,Real,:P), +for (op,Ty,Tz) in ((:*,Real,:P), + (:/,:P,Float64), (:/,Real,:P), (:div,:P,Int64), (:div,Integer,:P), - (:.%,:P,:P), + (:%,:P,:P), (:mod,:P,:P)) - sop = string(op) - op_ = sop[1] == '.' ? 
Symbol(sop[2:end]) : op @eval begin function ($op){P<:Period}(X::StridedArray{P},y::$Ty) Z = similar(X, $Tz) for (Idst, Isrc) in zip(eachindex(Z), eachindex(X)) - @inbounds Z[Idst] = ($op_)(X[Isrc],y) + @inbounds Z[Idst] = ($op)(X[Isrc],y) end return Z end @@ -322,21 +317,12 @@ GeneralPeriod = Union{Period,CompoundPeriod} (+)(x::GeneralPeriod) = x (+){P<:GeneralPeriod}(x::StridedArray{P}) = x -for op in (:.+, :.-) - op_ = Symbol(string(op)[2:end]) +for op in (:+, :-) @eval begin - function ($op){P<:GeneralPeriod}(X::StridedArray{P},y::GeneralPeriod) - Z = similar(X, CompoundPeriod) - for (Idst, Isrc) in zip(eachindex(Z), eachindex(X)) - @inbounds Z[Idst] = ($op_)(X[Isrc],y) - end - return Z - end - ($op){P<:GeneralPeriod}(x::GeneralPeriod,Y::StridedArray{P}) = ($op)(Y,x) |> ($op_) - ($op_){P<:GeneralPeriod}(x::GeneralPeriod,Y::StridedArray{P}) = ($op)(Y,x) |> ($op_) - ($op_){P<:GeneralPeriod}(Y::StridedArray{P},x::GeneralPeriod) = ($op)(Y,x) - ($op_){P<:GeneralPeriod, Q<:GeneralPeriod}(X::StridedArray{P}, Y::StridedArray{Q}) = - reshape(CompoundPeriod[($op_)(x,y) for (x,y) in zip(X, Y)], promote_shape(size(X),size(Y))) + ($op){P<:GeneralPeriod}(x::GeneralPeriod,Y::StridedArray{P}) = broadcast($op,x,Y) + ($op){P<:GeneralPeriod}(Y::StridedArray{P},x::GeneralPeriod) = broadcast($op,Y,x) + ($op){P<:GeneralPeriod, Q<:GeneralPeriod}(X::StridedArray{P}, Y::StridedArray{Q}) = + reshape(CompoundPeriod[($op)(x,y) for (x,y) in zip(X, Y)], promote_shape(size(X),size(Y))) end end diff --git a/base/dates/ranges.jl b/base/dates/ranges.jl index 1ce75142df454..9ea398869f049 100644 --- a/base/dates/ranges.jl +++ b/base/dates/ranges.jl @@ -34,9 +34,6 @@ Base.start{T<:TimeType}(r::StepRange{T}) = 0 Base.next{T<:TimeType}(r::StepRange{T}, i::Int) = (r.start+r.step*i,i+1) Base.done{T<:TimeType,S<:Period}(r::StepRange{T,S}, i::Integer) = length(r) <= i -.+{T<:TimeType}(x::Period, r::Range{T}) = (x+first(r)):step(r):(x+last(r)) -.+{T<:TimeType}(r::Range{T},x::Period) = x .+ r -+{T<:TimeType}(r::Range{T},x::Period) = x .+ r -+{T<:TimeType}(x::Period,r::Range{T}) = x .+ r -.-{T<:TimeType}(r::Range{T},x::Period) = (first(r)-x):step(r):(last(r)-x) --{T<:TimeType}(r::Range{T},x::Period) = r .- x ++{T<:TimeType}(x::Period, r::Range{T}) = (x+first(r)):step(r):(x+last(r)) ++{T<:TimeType}(r::Range{T},x::Period) = x + r +-{T<:TimeType}(r::Range{T},x::Period) = (first(r)-x):step(r):(last(r)-x) diff --git a/base/deprecated.jl b/base/deprecated.jl index c6b5dde226083..ab7a4a6e1a208 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -307,26 +307,26 @@ for (Fun, func) in [(:IdFun, :identity), (:OrFun, :|), (:XorFun, :xor), (:AddFun, :+), - (:DotAddFun, :.+), + # (:DotAddFun, :.+), (:SubFun, :-), - (:DotSubFun, :.-), + # (:DotSubFun, :.-), (:MulFun, :*), - (:DotMulFun, :.*), + # (:DotMulFun, :.*), (:RDivFun, :/), - (:DotRDivFun, :./), + # (:DotRDivFun, :./), (:LDivFun, :\), (:IDivFun, :div), - (:DotIDivFun, :.÷), + # (:DotIDivFun, :.÷), (:ModFun, :mod), (:RemFun, :rem), - (:DotRemFun, :.%), + # (:DotRemFun, :.%), (:PowFun, :^), (:MaxFun, :scalarmax), (:MinFun, :scalarmin), (:LessFun, :<), (:MoreFun, :>), - (:DotLSFun, :.<<), - (:DotRSFun, :.>>), + # (:DotLSFun, :.<<), + # (:DotRSFun, :.>>), (:ElementwiseMaxFun, :max), (:ElementwiseMinFun, :min), (:ComplexFun, :complex), @@ -1003,6 +1003,20 @@ macro vectorize_2arg(S,f) end export @vectorize_1arg, @vectorize_2arg +# deprecations for uses of old dot operators (.* etc) as objects, rather than +# just calling them infix. 
+for op in (:(!=), :≠, :+, :-, :*, :/, :÷, :%, :<, :(<=), :≤, :(==), :>, :>=, :≥, :\, :^) + dotop = Symbol('.', op) + # define as const dotop = (a,b) -> ... + # to work around syntax deprecation for dotop(a,b) = ... + @eval const $dotop = (a,b) -> begin + depwarn(string($(string(dotop)), " is no longer a function object; use broadcast(",$op,", ...) instead"), + $(QuoteNode(dotop))) + broadcast($op, a, b) + end + @eval export $dotop +end + # Devectorize manually vectorized abs methods in favor of compact broadcast syntax @deprecate abs(f::Base.Pkg.Resolve.MaxSum.Field) abs.(f) @deprecate abs(B::BitArray) abs.(B) diff --git a/base/docs/helpdb/Base.jl b/base/docs/helpdb/Base.jl index 545bfac1bd37e..cb199b0d0c6c8 100644 --- a/base/docs/helpdb/Base.jl +++ b/base/docs/helpdb/Base.jl @@ -85,19 +85,6 @@ false """ isinteger -""" - ./(x, y) - -Element-wise right division operator. - -```jldoctest -julia> [1 2 3] ./ [1 2 3] -1×3 Array{Float64,2}: - 1.0 1.0 1.0 -``` -""" -Base.:(./) - """ prod!(r, A) @@ -146,21 +133,6 @@ losslessly, some loss is tolerated; for example, `promote_type(Int64,Float64)` r """ promote_type -""" -``` -.*(x, y) -``` - -Element-wise multiplication operator. - -```jldoctest -julia> [1 2 3] .* [1 2 3] -1×3 Array{Int64,2}: - 1 4 9 -``` -""" -Base.:(.*) - """ backtrace() @@ -312,19 +284,6 @@ the same manner as C. """ unsafe_copy!(dest::Array, d, src::Array, so, N) -""" - .^(x, y) - -Element-wise exponentiation operator. - -```jldoctest -julia> [1 2 3] .^ [1 2 3] -1×3 Array{Int64,2}: - 1 4 27 -``` -""" -Base.:(.^) - """ Float32(x [, mode::RoundingMode]) @@ -934,21 +893,6 @@ In-place version of [`reverse`](@ref). """ reverse! -""" - .<(x, y) - -Element-wise less-than comparison operator. - -```jldoctest -julia> [1; 2; 3] .< [2; 1; 4] -3-element BitArray{1}: - true - false - true -``` -""" -Base.:(.<) - """ UndefRefError() @@ -2072,21 +2016,6 @@ Compute the minimum value of `A` over the singleton dimensions of `r`, and write """ minimum! -""" - .-(x, y) - -Element-wise subtraction operator. - -```jldoctest -julia> [4; 5; 6] .- [1; 2; 4] -3-element Array{Int64,1}: - 3 - 3 - 2 -``` -""" -Base.:(.-) - """ unsafe_trunc(T, x) @@ -2259,52 +2188,6 @@ Assign `x` to a named field in `value` of composite type. The syntax `a.b = c` c """ setfield! -""" - .\\(x, y) - -Element-wise left division operator. - -```jldoctest -julia> A = [1 2; 3 4] -2×2 Array{Int64,2}: - 1 2 - 3 4 - -julia> A .\\ [1 2] -2×2 Array{Float64,2}: - 1.0 1.0 - 0.333333 0.5 -``` - -```jldoctest -julia> A = [1 0; 0 -1]; - -julia> B = [0 1; 1 0]; - -julia> C = [A, B] -2-element Array{Array{Int64,2},1}: - [1 0; 0 -1] - [0 1; 1 0] - -julia> x = [1; 0]; - -julia> y = [0; 1]; - -julia> D = [x, y] -2-element Array{Array{Int64,1},1}: - [1,0] - [0,1] - -julia> C .\\ D -2-element Array{Array{Float64,1},1}: - [1.0,-0.0] - [1.0,0.0] -``` - -See also [`broadcast`](@ref). -""" -Base.:(.\)(x,y) - """ ``` *(x, y...) @@ -2487,19 +2370,6 @@ An attempted access to a [`Nullable`](@ref) with no defined value. """ NullException -""" - .==(x, y) - -Element-wise equality comparison operator. - -```jldoctest -julia> [1 2 3] .== [1 2 4] -1×3 BitArray{2}: - true true false -``` -""" -Base.:(.==) - """ cfunction(function::Function, ReturnType::Type, (ArgumentTypes...)) @@ -2892,31 +2762,6 @@ Compute the midpoints of the bins with edges `e`. The result is a vector/range o """ midpoints -""" - .+(x, y) - -Element-wise addition operator. 
- -```jldoctest -julia> A = [1 2; 3 4]; - -julia> B = [5 6; 7 8]; - -julia> C = [A, B] -2-element Array{Array{Int64,2},1}: - [1 2; 3 4] - [5 6; 7 8] - -julia> C .+ [[1; 2] [3; 4]] -2×2 Array{Array{Int64,2},2}: - [2 3; 4 5] [4 5; 6 7] - [7 8; 9 10] [9 10; 11 12] -``` - -See also [`broadcast`](@ref). -""" -Base.:(.+) - """ reverseind(v, i) diff --git a/base/exports.jl b/base/exports.jl index a3a58c8ec737a..c11bea901fd19 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -213,23 +213,6 @@ export *, +, -, - .!=, - .≠, - .+, - .-, - .*, - ./, - .÷, - .%, - .<, - .<=, - .≤, - .==, - .>, - .>=, - .≥, - .\, - .^, /, //, .//, diff --git a/base/irrationals.jl b/base/irrationals.jl index b917cb588b66c..d3f470312f6ea 100644 --- a/base/irrationals.jl +++ b/base/irrationals.jl @@ -182,14 +182,10 @@ catalan # use exp for e^x or e.^x, as in # ^(::Irrational{:e}, x::Number) = exp(x) -# .^(::Irrational{:e}, x) = exp(x) # but need to loop over types to prevent ambiguity with generic rules for ^(::Number, x) etc. for T in (Irrational, Rational, Integer, Number) ^(::Irrational{:e}, x::T) = exp(x) end -for T in (Range, BitArray, StridedArray, AbstractArray) - .^(::Irrational{:e}, x::T) = exp.(x) -end log(::Irrational{:e}) = 1 # use 1 to correctly promote expressions like log(x)/log(e) log(::Irrational{:e}, x::Number) = log(x) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index 8b168f5629e32..5fa8e304f71b0 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -141,8 +141,8 @@ end *{T<:Number}(x::T, D::Diagonal) = Diagonal(x * D.diag) *{T<:Number}(D::Diagonal, x::T) = Diagonal(D.diag * x) /{T<:Number}(D::Diagonal, x::T) = Diagonal(D.diag / x) -*(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .* Db.diag) -*(D::Diagonal, V::AbstractVector) = D.diag .* V +*(Da::Diagonal, Db::Diagonal) = Diagonal(broadcast_elwise_op(*, Da.diag, Db.diag)) +*(D::Diagonal, V::AbstractVector) = broadcast_elwise_op(*, D.diag, V) (*)(A::AbstractTriangular, D::Diagonal) = A_mul_B!(copy(A), D) (*)(D::Diagonal, B::AbstractTriangular) = A_mul_B!(D, copy(B)) @@ -223,7 +223,7 @@ A_mul_B!(A::AbstractMatrix,B::Diagonal) = scale!(A,B.diag) A_mul_Bt!(A::AbstractMatrix,B::Diagonal) = scale!(A,B.diag) A_mul_Bc!(A::AbstractMatrix,B::Diagonal) = scale!(A,conj(B.diag)) -/(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag ./ Db.diag ) +/(Da::Diagonal, Db::Diagonal) = Diagonal(broadcast_elwise_op(/, Da.diag, Db.diag)) function A_ldiv_B!{T}(D::Diagonal{T}, v::AbstractVector{T}) if length(v) != length(D.diag) throw(DimensionMismatch("diagonal matrix is $(length(D.diag)) by $(length(D.diag)) but right hand side has $(length(v)) rows")) @@ -292,8 +292,8 @@ function A_ldiv_B!(D::Diagonal, B::StridedVecOrMat) return B end (\)(D::Diagonal, A::AbstractMatrix) = D.diag .\ A -(\)(D::Diagonal, b::AbstractVector) = D.diag .\ b -(\)(Da::Diagonal, Db::Diagonal) = Diagonal(Db.diag ./ Da.diag) +(\)(D::Diagonal, b::AbstractVector) = broadcast_elwise_op(\, D.diag, b) +(\)(Da::Diagonal, Db::Diagonal) = Diagonal(broadcast_elwise_op(\, Da.diag, Db.diag)) function inv{T}(D::Diagonal{T}) Di = similar(D.diag, typeof(inv(zero(T)))) diff --git a/base/linalg/linalg.jl b/base/linalg/linalg.jl index 13c7dcdd1167a..a2fc34a4a55e3 100644 --- a/base/linalg/linalg.jl +++ b/base/linalg/linalg.jl @@ -2,7 +2,7 @@ module LinAlg -import Base: \, /, *, ^, +, -, ==, ./, .* +import Base: \, /, *, ^, +, -, == import Base: A_mul_Bt, At_ldiv_Bt, A_rdiv_Bc, At_ldiv_B, Ac_mul_Bc, A_mul_Bc, Ac_mul_B, Ac_ldiv_B, Ac_ldiv_Bc, At_mul_Bt, A_rdiv_Bt, At_mul_B import Base: 
USE_BLAS64, abs, big, ceil, conj, convert, copy, copy!, copy_transpose!, @@ -13,6 +13,7 @@ import Base: USE_BLAS64, abs, big, ceil, conj, convert, copy, copy!, copy_transp using Base: promote_op, _length # We use `_length` because of non-1 indices; releases after julia 0.5 # can go back to `length`. `_length(A)` is equivalent to `length(linearindices(A))`. +using Base.Broadcast: broadcast_elwise_op export # Modules diff --git a/base/linalg/uniformscaling.jl b/base/linalg/uniformscaling.jl index afec8815e77e3..a26c2fcc1808d 100644 --- a/base/linalg/uniformscaling.jl +++ b/base/linalg/uniformscaling.jl @@ -158,10 +158,10 @@ inv(J::UniformScaling) = UniformScaling(inv(J.λ)) \(x::Number, J::UniformScaling) = UniformScaling(x\J.λ) -.*(x::Number,J::UniformScaling) = UniformScaling(x*J.λ) -.*(J::UniformScaling,x::Number) = UniformScaling(J.λ*x) +broadcast(::typeof(*), x::Number,J::UniformScaling) = UniformScaling(x*J.λ) +broadcast(::typeof(*), J::UniformScaling,x::Number) = UniformScaling(J.λ*x) -./(J::UniformScaling,x::Number) = UniformScaling(J.λ/x) +broadcast(::typeof(/), J::UniformScaling,x::Number) = UniformScaling(J.λ/x) ==(J1::UniformScaling,J2::UniformScaling) = (J1.λ == J2.λ) diff --git a/base/multidimensional.jl b/base/multidimensional.jl index 72e85b818bdae..eeeb92463b013 100644 --- a/base/multidimensional.jl +++ b/base/multidimensional.jl @@ -1247,8 +1247,7 @@ end (@nref $N B i) = (AI, AI) end Bmax = sB - Istart = ones(Int,ndims(A)) - Istart[([sB...].==1) & ([sA...].!=1)] = 2 + Istart = Int[sB[i] == 1 != sA[i] ? 2 : 1 for i = 1:ndims(A)] @inbounds @nloops $N i d->(Istart[d]:size(A,d)) begin AI = @nref $N A i @nexprs $N d->(j_d = min(Bmax[d], i_{d})) diff --git a/base/operators.jl b/base/operators.jl index a8c4939000c3a..3dc835b4a61b3 100644 --- a/base/operators.jl +++ b/base/operators.jl @@ -155,22 +155,6 @@ Greater-than-or-equals comparison operator. >=(x,y) = (y <= x) const ≥ = >= -""" - .>(x, y) - -Element-wise greater-than comparison operator. -""" -.>(x,y) = y .< x - -""" - .>=(x, y) - .≥(x,y) - -Element-wise greater-than-or-equals comparison operator. -""" -.>=(x,y) = y .<= x -const .≥ = .>= - # this definition allows Number types to implement < instead of isless, # which is more idiomatic: isless(x::Real, y::Real) = x defaults to -./(x::Number,y::Number) = x/y -.\(x::Number,y::Number) = y./x -.*(x::Number,y::Number) = x*y -.^(x::Number,y::Number) = x^y -.+(x::Number,y::Number) = x+y -.-(x::Number,y::Number) = x-y -.<<(x::Integer,y::Integer) = x<>(x::Integer,y::Integer) = x>>y - -.==(x::Number,y::Number) = x == y - -""" - .!=(x, y) - .≠(x,y) - -Element-wise not-equals comparison operator. -""" -.!=(x::Number,y::Number) = x != y -.<( x::Real,y::Real) = x < y - -""" - .<=(x, y) - .≤(x,y) - -Element-wise less-than-or-equals comparison operator. -""" -.<=(x::Real,y::Real) = x <= y - -const .≤ = .<= -const .≠ = .!= - # Core <<, >>, and >>> take either Int or UInt as second arg. Signed shift # counts can shift in either direction, and are translated here to unsigned # counts. Integer datatypes only need to implement the unsigned version. @@ -478,7 +430,6 @@ x == div(x,y)*y + rem(x,y) """ rem const % = rem -.%(x::Real, y::Real) = x%y """ div(x, y) @@ -488,8 +439,6 @@ The quotient from Euclidean division. Computes `x/y`, truncated to an integer. 
""" div const ÷ = div -.÷(x::Real, y::Real) = x÷y - """ mod1(x, y) @@ -1000,25 +949,11 @@ export ===, xor, %, - .%, ÷, - .÷, &, *, +, -, - .!=, - .+, - .-, - .*, - ./, - .<, - .<=, - .==, - .>, - .>=, - .\, - .^, /, //, <, @@ -1032,12 +967,7 @@ export ≥, ≤, ≠, - .≥, - .≤, - .≠, >>, - .>>, - .<<, >>>, \, ^, @@ -1068,10 +998,10 @@ export transpose, ctranspose -import ..this_module: !, !=, xor, %, .%, ÷, .÷, &, *, +, -, .!=, .+, .-, .*, ./, .<, .<=, .==, .>, - .>=, .\, .^, /, //, <, <:, <<, <=, ==, >, >=, >>, .>>, .<<, >>>, +import ..this_module: !, !=, xor, %, ÷, &, *, +, -, + /, //, <, <:, <<, <=, ==, >, >=, >>, >>>, <|, |>, \, ^, |, ~, !==, ===, >:, colon, hcat, vcat, hvcat, getindex, setindex!, transpose, ctranspose, - ≥, ≤, ≠, .≥, .≤, .≠, ⋅, ×, ∈, ∉, ∋, ∌, ⊆, ⊈, ⊊, ∩, ∪, √, ∛, ⊻ + ≥, ≤, ≠, ⋅, ×, ∈, ∉, ∋, ∌, ⊆, ⊈, ⊊, ∩, ∪, √, ∛, ⊻ end diff --git a/base/range.jl b/base/range.jl index e22514ccde58c..48aaf0de11592 100644 --- a/base/range.jl +++ b/base/range.jl @@ -743,41 +743,41 @@ end -(r::FloatRange) = FloatRange(-r.start, -r.step, r.len, r.divisor) -(r::LinSpace) = LinSpace(-r.start, -r.stop, r.len, r.divisor) -.+(x::Real, r::AbstractUnitRange) = range(x + first(r), length(r)) -.+(x::Real, r::Range) = (x+first(r)):step(r):(x+last(r)) -#.+(x::Real, r::StepRange) = range(x + r.start, r.step, length(r)) -.+(x::Real, r::FloatRange) = FloatRange(r.divisor*x + r.start, r.step, r.len, r.divisor) -function .+{T}(x::Real, r::LinSpace{T}) ++(x::Real, r::AbstractUnitRange) = range(x + first(r), length(r)) ++(x::Real, r::Range) = (x+first(r)):step(r):(x+last(r)) +#+(x::Real, r::StepRange) = range(x + r.start, r.step, length(r)) ++(x::Real, r::FloatRange) = FloatRange(r.divisor*x + r.start, r.step, r.len, r.divisor) +function +{T}(x::Real, r::LinSpace{T}) x2 = x * r.divisor / (r.len - 1) LinSpace(x2 + r.start, x2 + r.stop, r.len, r.divisor) end -.+(r::Range, x::Real) = x + r -#.+(r::FloatRange, x::Real) = x + r ++(r::Range, x::Real) = x + r +#+(r::FloatRange, x::Real) = x + r -.-(x::Real, r::Range) = (x-first(r)):-step(r):(x-last(r)) -.-(x::Real, r::FloatRange) = FloatRange(r.divisor*x - r.start, -r.step, r.len, r.divisor) -function .-(x::Real, r::LinSpace) +-(x::Real, r::Range) = (x-first(r)):-step(r):(x-last(r)) +-(x::Real, r::FloatRange) = FloatRange(r.divisor*x - r.start, -r.step, r.len, r.divisor) +function -(x::Real, r::LinSpace) x2 = x * r.divisor / (r.len - 1) LinSpace(x2 - r.start, x2 - r.stop, r.len, r.divisor) end -.-(r::AbstractUnitRange, x::Real) = range(first(r)-x, length(r)) -.-(r::StepRange , x::Real) = range(r.start-x, r.step, length(r)) -.-(r::FloatRange, x::Real) = FloatRange(r.start - r.divisor*x, r.step, r.len, r.divisor) -function .-(r::LinSpace, x::Real) +-(r::AbstractUnitRange, x::Real) = range(first(r)-x, length(r)) +-(r::StepRange , x::Real) = range(r.start-x, r.step, length(r)) +-(r::FloatRange, x::Real) = FloatRange(r.start - r.divisor*x, r.step, r.len, r.divisor) +function -(r::LinSpace, x::Real) x2 = x * r.divisor / (r.len - 1) LinSpace(r.start - x2, r.stop - x2, r.len, r.divisor) end -.*(x::Real, r::OrdinalRange) = range(x*first(r), x*step(r), length(r)) -.*(x::Real, r::FloatRange) = FloatRange(x*r.start, x*r.step, r.len, r.divisor) -.*(x::Real, r::LinSpace) = LinSpace(x * r.start, x * r.stop, r.len, r.divisor) -.*(r::Range, x::Real) = x .* r -.*(r::FloatRange, x::Real) = x .* r -.*(r::LinSpace, x::Real) = x .* r +*(x::Real, r::OrdinalRange) = range(x*first(r), x*step(r), length(r)) +*(x::Real, r::FloatRange) = FloatRange(x*r.start, x*r.step, r.len, r.divisor) 
+*(x::Real, r::LinSpace) = LinSpace(x * r.start, x * r.stop, r.len, r.divisor) +*(r::Range, x::Real) = x * r +*(r::FloatRange, x::Real) = x * r +*(r::LinSpace, x::Real) = x * r -./(r::OrdinalRange, x::Real) = range(first(r)/x, step(r)/x, length(r)) -./(r::FloatRange, x::Real) = FloatRange(r.start/x, r.step/x, r.len, r.divisor) -./(r::LinSpace, x::Real) = LinSpace(r.start / x, r.stop / x, r.len, r.divisor) +/(r::OrdinalRange, x::Real) = range(first(r)/x, step(r)/x, length(r)) +/(r::FloatRange, x::Real) = FloatRange(r.start/x, r.step/x, r.len, r.divisor) +/(r::LinSpace, x::Real) = LinSpace(r.start / x, r.stop / x, r.len, r.divisor) promote_rule{T1,T2}(::Type{UnitRange{T1}},::Type{UnitRange{T2}}) = UnitRange{promote_type(T1,T2)} @@ -844,20 +844,17 @@ convert{T<:AbstractFloat}(::Type{LinSpace}, r::FloatRange{T}) = ## non-linear operations on ranges and fallbacks for non-real numbers ## -.+(x::Number, r::Range) = [ x+y for y=r ] -.+(r::Range, y::Number) = [ x+y for x=r ] ++(x::Number, r::Range) = [ x+y for y=r ] ++(r::Range, y::Number) = [ x+y for x=r ] -.-(x::Number, r::Range) = [ x-y for y=r ] -.-(r::Range, y::Number) = [ x-y for x=r ] +-(x::Number, r::Range) = [ x-y for y=r ] +-(r::Range, y::Number) = [ x-y for x=r ] -.*(x::Number, r::Range) = [ x*y for y=r ] -.*(r::Range, y::Number) = [ x*y for x=r ] +*(x::Number, r::Range) = [ x*y for y=r ] +*(r::Range, y::Number) = [ x*y for x=r ] -./(x::Number, r::Range) = [ x/y for y=r ] -./(r::Range, y::Number) = [ x/y for x=r ] - -.^(x::Number, r::Range) = [ x^y for y=r ] -.^(r::Range, y::Number) = [ x^y for x=r ] +/(x::Number, r::Range) = [ x/y for y=r ] +/(r::Range, y::Number) = [ x/y for x=r ] ## concatenation ## diff --git a/base/rational.jl b/base/rational.jl index f65397c064788..3304247d9783e 100644 --- a/base/rational.jl +++ b/base/rational.jl @@ -40,8 +40,6 @@ end //(X::AbstractArray, y::Number) = X .// y -.//(X::AbstractArray, y::Number) = reshape([ x // y for x in X ], size(X)) -.//(y::Number, X::AbstractArray) = reshape([ y // x for x in X ], size(X)) function show(io::IO, x::Rational) show(io, numerator(x)) diff --git a/base/sparse/cholmod.jl b/base/sparse/cholmod.jl index 11d35b320633e..d78eb1b75d1b1 100644 --- a/base/sparse/cholmod.jl +++ b/base/sparse/cholmod.jl @@ -1097,7 +1097,7 @@ sparse{Tv}(FC::FactorComponent{Tv,:LD}) = sparse(Sparse(Factor(FC))) # Calculate the offset into the stype field of the cholmod_sparse_struct and # change the value -let offset = fieldoffset(C_Sparse{Float64}, findfirst(fieldnames(C_Sparse) .== :stype)) +let offset = fieldoffset(C_Sparse{Float64}, findfirst(name -> name === :stype, fieldnames(C_Sparse))) global change_stype! function change_stype!(A::Sparse, i::Integer) unsafe_store!(convert(Ptr{Cint}, A.p), i, div(offset, 4) + 1) diff --git a/base/sparse/sparse.jl b/base/sparse/sparse.jl index a1b86b7fa3f9e..7b93d33cafdf9 100644 --- a/base/sparse/sparse.jl +++ b/base/sparse/sparse.jl @@ -6,7 +6,7 @@ using Base: ReshapedArray, promote_op, setindex_shape_check, to_shape, tail using Base.Sort: Forward using Base.LinAlg: AbstractTriangular, PosDefException -import Base: +, -, *, \, &, |, xor, .+, .-, .*, ./, .\, .^, .<, .!=, == +import Base: +, -, *, \, /, &, |, xor, == import Base: A_mul_B!, Ac_mul_B, Ac_mul_B!, At_mul_B, At_mul_B! import Base: A_mul_Bc, A_mul_Bt, Ac_mul_Bc, At_mul_Bt import Base: At_ldiv_B, Ac_ldiv_B, A_ldiv_B! 
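To illustrate the `Range` behavior noted in NEWS and implemented in the `base/range.jl` hunk above — the specialized `+`, `-`, `*`, `/` methods keep returning ranges, while the dotted forms are now generic `broadcast` calls that return an `Array` — here is a small sketch (assuming a 64-bit build with this patch applied):

```julia
r = 1:5

r + 1        # specialized method: still a range (2:6)
r .+ 1       # generic broadcast: an Array, [2, 3, 4, 5, 6]

typeof(r + 1)     # UnitRange{Int64}
typeof(r .+ 1)    # Array{Int64,1}
```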
diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 8dc0b4cea5c2c..27fcf80b2b411 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -1433,6 +1433,11 @@ function broadcast!{Tf,N}(f::Tf, C::SparseMatrixCSC, A::SparseMatrixCSC, Bs::Var return fpreszeros ? _broadcast_zeropres!(f, C, A, Bs...) : _broadcast_notzeropres!(f, fofzeros, C, A, Bs...) end +# needed to eliminate method ambiguity: +function broadcast!(::typeof(identity), C::SparseMatrixCSC, A::SparseMatrixCSC) + _checksameshape(C, A) + return copy!(C, A) +end function broadcast{Tf,N}(f::Tf, A::SparseMatrixCSC, Bs::Vararg{SparseMatrixCSC,N}) _aresameshape(A, Bs...) && return map(f, A, Bs...) # could avoid a second dims check in map fofzeros = f(_zeros_eltypes(A, Bs...)...) @@ -2277,7 +2282,6 @@ round{To}(::Type{To}, A::SparseMatrixCSC) = round.(To, A) ## Binary arithmetic and boolean operators - (+)(A::SparseMatrixCSC, B::SparseMatrixCSC) = map(+, A, B) (-)(A::SparseMatrixCSC, B::SparseMatrixCSC) = map(-, A, B) # TODO: Vectorized min, max, |, and xor should be deprecated in favor of compact-broadcast syntax. @@ -2287,51 +2291,11 @@ max(A::SparseMatrixCSC, B::SparseMatrixCSC) = map(max, A, B) (|)(A::SparseMatrixCSC, B::SparseMatrixCSC) = map(|, A, B) xor(A::SparseMatrixCSC, B::SparseMatrixCSC) = map(xor, A, B) -(.+)(A::SparseMatrixCSC, B::Number) = Array(A) .+ B ( +)(A::SparseMatrixCSC, B::Array ) = Array(A) + B -(.+)(A::Number, B::SparseMatrixCSC) = A .+ Array(B) ( +)(A::Array , B::SparseMatrixCSC) = A + Array(B) - -(.-)(A::SparseMatrixCSC, B::Number) = Array(A) .- B ( -)(A::SparseMatrixCSC, B::Array ) = Array(A) - B -(.-)(A::Number, B::SparseMatrixCSC) = A .- Array(B) ( -)(A::Array , B::SparseMatrixCSC) = A - Array(B) -(.*)(A::AbstractArray, B::AbstractArray) = broadcast_zpreserving(*, A, B) -(.*)(A::SparseMatrixCSC, B::Number) = SparseMatrixCSC(A.m, A.n, copy(A.colptr), copy(A.rowval), A.nzval .* B) -(.*)(A::Number, B::SparseMatrixCSC) = SparseMatrixCSC(B.m, B.n, copy(B.colptr), copy(B.rowval), A .* B.nzval) - -(./)(A::SparseMatrixCSC, B::Number) = SparseMatrixCSC(A.m, A.n, copy(A.colptr), copy(A.rowval), A.nzval ./ B) -(./)(A::Number, B::SparseMatrixCSC) = (./)(A, Array(B)) -(./)(A::SparseMatrixCSC, B::Array) = (./)(Array(A), B) -(./)(A::Array, B::SparseMatrixCSC) = (./)(A, Array(B)) -(./)(A::SparseMatrixCSC, B::SparseMatrixCSC) = (./)(Array(A), Array(B)) - -(.\)(A::SparseMatrixCSC, B::Number) = (.\)(Array(A), B) -(.\)(A::Number, B::SparseMatrixCSC) = SparseMatrixCSC(B.m, B.n, copy(B.colptr), copy(B.rowval), A .\ B.nzval ) -(.\)(A::SparseMatrixCSC, B::Array) = (.\)(Array(A), B) -(.\)(A::Array, B::SparseMatrixCSC) = (.\)(A, Array(B)) -(.\)(A::SparseMatrixCSC, B::SparseMatrixCSC) = (.\)(Array(A), Array(B)) - -(.^)(A::SparseMatrixCSC, B::Number) = - B==0 ? 
sparse(ones(typeof(one(eltype(A)).^B), A.m, A.n)) : - SparseMatrixCSC(A.m, A.n, copy(A.colptr), copy(A.rowval), A.nzval .^ B) -(.^)(::Irrational{:e}, B::SparseMatrixCSC) = exp.(B) -(.^)(A::Number, B::SparseMatrixCSC) = (.^)(A, Array(B)) -(.^)(A::SparseMatrixCSC, B::Array) = (.^)(Array(A), B) -(.^)(A::Array, B::SparseMatrixCSC) = (.^)(A, Array(B)) - -.+{Tv1,Ti1,Tv2,Ti2}(A_1::SparseMatrixCSC{Tv1,Ti1}, A_2::SparseMatrixCSC{Tv2,Ti2}) = - broadcast!(+, spzeros(promote_op(+, Tv1, Tv2), promote_type(Ti1, Ti2), to_shape(broadcast_indices(A_1, A_2))), A_1, A_2) - -function .-{Tva,Tia,Tvb,Tib}(A::SparseMatrixCSC{Tva,Tia}, B::SparseMatrixCSC{Tvb,Tib}) - broadcast!(-, spzeros(promote_op(-, Tva, Tvb), promote_type(Tia, Tib), to_shape(broadcast_indices(A, B))), A, B) -end - -## element-wise comparison operators returning SparseMatrixCSC ## -.<{Tv1,Ti1,Tv2,Ti2}(A_1::SparseMatrixCSC{Tv1,Ti1}, A_2::SparseMatrixCSC{Tv2,Ti2}) = broadcast!(<, spzeros( Bool, promote_type(Ti1, Ti2), to_shape(broadcast_indices(A_1, A_2))), A_1, A_2) -.!={Tv1,Ti1,Tv2,Ti2}(A_1::SparseMatrixCSC{Tv1,Ti1}, A_2::SparseMatrixCSC{Tv2,Ti2}) = broadcast!(!=, spzeros( Bool, promote_type(Ti1, Ti2), to_shape(broadcast_indices(A_1, A_2))), A_1, A_2) - ## full equality function ==(A1::SparseMatrixCSC, A2::SparseMatrixCSC) size(A1)!=size(A2) && return false diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index 4574733208805..5120f860608a9 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -1251,19 +1251,22 @@ for (vop, fun, mode) in [(:_vadd, :+, 1), end # to workaround the ambiguities with BitVector -.*(x::BitVector, y::AbstractSparseVector{Bool}) = _vmul(x, y) -.*(x::AbstractSparseVector{Bool}, y::BitVector) = _vmul(x, y) +broadcast(::typeof(*), x::BitVector, y::AbstractSparseVector{Bool}) = _vmul(x, y) +broadcast(::typeof(*), x::AbstractSparseVector{Bool}, y::BitVector) = _vmul(x, y) # definition of operators -for (op, vop) in [(:+, :_vadd), (:(.+), :_vadd), - (:-, :_vsub), (:(.-), :_vsub), - (:.*, :_vmul)] - @eval begin +for (op, vop) in [(:+, :_vadd), (:-, :_vsub), (:*, :_vmul)] + op != :* && @eval begin $(op)(x::AbstractSparseVector, y::AbstractSparseVector) = $(vop)(x, y) $(op)(x::StridedVector, y::AbstractSparseVector) = $(vop)(x, y) $(op)(x::AbstractSparseVector, y::StridedVector) = $(vop)(x, y) end + @eval begin + broadcast(::typeof($op), x::AbstractSparseVector, y::AbstractSparseVector) = $(vop)(x, y) + broadcast(::typeof($op), x::StridedVector, y::AbstractSparseVector) = $(vop)(x, y) + broadcast(::typeof($op), x::AbstractSparseVector, y::StridedVector) = $(vop)(x, y) + end end # definition of other binary functions @@ -1372,10 +1375,12 @@ scale!(a::Real, x::AbstractSparseVector) = (scale!(nonzeros(x), a); x) scale!(a::Complex, x::AbstractSparseVector) = (scale!(nonzeros(x), a); x) -.*(x::AbstractSparseVector, a::Number) = SparseVector(length(x), copy(nonzeroinds(x)), nonzeros(x) * a) -.*(a::Number, x::AbstractSparseVector) = SparseVector(length(x), copy(nonzeroinds(x)), a * nonzeros(x)) -./(x::AbstractSparseVector, a::Number) = SparseVector(length(x), copy(nonzeroinds(x)), nonzeros(x) / a) - +*(x::AbstractSparseVector, a::Number) = SparseVector(length(x), copy(nonzeroinds(x)), nonzeros(x) * a) +*(a::Number, x::AbstractSparseVector) = SparseVector(length(x), copy(nonzeroinds(x)), a * nonzeros(x)) +/(x::AbstractSparseVector, a::Number) = SparseVector(length(x), copy(nonzeroinds(x)), nonzeros(x) / a) +broadcast(::typeof(*), x::AbstractSparseVector, a::Number) = x * a 
+broadcast(::typeof(*), a::Number, x::AbstractSparseVector) = a * x +broadcast(::typeof(/), x::AbstractSparseVector, a::Number) = x / a # dot diff --git a/base/strings/basic.jl b/base/strings/basic.jl index 13dfef7f3018a..daa3c8c057a52 100644 --- a/base/strings/basic.jl +++ b/base/strings/basic.jl @@ -105,8 +105,6 @@ julia> "Hello " * "world" ``` """ (*)(s1::AbstractString, ss::AbstractString...) = string(s1, ss...) -(.*){T<:AbstractString}(v::Vector{T},s::AbstractString) = [i*s for i in v] -(.*){T<:AbstractString}(s::AbstractString,v::Vector{T}) = [s*i for i in v] length(s::DirectIndexString) = endof(s) diff --git a/base/sysimg.jl b/base/sysimg.jl index 7540c9b9ff138..915d9eff063ae 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -79,6 +79,13 @@ end # vararg Symbol constructor Symbol(x...) = Symbol(string(x...)) +# Define the broadcast function, which is mostly implemented in +# broadcast.jl, so that we can overload broadcast methods for +# specific array types etc. +# --Here, just define fallback routines for broadcasting with no arguments +broadcast(f) = f() +broadcast!(f, X::AbstractArray) = fill!(X, f()) + # array structures include("array.jl") include("abstractarray.jl") @@ -165,6 +172,17 @@ include("parse.jl") include("shell.jl") include("regex.jl") include("show.jl") + +# multidimensional arrays +include("cartesian.jl") +using .Cartesian +include("multidimensional.jl") +include("permuteddimsarray.jl") +using .PermutedDimsArrays +include("broadcast.jl") +importall .Broadcast + +# base64 conversions (need broadcast) include("base64.jl") importall .Base64 @@ -208,13 +226,6 @@ importall .Math const (√)=sqrt const (∛)=cbrt -# multidimensional arrays -include("cartesian.jl") -using .Cartesian -include("multidimensional.jl") -include("permuteddimsarray.jl") -using .PermutedDimsArrays - let SOURCE_PATH = "" global function _include(path) prev = SOURCE_PATH @@ -308,9 +319,6 @@ include("client.jl") # misc useful functions & macros include("util.jl") -include("broadcast.jl") -importall .Broadcast - # dense linear algebra include("linalg/linalg.jl") importall .LinAlg diff --git a/base/test.jl b/base/test.jl index f5d9965bbfab6..194ce11e7a937 100644 --- a/base/test.jl +++ b/base/test.jl @@ -228,8 +228,9 @@ end # can be displayed nicely. function get_test_result(ex) orig_ex = Expr(:inert, ex) - # Normalize comparison operator calls to :comparison expressions + # Normalize non-dot comparison operator calls to :comparison expressions if isa(ex, Expr) && ex.head == :call && length(ex.args)==3 && + first(string(ex.args[1])) != '.' && (ex.args[1] === :(==) || Base.operator_precedence(ex.args[1]) == comparison_prec) testret = :(eval_comparison(Expr(:comparison, $(esc(ex.args[2])), $(esc(ex.args[1])), $(esc(ex.args[3]))))) diff --git a/doc/src/manual/arrays.md b/doc/src/manual/arrays.md index 972d50a1ffbbc..4224afe201ffb 100644 --- a/doc/src/manual/arrays.md +++ b/doc/src/manual/arrays.md @@ -402,29 +402,33 @@ specify this trait, the default value `LinearSlow()` is used. ### Vectorized Operators and Functions -The following operators are supported for arrays. The dot version of a binary operator should -be used for elementwise operations. +The following operators are supported for arrays. Also, *every* binary +operator supports a [dot version](@ref man-dot-operators) that can be +applied to arrays (and combinations of arrays and scalars) as a +[fused broadcasting operation](@ref man-vectorized). (For comparison +operations like `<`, *only* the `.<` version is applicable to arrays.) 1. 
Unary arithmetic -- `-`, `+`, `!` -2. Binary arithmetic -- `+`, `-`, `*`, `.*`, `/`, `./`, `\`, `.\`, `^`, `.^`, `div`, `mod` -3. Comparison -- `.==`, `.!=`, `.<`, `.<=`, `.>`, `.>=` +2. Binary arithmetic -- `+`, `-`, `*`, `/`, `\`, `^`, `.^`, `div`, `mod` +3. Comparison -- `==`, `!=`, `≈` ([`isapprox`](@ref)), `≉` 4. Unary Boolean or bitwise -- `~` -5. Binary Boolean or bitwise -- `&`, `|`, `$` +5. Binary Boolean or bitwise -- `&`, `|`, `⊻` ([`xor`](@ref)) -Some operators without dots operate elementwise anyway when one argument is a scalar. These operators -are `*`, `+`, `-`, and the bitwise operators. The operators `/` and `\` operate elementwise when +Some operators without dots operate elementwise anyway when one argument is a scalar: +`*`, `+`, `-`, and the bitwise operators. The operators `/` and `\` operate elementwise when the denominator is a scalar. Note that comparisons such as `==` operate on whole arrays, giving a single boolean answer. Use -dot operators for elementwise comparisons. +dot operators like `.==` for elementwise comparisons. -To enable convenient vectorization of mathematical and other operations, Julia provides the compact -syntax `f.(args...)`, e.g. `sin.(x)` or `min.(x,y)`, for elementwise operations over arrays or -mixtures of arrays and scalars (a [`broadcast()`](@ref) operation). See [Dot Syntax for Vectorizing Functions](@ref). +To enable convenient vectorization of mathematical and other operations, Julia [provides the compact +syntax](@ref man-vectorized) `f.(args...)`, e.g. `sin.(x)` or `min.(x,y)`, for elementwise operations over arrays or mixtures of arrays and scalars (a [Broadcasting](@ref) operation); these +have the additional advantage of "fusing" into a single loop when combined with +dot operators and other dot calls. Note that there is a difference between `max.(a,b)`, which `broadcast`s [`max()`](@ref) elementwise -over `a` and `b`, and `maximum(a)`, which finds the largest value within `a`. The same statements -hold for `min.(a,b)` and `minimum(a)`. +over `a` and `b`, and `maximum(a)`, which finds the largest value within `a`. The same relationship +holds for `min.(a,b)` and `minimum(a)`. ### Broadcasting @@ -461,11 +465,14 @@ julia> broadcast(+, a, b) 1.73659 0.873631 ``` -Elementwise operators such as `.+` and `.*` perform broadcasting if necessary. There is also a -[`broadcast!()`](@ref) function to specify an explicit destination, and [`broadcast_getindex()`](@ref) +[Dotted operators](@ref man-dot-operators) such as `.+` and `.*` are equivalent +to `broadcast` calls (except that they fuse, as described below). There is also a +[`broadcast!()`](@ref) function to specify an explicit destination (which can also +be accessed in a fusing fashion by `.=` assignment), and functions [`broadcast_getindex()`](@ref) and [`broadcast_setindex!()`](@ref) that broadcast the indices before indexing. Moreover, `f.(args...)` is equivalent to `broadcast(f, args...)`, providing a convenient syntax to broadcast any function -([Dot Syntax for Vectorizing Functions](@ref)). +([dot syntax](@ref man-vectorized)). Nested "dot calls" `f.(...)` (including calls to `.+` etcetera) +[automatically fuse](@ref man-dot-operators) into a single `broadcast` call. Additionally, [`broadcast()`](@ref) is not limited to arrays (see the function documentation), it also handles tuples and treats any argument that is not an array or a tuple as a "scalar". 
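The manual text above describes how dotted operators, `broadcast`, and in-place `.=` assignment interact; a short sketch of that behavior (not taken from the manual, assuming this patch is applied):

```julia
a = rand(2, 1); b = rand(1, 2)

a .+ b          # broadcasts to a 2×2 matrix, same as broadcast(+, a, b)

X = zeros(2, 2)
X .= a .+ b     # fused broadcast! into the preallocated X, no temporary arrays

a .< b          # elementwise comparison; produces a BitArray under this change
```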
diff --git a/doc/src/manual/functions.md b/doc/src/manual/functions.md index 8a8fbc3773766..134cfd0934f86 100644 --- a/doc/src/manual/functions.md +++ b/doc/src/manual/functions.md @@ -554,7 +554,7 @@ normally or threw an exception. (The `try/finally` construct will be described i With the `do` block syntax, it helps to check the documentation or implementation to know how the arguments of the user function are initialized. -## Dot Syntax for Vectorizing Functions +## [Dot Syntax for Vectorizing Functions](@id man-vectorized) In technical-computing languages, it is common to have "vectorized" versions of functions, which simply apply a given function `f(x)` to each element of an array `A` to yield a new array via @@ -583,7 +583,7 @@ there is only a single loop over `X`, and a single array is allocated for the re `tmp=cos(X)`, and then compute `sin(tmp)` in a separate loop, allocating a second array.] This loop fusion is not a compiler optimization that may or may not occur, it is a *syntactic guarantee* whenever nested `f.(args...)` calls are encountered. Technically, the fusion stops as soon as -a "non-dot" function is encountered; for example, in `sin.(sort(cos.(X)))` the `sin` and `cos` +a "non-dot" function call is encountered; for example, in `sin.(sort(cos.(X)))` the `sin` and `cos` loops cannot be merged because of the intervening `sort` function. Finally, the maximum efficiency is typically achieved when the output array of a vectorized operation @@ -595,10 +595,10 @@ overwriting `X` with `sin.(Y)` in-place. If the left-hand side is an array-index e.g. `X[2:end] .= sin.(Y)`, then it translates to `broadcast!` on a `view`, e.g. `broadcast!(sin, view(X, 2:endof(X)), Y)`, so that the left-hand side is updated in-place. -(In future versions of Julia, operators like `.*` will also be handled with the same mechanism: -they will be equivalent to `broadcast` calls and will be fused with other nested "dot" calls. - `X .+= Y` is equivalent to `X .= X .+ Y` and will eventually result in a fused in-place assignment. -Similarly for `.*=` etcetera.) +Operators like `.*` are handled with the same mechanism: +they are equivalent to `broadcast` calls and are fused with other nested "dot" calls. + `X .+= Y` etcetera is equivalent to `X .= X .+ Y` and results in a fused in-place assignment; + see also [dot operators](@ref man-dot-operators). ## Further Reading @@ -607,4 +607,3 @@ a sophisticated type system and allows multiple dispatch on argument types. None given here provide any type annotations on their arguments, meaning that they are applicable to all types of arguments. The type system is described in [Types](@ref man-types) and defining a function in terms of methods chosen by multiple dispatch on run-time argument types is described in [Methods](@ref). - diff --git a/doc/src/manual/mathematical-operations.md b/doc/src/manual/mathematical-operations.md index 608da6f15eb6c..d2a50fc8a2eaf 100644 --- a/doc/src/manual/mathematical-operations.md +++ b/doc/src/manual/mathematical-operations.md @@ -127,6 +127,37 @@ The updating versions of all the binary arithmetic and bitwise operators are: true ``` +## [Vectorized "dot" operators](@id man-dot-operators) + +For *every* binary operation like `^`, there is a corresponding +"dot" operation `.^` that is *automatically* defined +to perform `^` element-by-element on arrays. 
For example, +`[1,2,3] ^ 3` is not defined, since there is no standard +mathematical meaning to "cubing" an array, but `[1,2,3] .^ 3` +is defined as computing the elementwise +(or "vectorized") result `[1^3, 2^3, 3^3]`. + +More specifically, `a .^ b` is parsed as the ["dot" call](@ref man-vectorized) +`(^).(a,b)`, which performs a [broadcast](@ref Broadcasting) operation: +it can combine arrays and scalars, arrays of the same size (performing +the operation elementwise), and even arrays of different shapes (e.g. +combining row and column vectors to produce a matrix). Moreover, like +all vectorized "dot calls," these "dot operators" are +*fusing*. For example, if you compute `2 .* A.^2 .+ sin.(A)` for an +array `A`, it performs a *single* loop over `A`, computing `2a^2 + sin(a)` +for each element of `A`. In particular, nested dot calls like `f.(g.(x))` +are fused, and "adjacent" binary operators like `x .+ 3 .* x.^2` are +equivalent to nested dot calls `(+).(x, (*).(3, (^).(x, 2)))`. + +Furthermore, "dotted" updating operators like `a .+= b` are parsed +as `a .= a .+ b`, where `.=` is a fused *in-place* assignment operation +(see the [dot syntax documentation](@ref man-vectorized)). + +Note the dot syntax is also applicable to user-defined operators. +For example, if you define `⊗(A,B) = kron(A,B)` to give a convenient +infix syntax `A ⊗ B` for Kronecker products ([`kron`](@ref)), then +`[A,B] .⊗ [C,D]` will compute `[A⊗C, B⊗D]` with no additional coding. + ## Numeric Comparisons Standard comparison operations are defined for all the primitive numeric types: @@ -265,13 +296,6 @@ Chaining comparisons is often quite convenient in numerical code. Chained compar which allows them to work on arrays. For example, `0 .< A .< 1` gives a boolean array whose entries are true where the corresponding elements of `A` are between 0 and 1. -The operator [`.<`](@ref) is intended for array objects; the operation `A .< B` is valid only -if `A` and `B` have the same dimensions. The operator returns an array with boolean entries and -with the same dimensions as `A` and `B`. Such operators are called *elementwise*; Julia offers -a suite of elementwise operators: [`.*`](@ref), [`.+`](@ref), etc. Some of the elementwise operators -can take a scalar operand such as the example `0 .< A .< 1` in the preceding paragraph. This notation -means that the scalar operand should be replicated for each entry of the array. 
- Note the evaluation behavior of chained comparisons: ```julia @@ -303,15 +327,15 @@ Julia applies the following order of operations, from highest precedence to lowe | Category | Operators | |:-------------- |:------------------------------------------------------------------------------------------------- | | Syntax | `.` followed by `::` | -| Exponentiation | `^` and its elementwise equivalent `.^` | -| Fractions | `//` and `.//` | -| Multiplication | `* / % & \` and `.* ./ .% .\` | -| Bitshifts | `<< >> >>>` and `.<< .>> .>>>` | -| Addition | `+ - \| ⊻` and `.+ .-` | +| Exponentiation | `^` | +| Fractions | `//` | +| Multiplication | `* / % & \` | +| Bitshifts | `<< >> >>>` | +| Addition | `+ - \| ⊻` | | Syntax | `: ..` followed by `\|>` | -| Comparisons | `> < >= <= == === != !== <:` and `.> .< .>= .<= .== .!=` | +| Comparisons | `> < >= <= == === != !== <:` | | Control flow | `&&` followed by `\|\|` followed by `?` | -| Assignments | `= += -= *= /= //= \= ^= ÷= %= \|= &= ⊻= <<= >>= >>>=` and `.+= .-= .*= ./= .//= .\= .^= .÷= .%=` | +| Assignments | `= += -= *= /= //= \= ^= ÷= %= \|= &= ⊻= <<= >>= >>>=` | ### Elementary Functions @@ -321,8 +345,8 @@ including integers, floating-point numbers, rationals, and complexes, wherever s make sense. Moreover, these functions (like any Julia function) can be applied in "vectorized" fashion to -arrays and other collections with the syntax `f.(A)`, e.g. `sin.(A)` will compute the elementwise -sine of each element of an array `A`. See [Dot Syntax for Vectorizing Functions](@ref). +arrays and other collections with the [dot syntax](@ref man-vectorized) `f.(A)`, +e.g. `sin.(A)` will compute the elementwise sine of each element of an array `A`. ## Numerical Conversions diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index 05417b9d48fea..75adec530f037 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -863,7 +863,52 @@ type from an algorithm. In the example above, we could have passed a `SubArray` Taken to its extreme, pre-allocation can make your code uglier, so performance measurements and some judgment may be required. However, for "vectorized" (element-wise) functions, the convenient syntax `x .= f.(y)` can be used for in-place operations with fused loops and no temporary arrays -([Dot Syntax for Vectorizing Functions](@ref)). +(see the [dot syntax for vectorizing functions](@ref man-vectorized)). + +## More dots: Fuse vectorized operations + +Julia has a special [dot syntax](@ref man-vectorized) that converts +any scalar function into a "vectorized" function call, and any operator +into a "vectorized" operator, with the special property that nested +"dot calls" are *fusing*: they are combined at the syntax level into +a single loop, without allocating temporary arrays. If you use `.=` and +similar assignment operators, the result can also be stored in-place +in a pre-allocated array (see above). + +In a linear-algebra context, this means that even though operations like +`vector + vector` and `vector * scalar` are defined, it can be advantageous +to instead use `vector .+ vector` and `vector .* scalar` because the +resulting loops can be fused with surrounding computations. For example, +consider the two functions: + +```julia +f(x) = 3 * x.^2 + 4 * x + 7 * x.^3 +fdot(x) = 3 .* x.^2 .+ 4 .* x .+ 7 .* x.^3 +``` + +Both `f` and `fdot` compute the same thing. 
However, `fdot` is +significantly faster when applied to an array: + +```julia +julia> x = rand(10^6); + +julia> @time f(x); + 0.020244 seconds (26 allocations: 53.407 MB, 35.88% gc time) + +julia> @time fdot(x); + 0.004579 seconds (10 allocations: 7.630 MB) + +julia> @time f.(x); + 0.004391 seconds (35 allocations: 7.631 MB) +``` + +That is, `fdot(x)` is more than four times faster and allocates 1/7 the +memory of `f(x)`, because each `*` and `+` operation in `f(x)` allocates +a new temporary array and executes in a separate loop. (Of course, +if you just do `f.(x)` then it is as fast as `fdot(x)` in this +example, but in many contexts it is more convenient to just sprinkle +some dots in your expressions rather than defining a separate function +for each vectorized operation.) ## Avoid string interpolation for I/O diff --git a/doc/src/stdlib/math.md b/doc/src/stdlib/math.md index 9854047b105df..e559679112557 100644 --- a/doc/src/stdlib/math.md +++ b/doc/src/stdlib/math.md @@ -10,12 +10,6 @@ Base.:*(::Any, ::Any...) Base.:(/) Base.:\(::Any, ::Any) Base.:^(::Any, ::Any) -Base.:(.+) -Base.:(.-) -Base.:(.*) -Base.:(./) -Base.:(.\) -Base.:(.^) Base.fma Base.muladd Base.div @@ -46,12 +40,6 @@ Base.:(<) Base.:(<=) Base.:(>) Base.:(>=) -Base.:(.==) -Base.:(.!=) -Base.:(.<) -Base.:(.<=) -Base.:(.>) -Base.:(.>=) Base.cmp Base.:(~) Base.:(&) diff --git a/doc/src/stdlib/punctuation.md b/doc/src/stdlib/punctuation.md index 1a94ca6e66b35..0b44fbb77391a 100644 --- a/doc/src/stdlib/punctuation.md +++ b/doc/src/stdlib/punctuation.md @@ -35,7 +35,7 @@ Extended documentation for mathematical symbols & functions is [here](@ref math- | `''` | delimit character literals | | ``` ` ` ``` | delimit external process (command) specifications | | `...` | splice arguments into a function call or declare a varargs function or type | -| `.` | access named fields in objects or names inside modules, also prefixes elementwise operators | +| `.` | access named fields in objects/modules, also prefixes elementwise operator/function calls | | `a:b` | range a, a+1, a+2, ..., b | | `a:s:b` | range a, a+s, a+2s, ..., b | | `:` | index an entire dimension (1:end) | diff --git a/src/ast.scm b/src/ast.scm index fb9399943e7c0..b7380bfe3bb33 100644 --- a/src/ast.scm +++ b/src/ast.scm @@ -197,6 +197,31 @@ (define (dotop? o) (and (symbol? o) (eqv? (string.char (string o) 0) #\.))) +; convert '.xx to 'xx +(define (undotop op) + (let ((str (string op))) + (assert (eqv? (string.char str 0) #\.)) + (symbol (string.sub str 1 (length str))))) + +; convert '.xx to 'xx, and (|.| _ '.xx) to (|.| _ 'xx), and otherwise return #f +(define (maybe-undotop e) + (if (symbol? e) + (let ((str (string e))) + (if (eqv? (string.char str 0) #\.) + (symbol (string.sub str 1 (length str))) + #f)) + (if (pair? e) + (if (eq? (car e) '|.|) + (let ((op (maybe-undotop (caddr e)))) + (if op + (list '|.| (cadr e) op) + #f)) + (if (quoted? e) + (let ((op (maybe-undotop (cadr e)))) + (if op (list (car e) op) #f)) + #f)) + #f))) + (define (vararg? x) (and (pair? x) (eq? (car x) '...))) (define (varargexpr? x) (and (pair? x) diff --git a/src/julia-parser.scm b/src/julia-parser.scm index e8056e4dd3514..44692bb8dff6f 100644 --- a/src/julia-parser.scm +++ b/src/julia-parser.scm @@ -18,7 +18,7 @@ (define prec-comparison (append! 
'(|<:| |>:| |.!|) (add-dots '(> < >= ≥ <= ≤ == === ≡ != ≠ !== ≢ ∈ ∉ ∋ ∌ ⊆ ⊈ ⊂ ⊄ ⊊ ∝ ∊ ∍ ∥ ∦ ∷ ∺ ∻ ∽ ∾ ≁ ≃ ≄ ≅ ≆ ≇ ≈ ≉ ≊ ≋ ≌ ≍ ≎ ≐ ≑ ≒ ≓ ≔ ≕ ≖ ≗ ≘ ≙ ≚ ≛ ≜ ≝ ≞ ≟ ≣ ≦ ≧ ≨ ≩ ≪ ≫ ≬ ≭ ≮ ≯ ≰ ≱ ≲ ≳ ≴ ≵ ≶ ≷ ≸ ≹ ≺ ≻ ≼ ≽ ≾ ≿ ⊀ ⊁ ⊃ ⊅ ⊇ ⊉ ⊋ ⊏ ⊐ ⊑ ⊒ ⊜ ⊩ ⊬ ⊮ ⊰ ⊱ ⊲ ⊳ ⊴ ⊵ ⊶ ⊷ ⋍ ⋐ ⋑ ⋕ ⋖ ⋗ ⋘ ⋙ ⋚ ⋛ ⋜ ⋝ ⋞ ⋟ ⋠ ⋡ ⋢ ⋣ ⋤ ⋥ ⋦ ⋧ ⋨ ⋩ ⋪ ⋫ ⋬ ⋭ ⋲ ⋳ ⋴ ⋵ ⋶ ⋷ ⋸ ⋹ ⋺ ⋻ ⋼ ⋽ ⋾ ⋿ ⟈ ⟉ ⟒ ⦷ ⧀ ⧁ ⧡ ⧣ ⧤ ⧥ ⩦ ⩧ ⩪ ⩫ ⩬ ⩭ ⩮ ⩯ ⩰ ⩱ ⩲ ⩳ ⩴ ⩵ ⩶ ⩷ ⩸ ⩹ ⩺ ⩻ ⩼ ⩽ ⩾ ⩿ ⪀ ⪁ ⪂ ⪃ ⪄ ⪅ ⪆ ⪇ ⪈ ⪉ ⪊ ⪋ ⪌ ⪍ ⪎ ⪏ ⪐ ⪑ ⪒ ⪓ ⪔ ⪕ ⪖ ⪗ ⪘ ⪙ ⪚ ⪛ ⪜ ⪝ ⪞ ⪟ ⪠ ⪡ ⪢ ⪣ ⪤ ⪥ ⪦ ⪧ ⪨ ⪩ ⪪ ⪫ ⪬ ⪭ ⪮ ⪯ ⪰ ⪱ ⪲ ⪳ ⪴ ⪵ ⪶ ⪷ ⪸ ⪹ ⪺ ⪻ ⪼ ⪽ ⪾ ⪿ ⫀ ⫁ ⫂ ⫃ ⫄ ⫅ ⫆ ⫇ ⫈ ⫉ ⫊ ⫋ ⫌ ⫍ ⫎ ⫏ ⫐ ⫑ ⫒ ⫓ ⫔ ⫕ ⫖ ⫗ ⫘ ⫙ ⫷ ⫸ ⫹ ⫺ ⊢ ⊣)))) ;; (plus `in`) -(define prec-pipe '(|\|>| |<\||)) +(define prec-pipe (add-dots '(|\|>| |<\||))) (define prec-colon '(: |..|)) (define prec-plus (append! '($) (add-dots '(+ - |\|| ⊕ ⊖ ⊞ ⊟ |++| ∪ ∨ ⊔ ± ∓ ∔ ∸ ≂ ≏ ⊎ ⊻ ⊽ ⋎ ⋓ ⧺ ⧻ ⨈ ⨢ ⨣ ⨤ ⨥ ⨦ ⨧ ⨨ ⨩ ⨪ ⨫ ⨬ ⨭ ⨮ ⨹ ⨺ ⩁ ⩂ ⩅ ⩊ ⩌ ⩏ ⩐ ⩒ ⩔ ⩖ ⩗ ⩛ ⩝ ⩡ ⩢ ⩣)))) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index e1e3e16b09866..f2446ba6c06bd 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -927,6 +927,13 @@ (argl (cddr name)) (has-sp (and (pair? head) (eq? (car head) 'curly))) (name (deprecate-dotparen (if has-sp (cadr head) head))) + (op (let ((op_ (maybe-undotop name))) ; handle .op -> broadcast deprecation + (if op_ + (syntax-deprecation #f (string "function " (deparse name) "(...)") + (string "function Base.broadcast(::typeof(" (deparse op_) "), ...)"))) + op_)) + (name (if op '(|.| Base (inert broadcast)) name)) + (argl (if op (cons `(|::| (call (core Typeof) ,op)) argl) argl)) (sparams (if has-sp (cddr head) '())) (isstaged (eq? (car e) 'stagedfunction)) (adj-decl (lambda (n) (if (and (decl? n) (length= n 2)) @@ -1652,7 +1659,9 @@ (if (or (eq? (car x) 'quote) (eq? (car x) 'inert) (eq? (car x) '$)) `(call (core getfield) ,f ,x) (make-fuse f (cdr x)))) - e)) + (if (and (pair? e) (eq? (car e) 'call) (dotop? (cadr e))) + (make-fuse (undotop (cadr e)) (cddr e)) + e))) ; given e == (fuse lambda args), compress the argument list by removing (pure) ; duplicates in args, inlining literals, and moving any varargs to the end: (define (compress-fuse e) @@ -1928,7 +1937,9 @@ (lambda (e) (if (length> e 2) (let ((f (cadr e))) - (cond ((and (pair? (caddr e)) + (cond ((dotop? f) + (expand-fuse-broadcast '() `(|.| ,(undotop f) (tuple ,@(cddr e))))) + ((and (pair? (caddr e)) (eq? (car (caddr e)) 'parameters)) ;; (call f (parameters . kwargs) ...) (expand-forms @@ -1966,15 +1977,15 @@ (expand-forms `(call (core _apply) ,f ,@(tuple-wrap argl '()))))) - ((and (eq? (cadr e) '*) (length= e 4)) + ((and (eq? f '*) (length= e 4)) (expand-transposed-op e #(Ac_mul_Bc Ac_mul_B At_mul_Bt At_mul_B A_mul_Bc A_mul_Bt))) - ((and (eq? (cadr e) '/) (length= e 4)) + ((and (eq? f '/) (length= e 4)) (expand-transposed-op e #(Ac_rdiv_Bc Ac_rdiv_B At_rdiv_Bt At_rdiv_B A_rdiv_Bc A_rdiv_Bt))) - ((and (eq? (cadr e) '\\) (length= e 4)) + ((and (eq? 
f '\\) (length= e 4)) (expand-transposed-op e #(Ac_ldiv_Bc Ac_ldiv_B At_ldiv_Bt At_ldiv_B A_ldiv_Bc A_ldiv_Bt))) diff --git a/test/arrayops.jl b/test/arrayops.jl index e81eaf44c8796..694f6f14614a4 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -1533,7 +1533,7 @@ end module RetTypeDecl using Base.Test - import Base: +, *, .*, convert + import Base: +, *, broadcast, convert immutable MeterUnits{T,P} <: Number val::T @@ -1546,15 +1546,15 @@ module RetTypeDecl (+){T,pow}(x::MeterUnits{T,pow}, y::MeterUnits{T,pow}) = MeterUnits{T,pow}(x.val+y.val) (*){T,pow}(x::Int, y::MeterUnits{T,pow}) = MeterUnits{typeof(x*one(T)),pow}(x*y.val) (*){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val*y.val) - (.*){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val*y.val) + broadcast{T}(::typeof(*), x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val*y.val) convert{T,pow}(::Type{MeterUnits{T,pow}}, y::Real) = MeterUnits{T,pow}(convert(T,y)) @test @inferred(m+[m,m]) == [m+m,m+m] @test @inferred([m,m]+m) == [m+m,m+m] - @test @inferred(m.*[m,m]) == [m2,m2] - @test @inferred([m,m].*m) == [m2,m2] + @test @inferred(broadcast(*,m,[m,m])) == [m2,m2] + @test @inferred(broadcast(*,[m,m],m)) == [m2,m2] @test @inferred([m 2m; m m]*[m,m]) == [3m2,2m2] - @test @inferred([m m].*[m,m]) == [m2 m2; m2 m2] + @test @inferred(broadcast(*,[m m],[m,m])) == [m2 m2; m2 m2] end # range, range ops @@ -1926,3 +1926,11 @@ using TestHelpers.OAs @test accumulate(op, [10,20, 30]) == [10, op(10, 20), op(op(10, 20), 30)] == [10, 40, 110] @test accumulate(op, [10 20 30], 2) == [10 op(10, 20) op(op(10, 20), 30)] == [10 40 110] end + +# issue #11053 +type T11053 + a::Float64 +end +Base.:*(a::T11053, b::Real) = T11053(a.a*b) +Base.:(==)(a::T11053, b::T11053) = a.a == b.a +@test [T11053(1)] * 5 == [T11053(1)] .* 5 == [T11053(5.0)] diff --git a/test/bitarray.jl b/test/bitarray.jl index c53202e8ec91d..67f0be23d74e9 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -787,9 +787,9 @@ let b1 = bitrand(n1, n2) @check_bit_operation xor(b1, b2) BitMatrix @check_bit_operation (+)(b1, b2) Matrix{Int} @check_bit_operation (-)(b1, b2) Matrix{Int} - @check_bit_operation (.*)(b1, b2) BitMatrix - @check_bit_operation (./)(b1, b2) Matrix{Float64} - @check_bit_operation (.^)(b1, b2) BitMatrix + @check_bit_operation broadcast(*, b1, b2) BitMatrix + @check_bit_operation broadcast(/, b1, b2) Matrix{Float64} + @check_bit_operation broadcast(^, b1, b2) BitMatrix @check_bit_operation (/)(b1,1) Matrix{Float64} b2 = trues(n1, n2) @@ -816,7 +816,7 @@ let b0 = falses(0) @check_bit_operation (&)(b0, b0) BitVector @check_bit_operation (|)(b0, b0) BitVector @check_bit_operation xor(b0, b0) BitVector - @check_bit_operation (.*)(b0, b0) BitVector + @check_bit_operation broadcast(*, b0, b0) BitVector @check_bit_operation (*)(b0, b0') Matrix{Int} end @@ -829,9 +829,9 @@ let b1 = bitrand(n1, n2) @check_bit_operation xor(b1, i2) Matrix{Int} @check_bit_operation (+)(b1, i2) Matrix{Int} @check_bit_operation (-)(b1, i2) Matrix{Int} - @check_bit_operation (.*)(b1, i2) Matrix{Int} - @check_bit_operation (./)(b1, i2) Matrix{Float64} - @check_bit_operation (.^)(b1, i2) BitMatrix + @check_bit_operation broadcast(*, b1, i2) Matrix{Int} + @check_bit_operation broadcast(/, b1, i2) Matrix{Float64} + @check_bit_operation broadcast(^, b1, i2) BitMatrix @check_bit_operation div(b1, i2) Matrix{Int} @check_bit_operation mod(b1, i2) Matrix{Int} end @@ -840,9 +840,9 @@ end let b1 = bitrand(n1, n2) f2 = 1.0 .+ rand(n1, n2) - @check_bit_operation 
(.*)(b1, f2) Matrix{Float64} - @check_bit_operation (./)(b1, f2) Matrix{Float64} - @check_bit_operation (.^)(b1, f2) Matrix{Float64} + @check_bit_operation broadcast(*, b1, f2) Matrix{Float64} + @check_bit_operation broadcast(/, b1, f2) Matrix{Float64} + @check_bit_operation broadcast(^, b1, f2) Matrix{Float64} @check_bit_operation div(b1, f2) Matrix{Float64} @check_bit_operation mod(b1, f2) Matrix{Float64} end @@ -860,66 +860,66 @@ let b2 = bitrand(n1, n2) @check_bit_operation (&)(i1, b2) Matrix{Int} @check_bit_operation (|)(i1, b2) Matrix{Int} @check_bit_operation xor(i1, b2) Matrix{Int} - @check_bit_operation (.+)(i1, b2) Matrix{Int} - @check_bit_operation (.-)(i1, b2) Matrix{Int} - @check_bit_operation (.*)(i1, b2) Matrix{Int} + @check_bit_operation broadcast(+, i1, b2) Matrix{Int} + @check_bit_operation broadcast(-, i1, b2) Matrix{Int} + @check_bit_operation broadcast(*, i1, b2) Matrix{Int} @check_bit_operation (&)(u1, b2) Matrix{UInt8} @check_bit_operation (|)(u1, b2) Matrix{UInt8} @check_bit_operation xor(u1, b2) Matrix{UInt8} - @check_bit_operation (.+)(u1, b2) Matrix{UInt8} - @check_bit_operation (.-)(u1, b2) Matrix{UInt8} - @check_bit_operation (.*)(u1, b2) Matrix{UInt8} + @check_bit_operation broadcast(+, u1, b2) Matrix{UInt8} + @check_bit_operation broadcast(-, u1, b2) Matrix{UInt8} + @check_bit_operation broadcast(*, u1, b2) Matrix{UInt8} for (x1,t1) = [(f1, Float64), (ci1, Complex{Int}), (cu1, Complex{UInt8}), (cf1, Complex128)] - @check_bit_operation (.+)(x1, b2) Matrix{t1} - @check_bit_operation (.-)(x1, b2) Matrix{t1} - @check_bit_operation (.*)(x1, b2) Matrix{t1} + @check_bit_operation broadcast(+, x1, b2) Matrix{t1} + @check_bit_operation broadcast(-, x1, b2) Matrix{t1} + @check_bit_operation broadcast(*, x1, b2) Matrix{t1} end b2 = trues(n1, n2) - @check_bit_operation (./)(true, b2) Matrix{Float64} + @check_bit_operation broadcast(/, true, b2) Matrix{Float64} @check_bit_operation div(true, b2) BitMatrix @check_bit_operation mod(true, b2) BitMatrix - @check_bit_operation (./)(false, b2) Matrix{Float64} + @check_bit_operation broadcast(/, false, b2) Matrix{Float64} @check_bit_operation div(false, b2) BitMatrix @check_bit_operation mod(false, b2) BitMatrix - @check_bit_operation (./)(i1, b2) Matrix{Float64} + @check_bit_operation broadcast(/, i1, b2) Matrix{Float64} @check_bit_operation div(i1, b2) Matrix{Int} @check_bit_operation mod(i1, b2) Matrix{Int} - @check_bit_operation (./)(u1, b2) Matrix{Float64} + @check_bit_operation broadcast(/, u1, b2) Matrix{Float64} @check_bit_operation div(u1, b2) Matrix{UInt8} @check_bit_operation mod(u1, b2) Matrix{UInt8} - @check_bit_operation (./)(f1, b2) Matrix{Float64} + @check_bit_operation broadcast(/, f1, b2) Matrix{Float64} @check_bit_operation div(f1, b2) Matrix{Float64} @check_bit_operation mod(f1, b2) Matrix{Float64} - @check_bit_operation (./)(ci1, b2) Matrix{Complex128} - @check_bit_operation (./)(cu1, b2) Matrix{Complex128} - @check_bit_operation (./)(cf1, b2) Matrix{Complex128} + @check_bit_operation broadcast(/, ci1, b2) Matrix{Complex128} + @check_bit_operation broadcast(/, cu1, b2) Matrix{Complex128} + @check_bit_operation broadcast(/, cf1, b2) Matrix{Complex128} b2 = bitrand(n1, n2) - @check_bit_operation (.^)(false, b2) BitMatrix - @check_bit_operation (.^)(true, b2) BitMatrix - @check_bit_operation (.^)(0x0, b2) Matrix{UInt8} - @check_bit_operation (.^)(0x1, b2) Matrix{UInt8} - @check_bit_operation (.^)(-1, b2) Matrix{Int} - @check_bit_operation (.^)(0, b2) Matrix{Int} - @check_bit_operation (.^)(1, b2) Matrix{Int} - 
@check_bit_operation (.^)(0.0, b2) Matrix{Float64} - @check_bit_operation (.^)(1.0, b2) Matrix{Float64} - @check_bit_operation (.^)(0.0im, b2) Matrix{Complex128} - @check_bit_operation (.^)(1.0im, b2) Matrix{Complex128} - @check_bit_operation (.^)(0im, b2) Matrix{Complex{Int}} - @check_bit_operation (.^)(1im, b2) Matrix{Complex{Int}} - @check_bit_operation (.^)(0x0im, b2) Matrix{Complex{UInt8}} - @check_bit_operation (.^)(0x1im, b2) Matrix{Complex{UInt8}} + @check_bit_operation broadcast(^, false, b2) BitMatrix + @check_bit_operation broadcast(^, true, b2) BitMatrix + @check_bit_operation broadcast(^, 0x0, b2) Matrix{UInt8} + @check_bit_operation broadcast(^, 0x1, b2) Matrix{UInt8} + @check_bit_operation broadcast(^, -1, b2) Matrix{Int} + @check_bit_operation broadcast(^, 0, b2) Matrix{Int} + @check_bit_operation broadcast(^, 1, b2) Matrix{Int} + @check_bit_operation broadcast(^, 0.0, b2) Matrix{Float64} + @check_bit_operation broadcast(^, 1.0, b2) Matrix{Float64} + @check_bit_operation broadcast(^, 0.0im, b2) Matrix{Complex128} + @check_bit_operation broadcast(^, 1.0im, b2) Matrix{Complex128} + @check_bit_operation broadcast(^, 0im, b2) Matrix{Complex{Int}} + @check_bit_operation broadcast(^, 1im, b2) Matrix{Complex{Int}} + @check_bit_operation broadcast(^, 0x0im, b2) Matrix{Complex{UInt8}} + @check_bit_operation broadcast(^, 0x1im, b2) Matrix{Complex{UInt8}} end # Matrix/Number @@ -945,16 +945,16 @@ let b1 = bitrand(n1, n2) @check_bit_operation xor(b1, false) BitMatrix @check_bit_operation xor(true, b1) BitMatrix @check_bit_operation xor(false, b1) BitMatrix - @check_bit_operation (.+)(b1, true) Matrix{Int} - @check_bit_operation (.+)(b1, false) Matrix{Int} - @check_bit_operation (.-)(b1, true) Matrix{Int} - @check_bit_operation (.-)(b1, false) Matrix{Int} - @check_bit_operation (.*)(b1, true) BitMatrix - @check_bit_operation (.*)(b1, false) BitMatrix - @check_bit_operation (.*)(true, b1) BitMatrix - @check_bit_operation (.*)(false, b1) BitMatrix - @check_bit_operation (./)(b1, true) Matrix{Float64} - @check_bit_operation (./)(b1, false) Matrix{Float64} + @check_bit_operation broadcast(+, b1, true) Matrix{Int} + @check_bit_operation broadcast(+, b1, false) Matrix{Int} + @check_bit_operation broadcast(-, b1, true) Matrix{Int} + @check_bit_operation broadcast(-, b1, false) Matrix{Int} + @check_bit_operation broadcast(*, b1, true) BitMatrix + @check_bit_operation broadcast(*, b1, false) BitMatrix + @check_bit_operation broadcast(*, true, b1) BitMatrix + @check_bit_operation broadcast(*, false, b1) BitMatrix + @check_bit_operation broadcast(/, b1, true) Matrix{Float64} + @check_bit_operation broadcast(/, b1, false) Matrix{Float64} @check_bit_operation div(b1, true) BitMatrix @check_bit_operation mod(b1, true) BitMatrix @@ -967,65 +967,65 @@ let b1 = bitrand(n1, n2) @check_bit_operation (&)(b1, i2) Matrix{Int} @check_bit_operation (|)(b1, i2) Matrix{Int} @check_bit_operation xor(b1, i2) Matrix{Int} - @check_bit_operation (.+)(b1, i2) Matrix{Int} - @check_bit_operation (.-)(b1, i2) Matrix{Int} - @check_bit_operation (.*)(b1, i2) Matrix{Int} - @check_bit_operation (./)(b1, i2) Matrix{Float64} + @check_bit_operation broadcast(+, b1, i2) Matrix{Int} + @check_bit_operation broadcast(-, b1, i2) Matrix{Int} + @check_bit_operation broadcast(*, b1, i2) Matrix{Int} + @check_bit_operation broadcast(/, b1, i2) Matrix{Float64} @check_bit_operation div(b1, i2) Matrix{Int} @check_bit_operation mod(b1, i2) Matrix{Int} @check_bit_operation (&)(b1, u2) Matrix{UInt8} @check_bit_operation (|)(b1, u2) 
Matrix{UInt8} @check_bit_operation xor(b1, u2) Matrix{UInt8} - @check_bit_operation (.+)(b1, u2) Matrix{UInt8} - @check_bit_operation (.-)(b1, u2) Matrix{UInt8} - @check_bit_operation (.*)(b1, u2) Matrix{UInt8} - @check_bit_operation (./)(b1, u2) Matrix{Float64} + @check_bit_operation broadcast(+, b1, u2) Matrix{UInt8} + @check_bit_operation broadcast(-, b1, u2) Matrix{UInt8} + @check_bit_operation broadcast(*, b1, u2) Matrix{UInt8} + @check_bit_operation broadcast(/, b1, u2) Matrix{Float64} @check_bit_operation div(b1, u2) Matrix{UInt8} @check_bit_operation mod(b1, u2) Matrix{UInt8} - @check_bit_operation (.+)(b1, f2) Matrix{Float64} - @check_bit_operation (.-)(b1, f2) Matrix{Float64} - @check_bit_operation (.*)(b1, f2) Matrix{Float64} - @check_bit_operation (./)(b1, f2) Matrix{Float64} + @check_bit_operation broadcast(+, b1, f2) Matrix{Float64} + @check_bit_operation broadcast(-, b1, f2) Matrix{Float64} + @check_bit_operation broadcast(*, b1, f2) Matrix{Float64} + @check_bit_operation broadcast(/, b1, f2) Matrix{Float64} @check_bit_operation div(b1, f2) Matrix{Float64} @check_bit_operation mod(b1, f2) Matrix{Float64} - @check_bit_operation (.+)(b1, ci2) Matrix{Complex{Int}} - @check_bit_operation (.-)(b1, ci2) Matrix{Complex{Int}} - @check_bit_operation (.*)(b1, ci2) Matrix{Complex{Int}} - @check_bit_operation (./)(b1, ci2) Matrix{Complex128} - - @check_bit_operation (.+)(b1, cu2) Matrix{Complex{UInt8}} - @check_bit_operation (.-)(b1, cu2) Matrix{Complex{UInt8}} - @check_bit_operation (.*)(b1, cu2) Matrix{Complex{UInt8}} - @check_bit_operation (./)(b1, cu2) Matrix{Complex128} - - @check_bit_operation (.+)(b1, cf2) Matrix{Complex128} - @check_bit_operation (.-)(b1, cf2) Matrix{Complex128} - @check_bit_operation (.*)(b1, cf2) Matrix{Complex128} - @check_bit_operation (./)(b1, cf2) Matrix{Complex128} - - @check_bit_operation (.^)(b1, false) BitMatrix - @check_bit_operation (.^)(b1, true) BitMatrix - @check_bit_operation (.^)(b1, 0x0) BitMatrix - @check_bit_operation (.^)(b1, 0x1) BitMatrix - @check_bit_operation (.^)(b1, 0) BitMatrix - @check_bit_operation (.^)(b1, 1) BitMatrix - @check_bit_operation (.^)(b1, -1.0) Matrix{Float64} - @check_bit_operation (.^)(b1, 0.0) Matrix{Float64} - @check_bit_operation (.^)(b1, 1.0) Matrix{Float64} - @check_bit_operation (.^)(b1, 0.0im) Matrix{Complex128} - @check_bit_operation (.^)(b1, 0x0im) Matrix{Complex128} - @check_bit_operation (.^)(b1, 0im) Matrix{Complex128} - @test_throws DomainError (.^)(b1, -1) + @check_bit_operation broadcast(+, b1, ci2) Matrix{Complex{Int}} + @check_bit_operation broadcast(-, b1, ci2) Matrix{Complex{Int}} + @check_bit_operation broadcast(*, b1, ci2) Matrix{Complex{Int}} + @check_bit_operation broadcast(/, b1, ci2) Matrix{Complex128} + + @check_bit_operation broadcast(+, b1, cu2) Matrix{Complex{UInt8}} + @check_bit_operation broadcast(-, b1, cu2) Matrix{Complex{UInt8}} + @check_bit_operation broadcast(*, b1, cu2) Matrix{Complex{UInt8}} + @check_bit_operation broadcast(/, b1, cu2) Matrix{Complex128} + + @check_bit_operation broadcast(+, b1, cf2) Matrix{Complex128} + @check_bit_operation broadcast(-, b1, cf2) Matrix{Complex128} + @check_bit_operation broadcast(*, b1, cf2) Matrix{Complex128} + @check_bit_operation broadcast(/, b1, cf2) Matrix{Complex128} + + @check_bit_operation broadcast(^, b1, false) BitMatrix + @check_bit_operation broadcast(^, b1, true) BitMatrix + @check_bit_operation broadcast(^, b1, 0x0) BitMatrix + @check_bit_operation broadcast(^, b1, 0x1) BitMatrix + @check_bit_operation broadcast(^, b1, 0) BitMatrix 
+ @check_bit_operation broadcast(^, b1, 1) BitMatrix + @check_bit_operation broadcast(^, b1, -1.0) Matrix{Float64} + @check_bit_operation broadcast(^, b1, 0.0) Matrix{Float64} + @check_bit_operation broadcast(^, b1, 1.0) Matrix{Float64} + @check_bit_operation broadcast(^, b1, 0.0im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, 0x0im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, 0im) Matrix{Complex128} + @test_throws DomainError broadcast(^, b1, -1) b1 = trues(n1, n2) - @check_bit_operation (.^)(b1, -1.0im) Matrix{Complex128} - @check_bit_operation (.^)(b1, 1.0im) Matrix{Complex128} - @check_bit_operation (.^)(b1, -1im) Matrix{Complex128} - @check_bit_operation (.^)(b1, 1im) Matrix{Complex128} - @check_bit_operation (.^)(b1, 0x1im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, -1.0im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, 1.0im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, -1im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, 1im) Matrix{Complex128} + @check_bit_operation broadcast(^, b1, 0x1im) Matrix{Complex128} end timesofar("binary arithmetic") @@ -1033,10 +1033,10 @@ timesofar("binary arithmetic") ## Binary comparison operators ## let b1 = bitrand(n1, n2), b2 = bitrand(n1, n2) - @check_bit_operation (.==)(b1, b2) BitMatrix - @check_bit_operation (.!=)(b1, b2) BitMatrix - @check_bit_operation (.<)(b1, b2) BitMatrix - @check_bit_operation (.<=)(b1, b2) BitMatrix + @check_bit_operation broadcast(==, b1, b2) BitMatrix + @check_bit_operation broadcast(!=, b1, b2) BitMatrix + @check_bit_operation broadcast(<, b1, b2) BitMatrix + @check_bit_operation broadcast(<=, b1, b2) BitMatrix end timesofar("binary comparison") diff --git a/test/broadcast.jl b/test/broadcast.jl index bd20d69c5a20c..3cc0f0c4d50f3 100644 --- a/test/broadcast.jl +++ b/test/broadcast.jl @@ -83,7 +83,7 @@ function as_sub{T}(x::AbstractArray{T,3}) y end -bittest(f::Function, ewf::Function, a...) = (@test ewf(a...) == BitArray(broadcast(f, a...))) +bittest(f::Function, a...) = (@test f.(a...) 
== BitArray(broadcast(f, a...))) n1 = 21 n2 = 32 n3 = 17 @@ -97,7 +97,7 @@ for arr in (identity, as_sub) @test broadcast(+, arr([1, 0]), arr([1, 4])) == [2, 4] @test broadcast(+, arr([1, 0]), 2) == [3, 2] - @test @inferred(arr(eye(2)) .+ arr([1, 4])) == arr([2 1; 4 5]) + @test @inferred(broadcast(+, arr(eye(2)), arr([1, 4]))) == arr([2 1; 4 5]) @test arr(eye(2)) .+ arr([1 4]) == arr([2 4; 1 5]) @test arr([1 0]) .+ arr([1, 4]) == arr([2 1; 5 4]) @test arr([1, 0]) .+ arr([1 4]) == arr([2 5; 1 4]) @@ -137,19 +137,16 @@ for arr in (identity, as_sub) @test A == diagm(10:12) @test_throws BoundsError broadcast_setindex!(A, 7, [1,-1], [1 2]) - for (f, ewf) in (((==), (.==)), - ((<) , (.<) ), - ((!=), (.!=)), - ((<=), (.<=))) - bittest(f, ewf, arr(eye(2)), arr([1, 4])) - bittest(f, ewf, arr(eye(2)), arr([1 4])) - bittest(f, ewf, arr([0, 1]), arr([1 4])) - bittest(f, ewf, arr([0 1]), arr([1, 4])) - bittest(f, ewf, arr([1, 0]), arr([1, 4])) - bittest(f, ewf, arr(rand(rb, n1, n2, n3)), arr(rand(rb, n1, n2, n3))) - bittest(f, ewf, arr(rand(rb, 1, n2, n3)), arr(rand(rb, n1, 1, n3))) - bittest(f, ewf, arr(rand(rb, 1, n2, 1)), arr(rand(rb, n1, 1, n3))) - bittest(f, ewf, arr(bitrand(n1, n2, n3)), arr(bitrand(n1, n2, n3))) + for f in ((==), (<) , (!=), (<=)) + bittest(f, arr(eye(2)), arr([1, 4])) + bittest(f, arr(eye(2)), arr([1 4])) + bittest(f, arr([0, 1]), arr([1 4])) + bittest(f, arr([0 1]), arr([1, 4])) + bittest(f, arr([1, 0]), arr([1, 4])) + bittest(f, arr(rand(rb, n1, n2, n3)), arr(rand(rb, n1, n2, n3))) + bittest(f, arr(rand(rb, 1, n2, n3)), arr(rand(rb, n1, 1, n3))) + bittest(f, arr(rand(rb, 1, n2, 1)), arr(rand(rb, n1, 1, n3))) + bittest(f, arr(bitrand(n1, n2, n3)), arr(bitrand(n1, n2, n3))) end end @@ -163,10 +160,8 @@ m = [1:2;]' @test m./r2 ≈ [ratio 2ratio] @test m./[r2;] ≈ [ratio 2ratio] -@test @inferred([0,1.2].+reshape([0,-2],1,1,2)) == reshape([0 -2; 1.2 -0.8],2,1,2) -rt = Base.return_types(.+, Tuple{Array{Float64, 3}, Array{Int, 1}}) -@test length(rt) == 1 && rt[1] == Array{Float64, 3} -rt = Base.return_types(broadcast, Tuple{typeof(.+), Array{Float64, 3}, Array{Int, 3}}) +@test @inferred(broadcast(+,[0,1.2],reshape([0,-2],1,1,2))) == reshape([0 -2; 1.2 -0.8],2,1,2) +rt = Base.return_types(broadcast, Tuple{typeof(+), Array{Float64, 3}, Array{Int, 1}}) @test length(rt) == 1 && rt[1] == Array{Float64, 3} rt = Base.return_types(broadcast!, Tuple{Function, Array{Float64, 3}, Array{Float64, 3}, Array{Int, 1}}) @test length(rt) == 1 && rt[1] == Array{Float64, 3} @@ -297,6 +292,14 @@ import Base.Meta: isexpr @test isexpr(expand(:(f.(x,1.0))), :thunk) @test isexpr(expand(:(f.(x,$π))), :thunk) +# PR #17623: Fused binary operators +@test [true] .* [true] == [true] +@test [1,2,3] .|> (x->x+1) == [2,3,4] +let g = Int[], ⊕ = (a,b) -> let c=a+2b; push!(g, c); c; end + @test [1,2,3] .⊕ [10,11,12] .⊕ [100,200,300] == [221,424,627] + @test g == [21,221,24,424,27,627] # test for loop fusion +end + # PR 16988 @test Base.promote_op(+, Bool) === Int @test isa(broadcast(+, [true]), Array{Int,1}) diff --git a/test/dates/periods.jl b/test/dates/periods.jl index e41a87248e0cd..f8802a35ede89 100644 --- a/test/dates/periods.jl +++ b/test/dates/periods.jl @@ -328,6 +328,8 @@ emptyperiod = ((y + d) - d) - y pa = [1y 1m 1w 1d; 1h 1mi 1s 1ms] cpa = [1y+1s 1m+1s 1w+1s 1d+1s; 1h+1s 1mi+1s 2m+1s 1s+1ms] +@test +pa == pa == -(-pa) +@test -pa == map(-, pa) @test 1y .+ pa == [2y 1y+1m 1y+1w 1y+1d; 1y+1h 1y+1mi 1y+1s 1y+1ms] @test (1y+1m) .+ pa == [2y+1m 1y+2m 1y+1m+1w 1y+1m+1d; 1y+1m+1h 1y+1m+1mi 1y+1m+1s 1y+1m+1ms] @test pa 
.+ 1y == [2y 1y+1m 1y+1w 1y+1d; 1y+1h 1y+1mi 1y+1s 1y+1ms] diff --git a/test/linalg/generic.jl b/test/linalg/generic.jl index 33695481100b0..10d47bec2599a 100644 --- a/test/linalg/generic.jl +++ b/test/linalg/generic.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: http://julialang.org/license -import Base: -, * +import Base: -, *, /, \ using Base.Test # A custom Quaternion type with minimal defined interface and methods. @@ -16,6 +16,7 @@ Base.abs2(q::Quaternion) = q.s*q.s + q.v1*q.v1 + q.v2*q.v2 + q.v3*q.v3 Base.abs(q::Quaternion) = sqrt(abs2(q)) Base.real{T}(::Type{Quaternion{T}}) = T Base.conj(q::Quaternion) = Quaternion(q.s, -q.v1, -q.v2, -q.v3) +Base.isfinite(q::Quaternion) = isfinite(q.s) & isfinite(q.v1) & isfinite(q.v2) & isfinite(q.v3) (-)(ql::Quaternion, qr::Quaternion) = Quaternion(ql.s - qr.s, ql.v1 - qr.v1, ql.v2 - qr.v2, ql.v3 - qr.v3) @@ -23,6 +24,9 @@ Base.conj(q::Quaternion) = Quaternion(q.s, -q.v1, -q.v2, -q.v3) q.s*w.v1 + q.v1*w.s + q.v2*w.v3 - q.v3*w.v2, q.s*w.v2 - q.v1*w.v3 + q.v2*w.s + q.v3*w.v1, q.s*w.v3 + q.v1*w.v2 - q.v2*w.v1 + q.v3*w.s) +(*)(q::Quaternion, r::Real) = Quaternion(q.s*r, q.v1*r, q.v2*r, q.v3*r) +(/)(q::Quaternion, w::Quaternion) = q * conj(w) * (1.0 / abs2(w)) +(\)(q::Quaternion, w::Quaternion) = conj(q) * w * (1.0 / abs2(q)) debug = false @@ -206,8 +210,10 @@ q = Quaternion(0.44567, 0.755871, 0.882548, 0.423612) qmat = [Quaternion(0.015007, 0.355067, 0.418645, 0.318373)] @test scale!(q, copy(qmat)) != scale!(copy(qmat), q) ## Test * because it doesn't dispatch to scale! -@test q*qmat != qmat*q +@test q*qmat ≉ qmat*q @test conj(q*qmat) ≈ conj(qmat)*conj(q) +@test q * (q \ qmat) ≈ qmat ≈ (qmat / q) * q +@test q\qmat ≉ qmat/q # test ops on Numbers for elty in [Float32,Float64,Complex64,Complex128] diff --git a/test/parse.jl b/test/parse.jl index e46da0d29bf7d..88d1ebbb259d4 100644 --- a/test/parse.jl +++ b/test/parse.jl @@ -20,13 +20,14 @@ end # issue #9684 let + undot(op) = Symbol(string(op)[2:end]) for (ex1, ex2) in [("5.≠x", "5.!=x"), ("5.≥x", "5.>=x"), ("5.≤x", "5.<=x")] ex1 = parse(ex1); ex2 = parse(ex2) @test ex1.head === :call && (ex1.head === ex2.head) @test ex1.args[2] === 5 && ex2.args[2] === 5 - @test eval(Main, ex1.args[1]) === eval(Main, ex2.args[1]) + @test eval(Main, undot(ex1.args[1])) === eval(Main, undot(ex2.args[1])) @test ex1.args[3] === :x && (ex1.args[3] === ex2.args[3]) end end diff --git a/test/ranges.jl b/test/ranges.jl index 425ac8aa50d66..e75624be37ffc 100644 --- a/test/ranges.jl +++ b/test/ranges.jl @@ -260,7 +260,7 @@ end @test (1:2:6) + 0.3 == 1+0.3:2:5+0.3 @test (1:2:6) - 1 == 0:2:4 @test (1:2:6) - 0.3 == 1-0.3:2:5-0.3 -@test 2 .- (1:3) == 1:-1:-1 +@test 2 - (1:3) == 1:-1:-1 # operations between ranges and arrays @test all(([1:5;] + (5:-1:1)) .== 6) diff --git a/test/sparse/sparsevector.jl b/test/sparse/sparsevector.jl index 96046216ba012..1b2499edb06dd 100644 --- a/test/sparse/sparsevector.jl +++ b/test/sparse/sparsevector.jl @@ -589,7 +589,9 @@ let x = spv_x1, x2 = x2 = spv_x2 # multiplies xm = SparseVector(8, [2, 6], [5.0, -19.25]) - @test exact_equal(x .* x, abs2(x)) + let y=x # workaround for broadcast not preserving sparsity in general + @test exact_equal(x .* y, abs2(x)) + end @test exact_equal(x .* x2, xm) @test exact_equal(x2 .* x, xm) @@ -732,28 +734,28 @@ let x = sprand(16, 0.5), x2 = sprand(16, 0.4) end # scale - let sx = SparseVector(x.n, x.nzind, x.nzval * 2.5) - @test exact_equal(x * 2.5, sx) - @test exact_equal(x * (2.5 + 0.0*im), complex(sx)) - @test exact_equal(2.5 * x, sx) - @test 
exact_equal((2.5 + 0.0*im) * x, complex(sx)) - @test exact_equal(x * 2.5, sx) - @test exact_equal(2.5 * x, sx) - @test exact_equal(x .* 2.5, sx) - @test exact_equal(2.5 .* x, sx) - @test exact_equal(x / 2.5, SparseVector(x.n, x.nzind, x.nzval / 2.5)) + let α = 2.5, sx = SparseVector(x.n, x.nzind, x.nzval * α) + @test exact_equal(x * α, sx) + @test exact_equal(x * (α + 0.0*im), complex(sx)) + @test exact_equal(α * x, sx) + @test exact_equal((α + 0.0*im) * x, complex(sx)) + @test exact_equal(x * α, sx) + @test exact_equal(α * x, sx) + @test exact_equal(x .* α, sx) + @test exact_equal(α .* x, sx) + @test exact_equal(x / α, SparseVector(x.n, x.nzind, x.nzval / α)) xc = copy(x) - @test scale!(xc, 2.5) === xc + @test scale!(xc, α) === xc @test exact_equal(xc, sx) xc = copy(x) - @test scale!(2.5, xc) === xc + @test scale!(α, xc) === xc @test exact_equal(xc, sx) xc = copy(x) - @test scale!(xc, complex(2.5, 0.0)) === xc + @test scale!(xc, complex(α, 0.0)) === xc @test exact_equal(xc, sx) xc = copy(x) - @test scale!(complex(2.5, 0.0), xc) === xc + @test scale!(complex(α, 0.0), xc) === xc @test exact_equal(xc, sx) end From 61e44cbd0a5c313ee6b666a169532c37badfc3ab Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" Date: Wed, 14 Dec 2016 17:46:28 -0600 Subject: [PATCH 2/4] sparse column-slice speedup --- base/sparse/sparsevector.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index 5120f860608a9..404a5f8ca5294 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -402,7 +402,7 @@ function getindex(x::SparseMatrixCSC, I::UnitRange, j::Integer) # Restrict to the selected rows r1 = searchsortedfirst(x.rowval, first(I), c1, c2, Forward) r2 = searchsortedlast(x.rowval, last(I), c1, c2, Forward) - SparseVector(length(I), x.rowval[r1:r2] - first(I) + 1, x.nzval[r1:r2]) + SparseVector(length(I), [x.rowval[i] - first(I) + 1 for i = r1:r2], x.nzval[r1:r2]) end # In the general case, we piggy back upon SparseMatrixCSC's optimized solution From 9e98ce3986d13f64036f7d3e22d936a5cf328923 Mon Sep 17 00:00:00 2001 From: "Steven G. 
Johnson" Date: Wed, 14 Dec 2016 18:36:20 -0600 Subject: [PATCH 3/4] all(foo .== 0) was an extremely common pattern, so condensed it into the unexported function iszero(foo); also eliminated temporaries for some other all/any calls made iszero(x) = x==zero(x) the default with special cases for bignums and complex for performance, to better handle dimensionful quantities etc --- base/abstractarraymath.jl | 1 + base/complex.jl | 5 +++-- base/deprecated.jl | 4 ++-- base/gmp.jl | 1 + base/linalg/bidiag.jl | 4 ++-- base/linalg/diagonal.jl | 4 ++-- base/linalg/eigen.jl | 10 +++++----- base/linalg/generic.jl | 2 +- base/linalg/lapack.jl | 6 ++++-- base/linalg/linalg.jl | 2 +- base/linalg/special.jl | 14 +++++++------- base/linalg/svd.jl | 2 +- base/linalg/symmetric.jl | 4 ++-- base/linalg/tridiag.jl | 8 ++++---- base/mpfr.jl | 2 ++ base/number.jl | 8 ++++++++ base/profile.jl | 3 ++- 17 files changed, 48 insertions(+), 32 deletions(-) diff --git a/base/abstractarraymath.jl b/base/abstractarraymath.jl index 17db951a945fd..6580333dd0a65 100644 --- a/base/abstractarraymath.jl +++ b/base/abstractarraymath.jl @@ -5,6 +5,7 @@ isinteger(x::AbstractArray) = all(isinteger,x) isinteger{T<:Integer,n}(x::AbstractArray{T,n}) = true isreal(x::AbstractArray) = all(isreal,x) +iszero(x::AbstractArray) = all(iszero,x) isreal{T<:Real,n}(x::AbstractArray{T,n}) = true ctranspose(a::AbstractArray) = error("ctranspose not implemented for $(typeof(a)). Consider adding parentheses, e.g. A*(B*C') instead of A*B*C' to avoid explicit calculation of the transposed matrix.") transpose(a::AbstractArray) = error("transpose not implemented for $(typeof(a)). Consider adding parentheses, e.g. A*(B*C.') instead of A*B*C' to avoid explicit calculation of the transposed matrix.") diff --git a/base/complex.jl b/base/complex.jl index be34002a3ca2e..95ca6ae4cdec2 100644 --- a/base/complex.jl +++ b/base/complex.jl @@ -76,17 +76,18 @@ real{T<:Real}(::Type{T}) = T real{T<:Real}(::Type{Complex{T}}) = T isreal(x::Real) = true -isreal(z::Complex) = imag(z) == 0 +isreal(z::Complex) = iszero(imag(z)) """ isimag(z) -> Bool Test whether `z` is purely imaginary, i.e. has a real part equal to 0. """ -isimag(z::Number) = real(z) == 0 +isimag(z::Number) = iszero(real(z)) isinteger(z::Complex) = isreal(z) & isinteger(real(z)) isfinite(z::Complex) = isfinite(real(z)) & isfinite(imag(z)) isnan(z::Complex) = isnan(real(z)) | isnan(imag(z)) isinf(z::Complex) = isinf(real(z)) | isinf(imag(z)) +iszero(z::Complex) = iszero(real(z)) & iszero(imag(z)) """ complex(r, [i]) diff --git a/base/deprecated.jl b/base/deprecated.jl index ab7a4a6e1a208..d16ac4a791185 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -815,7 +815,7 @@ function convert(::Type{Base.LinAlg.UnitUpperTriangular}, A::Diagonal) "that convert `Diagonal`/`Bidiagonal` to `<:AbstractTriangular` are deprecated. ", "Consider calling the `UnitUpperTriangular` constructor directly ", "(`Base.LinAlg.UnitUpperTriangular(A)`) instead."), :convert) - if !all(A.diag .== one(eltype(A))) + if !all(x -> x == one(x), A.diag) throw(ArgumentError("matrix cannot be represented as UnitUpperTriangular")) end Base.LinAlg.UnitUpperTriangular(Array(A)) @@ -825,7 +825,7 @@ function convert(::Type{Base.LinAlg.UnitLowerTriangular}, A::Diagonal) "that convert `Diagonal`/`Bidiagonal` to `<:AbstractTriangular` are deprecated. 
", "Consider calling the `UnitLowerTriangular` constructor directly ", "(`Base.LinAlg.UnitLowerTriangular(A)`) instead."), :convert) - if !all(A.diag .== one(eltype(A))) + if !all(x -> x == one(x), A.diag) throw(ArgumentError("matrix cannot be represented as UnitLowerTriangular")) end Base.LinAlg.UnitLowerTriangular(Array(A)) diff --git a/base/gmp.jl b/base/gmp.jl index 8c131b480b46b..7ee13579a17b8 100644 --- a/base/gmp.jl +++ b/base/gmp.jl @@ -497,6 +497,7 @@ binomial(n::BigInt, k::Integer) = k < 0 ? BigInt(0) : binomial(n, UInt(k)) ==(i::Integer, x::BigInt) = cmp(x,i) == 0 ==(x::BigInt, f::CdoubleMax) = isnan(f) ? false : cmp(x,f) == 0 ==(f::CdoubleMax, x::BigInt) = isnan(f) ? false : cmp(x,f) == 0 +iszero(x::BigInt) = x == Clong(0) <=(x::BigInt, y::BigInt) = cmp(x,y) <= 0 <=(x::BigInt, i::Integer) = cmp(x,i) <= 0 diff --git a/base/linalg/bidiag.jl b/base/linalg/bidiag.jl index 72aa768bda274..aa6ab1d4778c3 100644 --- a/base/linalg/bidiag.jl +++ b/base/linalg/bidiag.jl @@ -263,8 +263,8 @@ end transpose(M::Bidiagonal) = Bidiagonal(M.dv, M.ev, !M.isupper) ctranspose(M::Bidiagonal) = Bidiagonal(conj(M.dv), conj(M.ev), !M.isupper) -istriu(M::Bidiagonal) = M.isupper || all(M.ev .== 0) -istril(M::Bidiagonal) = !M.isupper || all(M.ev .== 0) +istriu(M::Bidiagonal) = M.isupper || iszero(M.ev) +istril(M::Bidiagonal) = !M.isupper || iszero(M.ev) function tril!(M::Bidiagonal, k::Integer=0) n = length(M.dv) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index 5fa8e304f71b0..524e67ef29d91 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -101,9 +101,9 @@ end parent(D::Diagonal) = D.diag ishermitian{T<:Real}(D::Diagonal{T}) = true -ishermitian(D::Diagonal) = all(D.diag .== real(D.diag)) +ishermitian(D::Diagonal) = isreal(D.diag) issymmetric(D::Diagonal) = true -isposdef(D::Diagonal) = all(D.diag .> 0) +isposdef(D::Diagonal) = all(x -> x > 0, D.diag) factorize(D::Diagonal) = D diff --git a/base/linalg/eigen.jl b/base/linalg/eigen.jl index 6ec8b931022d5..93203248430e6 100644 --- a/base/linalg/eigen.jl +++ b/base/linalg/eigen.jl @@ -23,14 +23,14 @@ function getindex(A::Union{Eigen,GeneralizedEigen}, d::Symbol) throw(KeyError(d)) end -isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(A.values .> 0) +isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) function eigfact!{T<:BlasReal}(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) n = size(A, 2) n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) issymmetric(A) && return eigfact!(Symmetric(A)) A, WR, WI, VL, VR, _ = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'V', 'N', A) - all(WI .== 0.) && return Eigen(WR, VR) + iszero(WI) && return Eigen(WR, VR) evec = zeros(Complex{T}, n, n) j = 1 while j <= n @@ -164,7 +164,7 @@ Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead function eigvals!{T<:BlasReal}(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) issymmetric(A) && return eigvals!(Symmetric(A)) _, valsre, valsim, _ = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'N', 'N', A) - return all(valsim .== 0) ? valsre : complex(valsre, valsim) + return iszero(valsim) ? 
valsre : complex(valsre, valsim) end function eigvals!{T<:BlasComplex}(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) ishermitian(A) && return eigvals(Hermitian(A)) @@ -269,7 +269,7 @@ function eigfact!{T<:BlasReal}(A::StridedMatrix{T}, B::StridedMatrix{T}) issymmetric(A) && isposdef(B) && return eigfact!(Symmetric(A), Symmetric(B)) n = size(A, 1) alphar, alphai, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) - all(alphai .== 0) && return GeneralizedEigen(alphar ./ beta, vr) + iszero(alphai) && return GeneralizedEigen(alphar ./ beta, vr) vecs = zeros(Complex{T}, n, n) j = 1 @@ -352,7 +352,7 @@ Same as [`eigvals`](@ref), but saves space by overwriting the input `A` (and `B` function eigvals!{T<:BlasReal}(A::StridedMatrix{T}, B::StridedMatrix{T}) issymmetric(A) && isposdef(B) && return eigvals!(Symmetric(A), Symmetric(B)) alphar, alphai, beta, vl, vr = LAPACK.ggev!('N', 'N', A, B) - return (all(alphai .== 0) ? alphar : complex(alphar, alphai))./beta + return (iszero(alphai) ? alphar : complex(alphar, alphai))./beta end function eigvals!{T<:BlasComplex}(A::StridedMatrix{T}, B::StridedMatrix{T}) ishermitian(A) && isposdef(B) && return eigvals!(Hermitian(A), Hermitian(B)) diff --git a/base/linalg/generic.jl b/base/linalg/generic.jl index f70d76f1731e0..b240cbaa4741b 100644 --- a/base/linalg/generic.jl +++ b/base/linalg/generic.jl @@ -628,7 +628,7 @@ By default, the value of `tol` is the largest dimension of `M` multiplied by the [`eps`](@ref) of the [`eltype`](@ref) of `M`. """ -rank(A::AbstractMatrix, tol::Real) = sum(svdvals(A) .> tol) +rank(A::AbstractMatrix, tol::Real) = mapreduce(x -> x > tol, +, 0, svdvals(A)) function rank(A::AbstractMatrix) m,n = size(A) (m == 0 || n == 0) && return 0 diff --git a/base/linalg/lapack.jl b/base/linalg/lapack.jl index 876a5f44bbd1a..4542cb847cc4f 100644 --- a/base/linalg/lapack.jl +++ b/base/linalg/lapack.jl @@ -10,6 +10,8 @@ import ..LinAlg.BLAS.@blasfunc import ..LinAlg: BlasFloat, Char, BlasInt, LAPACKException, DimensionMismatch, SingularException, PosDefException, chkstride1, checksquare +using Base: iszero + #Generic LAPACK error handlers """ Handle only negative LAPACK error codes @@ -5502,7 +5504,7 @@ for (gees, gges, elty) in work = Array{$elty}(lwork) end end - A, vs, all(wi .== 0) ? wr : complex(wr, wi) + A, vs, iszero(wi) ? wr : complex(wr, wi) end # * .. Scalar Arguments .. @@ -5751,7 +5753,7 @@ for (trexc, trsen, tgsen, elty) in iwork = Array{BlasInt}(liwork) end end - T, Q, all(wi .== 0) ? wr : complex(wr, wi) + T, Q, iszero(wi) ? wr : complex(wr, wi) end trsen!(select::StridedVector{BlasInt}, T::StridedMatrix{$elty}, Q::StridedMatrix{$elty}) = trsen!('N', 'V', select, T, Q) diff --git a/base/linalg/linalg.jl b/base/linalg/linalg.jl index a2fc34a4a55e3..65b1d2249180f 100644 --- a/base/linalg/linalg.jl +++ b/base/linalg/linalg.jl @@ -10,7 +10,7 @@ import Base: USE_BLAS64, abs, big, ceil, conj, convert, copy, copy!, copy_transp imag, inv, isapprox, kron, ndims, parent, power_by_squaring, print_matrix, promote_rule, real, round, setindex!, show, similar, size, transpose, transpose!, trunc, broadcast -using Base: promote_op, _length +using Base: promote_op, _length, iszero # We use `_length` because of non-1 indices; releases after julia 0.5 # can go back to `length`. `_length(A)` is equivalent to `length(linearindices(A))`. 
using Base.Broadcast: broadcast_elwise_op diff --git a/base/linalg/special.jl b/base/linalg/special.jl index 69b13517045f5..9600141f58aaf 100644 --- a/base/linalg/special.jl +++ b/base/linalg/special.jl @@ -8,14 +8,14 @@ convert{T}(::Type{SymTridiagonal}, A::Diagonal{T})=SymTridiagonal(A.diag, zeros( convert{T}(::Type{Tridiagonal}, A::Diagonal{T})=Tridiagonal(zeros(T, size(A.diag,1)-1), A.diag, zeros(T, size(A.diag,1)-1)) function convert(::Type{Diagonal}, A::Union{Bidiagonal, SymTridiagonal}) - if !all(A.ev .== 0) + if !iszero(A.ev) throw(ArgumentError("matrix cannot be represented as Diagonal")) end Diagonal(A.dv) end function convert(::Type{SymTridiagonal}, A::Bidiagonal) - if !all(A.ev .== 0) + if !iszero(A.ev) throw(ArgumentError("matrix cannot be represented as SymTridiagonal")) end SymTridiagonal(A.dv, A.ev) @@ -24,28 +24,28 @@ end convert{T}(::Type{Tridiagonal}, A::Bidiagonal{T})=Tridiagonal(A.isupper?zeros(T, size(A.dv,1)-1):A.ev, A.dv, A.isupper?A.ev:zeros(T, size(A.dv,1)-1)) function convert(::Type{Bidiagonal}, A::SymTridiagonal) - if !all(A.ev .== 0) + if !iszero(A.ev) throw(ArgumentError("matrix cannot be represented as Bidiagonal")) end Bidiagonal(A.dv, A.ev, true) end function convert(::Type{Diagonal}, A::Tridiagonal) - if !(all(A.dl .== 0) && all(A.du .== 0)) + if !(iszero(A.dl) && iszero(A.du)) throw(ArgumentError("matrix cannot be represented as Diagonal")) end Diagonal(A.d) end function convert(::Type{Bidiagonal}, A::Tridiagonal) - if all(A.dl .== 0) return Bidiagonal(A.d, A.du, true) - elseif all(A.du .== 0) return Bidiagonal(A.d, A.dl, false) + if iszero(A.dl) return Bidiagonal(A.d, A.du, true) + elseif iszero(A.du) return Bidiagonal(A.d, A.dl, false) else throw(ArgumentError("matrix cannot be represented as Bidiagonal")) end end function convert(::Type{SymTridiagonal}, A::Tridiagonal) - if !all(A.dl .== A.du) + if A.dl != A.du throw(ArgumentError("matrix cannot be represented as SymTridiagonal")) end SymTridiagonal(A.d, A.dl) diff --git a/base/linalg/svd.jl b/base/linalg/svd.jl index 766c8b911c751..c4e0514126a69 100644 --- a/base/linalg/svd.jl +++ b/base/linalg/svd.jl @@ -122,7 +122,7 @@ end Returns the singular values of `A`, saving space by overwriting the input. """ -svdvals!{T<:BlasFloat}(A::StridedMatrix{T}) = any([size(A)...].==0) ? zeros(T, 0) : LAPACK.gesdd!('N', A)[2] +svdvals!{T<:BlasFloat}(A::StridedMatrix{T}) = findfirst(size(A), 0) > 0 ? 
zeros(T, 0) : LAPACK.gesdd!('N', A)[2] svdvals{T<:BlasFloat}(A::AbstractMatrix{T}) = svdvals!(copy(A)) """ diff --git a/base/linalg/symmetric.jl b/base/linalg/symmetric.jl index 611bb2adb5191..5e01a89e2c8a6 100644 --- a/base/linalg/symmetric.jl +++ b/base/linalg/symmetric.jl @@ -169,9 +169,9 @@ end ishermitian(A::Hermitian) = true ishermitian{T<:Real,S}(A::Symmetric{T,S}) = true -ishermitian{T<:Complex,S}(A::Symmetric{T,S}) = all(imag(A.data) .== 0) +ishermitian{T<:Complex,S}(A::Symmetric{T,S}) = isreal(A.data) issymmetric{T<:Real,S}(A::Hermitian{T,S}) = true -issymmetric{T<:Complex,S}(A::Hermitian{T,S}) = all(imag(A.data) .== 0) +issymmetric{T<:Complex,S}(A::Hermitian{T,S}) = isreal(A.data) issymmetric(A::Symmetric) = true transpose(A::Symmetric) = A ctranspose{T<:Real}(A::Symmetric{T}) = A diff --git a/base/linalg/tridiag.jl b/base/linalg/tridiag.jl index b9410803b468f..55bf04368d7b7 100644 --- a/base/linalg/tridiag.jl +++ b/base/linalg/tridiag.jl @@ -206,8 +206,8 @@ eigvecs{T<:BlasFloat,Eigenvalue<:Real}(A::SymTridiagonal{T}, eigvals::Vector{Eig #tril and triu -istriu(M::SymTridiagonal) = all(M.ev .== 0) -istril(M::SymTridiagonal) = all(M.ev .== 0) +istriu(M::SymTridiagonal) = iszero(M.ev) +istril(M::SymTridiagonal) = iszero(M.ev) function tril!(M::SymTridiagonal, k::Integer=0) n = length(M.dv) @@ -526,8 +526,8 @@ end #tril and triu -istriu(M::Tridiagonal) = all(M.dl .== 0) -istril(M::Tridiagonal) = all(M.du .== 0) +istriu(M::Tridiagonal) = iszero(M.dl) +istril(M::Tridiagonal) = iszero(M.du) function tril!(M::Tridiagonal, k::Integer=0) n = length(M.d) diff --git a/base/mpfr.jl b/base/mpfr.jl index 71278f7fc1d9b..13762b22f7fd9 100644 --- a/base/mpfr.jl +++ b/base/mpfr.jl @@ -826,6 +826,8 @@ end isfinite(x::BigFloat) = !isinf(x) && !isnan(x) +iszero(x::BigFloat) = x == Clong(0) + @eval typemax(::Type{BigFloat}) = $(BigFloat( Inf)) @eval typemin(::Type{BigFloat}) = $(BigFloat(-Inf)) diff --git a/base/number.jl b/base/number.jl index c96c3dcff8f07..9ed1f47487331 100644 --- a/base/number.jl +++ b/base/number.jl @@ -4,6 +4,14 @@ isinteger(x::Integer) = true +""" + iszero(x) + +Return `true` if `x == zero(x)`; if `x` is an array, this checks whether +all of the elements of `x` are zero. +""" +iszero(x) = x == zero(x) # fallback method + size(x::Number) = () size(x::Number,d) = convert(Int,d)<1 ? throw(BoundsError()) : 1 indices(x::Number) = () diff --git a/base/profile.jl b/base/profile.jl index 9672b05d88834..1c10ad12c5699 100644 --- a/base/profile.jl +++ b/base/profile.jl @@ -3,6 +3,7 @@ module Profile import Base.StackTraces: lookup, UNKNOWN, show_spec_linfo +using Base: iszero export @profile @@ -430,7 +431,7 @@ end ## A tree representation # Identify and counts repetitions of all unique backtraces function tree_aggregate(data::Vector{UInt64}) - iz = find(data .== 0) # find the breaks between backtraces + iz = find(iszero, data) # find the breaks between backtraces treecount = Dict{Vector{UInt64},Int}() istart = 1 + btskip for iend in iz From 1274eabd39ccea460cd19a656370ddaec325e757 Mon Sep 17 00:00:00 2001 From: "Steven G. 
Johnson" Date: Thu, 15 Dec 2016 10:33:40 -0600 Subject: [PATCH 4/4] faster duplicate-checking in date parser --- base/dates/io.jl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/base/dates/io.jl b/base/dates/io.jl index 4d70bdc9da189..bdd1d60a615a1 100644 --- a/base/dates/io.jl +++ b/base/dates/io.jl @@ -81,7 +81,14 @@ SLOT_RULE['M'] = Minute SLOT_RULE['S'] = Second SLOT_RULE['s'] = Millisecond -duplicates(slots) = any(map(x->count(y->x.parser==y.parser,slots),slots) .> 1) +function anyduplicates(slots) + for i = 1:length(slots), j = i+1:length(slots) + if slots[i].parser == slots[j].parser + return true + end + end + return false +end """ DateFormat(format::AbstractString, locale::AbstractString="english") -> DateFormat @@ -130,7 +137,7 @@ function DateFormat(f::AbstractString, locale::AbstractString="english") push!(slots,slot) end - duplicates(slots) && throw(ArgumentError("Two separate periods of the same type detected")) + anyduplicates(slots) && throw(ArgumentError("Two separate periods of the same type detected")) return DateFormat(slots,prefix,locale) end