diff --git a/.travis.yml b/.travis.yml index b42ce14dc..4aaab2e25 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,8 @@ os: - linux - osx julia: - - 0.6 + - 0.7 + - 1.0 notifications: email: false sudo: false @@ -13,10 +14,5 @@ addons: - liblapack-dev - libgmp-dev - libglpk-dev -script: - - if [[ -a .git/shallow ]]; then git fetch --unshallow; fi - - julia -e 'Pkg.clone(pwd())' - - julia -e 'Pkg.test("Convex", coverage=true)' after_success: - - julia -e 'Pkg.add("Coverage")' - - julia -e 'cd(Pkg.dir("Convex")); using Coverage; Coveralls.submit(process_folder())' + - julia -e 'using Pkg; Pkg.add("Coverage"); using Coverage; Coveralls.submit(process_folder())' diff --git a/REQUIRE b/REQUIRE index aa0c2464c..8900d2031 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,4 +1,3 @@ -julia 0.6 0.7- -MathProgBase 0.5 0.8 +julia 0.7 +MathProgBase 0.7 0.8 DataStructures -Compat 0.24 diff --git a/src/Convex.jl b/src/Convex.jl index 8b609ba91..2bccabe28 100644 --- a/src/Convex.jl +++ b/src/Convex.jl @@ -2,8 +2,8 @@ __precompile__() module Convex import DataStructures -importall Base.Operators -using Compat +using LinearAlgebra +using SparseArrays global DEFAULT_SOLVER = nothing ### modeling framework @@ -49,9 +49,6 @@ include("atoms/lp_cone/min.jl") include("atoms/lp_cone/sumlargest.jl") include("atoms/lp_cone/dotsort.jl") - - - ### SOC atoms include("atoms/second_order_cone/norm.jl") # also includes some lp atoms include("atoms/second_order_cone/norm2.jl") @@ -83,7 +80,6 @@ include("atoms/exp_+_sdp_cone/logdet.jl") ### utilities include("utilities/show.jl") include("utilities/iteration.jl") -include("utilities/deprecated.jl") include("utilities/broadcast.jl") #Temporary workaround for memory leak (https://github.com/JuliaOpt/Convex.jl/issues/83) diff --git a/src/atoms/affine/add_subtract.jl b/src/atoms/affine/add_subtract.jl index bedd5f972..2ed472d42 100644 --- a/src/atoms/affine/add_subtract.jl +++ b/src/atoms/affine/add_subtract.jl @@ -6,8 +6,7 @@ # Please read expressions.jl first. ############################################################################# -import Base.broadcast -export +, -, broadcast +export +, - export sign, curvature, monotonicity, evaluate ### Unary Negation @@ -98,7 +97,8 @@ function curvature(x::AdditionAtom) end function evaluate(x::AdditionAtom) - return sum([evaluate(child) for child in x.children]) + # broadcast function is used here instead of sum to support addition between scalars and arrays + return broadcast(+, map(evaluate, x.children)...) end function conic_form!(x::AdditionAtom, unique_conic_forms::UniqueConicForms=UniqueConicForms()) diff --git a/src/atoms/affine/conv.jl b/src/atoms/affine/conv.jl index b6d57d7df..99d2d9ee7 100644 --- a/src/atoms/affine/conv.jl +++ b/src/atoms/affine/conv.jl @@ -3,7 +3,6 @@ # Handles convolution between a constant vector and an expression vector. 
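# Illustrative sketch, not part of the patch: why the new evaluate(::AdditionAtom)
# above calls broadcast(+, ...) rather than sum. Julia 0.7 removed scalar-plus-array
# `+`, so summing a mix of scalar and array child values throws, while broadcasting
# still promotes the scalar.
child_values = Any[2.0, [1.0 2.0; 3.0 4.0]]   # e.g. a scalar child and a matrix child
# sum(child_values)            # MethodError on 0.7+: +(::Float64, ::Matrix) no longer exists
broadcast(+, child_values...)  # [3.0 4.0; 5.0 6.0]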
############################################################################# -import Base.conv export conv function conv(x::Value, y::AbstractExpr) @@ -19,4 +18,4 @@ function conv(x::Value, y::AbstractExpr) return X*y end -conv(x::AbstractExpr, y::Value) = conv(y, x) \ No newline at end of file +conv(x::AbstractExpr, y::Value) = conv(y, x) diff --git a/src/atoms/affine/diag.jl b/src/atoms/affine/diag.jl index dadbb9255..19c87f4e1 100644 --- a/src/atoms/affine/diag.jl +++ b/src/atoms/affine/diag.jl @@ -6,7 +6,7 @@ ############################################################################# # k >= min(num_cols, num_rows) || k <= -min(num_rows, num_cols) -import Base.diag +import LinearAlgebra.diag export diag #export sign, curvature, monotonicity, evaluate diff --git a/src/atoms/affine/diagm.jl b/src/atoms/affine/diagm.jl index 2c74fb87a..b220fc962 100644 --- a/src/atoms/affine/diagm.jl +++ b/src/atoms/affine/diagm.jl @@ -5,8 +5,8 @@ # Please read expressions.jl first. ############################################################################# -import Base.diagm -export diagm +import LinearAlgebra.diagm, LinearAlgebra.Diagonal +export diagm, Diagonal struct DiagMatrixAtom <: AbstractExpr head::Symbol @@ -22,7 +22,7 @@ struct DiagMatrixAtom <: AbstractExpr elseif num_cols == 1 sz = num_rows else - error("Only vectors are allowed for diagm. Did you mean to use diag?") + throw(ArgumentError("Only vectors are allowed for diagm/Diagonal. Did you mean to use diag?")) end children = (x, ) @@ -46,10 +46,14 @@ function curvature(x::DiagMatrixAtom) end function evaluate(x::DiagMatrixAtom) - return diagm(vec(evaluate(x.children[1]))) + return Diagonal(vec(evaluate(x.children[1]))) end -diagm(x::AbstractExpr) = DiagMatrixAtom(x) +function diagm((d, x)::Pair{<:Integer, <:AbstractExpr}) + d == 0 || throw(ArgumentError("only the main diagonal is supported")) + return DiagMatrixAtom(x) +end +Diagonal(x::AbstractExpr) = DiagMatrixAtom(x) function conic_form!(x::DiagMatrixAtom, unique_conic_forms::UniqueConicForms=UniqueConicForms()) if !has_conic_form(unique_conic_forms, x) diff --git a/src/atoms/affine/dot.jl b/src/atoms/affine/dot.jl index a266b15e9..d4e1ba710 100644 --- a/src/atoms/affine/dot.jl +++ b/src/atoms/affine/dot.jl @@ -1,4 +1,4 @@ -import Base.dot, Base.vecdot +import LinearAlgebra.dot export vecdot, dot diff --git a/src/atoms/affine/index.jl b/src/atoms/affine/index.jl index 1f1688f3e..68b5182d7 100644 --- a/src/atoms/affine/index.jl +++ b/src/atoms/affine/index.jl @@ -1,7 +1,7 @@ import Base: getindex, to_index export IndexAtom, getindex -const ArrayOrNothing = Union{AbstractArray, Void} +const ArrayOrNothing = Union{AbstractArray, Nothing} struct IndexAtom <: AbstractExpr head::Symbol @@ -54,7 +54,7 @@ function conic_form!(x::IndexAtom, unique_conic_forms::UniqueConicForms=UniqueCo if x.inds == nothing sz = length(x.cols) * length(x.rows) - J = Array{Int}(sz) + J = Array{Int}(undef, sz) k = 1 num_rows = x.children[1].size[1] @@ -78,14 +78,14 @@ end ## API Definition begins -getindex(x::AbstractExpr, rows::AbstractArray{T, 1}, cols::AbstractArray{T, 1}) where {T <: Real} = IndexAtom(x, rows, cols) -getindex(x::AbstractExpr, inds::AbstractArray{T, 1}) where {T <: Real} = IndexAtom(x, inds) +getindex(x::AbstractExpr, rows::AbstractVector{T}, cols::AbstractVector{T}) where {T<:Real} = IndexAtom(x, rows, cols) +getindex(x::AbstractExpr, inds::AbstractVector{<:Real}) = IndexAtom(x, inds) getindex(x::AbstractExpr, ind::Real) = getindex(x, ind:ind) getindex(x::AbstractExpr, row::Real, col::Real) 
= getindex(x, row:row, col:col) -getindex(x::AbstractExpr, row::Real, cols::AbstractArray{T, 1}) where {T <: Real} = getindex(x, row:row, cols) -getindex(x::AbstractExpr, rows::AbstractArray{T, 1}, col::Real) where {T <: Real} = getindex(x, rows, col:col) +getindex(x::AbstractExpr, row::Real, cols::AbstractVector{<:Real}) = getindex(x, row:row, cols) +getindex(x::AbstractExpr, rows::AbstractVector{<:Real}, col::Real) = getindex(x, rows, col:col) # XXX todo: speed test; there are lots of possible solutions for this -function getindex(x::AbstractExpr, I::AbstractArray{Bool,2}) +function getindex(x::AbstractExpr, I::AbstractMatrix{Bool}) return [xi for (xi,ii) in zip(x,I) if ii] end function getindex(x::AbstractExpr, I::AbstractVector{Bool}) diff --git a/src/atoms/affine/inner_product.jl b/src/atoms/affine/inner_product.jl index b67c9f080..d90e4a67d 100644 --- a/src/atoms/affine/inner_product.jl +++ b/src/atoms/affine/inner_product.jl @@ -2,11 +2,11 @@ export inner_product function inner_product(x::AbstractExpr,y::AbstractExpr) if x.size==y.size && x.size[1] == x.size[2] - return(real(trace(x'*y))) + return(real(tr(x'*y))) else error("Arguments must be square matrix of same dimension") end end inner_product(x::Value, y::AbstractExpr) = inner_product(Constant(x),y) -inner_product(x::AbstractExpr, y::Value) = inner_product(x,Constant(y)) \ No newline at end of file +inner_product(x::AbstractExpr, y::Value) = inner_product(x,Constant(y)) diff --git a/src/atoms/affine/kron.jl b/src/atoms/affine/kron.jl index 6bbce28db..1b9b23fbb 100644 --- a/src/atoms/affine/kron.jl +++ b/src/atoms/affine/kron.jl @@ -1,4 +1,4 @@ -import Base.kron +import LinearAlgebra.kron export kron function kron(a::Value, b::AbstractExpr) @@ -28,4 +28,4 @@ function kron(a::AbstractExpr, b::Value) push!(rows, foldl(hcat, row)) end return foldl(vcat, rows) -end \ No newline at end of file +end diff --git a/src/atoms/affine/multiply_divide.jl b/src/atoms/affine/multiply_divide.jl index 3d1a5a260..864dc31e6 100644 --- a/src/atoms/affine/multiply_divide.jl +++ b/src/atoms/affine/multiply_divide.jl @@ -6,8 +6,7 @@ # Please read expressions.jl first. ############################################################################# -import Base.broadcast -export broadcast +import Base.Broadcast.broadcasted export sign, monotonicity, curvature, evaluate, conic_form! 
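# Illustrative sketch, not part of the patch: on Julia 0.7+ a dot call like `x .* y`
# lowers to Base.Broadcast.materialize(Base.Broadcast.broadcasted(*, x, y)), so
# elementwise operators are intercepted by overloading `broadcasted` (as the hunks
# below do for expressions) instead of `broadcast`. A minimal stand-in type:
import Base.Broadcast: broadcasted

struct Wrapped
    x::Float64
end
broadcasted(::typeof(*), a::Wrapped, b::Wrapped) = Wrapped(a.x * b.x)

Wrapped(2.0) .* Wrapped(3.0)   # hits the method above; materialize passes the result through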
### Scalar and matrix multiplication @@ -83,11 +82,11 @@ function conic_form!(x::MultiplyAtom, unique_conic_forms::UniqueConicForms=Uniqu # left matrix multiplication elseif x.children[1].head == :constant objective = conic_form!(x.children[2], unique_conic_forms) - objective = kron(speye(x.size[2]), x.children[1].value) * objective + objective = kron(sparse(1.0I, x.size[2], x.size[2]), x.children[1].value) * objective # right matrix multiplication else objective = conic_form!(x.children[1], unique_conic_forms) - objective = kron(x.children[2].value', speye(x.size[1])) * objective + objective = kron(x.children[2].value', sparse(1.0I, x.size[1], x.size[1])) * objective end cache_conic_form!(unique_conic_forms, x, objective) end @@ -103,7 +102,7 @@ end *(x::Value, y::AbstractExpr) = MultiplyAtom(Constant(x), y) *(x::AbstractExpr, y::Value) = MultiplyAtom(x, Constant(y)) -/(x::AbstractExpr, y::Value) = MultiplyAtom(x, Constant(1./y)) +/(x::AbstractExpr, y::Value) = MultiplyAtom(x, Constant(1 ./ y)) ### .* # All constructors of this check (and so this function requires) @@ -171,14 +170,14 @@ function conic_form!(x::DotMultiplyAtom, unique_conic_forms::UniqueConicForms=Un var = var*ones(1,size(coeff,1)) end - const_multiplier = spdiagm(vec(coeff)) # used to be spdiagm((vec(coeff),), (0,)), not sure why + const_multiplier = spdiagm(0 => vec(coeff)) objective = const_multiplier * conic_form!(var, unique_conic_forms) cache_conic_form!(unique_conic_forms, x, objective) end return get_conic_form(unique_conic_forms, x) end -function broadcast(::typeof(*), x::Constant, y::AbstractExpr) +function broadcasted(::typeof(*), x::Constant, y::AbstractExpr) if x.size == (1, 1) || y.size == (1, 1) return x * y elseif size(y,1) < size(x,1) && size(y,1) == 1 @@ -189,10 +188,10 @@ function broadcast(::typeof(*), x::Constant, y::AbstractExpr) return DotMultiplyAtom(x, y) end end -broadcast(::typeof(*), y::AbstractExpr, x::Constant) = DotMultiplyAtom(x, y) +broadcasted(::typeof(*), y::AbstractExpr, x::Constant) = DotMultiplyAtom(x, y) # if neither is a constant it's not DCP, but might be nice to support anyway for eg MultiConvex -function broadcast(::typeof(*), x::AbstractExpr, y::AbstractExpr) +function broadcasted(::typeof(*), x::AbstractExpr, y::AbstractExpr) if x.size == (1, 1) || y.size == (1, 1) return x * y elseif vexity(x) == ConstVexity() @@ -203,7 +202,7 @@ function broadcast(::typeof(*), x::AbstractExpr, y::AbstractExpr) return DotMultiplyAtom(y, x) end end -broadcast(::typeof(*), x::Value, y::AbstractExpr) = DotMultiplyAtom(Constant(x), y) -broadcast(::typeof(*), x::AbstractExpr, y::Value) = DotMultiplyAtom(Constant(y), x) -broadcast(::typeof(/), x::AbstractExpr, y::Value) = DotMultiplyAtom(Constant(1./y), x) +broadcasted(::typeof(*), x::Value, y::AbstractExpr) = DotMultiplyAtom(Constant(x), y) +broadcasted(::typeof(*), x::AbstractExpr, y::Value) = DotMultiplyAtom(Constant(y), x) +broadcasted(::typeof(/), x::AbstractExpr, y::Value) = DotMultiplyAtom(Constant(1 ./ y), x) # x ./ y and x / y for x constant, y variable is defined in second_order_cone.qol_elemwise.jl diff --git a/src/atoms/affine/partialtrace.jl b/src/atoms/affine/partialtrace.jl index 03810a9ce..d8364fed2 100644 --- a/src/atoms/affine/partialtrace.jl +++ b/src/atoms/affine/partialtrace.jl @@ -44,8 +44,8 @@ function evaluate(x::PartialTraceAtom) subsystem = function(sys) function term(ρ, j::Int) - a = speye(1) - b = speye(1) + a = sparse(1.0I, 1, 1) + b = sparse(1.0I, 1, 1) i_sys = 1 for dim in dims if i_sys == sys @@ -55,8 +55,8 @@ function 
evaluate(x::PartialTraceAtom) a = kron(a, v') b = kron(b, v) else - a = kron(a, speye(dim)) - b = kron(b, speye(dim)) + a = kron(a, sparse(1.0I, dim, dim)) + b = kron(b, sparse(1.0I, dim, dim)) end i_sys += 1 end @@ -65,7 +65,7 @@ function evaluate(x::PartialTraceAtom) return sum([term(ρ, j) for j in 1:dims[sys]]) end sub_systems = [subsystem(i) for i in 1:length(dims)] - a = eye(1) + a = Matrix(1.0I, 1, 1) for i in 1:length(dims) if i == x.sys continue @@ -73,7 +73,7 @@ function evaluate(x::PartialTraceAtom) a = kron(a,sub_systems[i]) end end - return trace(sub_systems[x.sys])*a + return tr(sub_systems[x.sys])*a end @@ -87,8 +87,8 @@ function conic_form!(x::PartialTraceAtom, unique_conic_forms::UniqueConicForms=U # in the system we want to trace out # This function returns every term in the sum function term(ρ, j::Int) - a = speye(1) - b = speye(1) + a = sparse(1.0I, 1, 1) + b = sparse(1.0I, 1, 1) i_sys = 1 for dim in dims if i_sys == sys @@ -98,8 +98,8 @@ function conic_form!(x::PartialTraceAtom, unique_conic_forms::UniqueConicForms=U a = kron(a, v') b = kron(b, v) else - a = kron(a, speye(dim)) - b = kron(b, speye(dim)) + a = kron(a, sparse(1.0I, dim, dim)) + b = kron(b, sparse(1.0I, dim, dim)) end i_sys += 1 end @@ -113,4 +113,4 @@ function conic_form!(x::PartialTraceAtom, unique_conic_forms::UniqueConicForms=U return get_conic_form(unique_conic_forms, x) end -partialtrace(x::AbstractExpr, sys::Int, dim::Vector) = PartialTraceAtom(x, sys, dim) \ No newline at end of file +partialtrace(x::AbstractExpr, sys::Int, dim::Vector) = PartialTraceAtom(x, sys, dim) diff --git a/src/atoms/affine/reshape.jl b/src/atoms/affine/reshape.jl index a659fc9e6..1fddab16d 100644 --- a/src/atoms/affine/reshape.jl +++ b/src/atoms/affine/reshape.jl @@ -13,7 +13,7 @@ struct ReshapeAtom <: AbstractExpr if m * n != get_vectorized_size(x) error("Cannot reshape expression of size $(x.size) to ($(m), $(n))") end - return new(:reshape, object_id(x), (x,), (m, n)) + return new(:reshape, objectid(x), (x,), (m, n)) end end diff --git a/src/atoms/affine/stack.jl b/src/atoms/affine/stack.jl index 0ddfdef0e..dda9e7190 100644 --- a/src/atoms/affine/stack.jl +++ b/src/atoms/affine/stack.jl @@ -1,5 +1,5 @@ -import Base.vcat, Base.hcat -export vcat, hcat, HcatAtom +import Base.vcat, Base.hcat, Base.hvcat +export vcat, hcat, hvcat, HcatAtom export sign, curvature, monotonicity, evaluate, conic_form! struct HcatAtom <: AbstractExpr @@ -35,7 +35,7 @@ function curvature(x::HcatAtom) end function evaluate(x::HcatAtom) - return hcat([evaluate(c) for c in x.children]...) + return hcat(map(evaluate, x.children)...) end @@ -51,7 +51,7 @@ function conic_form!(x::HcatAtom, unique_conic_forms::UniqueConicForms=UniqueCon for objective in objectives for id in keys(objective) if !(id in keys(variable_to_sizes)) - if id == object_id(:constant) + if id == objectid(:constant) variable_to_sizes[id] = 1 else variable_to_sizes[id] = get_vectorized_size(id_to_variables[id]) @@ -111,26 +111,29 @@ function conic_form!(x::HcatAtom, unique_conic_forms::UniqueConicForms=UniqueCon end hcat(args::AbstractExpr...) = HcatAtom(args...) -hcat(args::AbstractExprOrValue...) = HcatAtom([convert(AbstractExpr, arg) for arg in args]...) -hcat(args::Value...) = Base.cat(2, args...) +hcat(args::AbstractExprOrValue...) = HcatAtom(map(arg -> convert(AbstractExpr, arg), args)...) +hcat(args::Value...) = Base.cat(args..., dims=Val(2)) # TODO: implement vertical concatenation in a more efficient way -vcat(args::AbstractExpr...) 
= transpose(HcatAtom([transpose(arg) for arg in args]...)) -vcat(args::AbstractExprOrValue...) = transpose(HcatAtom([transpose(convert(AbstractExpr, arg)) for arg in args]...)) -vcat(args::Value...) = Base.cat(1, args...) # Note: this makes general vcat slower for anyone using Convex... - - -Base.vect(args::T...) where {T<:AbstractExpr} = transpose(HcatAtom([transpose(arg) for arg in args]...)) -Base.vect(args::AbstractExpr...) = transpose(HcatAtom([transpose(arg) for arg in args]...)) -Base.vect(args::AbstractExprOrValue...) = transpose(HcatAtom([transpose(convert(AbstractExpr,arg)) for arg in args]...)) -if Base._oldstyle_array_vcat_ - Base.vect(args::Value...) = Base.vcat(args...) - # This is ugly, because the method redefines simple cases like [1,2,3] - -else - function Base.vect(args::Value...) - T = Base.promote_typeof(args...) - return copy!(Array{T}(length(args)), args) +vcat(args::AbstractExpr...) = transpose(HcatAtom(map(transpose, args)...)) +vcat(args::AbstractExprOrValue...) = transpose(HcatAtom(map(arg -> transpose(convert(AbstractExpr, arg)), args)...)) +vcat(args::Value...) = Base.cat(args..., dims=Val(1)) # Note: this makes general vcat slower for anyone using Convex... + + +function hvcat(rows::Tuple{Vararg{Int}}, args::AbstractExprOrValue...) + nbr = length(rows) + rs = Vector{Any}(undef, nbr) + a = 1 + for i = 1:nbr + rs[i] = hcat(args[a:a-1+rows[i]]...) + a += rows[i] end + return vcat(rs...) end + +Base.vect(args::AbstractExpr...) = transpose(HcatAtom(map(transpose, args)...)) +Base.vect(args::AbstractExprOrValue...) = transpose(HcatAtom(map(arg -> transpose(convert(AbstractExpr, arg)), args)...)) + +# XXX: Reimplementation of the Base method +Base.vect(args::Value...) = copyto!(Vector{Base.promote_typeof(args...)}(undef, length(args)), args) diff --git a/src/atoms/affine/sum.jl b/src/atoms/affine/sum.jl index 2fcb5b582..36681e4a9 100644 --- a/src/atoms/affine/sum.jl +++ b/src/atoms/affine/sum.jl @@ -48,8 +48,8 @@ function conic_form!(x::SumAtom, unique_conic_forms::UniqueConicForms=UniqueConi objective = conic_form!(x.children[1], unique_conic_forms) new_obj = copy(objective) for var in keys(new_obj) - re = sum(new_obj[var][1], 1) - im = sum(new_obj[var][2], 1) + re = sum(new_obj[var][1], dims=1) + im = sum(new_obj[var][2], dims=1) new_obj[var] = (re,im) end cache_conic_form!(unique_conic_forms, x, new_obj) diff --git a/src/atoms/affine/trace.jl b/src/atoms/affine/trace.jl index 3d214ecde..0cf4969ff 100644 --- a/src/atoms/affine/trace.jl +++ b/src/atoms/affine/trace.jl @@ -1,6 +1,6 @@ -import Base.trace -export trace +import LinearAlgebra.tr +export tr -function trace(e::AbstractExpr) +function tr(e::AbstractExpr) return sum(diag(e)) end diff --git a/src/atoms/affine/transpose.jl b/src/atoms/affine/transpose.jl index 467edff5a..6525cf820 100644 --- a/src/atoms/affine/transpose.jl +++ b/src/atoms/affine/transpose.jl @@ -5,8 +5,8 @@ # Please read expressions.jl first. ############################################################################# -import Base.transpose, Base.ctranspose -export transpose, ctranspose, TransposeAtom, CTransposeAtom +import Base.transpose, Base.adjoint +export transpose, adjoint, TransposeAtom, AdjointAtom export sign, curvature, monotonicity, evaluate, conic_form! 
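# Illustrative sketch, not part of the patch: the hvcat method added to stack.jl above
# matters because block-matrix literals lower to hvcat, e.g. [U A; A' V] is
# hvcat((2, 2), U, A, A', V), so SDP-style blocks mixing expressions and plain arrays
# keep working on Julia 0.7+.
using Convex
A = randn(3, 2)
U = Variable(3, 3)
V = Variable(2, 2)
B = [U A; A' V]      # dispatches to hvcat((2, 2), U, A, A', V)
size(B) == (5, 5)    # true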
struct TransposeAtom <: AbstractExpr @@ -48,8 +48,8 @@ function conic_form!(x::TransposeAtom, unique_conic_forms::UniqueConicForms=Uniq num_rows = x.size[1] num_cols = x.size[2] - I = Array{Int}(sz) - J = Array{Int}(sz) + I = Array{Int}(undef, sz) + J = Array{Int}(undef, sz) k = 1 for r = 1:num_rows @@ -72,37 +72,37 @@ transpose(x::AbstractExpr) = TransposeAtom(x) -struct CTransposeAtom <: AbstractExpr +struct AdjointAtom <: AbstractExpr head::Symbol id_hash::UInt64 children::Tuple{AbstractExpr} size::Tuple{Int, Int} - function CTransposeAtom(x::AbstractExpr) + function AdjointAtom(x::AbstractExpr) children = (x,) - return new(:ctranspose, hash(children), children, (x.size[2], x.size[1])) + return new(:adjoint, hash(children), children, (x.size[2], x.size[1])) end end -function sign(x::CTransposeAtom) +function sign(x::AdjointAtom) return sign(x.children[1]) end -function monotonicity(x::CTransposeAtom) +function monotonicity(x::AdjointAtom) return (Nondecreasing(),) end -function curvature(x::CTransposeAtom) +function curvature(x::AdjointAtom) return ConstVexity() end -function evaluate(x::CTransposeAtom) +function evaluate(x::AdjointAtom) return evaluate(x.children[1])' end # Since everything is vectorized, we simply need to multiply x by a permutation # matrix such that coeff * vectorized(x) - vectorized(x') = 0 -function conic_form!(x::CTransposeAtom, unique_conic_forms::UniqueConicForms=UniqueConicForms()) +function conic_form!(x::AdjointAtom, unique_conic_forms::UniqueConicForms=UniqueConicForms()) if !has_conic_form(unique_conic_forms, x) objective = conic_form!(x.children[1], unique_conic_forms) @@ -111,8 +111,8 @@ function conic_form!(x::CTransposeAtom, unique_conic_forms::UniqueConicForms=Uni num_rows = x.size[1] num_cols = x.size[2] - I = Array{Int}(sz) - J = Array{Int}(sz) + I = Array{Int}(undef, sz) + J = Array{Int}(undef, sz) k = 1 for r = 1:num_rows @@ -136,6 +136,5 @@ function conic_form!(x::CTransposeAtom, unique_conic_forms::UniqueConicForms=Uni return get_conic_form(unique_conic_forms, x) end - -ctranspose(x::AbstractExpr) = CTransposeAtom(x) -ctranspose(x::Constant) = Constant(x.value') +adjoint(x::AbstractExpr) = AdjointAtom(x) +adjoint(x::Constant) = Constant(x.value') diff --git a/src/atoms/exp_+_sdp_cone/logdet.jl b/src/atoms/exp_+_sdp_cone/logdet.jl index 9a2296210..bf75def4f 100644 --- a/src/atoms/exp_+_sdp_cone/logdet.jl +++ b/src/atoms/exp_+_sdp_cone/logdet.jl @@ -1,4 +1,5 @@ -import Base.logdet +import LinearAlgebra.logdet +export logdet struct LogDetAtom <: AbstractExpr head::Symbol diff --git a/src/atoms/exp_cone/entropy.jl b/src/atoms/exp_cone/entropy.jl index a8bb79f1e..f2a2aa20a 100644 --- a/src/atoms/exp_cone/entropy.jl +++ b/src/atoms/exp_cone/entropy.jl @@ -13,7 +13,7 @@ export sign, curvature, monotonicity, evaluate # TODO: make this work for a *list* of inputs, rather than just for scalar/vector/matrix inputs # Entropy atom: -xlogx entrywise -type EntropyAtom <: AbstractExpr +mutable struct EntropyAtom <: AbstractExpr head::Symbol id_hash::UInt64 children::Tuple{AbstractExpr} diff --git a/src/atoms/exp_cone/log.jl b/src/atoms/exp_cone/log.jl index 820a50915..a6b4ddec5 100644 --- a/src/atoms/exp_cone/log.jl +++ b/src/atoms/exp_cone/log.jl @@ -11,7 +11,7 @@ export sign, curvature, monotonicity, evaluate ### Logarithm -type LogAtom <: AbstractExpr +mutable struct LogAtom <: AbstractExpr head::Symbol id_hash::UInt64 children::Tuple{AbstractExpr} diff --git a/src/atoms/exp_cone/relative_entropy.jl b/src/atoms/exp_cone/relative_entropy.jl index 
875571daf..6bfbddcc4 100644 --- a/src/atoms/exp_cone/relative_entropy.jl +++ b/src/atoms/exp_cone/relative_entropy.jl @@ -41,7 +41,7 @@ end function evaluate(e::RelativeEntropyAtom) x = evaluate(e.children[1]) y = evaluate(e.children[2]) - if any(isnan.(y)) return Inf end + if any(isnan, y) return Inf end out = x.*log.(x./y) # fix value when x=0: diff --git a/src/atoms/sdp_cone/lambda_min_max.jl b/src/atoms/sdp_cone/lambda_min_max.jl index 2a9f66f37..85d757d2c 100644 --- a/src/atoms/sdp_cone/lambda_min_max.jl +++ b/src/atoms/sdp_cone/lambda_min_max.jl @@ -54,7 +54,7 @@ function conic_form!(x::LambdaMaxAtom, unique_conic_forms) A = x.children[1] m, n = size(A) t = Variable() - p = minimize(t, t*eye(n) - A ⪰ 0) + p = minimize(t, t*Matrix(1.0I, n, n) - A ⪰ 0) cache_conic_form!(unique_conic_forms, x, p) end return get_conic_form(unique_conic_forms, x) @@ -107,7 +107,7 @@ function conic_form!(x::LambdaMinAtom, unique_conic_forms) A = x.children[1] m, n = size(A) t = Variable() - p = maximize(t, A - t*eye(n) ⪰ 0) + p = maximize(t, A - t*Matrix(1.0I, n, n) ⪰ 0) cache_conic_form!(unique_conic_forms, x, p) end return get_conic_form(unique_conic_forms, x) diff --git a/src/atoms/sdp_cone/nuclearnorm.jl b/src/atoms/sdp_cone/nuclearnorm.jl index 0013420c4..57d716ed6 100644 --- a/src/atoms/sdp_cone/nuclearnorm.jl +++ b/src/atoms/sdp_cone/nuclearnorm.jl @@ -40,7 +40,7 @@ end nuclearnorm(x::AbstractExpr) = NuclearNormAtom(x) # Create the equivalent conic problem: -# minimize (trace(U) + trace(V))/2 +# minimize (tr(U) + tr(V))/2 # subject to # [U A; A' V] ⪰ 0 # see eg Recht, Fazel, Parillo 2008 "Guaranteed Minimum-Rank Solutions of Linear Matrix Equations via Nuclear Norm Minimization" @@ -51,7 +51,7 @@ function conic_form!(x::NuclearNormAtom, unique_conic_forms) m, n = size(A) U = Variable(m,m) V = Variable(n,n) - p = minimize(.5*(trace(U) + trace(V)), [U A; A' V] ⪰ 0) + p = minimize(.5*(tr(U) + tr(V)), [U A; A' V] ⪰ 0) cache_conic_form!(unique_conic_forms, x, p) end return get_conic_form(unique_conic_forms, x) diff --git a/src/atoms/sdp_cone/operatornorm.jl b/src/atoms/sdp_cone/operatornorm.jl index d247012e6..cfe4373c6 100644 --- a/src/atoms/sdp_cone/operatornorm.jl +++ b/src/atoms/sdp_cone/operatornorm.jl @@ -36,7 +36,7 @@ end # in julia, `norm` on matrices is the operator norm function evaluate(x::OperatorNormAtom) - norm(evaluate(x.children[1]), 2) + opnorm(evaluate(x.children[1]), 2) end operatornorm(x::AbstractExpr) = OperatorNormAtom(x) @@ -53,7 +53,7 @@ function conic_form!(x::OperatorNormAtom, unique_conic_forms) A = x.children[1] m, n = size(A) t = Variable() - p = minimize(t, [t*speye(m) A; A' t*speye(n)] ⪰ 0) + p = minimize(t, [t*sparse(1.0I, m, m) A; A' t*sparse(1.0I, n, n)] ⪰ 0) cache_conic_form!(unique_conic_forms, x, p) end return get_conic_form(unique_conic_forms, x) diff --git a/src/atoms/sdp_cone/sumlargesteigs.jl b/src/atoms/sdp_cone/sumlargesteigs.jl index b0554a460..8e4d1a921 100644 --- a/src/atoms/sdp_cone/sumlargesteigs.jl +++ b/src/atoms/sdp_cone/sumlargesteigs.jl @@ -5,7 +5,7 @@ # All expressions and atoms are subtypes of AbstractExpr. # Please read expressions.jl first. 
############################################################################# -import Base.eigvals +import LinearAlgebra.eigvals export sumlargesteigs ### sumlargesteigs @@ -61,8 +61,8 @@ function conic_form!(x::SumLargestEigs, unique_conic_forms) m, n = size(A) Z = Variable(n, n) s = Variable() - p = minimize(s*k + trace(Z), - Z + s*eye(n) - A ⪰ 0, + p = minimize(s*k + tr(Z), + Z + s*Matrix(1.0I, n, n) - A ⪰ 0, A ⪰ 0, Z ⪰ 0) cache_conic_form!(unique_conic_forms, x, p) end diff --git a/src/atoms/second_order_cone/norm.jl b/src/atoms/second_order_cone/norm.jl index 18f6b8560..616d5efd8 100755 --- a/src/atoms/second_order_cone/norm.jl +++ b/src/atoms/second_order_cone/norm.jl @@ -1,4 +1,4 @@ -import Base.norm, Base.vecnorm +import LinearAlgebra.norm export norm_inf, norm, norm_1, vecnorm # deprecate these soon @@ -9,7 +9,7 @@ norm_fro(x::AbstractExpr) = norm2(vec(x)) # behavior of norm should be consistent with julia: # * vector norms for vectors # * operator norms for matrices -function norm(x::AbstractExpr, p::Number=2) +function norm(x::AbstractExpr, p::Real=2) if length(size(x)) <= 1 || minimum(size(x))==1 # x is a vector if p == 1 diff --git a/src/atoms/second_order_cone/norm2.jl b/src/atoms/second_order_cone/norm2.jl index 67732a903..58a1bc313 100644 --- a/src/atoms/second_order_cone/norm2.jl +++ b/src/atoms/second_order_cone/norm2.jl @@ -4,7 +4,7 @@ # All expressions and atoms are subtpyes of AbstractExpr. # Please read expressions.jl first. ############################################################################# -import Base.vecnorm +import LinearAlgebra.norm2 export EucNormAtom, norm2, vecnorm export sign, monotonicity, curvature, conic_form! @@ -56,4 +56,4 @@ function norm2(x::AbstractExpr) else return EucNormAtom(x) end -end \ No newline at end of file +end diff --git a/src/atoms/second_order_cone/power_to_socp.jl b/src/atoms/second_order_cone/power_to_socp.jl index a96b8182c..9a16594f9 100644 --- a/src/atoms/second_order_cone/power_to_socp.jl +++ b/src/atoms/second_order_cone/power_to_socp.jl @@ -12,7 +12,6 @@ # This reduction is documented in the pdf available at # https://github.com/JuliaOpt/Convex.jl/raw/master/docs/supplementary/rational_to_socp.pdf -using Compat module psocp mutable struct InequalityExpression @@ -73,12 +72,12 @@ end function ProductToSimpleInequalities(first_power::Int, second_power::Int) # Construct the first InequalityExpression, which is an inequality # of the form x^n <= t^p1 s^p2. 
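# Illustrative sketch, not part of the patch: the identity-matrix and matrix-norm
# replacements used in the hunks above, shown on plain data. `eye`/`speye` were
# removed in Julia 0.7, and `norm` on a matrix now means the vectorized norm.
using LinearAlgebra, SparseArrays

n = 3
Matrix(1.0I, n, n)     # dense identity, replaces eye(n)
sparse(1.0I, n, n)     # sparse identity, replaces speye(n)
A = [3.0 0.0; 0.0 4.0]
opnorm(A, 2)           # operator (spectral) norm, replaces norm(A, 2): gives 4.0
norm(A, 2)             # now the norm of vec(A) (Frobenius for p=2): gives 5.0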
- assert(first_power > 0 && second_power > 0); + @assert first_power > 0 && second_power > 0; n = first_power + second_power; var_list = [1, 2, 3]; init_inequality = InequalityExpression(first_power, second_power, 0, 1, 2, 3, -1); - return ReducePowers(init_inequality, Array{SimpleInequalityExpression}(0), + return ReducePowers(init_inequality, Array{SimpleInequalityExpression}(undef, 0), var_list); end @@ -108,8 +107,8 @@ function ReducePowers(curr_inequality::InequalityExpression, p2 = curr_inequality.power2; p3 = curr_inequality.power3; n = (p1 + p2 + p3); - assert(p1 >= 1); # , "Must have at least 1 on first power"); - assert(p2 >= 1); # , "Must have at least 1 on second power"); + @assert p1 >= 1; # , "Must have at least 1 on first power"); + @assert p2 >= 1; # , "Must have at least 1 on second power"); # Evaluate cases for variables if (p3 == 0) # Double check if we have p1 == 1, p2 == 1 @@ -181,8 +180,8 @@ function ReduceThirdZero(curr_inequality::InequalityExpression, p2 = curr_inequality.power2; p3 = curr_inequality.power3; n = (p1 + p2); - assert(p3 == 0); - assert(n >= 2); + @assert p3 == 0; + @assert n >= 2; if (mod(n, 2) == 0) # n is even, so check even-ness of power1, power2 if (p1 == p2) @@ -261,8 +260,8 @@ function ReduceThirdOne(curr_inequality::InequalityExpression, p2 = curr_inequality.power2; p3 = curr_inequality.power3; n = (p1 + p2 + p3); - assert(p3 == 1); # , "Must have third power 1"); - assert(n >= 3); # "Must have power n >= 3"); + @assert p3 == 1; # , "Must have third power 1"); + @assert n >= 3; # "Must have power n >= 3"); if (mod(n, 2) == 0) # Exactly one of p1, p2 is odd, find it and reduce if (mod(p1, 2) == 1) @@ -367,9 +366,9 @@ function ReduceThirdTwo(curr_inequality::InequalityExpression, p2 = curr_inequality.power2; p3 = curr_inequality.power3; n = (p1 + p2 + p3); - assert(p3 == 2); # , "Must have third power 2"); - assert(n >= 6); # , "Must have power n >= 6"); - assert(p1 >= 2 && p2 >= 2); # , "Did not rearrange powers properly"); + @assert p3 == 2; # , "Must have third power 2"); + @assert n >= 6; # , "Must have power n >= 6"); + @assert p1 >= 2 && p2 >= 2; # , "Did not rearrange powers properly"); if (mod(n, 2) == 0) # Either both p1, p2 are even or both are odd if (mod(p1, 2) == 0) diff --git a/src/atoms/second_order_cone/qol_elementwise.jl b/src/atoms/second_order_cone/qol_elementwise.jl index 2150815e2..6d7d268d8 100644 --- a/src/atoms/second_order_cone/qol_elementwise.jl +++ b/src/atoms/second_order_cone/qol_elementwise.jl @@ -1,6 +1,6 @@ -import Base.broadcast +import Base.Broadcast.broadcasted export QolElemAtom, qol_elementwise, square, sumsquares, invpos, / -export sign, monotonicity, curvature, conic_form!, broadcast +export sign, monotonicity, curvature, conic_form! struct QolElemAtom <: AbstractExpr head::Symbol @@ -49,16 +49,16 @@ end qol_elementwise(x::AbstractExpr, y::AbstractExpr) = QolElemAtom(x, y) -broadcast(::typeof(^),x::AbstractExpr,k::Int) = k==2 ? QolElemAtom(x, Constant(ones(x.size[1], x.size[2]))) : error("raising variables to powers other than 2 is not implemented") +broadcasted(::typeof(^),x::AbstractExpr,k::Int) = k==2 ? 
QolElemAtom(x, Constant(ones(x.size[1], x.size[2]))) : error("raising variables to powers other than 2 is not implemented") invpos(x::AbstractExpr) = QolElemAtom(Constant(ones(x.size[1], x.size[2])), x) -broadcast(::typeof(/), x::Value, y::AbstractExpr) = DotMultiplyAtom(Constant(x), invpos(y)) +broadcasted(::typeof(/), x::Value, y::AbstractExpr) = DotMultiplyAtom(Constant(x), invpos(y)) /(x::Value, y::AbstractExpr) = size(y) == (1,1) ? MultiplyAtom(Constant(x), invpos(y)) : error("cannot divide by a variable of size $(size(y))") sumsquares(x::AbstractExpr) = square(norm2(x)) function square(x::AbstractExpr) if sign(x) == ComplexSign() - error(warn("Square of complex number is not DCP. Did you mean square_modulus?")) + error("Square of complex number is not DCP. Did you mean square_modulus?") else QolElemAtom(x, Constant(ones(x.size[1], x.size[2]))) end diff --git a/src/atoms/second_order_cone/quadform.jl b/src/atoms/second_order_cone/quadform.jl index 91ba6ad59..6786efbf0 100644 --- a/src/atoms/second_order_cone/quadform.jl +++ b/src/atoms/second_order_cone/quadform.jl @@ -11,7 +11,7 @@ function quadform(x::AbstractExpr, A::Value) if !issymmetric(A) error("Quadratic form only defined for symmetric matrices") end - V = eigvals(Symmetric(full(A))) + V = eigvals(Symmetric(Matrix(A))) if all(V .>= 0) factor = 1 @@ -21,6 +21,6 @@ function quadform(x::AbstractExpr, A::Value) error("Quadratic forms supported only for semidefinite matrices") end - P = real(sqrtm(full(factor * A))) + P = real(sqrt(Matrix(factor * A))) return factor * square(norm2(P * x)) end diff --git a/src/atoms/second_order_cone/rationalnorm.jl b/src/atoms/second_order_cone/rationalnorm.jl index 164ede792..b076f38e3 100644 --- a/src/atoms/second_order_cone/rationalnorm.jl +++ b/src/atoms/second_order_cone/rationalnorm.jl @@ -86,10 +86,10 @@ function conic_form!(x::RationalNormAtom, unique_conic_forms) var_list) = psocp.ProductToSimpleInequalities(denom, num - denom); if (length(ineq_list) > 10) - warn(string("Rational norm generating ", length(ineq_list), + @warn string("Rational norm generating ", length(ineq_list), " intermediate constraints.\n\tIncreasing ", ":max_iters or decreasing solver tolerance\n\tmay give ", - "more accurate solutions")); + "more accurate solutions") end # u corresponds to "introduced" variables; make a matrix of them # and then add equality constraints for the first and second diff --git a/src/conic_form.jl b/src/conic_form.jl index 338709886..0b8dfe9e4 100644 --- a/src/conic_form.jl +++ b/src/conic_form.jl @@ -73,8 +73,8 @@ end function promote_size(c::ConicObj, vectorized_size::Int) new_obj = copy(c) for var in keys(new_obj) - x1 = repmat(new_obj[var][1], vectorized_size, 1) - x2 = repmat(new_obj[var][2], vectorized_size, 1) + x1 = repeat(new_obj[var][1], vectorized_size, 1) + x2 = repeat(new_obj[var][2], vectorized_size, 1) new_obj[var] = (x1,x2) end return new_obj diff --git a/src/constant.jl b/src/constant.jl index 389ad5469..4480455aa 100644 --- a/src/constant.jl +++ b/src/constant.jl @@ -15,7 +15,7 @@ struct Constant <: AbstractExpr function Constant(x::Value, sign::Sign) sz = (size(x, 1), size(x, 2)) - return new(:constant, object_id(x), x, sz, ConstVexity(), sign) + return new(:constant, objectid(x), x, sz, ConstVexity(), sign) end function Constant(x::Value, check_sign::Bool=true) @@ -61,7 +61,7 @@ function conic_form!(x::Constant, unique_conic_forms::UniqueConicForms=UniqueCon #real_Value = real_conic_form(x) #imag_Value = imag_conic_form(x) objective = ConicObj() - 
objective[object_id(:constant)] = (real_conic_form(x), imag_conic_form(x)) + objective[objectid(:constant)] = (real_conic_form(x), imag_conic_form(x)) cache_conic_form!(unique_conic_forms, x, objective) end return get_conic_form(unique_conic_forms, x) diff --git a/src/constraints/constraints.jl b/src/constraints/constraints.jl index c64046a5d..0346f83da 100644 --- a/src/constraints/constraints.jl +++ b/src/constraints/constraints.jl @@ -5,7 +5,7 @@ export ==, <=, >= conic_constr_to_constr = Dict{ConicConstr, Constraint}() ### Linear equality constraint -type EqConstraint <: Constraint +mutable struct EqConstraint <: Constraint head::Symbol id_hash::UInt64 lhs::AbstractExpr @@ -62,7 +62,7 @@ end ### Linear inequality constraints -type LtConstraint <: Constraint +mutable struct LtConstraint <: Constraint head::Symbol id_hash::UInt64 lhs::AbstractExpr @@ -114,7 +114,7 @@ end <(lhs::Value, rhs::AbstractExpr) = <=(Constant(lhs), rhs) -type GtConstraint <: Constraint +mutable struct GtConstraint <: Constraint head::Symbol id_hash::UInt64 lhs::AbstractExpr @@ -165,12 +165,12 @@ end >(lhs::AbstractExpr, rhs::Value) = >=(lhs, Constant(rhs)) >(lhs::Value, rhs::AbstractExpr) = >=(Constant(lhs), rhs) -function +{T<:Constraint, T2<:Constraint}(constraints_one::Array{T}, constraints_two::Array{T2}) +function +(constraints_one::Array{<:Constraint}, constraints_two::Array{<:Constraint}) constraints = append!(Constraint[], constraints_one) return append!(constraints, constraints_two) end +(constraint_one::Constraint, constraint_two::Constraint) = [constraint_one] + [constraint_two] -+{T<:Constraint}(constraint_one::Constraint, constraints_two::Array{T}) = ++(constraint_one::Constraint, constraints_two::Array{<:Constraint}) = [constraint_one] + constraints_two -+{T<:Constraint}(constraints_one::Array{T}, constraint_two::Constraint) = ++(constraints_one::Array{<:Constraint}, constraint_two::Constraint) = constraints_one + [constraint_two] diff --git a/src/constraints/exp_constraints.jl b/src/constraints/exp_constraints.jl index 344d27156..c3a6c237a 100644 --- a/src/constraints/exp_constraints.jl +++ b/src/constraints/exp_constraints.jl @@ -41,7 +41,7 @@ function conic_form!(c::ExpConstraint, unique_conic_forms::UniqueConicForms=Uniq if !has_conic_form(unique_conic_forms, c) conic_constrs = ConicConstr[] if c.size == (1, 1) - objectives = Array{ConicObj}(3) + objectives = Array{ConicObj}(undef, 3) for iobj=1:3 objectives[iobj] = conic_form!(c.children[iobj], unique_conic_forms) end @@ -49,7 +49,7 @@ function conic_form!(c::ExpConstraint, unique_conic_forms::UniqueConicForms=Uniq else for i=1:c.size[1] for j=1:c.size[2] - objectives = Array{ConicObj}(3) + objectives = Array{ConicObj}(undef, 3) for iobj=1:3 objectives[iobj] = conic_form!(c.children[iobj][i,j], unique_conic_forms) end diff --git a/src/constraints/sdp_constraints.jl b/src/constraints/sdp_constraints.jl index d8694b47a..bad972e38 100644 --- a/src/constraints/sdp_constraints.jl +++ b/src/constraints/sdp_constraints.jl @@ -1,4 +1,5 @@ -import Base.isposdef, Base.in +import LinearAlgebra.isposdef +import Base.in export SDPConstraint, isposdef, in, ⪰, ⪯ ### Positive semidefinite cone constraint @@ -47,10 +48,10 @@ function conic_form!(c::SDPConstraint, unique_conic_forms::UniqueConicForms=Uniq # symmetry => c.child[upperpart] # scale off-diagonal elements by sqrt(2) rescale = sqrt(2)*tril(ones(n,n)) - rescale[find(diagm(ones(n)))] = 1.0 - diagandlowerpart = find(rescale) - lowerpart = Array{Int}(div(n*(n-1),2)) - upperpart = Array{Int}(div(n*(n-1),2)) 
+ rescale[diagind(n, n)] .= 1.0 + diagandlowerpart = findall(!iszero, vec(rescale)) + lowerpart = Array{Int}(undef, div(n*(n-1),2)) + upperpart = Array{Int}(undef, div(n*(n-1),2)) klower = 0 # diagandlowerpart in column-major order: # ie the (1,1), (2,1), ..., (n,1), (2,2), (3,2), ... diff --git a/src/constraints/soc_constraints.jl b/src/constraints/soc_constraints.jl index 33c350d67..b13cc4ed3 100644 --- a/src/constraints/soc_constraints.jl +++ b/src/constraints/soc_constraints.jl @@ -18,7 +18,7 @@ end function conic_form!(c::SOCConstraint, unique_conic_forms::UniqueConicForms=UniqueConicForms()) if !has_conic_form(unique_conic_forms, c) - objectives = Array{ConicObj}(length(c.children)) + objectives = Array{ConicObj}(undef, length(c.children)) for iobj=1:length(c.children) objectives[iobj] = conic_form!(c.children[iobj], unique_conic_forms) end @@ -47,8 +47,8 @@ function conic_form!(c::SOCElemConstraint, unique_conic_forms::UniqueConicForms= if !has_conic_form(unique_conic_forms, c) num_constrs = get_vectorized_size(c.children[1]) num_children = length(c.children) - conic_constrs = Array{ConicConstr}(num_constrs) - objectives = Array{ConicObj}(num_children) + conic_constrs = Array{ConicConstr}(undef, num_constrs) + objectives = Array{ConicObj}(undef, num_children) for iobj = 1:num_children objectives[iobj] = conic_form!(c.children[iobj], unique_conic_forms) end diff --git a/src/dcp.jl b/src/dcp.jl index 5293664cb..f2b8dfcb0 100644 --- a/src/dcp.jl +++ b/src/dcp.jl @@ -10,14 +10,14 @@ # http://web.stanford.edu/~boyd/papers/disc_cvx_prog.html ############################################################################# -import Base.-, Base.+, Base.* +import Base.-, Base.+, Base.*, Base./ export Vexity, ConstVexity, AffineVexity, ConvexVexity, ConcaveVexity, NotDcp export Monotonicity, Nonincreasing, Nondecreasing, NoMonotonicity export Sign, Positive, Negative, NoSign, ComplexSign export -, +, * # Vexity subtypes -@compat abstract type Vexity end +abstract type Vexity end struct ConstVexity <: Vexity end struct AffineVexity <: Vexity end struct ConvexVexity <: Vexity end @@ -25,20 +25,20 @@ struct ConcaveVexity <: Vexity end struct NotDcp <: Vexity function NotDcp() - warn("Expression not DCP compliant. Trying to solve non-DCP compliant problems can lead to unexpected behavior.") - return new() + @warn "Expression not DCP compliant. Trying to solve non-DCP compliant problems can lead to unexpected behavior." + return new() end end # Monotonocity subtypes -@compat abstract type Monotonicity end +abstract type Monotonicity end struct Nonincreasing <: Monotonicity end struct Nondecreasing <: Monotonicity end struct ConstMonotonicity <: Monotonicity end struct NoMonotonicity <: Monotonicity end # Sign subtypes -@compat abstract type Sign end +abstract type Sign end struct Positive <: Sign end struct Negative <: Sign end struct NoSign <: Sign end diff --git a/src/expressions.jl b/src/expressions.jl index 711dd5695..a3b4c0bb8 100644 --- a/src/expressions.jl +++ b/src/expressions.jl @@ -28,17 +28,17 @@ # ############################################################################# -import Base.sign, Base.size, Base.length, Base.endof, Base.ndims, Base.convert +import Base.sign, Base.size, Base.length, Base.lastindex, Base.ndims, Base.convert export AbstractExpr, Constraint export vexity, sign, size, evaluate, monotonicity, curvature, length, convert export conic_form! 
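# Illustrative sketch, not part of the patch: the Julia 0.6 -> 0.7 renames these hunks
# rely on, shown on plain data.
using LinearAlgebra

v = [0.0, 2.0, 0.0, 3.0]
findall(!iszero, v)           # was find(v): returns [2, 4]
lastindex(v)                  # was endof(v): returns 4
objectid(:constant)           # was object_id(:constant)
buf = Array{Int}(undef, 3)    # was Array{Int}(3); explicitly uninitialized
R = ones(3, 3); R[diagind(R)] .= 0.0   # diagind gives the main-diagonal indices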
-export endof, ndims +export lastindex, ndims export Value, ValueOrNothing export get_vectorized_size ### Abstract types -@compat abstract type AbstractExpr end -@compat abstract type Constraint end +abstract type AbstractExpr end +abstract type Constraint end # Override hash function because of # https://github.com/JuliaLang/julia/issues/10267 @@ -100,7 +100,7 @@ end ### User-defined Unions const Value = Union{Number, AbstractArray} -const ValueOrNothing = Union{Value, Void} +const ValueOrNothing = Union{Value, Nothing} const AbstractExprOrValue = Union{AbstractExpr, Value} convert(::Type{AbstractExpr}, x::Value) = Constant(x) @@ -118,4 +118,4 @@ end ndims(x::AbstractExpr) = 2 get_vectorized_size(x::AbstractExpr) = reduce(*, size(x)) -endof(x::AbstractExpr) = get_vectorized_size(x) +lastindex(x::AbstractExpr) = get_vectorized_size(x) diff --git a/src/problems.jl b/src/problems.jl index c55fad269..71d7deb42 100644 --- a/src/problems.jl +++ b/src/problems.jl @@ -3,7 +3,7 @@ import MathProgBase export Problem, Solution, minimize, maximize, satisfy, add_constraint!, add_constraints! export Float64OrNothing -const Float64OrNothing = Union{Float64, Void} +const Float64OrNothing = Union{Float64, Nothing} # TODO: Cleanup mutable struct Solution{T<:Number} @@ -33,14 +33,14 @@ mutable struct Problem if sign(objective)== Convex.ComplexSign() error("Objective can not be a complex expression") else - return new(head, objective, constraints, "not yet solved", nothing, model) + return new(head, objective, constraints, Symbol("not yet solved"), nothing, model) end end end # constructor if model is not specified function Problem(head::Symbol, objective::AbstractExpr, constraints::Array=Constraint[], - solver::MathProgBase.AbstractMathProgSolver = get_default_solver()) + solver::MathProgBase.AbstractMathProgSolver=get_default_solver()) Problem(head, objective, MathProgBase.ConicModel(solver), constraints) end @@ -57,7 +57,7 @@ function find_variable_ranges(constraints) for constraint in constraints for i = 1:length(constraint.objs) for (id, val) in constraint.objs[i] - if !haskey(var_to_ranges, id) && id != object_id(:constant) + if !haskey(var_to_ranges, id) && id != objectid(:constant) var = id_to_variables[id] if var.sign == ComplexSign() var_to_ranges[id] = (index + 1, index + 2*get_vectorized_size(var)) @@ -81,12 +81,12 @@ function vexity(p::Problem) if p.head == :maximize obj_vex = -obj_vex end - typeof(obj_vex) in bad_vex && warn("Problem not DCP compliant: objective is not DCP") + typeof(obj_vex) in bad_vex && @warn "Problem not DCP compliant: objective is not DCP" constr_vex = ConstVexity() for i in 1:length(p.constraints) vex = vexity(p.constraints[i]) - typeof(vex) in bad_vex && warn("Problem not DCP compliant: constraint $i is not DCP") + typeof(vex) in bad_vex && @warn "Problem not DCP compliant: constraint $i is not DCP" constr_vex += vex end problem_vex = obj_vex + constr_vex @@ -131,7 +131,7 @@ function conic_problem(p::Problem) var_size, constr_size, var_to_ranges = find_variable_ranges(constraints) c = spzeros(var_size, 1) objective_range = var_to_ranges[objective_var_id] - c[objective_range[1]:objective_range[2]] = 1 + c[objective_range[1]:objective_range[2]] .= 1 # slot in all of the coefficients in the conic forms into A and b A = spzeros(constr_size, var_size) @@ -143,7 +143,7 @@ function conic_problem(p::Problem) for i = 1:length(constraint.objs) sz = constraint.sizes[i] for (id, val) in constraint.objs[i] - if id == object_id(:constant) + if id == objectid(:constant) for l in 1:sz 
b[constr_index + l] = val[1][l]==0 ? val[2][l] : val[1][l] end @@ -195,31 +195,31 @@ Problem(head::Symbol, objective::AbstractExpr, constraints::Constraint...) = # Allow users to simply type minimize minimize(objective::AbstractExpr, constraints::Constraint...) = Problem(:minimize, objective, collect(constraints)) -minimize(objective::AbstractExpr, constraints::Array{T}=Constraint[]) where {T<:Constraint} = +minimize(objective::AbstractExpr, constraints::Array{<:Constraint}=Constraint[]) = Problem(:minimize, objective, constraints) minimize(objective::Value, constraints::Constraint...) = minimize(convert(AbstractExpr, objective), collect(constraints)) -minimize(objective::Value, constraints::Array{T}=Constraint[]) where {T<:Constraint} = +minimize(objective::Value, constraints::Array{<:Constraint}=Constraint[]) = minimize(convert(AbstractExpr, objective), constraints) # Allow users to simply type maximize maximize(objective::AbstractExpr, constraints::Constraint...) = Problem(:maximize, objective, collect(constraints)) -maximize(objective::AbstractExpr, constraints::Array{T}=Constraint[]) where {T<:Constraint} = +maximize(objective::AbstractExpr, constraints::Array{<:Constraint}=Constraint[]) = Problem(:maximize, objective, constraints) maximize(objective::Value, constraints::Constraint...) = maximize(convert(AbstractExpr, objective), collect(constraints)) -maximize(objective::Value, constraints::Array{T}=Constraint[]) where {T<:Constraint} = +maximize(objective::Value, constraints::Array{<:Constraint}=Constraint[]) = maximize(convert(AbstractExpr, objective), constraints) # Allow users to simply type satisfy (if there is no objective) satisfy(constraints::Constraint...) = Problem(:minimize, Constant(0), [constraints...]) -satisfy(constraints::Array{T}=Constraint[]) where {T<:Constraint} = +satisfy(constraints::Array{<:Constraint}=Constraint[]) = Problem(:minimize, Constant(0), constraints) satisfy(constraint::Constraint) = satisfy([constraint]) # +(constraints, constraints) is defined in constraints.jl -add_constraints!(p::Problem, constraints::Array{T}) where {T<:Constraint} = +add_constraints!(p::Problem, constraints::Array{<:Constraint}) = +(p.constraints, constraints) add_constraints!(p::Problem, constraint::Constraint) = add_constraints!(p, [constraint]) add_constraint! = add_constraints! diff --git a/src/solution.jl b/src/solution.jl index ed43da18f..e0ba5dc46 100644 --- a/src/solution.jl +++ b/src/solution.jl @@ -37,7 +37,7 @@ function solve!(problem::Problem; # and the primal (and possibly dual) variables with values populate_solution!(m, problem, var_to_ranges, conic_constraints) if !(problem.status==:Optimal) && verbose - warn("Problem status $(problem.status); solution may be inaccurate.") + @warn "Problem status $(problem.status); solution may be inaccurate." end end @@ -50,15 +50,15 @@ function set_warmstart!(m::MathProgBase.AbstractConicModel, try primal = problem.solution.primal catch - warn("Unable to use cached solution to warmstart problem. + @warn "Unable to use cached solution to warmstart problem. (Perhaps this is the first time you're solving this problem?) - Warmstart may be ineffective.") + Warmstart may be ineffective." primal = zeros(n) end if !(length(primal) == n) - warn("Unable to use cached solution to warmstart problem. + @warn "Unable to use cached solution to warmstart problem. (Perhaps the number of variables or constraints in the problem have changed since you last solved it?) - Warmstart may be ineffective.") + Warmstart may be ineffective." 
primal = zeros(n) end @@ -69,9 +69,9 @@ function set_warmstart!(m::MathProgBase.AbstractConicModel, try MathProgBase.setwarmstart!(m, primal) catch - warn("Unable to warmstart solution. + @warn "Unable to warmstart solution. (Perhaps the solver doesn't support warm starts?) - Using a cold start instead.") + Using a cold start instead." end m end @@ -79,8 +79,7 @@ end function load_problem!(m::MathProgBase.AbstractConicModel, c, A, b, cones, vartypes) # no conic constraints on variables var_cones = fill((:Free, 1:size(A, 2)),1) - # TODO: Get rid of full once c and b are not sparse - MathProgBase.loadproblem!(m, vec(full(c)), A, vec(full(b)), cones, var_cones) + MathProgBase.loadproblem!(m, vec(Array(c)), A, vec(Array(b)), cones, var_cones) # add integer and binary constraints on variables if !all(Bool[t==:Cont for t in vartypes]) @@ -115,7 +114,7 @@ function populate_solution!(m::MathProgBase.AbstractConicModel, NaN end - if any(isnan.(dual)) + if any(isnan, dual) problem.solution = Solution(solution, MathProgBase.status(m), objective) else problem.solution = Solution(solution, dual, MathProgBase.status(m), objective) @@ -179,7 +178,7 @@ function load_primal_solution!(primal::Array{Float64,1}, var_to_ranges::Dict{UIn end end -function populate_duals!{T}(constraints::Array{ConicConstr}, dual::Array{T, 1}) +function populate_duals!(constraints::Array{ConicConstr}, dual::Vector) constr_index = 1 for constraint in constraints # conic_constr_to_constr only has keys for conic constraints with a single objective diff --git a/src/solver_info.jl b/src/solver_info.jl index 163b3ff57..b756f47a5 100644 --- a/src/solver_info.jl +++ b/src/solver_info.jl @@ -1,6 +1,7 @@ +using Pkg import MathProgBase export can_solve_mip, can_solve_socp, can_solve_sdp, can_solve_exp -export set_default_solver, get_default_solver +export set_default_solver, get_default_solver, isinstalled function set_default_solver(solver::MathProgBase.AbstractMathProgSolver) global DEFAULT_SOLVER @@ -19,37 +20,35 @@ function get_default_solver() end # TODO: I have not listed solvers such as CPLEX etc because I have not tested Convex with them -solvers = [("SCS", "SCSSolver"), ("ECOS", "ECOSSolver"), ("Gurobi", "GurobiSolver"), ("Mosek", "MosekSolver"), +solvers = [("ECOS", "ECOSSolver"), ("SCS", "SCSSolver"), ("Gurobi", "GurobiSolver"), ("Mosek", "MosekSolver"), ("GLPKMathProgInterface", "GLPKSolverMIP")] function isinstalled(pkg) - if isdir(Pkg.dir(pkg)); return true; end - for path in Base.LOAD_PATH - if isdir(joinpath(path, pkg)); return true; end + for path in Base.DEPOT_PATH + if isdir(joinpath(path, pkg)) || isdir(joinpath(path, "packages", pkg)) + return true + end end return false end for (dir, solver) in solvers if isinstalled(dir) && DEFAULT_SOLVER == nothing - eval(parse("using "*dir)) - eval(parse("set_default_solver("*solver*"())")) + eval(Meta.parse("using $dir")) + eval(Meta.parse("set_default_solver($solver())")) end end if get_default_solver() == nothing - packages = "" - for (dir, solver) in solvers - packages = packages*dir*" | " - end - warn("*********************************************************************************************** + packages = join([dir for (dir, solver) in solvers], " | ") + @warn "*********************************************************************************************** You don't have any of "*packages*" installed. You must have at least one of these solvers. You can install a solver such as SCS by running: Pkg.add(\"SCS\") You will have to restart Julia after that. 
- ***********************************************************************************************") + ***********************************************************************************************" end function can_solve_mip(solver) @@ -57,7 +56,7 @@ function can_solve_mip(solver) if name == :GurobiSolver || name == :MosekSolver || name == :GLPKSolverMIP || name == :CPLEXSolver || name == :CbcSolver return true else - info("$name cannot solve mixed integer programs. Consider using Gurobi, Mosek, or GLPK.") + @info "$name cannot solve mixed integer programs. Consider using Gurobi, Mosek, or GLPK." return false end end @@ -67,7 +66,7 @@ function can_solve_socp(solver) return true else name = typeof(solver).name.name - info("$name cannot solve second order cone programs. Consider using SCS, ECOS, Mosek, or Gurobi.") + @info "$name cannot solve second order cone programs. Consider using SCS, ECOS, Mosek, or Gurobi." return false end end @@ -77,7 +76,7 @@ function can_solve_exp(solver) return true else name = typeof(solver).name.name - info("$name cannot solve exponential programs. Consider using SCS or ECOS.") + @info "$name cannot solve exponential programs. Consider using SCS or ECOS." return false end end @@ -87,7 +86,7 @@ function can_solve_sdp(solver) return true else name = typeof(solver).name.name - info("$name cannot solve semidefinite programs. Consider using SCS or Mosek.") + @info "$name cannot solve semidefinite programs. Consider using SCS or Mosek." return false end end diff --git a/src/utilities/broadcast.jl b/src/utilities/broadcast.jl index 51628ddd4..c5508b0ef 100644 --- a/src/utilities/broadcast.jl +++ b/src/utilities/broadcast.jl @@ -1,8 +1,7 @@ # This file adds a new syntax to call elementwise operations on Convex expressions # because overloading broadcast no longer works in Julia v0.6 -import Base: dot - +import LinearAlgebra.dot export dot # multiplication diff --git a/src/utilities/deprecated.jl b/src/utilities/deprecated.jl deleted file mode 100644 index 8a9f31e51..000000000 --- a/src/utilities/deprecated.jl +++ /dev/null @@ -1,35 +0,0 @@ -using Base.depwarn - -# Remove underscores - -# SOC atoms -Base.@deprecate geo_mean geomean -Base.@deprecate norm_2 norm2 -Base.@deprecate quad_form quadform -Base.@deprecate quad_over_lin quadoverlin -Base.@deprecate rational_norm rationalnorm -Base.@deprecate sum_squares sumsquares -Base.@deprecate square_modulus squaremodulus - -# SDP atoms -Base.@deprecate lambda_max lambdamax -Base.@deprecate lambda_min lambdamin -Base.@deprecate matrix_frac matrixfrac -Base.@deprecate nuclear_norm nuclearnorm -Base.@deprecate operator_norm operatornorm - -# other atoms -Base.@deprecate dot_sort dotsort -Base.@deprecate sum_largest sumlargest -Base.@deprecate sum_smallest sumsmallest - -# broadcasting -if VERSION length(x)) \ No newline at end of file +function iterate(x::Variable, (el, s)=(x[1], 0)) + return s >= length(x) ? 
nothing : (x[s+1], (x[s+1], s+1)) end
diff --git a/src/utilities/show.jl b/src/utilities/show.jl
index f6fa95c79..785bfdd13 100644
--- a/src/utilities/show.jl
+++ b/src/utilities/show.jl
@@ -81,6 +81,6 @@ function show(io::IO, p::Problem)
     join(io, p.constraints, "\n\t\t")
     print(io, "\ncurrent status: $(p.status)")
     if p.status == "solved"
-        print(io, " with optimal value of $(round(p.optval, 4))")
+        print(io, " with optimal value of $(round(p.optval, digits=4))")
     end
 end
diff --git a/src/variable.jl b/src/variable.jl
index a1985fe70..dddf861f5 100644
--- a/src/variable.jl
+++ b/src/variable.jl
@@ -18,7 +18,7 @@ mutable struct Variable <: AbstractExpr

   function Variable(size::Tuple{Int, Int}, sign::Sign=NoSign(), sets::Symbol...)
     this = new(:variable, 0, nothing, size, AffineVexity(), sign, Symbol[sets...])
-    this.id_hash = object_id(this)
+    this.id_hash = objectid(this)
     id_to_variables[this.id_hash] = this
     return this
   end
@@ -77,13 +77,13 @@ end

 function real_conic_form(x::Variable)
   vec_size = get_vectorized_size(x)
-  return speye(vec_size)
+  return sparse(1.0I, vec_size, vec_size)
 end

 function imag_conic_form(x::Variable)
   vec_size = get_vectorized_size(x)
   if x.sign == ComplexSign()
-    return im*speye(vec_size)
+    return im*sparse(1.0I, vec_size, vec_size)
   else
     return spzeros(vec_size, vec_size)
   end
@@ -94,14 +94,14 @@ function conic_form!(x::Variable, unique_conic_forms::UniqueConicForms=UniqueCon
   if :fixed in x.sets
     # do exactly what we would for a constant
     objective = ConicObj()
-    objective[object_id(:constant)] = (vec([real(x.value);]),vec([imag(x.value);]))
+    objective[objectid(:constant)] = (vec([real(x.value);]),vec([imag(x.value);]))
     cache_conic_form!(unique_conic_forms, x, objective)
   else
     objective = ConicObj()
     vec_size = get_vectorized_size(x)
     objective[x.id_hash] = (real_conic_form(x), imag_conic_form(x))
-    objective[object_id(:constant)] = (spzeros(vec_size, 1), spzeros(vec_size, 1))
+    objective[objectid(:constant)] = (spzeros(vec_size, 1), spzeros(vec_size, 1))
     # placeholder values in unique constraints prevent infinite recursion depth
     cache_conic_form!(unique_conic_forms, x, objective)
     if !(x.sign == NoSign() || x.sign == ComplexSign())
@@ -122,12 +122,13 @@ function fix!(x::Variable)
   x.vexity = ConstVexity()
   x
 end
-function fix!(x::Variable, v)
-  # TODO: check sizes match
-  x.value = Array{Float64}(size(x))
-  x.value[:] = v
+function fix!(x::Variable, v::AbstractArray)
+  size(x) == size(v) || throw(DimensionMismatch("Variable and value sizes do not match!"))
+  x.value = Array{Float64}(undef, size(x))
+  x.value[:,:] = v
   fix!(x)
 end
+fix!(x::Variable, v::Number) = fix!(x, fill(v, (1, 1)))

 function free!(x::Variable)
   # TODO this won't work if :fixed appears other than at the end of x.sets
diff --git a/test/REQUIRE b/test/REQUIRE
index b57ce3d03..15f2da5bd 100644
--- a/test/REQUIRE
+++ b/test/REQUIRE
@@ -1,3 +1,3 @@
 GLPKMathProgInterface
 ECOS
-SCS 0.3.0 0.4.0
+SCS 0.4.0
diff --git a/test/runtests.jl b/test/runtests.jl
index 4f8482a45..30abfd656 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,41 +1,32 @@
 using Convex
-using Base.Test
+using Test
+using ECOS
+using SCS
+using GLPKMathProgInterface
+using Random

 # Seed random number stream to improve test reliability
-srand(2)
+Random.seed!(2)

 solvers = Any[]
-if isdir(Pkg.dir("ECOS"))
-    using ECOS
-    push!(solvers, ECOSSolver(verbose=0))
-end
-
-if isdir(Pkg.dir("SCS"))
-    using SCS
-    push!(solvers, SCSSolver(verbose=0, eps=1e-5))
-end
+push!(solvers, ECOSSolver(verbose=0))
+push!(solvers, GLPKSolverMIP())
+push!(solvers,
SCSSolver(verbose=0, eps=1e-6)) -if isdir(Pkg.dir("Gurobi")) +if isinstalled("Gurobi") using Gurobi push!(solvers, GurobiSolver(OutputFlag=0)) end -if isdir(Pkg.dir("Mosek")) +if isinstalled("Mosek") using Mosek push!(solvers, MosekSolver(LOG=0)) end -if isdir(Pkg.dir("GLPK")) && isdir(Pkg.dir("GLPKMathProgInterface")) - using GLPKMathProgInterface - push!(solvers, GLPKSolverMIP()) -end - - for solver in solvers println("Running tests with $(solver):") set_default_solver(solver) println(get_default_solver()) include("runtests_single_solver.jl") end - diff --git a/test/runtests_single_solver.jl b/test/runtests_single_solver.jl index de0ce5048..4c49b080c 100644 --- a/test/runtests_single_solver.jl +++ b/test/runtests_single_solver.jl @@ -1,5 +1,5 @@ using Convex -using Base.Test +using Test tests = ["test_utilities.jl", "test_affine.jl", @@ -13,53 +13,49 @@ tests_complex = ["test_complex.jl"] println("Running tests:") -# The following syntax can be used to solve it using other solvers -# using Gurobi -# set_default_solver(GurobiSolver()) - for curtest in tests - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end if can_solve_socp(get_default_solver()) for curtest in tests_socp - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end end if can_solve_sdp(get_default_solver()) for curtest in tests_sdp - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end end if can_solve_exp(get_default_solver()) for curtest in tests_exp - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end end if can_solve_sdp(get_default_solver()) && can_solve_exp(get_default_solver()) for curtest in tests_exp_and_sdp - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end end if can_solve_mip(get_default_solver()) for curtest in tests_int - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end end if can_solve_sdp(get_default_solver()) for curtest in tests_complex - info(" Test: $(curtest)") + @info " Test: $(curtest)" include(curtest) end -end \ No newline at end of file +end diff --git a/test/test_affine.jl b/test/test_affine.jl index 3d3efa3c2..3ff56fd22 100644 --- a/test/test_affine.jl +++ b/test/test_affine.jl @@ -1,7 +1,10 @@ using Convex -using Base.Test +using Test +import LinearAlgebra.I +using Convex: DotMultiplyAtom TOL = 1e-3 +eye(n) = Matrix(1.0I, n, n) @testset "Affine Atoms" begin @@ -10,8 +13,8 @@ TOL = 1e-3 p = minimize(-x, [x <= 0]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(-x), 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(-x) ≈ 0 atol=TOL end @testset "multiply atom" begin @@ -19,17 +22,17 @@ TOL = 1e-3 p = minimize(2.0 * x, [x >= 2, x <= 4]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox((evaluate(2.0x))[1], 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test (evaluate(2.0x))[1] ≈ 4 atol=TOL x = Variable(2) A = 1.5 * eye(2) p = minimize([2 2] * x, [A * x >= [1.1; 1.1]]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 2.93333, atol=TOL) - @test isapprox((evaluate([2 2] * x))[1], 2.93333, atol=TOL) - @test isapprox(vec(evaluate(A * x)), [1.1; 1.1], atol=TOL) + @test p.optval ≈ 2.93333 atol=TOL + @test (evaluate([2 2] * x))[1] ≈ 2.93333 atol=TOL + @test vec(evaluate(A * x)) ≈ [1.1; 1.1] atol=TOL y = Variable(1) x = Variable(3) @@ -40,12 +43,12 @@ TOL = 1e-3 p = Problem(:minimize, o, c) @test vexity(p) == AffineVexity() 
solve!(p) - @test isapprox(p.optval, 3, atol=TOL) + @test p.optval ≈ 3 atol=TOL p = Problem(:minimize, o, c...) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) + @test p.optval ≈ 3 atol=TOL end @testset "dot atom" begin @@ -53,17 +56,17 @@ TOL = 1e-3 p = minimize(dot([2.0; 2.0], x), x >= [1.1; 1.1]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 4.4, atol=TOL) - @test isapprox((evaluate(dot([2.0; 2.0], x)))[1], 4.4, atol=TOL) + @test p.optval ≈ 4.4 atol=TOL + @test (evaluate(dot([2.0; 2.0], x)))[1] ≈ 4.4 atol=TOL end @testset "vecdot atom" begin x = Variable(2,2) - p = minimize(vecdot(2*ones(2,2), x), x >= 1.1) + p = minimize(vecdot(fill(2.0, (2,2)), x), x >= 1.1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 8.8, atol=TOL) - @test isapprox((evaluate(vecdot(2 * ones(2, 2), x)))[1], 8.8, atol=TOL) + @test p.optval ≈ 8.8 atol=TOL + @test (evaluate(vecdot(fill(2.0, (2, 2)), x)))[1] ≈ 8.8 atol=TOL end @testset "add atom" begin @@ -72,22 +75,22 @@ TOL = 1e-3 p = minimize(x + y, [x >= 3, y >= 2]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 5, atol=TOL) - @test isapprox(evaluate(x + y), 5, atol=TOL) + @test p.optval ≈ 5 atol=TOL + @test evaluate(x + y) ≈ 5 atol=TOL x = Variable(1) p = minimize(x, [eye(2) + x >= eye(2)]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(eye(2) + x), eye(2), atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(eye(2) + x) ≈ eye(2) atol=TOL y = Variable() p = minimize(y - 5, y >= -1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, -6, atol=TOL) - @test isapprox(evaluate(y - 5), -6, atol=TOL) + @test p.optval ≈ -6 atol=TOL + @test evaluate(y - 5) ≈ -6 atol=TOL end @testset "transpose atom" begin @@ -96,16 +99,16 @@ TOL = 1e-3 p = minimize(x' * c, x >= 1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox((evaluate(x' * c))[1], 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test (evaluate(x' * c))[1] ≈ 2 atol=TOL X = Variable(2, 2) c = ones(2, 1) p = minimize(c' * X' * c, [X >= ones(2, 2)]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox((evaluate(c' * X' * c))[1], 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test (evaluate(c' * X' * c))[1] ≈ 4 atol=TOL rows = 2 cols = 3 @@ -119,8 +122,8 @@ TOL = 1e-3 @test vexity(p) == AffineVexity() solve!(p) s = sum(max.(r, r_2')) * 3 - @test isapprox(p.optval, s, atol=TOL) - @test isapprox((evaluate(c * x' * d + d' * x * c' + (c * ((((x')')')')' * d)'))[1], s, atol=TOL) + @test p.optval ≈ s atol=TOL + @test (evaluate(c * x' * d + d' * x * c' + (c * ((((x')')')')' * d)'))[1] ≈ s atol=TOL end @testset "index atom" begin @@ -128,16 +131,16 @@ TOL = 1e-3 p = minimize(x[1] + x[2], [x >= 1]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox((evaluate(x[1] + x[2]))[1], 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test (evaluate(x[1] + x[2]))[1] ≈ 2 atol=TOL x = Variable(3) I = [true true false] p = minimize(sum(x[I]), [x >= 1]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox((evaluate(sum(x[I])))[1], 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test (evaluate(sum(x[I])))[1] ≈ 2 atol=TOL rows = 6 cols = 8 @@ -149,8 +152,8 @@ TOL = 1e-3 @test vexity(p) == AffineVexity() solve!(p) s = c * A[1:n, 5:5+n-1]' * c' - @test isapprox(p.optval, 
s[1], atol=TOL) - @test isapprox(evaluate(c * (X[1:n, 5:(5 + n) - 1])' * c'), s, atol=TOL) + @test p.optval ≈ s[1] atol=TOL + @test evaluate(c * (X[1:n, 5:(5 + n) - 1])' * c') ≈ s atol=TOL end @testset "sum atom" begin @@ -158,23 +161,23 @@ TOL = 1e-3 p = minimize(sum(x), x>=1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox(evaluate(sum(x)), 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test evaluate(sum(x)) ≈ 4 atol=TOL x = Variable(2,2) p = minimize(sum(x) - 2*x[1,1], x>=1, x[1,1]<=2) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) - @test isapprox((evaluate(sum(x) - 2 * x[1, 1]))[1], 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL + @test (evaluate(sum(x) - 2 * x[1, 1]))[1] ≈ 1 atol=TOL x = Variable(10) a = rand(10, 1) p = maximize(sum(x[2:6]), x <= a) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, sum(a[2:6]), atol=TOL) - @test isapprox(evaluate(sum(x[2:6])), sum(a[2:6]), atol=TOL) + @test p.optval ≈ sum(a[2:6]) atol=TOL + @test evaluate(sum(x[2:6])) ≈ sum(a[2:6]) atol=TOL end @testset "diag atom" begin @@ -182,24 +185,24 @@ TOL = 1e-3 p = minimize(sum(diag(x,1)), x >= 1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) - @test isapprox(evaluate(sum(diag(x, 1))), 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL + @test evaluate(sum(diag(x, 1))) ≈ 1 atol=TOL x = Variable(4, 4) p = minimize(sum(diag(x)), x >= 2) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 8, atol=TOL) - @test isapprox(evaluate(sum(diag(x))), 8, atol=TOL) + @test p.optval ≈ 8 atol=TOL + @test evaluate(sum(diag(x))) ≈ 8 atol=TOL end @testset "trace atom" begin x = Variable(2,2) - p = minimize(trace(x), x >= 1) + p = minimize(tr(x), x >= 1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox(evaluate(trace(x)), 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test evaluate(tr(x)) ≈ 2 atol=TOL end @testset "dot multiply atom" begin @@ -207,43 +210,48 @@ TOL = 1e-3 p = maximize(sum(dot(*)(x,[1,2,3])), x<=1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 6, atol=TOL) - @test isapprox(evaluate(sum((dot(*))(x, [1, 2, 3]))), 6, atol=TOL) + @test p.optval ≈ 6 atol=TOL + @test evaluate(sum((dot(*))(x, [1, 2, 3]))) ≈ 6 atol=TOL x = Variable(3, 3) p = maximize(sum(dot(*)(x,eye(3))), x<=1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) - @test isapprox(evaluate(sum((dot(*))(x, eye(3)))), 3, atol=TOL) + @test p.optval ≈ 3 atol=TOL + @test evaluate(sum((dot(*))(x, eye(3)))) ≈ 3 atol=TOL x = Variable(5, 5) p = minimize(x[1, 1], dot(*)(3,x) >= 3) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) - @test isapprox((evaluate(x[1, 1]))[1], 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL + @test (evaluate(x[1, 1]))[1] ≈ 1 atol=TOL x = Variable(3,1) p = minimize(sum(dot(*)(ones(3,3), x)), x>=1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 9, atol=TOL) - @test isapprox((evaluate(x[1, 1]))[1], 1, atol=TOL) + @test p.optval ≈ 9 atol=TOL + @test (evaluate(x[1, 1]))[1] ≈ 1 atol=TOL x = Variable(1,3) p = minimize(sum(dot(*)(ones(3,3), x)), x>=1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 9, atol=TOL) - @test isapprox((evaluate(x[1, 1]))[1], 1, atol=TOL) + @test p.optval ≈ 9 atol=TOL + @test (evaluate(x[1, 1]))[1] ≈ 1 atol=TOL x = Variable(1, 3, Positive()) p = maximize(sum(dot(/)(x,[1 2 3])), x<=1) 
@test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 11 / 6, atol=TOL) - @test isapprox(evaluate(sum((dot(/))(x, [1 2 3]))), 11 / 6, atol=TOL) + @test p.optval ≈ 11 / 6 atol=TOL + @test evaluate(sum((dot(/))(x, [1 2 3]))) ≈ 11 / 6 atol=TOL + + # Broadcast fusion works + x = Variable(5, 5) + a = 2.0 .* x .* ones(Int, 5) + @test a isa DotMultiplyAtom end @testset "reshape atom" begin @@ -253,15 +261,15 @@ TOL = 1e-3 p = minimize(sum(reshape(X, 2, 3) + A), X >= c) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, sum(A + c), atol=TOL) - @test isapprox(evaluate(sum(reshape(X, 2, 3) + A)), sum(A + c), atol=TOL) + @test p.optval ≈ sum(A .+ c) atol=TOL + @test evaluate(sum(reshape(X, 2, 3) + A)) ≈ sum(A .+ c) atol=TOL b = rand(6) p = minimize(sum(vec(X) + b), X >= c) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, sum(b + c), atol=TOL) - @test isapprox(evaluate(sum(vec(X) + b)), sum(b + c), atol=TOL) + @test p.optval ≈ sum(b .+ c) atol=TOL + @test evaluate(sum(vec(X) + b)) ≈ sum(b .+ c) atol=TOL x = Variable(4, 4) c = ones(16, 1) @@ -271,56 +279,56 @@ TOL = 1e-3 @test vexity(p) == AffineVexity() solve!(p) # TODO: why is accuracy lower here? - @test isapprox(p.optval, 136, atol=10TOL) - @test isapprox((evaluate(c' * reshaped))[1], 136, atol=10TOL) + @test p.optval ≈ 136 atol=10TOL + @test (evaluate(c' * reshaped))[1] ≈ 136 atol=10TOL end @testset "hcat atom" begin x = Variable(4, 4) y = Variable(4, 6) - p = maximize(sum(x) + sum([y 4*ones(4)]), [x y 2*ones(4, 2)] <= 2) + p = maximize(sum(x) + sum([y fill(4.0, 4)]), [x y fill(2.0, (4, 2))] <= 2) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 96, atol=TOL) - @test isapprox(evaluate(sum(x) + sum([y 4 * ones(4)])), 96, atol=TOL) - @test isapprox(evaluate([x y 2 * ones(4, 2)]), 2 * ones(4, 12), atol=TOL) + @test p.optval ≈ 96 atol=TOL + @test evaluate(sum(x) + sum([y fill(4.0, 4)])) ≈ 96 atol=TOL + @test evaluate([x y fill(2.0, (4, 2))]) ≈ fill(2.0, (4, 12)) atol=TOL end @testset "vcat atom" begin x = Variable(4, 4) y = Variable(4, 6) +# TODO: fix dimension mismatch [y 4*eye(4); x -ones(4, 6)] p = maximize(sum(x) + sum([y 4*eye(4); x -ones(4, 6)]), [x;y'] <= 2) @test vexity(p) == AffineVexity() solve!(p) # TODO: why is accuracy lower here? 
- @test isapprox(p.optval, 104, atol=10TOL) - @test isapprox(evaluate(sum(x) + sum([y 4 * eye(4); x -(ones(4, 6))])), 104, atol=10TOL) - @test isapprox(evaluate([x; y']), 2 * ones(10, 4), atol=TOL) - + @test p.optval ≈ 104 atol=10TOL + @test evaluate(sum(x) + sum([y 4 * eye(4); x -(ones(4, 6))])) ≈ 104 atol=10TOL + @test evaluate([x; y']) ≈ 2 * ones(10, 4) atol=TOL end - @testset "diagm atom" begin + @testset "Diagonal atom" begin x = Variable(2, 2) - @test_throws Exception diagm(x) + @test_throws ArgumentError Diagonal(x) x = Variable(4) - p = minimize(sum(diagm(x)), x == [1; 2; 3; 4]) + p = minimize(sum(Diagonal(x)), x == [1, 2, 3, 4]) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 10, atol=TOL) - @test all(abs.(evaluate(diagm(x)) - diagm([1; 2; 3; 4])) .<= TOL) + @test p.optval ≈ 10 atol=TOL + @test all(abs.(evaluate(Diagonal(x)) - Diagonal([1, 2, 3, 4])) .<= TOL) x = Variable(3) c = [1; 2; 3] - p = minimize(c' * diagm(x) * c, x >= 1, sum(x) == 10) + p = minimize(c' * Diagonal(x) * c, x >= 1, sum(x) == 10) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 21, atol=TOL) + @test p.optval ≈ 21 atol=TOL x = Variable(3) - p = minimize(sum(x), x >= 1, diagm(x)[1, 2] == 1) - @test solve!(p) == nothing + p = minimize(sum(x), x >= 1, Diagonal(x)[1, 2] == 1) + @test solve!(p) === nothing @test p.status != :Optimal end @@ -330,16 +338,16 @@ TOL = 1e-3 p = minimize(sum(conv(h, x)) + sum(x), x >= 1, x <= 2) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) - @test isapprox(evaluate(sum(conv(h, x))), 0, atol=TOL) + @test p.optval ≈ 3 atol=TOL + @test evaluate(sum(conv(h, x))) ≈ 0 atol=TOL x = Variable(3) h = [1, -1] p = minimize(sum(conv(x, h)) + sum(x), x >= 1, x <= 2) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) - @test isapprox(evaluate(sum(conv(h, x))), 0, atol=TOL) + @test p.optval ≈ 3 atol=TOL + @test evaluate(sum(conv(h, x))) ≈ 0 atol=TOL end @@ -375,7 +383,7 @@ TOL = 1e-3 solve!(p) if p.solution.has_dual println("Solution object has dual value, checking for dual correctness.") - @test isapprox(p.constraints[1].dual, 1, atol=TOL) + @test p.constraints[1].dual ≈ 1 atol=TOL end x = Variable() @@ -383,7 +391,7 @@ TOL = 1e-3 solve!(p) if p.solution.has_dual println("Solution object has dual value, checking for dual correctness.") - @test isapprox(p.constraints[1].dual, 1, atol=TOL) + @test p.constraints[1].dual ≈ 1 atol=TOL end x = Variable() @@ -391,8 +399,8 @@ TOL = 1e-3 solve!(p) if p.solution.has_dual println("Solution object has dual value, checking for dual correctness.") - @test isapprox(p.constraints[1].dual, 0, atol=TOL) - @test isapprox(abs.(p.constraints[2].dual), 1, atol=TOL) + @test p.constraints[1].dual ≈ 0 atol=TOL + @test abs.(p.constraints[2].dual) ≈ 1 atol=TOL end x = Variable(2) @@ -406,7 +414,4 @@ TOL = 1e-3 end end - - - -end \ No newline at end of file +end diff --git a/test/test_complex.jl b/test/test_complex.jl index 6cf17a055..ba86132d6 100644 --- a/test/test_complex.jl +++ b/test/test_complex.jl @@ -1,5 +1,6 @@ using Convex -using Base.Test +using Test +import LinearAlgebra.eigen TOL = 1e-3 @@ -40,9 +41,9 @@ TOL = 1e-3 #x2 = xr.value + im*xi.value real_diff = real(x1) - xr.value - @test isapprox(real_diff, zeros(10, 1), atol=TOL) + @test real_diff ≈ zeros(10, 1) atol=TOL imag_diff = imag(x1) - xi.value - @test isapprox(imag_diff, zeros(10, 1), atol=TOL) + @test imag_diff ≈ zeros(10, 1) atol=TOL #@fact x1==x2 --> true end @@ -54,12 +55,12 @@ TOL = 1e-3 c1 = 
real(x)>=0 p = minimize(objective,c1) solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(objective), 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(objective) ≈ 0 atol=TOL real_diff = real(x.value) - real(a) imag_diff = imag(x.value) - imag(a) - @test isapprox(real_diff, 0, atol=TOL) - @test isapprox(imag_diff, 0, atol=TOL) + @test real_diff ≈ 0 atol=TOL + @test imag_diff ≈ 0 atol=TOL end @testset "sumsquares atom" begin @@ -69,12 +70,12 @@ TOL = 1e-3 c1 = real(x)>=0 p = minimize(objective,c1) solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(objective), zeros(1, 1), atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(objective) ≈ zeros(1, 1) atol=TOL real_diff = real.(x.value) - real.(a) imag_diff = imag.(x.value) - imag.(a) - @test isapprox(real_diff, zeros(2, 1), atol=TOL) - @test isapprox(imag_diff, zeros(2, 1), atol=TOL) + @test real_diff ≈ zeros(2, 1) atol=TOL + @test imag_diff ≈ zeros(2, 1) atol=TOL end @testset "abs atom" begin @@ -84,12 +85,12 @@ TOL = 1e-3 c1 = real(x)>=0 p = minimize(objective,c1) solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(objective), zeros(1), atol=TOL) - real_diff = real(x.value) - real(a) - imag_diff = imag(x.value) - imag(a) - @test isapprox(real_diff, zeros(1), atol=TOL) - @test isapprox(imag_diff, zeros(1), atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(objective) ≈ zeros(1) atol=TOL + real_diff = real(x.value) .- real(a) + imag_diff = imag(x.value) .- imag(a) + @test real_diff ≈ zeros(1) atol=TOL + @test imag_diff ≈ zeros(1) atol=TOL end @testset "Complex Semidefinite constraint" begin @@ -102,12 +103,12 @@ TOL = 1e-3 p = minimize(objective, c1) solve!(p) # test that X is approximately equal to posA: - l,v = eig(A) - posA = v*diagm(max.(l,0))*v' + l,v = eigen(A) + posA = v*Diagonal(max.(l,0))*v' real_diff = real.(x.value) - real.(posA) imag_diff = imag.(x.value) - imag.(posA) - @test isapprox(real_diff, zeros(n, n), atol=TOL) - @test isapprox(imag_diff, zeros(n, n), atol=TOL) + @test real_diff ≈ zeros(n, n) atol=TOL + @test imag_diff ≈ zeros(n, n) atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_exp.jl b/test/test_exp.jl index c0ceee8fe..f0d6a00fc 100644 --- a/test/test_exp.jl +++ b/test/test_exp.jl @@ -1,5 +1,5 @@ using Convex -using Base.Test +using Test TOL = 1e-3 @@ -10,28 +10,28 @@ TOL = 1e-3 p = minimize(exp(y), y>=0) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) - @test isapprox(evaluate(exp(y)), 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL + @test evaluate(exp(y)) ≈ 1 atol=TOL y = Variable() p = minimize(exp(y), y>=1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, exp(1), atol=TOL) - @test isapprox(evaluate(exp(y)), exp(1), atol=TOL) + @test p.optval ≈ exp(1) atol=TOL + @test evaluate(exp(y)) ≈ exp(1) atol=TOL y = Variable(5) p = minimize(sum(exp(y)), y>=0) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 5, atol=TOL) - @test isapprox(evaluate(sum(exp(y))), 5, atol=TOL) + @test p.optval ≈ 5 atol=TOL + @test evaluate(sum(exp(y))) ≈ 5 atol=TOL y = Variable(5) p = minimize(sum(exp(y)), y>=0) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 5, atol=TOL) + @test p.optval ≈ 5 atol=TOL end @testset "log atom" begin @@ -39,19 +39,19 @@ TOL = 1e-3 p = maximize(log(y), y<=1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL y = Variable() p = 
maximize(log(y), y<=2) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, log(2), atol=TOL) + @test p.optval ≈ log(2) atol=TOL y = Variable() p = maximize(log(y), [y<=2, exp(y)<=10]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, log(2), atol=TOL) + @test p.optval ≈ log(2) atol=TOL end @testset "log sum exp atom" begin @@ -59,7 +59,7 @@ TOL = 1e-3 p = minimize(logsumexp(y), y>=1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, log(exp(1) * 5), atol=TOL) + @test p.optval ≈ log(exp(1) * 5) atol=TOL end @testset "logistic loss atom" begin @@ -67,7 +67,7 @@ TOL = 1e-3 p = minimize(logisticloss(y), y>=1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, log(exp(1) + 1) * 5, atol=TOL) + @test p.optval ≈ log(exp(1) + 1) * 5 atol=TOL end @testset "entropy atom" begin @@ -75,7 +75,7 @@ TOL = 1e-3 p = maximize(entropy(y), sum(y)<=1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, -(log(1 / 5)), atol=TOL) + @test p.optval ≈ -(log(1 / 5)) atol=TOL end @testset "relative entropy atom" begin @@ -85,7 +85,7 @@ TOL = 1e-3 p = minimize(relative_entropy(x,y), y==1, x >= 2) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 2 * log(2), atol=TOL) + @test p.optval ≈ 2 * log(2) atol=TOL end @testset "log perspective atom" begin @@ -95,7 +95,7 @@ TOL = 1e-3 p = maximize(log_perspective(x,y), y==5, x <= 10) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 5 * log(2), atol=TOL) + @test p.optval ≈ 5 * log(2) atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_exp_and_sdp.jl b/test/test_exp_and_sdp.jl index e32c62fb0..d070a9b57 100644 --- a/test/test_exp_and_sdp.jl +++ b/test/test_exp_and_sdp.jl @@ -1,5 +1,5 @@ using Convex -using Base.Test +using Test TOL = 1e-2 @@ -10,8 +10,8 @@ TOL = 1e-2 p = maximize(logdet(x), [x[1, 1] == 1, x[2, 2] == 1]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(logdet(x)), 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(logdet(x)) ≈ 0 atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_int.jl b/test/test_int.jl index 2f3fc9de5..b06682910 100644 --- a/test/test_int.jl +++ b/test/test_int.jl @@ -1,5 +1,5 @@ using Convex -using Base.Test +using Test TOL = 1e-2 @@ -19,13 +19,13 @@ end p = minimize(x, x>=4.3) @test vexity(p) == AffineVexity() solve!(p, LPsolver()) - @test isapprox(p.optval, 4.3, atol=TOL) + @test p.optval ≈ 4.3 atol=TOL x = Variable(2) p = minimize(norm(x,1), x[1]>=4.3) @test vexity(p) == ConvexVexity() solve!(p, LPsolver()) - @test isapprox(p.optval, 4.3, atol=TOL) + @test p.optval ≈ 4.3 atol=TOL end @testset "integer variables" begin @@ -33,38 +33,38 @@ end p = minimize(x, x>=4.3) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 5, atol=TOL) + @test p.optval ≈ 5 atol=TOL x = Variable(2, :Int) p = minimize(sum(x), x>=4.3) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 10, atol=TOL) + @test p.optval ≈ 10 atol=TOL x = Variable(:Int) y = Variable() p = minimize(sum(x + y), x>=4.3, y>=7) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 12, atol=TOL) + @test p.optval ≈ 12 atol=TOL x = Variable(2, :Int) p = minimize(norm(x, 1), x[1]>=4.3) @test vexity(p) == ConvexVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 5, atol=TOL) + @test p.optval ≈ 5 atol=TOL x = Variable(2, :Int) p = minimize(sum(x), 
x[1]>=4.3, x>=0) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 5, atol=TOL) + @test p.optval ≈ 5 atol=TOL x = Variable(2, :Int) p = minimize(sum(x), x>=.5) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL end @testset "binary variables" begin @@ -72,13 +72,13 @@ end p = minimize(sum(x), x>=.5) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL x = Variable(2, :Bin) p = minimize(sum(x), x[1]>=.5, x>=0) @test vexity(p) == AffineVexity() solve!(p, MIPsolver()) - @test isapprox(p.optval, 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_lp.jl b/test/test_lp.jl index e5b668917..b57f2edbd 100644 --- a/test/test_lp.jl +++ b/test/test_lp.jl @@ -1,5 +1,6 @@ using Convex -using Base.Test +using Test +import Random.shuffle TOL = 1e-3 @@ -10,76 +11,76 @@ TOL = 1e-3 p = minimize(abs(x), x<=-1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) - @test isapprox(evaluate(abs(x)), 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL + @test evaluate(abs(x)) ≈ 1 atol=TOL x = Variable(2,2) p = minimize(sum(abs(x)), x[2,2]>=1, x[1,1]>=1, x>=0) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox(evaluate(sum(abs(x))), 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test evaluate(sum(abs(x))) ≈ 2 atol=TOL end @testset "maximum atom" begin x = Variable(10) - a = rand(10, 1) + a = shuffle(collect(0.1:0.1:1.0)) p = minimize(maximum(x), x >= a) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, maximum(a), atol=TOL) - @test isapprox(evaluate(maximum(x)), maximum(a), atol=TOL) + @test p.optval ≈ maximum(a) atol=TOL + @test evaluate(maximum(x)) ≈ maximum(a) atol=TOL end @testset "minimum atom" begin x = Variable(1) - a = rand(10, 10) + a = reshape(shuffle(collect(0.01:0.01:1.0)), (10, 10)) p = maximize(minimum(x), x <= a) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, minimum(a), atol=TOL) - @test isapprox(evaluate(minimum(x)), minimum(a), atol=TOL) + @test p.optval ≈ minimum(a) atol=TOL + @test evaluate(minimum(x)) ≈ minimum(a) atol=TOL x = Variable(4, 4) y = Variable(4, 6) z = Variable(1) c = ones(4, 1) - d = 2 * ones(6, 1) + d = fill(2.0, (6, 1)) constraints = [[x y] <= 2, z <= 0, z <= x, 2z >= -1] objective = sum(x + z) + minimum(y) + c' * y * d p = maximize(objective, constraints) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 130, atol=TOL) - @test isapprox((evaluate(objective))[1], 130, atol=TOL) + @test p.optval ≈ 130 atol=TOL + @test (evaluate(objective))[1] ≈ 130 atol=TOL end @testset "max atom" begin x = Variable(10, 10) y = Variable(10, 10) - a = rand(10, 10) - b = rand(10, 10) + a = reshape(shuffle(collect(0.01:0.01:1.0)), (10, 10)) + b = reshape(shuffle(collect(0.01:0.01:1.0)), (10, 10)) p = minimize(maximum(max(x, y)), [x >= a, y >= b]) @test vexity(p) == ConvexVexity() solve!(p) max_a = maximum(a) max_b = maximum(b) - @test isapprox(p.optval, max(max_a, max_b), atol=TOL) - @test isapprox(evaluate(maximum(max(x, y))), max(max_a, max_b), atol=TOL) + @test p.optval ≈ max(max_a, max_b) atol=10TOL + @test evaluate(maximum(max(x, y))) ≈ max(max_a, max_b) atol=10TOL end @testset "min atom" begin x = Variable(10, 10) y = Variable(10, 10) - a = rand(10, 10) - b = rand(10, 10) + a = reshape(shuffle(collect(0.01:0.01:1.0)), (10, 
10)) + b = reshape(shuffle(collect(0.01:0.01:1.0)), (10, 10)) p = maximize(minimum(min(x, y)), [x <= a, y <= b]) @test vexity(p) == ConvexVexity() solve!(p) min_a = minimum(a) min_b = minimum(b) - @test isapprox(p.optval, min(min_a, min_b), atol=TOL) - @test isapprox(evaluate(minimum(min(x, y))), min(min_a, min_b), atol=TOL) + @test p.optval ≈ min(min_a, min_b) atol=10TOL + @test evaluate(minimum(min(x, y))) ≈ min(min_a, min_b) atol=10TOL end @testset "pos atom" begin @@ -88,8 +89,8 @@ TOL = 1e-3 p = minimize(sum(pos(x)), [x >= a, x <= 2]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) - @test isapprox(evaluate(sum(pos(x))), 3, atol=TOL) + @test p.optval ≈ 3 atol=TOL + @test evaluate(sum(pos(x))) ≈ 3 atol=TOL end @testset "neg atom" begin @@ -97,8 +98,8 @@ TOL = 1e-3 p = minimize(1, [x >= -2, x <= -2, neg(x) >= -3]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) - @test isapprox(evaluate(sum(neg(x))), -6, atol=TOL) + @test p.optval ≈ 1 atol=TOL + @test evaluate(sum(neg(x))) ≈ -6 atol=TOL end @testset "sumlargest atom" begin @@ -106,15 +107,15 @@ TOL = 1e-3 p = minimize(sumlargest(x, 2), x >= [1; 1]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox(evaluate(sumlargest(x, 2)), 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test evaluate(sumlargest(x, 2)) ≈ 2 atol=TOL x = Variable(4, 4) p = minimize(sumlargest(x, 3), x >= eye(4), x[1, 1] >= 1.5, x[2, 3] >= 2.1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 4.6, atol=TOL) - @test isapprox(evaluate(sumlargest(x, 2)), 3.6, atol=TOL) + @test p.optval ≈ 4.6 atol=TOL + @test evaluate(sumlargest(x, 2)) ≈ 3.6 atol=TOL end @testset "sumsmallest atom" begin @@ -122,15 +123,15 @@ TOL = 1e-3 p = minimize(sumlargest(x, 2), sumsmallest(x, 4) >= 1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.5, atol=TOL) - @test isapprox(evaluate(sumsmallest(x, 4)), 1, atol=TOL) + @test p.optval ≈ 0.5 atol=TOL + @test evaluate(sumsmallest(x, 4)) ≈ 1 atol=TOL x = Variable(3, 2) - p = maximize(sumsmallest(x, 3), x >= 1, x <= 5, sumlargest(x, 3) <= 12) + p = maximize(sumsmallest(x, 3), x >= 2, x <= 5, sumlargest(x, 3) <= 12) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 12, atol=TOL) - @test isapprox(evaluate(sumsmallest(x, 3)), 12, atol=TOL) + @test p.optval ≈ 12 atol=TOL + @test evaluate(sumsmallest(x, 3)) ≈ 12 atol=TOL end @testset "dotsort atom" begin @@ -138,16 +139,16 @@ TOL = 1e-3 p = minimize(dotsort(x, [1, 2, 3, 4]), sum(x) >= 7, x >= 0, x <= 2, x[4] <= 1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 19, atol=TOL) - @test isapprox(vec(x.value), [2; 2; 2; 1], atol=TOL) - @test isapprox(evaluate(dotsort(x, [1, 2, 3, 4])), 19, atol=TOL) + @test p.optval ≈ 19 atol=TOL + @test vec(x.value) ≈ [2; 2; 2; 1] atol=TOL + @test evaluate(dotsort(x, [1, 2, 3, 4])) ≈ 19 atol=TOL x = Variable(2, 2) p = minimize(dotsort(x, [1 2; 3 4]), sum(x) >= 7, x >= 0, x <= 2, x[2, 2] <= 1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 19, atol=TOL) - @test isapprox(evaluate(dotsort(x, [1, 2, 3, 4])), 19, atol=TOL) + @test p.optval ≈ 19 atol=TOL + @test evaluate(dotsort(x, [1, 2, 3, 4])) ≈ 19 atol=TOL end @testset "hinge loss atom" begin @@ -159,8 +160,8 @@ TOL = 1e-3 p = minimize(norm_inf(x), [-2 <= x, x <= 1]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(norm_inf(x)), 0, atol=TOL) 
+ @test p.optval ≈ 0 atol=TOL + @test evaluate(norm_inf(x)) ≈ 0 atol=TOL end @testset "norm 1 atom" begin @@ -168,8 +169,8 @@ TOL = 1e-3 p = minimize(norm_1(x), [-2 <= x, x <= 1]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) - @test isapprox(evaluate(norm_1(x)), 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL + @test evaluate(norm_1(x)) ≈ 0 atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_params.jl b/test/test_params.jl index 0a16e9fb4..8f052eb86 100644 --- a/test/test_params.jl +++ b/test/test_params.jl @@ -1,5 +1,6 @@ using Convex -using Base.Test +using Test +import Statistics.mean TOL = 1e-3 @@ -11,36 +12,36 @@ TOL = 1e-3 p = minimize(x+y, x>=0, y>=0) solve!(p) - @test isapprox(p.optval, 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL y.value = 4 fix!(y) solve!(p) - @test isapprox(p.optval, 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL free!(y) solve!(p) - @test isapprox(p.optval, 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL end @testset "fix multiplication" begin - a = [1,2,3,2,1] - x = Variable(length(a)) - gamma = Variable(Positive()) - fix!(gamma, 0.7) - - p = minimize(norm(x-a) + gamma*norm(x[1:end-1] - x[2:end])) - solve!(p) - o1 = p.optval + a = [1,2,3,2,1] + x = Variable(length(a)) + gamma = Variable(Positive()) + fix!(gamma, 0.7) + + p = minimize(norm(x-a) + gamma*norm(x[1:end-1] - x[2:end])) + solve!(p) + o1 = p.optval # x should be very close to a - @test isapprox(o1, 0.7 * norm(a[1:end - 1] - a[2:end]), atol=TOL) + @test o1 ≈ 0.7 * norm(a[1:end - 1] - a[2:end]) atol=TOL # increase regularization - fix!(gamma, 1.0) - solve!(p) - o2 = p.optval + fix!(gamma, 1.0) + solve!(p) + o2 = p.optval # x should be very close to mean(a) - @test isapprox(o2, norm(a - mean(a)), atol=TOL) + @test o2 ≈ norm(a .- mean(a)) atol=TOL - @test o1 <= o2 + @test o1 <= o2 end -end \ No newline at end of file +end diff --git a/test/test_sdp.jl b/test/test_sdp.jl index 9909e2d66..678809522 100644 --- a/test/test_sdp.jl +++ b/test/test_sdp.jl @@ -1,7 +1,10 @@ using Convex -using Base.Test +using Test +import LinearAlgebra.I TOL = 1e-2 +eye(n) = Matrix(1.0I, n, n) + # TODO: uncomment vexity checks once SDP on vars/constraints changes vexity of problem @@ -11,13 +14,13 @@ TOL = 1e-2 p = minimize(y[1,1]) # @fact vexity(p) --> ConvexVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL y = Variable((3,3), :Semidefinite) p = minimize(y[1,1], y[2,2]==1) # @fact vexity(p) --> ConvexVexity() solve!(p) - @test isapprox(p.optval, 0, atol=TOL) + @test p.optval ≈ 0 atol=TOL # Solution is obtained as y[2,2] -> infinity # This test fails on Mosek. 
See @@ -32,20 +35,20 @@ TOL = 1e-2 p = minimize(sum(diag(y)), y[1, 1] == 1) # @fact vexity(p) --> ConvexVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL y = Variable((3, 3), :Semidefinite) - p = minimize(trace(y), y[2,1]<=4, y[2,2]>=3) + p = minimize(tr(y), y[2,1]<=4, y[2,2]>=3) # @fact vexity(p) --> ConvexVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) + @test p.optval ≈ 3 atol=TOL x = Variable(Positive()) y = Semidefinite(3) p = minimize(y[1, 2], y[2, 1] == 1) # @fact vexity(p) --> ConvexVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL end @testset "sdp constraints" begin @@ -55,7 +58,7 @@ TOL = 1e-2 p = minimize(x + y[1, 1], isposdef(y), x >= 1, y[2, 1] == 1) # @fact vexity(p) --> ConvexVexity() solve!(p) - @test isapprox(p.optval, 1, atol=TOL) + @test p.optval ≈ 1 atol=TOL end @testset "nuclear norm atom" begin @@ -63,8 +66,8 @@ TOL = 1e-2 p = minimize(nuclearnorm(y), y[2,1]<=4, y[2,2]>=3, y[3,3]<=2) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 3, atol=TOL) - @test isapprox(evaluate(nuclearnorm(y)), 3, atol=TOL) + @test p.optval ≈ 3 atol=TOL + @test evaluate(nuclearnorm(y)) ≈ 3 atol=TOL end @testset "operator norm atom" begin @@ -72,8 +75,8 @@ TOL = 1e-2 p = minimize(operatornorm(y), y[2,1]<=4, y[2,2]>=3, sum(y)>=12) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox(evaluate(operatornorm(y)), 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test evaluate(operatornorm(y)) ≈ 4 atol=TOL end @testset "sigma max atom" begin @@ -81,8 +84,8 @@ TOL = 1e-2 p = minimize(sigmamax(y), y[2,1]<=4, y[2,2]>=3, sum(y)>=12) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox(evaluate(sigmamax(y)), 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test evaluate(sigmamax(y)) ≈ 4 atol=TOL end @testset "lambda max atom" begin @@ -90,17 +93,17 @@ TOL = 1e-2 p = minimize(lambdamax(y), y[1,1]>=4) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox(evaluate(lambdamax(y)), 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test evaluate(lambdamax(y)) ≈ 4 atol=TOL end @testset "lambda min atom" begin y = Semidefinite(3) - p = maximize(lambdamin(y), trace(y)<=6) + p = maximize(lambdamin(y), tr(y)<=6) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox(evaluate(lambdamin(y)), 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test evaluate(lambdamin(y)) ≈ 2 atol=TOL end @testset "matrix frac atom" begin @@ -109,8 +112,8 @@ TOL = 1e-2 p = minimize(matrixfrac(x, P), P <= 2*eye(3), P >= 0.5 * eye(3)) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 7, atol=TOL) - @test isapprox((evaluate(matrixfrac(x, P)))[1], 7, atol=TOL) + @test p.optval ≈ 7 atol=TOL + @test (evaluate(matrixfrac(x, P)))[1] ≈ 7 atol=TOL end @testset "matrix frac atom both arguments variable" begin @@ -119,21 +122,21 @@ TOL = 1e-2 p = minimize(matrixfrac(x, P), lambdamax(P) <= 2, x[1] >= 1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.5, atol=TOL) - @test isapprox((evaluate(matrixfrac(x, P)))[1], 0.5, atol=TOL) + @test p.optval ≈ 0.5 atol=TOL + @test (evaluate(matrixfrac(x, P)))[1] ≈ 0.5 atol=TOL end @testset "sum largest eigs" begin x = Semidefinite(3) p = minimize(sumlargesteigs(x, 2), x >= 1) solve!(p) - @test isapprox(p.optval, 3, atol=TOL) - @test isapprox(evaluate(x), ones(3, 3), atol=TOL) + @test 
p.optval ≈ 3 atol=TOL + @test evaluate(x) ≈ ones(3, 3) atol=TOL x = Semidefinite(3) p = minimize(sumlargesteigs(x, 2), [x[i,:] >= i for i=1:3]...) solve!(p) - @test isapprox(p.optval, 8.4853, atol=TOL) + @test p.optval ≈ 8.4853 atol=TOL x1 = Semidefinite(3) p1 = minimize(lambdamax(x1), x1[1,1]>=4) @@ -143,7 +146,7 @@ TOL = 1e-2 p2 = minimize(sumlargesteigs(x2, 1), x2[1,1]>=4) solve!(p2) - @test isapprox(p1.optval, p2.optval, atol=TOL) + @test p1.optval ≈ p2.optval atol=TOL x1 = Semidefinite(3) p1 = minimize(lambdamax(x1), [x1[i,:] >= i for i=1:3]...) @@ -153,7 +156,7 @@ TOL = 1e-2 p2 = minimize(sumlargesteigs(x2, 1), [x2[i,:] >= i for i=1:3]...) solve!(p2) - @test isapprox(p1.optval, p2.optval, atol=TOL) + @test p1.optval ≈ p2.optval atol=TOL println(p1.optval) end @@ -162,10 +165,10 @@ TOL = 1e-2 id = eye(4) X = Semidefinite(4) W = kron(id, X) - p = maximize(trace(W), trace(X) ≤ 1) + p = maximize(tr(W), tr(X) ≤ 1) @test vexity(p) == AffineVexity() solve!(p) - @test isapprox(p.optval, 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL end @testset "Partial trace" begin @@ -175,7 +178,7 @@ TOL = 1e-2 constraints = [partialtrace(ρ, 1, [2; 2]) == [0.09942819 0.29923607; 0.29923607 0.90057181], ρ in :SDP] p = satisfy(constraints) solve!(p) - @test isapprox(evaluate(ρ), [0.09942819 0.29923607 0 0; 0.299237 0.900572 0 0; 0 0 0 0; 0 0 0 0], atol=TOL) - @test isapprox(evaluate(partialtrace(ρ, 1, [2; 2])), [1.0 0; 0 0], atol=TOL) + @test evaluate(ρ) ≈ [0.09942819 0.29923607 0 0; 0.299237 0.900572 0 0; 0 0 0 0; 0 0 0 0] atol=TOL + @test evaluate(partialtrace(ρ, 1, [2; 2])) ≈ [1.0 0; 0 0] atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_socp.jl b/test/test_socp.jl index 6308bebe3..d465f2230 100644 --- a/test/test_socp.jl +++ b/test/test_socp.jl @@ -1,5 +1,6 @@ using Convex -using Base.Test +using Test +import LinearAlgebra.opnorm TOL = 1e-3 @@ -12,8 +13,8 @@ TOL = 1e-3 p = minimize(norm2(A * x + b)) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.64888, atol=TOL) - @test isapprox(evaluate(norm2(A * x + b)), 0.64888, atol=TOL) + @test p.optval ≈ 0.64888 atol=TOL + @test evaluate(norm2(A * x + b)) ≈ 0.64888 atol=TOL x = Variable(2, 1) A = [1 2; 2 1; 3 4] @@ -22,8 +23,8 @@ TOL = 1e-3 p = minimize(norm2(A * x + b) + lambda * norm2(x), x >= 1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 14.9049, atol=TOL) - @test isapprox(evaluate(norm2(A * x + b) + lambda * norm2(x)), 14.9049, atol=TOL) + @test p.optval ≈ 14.9049 atol=TOL + @test evaluate(norm2(A * x + b) + lambda * norm2(x)) ≈ 14.9049 atol=TOL x = Variable(2) @@ -31,8 +32,8 @@ TOL = 1e-3 @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 14.9049, atol=TOL) - @test isapprox(evaluate(norm2(A * x + b) + lambda * norm2(x)), 14.9049, atol=TOL) + @test p.optval ≈ 14.9049 atol=TOL + @test evaluate(norm2(A * x + b) + lambda * norm2(x)) ≈ 14.9049 atol=TOL x = Variable(2, 1) A = [1 2; 2 1; 3 4] @@ -41,8 +42,8 @@ TOL = 1e-3 p = minimize(norm2(A * x + b) + lambda * norm_1(x), x >= 1) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 15.4907, atol=TOL) - @test isapprox(evaluate(norm2(A * x + b) + lambda * norm_1(x)), 15.4907, atol=TOL) + @test p.optval ≈ 15.4907 atol=TOL + @test evaluate(norm2(A * x + b) + lambda * norm_1(x)) ≈ 15.4907 atol=TOL end @testset "frobenius norm atom" begin @@ -51,8 +52,8 @@ TOL = 1e-3 p = minimize(vecnorm(m, 2), c) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, sqrt(35), atol=TOL) - @test 
isapprox(evaluate(vecnorm(m, 2)), sqrt(35), atol=TOL) + @test p.optval ≈ sqrt(35) atol=TOL + @test evaluate(vecnorm(m, 2)) ≈ sqrt(35) atol=TOL end @testset "quad over lin atom" begin @@ -64,8 +65,8 @@ TOL = 1e-3 p = minimize(quadoverlin(A*x + b, c*x + d)) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 17.7831, atol=TOL) - @test isapprox((evaluate(quadoverlin(A * x + b, c * x + d)))[1], 17.7831, atol=TOL) + @test p.optval ≈ 17.7831 atol=TOL + @test (evaluate(quadoverlin(A * x + b, c * x + d)))[1] ≈ 17.7831 atol=TOL end @testset "sum squares atom" begin @@ -75,8 +76,8 @@ TOL = 1e-3 p = minimize(sumsquares(A*x + b)) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.42105, atol=TOL) - @test isapprox((evaluate(sumsquares(A * x + b)))[1], 0.42105, atol=TOL) + @test p.optval ≈ 0.42105 atol=TOL + @test (evaluate(sumsquares(A * x + b)))[1] ≈ 0.42105 atol=TOL end @testset "square atom" begin @@ -86,8 +87,8 @@ TOL = 1e-3 p = minimize(sum(square(A*x + b))) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.42105, atol=TOL) - @test isapprox(evaluate(sum(square(A * x + b))), 0.42105, atol=TOL) + @test p.optval ≈ 0.42105 atol=TOL + @test evaluate(sum(square(A * x + b))) ≈ 0.42105 atol=TOL x = Variable(2, 1) A = [1 2; 2 1; 3 4] @@ -96,14 +97,14 @@ TOL = 1e-3 p = minimize(sum(dot(^)(expr,2))) # elementwise ^ @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.42105, atol=TOL) - @test isapprox(evaluate(sum(broadcast(^, expr, 2))), 0.42105, atol=TOL) + @test p.optval ≈ 0.42105 atol=TOL + @test evaluate(sum(broadcast(^, expr, 2))) ≈ 0.42105 atol=TOL p = minimize(sum(dot(*)(expr, expr))) # elementwise * @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 0.42105, atol=TOL) - @test isapprox(evaluate(sum((dot(*))(expr, expr))), 0.42105, atol=TOL) + @test p.optval ≈ 0.42105 atol=TOL + @test evaluate(sum((dot(*))(expr, expr))) ≈ 0.42105 atol=TOL end @testset "inv pos atom" begin @@ -111,22 +112,22 @@ TOL = 1e-3 p = minimize(sum(invpos(x)), invpos(x) < 2, x > 1, x == 2, 2 == x) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 2, atol=TOL) - @test isapprox(evaluate(sum(invpos(x))), 2, atol=TOL) + @test p.optval ≈ 2 atol=TOL + @test evaluate(sum(invpos(x))) ≈ 2 atol=TOL x = Variable(3) p = minimize(sum(dot(/)([3,6,9], x)), x<=3) solve!(p) - @test isapprox(x.value, 3 * ones(3, 1), atol=TOL) - @test isapprox(p.optval, 6, atol=TOL) - @test isapprox(evaluate(sum((dot(/))([3, 6, 9], x))), 6, atol=TOL) + @test x.value ≈ fill(3.0, (3, 1)) atol=TOL + @test p.optval ≈ 6 atol=TOL + @test evaluate(sum((dot(/))([3, 6, 9], x))) ≈ 6 atol=TOL x = Variable() p = minimize(sum([3,6,9]/x), x<=3) solve!(p) - @test isapprox(x.value, 3, atol=TOL) - @test isapprox(p.optval, 6, atol=TOL) - @test isapprox(evaluate(sum([3, 6, 9] / x)), 6, atol=TOL) + @test x.value ≈ 3 atol=TOL + @test p.optval ≈ 6 atol=TOL + @test evaluate(sum([3, 6, 9] / x)) ≈ 6 atol=TOL end @testset "geo mean atom" begin @@ -141,8 +142,8 @@ TOL = 1e-3 p = maximize(sum(geomean(x, y)), 1 < x, x < 2, y < 2) solve!(p) - @test isapprox(p.optval, 4, atol=TOL) - @test isapprox(evaluate(sum(geomean(x, y))), 4, atol=TOL) + @test p.optval ≈ 4 atol=TOL + @test evaluate(sum(geomean(x, y))) ≈ 4 atol=TOL end @testset "sqrt atom" begin @@ -156,8 +157,8 @@ TOL = 1e-3 p = minimize(quadform(x, A), [x >= 1]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 6.1464, atol=TOL) - @test isapprox((evaluate(quadform(x, A)))[1], 6.1464, atol=TOL) + 
@test p.optval ≈ 6.1464 atol=TOL + @test (evaluate(quadform(x, A)))[1] ≈ 6.1464 atol=TOL x = Variable(3, 1) A = -1.0*[0.8608 0.3131 0.5458; 0.3131 0.8584 0.5836; 0.5458 0.5836 1.5422] @@ -165,8 +166,8 @@ TOL = 1e-3 p = maximize(c*x , [quadform(x, A) >= -1]) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 3.7713, atol=TOL) - @test isapprox((evaluate(quadform(x, A)))[1], -1, atol=TOL) + @test p.optval ≈ 3.7713 atol=TOL + @test (evaluate(quadform(x, A)))[1] ≈ -1 atol=TOL end @testset "huber atom" begin @@ -174,8 +175,8 @@ TOL = 1e-3 p = minimize(sum(huber(x, 1)), x >= 2) @test vexity(p) == ConvexVexity() solve!(p) - @test isapprox(p.optval, 9, atol=TOL) - @test isapprox(evaluate(sum(huber(x, 1))), 9, atol=TOL) + @test p.optval ≈ 9 atol=TOL + @test evaluate(sum(huber(x, 1))) ≈ 9 atol=TOL end @testset "rational norm atom" begin @@ -186,8 +187,8 @@ TOL = 1e-3 @test vexity(p) == ConvexVexity() # Solution is approximately x = [1, .93138, 1.04575] solve!(p) - @test isapprox(p.optval, 1.2717, atol=TOL) - @test isapprox(evaluate(norm(x, 4.5)), 1.2717, atol=TOL) + @test p.optval ≈ 1.2717 atol=TOL + @test evaluate(norm(x, 4.5)) ≈ 1.2717 atol=TOL end @testset "rational norm dual norm" begin @@ -199,10 +200,10 @@ TOL = 1e-3 p.constraints += (norm(x, q) <= 1) @test vexity(p) == ConvexVexity() solve!(p) # Solution is -norm(v, q / (q - 1)) - @test isapprox(p.optval, -2.144087, atol=TOL) - @test isapprox(sum(evaluate(x' * v)), -2.144087, atol=TOL) - @test isapprox(evaluate(norm(x, q)), 1, atol=TOL) - @test isapprox(sum(evaluate(x' * v)), -(sum(abs.(v) .^ qs) ^ (1 / qs)), atol=TOL) + @test p.optval ≈ -2.144087 atol=TOL + @test sum(evaluate(x' * v)) ≈ -2.144087 atol=TOL + @test evaluate(norm(x, q)) ≈ 1 atol=TOL + @test sum(evaluate(x' * v)) ≈ -(sum(abs.(v) .^ qs) ^ (1 / qs)) atol=TOL end @testset "rational norm atom sum" begin @@ -221,23 +222,23 @@ TOL = 1e-3 qs = q / (q - 1); # Conjugate denom = sum(abs.(margins).^q)^(1/qs) g = x_opt + A' * (abs.(margins).^(q-1) .* sign.(margins)) / denom - @test isapprox(p.optval, 1.7227, atol=TOL) - @test isapprox(norm(g, 2) ^ 2, 0, atol=TOL) + @test p.optval ≈ 1.7227 atol=TOL + @test norm(g, 2) ^ 2 ≈ 0 atol=TOL end @testset "norm consistent with Base" begin A = randn(4, 4) x = Variable(4, 4) x.value = A - @test isapprox(evaluate(norm(x)), norm(A), atol=TOL) - @test isapprox(evaluate(norm(x, 1)), norm(A, 1), atol=TOL) - @test isapprox(evaluate(norm(x, 2)), norm(A, 2), atol=TOL) - @test isapprox(evaluate(norm(x, Inf)), norm(A, Inf), atol=TOL) - @test isapprox(evaluate(vecnorm(x, 1)), norm(vec(A), 1), atol=TOL) - @test isapprox(evaluate(vecnorm(x, 2)), norm(vec(A), 2), atol=TOL) - @test isapprox(evaluate(vecnorm(x, 7)), norm(vec(A), 7), atol=TOL) - @test isapprox(evaluate(vecnorm(x, Inf)), norm(vec(A), Inf), atol=TOL) + @test evaluate(norm(x)) ≈ opnorm(A) atol=TOL + @test evaluate(norm(x, 1)) ≈ opnorm(A, 1) atol=TOL + @test evaluate(norm(x, 2)) ≈ opnorm(A, 2) atol=TOL + @test evaluate(norm(x, Inf)) ≈ opnorm(A, Inf) atol=TOL + @test evaluate(vecnorm(x, 1)) ≈ norm(vec(A), 1) atol=TOL + @test evaluate(vecnorm(x, 2)) ≈ norm(vec(A), 2) atol=TOL + @test evaluate(vecnorm(x, 7)) ≈ norm(vec(A), 7) atol=TOL + @test evaluate(vecnorm(x, Inf)) ≈ norm(vec(A), Inf) atol=TOL end -end \ No newline at end of file +end diff --git a/test/test_utilities.jl b/test/test_utilities.jl index 2170dc18d..df8f03d38 100644 --- a/test/test_utilities.jl +++ b/test/test_utilities.jl @@ -1,5 +1,5 @@ using Convex -using Base.Test +using Test @testset "Utilities" begin @@ -27,4 +27,4 @@ 
using Base.Test # @fact evaluate(s) --> 21 # end -end \ No newline at end of file +end
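For quick reference, a minimal sketch (not part of the patch) of the Julia 0.7/1.0 idioms the updated test suite above relies on: Random.seed! in place of srand, Diagonal(x) in place of diagm(x), and `@test a ≈ b atol=TOL` in place of isapprox(a, b, atol=TOL). The solver and tolerance below are illustrative assumptions, not requirements of the patch.

# Minimal sketch of the post-0.7 test idioms used throughout the diff above.
# Assumes this branch of Convex.jl and the ECOS solver are installed; values are illustrative.
using Convex, ECOS, Test, Random
import LinearAlgebra: Diagonal

Random.seed!(2)                                   # replaces srand(2)
x = Variable(3)
p = minimize(sum(Diagonal(x)), x == [1, 2, 3])    # Diagonal(x) replaces diagm(x)
solve!(p, ECOSSolver(verbose=0))
@test p.optval ≈ 6 atol=1e-3                      # replaces isapprox(p.optval, 6, atol=1e-3)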