diff --git a/src/api/derivative.jl b/src/api/derivative.jl
index b918f203..1ff285ce 100644
--- a/src/api/derivative.jl
+++ b/src/api/derivative.jl
@@ -35,9 +35,7 @@
 end
 end
 end
-# The below code generation enables better type inferencing in the event that
-# `f` is a type (see https://github.com/JuliaDiff/ForwardDiff.jl/issues/54).
-closure_deriv_def = quote
+function derivative{A}(f, ::Type{A}=Void; mutates=false)
     if mutates
         d!(output, x::Number) = ForwardDiff.derivative!(output, f, x, A)
         return d!
@@ -46,8 +44,3 @@ closure_deriv_def = quote
         return d
     end
 end
-
-@eval begin
-    derivative{A}(f, ::Type{A}=Void; mutates=false) = $closure_deriv_def
-    derivative{A,f}(::Type{f}, ::Type{A}=Void; mutates=false) = $closure_deriv_def
-end
diff --git a/src/api/jacobian.jl b/src/api/jacobian.jl
index d7f2dddf..39e9e605 100644
--- a/src/api/jacobian.jl
+++ b/src/api/jacobian.jl
@@ -48,8 +48,8 @@ function jacobian{A}(f, ::Type{A}=Void;
     # appropriate closure
     if output_length > 0
         output_cache = ForwardDiffCache()
-        function newf{G<:GradientNumber}(x::Vector{G})
-            output = get_workvec!(output_cache, G, output_length)
+        newf = (x::Vector) -> begin
+            output = get_workvec!(output_cache, eltype(x), output_length)
             f(output, x)
             return output
         end
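The two hunks above replace `@eval`-driven code generation with ordinary runtime closures; the removed comment explains the generated methods existed only to improve type inference when `f` is a type (JuliaDiff/ForwardDiff.jl#54). In `jacobian`, the parametric inner method `newf{G<:GradientNumber}` becomes an untyped anonymous function, since 0.4-era anonymous functions cannot carry method type parameters; `eltype(x)` recovers the `GradientNumber` element type at call time instead. A minimal usage sketch of the closure-returning API, assuming the 0.4-era interface shown in this patch (`square` is a hypothetical target function, not part of the change):

```julia
# Sketch only: `d` and `d!` mirror the closures built inside `derivative`
# above; `square` is a made-up example function.
using ForwardDiff

square(x) = x^2

d = ForwardDiff.derivative(square)                 # non-mutating: d(x::Number)
d(3.0)                                             # 6.0

d! = ForwardDiff.derivative(square; mutates=true)  # mutating: d!(output, x::Number)
```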
diff --git a/test/test_behaviors.jl b/test/test_behaviors.jl
index 77ea6bff..53ef2bc5 100644
--- a/test/test_behaviors.jl
+++ b/test/test_behaviors.jl
@@ -53,61 +53,57 @@ j = x -> ForwardDiff.jacobian(g, x, chunk_size=2)/2 # jacobian in chunk_mode
 # Promote type Issues #
 #######################
 
-# Test overloading of `promote_array_type`
-#-------------------------------------------------------#
+# Test overloading of `promote_array_type` #
+#------------------------------------------#
 promtyp = Base.promote_array_type(Base.DotAddFun(), ForwardDiff.ForwardDiffNumber{2, Float64,
                                   Tuple{Float64, Float64}}, Float64)
-fdiffnum = ForwardDiff.ForwardDiffNumber{2,Float64,Tuple{Float64,Float64}} 
+fdiffnum = ForwardDiff.ForwardDiffNumber{2,Float64,Tuple{Float64,Float64}}
 @test promtyp <: fdiffnum
 
 promtyp = Base.promote_array_type(Base.DotAddFun(), ForwardDiff.GradientNumber{2, Float64,
-                                                    Tuple{Float64, Float64}}, Float64)
-gradnum = ForwardDiff.GradientNumber{2,Float64,Tuple{Float64,Float64}} 
+                                  Tuple{Float64, Float64}}, Float64)
+gradnum = ForwardDiff.GradientNumber{2,Float64,Tuple{Float64,Float64}}
 @test promtyp <: gradnum
 
 promtyp = Base.promote_array_type(Base.DotAddFun(), ForwardDiff.HessianNumber{2, Float64,
-                                                    Tuple{Float64, Float64}}, Float64)
-hessnum = ForwardDiff.HessianNumber{2,Float64,Tuple{Float64,Float64}} 
+                                  Tuple{Float64, Float64}}, Float64)
+hessnum = ForwardDiff.HessianNumber{2,Float64,Tuple{Float64,Float64}}
 @test promtyp <: hessnum
 
 promtyp = Base.promote_array_type(Base.DotAddFun(), ForwardDiff.TensorNumber{2, Float64,
-                                                    Tuple{Float64, Float64}}, Float64)
-tensnum = ForwardDiff.TensorNumber{2,Float64,Tuple{Float64,Float64}} 
+                                  Tuple{Float64, Float64}}, Float64)
+tensnum = ForwardDiff.TensorNumber{2,Float64,Tuple{Float64,Float64}}
 @test promtyp <: tensnum
-
-# functions involving `.-`, `.+`, etc. #
-#-------------------------------------------------------#
-a = ones(4)
+# Arithmetic element-wise functions #
+#-----------------------------------#
 
-## Test jacobian
+N = 4
+a = ones(N)
 jac0 = reshape(vcat([[zeros(N*(i-1)); a; zeros(N^2-N*i)] for i = 1:N]...), N^2, N)
-for op = [:-, :+, :./, :.*]
-    @eval fn(x) = [($op)(x[1], a); ($op)(x[2], a); ($op)(x[3], a); ($op)(x[4], a)]
-    jac = ForwardDiff.jacobian(fn, a)
+for op in (-, +, .-, .+, ./, .*)
+
+    f = x -> [op(x[1], a); op(x[2], a); op(x[3], a); op(x[4], a)]
+
+    # jacobian
+    jac = ForwardDiff.jacobian(f, a)
     @test reduce(&, -jac + jac0 .== 0)
-end
 
-## Test hessian
-for op = [:-, :+, :./, :.*]
-    @eval fn(x) = sum([($op)(x[1], a); ($op)(x[2], a); ($op)(x[3], a); ($op)(x[4], a)])
-    hess = ForwardDiff.hessian(fn, a)
+    f = x -> sum([op(x[1], a); op(x[2], a); op(x[3], a); op(x[4], a)])
+
+    # hessian
+    hess = ForwardDiff.hessian(f, a)
     @test reduce(&, -hess + zeros(N, N) .== 0)
-end
 
-## Test tensor
-for op = [:-, :+, :./, :.*]
-    @eval fn(x) = sum([($op)(x[1], a); ($op)(x[2], a); ($op)(x[3], a); ($op)(x[4], a)])
-    tens = ForwardDiff.tensor(fn, a)
+    # tensor
+    tens = ForwardDiff.tensor(f, a)
     @test reduce(&, -tens + zeros(N, N, N) .== 0)
 end
-
-
diff --git a/test/test_deprecated.jl b/test/test_deprecated.jl
index d71b1f3d..e42b2ecd 100644
--- a/test/test_deprecated.jl
+++ b/test/test_deprecated.jl
@@ -1,8 +1,8 @@
 T = Float64
 dummy_fsym = :sin
-testexpr = :(sin(a) + exp(b) - tan(c) * cos(d)) 
+testexpr = :(sin(a) + exp(b) - tan(c) * cos(d))
 
-@eval function testf(x::Vector)
+testf = @eval (x::Vector) -> begin
     a,b,c,d = x
     return $testexpr
 end
@@ -31,7 +31,7 @@ testout = Array(T, M, N)
 testexpr_jac = [:(sin(a) + cos(b)), :(-tan(c)), :(4 * exp(d)), :(cos(b)^5), :(sin(a))]
 testresult = jacob_test_result(testexpr_jac, testx)
 
-@eval function jactestf(x::Vector)
+jactestf = @eval (x::Vector) -> begin
     a,b,c,d = x
     return [$(testexpr_jac...)]
 end
@@ -49,10 +49,10 @@ jacf = forwarddiff_jacobian(jactestf, T)
 N = 6
 testx = hess_test_x(dummy_fsym, N)
 testout = Array(T, N, N)
-testexpr_hess = :(sin(a) + exp(b) - tan(c) * cos(l) + sin(m) * exp(r)) 
+testexpr_hess = :(sin(a) + exp(b) - tan(c) * cos(l) + sin(m) * exp(r))
 testresult = hess_test_result(testexpr_hess, testx)
 
-@eval function hess_testf(x::Vector)
+hess_testf = @eval (x::Vector) -> begin
     a,b,c,l,m,r = x
     return $testexpr_hess
 end
diff --git a/test/test_derivatives.jl b/test/test_derivatives.jl
index a1113351..8cdf0fc2 100644
--- a/test/test_derivatives.jl
+++ b/test/test_derivatives.jl
@@ -32,8 +32,8 @@ end
 for fsym in ForwardDiff.auto_defined_unary_funcs
     func_expr = :($(fsym)(x) + 4^$(fsym)(x) - x * $(fsym)(x))
     deriv = Calculus.differentiate(func_expr)
-    try 
-        @eval begin 
+    try
+        @eval begin
             x = deriv_test_x($fsym)
             testdf = x -> $func_expr
             val_result = testdf(x)
diff --git a/test/test_gradients.jl b/test/test_gradients.jl
index e4606226..5f238f03 100644
--- a/test/test_gradients.jl
+++ b/test/test_gradients.jl
@@ -279,7 +279,7 @@ chunk_sizes = (ForwardDiff.default_chunk_size, 1, Int(N/2), N)
 for fsym in map(first, Calculus.symbolic_derivatives_1arg())
     testexpr = :($(fsym)(a) + $(fsym)(b) - $(fsym)(c) * $(fsym)(d))
 
-    @eval function testf(x::Vector)
+    testf = @eval (x::Vector) -> begin
         a,b,c,d = x
         return $testexpr
     end
diff --git a/test/test_hessians.jl b/test/test_hessians.jl
index cf07a5f8..6ede9266 100644
--- a/test/test_hessians.jl
+++ b/test/test_hessians.jl
@@ -268,7 +268,7 @@ chunk_sizes = (ForwardDiff.default_chunk_size, 2, Int(N/2), N)
 for fsym in ForwardDiff.auto_defined_unary_hess_funcs
     testexpr = :($(fsym)(a) + $(fsym)(b) - $(fsym)(c) * $(fsym)(l) - $(fsym)(m) + $(fsym)(r))
 
-    @eval function testf(x::Vector)
+    testf = @eval (x::Vector) -> begin
         a,b,c,l,m,r = x
         return $testexpr
     end
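The recurring change in these test files is the same everywhere: `@eval function testf(x::Vector) ... end`, which redefines one named generic function on every loop iteration, becomes `testf = @eval (x::Vector) -> begin ... end`, which builds a fresh anonymous function and binds it to a local. A self-contained sketch of the pattern under 0.4-era semantics (the symbol list here is illustrative, not the suite's actual function lists; Julia 0.6+ would additionally need `Base.invokelatest` to call the freshly eval'd closure):

```julia
# Illustrative only: splice a generated expression into a new anonymous
# function on each iteration via @eval, instead of redefining `testf`.
for fsym in (:sin, :cos, :exp)
    testexpr = :($(fsym)(a) + $(fsym)(b))
    testf = @eval (x::Vector) -> begin
        a, b = x
        return $testexpr
    end
    println(testf([1.0, 2.0]))
end
```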
diff --git a/test/test_jacobians.jl b/test/test_jacobians.jl
index ffc70ba6..f9d1834d 100644
--- a/test/test_jacobians.jl
+++ b/test/test_jacobians.jl
@@ -40,12 +40,12 @@ for fsym in ForwardDiff.auto_defined_unary_funcs
                  :($(fsym)(b)^5),
                  :($(fsym)(a))]
 
-    @eval function testf(x::Vector)
+    testf = @eval (x::Vector) -> begin
         a,b,c,d = x
         return [$(testexprs...)]
     end
 
-    @eval function testf!(output::Vector, x::Vector)
+    testf! = @eval (output::Vector, x::Vector) -> begin
         a,b,c,d = x
         output[1] = $(testexprs[1])
         output[2] = $(testexprs[2])
@@ -59,7 +59,7 @@ for fsym in ForwardDiff.auto_defined_unary_funcs
     testx = jacob_test_x(fsym, N)
     val_result = testf(testx)
     jacob_result = jacob_test_result(testexprs, testx)
-    
+
     # Non-AllResults
     test_jacob = (testout) -> @test_approx_eq testout jacob_result
 
@@ -67,7 +67,7 @@ for fsym in ForwardDiff.auto_defined_unary_funcs
     test_jacob(testout)
 
     test_jacob(ForwardDiff.jacobian(testf, testx; chunk_size=chunk))
-    
+
     jacf! = ForwardDiff.jacobian(testf; mutates=true, chunk_size=chunk)
     testout = similar(testout)
     jacf!(testout, testx)
@@ -123,4 +123,4 @@ for fsym in ForwardDiff.auto_defined_unary_funcs
             throw(err)
         end
     end
-end
\ No newline at end of file
+end
diff --git a/test/test_tensors.jl b/test/test_tensors.jl
index 6ad6ae8c..0d6f608d 100644
--- a/test/test_tensors.jl
+++ b/test/test_tensors.jl
@@ -195,7 +195,7 @@ rand_tens = TensorNumber(rand_hess, rand_tensvec)
 # Multiplication/Division #
 #-------------------------#
 function tens_approx_eq(a::TensorNumber, b::TensorNumber)
-    eps = 1e-9
+    eps = 1e-8
    try
         @test_approx_eq_eps value(a) value(b) eps
         @test_approx_eq_eps collect(grad(a)) collect(grad(b)) eps
@@ -285,7 +285,7 @@ end
 for fsym in ForwardDiff.auto_defined_unary_tens_funcs
     testexpr = :($(fsym)(a) + $(fsym)(b) - $(fsym)(c) * $(fsym)(d))
 
-    @eval function testf(x::Vector)
+    testf = @eval (x::Vector) -> begin
         a,b,c,d = x
         return $testexpr
     end
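For reference, the in-place round trip exercised by test_jacobians.jl above, sketched with a hypothetical target `g` (the tests use the generated `testf` plus a `chunk` value from their chunk-size loop; `Array(T, dims...)` is the 0.4-era constructor the tests themselves use):

```julia
# Sketch of the mutating-jacobian pattern; `g` is a made-up function.
using ForwardDiff

g(x) = [x[1]^2; x[1]*x[2]]

jacg! = ForwardDiff.jacobian(g; mutates=true)  # returns jacg!(output, x)
out = Array(Float64, 2, 2)
jacg!(out, [1.0, 2.0])                         # fills `out` with the 2x2 Jacobian
```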