Skip to content

Commit

Permalink
Revert runtests
Browse files Browse the repository at this point in the history
  • Loading branch information
maetshju committed Oct 13, 2020
2 parents f471337 + ff6ddf5 commit 58d0500
Show file tree
Hide file tree
Showing 7 changed files with 38 additions and 13 deletions.
1 change: 1 addition & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
* Added [helper functions](https://github.com/FluxML/Flux.jl/pull/873) `Flux.convfilter` and `Flux.depthwiseconvfilter` to construct weight arrays for convolutions outside of layer constructors so as to not have to depend on the default layers for custom implementations.
* `dropout` function now has a mandatory [active](https://github.com/FluxML/Flux.jl/pull/1263)
keyword argument. The `Dropout` struct (whose behavior is left unchanged) is the recommended choice for common usage.
* Connectionist temporal classification loss added
* and many more fixes and additions...

# v0.10.1 - v0.10.4
Expand Down
1 change: 1 addition & 0 deletions docs/src/models/losses.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,4 +38,5 @@ Flux.Losses.hinge_loss
Flux.Losses.squared_hinge_loss
Flux.Losses.dice_coeff_loss
Flux.Losses.tversky_loss
Flux.Losses.ctc
```
3 changes: 2 additions & 1 deletion src/losses/Losses.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,12 @@ export mse, mae, msle,
tversky_loss,
dice_coeff_loss,
poisson_loss,
hinge_loss, siquared_hinge_loss,
hinge_loss, squared_hinge_loss,
ctc

include("utils.jl")
include("functions.jl")
include("ctc.jl")
if CUDA.functional() include("ctc-gpu.jl") end

end #module
14 changes: 14 additions & 0 deletions src/losses/ctc.jl
Original file line number Diff line number Diff line change
Expand Up @@ -113,22 +113,36 @@ function ctc_(ŷ, y)

# Fill bottom-right corner so bounding errors can be avoided
# by starting `u` at `U′-1`
<<<<<<< HEAD
β[T,U′] = typedZero
β[T,U′-1] = typedZero

# start at T-1 so that β(T, u) = log(0) for all u < U′ - 1
for t=(T-1):-1:1
for u=U′:-1:1
if u > 2t || u > U′ + 1
=======
β[T,U′] = 0.0
β[T,U′-1] = 0.0

for t=T:-1:1
for u=U′:-1:1
if t == T && u >= U′ - 1
β[t,u] = 0.0
elseif t == T && u < U′ - 1
>>>>>>> ff6ddf5fae0e3705814bea8e318717a3c6bb0343
continue
end
<<<<<<< HEAD

idx = u+2
idx -= z′[u] == blank || (idx < U′ && z′[u+2] == z′[u])
idx = min(idx, U′)

v = [β[t+1,i] + ŷ[z′[i], t+1] for i=u:idx]
β[t, u] = logsum(v)
=======
>>>>>>> ff6ddf5fae0e3705814bea8e318717a3c6bb0343
end
end

Expand Down
23 changes: 16 additions & 7 deletions test/ctc-gpu.jl
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
using Test
using Flux
using Flux: ctc_
using Flux.Losses: ctc, ctc_
using Zygote: gradient
using LinearAlgebra
using CuArrays
using CUDA
using Statistics

# Custom function to check numerical gradient of ctc loss,
# based on `ngradient` in `Tracker.jl`
# based on `ngradient` in `Zygote.jl`
#
# Needs to check loss as defined at a particular time step
# related to the change in x because slight deviations in
Expand Down Expand Up @@ -40,16 +40,16 @@ end
y_cu = CuArray(y)

g1 = gradient(ctc, x_cu, y_cu)[1]
g1 = g1 |> collect
g1 = collect(g1)

g2 = ctc_ngradient(x, y)[1]
g2 = ctc_ngradient(x_cu, y_cu)[1] |> collect

@test all(isapprox.(g1, g2, rtol=1e-5, atol=1e-5))

# test that GPU loss matches CPU implementation

l1 = Flux.ctc_(x_cu, y_cu)[1]
l2 = Flux.ctc_(x, y)[1]
l1 = Flux.Losses.ctc_(x_cu, y_cu)[1]
l2 = Flux.Losses.ctc_(Float32.(x), y)[1]

@test all(isapprox.(l1, l2, rtol=1e-5, atol=1e-5))

Expand All @@ -64,5 +64,14 @@ end
ghat = gradient(ctc, x_cu, y_cu)[1] |> collect

@test all(isapprox.(g, ghat, rtol=1e-5, atol=1e-5))

x_cu = [-3. 12. 8. 15.; 4. 20. -2. 20.; 8. -33. 6. 5.] |> CuArray
y_cu = [1 1 0 0; 0 0 1 1; 0 0 0 0] |> CuArray
@test ctc(x_cu, y_cu) ≈ 8.02519869363453

g = [-2.29294774655333e-06 -0.999662657278862 1.75500863563993e-06 0.00669284889063; 0.017985914969696 0.999662657278861 -1.9907078755387e-06 -0.006693150917307; -0.01798362202195 -2.52019580677916e-20 2.35699239251042e-07 3.02026677058789e-07]

ghat = gradient(ctc, x_cu, y_cu)[1] |> collect
@test all(isapprox.(g, ghat, rtol=1e-5, atol=1e-5))

end
2 changes: 1 addition & 1 deletion test/ctc.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
using Test
using Flux
using Flux: ctc_
using Flux.Losses: ctc, ctc_
using Zygote: gradient
using LinearAlgebra

Expand Down
7 changes: 3 additions & 4 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ using Random, Statistics, LinearAlgebra
using IterTools: ncycle

Random.seed!(0)
#=

@testset "Utils" begin
include("utils.jl")
end
Expand All @@ -21,13 +21,13 @@ end
@testset "Data" begin
include("data.jl")
end
=#

@testset "Losses" begin
include("losses.jl")
include("ctc.jl")
if Flux.use_cuda[] include("ctc-gpu.jl") end
end
#=

@testset "Layers" begin
include("layers/basic.jl")
include("layers/normalisation.jl")
Expand All @@ -50,4 +50,3 @@ end
doctest(Flux)
end
end
=#

0 comments on commit 58d0500

Please sign in to comment.