Merge #1239
1239: add adaptive pool r=CarloLucibello a=dnabanita7

I have added `AdaptiveMaxPool` and `AdaptiveMeanPool` so that we can do the same thing as the [PyTorch implementation](darsnack/FluxModels.jl#1 (comment)). cc @darsnack
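
For illustration, a minimal usage sketch (the `Chain`, `Conv`, and input sizes below are hypothetical, not part of this PR): whatever the spatial size of the input, the adaptive layers pool down to the requested output size, much like PyTorch's `AdaptiveMaxPool2d`.

```julia
using Flux

# Adaptive pooling fixes the *output* size instead of the window size.
m = Chain(Conv((3, 3), 3 => 8, relu), AdaptiveMaxPool((4, 4)))

x = randn(Float32, 32, 32, 3, 1)  # WHCN layout: 32×32 image, 3 channels, batch of 1
size(m(x))                        # (4, 4, 8, 1)

y = randn(Float32, 48, 48, 3, 1)  # a larger input…
size(m(y))                        # …still (4, 4, 8, 1)
```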

### PR 

- [x] Tests are added
- [x] Entry in NEWS.md
- [x] Documentation, if applicable
- [ ] Final review from `@MikeInnes` or `@dhairyagandhi96` (for API changes).

### Flux issue linking
[Flux#1224](#1224)
### MLH issue linking
[0.3.x-projects#26](https://github.com/MLH-Fellowship/0.3.x-projects/issues/26)

Co-authored-by: Nabanita Dash <dashnabanita@gmail.com>
bors[bot] and dnabanita7 authored Jun 30, 2020
2 parents 318ef9d + 72f5d14 commit 4182b48
Showing 6 changed files with 67 additions and 3 deletions.
1 change: 1 addition & 0 deletions NEWS.md
@@ -2,6 +2,7 @@
* Change to `DataLoader`'s constructor [https://github.com/FluxML/Flux.jl/pull/1152]
* Use `DataLoader` with `NamedTuple`s, so that tensors can be accessed by name [https://github.com/FluxML/Flux.jl/pull/1221].
* Error if `Dense` layers' weights and biases are not arrays [https://github.com/FluxML/Flux.jl/pull/1218].
* Add adaptive pooling layers `AdaptiveMaxPool` and `AdaptiveMeanPool` [https://github.com/FluxML/Flux.jl/pull/1239].

# v0.10.5
* Add option for [same padding](https://github.com/FluxML/Flux.jl/pull/901) to conv and pooling layers by setting `pad=SamePad()`.
2 changes: 2 additions & 0 deletions docs/src/models/layers.md
@@ -13,8 +13,10 @@ These layers are used to build convolutional neural networks (CNNs).

```@docs
Conv
AdaptiveMaxPool
MaxPool
GlobalMaxPool
AdaptiveMeanPool
MeanPool
GlobalMeanPool
DepthwiseConv
7 changes: 4 additions & 3 deletions src/Flux.jl
@@ -12,9 +12,10 @@ using Zygote: Params, @adjoint, gradient, pullback, @nograd
export gradient

export Chain, Dense, Maxout, RNN, LSTM, GRU, SamePad, Conv, CrossCor, ConvTranspose,
-  GlobalMaxPool, GlobalMeanPool, MaxPool, MeanPool, flatten,
-  DepthwiseConv, Dropout, AlphaDropout, LayerNorm, BatchNorm, InstanceNorm, GroupNorm,
-  SkipConnection, params, fmap, cpu, gpu, f32, f64, testmode!, trainmode!
+  AdaptiveMaxPool, AdaptiveMeanPool, GlobalMaxPool, GlobalMeanPool, MaxPool,
+  MeanPool, flatten, DepthwiseConv, Dropout, AlphaDropout, LayerNorm, BatchNorm,
+  InstanceNorm, GroupNorm, SkipConnection, params, fmap, cpu, gpu, f32, f64,
+  testmode!, trainmode!

include("optimise/Optimise.jl")
using .Optimise
48 changes: 48 additions & 0 deletions src/layers/conv.jl
@@ -480,6 +480,54 @@ end
outdims(l::CrossCor, isize) =
  output_size(DenseConvDims(_paddims(isize, size(l.weight)), size(l.weight); stride = l.stride, padding = l.pad, dilation = l.dilation))

"""
AdaptiveMaxPool(out)
Adaptive max pooling layer. `out` is the desired output size (batch and channel dimension excluded).
"""
struct AdaptiveMaxPool{S, O}
out::NTuple{O, Int}
AdaptiveMaxPool(out::NTuple{O, Int}) where O = new{O + 2, O}(out)
end

function (a::AdaptiveMaxPool{S})(x::AbstractArray{T, S}) where {S, T}
  insize = size(x)[1:end-2]
  outsize = a.out
  # Choose the stride by integer division and a kernel size that covers the
  # remainder, so the pooled output has exactly `outsize` entries per dimension.
  stride = insize .÷ outsize
  k = insize .- (outsize .- 1) .* stride
  pad = 0
  pdims = PoolDims(x, k; padding=pad, stride=stride)
  return maxpool(x, pdims)
end

function Base.show(io::IO, a::AdaptiveMaxPool)
  print(io, "AdaptiveMaxPool(", a.out, ")")
end

"""
AdaptiveMeanPool(out)
Adaptive mean pooling layer. `out` is the desired output size (batch and channel dimension excluded).
"""
struct AdaptiveMeanPool{S, O}
out::NTuple{O, Int}
AdaptiveMeanPool(out::NTuple{O, Int}) where O = new{O + 2, O}(out)
end

function (a::AdaptiveMeanPool{S})(x::AbstractArray{T, S}) where {S, T}
  insize = size(x)[1:end-2]
  outsize = a.out
  # Same window arithmetic as AdaptiveMaxPool above.
  stride = insize .÷ outsize
  k = insize .- (outsize .- 1) .* stride
  pad = 0
  pdims = PoolDims(x, k; padding=pad, stride=stride)
  return meanpool(x, pdims)
end

function Base.show(io::IO, a::AdaptiveMeanPool)
  print(io, "AdaptiveMeanPool(", a.out, ")")
end
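
# Illustrative note (not part of the diff): a worked instance of the window
# arithmetic above. Pooling a 10×10 input down to 5×5 gives
#   stride = (10, 10) .÷ (5, 5)                  == (2, 2)
#   k      = (10, 10) .- ((5, 5) .- 1) .* (2, 2) == (2, 2)
# i.e. the layer reduces to ordinary 2×2 pooling with stride 2, which is
# exactly what the tests below compare against.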

"""
GlobalMaxPool()
3 changes: 3 additions & 0 deletions test/cuda/layers.jl
@@ -52,6 +52,9 @@ gradtest("Conv", conv_layers, r, (2,2), 1=>3)
pooling_layers = [MaxPool, MeanPool]
gradtest("Pooling", pooling_layers, r, (2,2))

adaptive_pooling_layers = [AdaptiveMaxPool, AdaptiveMeanPool]
gradtest("AdaptivePooling", adaptive_pooling_layers, r, (7,7))

dropout_layers = [Dropout, AlphaDropout]
gradtest("Dropout", dropout_layers, r, 0.5f0)

9 changes: 9 additions & 0 deletions test/layers/conv.jl
@@ -4,6 +4,15 @@ using Flux: gradient

@testset "Pooling" begin
  x = randn(Float32, 10, 10, 3, 2)
  y = randn(Float32, 20, 20, 3, 2)
  ampx = AdaptiveMaxPool((5, 5))
  @test ampx(x) == maxpool(x, PoolDims(x, 2))
  ampx = AdaptiveMeanPool((5, 5))
  @test ampx(x) == meanpool(x, PoolDims(x, 2))
  ampy = AdaptiveMaxPool((10, 5))
  @test ampy(y) == maxpool(y, PoolDims(y, (2, 4)))
  ampy = AdaptiveMeanPool((10, 5))
  @test ampy(y) == meanpool(y, PoolDims(y, (2, 4)))
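  # Illustrative note: with the default stride == kernel, PoolDims(x, 2) is
  # 2×2 stride-2 pooling and PoolDims(y, (2, 4)) is (2, 4) pooling with stride
  # (2, 4), which is exactly the window arithmetic the adaptive layers pick
  # for these input/output sizes.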
  gmp = GlobalMaxPool()
  @test size(gmp(x)) == (1, 1, 3, 2)
  gmp = GlobalMeanPool()
