diff --git a/src/NNlib.jl b/src/NNlib.jl
index 6088746d7..182d00faa 100644
--- a/src/NNlib.jl
+++ b/src/NNlib.jl
@@ -5,13 +5,19 @@
 using Requires
 using ChainRulesCore
 import ChainRulesCore: rrule
 using Base.Broadcast: broadcasted
+using Base.Threads
+using Statistics
 using Statistics: mean
+using LinearAlgebra
+using LinearAlgebra: BlasFloat, Transpose, Adjoint, AdjOrTransAbsMat
+using LinearAlgebra.BLAS: libblas, BlasInt, @blasfunc
 
 const IntOrIntTuple = Union{Integer, NTuple{N,<:Integer} where N}
 const Numeric = Union{AbstractArray{<:T}, T} where {T<:Number}
 
 # Include APIs
 include("dim_helpers.jl")
+export ConvDims, DenseConvDims, PoolDims, DepthwiseConvDims
 
 is_nnpack_available() = false
@@ -27,14 +33,46 @@ is_nnpack_available() = false
 end
 
 include("activations.jl")
+for f in ACTIVATIONS
+    @eval export $(f)
+end
+export sigmoid, hardsigmoid, logsigmoid, thresholdrelu # Aliases
+
 include("softmax.jl")
+export softmax, softmax!, ∇softmax, ∇softmax!, logsoftmax,
+    logsoftmax!, ∇logsoftmax, ∇logsoftmax!, logsumexp
+
+include("batched/batchedadjtrans.jl")
 include("batched/batchedmul.jl")
+export batched_mul, batched_mul!, ⊠, batched_vec,
+    batched_transpose, batched_adjoint
+
 include("gemm.jl")
+export grid_sample, ∇grid_sample
+
 include("conv.jl")
+export conv, conv!, ∇conv_data, ∇conv_data!, ∇conv_filter,
+    ∇conv_filter!, depthwiseconv, depthwiseconv!,
+    ∇depthwiseconv_data, ∇depthwiseconv_data!,
+    ∇depthwiseconv_filter, ∇depthwiseconv_filter!
+
 include("conv_bias_act.jl")
+export conv_bias_act, conv_bias_act!
+
 include("pooling.jl")
+export maxpool, maxpool!, meanpool, meanpool!,
+    ∇maxpool, ∇maxpool!, ∇meanpool, ∇meanpool!
+
 include("padding.jl")
+export pad_constant, pad_repeat, pad_reflect, pad_zeros
+
 include("upsample.jl")
+export upsample_nearest, ∇upsample_nearest,
+    upsample_linear, ∇upsample_linear,
+    upsample_bilinear, ∇upsample_bilinear,
+    upsample_trilinear, ∇upsample_trilinear,
+    pixel_shuffle
+
 include("gather.jl")
 include("scatter.jl")
 include("utils.jl")
diff --git a/src/activations.jl b/src/activations.jl
index 9e6f7e908..9829d2386 100644
--- a/src/activations.jl
+++ b/src/activations.jl
@@ -11,13 +11,6 @@ ACTIVATIONS = [
     :tanh_fast, :sigmoid_fast,
 ]
 
-for f in ACTIVATIONS
-    @eval export $(f)
-end
-
-# Aliases
-export sigmoid, hardsigmoid, logsigmoid, thresholdrelu
-
 # of type float (to allow for integer inputs)
 oftf(x, y) = oftype(float(x), y)
 
diff --git a/src/batched/batchedadjtrans.jl b/src/batched/batchedadjtrans.jl
index aac227676..8c2417645 100644
--- a/src/batched/batchedadjtrans.jl
+++ b/src/batched/batchedadjtrans.jl
@@ -1,5 +1,3 @@
-using LinearAlgebra
-
 import Base: -
 import Adapt: adapt_structure, adapt
 
diff --git a/src/batched/batchedmul.jl b/src/batched/batchedmul.jl
index 490584882..7e5e7fd72 100644
--- a/src/batched/batchedmul.jl
+++ b/src/batched/batchedmul.jl
@@ -1,11 +1,3 @@
-
-export batched_mul, batched_mul!, ⊠, batched_vec
-export batched_transpose, batched_adjoint
-
-include("./batchedadjtrans.jl")
-
-using LinearAlgebra: BlasFloat, Transpose, Adjoint, AdjOrTransAbsMat
-
 _unbatch(A) = A
 _unbatch(A::BatchedAdjOrTrans) = parent(A)
 
diff --git a/src/conv.jl b/src/conv.jl
index b940e8477..4aeb3b063 100644
--- a/src/conv.jl
+++ b/src/conv.jl
@@ -1,7 +1,3 @@
-export conv, conv!, ∇conv_data, ∇conv_data!, ∇conv_filter, ∇conv_filter!, depthwiseconv,
-    depthwiseconv!, ∇depthwiseconv_data, ∇depthwiseconv_data!, ∇depthwiseconv_filter,
-    ∇depthwiseconv_filter!
-
 ## Convolution API
 #
 #   We provide the following generic methods, for 3d, 4d, and 5d tensors, calculating 1d,
diff --git a/src/conv_bias_act.jl b/src/conv_bias_act.jl
index adaf3f605..0c2b63c5e 100644
--- a/src/conv_bias_act.jl
+++ b/src/conv_bias_act.jl
@@ -1,5 +1,3 @@
-export conv_bias_act, conv_bias_act!
-
 function conv_bias_act(x::AbstractArray{xT,N}, w::AbstractArray{wT,N}, cdims::ConvDims,
                        b::AbstractArray{bT,N}, σ=identity; kwargs...) where {xT, wT, bT, N}
     y = similar(x, promote_type(xT, wT, bT), output_size(cdims)..., channels_out(cdims), size(x,N))
diff --git a/src/dim_helpers/ConvDims.jl b/src/dim_helpers/ConvDims.jl
index c9023164e..1b1c3f271 100644
--- a/src/dim_helpers/ConvDims.jl
+++ b/src/dim_helpers/ConvDims.jl
@@ -1,5 +1,3 @@
-export ConvDims
-
 """
     ConvDims
 
diff --git a/src/dim_helpers/DenseConvDims.jl b/src/dim_helpers/DenseConvDims.jl
index 8ceda7abe..a37eb9af6 100644
--- a/src/dim_helpers/DenseConvDims.jl
+++ b/src/dim_helpers/DenseConvDims.jl
@@ -1,5 +1,3 @@
-export DenseConvDims
-
 """
     DenseConvDims
 
diff --git a/src/dim_helpers/DepthwiseConvDims.jl b/src/dim_helpers/DepthwiseConvDims.jl
index 6bbb8b926..8163a3def 100644
--- a/src/dim_helpers/DepthwiseConvDims.jl
+++ b/src/dim_helpers/DepthwiseConvDims.jl
@@ -1,5 +1,3 @@
-export DepthwiseConvDims
-
 """
     DepthwiseConvDims
 
diff --git a/src/dim_helpers/PoolDims.jl b/src/dim_helpers/PoolDims.jl
index aacfc4ae1..e7219ff72 100644
--- a/src/dim_helpers/PoolDims.jl
+++ b/src/dim_helpers/PoolDims.jl
@@ -1,5 +1,3 @@
-export PoolDims
-
 """
     PoolDims(x_size::NTuple{M}, k::Union{NTuple{L, Int}, Int};
              stride=k, padding=0, dilation=1) where {M, L}
diff --git a/src/functions.jl b/src/functions.jl
index 77a7bf133..4a660cec1 100644
--- a/src/functions.jl
+++ b/src/functions.jl
@@ -1,4 +1,3 @@
-using NNlib: sigmoid
 """
     glu(x, dim = 1)
 
diff --git a/src/gemm.jl b/src/gemm.jl
index 07ef8616a..10cb49f62 100644
--- a/src/gemm.jl
+++ b/src/gemm.jl
@@ -1,9 +1,6 @@
 ## Low level gemm! call with pointers
 ## Borrowed from Knet.jl, adapted for compile-time constants
 
-using LinearAlgebra
-using LinearAlgebra.BLAS: libblas, BlasInt, @blasfunc
-
 using Compat: get_num_threads, set_num_threads # needs Compat 3.13, for any Julia < 1.6
 
 """
diff --git a/src/impl/conv_direct.jl b/src/impl/conv_direct.jl
index c44b36b37..f2b6ff60e 100644
--- a/src/impl/conv_direct.jl
+++ b/src/impl/conv_direct.jl
@@ -1,5 +1,4 @@
 ## This file contains direct Julia implementations of 2d and 3d convolutions
-using Base.Threads
 
 # Helper functions for restricting x/w overreach
 function clamp_lo(x, w)
diff --git a/src/impl/pooling_direct.jl b/src/impl/pooling_direct.jl
index af2c6711b..566406eb2 100644
--- a/src/impl/pooling_direct.jl
+++ b/src/impl/pooling_direct.jl
@@ -1,5 +1,3 @@
-using Statistics
-
 # Pooling is so similar, we abstract over meanpooling and maxpooling, simply replacing
 # the inner loop operation and a few initialization parameters.
 for name in (:max, :mean)
diff --git a/src/padding.jl b/src/padding.jl
index bf5236991..79685e608 100644
--- a/src/padding.jl
+++ b/src/padding.jl
@@ -1,5 +1,3 @@
-export pad_constant, pad_repeat, pad_reflect, pad_zeros
-
 """
     pad_zeros(x, pad::Tuple; [dims])
     pad_zeros(x, pad::Int; [dims])
diff --git a/src/pooling.jl b/src/pooling.jl
index 538ff349f..f13390342 100644
--- a/src/pooling.jl
+++ b/src/pooling.jl
@@ -1,5 +1,3 @@
-export maxpool, maxpool!, meanpool, meanpool!, ∇maxpool, ∇maxpool!, ∇meanpool, ∇meanpool!
-
 ## Pooling API
 #
 #  We provide the following generic methods, for 3d, 4d, and 5d tensors, calculating 1d,
diff --git a/src/sampling.jl b/src/sampling.jl
index bbce62123..f3de51660 100644
--- a/src/sampling.jl
+++ b/src/sampling.jl
@@ -1,5 +1,3 @@
-export grid_sample, ∇grid_sample
-
 @inline in_bounds(h, w, H, W) = 1 ≤ h ≤ H && 1 ≤ w ≤ W
 # Borders are considered out-of-bounds for gradient.
 @inline clip_coordinate(coordinate, dim_size) = min(dim_size, max(1, coordinate))
diff --git a/src/softmax.jl b/src/softmax.jl
index 73234c81c..89807c4c6 100644
--- a/src/softmax.jl
+++ b/src/softmax.jl
@@ -1,13 +1,3 @@
-export softmax,
-    softmax!,
-    ∇softmax,
-    ∇softmax!,
-    logsoftmax,
-    logsoftmax!,
-    ∇logsoftmax,
-    ∇logsoftmax!,
-    logsumexp
-
 """
     softmax(x; dims = 1)
 
diff --git a/src/upsample.jl b/src/upsample.jl
index 80f2389e9..74c856213 100644
--- a/src/upsample.jl
+++ b/src/upsample.jl
@@ -1,9 +1,3 @@
-export upsample_nearest, ∇upsample_nearest,
-    upsample_linear, ∇upsample_linear,
-    upsample_bilinear, ∇upsample_bilinear,
-    upsample_trilinear, ∇upsample_trilinear,
-    pixel_shuffle
-
 """
     upsample_nearest(x, scale::NTuple{S,Int})
     upsample_nearest(x; size::NTuple{S,Int})
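
Since this patch only moves `export` and `using` statements into src/NNlib.jl without touching any definitions, the public API should be unchanged. A minimal smoke test along these lines (my own sketch, not part of the patch; the array sizes are arbitrary) exercises a few of the relocated exports:

```julia
using NNlib

# Activation aliases are still exported from the top-level module.
@assert sigmoid(0.0) == 0.5

# softmax is still exported via the consolidated list in src/NNlib.jl.
@assert sum(softmax([1.0, 2.0, 3.0])) ≈ 1.0

# conv and friends remain reachable without qualification.
x = rand(Float32, 10, 10, 3, 1)   # width × height × channels × batch
w = rand(Float32, 3, 3, 3, 4)     # 3×3 kernel, 3 in / 4 out channels
@assert size(conv(x, w)) == (8, 8, 4, 1)
```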