Oops, forgot .JuliaFormatter.toml this time
Saransh-cpp committed Oct 6, 2022
1 parent 39c08fc commit 289aa9d
Showing 42 changed files with 1,448 additions and 1,778 deletions.
9 changes: 9 additions & 0 deletions .JuliaFormatter.toml
@@ -0,0 +1,9 @@
style = "sciml"
whitespace_in_kwargs = true
format_docstrings = true
always_for_in = true
join_lines_based_on_source = true
separate_kwargs_with_semicolon = true
always_use_return = true
margin = 92
indent = 4
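
With this configuration at the repository root, the rest of the diff consists of the formatter's output. A minimal sketch of how the reformatting can be reproduced locally (assuming JuliaFormatter.jl is installed in the active environment):

using JuliaFormatter

# format(path) picks up the nearest .JuliaFormatter.toml, so the SciML-style
# options above (4-space indent, 92-character margin, semicolons before keyword
# arguments, `in` in for-loops, etc.) are applied automatically.
format(".")  # returns true if everything was already formatted
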
108 changes: 54 additions & 54 deletions docs/make.jl
@@ -1,61 +1,61 @@
using Documenter,
Flux, NNlib, Functors, MLUtils, BSON, Optimisers, OneHotArrays, Zygote, ChainRulesCore
Flux, NNlib, Functors, MLUtils, BSON, Optimisers, OneHotArrays, Zygote, ChainRulesCore

DocMeta.setdocmeta!(Flux, :DocTestSetup, :(using Flux); recursive = true)

makedocs(;
modules = [
Flux,
NNlib,
Functors,
MLUtils,
BSON,
Optimisers,
OneHotArrays,
Zygote,
ChainRulesCore,
Base,
],
doctest = false,
sitename = "Flux",
# strict = [:cross_references,],
pages = [
"Home" => "index.md",
"Building Models" => [
"Overview" => "models/overview.md",
"Basics" => "models/basics.md",
"Recurrence" => "models/recurrence.md",
"Layer Reference" => "models/layers.md",
"Loss Functions" => "models/losses.md",
"Regularisation" => "models/regularisation.md",
"Custom Layers" => "models/advanced.md",
"NNlib.jl" => "models/nnlib.md",
"Activation Functions" => "models/activation.md",
],
"Handling Data" =>
["MLUtils.jl" => "data/mlutils.md", "OneHotArrays.jl" => "data/onehot.md"],
"Training Models" => [
"Optimisers" => "training/optimisers.md",
"Training" => "training/training.md",
"Callback Helpers" => "training/callbacks.md",
"Zygote.jl" => "training/zygote.md",
],
"GPU Support" => "gpu.md",
"Model Tools" => [
"Saving & Loading" => "saving.md",
"Shape Inference" => "outputsize.md",
"Weight Initialisation" => "utilities.md",
"Functors.jl" => "models/functors.md",
],
"Performance Tips" => "performance.md",
"Flux's Ecosystem" => "ecosystem.md",
],
format = Documenter.HTML(;
sidebar_sitename = false,
analytics = "UA-36890222-9",
assets = ["assets/flux.css"],
prettyurls = get(ENV, "CI", nothing) == "true",
),
)
modules = [
Flux,
NNlib,
Functors,
MLUtils,
BSON,
Optimisers,
OneHotArrays,
Zygote,
ChainRulesCore,
Base,
],
doctest = false,
sitename = "Flux",
# strict = [:cross_references,],
pages = [
"Home" => "index.md",
"Building Models" => [
"Overview" => "models/overview.md",
"Basics" => "models/basics.md",
"Recurrence" => "models/recurrence.md",
"Layer Reference" => "models/layers.md",
"Loss Functions" => "models/losses.md",
"Regularisation" => "models/regularisation.md",
"Custom Layers" => "models/advanced.md",
"NNlib.jl" => "models/nnlib.md",
"Activation Functions" => "models/activation.md",
],
"Handling Data" => [
"MLUtils.jl" => "data/mlutils.md",
"OneHotArrays.jl" => "data/onehot.md",
],
"Training Models" => [
"Optimisers" => "training/optimisers.md",
"Training" => "training/training.md",
"Callback Helpers" => "training/callbacks.md",
"Zygote.jl" => "training/zygote.md",
],
"GPU Support" => "gpu.md",
"Model Tools" => [
"Saving & Loading" => "saving.md",
"Shape Inference" => "outputsize.md",
"Weight Initialisation" => "utilities.md",
"Functors.jl" => "models/functors.md",
],
"Performance Tips" => "performance.md",
"Flux's Ecosystem" => "ecosystem.md",
],
format = Documenter.HTML(;
sidebar_sitename = false,
analytics = "UA-36890222-9",
assets = ["assets/flux.css"],
prettyurls = get(ENV, "CI", nothing) == "true"))

deploydocs(; repo = "github.com/FluxML/Flux.jl.git", target = "build", push_preview = true)
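
For reference, a docs build driven by this make.jl can be reproduced locally along these lines (a sketch only, assuming the conventional Documenter.jl layout in which docs/ carries its own project environment):

using Pkg

# Use the documentation environment and point it at the local Flux checkout.
Pkg.activate("docs")
Pkg.develop(path = ".")
Pkg.instantiate()

# Run the build script shown above; deploydocs normally skips deployment
# when the CI environment variables it looks for are absent.
include("docs/make.jl")
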
6 changes: 3 additions & 3 deletions perf/bench_utils.jl
@@ -24,19 +24,19 @@ function run_benchmark(model, x; cuda = true)
fw(model, x)
GC.gc()
CUDA.reclaim() #warmup
@btime CUDA.@sync(fw($model, $x)) teardown = (GC.gc(); CUDA.reclaim())
@btime CUDA.@sync(fw($model, $x)) teardown=(GC.gc(); CUDA.reclaim())

println(" backward")
bw(back)
GC.gc()
CUDA.reclaim() #warmup
@btime CUDA.@sync(bw($back)) teardown = (GC.gc(); CUDA.reclaim())
@btime CUDA.@sync(bw($back)) teardown=(GC.gc(); CUDA.reclaim())

println(" forw and back")
fwbw(model, ps, x)
GC.gc()
CUDA.reclaim() #warmup
@btime CUDA.@sync(fwbw($model, $ps, $x)) teardown = (GC.gc(); CUDA.reclaim())
@btime CUDA.@sync(fwbw($model, $ps, $x)) teardown=(GC.gc(); CUDA.reclaim())
else
println(" forward")
fw(model, x) #warmup
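The only change in this file is the whitespace around teardown= on the three @btime lines, but the idiom is worth spelling out. A minimal CPU-only sketch of the same pattern (the model, input, and fw helper here are hypothetical stand-ins for the ones defined in bench_utils.jl):

using BenchmarkTools, Flux

model = Dense(10 => 10)        # toy stand-in for the benchmarked model
x = rand(Float32, 10, 64)      # toy input batch
fw(m, x) = m(x)                # forward pass, as in bench_utils.jl

# `$` interpolates the globals so variable lookup is excluded from the timing;
# `teardown` runs after every sample, here forcing a GC pass between samples.
@btime fw($model, $x) teardown=(GC.gc())
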
2 changes: 1 addition & 1 deletion perf/recurrent.jl
@@ -51,7 +51,7 @@ end

for rnn_type in [Flux.RNN, Flux.GRU, Flux.LSTM]
rnn_benchmark_sweep(rnn_type) do n, ts
return [randn(Float32, n, n) for _ = 1:ts], "Vec"
return [randn(Float32, n, n) for _ in 1:ts], "Vec"
end
end

76 changes: 37 additions & 39 deletions perf/vgg.jl
@@ -6,45 +6,43 @@ using CUDA
using Zygote: pullback

function vgg16()
return Chain(
Conv((3, 3), 3 => 64, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(64),
Conv((3, 3), 64 => 64, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(64),
MaxPool((2, 2)),
Conv((3, 3), 64 => 128, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(128),
Conv((3, 3), 128 => 128, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(128),
MaxPool((2, 2)),
Conv((3, 3), 128 => 256, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(256),
Conv((3, 3), 256 => 256, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(256),
Conv((3, 3), 256 => 256, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(256),
MaxPool((2, 2)),
Conv((3, 3), 256 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
MaxPool((2, 2)),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
MaxPool((2, 2)),
flatten,
Dense(512, 4096, relu),
Dropout(0.5),
Dense(4096, 4096, relu),
Dropout(0.5),
Dense(4096, 10),
)
return Chain(Conv((3, 3), 3 => 64, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(64),
Conv((3, 3), 64 => 64, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(64),
MaxPool((2, 2)),
Conv((3, 3), 64 => 128, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(128),
Conv((3, 3), 128 => 128, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(128),
MaxPool((2, 2)),
Conv((3, 3), 128 => 256, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(256),
Conv((3, 3), 256 => 256, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(256),
Conv((3, 3), 256 => 256, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(256),
MaxPool((2, 2)),
Conv((3, 3), 256 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
MaxPool((2, 2)),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
Conv((3, 3), 512 => 512, relu; pad = (1, 1), stride = (1, 1)),
BatchNorm(512),
MaxPool((2, 2)),
flatten,
Dense(512, 4096, relu),
Dropout(0.5),
Dense(4096, 4096, relu),
Dropout(0.5),
Dense(4096, 10))
end

let model = vgg16(), x = rand(Float32, 32, 32, 3, 64)
118 changes: 57 additions & 61 deletions src/Flux.jl
@@ -15,85 +15,81 @@ export gradient

# Pirate error to catch a common mistake. (Internal function `base` because overloading `update!` is more likely to give ambiguities.)
function Optimisers.base(dx::Zygote.Grads)
return error(
"Optimisers.jl cannot be used with Zygote.jl's implicit gradients, `Params` & `Grads`",
)
return error("Optimisers.jl cannot be used with Zygote.jl's implicit gradients, `Params` & `Grads`")
end

export Chain,
Dense,
Embedding,
Maxout,
SkipConnection,
Parallel,
PairwiseFusion,
RNN,
LSTM,
GRU,
GRUv3,
SamePad,
Conv,
CrossCor,
ConvTranspose,
DepthwiseConv,
AdaptiveMaxPool,
AdaptiveMeanPool,
GlobalMaxPool,
GlobalMeanPool,
MaxPool,
MeanPool,
Dropout,
AlphaDropout,
LayerNorm,
BatchNorm,
InstanceNorm,
GroupNorm,
Upsample,
PixelShuffle,
fmap,
cpu,
gpu,
f32,
f64,
testmode!,
trainmode!
Dense,
Embedding,
Maxout,
SkipConnection,
Parallel,
PairwiseFusion,
RNN,
LSTM,
GRU,
GRUv3,
SamePad,
Conv,
CrossCor,
ConvTranspose,
DepthwiseConv,
AdaptiveMaxPool,
AdaptiveMeanPool,
GlobalMaxPool,
GlobalMeanPool,
MaxPool,
MeanPool,
Dropout,
AlphaDropout,
LayerNorm,
BatchNorm,
InstanceNorm,
GroupNorm,
Upsample,
PixelShuffle,
fmap,
cpu,
gpu,
f32,
f64,
testmode!,
trainmode!

include("optimise/Optimise.jl")
using .Optimise
using .Optimise: @epochs
using .Optimise: skip
export Descent,
Adam,
Momentum,
Nesterov,
RMSProp,
AdaGrad,
AdaMax,
AdaDelta,
AMSGrad,
NAdam,
OAdam,
AdamW,
RAdam,
AdaBelief,
InvDecay,
ExpDecay,
WeightDecay,
ClipValue,
ClipNorm
Adam,
Momentum,
Nesterov,
RMSProp,
AdaGrad,
AdaMax,
AdaDelta,
AMSGrad,
NAdam,
OAdam,
AdamW,
RAdam,
AdaBelief,
InvDecay,
ExpDecay,
WeightDecay,
ClipValue,
ClipNorm

using CUDA
const use_cuda = Ref{Union{Nothing,Bool}}(nothing)
const use_cuda = Ref{Union{Nothing, Bool}}(nothing)

using Adapt, Functors, OneHotArrays
include("utils.jl")
include("functor.jl")

# Pirate error to catch a common mistake.
function Functors.functor(::Type{<:MLUtils.DataLoader}, x)
return error(
"`DataLoader` does not support Functors.jl, thus functions like `Flux.gpu` will not act on its contents.",
)
return error("`DataLoader` does not support Functors.jl, thus functions like `Flux.gpu` will not act on its contents.")
end

include("layers/stateless.jl")
(Diffs for the remaining changed files were not loaded and are omitted here.)
