From bf5af3099088512593dbc8dcf153556e0f57f81c Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Mon, 25 Nov 2024 09:15:21 -0500
Subject: [PATCH 1/5] add functors macro from PR destroyed by rebase, but now with at-warn

---
 src/deprecations.jl | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/src/deprecations.jl b/src/deprecations.jl
index ab8f711f89..353f1ede3c 100644
--- a/src/deprecations.jl
+++ b/src/deprecations.jl
@@ -93,6 +93,29 @@ function params(m...)
   return ps
 end
 
+
+"""
+    @functor MyLayer
+
+Flux used to require the use of `Functors.@functor` to mark any new layer-like struct.
+This allowed it to explore inside the struct, and update any trainable parameters within.
+Flux@0.15 removes this requirement. This is because Functors@0.5 changed its behaviour
+to be opt-out instead of opt-in. Arbitrary structs will now be explored without special marking.
+Hence calling `@functor` is no longer required.
+
+Calling `Flux.@layer MyLayer` is, however, still recommended. This adds various convenience methods
+for your layer type, such as pretty-printing, and use with Adapt.jl.
+"""
+macro functor(ex)
+  @warn """The use of `Flux.@functor` is deprecated.
+    Most likely, you should write `Flux.@layer MyLayer` which will add various convenience methods for your type,
+    such as pretty-printing, and use with Adapt.jl.
+    However, this is not required. Flux.jl v0.15 uses Functors.jl v0.5, which makes exploration of most nested `struct`s
+    opt-out instead of opt-in... so Flux will automatically see inside any custom struct definitions.
+    """ maxlog=1
+  _layer_macro(ex)
+end
+
 # Allows caching of the parameters when params is called within gradient() to fix #2040.
 # @non_differentiable params(m...)  # https://github.com/FluxML/Flux.jl/pull/2054
 # That speeds up implicit use, and silently breaks explicit use.

From 467ea36275a1b13d24135eb2766db998ad5c3e04 Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Mon, 25 Nov 2024 09:15:49 -0500
Subject: [PATCH 2/5] fix https://github.com/FluxML/Flux.jl/issues/2533

---
 src/deprecations.jl | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/src/deprecations.jl b/src/deprecations.jl
index 353f1ede3c..fa36fd2382 100644
--- a/src/deprecations.jl
+++ b/src/deprecations.jl
@@ -83,11 +83,20 @@ function params!(p::Zygote.Params, x, seen = IdSet())
   end
 end
 
+"""
+    params(model)
+
+Returns a `Zygote.Params` object containing all parameter arrays from the model.
+This is deprecated!
+
+This function was the cornerstone of how Flux used Zygote's implicit-mode gradients,
+but since Flux 0.13 we use the explicit-mode `gradient(m -> loss(m, x, y), model)` instead.
+
+To collect all the parameter arrays for other purposes, use `Flux.trainables(model)`.
+"""
 function params(m...)
-  Base.depwarn("""
-    Flux.params(m...) is deprecated. Use `Flux.trainable(model)` for parameters' collection
-    and the explicit `gradient(m -> loss(m, x, y), model)` for gradient computation.
-    """, :params)
+  @warn """`Flux.params(m...)` is deprecated. Use `Flux.trainables(model)` for parameter collection,
+    and the explicit `gradient(m -> loss(m, x, y), model)` for gradient computation.""" maxlog=1
   ps = Params()
   params!(ps, m)
   return ps

From bab47cfb5e0c1943b5b1eea9551bd82a1b366b22 Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Mon, 25 Nov 2024 09:38:04 -0500
Subject: [PATCH 3/5] move up two more

---
 src/deprecations.jl | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/deprecations.jl b/src/deprecations.jl
index fa36fd2382..74c8d16f5c 100644
--- a/src/deprecations.jl
+++ b/src/deprecations.jl
@@ -133,6 +133,8 @@ Zygote._pullback(::Zygote.Context{true}, ::typeof(params), m...) = params(m), _
 
 include("optimise/Optimise.jl") ## deprecated Module
 
+Base.@deprecate_binding Optimiser OptimiserChain
+Base.@deprecate_binding ClipValue ClipGrad
 
 # TODO this friendly error should go in Optimisers.jl.
 # remove after https://github.com/FluxML/Optimisers.jl/pull/181
@@ -151,9 +153,6 @@ end
 
 ### v0.16 deprecations
 ####################
-# Enable these when 0.16 is released, and delete const ClipGrad = Optimise.ClipValue etc:
-# Base.@deprecate_binding Optimiser OptimiserChain
-# Base.@deprecate_binding ClipValue ClipGrad
 
 # train!(loss::Function, ps::Zygote.Params, data, opt) = throw(ArgumentError(
 #   """On Flux 0.16, `train!` no longer accepts implicit `Zygote.Params`.

From 043fd57de7fc6908e14a5059d46cfe6417613249 Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Mon, 25 Nov 2024 09:49:43 -0500
Subject: [PATCH 4/5] upgrade Optimiser and ClipValue warnings

---
 src/deprecations.jl | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/deprecations.jl b/src/deprecations.jl
index 74c8d16f5c..8b27256170 100644
--- a/src/deprecations.jl
+++ b/src/deprecations.jl
@@ -133,8 +133,14 @@ Zygote._pullback(::Zygote.Context{true}, ::typeof(params), m...) = params(m), _
 
 include("optimise/Optimise.jl") ## deprecated Module
 
-Base.@deprecate_binding Optimiser OptimiserChain
-Base.@deprecate_binding ClipValue ClipGrad
+function Optimiser(rules...)
+  @warn "`Flux.Optimiser(...)` has been removed, please call `OptimiserChain(...)`, exported by Flux from Optimisers.jl" maxlog=1
+  OptimiserChain(rules...)
+end
+function ClipValue(val)
+  @warn "`Flux.ClipValue(...)` has been removed, please call `ClipGrad(...)`, exported by Flux from Optimisers.jl" maxlog=1
+  ClipGrad(val)
+end
 
 # TODO this friendly error should go in Optimisers.jl.
 # remove after https://github.com/FluxML/Optimisers.jl/pull/181

From 130e04c7a07ffffda10d7dc9cc4549d1e0fb8630 Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Mon, 25 Nov 2024 09:50:09 -0500
Subject: [PATCH 5/5] add noisy warnings to `train!` and `update!` too.

---
 src/optimise/train.jl | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/optimise/train.jl b/src/optimise/train.jl
index 52488107ef..d38cbe408d 100644
--- a/src/optimise/train.jl
+++ b/src/optimise/train.jl
@@ -5,6 +5,10 @@ function update!(opt::AbstractOptimiser, x::AbstractArray, x̄)
 end
 
 function update!(opt::AbstractOptimiser, xs::Params, gs)
+  @warn """The method `Flux.update!(optimiser, ps::Params, grads)` is deprecated,
+    as part of Flux's move away from Zygote's implicit mode.
+    Please use explicit-style `update!(opt_state, model, grad)` instead,
+    where `grad = Flux.gradient(m -> loss(m,x,y), model)` and `opt_state = Flux.setup(rule, model)`.""" maxlog=1
   for x in xs
     isnothing(gs[x]) && continue
     update!(opt, x, gs[x])
@@ -21,6 +25,10 @@ batchmemaybe(x) = tuple(x)
 batchmemaybe(x::Tuple) = x
 
 function train!(loss, ps::Params, data, opt::AbstractOptimiser; cb = () -> ())
+  @warn """The method `Flux.train!(loss2, ps::Params, data, optimiser)` is deprecated,
+    as part of Flux's move away from Zygote's implicit parameters.
+    Please use explicit-style `train!(loss, model, data, opt_state)` instead,
+    where `loss(m, xy...)` accepts the model, and `opt_state = Flux.setup(rule, model)`.""" maxlog=1
   cb = runall(cb)
   itrsz = Base.IteratorSize(typeof(data))
   n = (itrsz == Base.HasLength()) || (itrsz == Base.HasShape{1}()) ? length(data) : 0
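
For reference, a minimal sketch of the explicit-style workflow that these warnings point to. The `Dense` model, `loss`, and random data below are illustrative stand-ins and not part of the patch; only `setup`, `gradient`, `update!`, `train!`, and `trainables` are the APIs named in the warnings above.

    using Flux

    model = Dense(2 => 1)                      # any layer or custom struct
    loss(m, x, y) = sum(abs2, m(x) .- y)       # explicit-style loss takes the model as first argument
    x, y = rand(Float32, 2, 8), rand(Float32, 1, 8)

    opt_state = Flux.setup(Adam(0.01), model)  # replaces building an optimiser for use with Params

    # One explicit update, instead of update!(opt, Flux.params(model), grads):
    grad = Flux.gradient(m -> loss(m, x, y), model)
    Flux.update!(opt_state, model, grad[1])

    # Or a training loop, instead of train!(loss2, Flux.params(model), data, opt):
    data = [(x, y)]
    Flux.train!(loss, model, data, opt_state)

    # And instead of Flux.params(model), collect the parameter arrays with:
    Flux.trainables(model)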