Skip to content

Commit 93b9868

Browse files
mhauru and penelopeysm authored
Update to Turing 0.40 (#636)
* Update to Turing 0.40 * Update version in _quarto.yml * Fix DPPL 0.37 run_ad change * Fix VI tutorial * Fix model-manual's use of contexts * Fix references to __context__ * Fix use of addlogprob for log prior * Fix typo * Regenerate manifest * Remove version pin of DelayDiffEq and update Manifest * Fix call to evaluate * Add note about contexts tutorial being out of date * Apply suggestions from code review Co-authored-by: Penelope Yong <penelopeysm@gmail.com> --------- Co-authored-by: Penelope Yong <penelopeysm@gmail.com>
1 parent 8bbaed1 commit 93b9868

File tree

10 files changed

+41
-39
lines changed

10 files changed

+41
-39
lines changed

Manifest.toml

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
# This file is machine-generated - editing it directly is not advised
22

3-
julia_version = "1.11.6"
3+
julia_version = "1.11.7"
44
manifest_format = "2.0"
5-
project_hash = "2db94c909342da49eb3ff313436b75e25977ace6"
5+
project_hash = "6ed163b5c6c108dfaae31b757b21ff71e297872f"
66

77
[[deps.ADTypes]]
88
git-tree-sha1 = "27cecae79e5cc9935255f90c53bb831cc3c870d7"
@@ -39,10 +39,14 @@ uuid = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
3939
version = "5.8.0"
4040

4141
[[deps.AbstractPPL]]
42-
deps = ["AbstractMCMC", "Accessors", "DensityInterface", "JSON", "Random", "StatsBase"]
43-
git-tree-sha1 = "478b0b6176125cf0a5e6e9fd69fdd0923754531c"
42+
deps = ["AbstractMCMC", "Accessors", "DensityInterface", "JSON", "LinearAlgebra", "Random", "StatsBase"]
43+
git-tree-sha1 = "3a9c1e6e7418494454260b0c8efd7b00fca251bc"
4444
uuid = "7a57a42e-76ec-4ea3-a279-07e840d6d9cf"
45-
version = "0.12.0"
45+
version = "0.13.1"
46+
weakdeps = ["Distributions"]
47+
48+
[deps.AbstractPPL.extensions]
49+
AbstractPPLDistributionsExt = ["Distributions"]
4650

4751
[[deps.AbstractTrees]]
4852
git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177"
@@ -686,9 +690,9 @@ version = "0.1.2"
686690

687691
[[deps.DelayDiffEq]]
688692
deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "FastBroadcast", "ForwardDiff", "LinearAlgebra", "Logging", "OrdinaryDiffEq", "OrdinaryDiffEqCore", "OrdinaryDiffEqDefault", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqFunctionMap", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqRosenbrock", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SimpleUnPack", "SymbolicIndexingInterface"]
689-
git-tree-sha1 = "c50981fb5b6441a1213debb1423385a462e88032"
693+
git-tree-sha1 = "bb84c7ab3de56ada66430b5671db3b3cdaadcd61"
690694
uuid = "bcd4f6db-9728-5f36-b5f7-82caef46ccdb"
691-
version = "5.56.0"
695+
version = "5.61.0"
692696

693697
[[deps.DelimitedFiles]]
694698
deps = ["Mmap"]
@@ -894,10 +898,10 @@ uuid = "bbc10e6e-7c05-544b-b16e-64fede858acb"
894898
version = "3.5.1"
895899

896900
[[deps.DynamicPPL]]
897-
deps = ["ADTypes", "AbstractMCMC", "AbstractPPL", "Accessors", "BangBang", "Bijectors", "Chairmarks", "Compat", "ConstructionBase", "DifferentiationInterface", "Distributions", "DocStringExtensions", "InteractiveUtils", "LinearAlgebra", "LogDensityProblems", "MacroTools", "OrderedCollections", "Random", "Requires", "Statistics", "Test"]
898-
git-tree-sha1 = "07218fc45d4f1abc9de4498e8cf966b6672c84f0"
901+
deps = ["ADTypes", "AbstractMCMC", "AbstractPPL", "Accessors", "BangBang", "Bijectors", "Chairmarks", "Compat", "ConstructionBase", "DifferentiationInterface", "Distributions", "DocStringExtensions", "InteractiveUtils", "LinearAlgebra", "LogDensityProblems", "MacroTools", "OrderedCollections", "Printf", "Random", "Requires", "Statistics", "Test"]
902+
git-tree-sha1 = "65649377104973412c10fb00ebc2f8430dcf8b93"
899903
uuid = "366bfd00-2699-11ea-058f-f148b4cae6d8"
900-
version = "0.36.15"
904+
version = "0.37.2"
901905

902906
[deps.DynamicPPL.extensions]
903907
DynamicPPLChainRulesCoreExt = ["ChainRulesCore"]
@@ -1752,7 +1756,6 @@ version = "3.40.1"
17521756
LinearSolveCUDAExt = "CUDA"
17531757
LinearSolveCUDSSExt = "CUDSS"
17541758
LinearSolveCUSOLVERRFExt = ["CUSOLVERRF", "SparseArrays"]
1755-
LinearSolveCliqueTreesExt = ["CliqueTrees", "SparseArrays"]
17561759
LinearSolveEnzymeExt = "EnzymeCore"
17571760
LinearSolveFastAlmostBandedMatricesExt = "FastAlmostBandedMatrices"
17581761
LinearSolveFastLapackInterfaceExt = "FastLapackInterface"
@@ -1774,7 +1777,6 @@ version = "3.40.1"
17741777
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
17751778
CUDSS = "45b445bb-4962-46a0-9369-b4df9d0f772e"
17761779
CUSOLVERRF = "a8cc9031-bad2-4722-94f5-40deabb4245c"
1777-
CliqueTrees = "60701a23-6482-424a-84db-faee86b9b1f8"
17781780
EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
17791781
FastAlmostBandedMatrices = "9d29842c-ecb8-4973-b1e9-a27b1157504e"
17801782
FastLapackInterface = "29a986be-02c6-4525-aec4-84b980013641"
@@ -3655,9 +3657,9 @@ version = "1.6.0"
36553657

36563658
[[deps.Turing]]
36573659
deps = ["ADTypes", "AbstractMCMC", "AbstractPPL", "Accessors", "AdvancedHMC", "AdvancedMH", "AdvancedPS", "AdvancedVI", "BangBang", "Bijectors", "Compat", "DataStructures", "Distributions", "DistributionsAD", "DocStringExtensions", "DynamicPPL", "EllipticalSliceSampling", "ForwardDiff", "Libtask", "LinearAlgebra", "LogDensityProblems", "MCMCChains", "NamedArrays", "Optimization", "OptimizationOptimJL", "OrderedCollections", "Printf", "Random", "Reexport", "SciMLBase", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"]
3658-
git-tree-sha1 = "58ad6cfe65311dc4ddc0e13e6747f3fad26f1034"
3660+
git-tree-sha1 = "8e1e790cfdffd15524575b84c4709033d1ea9550"
36593661
uuid = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"
3660-
version = "0.39.10"
3662+
version = "0.40.3"
36613663
weakdeps = ["DynamicHMC", "Optim"]
36623664

36633665
[deps.Turing.extensions]

Project.toml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,5 +56,4 @@ Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"
5656
UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"
5757

5858
[compat]
59-
Turing = "0.39"
60-
DelayDiffEq = "~5.56"
59+
Turing = "0.40"

_quarto.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ website:
4343
href: https://turinglang.org/team/
4444
right:
4545
# Current version
46-
- text: "v0.39"
46+
- text: "v0.40"
4747
menu:
4848
- text: Changelog
4949
href: https://turinglang.org/docs/changelog.html

developers/compiler/minituring-contexts/index.qmd

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,10 @@ Pkg.instantiate();
1414

1515
In the [Mini Turing]({{< meta minituring >}}) tutorial we developed a miniature version of the Turing language, to illustrate its core design. A passing mention was made of contexts. In this tutorial we develop that aspect of our mini Turing language further to demonstrate how and why contexts are an important part of Turing's design.
1616

17+
::: {.callout-important}
18+
Note: The way Turing actually uses contexts changed somewhat in releases 0.39 and 0.40. The content of this page remains relevant, the principles of how contexts operate remain the same, and concepts like leaf and parent contexts still exist. However, we've moved away from using contexts for quite as many things as we used to. Most importantly, whether to accumulate the log joint, log prior, or log likelihood is no longer done using different contexts. Please keep this in mind as you read this page: The principles remain, but the details have changed. We will update this page once the refactoring of internals that is happening around releases like 0.39 and 0.40 is done.
19+
:::
20+
1721
# Mini Turing expanded, now with more contexts
1822

1923
If you haven't read [Mini Turing]({{< meta minituring >}}) yet, you should do that first. We start by repeating verbatim much of the code from there. Define the type for holding values for variables:

developers/compiler/model-manual/index.qmd

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,21 +33,21 @@ Taking the `gdemo` model above as an example, the macro-based definition can be
3333
using DynamicPPL
3434
3535
# Create the model function.
36-
function gdemo2(model, varinfo, context, x)
36+
function gdemo2(model, varinfo, x)
3737
# Assume s² has an InverseGamma distribution.
3838
s², varinfo = DynamicPPL.tilde_assume!!(
39-
context, InverseGamma(2, 3), @varname(s²), varinfo
39+
model.context, InverseGamma(2, 3), @varname(s²), varinfo
4040
)
4141
4242
# Assume m has a Normal distribution.
4343
m, varinfo = DynamicPPL.tilde_assume!!(
44-
context, Normal(0, sqrt(s²)), @varname(m), varinfo
44+
model.context, Normal(0, sqrt(s²)), @varname(m), varinfo
4545
)
4646
4747
# Observe each value of x[i] according to a Normal distribution.
4848
for i in eachindex(x)
4949
_retval, varinfo = DynamicPPL.tilde_observe!!(
50-
context, Normal(m, sqrt(s²)), x[i], @varname(x[i]), varinfo
50+
model.context, Normal(m, sqrt(s²)), x[i], @varname(x[i]), varinfo
5151
)
5252
end
5353

developers/contexts/submodel-condition/index.qmd

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -108,13 +108,13 @@ unwrap_sampling_context(ctx::DynamicPPL.SamplingContext) = ctx.context
108108
unwrap_sampling_context(ctx::DynamicPPL.AbstractContext) = ctx
109109
110110
@model function inner()
111-
println("inner context: $(unwrap_sampling_context(__context__))")
111+
println("inner context: $(unwrap_sampling_context(__model__.context))")
112112
x ~ Normal()
113113
return y ~ Normal()
114114
end
115115
116116
@model function outer()
117-
println("outer context: $(unwrap_sampling_context(__context__))")
117+
println("outer context: $(unwrap_sampling_context(__model__.context))")
118118
return a ~ to_submodel(inner())
119119
end
120120
@@ -124,7 +124,7 @@ with_outer_cond = outer() | (@varname(a.x) => 1.0)
124124
# 'Inner conditioning'
125125
inner_cond = inner() | (@varname(x) => 1.0)
126126
@model function outer2()
127-
println("outer context: $(unwrap_sampling_context(__context__))")
127+
println("outer context: $(unwrap_sampling_context(__model__.context))")
128128
return a ~ to_submodel(inner_cond)
129129
end
130130
with_inner_cond = outer2()

developers/transforms/dynamicppl/index.qmd

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -351,7 +351,7 @@ Hence, one might expect that if we try to evaluate the model using this `VarInfo
351351
Here, `evaluate!!` returns two things: the model's return value itself (which we defined above to be a `NamedTuple`), and the resulting `VarInfo` post-evaluation.
352352

353353
```{julia}
354-
retval, ret_varinfo = DynamicPPL.evaluate!!(model, vi_linked, DefaultContext())
354+
retval, ret_varinfo = DynamicPPL.evaluate!!(model, vi_linked)
355355
getlogp(ret_varinfo)
356356
```
357357

tutorials/variational-inference/index.qmd

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -182,8 +182,8 @@ Usually, `q_avg` will perform better than the last-iterate `q_last`.
182182
For instance, we can compare the ELBO of the two:
183183
```{julia}
184184
@info("Objective of q_avg and q_last",
185-
ELBO_q_avg = estimate_objective(AdvancedVI.RepGradELBO(32), q_avg, Turing.Variational.make_logdensity(m)),
186-
ELBO_q_last = estimate_objective(AdvancedVI.RepGradELBO(32), q_last, Turing.Variational.make_logdensity(m))
185+
ELBO_q_avg = estimate_objective(AdvancedVI.RepGradELBO(32), q_avg, LogDensityFunction(m)),
186+
ELBO_q_last = estimate_objective(AdvancedVI.RepGradELBO(32), q_last, LogDensityFunction(m))
187187
)
188188
```
189189
We can see that `ELBO_q_avg` is slightly more optimal.
@@ -205,9 +205,9 @@ For example, the following callback function estimates the ELBO on `q_avg` every
205205
```{julia}
206206
function callback(; stat, averaged_params, restructure, kwargs...)
207207
if mod(stat.iteration, 10) == 1
208-
q_avg = restructure(averaged_params)
209-
obj = AdvancedVI.RepGradELBO(128)
210-
elbo_avg = estimate_objective(obj, q_avg, Turing.Variational.make_logdensity(m))
208+
q_avg = restructure(averaged_params)
209+
obj = AdvancedVI.RepGradELBO(128)
210+
elbo_avg = estimate_objective(obj, q_avg, LogDensityFunction(m))
211211
(elbo_avg = elbo_avg,)
212212
else
213213
nothing
@@ -223,7 +223,7 @@ q_mf, _, info_mf, _ = vi(m, q_init, n_iters; show_progress=false, callback=callb
223223

224224
Let's plot the result:
225225
```{julia}
226-
iters = 1:10:length(info_mf)
226+
iters = 1:10:length(info_mf)
227227
elbo_mf = [i.elbo_avg for i in info_mf[iters]]
228228
Plots.plot!(iters, elbo_mf, xlabel="Iterations", ylabel="ELBO", label="callback", ylims=(-200,Inf))
229229
```
@@ -247,7 +247,7 @@ _, _, info_adam, _ = vi(m, q_init, n_iters; show_progress=false, callback=callba
247247
```
248248

249249
```{julia}
250-
iters = 1:10:length(info_mf)
250+
iters = 1:10:length(info_mf)
251251
elbo_adam = [i.elbo_avg for i in info_adam[iters]]
252252
Plots.plot(iters, elbo_mf, xlabel="Iterations", ylabel="ELBO", label="DoWG")
253253
Plots.plot!(iters, elbo_adam, xlabel="Iterations", ylabel="ELBO", label="Adam")

usage/automatic-differentiation/index.qmd

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ model = gdemo(1.5, 2)
9191
9292
for adtype in [AutoForwardDiff(), AutoReverseDiff()]
9393
result = run_ad(model, adtype; benchmark=true)
94-
@show result.time_vs_primal
94+
@show result.grad_time / result.primal_time
9595
end
9696
```
9797

usage/modifying-logprob/index.qmd

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -47,13 +47,10 @@ using LinearAlgebra
4747
end
4848
```
4949

50-
Note that `@addlogprob!` always increases the accumulated log probability, regardless of the provided
51-
sampling context.
52-
For instance, if you do not want to apply `@addlogprob!` when evaluating the prior of your model but only when computing the log likelihood and the log joint probability, then you should [check the type of the internal variable `__context_`](https://github.com/TuringLang/DynamicPPL.jl/issues/154), as in the following example:
50+
Note that `@addlogprob! (p::Float64)` adds `p` to the log likelihood.
51+
If instead you want to add to the log prior, you can use
5352

5453
```{julia}
5554
#| eval: false
56-
if DynamicPPL.leafcontext(__context__) !== Turing.PriorContext()
57-
@addlogprob! myloglikelihood(x, μ)
58-
end
55+
@addlogprob! (; logprior=value_goes_here)
5956
```

0 commit comments

Comments (0)