diff --git a/docs/src/models/quickstart.md b/docs/src/models/quickstart.md
index 6eb313c7bd..aef7863bc9 100644
--- a/docs/src/models/quickstart.md
+++ b/docs/src/models/quickstart.md
@@ -31,8 +31,8 @@ pars = Flux.params(model)  # contains references to arrays in model
 opt = Flux.Adam(0.01)  # will store optimiser momentum, etc.
 
 # Training loop, using the whole data set 1000 times:
+losses = []
 for epoch in 1:1_000
-    losses = []
     for (x, y) in loader
         loss, grad = withgradient(pars) do
             # Evaluate model and loss inside gradient context:
@@ -42,9 +42,6 @@ for epoch in 1:1_000
         Flux.update!(opt, pars, grad)
         push!(losses, loss)  # logging, outside gradient context
     end
-    if isinteger(log2(epoch))
-        println("after epoch $epoch, loss is ", mean(losses))
-    end
 end
 
 pars  # parameters, momenta and output have all changed