From 5dd951394919ecc7ada8b67b63f6307352a70f21 Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Tue, 8 Jul 2025 22:22:59 -0700
Subject: [PATCH 01/14] lab branch no RunStop
---
examples/deep_fsa/deep_fsa.go | 1 -
examples/hip/hip.go | 1 -
examples/ra25/ra25.go | 1 -
examples/sir2/sir2.go | 1 -
4 files changed, 4 deletions(-)
diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go
index 639e33c..2b2b1a1 100644
--- a/examples/deep_fsa/deep_fsa.go
+++ b/examples/deep_fsa/deep_fsa.go
@@ -391,7 +391,6 @@ func (ss *Sim) Init() {
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
- ss.GUI.StopNow = false
ss.ApplyParams()
ss.NewRun()
ss.ViewUpdate.RecordSyns()
diff --git a/examples/hip/hip.go b/examples/hip/hip.go
index 2fc0d00..2298f82 100644
--- a/examples/hip/hip.go
+++ b/examples/hip/hip.go
@@ -400,7 +400,6 @@ func (ss *Sim) Init() {
ss.Stats.SetString("RunName", ss.Params.RunName(0)) // in case user interactively changes tag
ss.Loops.ResetCounters()
- ss.GUI.StopNow = false
ss.ApplyParams()
ss.NewRun()
ss.ViewUpdate.RecordSyns()
diff --git a/examples/ra25/ra25.go b/examples/ra25/ra25.go
index 71b5d45..6188981 100644
--- a/examples/ra25/ra25.go
+++ b/examples/ra25/ra25.go
@@ -386,7 +386,6 @@ func (ss *Sim) Init() {
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
- ss.GUI.StopNow = false
ss.ApplyParams()
ss.NewRun()
ss.ViewUpdate.RecordSyns()
diff --git a/examples/sir2/sir2.go b/examples/sir2/sir2.go
index 08f9924..ac1ba3c 100644
--- a/examples/sir2/sir2.go
+++ b/examples/sir2/sir2.go
@@ -402,7 +402,6 @@ func (ss *Sim) Init() {
ss.Loops.ResetCounters()
ss.InitRandSeed(0)
ss.ConfigEnv() // re-config env just in case a different set of patterns was
- ss.GUI.StopNow = false
ss.ApplyParams()
ss.NewRun()
ss.ViewUpdate.RecordSyns()
From 5d269e8d34a5c8cf8bd9bedb24622f3e31cd5a92 Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Wed, 9 Jul 2025 02:39:03 -0700
Subject: [PATCH 02/14] lab branch significant progress updating to axon style
---
examples/ra25/config.go | 152 ++++++
examples/ra25/ra25.go | 1027 +++++++++++++++++++-------------------
examples/ra25/typegen.go | 26 +-
go.mod | 7 +-
leabra/deep_net.go | 12 +-
leabra/helpers.go | 21 +-
leabra/layer.go | 12 +-
leabra/layerbase.go | 166 +++---
leabra/logging.go | 297 -----------
leabra/looper.go | 357 ++++++++-----
leabra/network.go | 58 ++-
leabra/networkbase.go | 116 +++--
leabra/neuromod.go | 2 +-
leabra/params.go | 216 ++++++++
leabra/path.go | 6 +-
leabra/pathbase.go | 56 ++-
leabra/pbwm_layers.go | 2 +-
leabra/pbwm_net.go | 15 +-
leabra/rl.go | 14 +-
19 files changed, 1397 insertions(+), 1165 deletions(-)
create mode 100644 examples/ra25/config.go
delete mode 100644 leabra/logging.go
create mode 100644 leabra/params.go
diff --git a/examples/ra25/config.go b/examples/ra25/config.go
new file mode 100644
index 0000000..aefd53b
--- /dev/null
+++ b/examples/ra25/config.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2024, The Emergent Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ra25
+
+import (
+ "cogentcore.org/core/base/errors"
+ "cogentcore.org/core/base/reflectx"
+ "cogentcore.org/core/math32/vecint"
+)
+
+// ParamConfig has config parameters related to sim params.
+type ParamConfig struct {
+
+ // Hidden1Size is the size of hidden 1 layer.
+ Hidden1Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
+
+ // Hidden2Size is the size of hidden 2 layer.
+ Hidden2Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
+
+ // Script is an interpreted script that is run to set parameters in Layer and Path
+ // sheets, by default using the "Script" set name.
+ Script string `new-window:"+" width:"100"`
+
+ // Sheet is the extra params sheet name(s) to use (space separated
+ // if multiple). Must be valid name as listed in compiled-in params
+ // or loaded params.
+ Sheet string
+
+ // Tag is an extra tag to add to file names and logs saved from this run.
+ Tag string
+
+ // Note is additional info to describe the run params etc,
+ // like a git commit message for the run.
+ Note string
+
+ // SaveAll will save a snapshot of all current param and config settings
+ // in a directory named params_ (or _good if Good is true),
+ // then quit. Useful for comparing to later changes and seeing multiple
+ // views of current params.
+ SaveAll bool `nest:"+"`
+
+ // Good is for SaveAll, save to params_good for a known good params state.
+ // This can be done prior to making a new release after all tests are passing.
+ // Add results to git to provide a full diff record of all params over time.
+ Good bool `nest:"+"`
+}
+
+// RunConfig has config parameters related to running the sim.
+type RunConfig struct {
+
+ // NThreads is the number of parallel threads for CPU computation;
+ // 0 = use default.
+ NThreads int `default:"0"`
+
+ // Run is the _starting_ run number, which determines the random seed.
+ // Runs counts up from there. Can do all runs in parallel by launching
+ // separate jobs with each starting Run, Runs = 1.
+ Run int `default:"0" flag:"run"`
+
+ // Runs is the total number of runs to do when running Train, starting from Run.
+ Runs int `default:"5" min:"1"`
+
+ // Epochs is the total number of epochs per run.
+ Epochs int `default:"100"`
+
+ // Trials is the total number of trials per epoch.
+ // Should be an even multiple of NData.
+ Trials int `default:"32"`
+
+ // Cycles is the total number of cycles per trial: at least 200.
+ Cycles int `default:"200"`
+
+ // PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100.
+ PlusCycles int `default:"50"`
+
+ // NZero is how many perfect, zero-error epochs before stopping a Run.
+ NZero int `default:"2"`
+
+ // TestInterval is how often (in epochs) to run through all the test patterns,
+ // in terms of training epochs. Can use 0 or -1 for no testing.
+ TestInterval int `default:"5"`
+
+ // PCAInterval is how often (in epochs) to compute PCA on hidden
+ // representations to measure variance.
+ PCAInterval int `default:"10"`
+
+ // StartWeights is the name of weights file to load at start of first run.
+ StartWeights string
+}
+
+// LogConfig has config parameters related to logging data.
+type LogConfig struct {
+
+ // SaveWeights will save final weights after each run.
+ SaveWeights bool
+
+ // Train has the list of Train mode levels to save log files for.
+ Train []string `default:"['Expt', 'Run', 'Epoch']" nest:"+"`
+
+ // Test has the list of Test mode levels to save log files for.
+ Test []string `nest:"+"`
+}
+
+// Config has the overall Sim configuration options.
+type Config struct {
+
+ // Name is the short name of the sim.
+ Name string `display:"-" default:"RA25"`
+
+ // Title is the longer title of the sim.
+ Title string `display:"-" default:"Leabra random associator"`
+
+ // URL is a link to the online README or other documentation for this sim.
+ URL string `display:"-" default:"https://github.com/emer/leabra/blob/main/sims/ra25/README.md"`
+
+ // Doc is brief documentation of the sim.
+ Doc string `display:"-" default:"This demonstrates a basic Leabra model and provides a template for creating new models. It has a random-associator four-layer leabra network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers."`
+
+ // Includes has a list of additional config files to include.
+ // After configuration, it contains list of include files added.
+ Includes []string
+
+ // GUI means open the GUI. Otherwise it runs automatically and quits,
+ // saving results to log files.
+ GUI bool `default:"true"`
+
+ // Debug reports debugging information.
+ Debug bool
+
+ // Params has parameter related configuration options.
+ Params ParamConfig `display:"add-fields"`
+
+ // Run has sim running related configuration options.
+ Run RunConfig `display:"add-fields"`
+
+ // Log has data logging related configuration options.
+ Log LogConfig `display:"add-fields"`
+}
+
+func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
+
+func (cfg *Config) Defaults() {
+ errors.Log(reflectx.SetFromDefaultTags(cfg))
+}
+
+func NewConfig() *Config {
+ cfg := &Config{}
+ cfg.Defaults()
+ return cfg
+}
diff --git a/examples/ra25/ra25.go b/examples/ra25/ra25.go
index 6188981..2e4d6f0 100644
--- a/examples/ra25/ra25.go
+++ b/examples/ra25/ra25.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019, The Emergent Authors. All rights reserved.
+// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -6,221 +6,68 @@
// that uses the standard supervised learning paradigm to learn
// mappings between 25 random input / output patterns
// defined over 5x5 input / output layers (i.e., 25 units)
-package main
+package ra25
-//go:generate core generate -add-types
+//go:generate core generate -add-types -add-funcs -gosl
import (
"embed"
- "log"
+ "fmt"
+ "io/fs"
"os"
+ "reflect"
+ "cogentcore.org/core/base/errors"
+ "cogentcore.org/core/base/metadata"
"cogentcore.org/core/core"
"cogentcore.org/core/enums"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
- "cogentcore.org/core/math32/vecint"
"cogentcore.org/core/tree"
"cogentcore.org/lab/base/mpi"
"cogentcore.org/lab/base/randx"
- "github.com/emer/emergent/v2/econfig"
+ "cogentcore.org/lab/patterns"
+ "cogentcore.org/lab/plot"
+ "cogentcore.org/lab/stats/stats"
+ "cogentcore.org/lab/table"
+ "cogentcore.org/lab/tensor"
+ "cogentcore.org/lab/tensorfs"
"github.com/emer/emergent/v2/egui"
- "github.com/emer/emergent/v2/elog"
- "github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/env"
- "github.com/emer/emergent/v2/estats"
- "github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
- "github.com/emer/emergent/v2/netview"
- "github.com/emer/emergent/v2/params"
- "github.com/emer/emergent/v2/patgen"
"github.com/emer/emergent/v2/paths"
- "github.com/emer/etensor/tensor"
- "github.com/emer/etensor/tensor/table"
"github.com/emer/leabra/v2/leabra"
)
-//go:embed *.tsv
-var patsfs embed.FS
+//go:embed random_5x5_25.tsv
+var content embed.FS
-func main() {
- sim := &Sim{}
- sim.New()
- sim.ConfigAll()
- if sim.Config.GUI {
- sim.RunGUI()
- } else {
- sim.RunNoGUI()
- }
-}
-
-// ParamSets is the default set of parameters.
-// Base is always applied, and others can be optionally
-// selected to apply on top of that.
-var ParamSets = params.Sets{
- "Base": {
- {Sel: "Path", Desc: "norm and momentum on works better, but wt bal is not better for smaller nets",
- Params: params.Params{
- "Path.Learn.Norm.On": "true",
- "Path.Learn.Momentum.On": "true",
- "Path.Learn.WtBal.On": "true", // no diff really
- // "Path.Learn.WtBal.Targs": "true", // no diff here
- }},
- {Sel: "Layer", Desc: "using default 1.8 inhib for all of network -- can explore",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "1.8",
- "Layer.Act.Init.Decay": "0.0",
- "Layer.Act.Gbar.L": "0.1", // set explictly, new default, a bit better vs 0.2
- }},
- {Sel: ".BackPath", Desc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
- Params: params.Params{
- "Path.WtScale.Rel": "0.2",
- }},
- {Sel: "#Output", Desc: "output definitely needs lower inhib -- true for smaller layers in general",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "1.4",
- }},
- },
- "DefaultInhib": {
- {Sel: "#Output", Desc: "go back to default",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "1.8",
- }},
- },
- "NoMomentum": {
- {Sel: "Path", Desc: "no norm or momentum",
- Params: params.Params{
- "Path.Learn.Norm.On": "false",
- "Path.Learn.Momentum.On": "false",
- }},
- },
- "WtBalOn": {
- {Sel: "Path", Desc: "weight bal on",
- Params: params.Params{
- "Path.Learn.WtBal.On": "true",
- }},
- },
-}
-
-// ParamConfig has config parameters related to sim params
-type ParamConfig struct {
-
- // network parameters
- Network map[string]any
-
- // size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden1Size vecint.Vector2i `default:"{'X':7,'Y':7}" nest:"+"`
-
- // size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden2Size vecint.Vector2i `default:"{'X':7,'Y':7}" nest:"+"`
-
- // Extra Param Sheet name(s) to use (space separated if multiple).
- // must be valid name as listed in compiled-in params or loaded params
- Sheet string
-
- // extra tag to add to file names and logs saved from this run
- Tag string
-
- // user note -- describe the run params etc -- like a git commit message for the run
- Note string
-
- // Name of the JSON file to input saved parameters from.
- File string `nest:"+"`
-
- // Save a snapshot of all current param and config settings
- // in a directory named params_ (or _good if Good is true), then quit.
- // Useful for comparing to later changes and seeing multiple views of current params.
- SaveAll bool `nest:"+"`
-
- // For SaveAll, save to params_good for a known good params state.
- // This can be done prior to making a new release after all tests are passing.
- // add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+"`
-}
-
-// RunConfig has config parameters related to running the sim
-type RunConfig struct {
- // starting run number, which determines the random seed.
- // runs counts from there, can do all runs in parallel by launching
- // separate jobs with each run, runs = 1.
- Run int `default:"0"`
-
- // total number of runs to do when running Train
- NRuns int `default:"5" min:"1"`
-
- // total number of epochs per run
- NEpochs int `default:"100"`
-
- // stop run after this number of perfect, zero-error epochs.
- NZero int `default:"2"`
-
- // total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `default:"32"`
-
- // how often to run through all the test patterns, in terms of training epochs.
- // can use 0 or -1 for no testing.
- TestInterval int `default:"5"`
-
- // how frequently (in epochs) to compute PCA on hidden representations
- // to measure variance?
- PCAInterval int `default:"5"`
-
- // if non-empty, is the name of weights file to load at start
- // of first run, for testing.
- StartWts string
-}
-
-// LogConfig has config parameters related to logging data
-type LogConfig struct {
-
- // if true, save final weights after each run
- SaveWeights bool
-
- // if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `default:"true" nest:"+"`
-
- // if true, save run log to file, as .run.tsv typically
- Run bool `default:"true" nest:"+"`
-
- // if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `default:"false" nest:"+"`
-
- // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `default:"false" nest:"+"`
-
- // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `default:"false" nest:"+"`
-
- // if true, save network activation etc data from testing trials,
- // for later viewing in netview.
- NetData bool
-}
-
-// Config is a standard Sim config -- use as a starting point.
-type Config struct {
-
- // specify include files here, and after configuration,
- // it contains list of include files added.
- Includes []string
-
- // open the GUI -- does not automatically run -- if false,
- // then runs automatically and quits.
- GUI bool `default:"true"`
-
- // log debugging information
- Debug bool
-
- // parameter related configuration options
- Params ParamConfig `display:"add-fields"`
+// Modes are the looping modes (Stacks) for running and statistics.
+type Modes int32 //enums:enum
+const (
+ Train Modes = iota
+ Test
+)
- // sim running related configuration options
- Run RunConfig `display:"add-fields"`
+// Levels are the looping levels for running and statistics.
+type Levels int32 //enums:enum
+const (
+ Cycle Levels = iota
+ Trial
+ Epoch
+ Run
+ Expt
+)
- // data logging related configuration options
- Log LogConfig `display:"add-fields"`
-}
+// StatsPhase is the phase of stats processing for given mode, level.
+// Accumulated values are reset at Start, added each Step.
+type StatsPhase int32 //enums:enum
+const (
+ Start StatsPhase = iota
+ Step
+)
-func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
+// see params.go for params, config.go for config
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
@@ -230,68 +77,92 @@ func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `new-window:"+"`
+ Config *Config `new-window:"+"`
- // the network -- click to view / edit parameters for layers, paths, etc
+ // Net is the network: click to view / edit parameters for layers, paths, etc.
Net *leabra.Network `new-window:"+" display:"no-inline"`
- // network parameter management
- Params emer.NetParams `display:"add-fields"`
+ // Params manages network parameter setting.
+ Params leabra.Params `display:"inline"`
- // contains looper control loops for running sim
+ // Loops are the control loops for running the sim, in different Modes
+ // across stacks of Levels.
Loops *looper.Stacks `new-window:"+" display:"no-inline"`
- // contains computed statistic values
- Stats estats.Stats `new-window:"+"`
+ // Envs provides mode-string based storage of environments.
+ Envs env.Envs `new-window:"+" display:"no-inline"`
+
+ // TrainUpdate has Train mode netview update parameters.
+ TrainUpdate leabra.NetViewUpdate `display:"inline"`
- // Contains all the logs and information about the logs.'
- Logs elog.Logs `new-window:"+"`
+ // TestUpdate has Test mode netview update parameters.
+ TestUpdate leabra.NetViewUpdate `display:"inline"`
- // the training patterns to use
- Patterns *table.Table `new-window:"+" display:"no-inline"`
+ // Root is the root tensorfs directory, where all stats and other misc sim data goes.
+ Root *tensorfs.Node `display:"-"`
- // Environments
- Envs env.Envs `new-window:"+" display:"no-inline"`
+ // Stats has the stats directory within Root.
+ Stats *tensorfs.Node `display:"-"`
- // leabra timing parameters and state
- Context leabra.Context `new-window:"+"`
+ // Current has the current stats values within Stats.
+ Current *tensorfs.Node `display:"-"`
- // netview update parameters
- ViewUpdate netview.ViewUpdate `display:"add-fields"`
+ // StatFuncs are statistics functions called at given mode and level,
+ // to perform all stats computations. phase = Start does init at start of given level,
+ // and all initialization / configuration (called during Init too).
+ StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
- // manages all the gui elements
+ // GUI manages all the GUI elements
GUI egui.GUI `display:"-"`
- // a list of random seeds to use for each run
+ // RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
-// New creates new blank elements and initializes defaults
-func (ss *Sim) New() {
- econfig.Config(&ss.Config, "config.toml")
- ss.Net = leabra.NewNetwork("RA25")
- ss.Params.Config(ParamSets, ss.Config.Params.Sheet, ss.Config.Params.Tag, ss.Net)
- ss.Stats.Init()
- ss.Patterns = &table.Table{}
- ss.RandSeeds.Init(100) // max 100 runs
- ss.InitRandSeed(0)
- ss.Context.Defaults()
+// RunSim runs the simulation as a standalone app
+// with given configuration.
+func RunSim(cfg *Config) error {
+ ss := &Sim{Config: cfg}
+ ss.ConfigSim()
+ if ss.Config.GUI {
+ ss.RunGUI()
+ } else {
+ ss.RunNoGUI()
+ }
+ return nil
}
-//////////////////////////////////////////////////////////////////////////////
-// Configs
+// EmbedSim runs the simulation with default configuration
+// embedded within given body element.
+func EmbedSim(b tree.Node) *Sim {
+ cfg := NewConfig()
+ cfg.GUI = true
+ ss := &Sim{Config: cfg}
+ ss.ConfigSim()
+ ss.Init()
+ ss.ConfigGUI(b)
+ return ss
+}
-// ConfigAll configures all the elements using the standard functions
-func (ss *Sim) ConfigAll() {
- // ss.ConfigPatterns()
- ss.OpenPatterns()
+func (ss *Sim) ConfigSim() {
+ ss.Root, _ = tensorfs.NewDir("Root")
+ tensorfs.CurRoot = ss.Root
+ ss.Net = leabra.NewNetwork(ss.Config.Name)
+ ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag, reflect.ValueOf(ss))
+ ss.RandSeeds.Init(100) // max 100 runs
+ ss.InitRandSeed(0)
+ // ss.ConfigInputs()
+ ss.OpenInputs()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
- ss.ConfigLogs()
ss.ConfigLoops()
+ ss.ConfigStats()
+ // if ss.Config.Run.GPU {
+ // fmt.Println(leabra.GPUSystem.Vars().StringDoc())
+ // }
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
- ss.Net.SaveParamsSnapshot(&ss.Params.Params, &ss.Config, ss.Config.Params.Good)
+ ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
os.Exit(0)
}
}
@@ -303,26 +174,31 @@ func (ss *Sim) ConfigEnv() {
trn = &env.FixedTable{}
tst = &env.FixedTable{}
} else {
- trn = ss.Envs.ByMode(etime.Train).(*env.FixedTable)
- tst = ss.Envs.ByMode(etime.Test).(*env.FixedTable)
+ trn = ss.Envs.ByMode(Train).(*env.FixedTable)
+ tst = ss.Envs.ByMode(Test).(*env.FixedTable)
}
+ inputs := tensorfs.DirTable(ss.Root.Dir("Inputs/Train"), nil)
+
+ // this logic can be used to create train-test splits of a set of patterns:
+ // n := inputs.NumRows()
+ // order := rand.Perm(n)
+ // ntrn := int(0.85 * float64(n))
+ // trnEnv := table.NewView(inputs)
+ // tstEnv := table.NewView(inputs)
+ // trnEnv.Indexes = order[:ntrn]
+ // tstEnv.Indexes = order[ntrn:]
+
// note: names must be standard here!
- trn.Name = etime.Train.String()
- trn.Config(table.NewIndexView(ss.Patterns))
+ trn.Name = Train.String()
+ trn.Config(table.NewView(inputs))
trn.Validate()
- tst.Name = etime.Test.String()
- tst.Config(table.NewIndexView(ss.Patterns))
+ tst.Name = Test.String()
+ tst.Config(table.NewView(inputs))
tst.Sequential = true
tst.Validate()
- // note: to create a train / test split of pats, do this:
- // all := table.NewIndexView(ss.Patterns)
- // splits, _ := split.Permuted(all, []float64{.8, .2}, []string{"Train", "Test"})
- // trn.Table = splits.Splits[0]
- // tst.Table = splits.Splits[1]
-
trn.Init(0)
tst.Init(0)
@@ -331,16 +207,14 @@ func (ss *Sim) ConfigEnv() {
}
func (ss *Sim) ConfigNet(net *leabra.Network) {
+ // net.Context.SetThetaCycles(int32(ss.Config.Run.Cycles)).
+ // SetPlusCycles(int32(ss.Config.Run.PlusCycles))
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
- inp := net.AddLayer2D("Input", 5, 5, leabra.InputLayer)
- inp.Doc = "Input represents sensory input, coming into the cortex via tha thalamus"
- hid1 := net.AddLayer2D("Hidden1", ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X, leabra.SuperLayer)
- hid1.Doc = "First hidden layer performs initial internal processing of sensory inputs, transforming in preparation for producing appropriate responses"
- hid2 := net.AddLayer2D("Hidden2", ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X, leabra.SuperLayer)
- hid2.Doc = "Another 'deep' layer of internal processing to prepare directly for Output response"
- out := net.AddLayer2D("Output", 5, 5, leabra.TargetLayer)
- out.Doc = "Output represents motor output response, via deep layer 5 neurons projecting supcortically, in motor cortex"
+ inp := net.AddLayer2D("Input", leabra.InputLayer, 5, 5)
+ hid1 := net.AddLayer2D("Hidden1", leabra.SuperLayer, ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X)
+ hid2 := net.AddLayer2D("Hidden2", leabra.SuperLayer, ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X)
+ out := net.AddLayer2D("Output", leabra.TargetLayer, 5, 5)
// use this to position layers relative to each other
// hid2.PlaceRightOf(hid1, 2)
@@ -356,40 +230,37 @@ func (ss *Sim) ConfigNet(net *leabra.Network) {
// net.LateralConnectLayerPath(hid1, full, &leabra.HebbPath{}).SetType(InhibPath)
// note: if you wanted to change a layer type from e.g., Target to Compare, do this:
- // out.SetType(emer.Compare)
+ // out.Type = leabra.CompareLayer
// that would mean that the output layer doesn't reflect target values in plus phase
// and thus removes error-driven learning -- but stats are still computed.
net.Build()
net.Defaults()
+ net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
net.InitWeights()
}
func (ss *Sim) ApplyParams() {
- ss.Params.SetAll()
- if ss.Config.Params.Network != nil {
- ss.Params.SetNetworkMap(ss.Net, ss.Config.Params.Network)
- }
+ ss.Params.Script = ss.Config.Params.Script
+ ss.Params.ApplyAll(ss.Net)
}
-////////////////////////////////////////////////////////////////////////////////
-// Init, utils
+//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
- if ss.Config.GUI {
- ss.Stats.SetString("RunName", ss.Params.RunName(0)) // in case user interactively changes tag
- }
ss.Loops.ResetCounters()
+ ss.SetRunName()
ss.InitRandSeed(0)
// ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.ApplyParams()
+ ss.StatsInit()
ss.NewRun()
- ss.ViewUpdate.RecordSyns()
- ss.ViewUpdate.Update()
+ ss.TrainUpdate.RecordSyns()
+ ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
@@ -398,108 +269,84 @@ func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
+// CurrentMode returns the current Train / Test mode from Context.
+func (ss *Sim) CurrentMode() Modes {
+ ctx := ss.Net.Context()
+ var md Modes
+ md.SetInt64(int64(ctx.Mode))
+ return md
+}
+
+// NetViewUpdater returns the NetViewUpdate for given mode.
+func (ss *Sim) NetViewUpdater(mode enums.Enum) *leabra.NetViewUpdate {
+ if mode.Int64() == Train.Int64() {
+ return &ss.TrainUpdate
+ }
+ return &ss.TestUpdate
+}
+
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
ls := looper.NewStacks()
- trls := ss.Config.Run.NTrials
+ trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
+ cycles := ss.Config.Run.Cycles
+ plusPhase := ss.Config.Run.PlusCycles
- ls.AddStack(etime.Train).
- AddTime(etime.Run, ss.Config.Run.NRuns).
- AddTime(etime.Epoch, ss.Config.Run.NEpochs).
- AddTime(etime.Trial, trls).
- AddTime(etime.Cycle, 100)
+ ls.AddStack(Train, Trial).
+ AddLevel(Expt, 1).
+ AddLevel(Run, ss.Config.Run.Runs).
+ AddLevel(Epoch, ss.Config.Run.Epochs).
+ AddLevelIncr(Trial, trials, ss.Config.Run.NData).
+ AddLevel(Cycle, cycles)
- ls.AddStack(etime.Test).
- AddTime(etime.Epoch, 1).
- AddTime(etime.Trial, trls).
- AddTime(etime.Cycle, 100)
+ ls.AddStack(Test, Trial).
+ AddLevel(Epoch, 1).
+ AddLevelIncr(Trial, trials, ss.Config.Run.NData).
+ AddLevel(Cycle, cycles)
- leabra.LooperStdPhases(ls, &ss.Context, ss.Net, 75, 99) // plus phase timing
- leabra.LooperSimCycleAndLearn(ls, ss.Net, &ss.Context, &ss.ViewUpdate) // std algo code
+ leabra.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
- ls.Stacks[etime.Train].OnInit.Add("Init", func() { ss.Init() })
+ ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
- for m, _ := range ls.Stacks {
- st := ls.Stacks[m]
- st.Loops[etime.Trial].OnStart.Add("ApplyInputs", func() {
- ss.ApplyInputs()
- })
- }
+ ls.AddOnStartToLoop(Trial, "ApplyInputs", func(mode enums.Enum) {
+ ss.ApplyInputs(mode.(Modes))
+ })
- ls.Loop(etime.Train, etime.Run).OnStart.Add("NewRun", ss.NewRun)
+ ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
- // Train stop early condition
- ls.Loop(etime.Train, etime.Epoch).IsDone.AddBool("NZeroStop", func() bool {
- // This is calculated in TrialStats
+ trainEpoch := ls.Loop(Train, Epoch)
+ trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
stopNz := ss.Config.Run.NZero
if stopNz <= 0 {
- stopNz = 2
+ return false
}
- curNZero := ss.Stats.Int("NZero")
+ curModeDir := ss.Current.Dir(Train.String())
+ curNZero := int(curModeDir.Value("NZero").Float1D(-1))
stop := curNZero >= stopNz
return stop
+ return false
})
- // Add Testing
- trainEpoch := ls.Loop(etime.Train, etime.Epoch)
trainEpoch.OnStart.Add("TestAtInterval", func() {
if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
- // Note the +1 so that it doesn't occur at the 0th timestep.
ss.TestAll()
}
})
- /////////////////////////////////////////////
- // Logging
-
- ls.Loop(etime.Test, etime.Epoch).OnEnd.Add("LogTestErrors", func() {
- leabra.LogTestErrors(&ss.Logs)
- })
- ls.Loop(etime.Train, etime.Epoch).OnEnd.Add("PCAStats", func() {
- trnEpc := ls.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- if ss.Config.Run.PCAInterval > 0 && trnEpc%ss.Config.Run.PCAInterval == 0 {
- leabra.PCAStats(ss.Net, &ss.Logs, &ss.Stats)
- ss.Logs.ResetLog(etime.Analyze, etime.Trial)
- }
- })
-
- ls.AddOnEndToAll("Log", func(mode, time enums.Enum) {
- ss.Log(mode.(etime.Modes), time.(etime.Times))
- })
- leabra.LooperResetLogBelow(ls, &ss.Logs)
-
- ls.Loop(etime.Train, etime.Trial).OnEnd.Add("LogAnalyze", func() {
- trnEpc := ls.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- if (ss.Config.Run.PCAInterval > 0) && (trnEpc%ss.Config.Run.PCAInterval == 0) {
- ss.Log(etime.Analyze, etime.Trial)
- }
- })
-
- ls.Loop(etime.Train, etime.Run).OnEnd.Add("RunStats", func() {
- ss.Logs.RunStats("PctCor", "FirstZero", "LastZero")
- })
+ ls.AddOnStartToAll("StatsStart", ss.StatsStart)
+ ls.AddOnEndToAll("StatsStep", ss.StatsStep)
- // Save weights to file, to look at later
- ls.Loop(etime.Train, etime.Run).OnEnd.Add("SaveWeights", func() {
- ctrString := ss.Stats.PrintValues([]string{"Run", "Epoch"}, []string{"%03d", "%05d"}, "_")
- leabra.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.Stats.String("RunName"))
+ ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
+ ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
+ leabra.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
- ////////////////////////////////////////////
- // GUI
+ if ss.Config.GUI {
+ leabra.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater)
- if !ss.Config.GUI {
- if ss.Config.Log.NetData {
- ls.Loop(etime.Test, etime.Trial).OnEnd.Add("NetDataRecord", func() {
- ss.GUI.NetDataRecord(ss.ViewUpdate.Text)
- })
- }
- } else {
- leabra.LooperUpdateNetView(ls, &ss.ViewUpdate, ss.Net, ss.NetViewCounters)
- leabra.LooperUpdatePlots(ls, &ss.GUI)
- ls.Stacks[etime.Train].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
- ls.Stacks[etime.Test].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
+ ls.Stacks[Train].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
+ ls.Stacks[Test].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
}
if ss.Config.Debug {
@@ -508,231 +355,380 @@ func (ss *Sim) ConfigLoops() {
ss.Loops = ls
}
-// ApplyInputs applies input patterns from given environment.
-// It is good practice to have this be a separate method with appropriate
-// args so that it can be used for various different contexts
-// (training, testing, etc).
-func (ss *Sim) ApplyInputs() {
- ctx := &ss.Context
+// ApplyInputs applies input patterns from given environment for given mode.
+// Any other start-of-trial logic can also be put here.
+func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
- ev := ss.Envs.ByMode(ctx.Mode).(*env.FixedTable)
- ev.Step()
+ ndata := int(net.Context().NData)
+ curModeDir := ss.Current.Dir(mode.String())
+ ev := ss.Envs.ByMode(mode)
lays := net.LayersByType(leabra.InputLayer, leabra.TargetLayer)
net.InitExt()
- ss.Stats.SetString("TrialName", ev.TrialName.Cur)
- for _, lnm := range lays {
- ly := ss.Net.LayerByName(lnm)
- pats := ev.State(ly.Name)
- if pats != nil {
- ly.ApplyExt(pats)
+ for di := range ndata {
+ ev.Step()
+ curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
+ for _, lnm := range lays {
+ ly := ss.Net.LayerByName(lnm)
+ st := ev.State(ly.Name)
+ if st != nil {
+ ly.ApplyExt(uint32(di), st)
+ }
}
}
+ net.ApplyExts()
}
-// NewRun intializes a new run of the model, using the TrainEnv.Run counter
-// for the new run value
+// NewRun intializes a new Run level of the model.
func (ss *Sim) NewRun() {
- ctx := &ss.Context
- ss.InitRandSeed(ss.Loops.Loop(etime.Train, etime.Run).Counter.Cur)
- ss.Envs.ByMode(etime.Train).Init(0)
- ss.Envs.ByMode(etime.Test).Init(0)
+ ctx := ss.Net.Context()
+ ss.InitRandSeed(ss.Loops.Loop(Train, Run).Counter.Cur)
+ ss.Envs.ByMode(Train).Init(0)
+ ss.Envs.ByMode(Test).Init(0)
ctx.Reset()
- ctx.Mode = etime.Train
ss.Net.InitWeights()
- ss.InitStats()
- ss.StatCounters()
- ss.Logs.ResetLog(etime.Train, etime.Epoch)
- ss.Logs.ResetLog(etime.Test, etime.Epoch)
+ if ss.Config.Run.StartWeights != "" {
+ ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights))
+ mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights)
+ }
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
- ss.Envs.ByMode(etime.Test).Init(0)
- ss.Loops.ResetAndRun(etime.Test)
- ss.Loops.Mode = etime.Train // Important to reset Mode back to Train because this is called from within the Train Run.
+ ss.Envs.ByMode(Test).Init(0)
+ ss.Loops.ResetAndRun(Test)
+ ss.Loops.Mode = Train // important because this is called from Train Run: go back.
}
-/////////////////////////////////////////////////////////////////////////
-// Patterns
+//////// Inputs
-func (ss *Sim) ConfigPatterns() {
- dt := ss.Patterns
- dt.SetMetaData("name", "TrainPatterns")
- dt.SetMetaData("desc", "Training patterns")
+func (ss *Sim) ConfigInputs() {
+ dt := table.New()
+ metadata.SetName(dt, "Train")
+ metadata.SetDoc(dt, "Training inputs")
dt.AddStringColumn("Name")
- dt.AddFloat32TensorColumn("Input", []int{5, 5}, "Y", "X")
- dt.AddFloat32TensorColumn("Output", []int{5, 5}, "Y", "X")
+ dt.AddFloat32Column("Input", 5, 5)
+ dt.AddFloat32Column("Output", 5, 5)
dt.SetNumRows(25)
- patgen.PermutedBinaryMinDiff(dt.Columns[1].(*tensor.Float32), 6, 1, 0, 3)
- patgen.PermutedBinaryMinDiff(dt.Columns[2].(*tensor.Float32), 6, 1, 0, 3)
- dt.SaveCSV("random_5x5_25_gen.tsv", table.Tab, table.Headers)
+ patterns.PermutedBinaryMinDiff(dt.Columns.Values[1], 6, 1, 0, 3)
+ patterns.PermutedBinaryMinDiff(dt.Columns.Values[2], 6, 1, 0, 3)
+ dt.SaveCSV("random_5x5_25_gen.tsv", tensor.Tab, table.Headers)
+
+ tensorfs.DirFromTable(ss.Root.Dir("Inputs/Train"), dt)
}
-func (ss *Sim) OpenPatterns() {
- dt := ss.Patterns
- dt.SetMetaData("name", "TrainPatterns")
- dt.SetMetaData("desc", "Training patterns")
- err := dt.OpenFS(patsfs, "random_5x5_25.tsv", table.Tab)
- if err != nil {
- log.Println(err)
+// OpenTable opens a [table.Table] from embedded content, storing
+// the data in the given tensorfs directory.
+func (ss *Sim) OpenTable(dir *tensorfs.Node, fsys fs.FS, fnm, name, docs string) (*table.Table, error) {
+ dt := table.New()
+ metadata.SetName(dt, name)
+ metadata.SetDoc(dt, docs)
+ err := dt.OpenFS(content, fnm, tensor.Tab)
+ if errors.Log(err) != nil {
+ return dt, err
}
+ tensorfs.DirFromTable(dir.Dir(name), dt)
+ return dt, err
}
-////////////////////////////////////////////////////////////////////////////////////////////
-// Stats
+func (ss *Sim) OpenInputs() {
+ dir := ss.Root.Dir("Inputs")
+ ss.OpenTable(dir, content, "random_5x5_25.tsv", "Train", "Training inputs")
+}
+
+//////// Stats
-// InitStats initializes all the statistics.
-// called at start of new run
-func (ss *Sim) InitStats() {
- ss.Stats.SetFloat("UnitErr", 0.0)
- ss.Stats.SetFloat("CorSim", 0.0)
- ss.Stats.SetString("TrialName", "")
- ss.Logs.InitErrStats() // inits TrlErr, FirstZero, LastZero, NZero
+// AddStat adds a stat compute function.
+func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
+ ss.StatFuncs = append(ss.StatFuncs, f)
}
-// StatCounters saves current counters to Stats, so they are available for logging etc
-// Also saves a string rep of them for ViewUpdate.Text
-func (ss *Sim) StatCounters() {
- ctx := &ss.Context
- mode := ctx.Mode
- ss.Loops.Stacks[mode].CountersToStats(&ss.Stats)
- // always use training epoch..
- trnEpc := ss.Loops.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- ss.Stats.SetInt("Epoch", trnEpc)
- trl := ss.Stats.Int("Trial")
- ss.Stats.SetInt("Trial", trl)
- ss.Stats.SetInt("Cycle", int(ctx.Cycle))
+// StatsStart is called by Looper at the start of given level, for each iteration.
+// It needs to call RunStats Start at the next level down.
+// e.g., each Epoch is the start of the full set of Trial Steps.
+func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
+ mode := lmd.(Modes)
+ level := ltm.(Levels)
+ if level <= Trial {
+ return
+ }
+ ss.RunStats(mode, level-1, Start)
}
-func (ss *Sim) NetViewCounters(tm etime.Times) {
- if ss.ViewUpdate.View == nil {
+// StatsStep is called by Looper at each step of iteration,
+// where it accumulates the stat results.
+func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
+ mode := lmd.(Modes)
+ level := ltm.(Levels)
+ if level == Cycle {
return
}
- if tm == etime.Trial {
- ss.TrialStats() // get trial stats for current di
+ ss.RunStats(mode, level, Step)
+ tensorfs.DirTable(leabra.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
+}
+
+// RunStats runs the StatFuncs for given mode, level and phase.
+func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
+ for _, sf := range ss.StatFuncs {
+ sf(mode, level, phase)
+ }
+ if phase == Step && ss.GUI.Tabs != nil {
+ nm := mode.String() + " " + level.String() + " Plot"
+ ss.GUI.Tabs.AsLab().GoUpdatePlot(nm)
+ if level == Run {
+ ss.GUI.Tabs.AsLab().GoUpdatePlot("Train RunAll Plot")
+ }
}
- ss.StatCounters()
- ss.ViewUpdate.Text = ss.Stats.Print([]string{"Run", "Epoch", "Trial", "TrialName", "Cycle", "UnitErr", "TrlErr", "CorSim"})
}
-// TrialStats computes the trial-level statistics.
-// Aggregation is done directly from log data.
-func (ss *Sim) TrialStats() {
- out := ss.Net.LayerByName("Output")
+// SetRunName sets the overall run name, used for naming output logs and weight files
+// based on params extra sheets and tag, and starting run number (for distributed runs).
+func (ss *Sim) SetRunName() string {
+ runName := ss.Params.RunName(ss.Config.Run.Run)
+ ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
+ return runName
+}
- ss.Stats.SetFloat("CorSim", float64(out.CosDiff.Cos))
+// RunName returns the overall run name, used for naming output logs and weight files
+// based on params extra sheets and tag, and starting run number (for distributed runs).
+func (ss *Sim) RunName() string {
+ return ss.Current.StringValue("RunName", 1).String1D(0)
+}
- sse, avgsse := out.MSE(0.5) // 0.5 = per-unit tolerance -- right side of .5
- ss.Stats.SetFloat("SSE", sse)
- ss.Stats.SetFloat("AvgSSE", avgsse)
- if sse > 0 {
- ss.Stats.SetFloat("TrlErr", 1)
- } else {
- ss.Stats.SetFloat("TrlErr", 0)
+// StatsInit initializes all the stats by calling Start across all modes and levels.
+func (ss *Sim) StatsInit() {
+ for md, st := range ss.Loops.Stacks {
+ mode := md.(Modes)
+ for _, lev := range st.Order {
+ level := lev.(Levels)
+ if level == Cycle {
+ continue
+ }
+ ss.RunStats(mode, level, Start)
+ }
+ }
+ if ss.GUI.Tabs != nil {
+ tbs := ss.GUI.Tabs.AsLab()
+ _, idx := tbs.CurrentTab()
+ tbs.PlotTensorFS(leabra.StatsNode(ss.Stats, Train, Epoch))
+ tbs.PlotTensorFS(leabra.StatsNode(ss.Stats, Train, Run))
+ tbs.PlotTensorFS(leabra.StatsNode(ss.Stats, Test, Trial))
+ tbs.PlotTensorFS(ss.Stats.Dir("Train/RunAll"))
+ tbs.SelectTabIndex(idx)
}
}
-//////////////////////////////////////////////////////////////////////////////
-// Logging
-
-func (ss *Sim) ConfigLogs() {
- ss.Stats.SetString("RunName", ss.Params.RunName(0)) // used for naming logs, stats, etc
-
- ss.Logs.AddCounterItems(etime.Run, etime.Epoch, etime.Trial, etime.Cycle)
- ss.Logs.AddStatStringItem(etime.AllModes, etime.AllTimes, "RunName")
- ss.Logs.AddStatStringItem(etime.AllModes, etime.Trial, "TrialName")
+// ConfigStats handles configures functions to do all stats computation
+// in the tensorfs system.
+func (ss *Sim) ConfigStats() {
+ net := ss.Net
+ ss.Stats = ss.Root.Dir("Stats")
+ ss.Current = ss.Stats.Dir("Current")
- ss.Logs.AddStatAggItem("CorSim", etime.Run, etime.Epoch, etime.Trial)
- ss.Logs.AddStatAggItem("UnitErr", etime.Run, etime.Epoch, etime.Trial)
- ss.Logs.AddErrStatAggItems("TrlErr", etime.Run, etime.Epoch, etime.Trial)
+ ss.SetRunName()
- ss.Logs.AddCopyFromFloatItems(etime.Train, []etime.Times{etime.Epoch, etime.Run}, etime.Test, etime.Epoch, "Tst", "CorSim", "UnitErr", "PctCor", "PctErr")
+ // last arg(s) are levels to exclude
+ counterFunc := leabra.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ counterFunc(mode, level, phase == Start)
+ })
+ runNameFunc := leabra.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ runNameFunc(mode, level, phase == Start)
+ })
+ trialNameFunc := leabra.StatTrialName(ss.Stats, ss.Current, ss.Loops, net, Trial)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ trialNameFunc(mode, level, phase == Start)
+ })
- ss.Logs.AddPerTrlMSec("PerTrlMSec", etime.Run, etime.Epoch, etime.Trial)
+ // up to a point, it is good to use loops over stats in one function,
+ // to reduce repetition of boilerplate.
+ statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ for _, name := range statNames {
+ if name == "NZero" && (mode != Train || level == Trial) {
+ return
+ }
+ modeDir := ss.Stats.Dir(mode.String())
+ curModeDir := ss.Current.Dir(mode.String())
+ levelDir := modeDir.Dir(level.String())
+ subDir := modeDir.Dir((level - 1).String()) // note: will fail for Cycle
+ tsr := levelDir.Float64(name)
+ ndata := int(ss.Net.Context().NData)
+ var stat float64
+ if phase == Start {
+ tsr.SetNumRows(0)
+ plot.SetFirstStyler(tsr, func(s *plot.Style) {
+ s.Range.SetMin(0).SetMax(1)
+ s.On = true
+ switch name {
+ case "NZero":
+ s.On = false
+ case "FirstZero", "LastZero":
+ if level < Run {
+ s.On = false
+ }
+ }
+ })
+ switch name {
+ case "NZero":
+ if level == Epoch {
+ curModeDir.Float64(name, 1).SetFloat1D(0, 0)
+ }
+ case "FirstZero", "LastZero":
+ if level == Epoch {
+ curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
+ }
+ }
+ continue
+ }
+ switch level {
+ case Trial:
+ out := ss.Net.LayerByName("Output")
+ for di := range ndata {
+ var stat float64
+ switch name {
+ case "CorSim":
+ stat = 1.0 - float64(leabra.LayerStates.Value(int(out.Index), int(di), int(leabra.LayerPhaseDiff)))
+ case "UnitErr":
+ stat = out.PctUnitErr(ss.Net.Context())[di]
+ case "Err":
+ uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
+ stat = 1.0
+ if uniterr == 0 {
+ stat = 0
+ }
+ }
+ curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
+ tsr.AppendRowFloat(stat)
+ }
+ case Epoch:
+ nz := curModeDir.Float64("NZero", 1).Float1D(0)
+ switch name {
+ case "NZero":
+ err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
+ stat = curModeDir.Float64(name, 1).Float1D(0)
+ if err == 0 {
+ stat++
+ } else {
+ stat = 0
+ }
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
+ case "FirstZero":
+ stat = curModeDir.Float64(name, 1).Float1D(0)
+ if stat < 0 && nz == 1 {
+ stat = curModeDir.Int("Epoch", 1).Float1D(0)
+ }
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
+ case "LastZero":
+ stat = curModeDir.Float64(name, 1).Float1D(0)
+ if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
+ stat = curModeDir.Int("Epoch", 1).Float1D(0)
+ }
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
+ default:
+ stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
+ }
+ tsr.AppendRowFloat(stat)
+ case Run:
+ stat = stats.StatFinal.Call(subDir.Value(name)).Float1D(0)
+ tsr.AppendRowFloat(stat)
+ default: // Expt
+ stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
+ tsr.AppendRowFloat(stat)
+ }
+ }
+ })
- layers := ss.Net.LayersByType(leabra.SuperLayer, leabra.CTLayer, leabra.TargetLayer)
- leabra.LogAddDiagnosticItems(&ss.Logs, layers, etime.Train, etime.Epoch, etime.Trial)
- leabra.LogInputLayer(&ss.Logs, ss.Net, etime.Train)
+ perTrlFunc := leabra.StatPerTrialMSec(ss.Stats, Train, Trial)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ perTrlFunc(mode, level, phase == Start)
+ })
- leabra.LogAddPCAItems(&ss.Logs, ss.Net, etime.Train, etime.Run, etime.Epoch, etime.Trial)
+ lays := net.LayersByType(leabra.SuperLayer, leabra.CTLayer, leabra.TargetLayer)
+ actGeFunc := leabra.StatLayerActGe(ss.Stats, net, Train, Trial, Run, lays...)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ actGeFunc(mode, level, phase == Start)
+ })
- ss.Logs.AddLayerTensorItems(ss.Net, "Act", etime.Test, etime.Trial, "InputLayer", "TargetLayer")
+ pcaFunc := leabra.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, Run, lays...)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
+ pcaFunc(mode, level, phase == Start, trnEpc)
+ })
- ss.Logs.PlotItems("CorSim", "PctCor", "FirstZero", "LastZero")
+ stateFunc := leabra.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "Output")
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ stateFunc(mode, level, phase == Start)
+ })
- ss.Logs.CreateTables()
- ss.Logs.SetContext(&ss.Stats, ss.Net)
- // don't plot certain combinations we don't use
- ss.Logs.NoPlot(etime.Train, etime.Cycle)
- ss.Logs.NoPlot(etime.Test, etime.Run)
- // note: Analyze not plotted by default
- ss.Logs.SetMeta(etime.Train, etime.Run, "LegendCol", "RunName")
+ runAllFunc := leabra.StatLevelAll(ss.Stats, Train, Run, func(s *plot.Style, cl tensor.Values) {
+ name := metadata.Name(cl)
+ switch name {
+ case "FirstZero", "LastZero":
+ s.On = true
+ s.Range.SetMin(0)
+ }
+ })
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ runAllFunc(mode, level, phase == Start)
+ })
}
-// Log is the main logging function, handles special things for different scopes
-func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
- ctx := &ss.Context
- if mode != etime.Analyze {
- ctx.Mode = mode // Also set specifically in a Loop callback.
+// StatCounters returns counters string to show at bottom of netview.
+func (ss *Sim) StatCounters(mode, level enums.Enum) string {
+ counters := ss.Loops.Stacks[mode].CountersString()
+ vu := ss.NetViewUpdater(mode)
+ if vu == nil || vu.View == nil {
+ return counters
}
- dt := ss.Logs.Table(mode, time)
- if dt == nil {
- return
+ di := vu.View.Di
+ counters += fmt.Sprintf(" Di: %d", di)
+ curModeDir := ss.Current.Dir(mode.String())
+ if curModeDir.Node("TrialName") == nil {
+ return counters
}
- row := dt.Rows
-
- switch {
- case time == etime.Cycle:
- return
- case time == etime.Trial:
- ss.TrialStats()
- ss.StatCounters()
+ counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
+ statNames := []string{"CorSim", "UnitErr", "Err"}
+ if level == Cycle || curModeDir.Node(statNames[0]) == nil {
+ return counters
}
-
- ss.Logs.LogRow(mode, time, row) // also logs to file, etc
+ for _, name := range statNames {
+ counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
+ }
+ return counters
}
-////////////////////////////////////////////////////////////////////////////////////////////
-// Gui
+//////// GUI
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
-func (ss *Sim) ConfigGUI() {
- title := "Leabra Random Associator"
- ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Leabra model. See emergent on GitHub.
`)
- ss.GUI.CycleUpdateInterval = 10
-
+func (ss *Sim) ConfigGUI(b tree.Node) {
+ ss.GUI.MakeBody(b, ss, ss.Root, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
+ ss.GUI.StopLevel = Trial
nv := ss.GUI.AddNetView("Network")
- nv.Options.MaxRecs = 300
+ nv.Options.MaxRecs = 2 * ss.Config.Run.Cycles
+ nv.Options.Raster.Max = ss.Config.Run.Cycles
nv.SetNet(ss.Net)
- ss.ViewUpdate.Config(nv, etime.AlphaCycle, etime.AlphaCycle)
- ss.GUI.ViewUpdate = &ss.ViewUpdate
+ ss.TrainUpdate.Config(nv, leabra.Theta, ss.StatCounters)
+ ss.TestUpdate.Config(nv, leabra.Theta, ss.StatCounters)
+ ss.GUI.OnStop = func(mode, level enums.Enum) {
+ vu := ss.NetViewUpdater(mode)
+ vu.UpdateWhenStopped(mode, level)
+ }
- nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75)
nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
- ss.GUI.AddPlots(title, &ss.Logs)
-
+ ss.StatsInit()
ss.GUI.FinalizeGUI(false)
}
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
- ////////////////////////////////////////////////
tree.Add(p, func(w *core.Separator) {})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Reset RunLog",
- Icon: icons.Reset,
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- tree.Add(p, func(w *core.Separator) {})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "New Seed",
+ ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
+ Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
@@ -740,63 +736,42 @@ func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.RandSeeds.NewSeeds()
},
})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "README",
+ ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
+ Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
- core.TheApp.OpenURL("https://github.com/emer/leabra/blob/main/examples/ra25/README.md")
+ core.TheApp.OpenURL(ss.Config.URL)
},
})
}
func (ss *Sim) RunGUI() {
ss.Init()
- ss.ConfigGUI()
+ ss.ConfigGUI(nil)
ss.GUI.Body.RunMainWindow()
}
func (ss *Sim) RunNoGUI() {
+ ss.Init()
+
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
- runName := ss.Params.RunName(ss.Config.Run.Run)
- ss.Stats.SetString("RunName", runName) // used for naming logs, stats, etc
- netName := ss.Net.Name
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Trial, etime.Train, etime.Trial, "trl", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Epoch, etime.Train, etime.Epoch, "epc", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Run, etime.Train, etime.Run, "run", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.TestEpoch, etime.Test, etime.Epoch, "tst_epc", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.TestTrial, etime.Test, etime.Trial, "tst_trl", netName, runName)
-
- netdata := ss.Config.Log.NetData
- if netdata {
- mpi.Printf("Saving NetView data from testing\n")
- ss.GUI.InitNetData(ss.Net, 200)
- }
-
- ss.Init()
-
- mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.NRuns, ss.Config.Run.Run)
- ss.Loops.Loop(etime.Train, etime.Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.NRuns)
-
- if ss.Config.Run.StartWts != "" { // this is just for testing -- not usually needed
- ss.Loops.Step(etime.Train, 1, etime.Trial) // get past NewRun
- ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWts))
- mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWts)
- }
-
- mpi.Printf("Set NThreads to: %d\n", ss.Net.NThreads)
+ runName := ss.SetRunName()
+ netName := ss.Net.Name
+ cfg := &ss.Config.Log
+ leabra.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
- ss.Loops.Run(etime.Train)
+ mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
+ ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
- ss.Logs.CloseLogFiles()
+ ss.Loops.Run(Train)
- if netdata {
- ss.GUI.SaveNetData(ss.Stats.String("RunName"))
- }
+ leabra.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
}
diff --git a/examples/ra25/typegen.go b/examples/ra25/typegen.go
index 4163884..2310e29 100644
--- a/examples/ra25/typegen.go
+++ b/examples/ra25/typegen.go
@@ -1,17 +1,29 @@
-// Code generated by "core generate -add-types"; DO NOT EDIT.
+// Code generated by "core generate -add-types -add-funcs -gosl"; DO NOT EDIT.
-package main
+package ra25
import (
"cogentcore.org/core/types"
)
-var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params", Fields: []types.Field{{Name: "Network", Doc: "network parameters"}, {Name: "Hidden1Size", Doc: "size of hidden layer -- can use emer.LaySize for 4D layers"}, {Name: "Hidden2Size", Doc: "size of hidden layer -- can use emer.LaySize for 4D layers"}, {Name: "Sheet", Doc: "Extra Param Sheet name(s) to use (space separated if multiple).\nmust be valid name as listed in compiled-in params or loaded params"}, {Name: "Tag", Doc: "extra tag to add to file names and logs saved from this run"}, {Name: "Note", Doc: "user note -- describe the run params etc -- like a git commit message for the run"}, {Name: "File", Doc: "Name of the JSON file to input saved parameters from."}, {Name: "SaveAll", Doc: "Save a snapshot of all current param and config settings\nin a directory named params_ (or _good if Good is true), then quit.\nUseful for comparing to later changes and seeing multiple views of current params."}, {Name: "Good", Doc: "For SaveAll, save to params_good for a known good params state.\nThis can be done prior to making a new release after all tests are passing.\nadd results to git to provide a full diff record of all params over time."}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params.", Fields: []types.Field{{Name: "Hidden1Size", Doc: "Hidden1Size is the size of hidden 1 layer."}, {Name: "Hidden2Size", Doc: "Hidden2Size is the size of hidden 2 layer."}, {Name: "Script", Doc: "Script is an interpreted script that is run to set parameters in Layer and Path\nsheets, by default using the \"Script\" set name."}, {Name: "Sheet", Doc: "Sheet is the extra params sheet name(s) to use (space separated\nif multiple). Must be valid name as listed in compiled-in params\nor loaded params."}, {Name: "Tag", Doc: "Tag is an extra tag to add to file names and logs saved from this run."}, {Name: "Note", Doc: "Note is additional info to describe the run params etc,\nlike a git commit message for the run."}, {Name: "SaveAll", Doc: "SaveAll will save a snapshot of all current param and config settings\nin a directory named params_ (or _good if Good is true),\nthen quit. Useful for comparing to later changes and seeing multiple\nviews of current params."}, {Name: "Good", Doc: "Good is for SaveAll, save to params_good for a known good params state.\nThis can be done prior to making a new release after all tests are passing.\nAdd results to git to provide a full diff record of all params over level."}}})
-var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim", Fields: []types.Field{{Name: "Run", Doc: "starting run number, which determines the random seed.\nruns counts from there, can do all runs in parallel by launching\nseparate jobs with each run, runs = 1."}, {Name: "NRuns", Doc: "total number of runs to do when running Train"}, {Name: "NEpochs", Doc: "total number of epochs per run"}, {Name: "NZero", Doc: "stop run after this number of perfect, zero-error epochs."}, {Name: "NTrials", Doc: "total number of trials per epoch. Should be an even multiple of NData."}, {Name: "TestInterval", Doc: "how often to run through all the test patterns, in terms of training epochs.\ncan use 0 or -1 for no testing."}, {Name: "PCAInterval", Doc: "how frequently (in epochs) to compute PCA on hidden representations\nto measure variance?"}, {Name: "StartWts", Doc: "if non-empty, is the name of weights file to load at start\nof first run, for testing."}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim.", Fields: []types.Field{{Name: "NData", Doc: "NData is the number of data-parallel items to process in parallel per trial.\nIs significantly faster for both CPU and GPU. Results in an effective\nmini-batch of learning."}, {Name: "NThreads", Doc: "NThreads is the number of parallel threads for CPU computation;\n0 = use default."}, {Name: "Run", Doc: "Run is the _starting_ run number, which determines the random seed.\nRuns counts up from there. Can do all runs in parallel by launching\nseparate jobs with each starting Run, Runs = 1."}, {Name: "Runs", Doc: "Runs is the total number of runs to do when running Train, starting from Run."}, {Name: "Epochs", Doc: "Epochs is the total number of epochs per run."}, {Name: "Trials", Doc: "Trials is the total number of trials per epoch.\nShould be an even multiple of NData."}, {Name: "Cycles", Doc: "Cycles is the total number of cycles per trial: at least 200."}, {Name: "PlusCycles", Doc: "PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100."}, {Name: "NZero", Doc: "NZero is how many perfect, zero-error epochs before stopping a Run."}, {Name: "TestInterval", Doc: "TestInterval is how often (in epochs) to run through all the test patterns,\nin terms of training epochs. Can use 0 or -1 for no testing."}, {Name: "PCAInterval", Doc: "PCAInterval is how often (in epochs) to compute PCA on hidden\nrepresentations to measure variance."}, {Name: "StartWeights", Doc: "StartWeights is the name of weights file to load at start of first run."}}})
-var _ = types.AddType(&types.Type{Name: "main.LogConfig", IDName: "log-config", Doc: "LogConfig has config parameters related to logging data", Fields: []types.Field{{Name: "SaveWeights", Doc: "if true, save final weights after each run"}, {Name: "Epoch", Doc: "if true, save train epoch log to file, as .epc.tsv typically"}, {Name: "Run", Doc: "if true, save run log to file, as .run.tsv typically"}, {Name: "Trial", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large."}, {Name: "TestEpoch", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."}, {Name: "TestTrial", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."}, {Name: "NetData", Doc: "if true, save network activation etc data from testing trials,\nfor later viewing in netview."}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.LogConfig", IDName: "log-config", Doc: "LogConfig has config parameters related to logging data.", Fields: []types.Field{{Name: "SaveWeights", Doc: "SaveWeights will save final weights after each run."}, {Name: "Train", Doc: "Train has the list of Train mode levels to save log files for."}, {Name: "Test", Doc: "Test has the list of Test mode levels to save log files for."}}})
-var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is a standard Sim config -- use as a starting point.", Fields: []types.Field{{Name: "Includes", Doc: "specify include files here, and after configuration,\nit contains list of include files added."}, {Name: "GUI", Doc: "open the GUI -- does not automatically run -- if false,\nthen runs automatically and quits."}, {Name: "Debug", Doc: "log debugging information"}, {Name: "Params", Doc: "parameter related configuration options"}, {Name: "Run", Doc: "sim running related configuration options"}, {Name: "Log", Doc: "data logging related configuration options"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.Config", IDName: "config", Doc: "Config has the overall Sim configuration options.", Fields: []types.Field{{Name: "Name", Doc: "Name is the short name of the sim."}, {Name: "Title", Doc: "Title is the longer title of the sim."}, {Name: "URL", Doc: "URL is a link to the online README or other documentation for this sim."}, {Name: "Doc", Doc: "Doc is brief documentation of the sim."}, {Name: "Includes", Doc: "Includes has a list of additional config files to include.\nAfter configuration, it contains list of include files added."}, {Name: "GUI", Doc: "GUI means open the GUI. Otherwise it runs automatically and quits,\nsaving results to log files."}, {Name: "Debug", Doc: "Debug reports debugging information."}, {Name: "Params", Doc: "Params has parameter related configuration options."}, {Name: "Run", Doc: "Run has sim running related configuration options."}, {Name: "Log", Doc: "Log has data logging related configuration options."}}})
-var _ = types.AddType(&types.Type{Name: "main.Sim", IDName: "sim", Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).", Fields: []types.Field{{Name: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args"}, {Name: "Net", Doc: "the network -- click to view / edit parameters for layers, paths, etc"}, {Name: "Params", Doc: "network parameter management"}, {Name: "Loops", Doc: "contains looper control loops for running sim"}, {Name: "Stats", Doc: "contains computed statistic values"}, {Name: "Logs", Doc: "Contains all the logs and information about the logs.'"}, {Name: "Patterns", Doc: "the training patterns to use"}, {Name: "Envs", Doc: "Environments"}, {Name: "Context", Doc: "leabra timing parameters and state"}, {Name: "ViewUpdate", Doc: "netview update parameters"}, {Name: "GUI", Doc: "manages all the gui elements"}, {Name: "RandSeeds", Doc: "a list of random seeds to use for each run"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.Modes", IDName: "modes", Doc: "Modes are the looping modes (Stacks) for running and statistics."})
+
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.Levels", IDName: "levels", Doc: "Levels are the looping levels for running and statistics."})
+
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.StatsPhase", IDName: "stats-phase", Doc: "StatsPhase is the phase of stats processing for given mode, level.\nAccumulated values are reset at Start, added each Step."})
+
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/examples/ra25.Sim", IDName: "sim", Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).", Fields: []types.Field{{Name: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args"}, {Name: "Net", Doc: "Net is the network: click to view / edit parameters for layers, paths, etc."}, {Name: "Params", Doc: "Params manages network parameter setting."}, {Name: "Loops", Doc: "Loops are the control loops for running the sim, in different Modes\nacross stacks of Levels."}, {Name: "Envs", Doc: "Envs provides mode-string based storage of environments."}, {Name: "TrainUpdate", Doc: "TrainUpdate has Train mode netview update parameters."}, {Name: "TestUpdate", Doc: "TestUpdate has Test mode netview update parameters."}, {Name: "Root", Doc: "Root is the root tensorfs directory, where all stats and other misc sim data goes."}, {Name: "Stats", Doc: "Stats has the stats directory within Root."}, {Name: "Current", Doc: "Current has the current stats values within Stats."}, {Name: "StatFuncs", Doc: "StatFuncs are statistics functions called at given mode and level,\nto perform all stats computations. phase = Start does init at start of given level,\nand all intialization / configuration (called during Init too)."}, {Name: "GUI", Doc: "GUI manages all the GUI elements"}, {Name: "RandSeeds", Doc: "RandSeeds is a list of random seeds to use for each run."}}})
+
+var _ = types.AddFunc(&types.Func{Name: "github.com/emer/leabra/v2/examples/ra25.NewConfig", Returns: []string{"Config"}})
+
+var _ = types.AddFunc(&types.Func{Name: "github.com/emer/leabra/v2/examples/ra25.RunSim", Doc: "RunSim runs the simulation as a standalone app\nwith given configuration.", Args: []string{"cfg"}, Returns: []string{"error"}})
+
+var _ = types.AddFunc(&types.Func{Name: "github.com/emer/leabra/v2/examples/ra25.EmbedSim", Doc: "EmbedSim runs the simulation with default configuration\nembedded within given body element.", Args: []string{"b"}, Returns: []string{"Sim"}})
diff --git a/go.mod b/go.mod
index a5d6996..a7cd6be 100644
--- a/go.mod
+++ b/go.mod
@@ -3,10 +3,9 @@ module github.com/emer/leabra/v2
go 1.22.0
require (
- cogentcore.org/core v0.3.9-0.20250127075122-ddf64b82d707
- cogentcore.org/lab v0.0.0-20250116065728-014d19175d12
- github.com/emer/emergent/v2 v2.0.0-dev0.1.7.0.20250128232110-1e71a5c7249b
- github.com/emer/etensor v0.0.0-20250128230539-a9366874f7c3
+ cogentcore.org/core v0.3.12-0.20250708071531-c93d8bc12968
+ cogentcore.org/lab v0.1.2-0.20250708073836-e69d5b7332a1
+ github.com/emer/emergent/v2 v2.0.0-dev0.1.7.0.20250630184401-56587d34404f
)
require (
diff --git a/leabra/deep_net.go b/leabra/deep_net.go
index ccd2e6e..00ad757 100644
--- a/leabra/deep_net.go
+++ b/leabra/deep_net.go
@@ -10,32 +10,32 @@ import (
// AddSuperLayer2D adds a SuperLayer of given size, with given name.
func (nt *Network) AddSuperLayer2D(name string, nNeurY, nNeurX int) *Layer {
- return nt.AddLayer2D(name, nNeurY, nNeurX, SuperLayer)
+ return nt.AddLayer2D(name, SuperLayer, nNeurY, nNeurX)
}
// AddSuperLayer4D adds a SuperLayer of given size, with given name.
func (nt *Network) AddSuperLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
- return nt.AddLayer4D(name, nPoolsY, nPoolsX, nNeurY, nNeurX, SuperLayer)
+ return nt.AddLayer4D(name, SuperLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
}
// AddCTLayer2D adds a CTLayer of given size, with given name.
func (nt *Network) AddCTLayer2D(name string, nNeurY, nNeurX int) *Layer {
- return nt.AddLayer2D(name, nNeurY, nNeurX, CTLayer)
+ return nt.AddLayer2D(name, CTLayer, nNeurY, nNeurX)
}
// AddCTLayer4D adds a CTLayer of given size, with given name.
func (nt *Network) AddCTLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
- return nt.AddLayer4D(name, nPoolsY, nPoolsX, nNeurY, nNeurX, CTLayer)
+ return nt.AddLayer4D(name, CTLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
}
// AddPulvinarLayer2D adds a PulvinarLayer of given size, with given name.
func (nt *Network) AddPulvinarLayer2D(name string, nNeurY, nNeurX int) *Layer {
- return nt.AddLayer2D(name, nNeurY, nNeurX, PulvinarLayer)
+ return nt.AddLayer2D(name, PulvinarLayer, nNeurY, nNeurX)
}
// AddPulvinarLayer4D adds a PulvinarLayer of given size, with given name.
func (nt *Network) AddPulvinarLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
- return nt.AddLayer4D(name, nPoolsY, nPoolsX, nNeurY, nNeurX, PulvinarLayer)
+ return nt.AddLayer4D(name, PulvinarLayer, nPoolsY, nPoolsX, nNeurY, nNeurX)
}
// ConnectSuperToCT adds a CTCtxtPath from given sending Super layer to a CT layer
diff --git a/leabra/helpers.go b/leabra/helpers.go
index f1e8490..9faef64 100644
--- a/leabra/helpers.go
+++ b/leabra/helpers.go
@@ -9,13 +9,12 @@ import (
"cogentcore.org/core/core"
"cogentcore.org/lab/base/mpi"
- "github.com/emer/emergent/v2/ecmd"
)
-////////////////////////////////////////////////////
-// Misc
+//////// Misc
-// ToggleLayersOff can be used to disable layers in a Network, for example if you are doing an ablation study.
+// ToggleLayersOff can be used to disable layers in a Network,
+// for example if you are doing an ablation study.
func ToggleLayersOff(net *Network, layerNames []string, off bool) {
for _, lnm := range layerNames {
lyi := net.LayerByName(lnm)
@@ -27,8 +26,7 @@ func ToggleLayersOff(net *Network, layerNames []string, off bool) {
}
}
-/////////////////////////////////////////////
-// Weights files
+//////// Weights files
// WeightsFilename returns default current weights file name,
// using train run and epoch counters from looper
@@ -51,17 +49,6 @@ func SaveWeights(net *Network, ctrString, runName string) string {
return fnm
}
-// SaveWeightsIfArgSet saves network weights if the "wts" arg has been set to true.
-// uses WeightsFilename information to identify the weights.
-// only for 0 rank MPI if running mpi
-// Returns the name of the file saved to, or empty if not saved.
-func SaveWeightsIfArgSet(net *Network, args *ecmd.Args, ctrString, runName string) string {
- if args.Bool("wts") {
- return SaveWeights(net, ctrString, runName)
- }
- return ""
-}
-
// SaveWeightsIfConfigSet saves network weights if the given config
// bool value has been set to true.
// uses WeightsFilename information to identify the weights.
diff --git a/leabra/layer.go b/leabra/layer.go
index b42a30d..1db3520 100644
--- a/leabra/layer.go
+++ b/leabra/layer.go
@@ -11,7 +11,7 @@ import (
"cogentcore.org/core/enums"
"cogentcore.org/core/math32"
"cogentcore.org/lab/base/randx"
- "github.com/emer/etensor/tensor"
+ "cogentcore.org/lab/tensor"
)
//////////////////////////////////////////////////////////////////////////////////////
@@ -169,8 +169,8 @@ func (ly *Layer) ApplyExt2D(ext tensor.Tensor) {
for y := 0; y < ymx; y++ {
for x := 0; x < xmx; x++ {
idx := []int{y, x}
- vl := float32(ext.Float(idx))
- i := ly.Shape.Offset(idx)
+ vl := float32(ext.Float(idx...))
+ i := ly.Shape.IndexTo1D(idx...)
ly.ApplyExtValue(i, vl, clear, set, toTarg)
}
}
@@ -186,7 +186,7 @@ func (ly *Layer) ApplyExt2Dto4D(ext tensor.Tensor) {
for y := 0; y < ymx; y++ {
for x := 0; x < xmx; x++ {
idx := []int{y, x}
- vl := float32(ext.Float(idx))
+ vl := float32(ext.Float(idx...))
ui := tensor.Projection2DIndex(&ly.Shape, false, y, x)
ly.ApplyExtValue(ui, vl, clear, set, toTarg)
}
@@ -205,8 +205,8 @@ func (ly *Layer) ApplyExt4D(ext tensor.Tensor) {
for yn := 0; yn < ynmx; yn++ {
for xn := 0; xn < xnmx; xn++ {
idx := []int{yp, xp, yn, xn}
- vl := float32(ext.Float(idx))
- i := ly.Shape.Offset(idx)
+ vl := float32(ext.Float(idx...))
+ i := ly.Shape.IndexTo1D(idx...)
ly.ApplyExtValue(i, vl, clear, set, toTarg)
}
}
diff --git a/leabra/layerbase.go b/leabra/layerbase.go
index 69c6e95..e4b706d 100644
--- a/leabra/layerbase.go
+++ b/leabra/layerbase.go
@@ -5,20 +5,19 @@
package leabra
import (
- "encoding/json"
"fmt"
"io"
- "log"
- "math"
+ "reflect"
"strconv"
"strings"
"cogentcore.org/core/base/errors"
"cogentcore.org/core/base/num"
+ "cogentcore.org/core/base/reflectx"
"cogentcore.org/core/math32"
"github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
"github.com/emer/emergent/v2/weights"
- "github.com/emer/etensor/tensor"
)
// Layer implements the Leabra algorithm at the layer level,
@@ -208,31 +207,70 @@ func (ly *Layer) ShouldDisplay(field string) bool {
return true
}
-// JsonToParams reformates json output to suitable params display output
-func JsonToParams(b []byte) string {
- br := strings.Replace(string(b), `"`, ``, -1)
- br = strings.Replace(br, ",\n", "", -1)
- br = strings.Replace(br, "{\n", "{", -1)
- br = strings.Replace(br, "} ", "}\n ", -1)
- br = strings.Replace(br, "\n }", " }", -1)
- br = strings.Replace(br, "\n }\n", " }", -1)
- return br[1:] + "\n"
-}
+// ParamsString returns a listing of all parameters in the Layer and
+// pathways within the layer. If nonDefault is true, only report those
+// not at their default values.
+func (ly *Layer) ParamsString(nonDefault bool) string {
+ var b strings.Builder
+ b.WriteString("//////// Layer: " + ly.Name + "\n")
+
+ b.WriteString(params.PrintStruct(ly, 1, func(path string, ft reflect.StructField, fv any) bool {
+ if ft.Tag.Get("display") == "-" {
+ return false
+ }
+ if nonDefault {
+ if def := ft.Tag.Get("default"); def != "" {
+ if reflectx.ValueIsDefault(reflect.ValueOf(fv), def) {
+ return false
+ }
+ } else {
+ if reflectx.NonPointerType(ft.Type).Kind() != reflect.Struct {
+ return false
+ }
+ }
+ }
+ isPBWM := ly.Type == MatrixLayer || ly.Type == GPiThalLayer || ly.Type == CINLayer || ly.Type == PFCLayer || ly.Type == PFCDeepLayer
+ switch path {
+ case "Act", "Inhib", "Learn":
+ return true
+ case "Burst":
+ return ly.Type == SuperLayer || ly.Type == CTLayer
+ case "Pulvinar", "Drivers":
+ return ly.Type == PulvinarLayer
+ case "RW":
+ return ly.Type == RWPredLayer || ly.Type == RWDaLayer
+ case "TD":
+ return ly.Type == TDPredLayer || ly.Type == TDIntegLayer || ly.Type == TDDaLayer
+ case "PBWM":
+ return isPBWM
+ case "SendTo":
+ return ly.Type == GPiThalLayer || ly.Type == ClampDaLayer || ly.Type == RWDaLayer || ly.Type == TDDaLayer || ly.Type == CINLayer
+ case "Matrix":
+ return ly.Type == MatrixLayer
+ case "GPiGate":
+ return ly.Type == GPiThalLayer
+ case "CIN":
+ return ly.Type == CINLayer
+ case "PFCGate", "PFCMaint":
+ return ly.Type == PFCLayer || ly.Type == PFCDeepLayer
+ case "PFCDyns":
+ return ly.Type == PFCDeepLayer
+ }
+ return false
+ },
+ func(path string, ft reflect.StructField, fv any) string {
+ if nonDefault {
+ if def := ft.Tag.Get("default"); def != "" {
+ return reflectx.ToString(fv) + " [" + def + "]"
+ }
+ }
+ return ""
+ }))
-// AllParams returns a listing of all parameters in the Layer
-func (ly *Layer) AllParams() string {
- str := "/////////////////////////////////////////////////\nLayer: " + ly.Name + "\n"
- b, _ := json.MarshalIndent(&ly.Act, "", " ")
- str += "Act: {\n " + JsonToParams(b)
- b, _ = json.MarshalIndent(&ly.Inhib, "", " ")
- str += "Inhib: {\n " + JsonToParams(b)
- b, _ = json.MarshalIndent(&ly.Learn, "", " ")
- str += "Learn: {\n " + JsonToParams(b)
for _, pt := range ly.RecvPaths {
- pstr := pt.AllParams()
- str += pstr
+ b.WriteString(pt.ParamsString(nonDefault))
}
- return str
+ return b.String()
}
// RecipToSendPath finds the reciprocal pathway relative to the given sending pathway
@@ -329,76 +367,6 @@ func (ly *Layer) UnitValues(vals *[]float32, varNm string, di int) error {
return nil
}
-// UnitValuesTensor returns values of given variable name on unit
-// for each unit in the layer, as a float32 tensor in same shape as layer units.
-func (ly *Layer) UnitValuesTensor(tsr tensor.Tensor, varNm string, di int) error {
- if tsr == nil {
- err := fmt.Errorf("leabra.UnitValuesTensor: Tensor is nil")
- log.Println(err)
- return err
- }
- tsr.SetShape(ly.Shape.Sizes, ly.Shape.Names...)
- vidx, err := ly.UnitVarIndex(varNm)
- if err != nil {
- nan := math.NaN()
- for i := range ly.Neurons {
- tsr.SetFloat1D(i, nan)
- }
- return err
- }
- for i := range ly.Neurons {
- v := ly.UnitValue1D(vidx, i, di)
- if math32.IsNaN(v) {
- tsr.SetFloat1D(i, math.NaN())
- } else {
- tsr.SetFloat1D(i, float64(v))
- }
- }
- return nil
-}
-
-// UnitValuesSampleTensor fills in values of given variable name on unit
-// for a smaller subset of sample units in the layer, into given tensor.
-// This is used for computationally intensive stats or displays that work
-// much better with a smaller number of units.
-// The set of sample units are defined by SampleIndexes -- all units
-// are used if no such subset has been defined.
-// If tensor is not already big enough to hold the values, it is
-// set to a 1D shape to hold all the values if subset is defined,
-// otherwise it calls UnitValuesTensor and is identical to that.
-// Returns error on invalid var name.
-func (ly *Layer) UnitValuesSampleTensor(tsr tensor.Tensor, varNm string, di int) error {
- nu := len(ly.SampleIndexes)
- if nu == 0 {
- return ly.UnitValuesTensor(tsr, varNm, di)
- }
- if tsr == nil {
- err := fmt.Errorf("axon.UnitValuesSampleTensor: Tensor is nil")
- log.Println(err)
- return err
- }
- if tsr.Len() != nu {
- tsr.SetShape([]int{nu}, "Units")
- }
- vidx, err := ly.UnitVarIndex(varNm)
- if err != nil {
- nan := math.NaN()
- for i, _ := range ly.SampleIndexes {
- tsr.SetFloat1D(i, nan)
- }
- return err
- }
- for i, ui := range ly.SampleIndexes {
- v := ly.UnitValue1D(vidx, ui, di)
- if math32.IsNaN(v) {
- tsr.SetFloat1D(i, math.NaN())
- } else {
- tsr.SetFloat1D(i, float64(v))
- }
- }
- return nil
-}
-
// UnitVal returns value of given variable name on given unit,
// using shape-based dimensional index
func (ly *Layer) UnitValue(varNm string, idx []int, di int) float32 {
@@ -406,7 +374,7 @@ func (ly *Layer) UnitValue(varNm string, idx []int, di int) float32 {
if err != nil {
return math32.NaN()
}
- fidx := ly.Shape.Offset(idx)
+ fidx := ly.Shape.IndexTo1D(idx...)
return ly.UnitValue1D(vidx, fidx, di)
}
@@ -540,8 +508,8 @@ func (ly *Layer) BuildSubPools() {
pi := 1
for py := 0; py < spy; py++ {
for px := 0; px < spx; px++ {
- soff := ly.Shape.Offset([]int{py, px, 0, 0})
- eoff := ly.Shape.Offset([]int{py, px, sh[2] - 1, sh[3] - 1}) + 1
+ soff := ly.Shape.IndexTo1D(py, px, 0, 0)
+ eoff := ly.Shape.IndexTo1D(py, px, sh[2]-1, sh[3]-1) + 1
pl := &ly.Pools[pi]
pl.StIndex = soff
pl.EdIndex = eoff
diff --git a/leabra/logging.go b/leabra/logging.go
deleted file mode 100644
index 7090650..0000000
--- a/leabra/logging.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright (c) 2022, The Emergent Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package leabra
-
-import (
- "reflect"
- "strconv"
-
- "cogentcore.org/core/base/errors"
- "cogentcore.org/core/math32/minmax"
- "github.com/emer/emergent/v2/egui"
- "github.com/emer/emergent/v2/elog"
- "github.com/emer/emergent/v2/estats"
- "github.com/emer/emergent/v2/etime"
- "github.com/emer/etensor/plot/plotcore"
- "github.com/emer/etensor/tensor/stats/split"
- "github.com/emer/etensor/tensor/stats/stats"
- "github.com/emer/etensor/tensor/table"
-)
-
-// LogTestErrors records all errors made across TestTrials, at Test Epoch scope
-func LogTestErrors(lg *elog.Logs) {
- sk := etime.Scope(etime.Test, etime.Trial)
- lt := lg.TableDetailsScope(sk)
- ix, _ := lt.NamedIndexView("TestErrors")
- ix.Filter(func(et *table.Table, row int) bool {
- return et.Float("Err", row) > 0 // include error trials
- })
- lg.MiscTables["TestErrors"] = ix.NewTable()
-
- allsp := split.All(ix)
- split.AggColumn(allsp, "UnitErr", stats.Sum)
- // note: can add other stats to compute
- lg.MiscTables["TestErrorStats"] = allsp.AggsToTable(table.AddAggName)
-}
-
-// PCAStats computes PCA statistics on recorded hidden activation patterns
-// from Analyze, Trial log data
-func PCAStats(net *Network, lg *elog.Logs, stats *estats.Stats) {
- stats.PCAStats(lg.IndexView(etime.Analyze, etime.Trial), "ActM", net.LayersByType(SuperLayer, TargetLayer, CTLayer))
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// Log items
-
-// LogAddDiagnosticItems adds standard Axon diagnostic statistics to given logs,
-// across the given time levels, in higher to lower order, e.g., Epoch, Trial
-// These are useful for tuning and diagnosing the behavior of the network.
-func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes, times ...etime.Times) {
- ntimes := len(times)
- for _, lnm := range layerNames {
- clnm := lnm
- itm := lg.AddItem(&elog.Item{
- Name: clnm + "_ActMAvg",
- Type: reflect.Float64,
- FixMax: false,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pools[0].ActAvg.ActMAvg)
- }}})
- lg.AddStdAggs(itm, mode, times...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_ActMMax",
- Type: reflect.Float64,
- FixMax: false,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pools[0].ActM.Max)
- }}})
- lg.AddStdAggs(itm, mode, times...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_CosDiff",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.CosDiff.Cos)
- }}})
- lg.AddStdAggs(itm, mode, times...)
- }
-}
-
-func LogInputLayer(lg *elog.Logs, net *Network, mode etime.Modes) {
- // input layer average activity -- important for tuning
- layerNames := net.LayersByType(InputLayer)
- for _, lnm := range layerNames {
- clnm := lnm
- lg.AddItem(&elog.Item{
- Name: clnm + "_ActAvg",
- Type: reflect.Float64,
- FixMax: true,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Train, etime.Epoch): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pools[0].ActM.Max)
- }}})
- }
-}
-
-// LogAddPCAItems adds PCA statistics to log for Hidden and Target layers
-// across the given time levels, in higher to lower order, e.g., Run, Epoch, Trial
-// These are useful for diagnosing the behavior of the network.
-func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etime.Times) {
- ntimes := len(times)
- layers := net.LayersByType(SuperLayer, TargetLayer, CTLayer)
- for _, lnm := range layers {
- clnm := lnm
- cly := net.LayerByName(clnm)
- lg.AddItem(&elog.Item{
- Name: clnm + "_ActM",
- Type: reflect.Float64,
- CellShape: cly.GetSampleShape().Sizes,
- FixMax: true,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Analyze, times[ntimes-1]): func(ctx *elog.Context) {
- ctx.SetLayerSampleTensor(clnm, "ActM")
- }, etime.Scope(etime.Test, times[ntimes-1]): func(ctx *elog.Context) {
- ctx.SetLayerSampleTensor(clnm, "ActM")
- }}})
- itm := lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_NStrong",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_Top5",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_Next5",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_Rest",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
- }
-}
-
-// LayerActsLogConfigMetaData configures meta data for LayerActs table
-func LayerActsLogConfigMetaData(dt *table.Table) {
- dt.SetMetaData("read-only", "true")
- dt.SetMetaData("precision", strconv.Itoa(elog.LogPrec))
- dt.SetMetaData("Type", "Bar")
- dt.SetMetaData("XAxis", "Layer")
- dt.SetMetaData("XAxisRot", "45")
- dt.SetMetaData("Nominal:On", "+")
- dt.SetMetaData("Nominal:FixMin", "+")
- dt.SetMetaData("ActM:On", "+")
- dt.SetMetaData("ActM:FixMin", "+")
- dt.SetMetaData("ActM:Max", "1")
- dt.SetMetaData("ActP:FixMin", "+")
- dt.SetMetaData("ActP:Max", "1")
- dt.SetMetaData("MaxGeM:FixMin", "+")
- dt.SetMetaData("MaxGeM:FixMax", "+")
- dt.SetMetaData("MaxGeM:Max", "3")
- dt.SetMetaData("MaxGeP:FixMin", "+")
- dt.SetMetaData("MaxGeP:FixMax", "+")
- dt.SetMetaData("MaxGeP:Max", "3")
-}
-
-// LayerActsLogConfig configures Tables to record
-// layer activity for tuning the network inhibition, nominal activity,
-// relative scaling, etc. in elog.MiscTables:
-// LayerActs is current, LayerActsRec is record over trials,
-// LayerActsAvg is average of recorded trials.
-func LayerActsLogConfig(net *Network, lg *elog.Logs) {
- dt := lg.MiscTable("LayerActs")
- dt.SetMetaData("name", "LayerActs")
- dt.SetMetaData("desc", "Layer Activations")
- LayerActsLogConfigMetaData(dt)
- dtRec := lg.MiscTable("LayerActsRec")
- dtRec.SetMetaData("name", "LayerActsRec")
- dtRec.SetMetaData("desc", "Layer Activations Recorded")
- LayerActsLogConfigMetaData(dtRec)
- dtAvg := lg.MiscTable("LayerActsAvg")
- dtAvg.SetMetaData("name", "LayerActsAvg")
- dtAvg.SetMetaData("desc", "Layer Activations Averaged")
- LayerActsLogConfigMetaData(dtAvg)
- dts := []*table.Table{dt, dtRec, dtAvg}
- for _, t := range dts {
- t.AddStringColumn("Layer")
- t.AddFloat64Column("Nominal")
- t.AddFloat64Column("ActM")
- t.AddFloat64Column("ActP")
- }
- nlay := len(net.Layers)
- dt.SetNumRows(nlay)
- dtRec.SetNumRows(0)
- dtAvg.SetNumRows(nlay)
- for li, ly := range net.Layers {
- dt.SetString("Layer", li, ly.Name)
- dt.SetFloat("Nominal", li, float64(ly.Inhib.ActAvg.Init))
- dtAvg.SetString("Layer", li, ly.Name)
- }
-}
-
-// LayerActsLog records layer activity for tuning the network
-// inhibition, nominal activity, relative scaling, etc.
-// if gui is non-nil, plot is updated.
-func LayerActsLog(net *Network, lg *elog.Logs, di int, gui *egui.GUI) {
- dt := lg.MiscTable("LayerActs")
- dtRec := lg.MiscTable("LayerActsRec")
- for li, ly := range net.Layers {
- lpl := &ly.Pools[0]
- dt.SetFloat("Nominal", li, float64(ly.Inhib.ActAvg.Init))
- dt.SetFloat("ActM", li, float64(lpl.ActAvg.ActMAvg))
- dt.SetFloat("ActP", li, float64(lpl.ActAvg.ActPAvg))
- dtRec.SetNumRows(dtRec.Rows + 1)
- dtRec.SetString("Layer", li, ly.Name)
- dtRec.SetFloat("Nominal", li, float64(ly.Inhib.ActAvg.Init))
- dtRec.SetFloat("ActM", li, float64(lpl.ActAvg.ActMAvg))
- dtRec.SetFloat("ActP", li, float64(lpl.ActAvg.ActPAvg))
- }
- if gui != nil {
- gui.UpdatePlotScope(etime.ScopeKey("LayerActs"))
- }
-}
-
-// LayerActsLogAvg computes average of LayerActsRec record
-// of layer activity for tuning the network
-// inhibition, nominal activity, relative scaling, etc.
-// if gui is non-nil, plot is updated.
-// if recReset is true, reset the recorded data after computing average.
-func LayerActsLogAvg(net *Network, lg *elog.Logs, gui *egui.GUI, recReset bool) {
- dtRec := lg.MiscTable("LayerActsRec")
- dtAvg := lg.MiscTable("LayerActsAvg")
- if dtRec.Rows == 0 {
- return
- }
- ix := table.NewIndexView(dtRec)
- spl := split.GroupBy(ix, "Layer")
- split.AggAllNumericColumns(spl, stats.Mean)
- ags := spl.AggsToTable(table.ColumnNameOnly)
- cols := []string{"Nominal", "ActM", "ActP", "MaxGeM", "MaxGeP"}
- for li, ly := range net.Layers {
- rw := errors.Log1(ags.RowsByString("Layer", ly.Name, table.Equals, table.UseCase))[0]
- for _, cn := range cols {
- dtAvg.SetFloat(cn, li, ags.Float(cn, rw))
- }
- }
- if recReset {
- dtRec.SetNumRows(0)
- }
- if gui != nil {
- gui.UpdatePlotScope(etime.ScopeKey("LayerActsAvg"))
- }
-}
-
-// LayerActsLogRecReset resets the recorded LayerActsRec data
-// used for computing averages
-func LayerActsLogRecReset(lg *elog.Logs) {
- dtRec := lg.MiscTable("LayerActsRec")
- dtRec.SetNumRows(0)
-}
-
-// LayerActsLogConfigGUI configures GUI for LayerActsLog Plot and LayerActs Avg Plot
-func LayerActsLogConfigGUI(lg *elog.Logs, gui *egui.GUI) {
- pt, _ := gui.Tabs.NewTab("LayerActs Plot")
- plt := plotcore.NewPlotEditor(pt)
- gui.Plots["LayerActs"] = plt
- plt.SetTable(lg.MiscTables["LayerActs"])
-
- pt, _ = gui.Tabs.NewTab("LayerActs Avg Plot")
- plt = plotcore.NewPlotEditor(pt)
- gui.Plots["LayerActsAvg"] = plt
- plt.SetTable(lg.MiscTables["LayerActsAvg"])
-}
diff --git a/leabra/looper.go b/leabra/looper.go
index 1149cc0..1d283f5 100644
--- a/leabra/looper.go
+++ b/leabra/looper.go
@@ -5,152 +5,269 @@
package leabra
import (
- "github.com/emer/emergent/v2/egui"
- "github.com/emer/emergent/v2/elog"
- "github.com/emer/emergent/v2/etime"
+ "cogentcore.org/core/enums"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/netview"
)
-// LooperStdPhases adds the minus and plus phases of the alpha cycle,
-// along with embedded beta phases which just record St1 and St2 activity in this case.
-// plusStart is start of plus phase, typically 75,
-// and plusEnd is end of plus phase, typically 99
-// resets the state at start of trial.
-// Can pass a trial-level time scale to use instead of the default etime.Trial
-func LooperStdPhases(ls *looper.Stacks, ctx *Context, net *Network, plusStart, plusEnd int, trial ...etime.Times) {
- trl := etime.Trial
- if len(trial) > 0 {
- trl = trial[0]
- }
- ls.AddEventAllModes(etime.Cycle, "MinusPhase:Start", 0, func() {
- ctx.PlusPhase = false
+// LooperStandard adds all the standard Leabra Trial and Cycle level processing calls
+// to the given Looper Stacks. cycle and trial are the enums for the looper levels,
+// trainMode is the training mode enum value.
+// - minus and plus phases of the theta cycle (trial), at plusStart (150) and plusEnd (199) cycles.
+// - embedded beta phases within theta, that record Beta1 and Beta2 states.
+// - net.Cycle() at every cycle step.
+// - net.DWt() and net.WtFromDWt() learning calls in training mode, with netview update
+// between these two calls if it is visible and viewing synapse variables.
+// - netview update calls at appropriate levels (no-op if no GUI)
+func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, plusStart, plusEnd int, cycle, trial, trainMode enums.Enum) {
+ ls.AddEventAllModes(cycle, "MinusPhase:Start", 0, func() {
+ net.Context.PlusPhase = false
})
- ls.AddEventAllModes(etime.Cycle, "Quarter1", 25, func() {
- net.QuarterFinal(ctx)
- ctx.QuarterInc()
+ ls.AddEventAllModes(cycle, "Quarter1", 25, func() {
+ net.QuarterFinal()
})
- ls.AddEventAllModes(etime.Cycle, "Quarter2", 50, func() {
- net.QuarterFinal(ctx)
- ctx.QuarterInc()
+ ls.AddEventAllModes(cycle, "Quarter2", 50, func() {
+ net.QuarterFinal()
})
- ls.AddEventAllModes(etime.Cycle, "MinusPhase:End", plusStart, func() {
- net.QuarterFinal(ctx)
- ctx.QuarterInc()
+ ls.AddEventAllModes(cycle, "MinusPhase:End", plusStart, func() {
+ net.QuarterFinal()
})
- ls.AddEventAllModes(etime.Cycle, "PlusPhase:Start", plusStart, func() {
- ctx.PlusPhase = true
+ ls.AddEventAllModes(cycle, "PlusPhase:Start", plusStart, func() {
+ net.Context.PlusPhase = true
})
- for m, stack := range ls.Stacks {
- stack.Loops[trl].OnStart.Add("AlphaCycInit", func() {
- net.AlphaCycInit(m == etime.Train)
- ctx.AlphaCycStart()
- })
- stack.Loops[trl].OnEnd.Add("PlusPhase:End", func() {
- net.QuarterFinal(ctx)
+ for mode, st := range ls.Stacks {
+ cycLoop := st.Loops[cycle]
+ cycLoop.OnStart.Add("Cycle", func() {
+ net.Cycle()
})
+ trlLoop := st.Loops[trial]
+ testing := mode.Int64() != trainMode.Int64()
+ trlLoop.OnStart.Add("AlphaCycInit", func() { net.AlphaCycInit(!testing) })
+ trlLoop.OnEnd.Add("PlusPhase:End", func() { net.QuarterFinal() })
+ if mode.Int64() == trainMode.Int64() {
+ trlLoop.OnEnd.Add("UpdateWeights", func() {
+ if view := viewFunc(mode); view != nil && view.IsViewingSynapse() {
+ net.DWt() // todo: need to get synapses here, not after
+ view.RecordSyns() // note: critical to update weights here so DWt is visible
+ net.WtFromDWt()
+ } else {
+ net.DWtToWt()
+ }
+ })
+ }
}
}
-// LooperSimCycleAndLearn adds Cycle and DWt, WtFromDWt functions to looper
-// for given network, ctx, and netview update manager
-// Can pass a trial-level time scale to use instead of the default etime.Trial
-func LooperSimCycleAndLearn(ls *looper.Stacks, net *Network, ctx *Context, viewupdt *netview.ViewUpdate, trial ...etime.Times) {
- trl := etime.Trial
- if len(trial) > 0 {
- trl = trial[0]
- }
- for m := range ls.Stacks {
- ls.Stacks[m].Loops[etime.Cycle].OnStart.Add("Cycle", func() {
- net.Cycle(ctx)
- ctx.CycleInc()
+// LooperUpdateNetView adds netview update calls to the given
+// trial and cycle levels for given NetViewUpdate associated with the mode,
+// returned by the given viewFunc function.
+// The countersFunc returns the counters and other stats to display at the
+// bottom of the NetView, based on given mode and level.
+func LooperUpdateNetView(ls *looper.Stacks, cycle, trial enums.Enum, viewFunc func(mode enums.Enum) *NetViewUpdate) {
+ for mode, st := range ls.Stacks {
+ viewUpdt := viewFunc(mode)
+ cycLoop := st.Loops[cycle]
+ cycLoop.OnEnd.Add("GUI:UpdateNetView", func() {
+ viewUpdt.UpdateCycle(cycLoop.Counter.Cur, mode, cycle)
})
- }
- ttrl := ls.Loop(etime.Train, trl)
- if ttrl != nil {
- ttrl.OnEnd.Add("UpdateWeights", func() {
- net.DWt()
- if viewupdt.IsViewingSynapse() {
- viewupdt.RecordSyns() // note: critical to update weights here so DWt is visible
- }
- net.WtFromDWt()
+ trlLoop := st.Loops[trial]
+ trlLoop.OnEnd.Add("GUI:UpdateNetView", func() {
+ viewUpdt.GoUpdate(mode, trial)
})
}
+}
- // Set variables on ss that are referenced elsewhere, such as ApplyInputs.
- for m, loops := range ls.Stacks {
- for _, loop := range loops.Loops {
- loop.OnStart.Add("SetCtxMode", func() {
- ctx.Mode = m.(etime.Modes)
- })
- }
+//////// NetViewUpdate
+
+//gosl:start
+
+// ViewTimes are the options for when the NetView can be updated.
+type ViewTimes int32 //enums:enum
+const (
+ // Cycle is an update of neuron state, equivalent to 1 msec of real time.
+ Cycle ViewTimes = iota
+
+ // FastSpike is 10 cycles (msec) or 100hz. This is the fastest spiking time
+ // generally observed in the neocortex.
+ FastSpike
+
+ // Gamma is 25 cycles (msec) or 40hz. Neocortical activity often exhibits
+ // synchrony peaks in this range.
+ Gamma
+
+ // Beta is 50 cycles (msec) or 20 hz (two Gammas).
+ // Gating in the basal ganglia and associated updating in prefrontal
+ // cortex occurs at this frequency.
+ Beta
+
+ // Alpha is 100 cycle (msec) or 10 hz (two Betas).
+ // Posterior neocortex exhibits synchrony peaks in this range,
+ // corresponding to the intrinsic bursting frequency of layer 5
+ // IB neurons, and corticothalamic loop resonance.
+ Alpha
+
+ // Phase is the Minus or Plus phase, where plus phase is bursting / outcome
+ // that drives positive learning relative to prediction in minus phase.
+ // Minus phase is at 150 cycles (msec).
+ Phase
+
+ // Theta is 200 cycles (msec) or 5 hz (two Alphas), i.e., a Trial.
+ // This is the modal duration of a saccade, the update frequency of
+ // medial temporal lobe episodic memory, and the minimal predictive learning cycle
+ // (perceive on Alpha 1, predict on 2).
+ Theta
+)
+
+//gosl:end
+
+// ViewTimeCycles are the cycle intervals associated with each ViewTimes level.
+var ViewTimeCycles = []int{1, 10, 25, 50, 100, 150, 200}
+
+// Cycles returns the number of cycles associated with a given view time.
+func (vt ViewTimes) Cycles() int {
+ return ViewTimeCycles[vt]
+}
+
+// NetViewUpdate manages time scales for updating the NetView.
+// Use one of these for each mode you want to control separately.
+type NetViewUpdate struct {
+
+ // On toggles update of display on
+ On bool
+
+ // Time scale to update the network view (Cycle to Trial timescales).
+ Time ViewTimes
+
+ // CounterFunc returns the counter string showing current counters etc.
+ CounterFunc func(mode, level enums.Enum) string `display:"-"`
+
+ // View is the network view.
+ View *netview.NetView `display:"-"`
+}
+
+// Config configures for given NetView, time and counter function,
+// which returns a string to show at the bottom of the netview,
+// given the current mode and level.
+func (vu *NetViewUpdate) Config(nv *netview.NetView, tm ViewTimes, fun func(mode, level enums.Enum) string) {
+ vu.View = nv
+ vu.On = true
+ vu.Time = tm
+ vu.CounterFunc = fun
+}
+
+// ShouldUpdate returns true if the view is On,
+// View is != nil, and it is visible.
+func (vu *NetViewUpdate) ShouldUpdate() bool {
+ if !vu.On || vu.View == nil || !vu.View.IsVisible() {
+ return false
}
+ return true
}
-// LooperResetLogBelow adds a function in OnStart to all stacks and loops
-// to reset the log at the level below each loop -- this is good default behavior.
-// Exceptions can be passed to exclude specific levels -- e.g., if except is Epoch
-// then Epoch does not reset the log below it
-func LooperResetLogBelow(ls *looper.Stacks, logs *elog.Logs, except ...etime.Times) {
- for m, stack := range ls.Stacks {
- for t, loop := range stack.Loops {
- curTime := t
- isExcept := false
- for _, ex := range except {
- if curTime == ex {
- isExcept = true
- break
- }
- }
- if below := stack.TimeBelow(curTime); !isExcept && below != etime.NoTime {
- loop.OnStart.Add("ResetLog"+below.String(), func() {
- logs.ResetLog(m.(etime.Modes), below.(etime.Times))
- })
- }
- }
+// GoUpdate does an update if view is On, visible and active,
+// including recording new data and driving update of display.
+// This version is only for calling from a separate goroutine,
+// not the main event loop (see also Update).
+func (vu *NetViewUpdate) GoUpdate(mode, level enums.Enum) {
+ if !vu.ShouldUpdate() {
+ return
}
+ if vu.IsCycleUpdating() && vu.View.Options.Raster.On { // no update for raster
+ return
+ }
+ counters := vu.CounterFunc(mode, level)
+ vu.View.Record(counters, -1) // -1 = default incrementing raster
+ vu.View.GoUpdateView()
}
-// LooperUpdateNetView adds netview update calls at each time level
-func LooperUpdateNetView(ls *looper.Stacks, viewupdt *netview.ViewUpdate, net *Network, ctrUpdateFunc func(tm etime.Times)) {
- for m, stack := range ls.Stacks {
- for t, loop := range stack.Loops {
- curTime := t.(etime.Times)
- if curTime != etime.Cycle {
- loop.OnEnd.Add("GUI:UpdateNetView", func() {
- ctrUpdateFunc(curTime)
- viewupdt.Testing = m == etime.Test
- viewupdt.UpdateTime(curTime)
- })
- }
- }
- cycLoop := ls.Loop(m, etime.Cycle)
- cycLoop.OnEnd.Add("GUI:UpdateNetView", func() {
- cyc := cycLoop.Counter.Cur
- ctrUpdateFunc(etime.Cycle)
- viewupdt.Testing = m == etime.Test
- viewupdt.UpdateCycle(cyc)
- })
+// Update does an update if view is On, visible and active,
+// including recording new data and driving update of display.
+// This version is only for calling from the main event loop
+// (see also GoUpdate).
+func (vu *NetViewUpdate) Update(mode, level enums.Enum) {
+ if !vu.ShouldUpdate() {
+ return
}
+ counters := vu.CounterFunc(mode, level)
+ vu.View.Record(counters, -1) // -1 = default incrementing raster
+ vu.View.UpdateView()
}
-// LooperUpdatePlots adds plot update calls at each time level
-func LooperUpdatePlots(ls *looper.Stacks, gui *egui.GUI) {
- for m, stack := range ls.Stacks {
- for t, loop := range stack.Loops {
- curTime := t.(etime.Times)
- curLoop := loop
- if curTime == etime.Cycle {
- curLoop.OnEnd.Add("GUI:UpdatePlot", func() {
- cyc := curLoop.Counter.Cur
- gui.GoUpdateCyclePlot(m.(etime.Modes), cyc)
- })
- } else {
- curLoop.OnEnd.Add("GUI:UpdatePlot", func() {
- gui.GoUpdatePlot(m.(etime.Modes), curTime)
- })
- }
- }
+// UpdateWhenStopped does an update when the network updating was stopped
+// either via stepping or hitting the stop button.
+// This has different logic for the raster view vs. regular.
+// This is only for calling from a separate goroutine,
+// not the main event loop.
+func (vu *NetViewUpdate) UpdateWhenStopped(mode, level enums.Enum) {
+ if !vu.ShouldUpdate() {
+ return
+ }
+ if !vu.View.Options.Raster.On { // always record when not in raster mode
+ counters := vu.CounterFunc(mode, level)
+ vu.View.Record(counters, -1) // -1 = use a dummy counter
+ }
+ vu.View.GoUpdateView()
+}
+
+// IsCycleUpdating returns true if the view is updating at a cycle level,
+// either from raster or literal cycle level.
+func (vu *NetViewUpdate) IsCycleUpdating() bool {
+ if !vu.ShouldUpdate() {
+ return false
+ }
+ if vu.View.Options.Raster.On || vu.Time == Cycle {
+ return true
+ }
+ return false
+}
+
+// IsViewingSynapse returns true if netview is actively viewing synapses.
+func (vu *NetViewUpdate) IsViewingSynapse() bool {
+ if !vu.ShouldUpdate() {
+ return false
+ }
+ return vu.View.IsViewingSynapse()
+}
+
+// UpdateCycle triggers an update at the Cycle (Millisecond) timescale,
+// using given text to display at bottom of view
+func (vu *NetViewUpdate) UpdateCycle(cyc int, mode, level enums.Enum) {
+ if !vu.ShouldUpdate() {
+ return
+ }
+ if vu.View.Options.Raster.On {
+ counters := vu.CounterFunc(mode, level)
+ vu.updateCycleRaster(cyc, counters)
+ return
+ }
+ if vu.Time == Theta { // only trial
+ return
+ }
+ vtc := vu.Time.Cycles()
+ if (cyc+1)%vtc == 0 {
+ vu.GoUpdate(mode, level)
+ }
+}
+
+// updateCycleRaster raster version of Cycle update.
+// it always records data at the cycle level.
+func (vu *NetViewUpdate) updateCycleRaster(cyc int, counters string) {
+ vu.View.Record(counters, cyc)
+ vtc := vu.Time.Cycles()
+ if (cyc+1)%vtc == 0 {
+ vu.View.GoUpdateView()
+ }
+}
+
+// RecordSyns records synaptic data -- stored separate from unit data
+// and only needs to be called when synaptic values are updated.
+// Should be done when the DWt values have been computed, before
+// updating Wts and zeroing.
+// NetView displays this recorded data when Update is next called.
+func (vu *NetViewUpdate) RecordSyns() {
+ if !vu.ShouldUpdate() {
+ return
}
+ vu.View.RecordSyns()
}
diff --git a/leabra/network.go b/leabra/network.go
index a27b018..e947a47 100644
--- a/leabra/network.go
+++ b/leabra/network.go
@@ -10,8 +10,8 @@ import (
"unsafe"
"cogentcore.org/core/base/datasize"
+ "cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/paths"
- "github.com/emer/etensor/tensor"
)
///////////////////////////////////////////////////////////////////////////
@@ -38,6 +38,7 @@ func (nt *Network) AlphaCycInit(updtActAvg bool) {
}
ly.AlphaCycInit(updtActAvg)
}
+ nt.Context.AlphaCycStart()
}
// Cycle runs one cycle of activation updating:
@@ -48,14 +49,15 @@ func (nt *Network) AlphaCycInit(updtActAvg bool) {
// * Average and Max Act stats
// This basic version doesn't use the time info, but more specialized types do, and we
// want to keep a consistent API for end-user code.
-func (nt *Network) Cycle(ctx *Context) {
- nt.SendGDelta(ctx) // also does integ
- nt.AvgMaxGe(ctx)
- nt.InhibFromGeAct(ctx)
- nt.ActFromG(ctx)
- nt.AvgMaxAct(ctx)
- nt.CyclePost(ctx) // general post cycle actions.
- nt.RecGateAct(ctx) // Record activation state at time of gating (in ActG neuron var)
+func (nt *Network) Cycle() {
+ nt.SendGDelta() // also does integ
+ nt.AvgMaxGe()
+ nt.InhibFromGeAct()
+ nt.ActFromG()
+ nt.AvgMaxAct()
+ nt.CyclePost() // general post cycle actions.
+ nt.RecGateAct() // Record activation state at time of gating (in ActG neuron var)
+ nt.Context.CycleInc() // keep synced
}
//////////////////////////////////////////////////////////////////////////////////////
@@ -63,7 +65,8 @@ func (nt *Network) Cycle(ctx *Context) {
// SendGeDelta sends change in activation since last sent, if above thresholds
// and integrates sent deltas into GeRaw and time-integrated Ge values
-func (nt *Network) SendGDelta(ctx *Context) {
+func (nt *Network) SendGDelta() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -79,7 +82,8 @@ func (nt *Network) SendGDelta(ctx *Context) {
}
// AvgMaxGe computes the average and max Ge stats, used in inhibition
-func (nt *Network) AvgMaxGe(ctx *Context) {
+func (nt *Network) AvgMaxGe() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -89,7 +93,8 @@ func (nt *Network) AvgMaxGe(ctx *Context) {
}
// InhibiFromGeAct computes inhibition Gi from Ge and Act stats within relevant Pools
-func (nt *Network) InhibFromGeAct(ctx *Context) {
+func (nt *Network) InhibFromGeAct() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -99,7 +104,8 @@ func (nt *Network) InhibFromGeAct(ctx *Context) {
}
// ActFromG computes rate-code activation from Ge, Gi, Gl conductances
-func (nt *Network) ActFromG(ctx *Context) {
+func (nt *Network) ActFromG() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -109,7 +115,8 @@ func (nt *Network) ActFromG(ctx *Context) {
}
// AvgMaxGe computes the average and max Ge stats, used in inhibition
-func (nt *Network) AvgMaxAct(ctx *Context) {
+func (nt *Network) AvgMaxAct() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -122,7 +129,8 @@ func (nt *Network) AvgMaxAct(ctx *Context) {
// value has been computed.
// SuperLayer computes Burst activity.
// GateLayer (GPiThal) computes gating, sends to other layers.
-func (nt *Network) CyclePost(ctx *Context) {
+func (nt *Network) CyclePost() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -132,7 +140,8 @@ func (nt *Network) CyclePost(ctx *Context) {
}
// QuarterFinal does updating after end of a quarter, for first 2
-func (nt *Network) QuarterFinal(ctx *Context) {
+func (nt *Network) QuarterFinal() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -145,10 +154,12 @@ func (nt *Network) QuarterFinal(ctx *Context) {
}
ly.CtxtFromGe(ctx)
}
+ ctx.QuarterInc()
}
// MinusPhase is called at the end of the minus phase (quarter 3), to record state.
-func (nt *Network) MinusPhase(ctx *Context) {
+func (nt *Network) MinusPhase() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -158,7 +169,8 @@ func (nt *Network) MinusPhase(ctx *Context) {
}
// PlusPhase is called at the end of the plus phase (quarter 4), to record state.
-func (nt *Network) PlusPhase(ctx *Context) {
+func (nt *Network) PlusPhase() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -204,6 +216,13 @@ func (nt *Network) WtFromDWt() {
}
}
+// DWtToWt computes the weight change (learning) based on current
+// running-average activation values, and then WtFromDWt.
+func (nt *Network) DWtToWt() {
+ nt.DWt()
+ nt.WtFromDWt()
+}
+
// LrateMult sets the new Lrate parameter for Paths to LrateInit * mult.
// Useful for implementing learning rate schedules.
func (nt *Network) LrateMult(mult float32) {
@@ -215,8 +234,7 @@ func (nt *Network) LrateMult(mult float32) {
}
}
-//////////////////////////////////////////////////////////////////////////////////////
-// Init methods
+//////// Init methods
// InitWeights initializes synaptic weights and all other
// associated long-term state variables including running-average
diff --git a/leabra/networkbase.go b/leabra/networkbase.go
index 6921f12..d56c84c 100644
--- a/leabra/networkbase.go
+++ b/leabra/networkbase.go
@@ -12,12 +12,12 @@ import (
"log"
"os"
"path/filepath"
+ "strings"
"time"
+ "cogentcore.org/core/base/iox/tomlx"
"cogentcore.org/core/core"
- "github.com/emer/emergent/v2/econfig"
"github.com/emer/emergent/v2/emer"
- "github.com/emer/emergent/v2/params"
"github.com/emer/emergent/v2/paths"
)
@@ -25,9 +25,16 @@ import (
type Network struct {
emer.NetworkBase
+ // Context is the context state. Other copies of Context can be maintained
+ // and [SetContext] to update this one, but this instance is the canonical one.
+ Context Context
+
// list of layers
Layers []*Layer
+ // LayerClassMap is a map from class name to layer names.
+ LayerClassMap map[string][]string `display:"-"`
+
// number of parallel threads (go routines) to use.
NThreads int `edit:"-"`
@@ -49,6 +56,7 @@ func NewNetwork(name string) *Network {
net := &Network{}
emer.InitNetwork(net, name)
net.NThreads = 1
+ net.Context.Defaults()
return net
}
@@ -69,6 +77,62 @@ func (nt *Network) LayersByType(layType ...LayerTypes) []string {
return nt.LayersByClass(nms...)
}
+func (nt *Network) UpdateLayerMaps() {
+ nt.UpdateLayerNameMap()
+ nt.LayerClassMap = make(map[string][]string)
+ for _, ly := range nt.Layers {
+ cs := ly.Type.String() + " " + ly.Class
+ cls := strings.Split(cs, " ")
+ for _, cl := range cls {
+ if cl == "" {
+ continue
+ }
+ ll := nt.LayerClassMap[cl]
+ ll = append(ll, ly.Name)
+ nt.LayerClassMap[cl] = ll
+ }
+ }
+}
+
+// LayersByClass returns a list of layer names by given class(es).
+// Lists are compiled when network Build() function called,
+// or now if not yet present.
+// The layer Type is always included as a Class, along with any other
+// space-separated strings specified in Class for parameter styling, etc.
+// If no classes are passed, all layer names in order are returned.
+func (nt *Network) LayersByClass(classes ...string) []string {
+ if nt.LayerClassMap == nil {
+ nt.UpdateLayerMaps()
+ }
+ var nms []string
+ if len(classes) == 0 {
+ for _, ly := range nt.Layers {
+ if ly.Off {
+ continue
+ }
+ nms = append(nms, ly.Name)
+ }
+ return nms
+ }
+ for _, lc := range classes {
+ nms = append(nms, nt.LayerClassMap[lc]...)
+ }
+ // only get unique layers
+ layers := []string{}
+ has := map[string]bool{}
+ for _, nm := range nms {
+ if has[nm] {
+ continue
+ }
+ layers = append(layers, nm)
+ has[nm] = true
+ }
+ if len(layers) == 0 {
+ panic(fmt.Sprintf("No Layers found for query: %#v.", classes))
+ }
+ return layers
+}
+
// KeyLayerParams returns a listing for all layers in the network,
// of the most important layer-level params (specific to each algorithm).
func (nt *Network) KeyLayerParams() string {
@@ -86,7 +150,7 @@ func (nt *Network) KeyPathParams() string {
// or `params_2006_01_02` (year, month, day) datestamp,
// providing a snapshot of the simulation params for easy diffs and later reference.
// Also saves current Config and Params state.
-func (nt *Network) SaveParamsSnapshot(pars *params.Sets, cfg any, good bool) error {
+func (nt *Network) SaveParamsSnapshot(cfg any, good bool) error {
date := time.Now().Format("2006_01_02")
if good {
date = "good"
@@ -96,10 +160,10 @@ func (nt *Network) SaveParamsSnapshot(pars *params.Sets, cfg any, good bool) err
if err != nil {
log.Println(err) // notify but OK if it exists
}
- econfig.Save(cfg, filepath.Join(dir, "config.toml"))
- pars.SaveTOML(core.Filename(filepath.Join(dir, "params.toml")))
- nt.SaveAllParams(core.Filename(filepath.Join(dir, "params_all.txt")))
- nt.SaveNonDefaultParams(core.Filename(filepath.Join(dir, "params_nondef.txt")))
+ fmt.Println("Saving params to:", dir)
+ tomlx.Save(cfg, filepath.Join(dir, "config.toml"))
+ nt.SaveParams(emer.AllParams, core.Filename(filepath.Join(dir, "params_all.txt")))
+ nt.SaveParams(emer.NonDefault, core.Filename(filepath.Join(dir, "params_nondef.txt")))
nt.SaveAllLayerInhibs(core.Filename(filepath.Join(dir, "params_layers.txt")))
nt.SaveAllPathScales(core.Filename(filepath.Join(dir, "params_paths.txt")))
return nil
@@ -135,25 +199,12 @@ func (nt *Network) AllLayerInhibs() string {
if ly.Off {
continue
}
- ph := ly.ParamsHistory.ParamsHistory()
- lh := ph["Layer.Inhib.ActAvg.Init"]
- if lh != "" {
- lh = "Params: " + lh
- }
- str += fmt.Sprintf("%15s\t\tNominal:\t%6.2f\t%s\n", ly.Name, ly.Inhib.ActAvg.Init, lh)
+ str += fmt.Sprintf("%15s\t\tNominal:\t%6.2f\n", ly.Name, ly.Inhib.ActAvg.Init)
if ly.Inhib.Layer.On {
- lh := ph["Layer.Inhib.Layer.Gi"]
- if lh != "" {
- lh = "Params: " + lh
- }
- str += fmt.Sprintf("\t\t\t\t\t\tLayer.Gi:\t%6.2f\t%s\n", ly.Inhib.Layer.Gi, lh)
+ str += fmt.Sprintf("\t\t\t\t\t\tLayer.Gi:\t%6.2f\n", ly.Inhib.Layer.Gi)
}
if ly.Inhib.Pool.On {
- lh := ph["Layer.Inhib.Pool.Gi"]
- if lh != "" {
- lh = "Params: " + lh
- }
- str += fmt.Sprintf("\t\t\t\t\t\tPool.Gi: \t%6.2f\t%s\n", ly.Inhib.Pool.Gi, lh)
+ str += fmt.Sprintf("\t\t\t\t\t\tPool.Gi: \t%6.2f\n", ly.Inhib.Pool.Gi)
}
str += fmt.Sprintf("\n")
}
@@ -183,6 +234,7 @@ func (nt *Network) AllPathScales() string {
// Defaults sets all the default parameters for all layers and pathways
func (nt *Network) Defaults() {
+ nt.Context.Defaults()
nt.WtBalInterval = 10
nt.WtBalCtr = 0
for li, ly := range nt.Layers {
@@ -231,13 +283,13 @@ func (nt *Network) SynVarProps() map[string]string {
// AddLayerInit is implementation routine that takes a given layer and
// adds it to the network, and initializes and configures it properly.
-func (nt *Network) AddLayerInit(ly *Layer, name string, shape []int, typ LayerTypes) {
+func (nt *Network) AddLayerInit(ly *Layer, name string, typ LayerTypes, shape ...int) {
if nt.EmerNetwork == nil {
log.Printf("Network EmerNetwork is nil: MUST call emer.InitNetwork on network, passing a pointer to the network to initialize properly!")
return
}
emer.InitLayer(ly, name)
- ly.SetShape(shape)
+ ly.Shape.SetShapeSizes(shape...)
ly.Type = typ
nt.Layers = append(nt.Layers, ly)
nt.UpdateLayerMaps()
@@ -250,16 +302,16 @@ func (nt *Network) AddLayerInit(ly *Layer, name string, shape []int, typ LayerTy
// shape is in row-major format with outer-most dimensions first:
// e.g., 4D 3, 2, 4, 5 = 3 rows (Y) of 2 cols (X) of pools, with each unit
// group having 4 rows (Y) of 5 (X) units.
-func (nt *Network) AddLayer(name string, shape []int, typ LayerTypes) *Layer {
+func (nt *Network) AddLayer(name string, typ LayerTypes, shape ...int) *Layer {
ly := &Layer{} // essential to use EmerNet interface here!
- nt.AddLayerInit(ly, name, shape, typ)
+ nt.AddLayerInit(ly, name, typ, shape...)
return ly
}
// AddLayer2D adds a new layer with given name and 2D shape to the network.
// 2D and 4D layer shapes are generally preferred but not essential.
-func (nt *Network) AddLayer2D(name string, shapeY, shapeX int, typ LayerTypes) *Layer {
- return nt.AddLayer(name, []int{shapeY, shapeX}, typ)
+func (nt *Network) AddLayer2D(name string, typ LayerTypes, shapeY, shapeX int) *Layer {
+ return nt.AddLayer(name, typ, shapeY, shapeX)
}
// AddLayer4D adds a new layer with given name and 4D shape to the network.
@@ -267,8 +319,8 @@ func (nt *Network) AddLayer2D(name string, shapeY, shapeX int, typ LayerTypes) *
// shape is in row-major format with outer-most dimensions first:
// e.g., 4D 3, 2, 4, 5 = 3 rows (Y) of 2 cols (X) of pools, with each pool
// having 4 rows (Y) of 5 (X) neurons.
-func (nt *Network) AddLayer4D(name string, nPoolsY, nPoolsX, nNeurY, nNeurX int, typ LayerTypes) *Layer {
- return nt.AddLayer(name, []int{nPoolsY, nPoolsX, nNeurY, nNeurX}, typ)
+func (nt *Network) AddLayer4D(name string, typ LayerTypes, nPoolsY, nPoolsX, nNeurY, nNeurX int) *Layer {
+ return nt.AddLayer(name, typ, nPoolsY, nPoolsX, nNeurY, nNeurX)
}
// ConnectLayerNames establishes a pathway between two layers, referenced by name
@@ -341,7 +393,7 @@ func (nt *Network) LateralConnectLayer(lay *Layer, pat paths.Pattern) *Path {
// Build constructs the layer and pathway state based on the layer shapes
// and patterns of interconnectivity
func (nt *Network) Build() error {
- nt.MakeLayerMaps()
+ nt.UpdateLayerMaps()
var errs []error
for li, ly := range nt.Layers {
ly.Index = li
diff --git a/leabra/neuromod.go b/leabra/neuromod.go
index 4417ed7..7d87eee 100644
--- a/leabra/neuromod.go
+++ b/leabra/neuromod.go
@@ -73,7 +73,7 @@ func (ly *Layer) SendACh(ach float32) {
// AddClampDaLayer adds a ClampDaLayer of given name
func (nt *Network) AddClampDaLayer(name string) *Layer {
- return nt.AddLayer2D(name, 1, 1, ClampDaLayer)
+ return nt.AddLayer2D(name, ClampDaLayer, 1, 1)
}
func (ly *Layer) ClampDaDefaults() {
diff --git a/leabra/params.go b/leabra/params.go
new file mode 100644
index 0000000..1992b58
--- /dev/null
+++ b/leabra/params.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2024, The Emergent Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package leabra
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "cogentcore.org/core/base/errors"
+ "cogentcore.org/lab/base/mpi"
+ "github.com/cogentcore/yaegi/interp"
+ "github.com/emer/emergent/v2/params"
+)
+
+// type aliases for params generic types that we use:
+type (
+ // LayerSheets contains Layer parameter Sheets.
+ LayerSheets = params.Sheets[*LayerParams]
+
+ // LayerSheet is one Layer parameter Sheet.
+ LayerSheet = params.Sheet[*LayerParams]
+
+ // LayerSel is one Layer parameter Selector.
+ LayerSel = params.Sel[*LayerParams]
+
+ // PathSheets contains Path parameter Sheets.
+ PathSheets = params.Sheets[*PathParams]
+
+ // PathSheet is one Path parameter Sheet.
+ PathSheet = params.Sheet[*PathParams]
+
+ // PathSel is one Path parameter Selector.
+ PathSel = params.Sel[*PathParams]
+)
+
+// Params contains the [LayerParams] and [PathParams] parameter setting functions
+// provided by the [emergent] [params] package.
+type Params struct {
+
+ // Layer has the parameters to apply to the [LayerParams] for layers.
+ Layer LayerSheets `display:"-"`
+
+ // Path has the parameters to apply to the [PathParams] for paths.
+ Path PathSheets `display:"-"`
+
+ // ExtraSheets has optional additional sheets of parameters to apply
+ // after the default Base sheet. Use "Script" for default Script sheet.
+ // Multiple names separated by spaces can be used (don't put spaces in Sheet names!)
+ ExtraSheets string
+
+ // Tag is an optional additional tag to add to log file names to identify
+ // a specific run of the model (typically set by a config file or args).
+ Tag string
+
+ // Script is a parameter setting script, which adds to the Layer and Path sheets
+ // typically using the "Script" set name.
+ Script string `display:"-"`
+
+ // Interp is the yaegi interpreter for running the script.
+ Interp *interp.Interpreter `display:"-"`
+}
+
+// ScriptParams is a template for yaegi interpreted parameters
+var ScriptParams = `sim.Sim.Params.Layer["Script"] = &axon.LayerSheet{
+ &axon.LayerSel{Sel:"Layer", Set: func(ly *axon.LayerParams) {
+ // set params
+ }},
+}
+sim.Sim.Params.Path["Script"] = &axon.PathSheet{
+ &axon.PathSel{Sel:"Path", Set: func(pt *axon.PathParams) {
+ // set params
+ }},
+}
+`
+
+// Config configures the ExtraSheets, Tag, and Network fields, and
+// initializes the yaegi interpreter for dynamic parameter scripts.
+// Pass a reflect.ValueOf(*Sim) to initialize the yaegi interpreter.
+// Sim must have Params in a field called Params.
+func (pr *Params) Config(layer LayerSheets, path PathSheets, extraSheets, tag string, sim reflect.Value) {
+ pr.Layer = layer
+ pr.Path = path
+ report := ""
+ if extraSheets != "" {
+ pr.ExtraSheets = extraSheets
+ report += " ExtraSheets: " + extraSheets
+ }
+ if tag != "" {
+ pr.Tag = tag
+ report += " Tag: " + tag
+ }
+ if report != "" {
+ mpi.Printf("Params Set: %s\n", report)
+ }
+ pr.Interp = interp.New(interp.Options{})
+ pr.Interp.Use(interp.Exports{
+ "github.com/emer/axon/axon": map[string]reflect.Value{
+ "LayerParams": reflect.ValueOf((*LayerParams)(nil)),
+ "PathParams": reflect.ValueOf((*PathParams)(nil)),
+ "LayerSel": reflect.ValueOf((*LayerSel)(nil)),
+ "LayerSheet": reflect.ValueOf((*LayerSheet)(nil)),
+ "LayerSheets": reflect.ValueOf((*LayerSheets)(nil)),
+ "PathSel": reflect.ValueOf((*PathSel)(nil)),
+ "PathSheet": reflect.ValueOf((*PathSheet)(nil)),
+ "PathSheets": reflect.ValueOf((*PathSheets)(nil)),
+ },
+ "github.com/emer/axon/sim/sim": map[string]reflect.Value{
+ "Sim": sim,
+ },
+ })
+ pr.Interp.ImportUsed()
+}
+
+// Name returns name of current set of parameters, including Tag.
+// if ExtraSheets is empty then it returns "Base", otherwise returns ExtraSheets
+func (pr *Params) Name() string {
+ rn := ""
+ if pr.Tag != "" {
+ rn += pr.Tag + "_"
+ }
+ if pr.ExtraSheets == "" {
+ rn += "Base"
+ } else {
+ rn += pr.ExtraSheets
+ }
+ return rn
+}
+
+// RunName returns the name of a simulation run based on params Name()
+// and starting run number.
+func (pr *Params) RunName(startRun int) string {
+ return fmt.Sprintf("%s_%03d", pr.Name(), startRun)
+}
+
+// ApplyAll applies all parameters to given network,
+// using "Base" Sheet then any ExtraSheets,
+// for Layer and Path params (each must have the named sheets,
+// for proper error checking in case of typos).
+func (pr *Params) ApplyAll(net *Network) {
+ pr.ApplySheet(net, "Base")
+ if pr.ExtraSheets == "" {
+ return
+ }
+ if pr.Script != "" {
+ _, err := pr.Interp.Eval(pr.Script)
+ if err != nil {
+ fmt.Println(pr.Script)
+ errors.Log(err)
+ }
+ }
+ sps := strings.Fields(pr.ExtraSheets)
+ for _, ps := range sps {
+ if ps == "Base" {
+ continue
+ }
+ pr.ApplySheet(net, ps)
+ }
+}
+
+// ApplySheet applies parameters for given [params.Sheet] name
+// for Layer and Path params (each must have the named sheets,
+// for proper error checking in case of typos).
+func (pr *Params) ApplySheet(net *Network, sheetName string) error {
+ lsheet, err := pr.Layer.SheetByName(sheetName)
+ if err != nil {
+ return err
+ }
+ psheet, err := pr.Path.SheetByName(sheetName)
+ if err != nil {
+ return err
+ }
+ lsheet.SelMatchReset()
+ psheet.SelMatchReset()
+
+ ApplyParamSheets(net, lsheet, psheet)
+ return nil
+}
+
+// ApplyParamSheets applies Layer and Path parameters from given sheets,
+// returning true if any applied.
+func ApplyParamSheets(net *Network, layer *params.Sheet[*LayerParams], path *params.Sheet[*PathParams]) bool {
+ appl := ApplyLayerSheet(net, layer)
+ appp := ApplyPathSheet(net, path)
+ return appl || appp
+}
+
+// ApplyLayerSheet applies Layer parameters from given sheet, returning true if any applied.
+func ApplyLayerSheet(net *Network, sheet *params.Sheet[*LayerParams]) bool {
+ applied := false
+ for _, ly := range net.Layers {
+ app := sheet.Apply(ly.Params)
+ ly.UpdateParams()
+ if app {
+ applied = true
+ }
+ }
+ return applied
+}
+
+// ApplyPathSheet applies Path parameters from given sheet, returning true if any applied.
+func ApplyPathSheet(net *Network, sheet *params.Sheet[*PathParams]) bool {
+ applied := false
+ for _, ly := range net.Layers {
+ for _, pt := range ly.RecvPaths {
+ app := sheet.Apply(pt.Params)
+ pt.UpdateParams()
+ if app {
+ applied = true
+ }
+ }
+ }
+ return applied
+}
diff --git a/leabra/path.go b/leabra/path.go
index 43a5e7b..c739c26 100644
--- a/leabra/path.go
+++ b/leabra/path.go
@@ -6,7 +6,7 @@ package leabra
import (
"cogentcore.org/core/math32"
- "github.com/emer/etensor/tensor"
+ "cogentcore.org/lab/tensor"
)
// note: path.go contains algorithm methods; pathbase.go has infrastructure.
@@ -38,9 +38,9 @@ func (pt *Path) SetScalesRPool(scales tensor.Tensor) {
for rux := 0; rux < rNuX; rux++ {
ri := 0
if r2d {
- ri = rsh.Offset([]int{ruy, rux})
+ ri = rsh.IndexTo1D(ruy, rux)
} else {
- ri = rsh.Offset([]int{rpy, rpx, ruy, rux})
+ ri = rsh.IndexTo1D(rpy, rpx, ruy, rux)
}
scst := (ruy*rNuX + rux) * rfsz
nc := int(pt.RConN[ri])
diff --git a/leabra/pathbase.go b/leabra/pathbase.go
index 43af30c..b5a29a6 100644
--- a/leabra/pathbase.go
+++ b/leabra/pathbase.go
@@ -5,21 +5,23 @@
package leabra
import (
- "encoding/json"
"errors"
"fmt"
"io"
"log"
+ "reflect"
"strconv"
"strings"
"cogentcore.org/core/base/indent"
+ "cogentcore.org/core/base/reflectx"
"cogentcore.org/core/math32"
"cogentcore.org/core/math32/minmax"
+ "cogentcore.org/lab/tensor"
"github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
"github.com/emer/emergent/v2/paths"
"github.com/emer/emergent/v2/weights"
- "github.com/emer/etensor/tensor"
)
// note: paths.go contains algorithm methods; pathbase.go has infrastructure.
@@ -183,16 +185,46 @@ func (pt *Path) ShouldDisplay(field string) bool {
return true
}
-// AllParams returns a listing of all parameters in the Layer
-func (pt *Path) AllParams() string {
- str := "///////////////////////////////////////////////////\nPath: " + pt.Name + "\n"
- b, _ := json.MarshalIndent(&pt.WtInit, "", " ")
- str += "WtInit: {\n " + JsonToParams(b)
- b, _ = json.MarshalIndent(&pt.WtScale, "", " ")
- str += "WtScale: {\n " + JsonToParams(b)
- b, _ = json.MarshalIndent(&pt.Learn, "", " ")
- str += "Learn: {\n " + strings.Replace(JsonToParams(b), " XCal: {", "\n XCal: {", -1)
- return str
+// ParamsString returns a listing of all parameters in the Layer and
+// pathways within the layer. If nonDefault is true, only report those
+// not at their default values.
+func (pt *Path) ParamsString(nonDefault bool) string {
+ var b strings.Builder
+ b.WriteString(" //////// Path: " + pt.Name + "\n")
+ b.WriteString(params.PrintStruct(pt, 1, func(path string, ft reflect.StructField, fv any) bool {
+ if ft.Tag.Get("display") == "-" {
+ return false
+ }
+ if nonDefault {
+ if def := ft.Tag.Get("default"); def != "" {
+ if reflectx.ValueIsDefault(reflect.ValueOf(fv), def) {
+ return false
+ }
+ } else {
+ if reflectx.NonPointerType(ft.Type).Kind() != reflect.Struct {
+ return false
+ }
+ }
+ }
+ switch path {
+ case "WtInit", "WtScale", "Learn":
+ return true
+ case "CHL":
+ return pt.Type == CHLPath
+ case "Trace":
+ return pt.Type == MatrixPath
+ }
+ return false
+ },
+ func(path string, ft reflect.StructField, fv any) string {
+ if nonDefault {
+ if def := ft.Tag.Get("default"); def != "" {
+ return reflectx.ToString(fv) + " [" + def + "]"
+ }
+ }
+ return ""
+ }))
+ return b.String()
}
func (pt *Path) SynVarNames() []string {
diff --git a/leabra/pbwm_layers.go b/leabra/pbwm_layers.go
index 64d91f6..beb315f 100644
--- a/leabra/pbwm_layers.go
+++ b/leabra/pbwm_layers.go
@@ -90,7 +90,7 @@ func (ly *Layer) MatrixOutAChInhib(ctx *Context) {
for xp := maintN; xp < xpN; xp++ {
for yn := 0; yn < ynN; yn++ {
for xn := 0; xn < xnN; xn++ {
- ni := ly.Shape.Offset([]int{yp, xp, yn, xn})
+ ni := ly.Shape.IndexTo1D(yp, xp, yn, xn)
nrn := &ly.Neurons[ni]
if nrn.IsOff() {
continue
diff --git a/leabra/pbwm_net.go b/leabra/pbwm_net.go
index 8908f11..46893f8 100644
--- a/leabra/pbwm_net.go
+++ b/leabra/pbwm_net.go
@@ -9,7 +9,8 @@ import (
)
// RecGateAct is called after GateSend, to record gating activations at time of gating
-func (nt *Network) RecGateAct(ctx *Context) {
+func (nt *Network) RecGateAct() {
+ ctx := &nt.Context
for _, ly := range nt.Layers {
if ly.Off {
continue
@@ -23,7 +24,7 @@ func (nt *Network) RecGateAct(ctx *Context) {
// and each pool has nNeurY, nNeurX neurons. da gives the DaReceptor type (D1R = Go, D2R = NoGo)
func (nt *Network) AddMatrixLayer(name string, nY, nMaint, nOut, nNeurY, nNeurX int, da DaReceptors) *Layer {
tX := nMaint + nOut
- mtx := nt.AddLayer4D(name, nY, tX, nNeurY, nNeurX, MatrixLayer)
+ mtx := nt.AddLayer4D(name, MatrixLayer, nY, tX, nNeurY, nNeurX)
mtx.PBWM.DaR = da
mtx.PBWM.Set(nY, nMaint, nOut)
return mtx
@@ -34,7 +35,7 @@ func (nt *Network) AddMatrixLayer(name string, nY, nMaint, nOut, nNeurY, nNeurX
// and each pool has 1x1 neurons.
func (nt *Network) AddGPeLayer(name string, nY, nMaint, nOut int) *Layer {
tX := nMaint + nOut
- gpe := nt.AddLayer4D(name, nY, tX, 1, 1, GPeLayer)
+ gpe := nt.AddLayer4D(name, GPeLayer, nY, tX, 1, 1)
return gpe
}
@@ -43,14 +44,14 @@ func (nt *Network) AddGPeLayer(name string, nY, nMaint, nOut int) *Layer {
// and each pool has 1x1 neurons.
func (nt *Network) AddGPiThalLayer(name string, nY, nMaint, nOut int) *Layer {
tX := nMaint + nOut
- gpi := nt.AddLayer4D(name, nY, tX, 1, 1, GPiThalLayer)
+ gpi := nt.AddLayer4D(name, GPiThalLayer, nY, tX, 1, 1)
gpi.PBWM.Set(nY, nMaint, nOut)
return gpi
}
// AddCINLayer adds a CINLayer, with a single neuron.
func (nt *Network) AddCINLayer(name string) *Layer {
- cin := nt.AddLayer2D(name, 1, 1, CINLayer)
+ cin := nt.AddLayer2D(name, CINLayer, 1, 1)
return cin
}
@@ -94,12 +95,12 @@ func (nt *Network) AddDorsalBG(prefix string, nY, nMaint, nOut, nNeurY, nNeurX i
// else Full set of 5 dynamic maintenance types. Both have the class "PFC" set.
// deep is positioned behind super.
func (nt *Network) AddPFCLayer(name string, nY, nX, nNeurY, nNeurX int, out, dynMaint bool) (sp, dp *Layer) {
- sp = nt.AddLayer4D(name, nY, nX, nNeurY, nNeurX, SuperLayer)
+ sp = nt.AddLayer4D(name, SuperLayer, nY, nX, nNeurY, nNeurX)
dym := 1
if !dynMaint {
dym = 5
}
- dp = nt.AddLayer4D(name+"D", nY, nX, dym*nNeurY, nNeurX, PFCDeepLayer)
+ dp = nt.AddLayer4D(name+"D", PFCDeepLayer, nY, nX, dym*nNeurY, nNeurX)
sp.AddClass("PFC")
dp.AddClass("PFC")
dp.PFCGate.OutGate = out
diff --git a/leabra/rl.go b/leabra/rl.go
index 422ab55..35f6193 100644
--- a/leabra/rl.go
+++ b/leabra/rl.go
@@ -96,9 +96,9 @@ func (ly *Layer) ActFromGRWDa(ctx *Context) {
// Reward layer, a RWPred prediction layer, and a dopamine layer that computes diff.
// Only generates DA when Rew layer has external input -- otherwise zero.
func (nt *Network) AddRWLayers(prefix string, space float32) (rew, rp, da *Layer) {
- rew = nt.AddLayer2D(prefix+"Rew", 1, 1, InputLayer)
- rp = nt.AddLayer2D(prefix+"RWPred", 1, 1, RWPredLayer)
- da = nt.AddLayer2D(prefix+"DA", 1, 1, RWDaLayer)
+ rew = nt.AddLayer2D(prefix+"Rew", InputLayer, 1, 1)
+ rp = nt.AddLayer2D(prefix+"RWPred", RWPredLayer, 1, 1)
+ da = nt.AddLayer2D(prefix+"DA", RWDaLayer, 1, 1)
da.RW.RewLay = rew.Name
rp.PlaceBehind(rew, space)
da.PlaceBehind(rp, space)
@@ -284,10 +284,10 @@ func (pt *Path) DWtTDPred() {
// Pathway from Rew to RewInteg is given class TDToInteg -- should
// have no learning and 1 weight.
func (nt *Network) AddTDLayers(prefix string, space float32) (rew, rp, ri, td *Layer) {
- rew = nt.AddLayer2D(prefix+"Rew", 1, 1, InputLayer)
- rp = nt.AddLayer2D(prefix+"Pred", 1, 1, TDPredLayer)
- ri = nt.AddLayer2D(prefix+"Integ", 1, 1, TDIntegLayer)
- td = nt.AddLayer2D(prefix+"TD", 1, 1, TDDaLayer)
+ rew = nt.AddLayer2D(prefix+"Rew", InputLayer, 1, 1)
+ rp = nt.AddLayer2D(prefix+"Pred", TDPredLayer, 1, 1)
+ ri = nt.AddLayer2D(prefix+"Integ", TDIntegLayer, 1, 1)
+ td = nt.AddLayer2D(prefix+"TD", TDDaLayer, 1, 1)
ri.TD.PredLay = rp.Name
td.TD.IntegLay = ri.Name
From 8f676b89f941edb161a072ffa9a9a64d9d6281a6 Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Wed, 9 Jul 2025 16:11:52 -0700
Subject: [PATCH 03/14] lab: ra25 working with new update
---
examples/bench/README.md | 19 -
examples/bench/bench.go | 229 --
examples/bench/bench_hardware.md | 78 -
examples/bench/bench_results.md | 175 -
examples/bench/run_bench.sh | 32 -
examples/bench/run_hardware.sh | 30 -
examples/deep_fsa/README.md | 24 -
examples/deep_fsa/deep_fsa.go | 804 ----
.../fig_deepleabra_fsa_net_3steps.png | Bin 430186 -> 0 bytes
examples/deep_fsa/fig_reber_grammar_fsa.png | Bin 16796 -> 0 bytes
examples/deep_fsa/fsa_env.go | 166 -
examples/hip/README.md | 74 -
examples/hip/best_2-20.diff | 405 ---
examples/hip/fig_ab_ac_data_catinf.png | Bin 76909 -> 0 bytes
examples/hip/hip.go | 995 -----
examples/hip/plots/fig_ab_ac_data_catinf.png | Bin 76909 -> 0 bytes
examples/hip/test_ab.tsv | 11 -
examples/hip/test_ac.tsv | 11 -
examples/hip/test_lure.tsv | 11 -
examples/hip/train_ab.tsv | 11 -
examples/hip/train_ac.tsv | 11 -
examples/hip_bench/README.md | 51 -
examples/hip_bench/def.params | 419 ---
examples/hip_bench/def_learning.png | Bin 73151 -> 0 bytes
examples/hip_bench/def_memory.png | Bin 72315 -> 0 bytes
examples/hip_bench/def_params.go | 298 --
examples/hip_bench/diff/1vs2_diffs_1.png | Bin 127522 -> 0 bytes
examples/hip_bench/diff/1vs2_diffs_2.png | Bin 286384 -> 0 bytes
examples/hip_bench/diff/1vs2_diffs_3.png | Bin 162629 -> 0 bytes
examples/hip_bench/diff/1vs3_diffs_1.png | Bin 111854 -> 0 bytes
examples/hip_bench/diff/1vs3_diffs_2.png | Bin 183293 -> 0 bytes
examples/hip_bench/diff/2vs4_diffs_1.png | Bin 428668 -> 0 bytes
examples/hip_bench/diff/2vs4_diffs_2.png | Bin 193182 -> 0 bytes
examples/hip_bench/diff/README.md | 36 -
.../hip_bench/diff/fig_alphacyc_diffs_1.png | Bin 240800 -> 0 bytes
.../hip_bench/diff/fig_alphacyc_diffs_2.png | Bin 86337 -> 0 bytes
.../hip_bench/diff/fig_alphacyc_diffs_3.png | Bin 33862 -> 0 bytes
.../hip_bench/diff/fig_netconfig_diffs.png | Bin 106270 -> 0 bytes
examples/hip_bench/hip_bench.go | 2754 --------------
examples/hip_bench/hip_bench.py | 2642 --------------
examples/hip_bench/orig_learning.png | Bin 73360 -> 0 bytes
examples/hip_bench/orig_memory.png | Bin 72489 -> 0 bytes
examples/hip_bench/orig_params.go | 257 --
examples/hip_bench/params.go | 148 -
.../hip_bench/testing_effect/def_params.go | 326 --
.../hip_bench/testing_effect/hip_bench_te.go | 3222 -----------------
examples/ra25/README.md | 64 -
examples/ra25/config.go | 152 -
examples/ra25/ra25.go | 777 ----
examples/ra25/random_5x5_25.tsv | 26 -
examples/ra25/random_5x5_25_gen.csv | 26 -
examples/ra25/typegen.go | 29 -
examples/sir2/README.md | 91 -
examples/sir2/enumgen.go | 48 -
examples/sir2/sir2.go | 765 ----
examples/sir2/sir2_env.go | 189 -
examples/sir2/typegen.go | 15 -
go.mod | 46 +-
go.sum | 119 +-
leabra/deep_layers.go | 58 +-
leabra/deep_paths.go | 28 +-
leabra/enumgen.go | 43 +
leabra/hip.go | 79 +-
leabra/layer.go | 155 +-
leabra/layerbase.go | 214 +-
leabra/layerparams.go | 230 ++
leabra/looper.go | 29 +-
leabra/network.go | 30 +-
leabra/networkbase.go | 26 +-
leabra/neuromod.go | 2 +-
leabra/params.go | 4 +-
leabra/path.go | 87 +-
leabra/pathbase.go | 123 +-
leabra/pathparams.go | 142 +
leabra/pbwm_layers.go | 104 +-
leabra/pbwm_net.go | 14 +-
leabra/pbwm_paths.go | 22 +-
leabra/rl.go | 62 +-
leabra/simstats.go | 682 ++++
leabra/typegen.go | 30 +-
80 files changed, 1658 insertions(+), 16092 deletions(-)
delete mode 100644 examples/bench/README.md
delete mode 100644 examples/bench/bench.go
delete mode 100644 examples/bench/bench_hardware.md
delete mode 100644 examples/bench/bench_results.md
delete mode 100755 examples/bench/run_bench.sh
delete mode 100755 examples/bench/run_hardware.sh
delete mode 100644 examples/deep_fsa/README.md
delete mode 100644 examples/deep_fsa/deep_fsa.go
delete mode 100644 examples/deep_fsa/fig_deepleabra_fsa_net_3steps.png
delete mode 100644 examples/deep_fsa/fig_reber_grammar_fsa.png
delete mode 100644 examples/deep_fsa/fsa_env.go
delete mode 100644 examples/hip/README.md
delete mode 100644 examples/hip/best_2-20.diff
delete mode 100644 examples/hip/fig_ab_ac_data_catinf.png
delete mode 100644 examples/hip/hip.go
delete mode 100644 examples/hip/plots/fig_ab_ac_data_catinf.png
delete mode 100644 examples/hip/test_ab.tsv
delete mode 100644 examples/hip/test_ac.tsv
delete mode 100644 examples/hip/test_lure.tsv
delete mode 100644 examples/hip/train_ab.tsv
delete mode 100644 examples/hip/train_ac.tsv
delete mode 100644 examples/hip_bench/README.md
delete mode 100644 examples/hip_bench/def.params
delete mode 100644 examples/hip_bench/def_learning.png
delete mode 100644 examples/hip_bench/def_memory.png
delete mode 100644 examples/hip_bench/def_params.go
delete mode 100644 examples/hip_bench/diff/1vs2_diffs_1.png
delete mode 100644 examples/hip_bench/diff/1vs2_diffs_2.png
delete mode 100644 examples/hip_bench/diff/1vs2_diffs_3.png
delete mode 100644 examples/hip_bench/diff/1vs3_diffs_1.png
delete mode 100644 examples/hip_bench/diff/1vs3_diffs_2.png
delete mode 100644 examples/hip_bench/diff/2vs4_diffs_1.png
delete mode 100644 examples/hip_bench/diff/2vs4_diffs_2.png
delete mode 100644 examples/hip_bench/diff/README.md
delete mode 100644 examples/hip_bench/diff/fig_alphacyc_diffs_1.png
delete mode 100644 examples/hip_bench/diff/fig_alphacyc_diffs_2.png
delete mode 100644 examples/hip_bench/diff/fig_alphacyc_diffs_3.png
delete mode 100644 examples/hip_bench/diff/fig_netconfig_diffs.png
delete mode 100644 examples/hip_bench/hip_bench.go
delete mode 100755 examples/hip_bench/hip_bench.py
delete mode 100644 examples/hip_bench/orig_learning.png
delete mode 100644 examples/hip_bench/orig_memory.png
delete mode 100644 examples/hip_bench/orig_params.go
delete mode 100644 examples/hip_bench/params.go
delete mode 100644 examples/hip_bench/testing_effect/def_params.go
delete mode 100644 examples/hip_bench/testing_effect/hip_bench_te.go
delete mode 100644 examples/ra25/README.md
delete mode 100644 examples/ra25/config.go
delete mode 100644 examples/ra25/ra25.go
delete mode 100644 examples/ra25/random_5x5_25.tsv
delete mode 100644 examples/ra25/random_5x5_25_gen.csv
delete mode 100644 examples/ra25/typegen.go
delete mode 100644 examples/sir2/README.md
delete mode 100644 examples/sir2/enumgen.go
delete mode 100644 examples/sir2/sir2.go
delete mode 100644 examples/sir2/sir2_env.go
delete mode 100644 examples/sir2/typegen.go
create mode 100644 leabra/layerparams.go
create mode 100644 leabra/pathparams.go
create mode 100644 leabra/simstats.go
diff --git a/examples/bench/README.md b/examples/bench/README.md
deleted file mode 100644
index bedd082..0000000
--- a/examples/bench/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# bench
-
-This is a standard benchmarking system for leabra. It runs 5 layer fully connected networks of various sizes, with the number of events and epochs adjusted to take roughly an equal amount of time overall.
-
-First, build the executable:
-
-```sh
-$ go build
-```
-
-* `run_bench.sh` is a script that runs standard configurations -- can pass additional args like `threads=2` to test different threading levels.
-
-* `bench_results.md` has the algorithmic / implementational history for different versions of the code, on the same platform (macbook pro).
-
-* `run_hardware.sh` is a script specifically for hardware testing, running standard 1, 2, 4 threads for each network size, and only reporting the final result, in the form shown in:
-
-* `bench_hardware.md` has standard results for different hardware.
-
-
diff --git a/examples/bench/bench.go b/examples/bench/bench.go
deleted file mode 100644
index 71272df..0000000
--- a/examples/bench/bench.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright (c) 2019, The Emergent Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// bench runs a benchmark model with 5 layers (3 hidden, Input, Output) all of the same
-// size, for benchmarking different size networks. These are not particularly realistic
-// models for actual applications (e.g., large models tend to have much more topographic
-// patterns of connectivity and larger layers with fewer connections), but they are
-// easy to run..
-package main
-
-import (
- "flag"
- "fmt"
- "math"
- "math/rand"
- "os"
- "time"
-
- "cogentcore.org/core/base/timer"
- "cogentcore.org/lab/base/randx"
- "github.com/emer/emergent/v2/params"
- "github.com/emer/emergent/v2/patgen"
- "github.com/emer/emergent/v2/paths"
- "github.com/emer/etensor/tensor/table"
- "github.com/emer/leabra/v2/leabra"
-)
-
-var Net *leabra.Network
-var Pats *table.Table
-var EpcLog *table.Table
-var Silent = false // non-verbose mode -- just reports result
-
-var ParamSets = params.Sets{
- "Base": {
- {Sel: "Path", Desc: "norm and momentum on works better, but wt bal is not better for smaller nets",
- Params: params.Params{
- "Path.Learn.Norm.On": "true",
- "Path.Learn.Momentum.On": "true",
- "Path.Learn.WtBal.On": "false",
- }},
- {Sel: "Layer", Desc: "using default 1.8 inhib for all of network -- can explore",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "1.8",
- "Layer.Act.Gbar.L": "0.2", // original value -- makes HUGE diff on perf!
- }},
- {Sel: "#Output", Desc: "output definitely needs lower inhib -- true for smaller layers in general",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "1.4",
- }},
- {Sel: ".Back", Desc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
- Params: params.Params{
- "Path.WtScale.Rel": "0.2",
- }},
- },
-}
-
-func ConfigNet(net *leabra.Network, units int) {
- squn := int(math.Sqrt(float64(units)))
- shp := []int{squn, squn}
-
- inLay := net.AddLayer("Input", shp, leabra.InputLayer)
- hid1Lay := net.AddLayer("Hidden1", shp, leabra.SuperLayer)
- hid2Lay := net.AddLayer("Hidden2", shp, leabra.SuperLayer)
- hid3Lay := net.AddLayer("Hidden3", shp, leabra.SuperLayer)
- outLay := net.AddLayer("Output", shp, leabra.TargetLayer)
-
- net.ConnectLayers(inLay, hid1Lay, paths.NewFull(), leabra.ForwardPath)
- net.ConnectLayers(hid1Lay, hid2Lay, paths.NewFull(), leabra.ForwardPath)
- net.ConnectLayers(hid2Lay, hid3Lay, paths.NewFull(), leabra.ForwardPath)
- net.ConnectLayers(hid3Lay, outLay, paths.NewFull(), leabra.ForwardPath)
-
- net.ConnectLayers(outLay, hid3Lay, paths.NewFull(), leabra.BackPath)
- net.ConnectLayers(hid3Lay, hid2Lay, paths.NewFull(), leabra.BackPath)
- net.ConnectLayers(hid2Lay, hid1Lay, paths.NewFull(), leabra.BackPath)
-
- net.Defaults()
- net.ApplyParams(ParamSets["Base"], false) // no msg
- net.Build()
- net.InitWeights()
-}
-
-func ConfigPats(dt *table.Table, pats, units int) {
- squn := int(math.Sqrt(float64(units)))
- shp := []int{squn, squn}
- // fmt.Printf("shape: %v\n", shp)
-
- dt.AddStringColumn("Name")
- dt.AddFloat32TensorColumn("Input", shp)
- dt.AddFloat32TensorColumn("Output", shp)
- dt.SetNumRows(pats)
-
- // note: actually can learn if activity is .15 instead of .25
- // but C++ benchmark is for .25..
- nOn := units / 6
-
- patgen.PermutedBinaryRows(dt.Columns[1], nOn, 1, 0)
- patgen.PermutedBinaryRows(dt.Columns[2], nOn, 1, 0)
-}
-
-func ConfigEpcLog(dt *table.Table) {
- dt.AddIntColumn("Epoch")
- dt.AddFloat32Column("CosDiff")
- dt.AddFloat32Column("AvgCosDiff")
- dt.AddFloat32Column("SSE")
- dt.AddFloat32Column("Avg SSE")
- dt.AddFloat32Column("Count Err")
- dt.AddFloat32Column("Pct Err")
- dt.AddFloat32Column("Pct Cor")
- dt.AddFloat32Column("Hid1 ActAvg")
- dt.AddFloat32Column("Hid2 ActAvg")
- dt.AddFloat32Column("Out ActAvg")
-}
-
-func TrainNet(net *leabra.Network, pats, epcLog *table.Table, epcs int) {
- ctx := leabra.NewContext()
- net.InitWeights()
- np := pats.NumRows()
- porder := rand.Perm(np) // randomly permuted order of ints
-
- epcLog.SetNumRows(epcs)
-
- inLay := net.LayerByName("Input")
- hid1Lay := net.LayerByName("Hidden1")
- hid2Lay := net.LayerByName("Hidden2")
- outLay := net.LayerByName("Output")
-
- _ = hid1Lay
- _ = hid2Lay
-
- inPats, _ := pats.ColumnByName("Input")
- outPats, _ := pats.ColumnByName("Output")
-
- tmr := timer.Time{}
- tmr.Start()
- for epc := 0; epc < epcs; epc++ {
- randx.PermuteInts(porder)
- outCosDiff := float32(0)
- cntErr := 0
- sse := 0.0
- avgSSE := 0.0
- for pi := 0; pi < np; pi++ {
- ppi := porder[pi]
- inp := inPats.SubSpace([]int{ppi})
- outp := outPats.SubSpace([]int{ppi})
-
- inLay.ApplyExt(inp)
- outLay.ApplyExt(outp)
-
- net.AlphaCycInit(true)
- ctx.AlphaCycStart()
- for qtr := 0; qtr < 4; qtr++ {
- for cyc := 0; cyc < ctx.CycPerQtr; cyc++ {
- net.Cycle(ctx)
- ctx.CycleInc()
- }
- net.QuarterFinal(ctx)
- ctx.QuarterInc()
- }
- net.DWt()
- net.WtFromDWt()
- outCosDiff += outLay.CosDiff.Cos
- pSSE, pAvgSSE := outLay.MSE(0.5)
- sse += pSSE
- avgSSE += pAvgSSE
- if pSSE != 0 {
- cntErr++
- }
- }
- outCosDiff /= float32(np)
- sse /= float64(np)
- avgSSE /= float64(np)
- pctErr := float64(cntErr) / float64(np)
- pctCor := 1 - pctErr
- // fmt.Printf("epc: %v \tCosDiff: %v \tAvgCosDif: %v\n", epc, outCosDiff, outLay.CosDiff.Avg)
- epcLog.SetFloat("Epoch", epc, float64(epc))
- epcLog.SetFloat("CosDiff", epc, float64(outCosDiff))
- epcLog.SetFloat("AvgCosDiff", epc, float64(outLay.CosDiff.Avg))
- epcLog.SetFloat("SSE", epc, sse)
- epcLog.SetFloat("Avg SSE", epc, avgSSE)
- epcLog.SetFloat("Count Err", epc, float64(cntErr))
- epcLog.SetFloat("Pct Err", epc, pctErr)
- epcLog.SetFloat("Pct Cor", epc, pctCor)
- epcLog.SetFloat("Hid1 ActAvg", epc, float64(hid1Lay.Pools[0].ActAvg.ActPAvgEff))
- epcLog.SetFloat("Hid2 ActAvg", epc, float64(hid2Lay.Pools[0].ActAvg.ActPAvgEff))
- epcLog.SetFloat("Out ActAvg", epc, float64(outLay.Pools[0].ActAvg.ActPAvgEff))
- }
- tmr.Stop()
- if Silent {
- fmt.Printf("%v\n", tmr.Total)
- } else {
- fmt.Printf("Took %v for %v epochs, avg per epc: m%6.4g\n", tmr.Total, epcs, float64(tmr.Total)/float64(int(time.Second)*epcs))
- }
-}
-
-func main() {
- var epochs int
- var pats int
- var units int
-
- flag.Usage = func() {
- fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s:\n", os.Args[0])
- flag.PrintDefaults()
- }
-
- // process command args
- flag.IntVar(&epochs, "epochs", 2, "number of epochs to run")
- flag.IntVar(&pats, "pats", 10, "number of patterns per epoch")
- flag.IntVar(&units, "units", 100, "number of units per layer -- uses NxN where N = sqrt(units)")
- flag.BoolVar(&Silent, "silent", false, "only report the final time")
- flag.Parse()
-
- if !Silent {
- fmt.Printf("Running bench with: %v epochs, %v pats, %v units\n", epochs, pats, units)
- }
-
- Net = leabra.NewNetwork("Bench")
- ConfigNet(Net, units)
-
- Pats = &table.Table{}
- ConfigPats(Pats, pats, units)
-
- EpcLog = &table.Table{}
- ConfigEpcLog(EpcLog)
-
- TrainNet(Net, Pats, EpcLog, epochs)
-
- EpcLog.SaveCSV("bench_epc.dat", ',', table.Headers)
-}
diff --git a/examples/bench/bench_hardware.md b/examples/bench/bench_hardware.md
deleted file mode 100644
index fa74eae..0000000
--- a/examples/bench/bench_hardware.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# Hardware benchmarks
-
-NOTE: generally taking the best of 2 runs. Not sure how the mac allocates priority but it often slows things down after a short while if they're taking a lot of CPU. Great for some things but not for this!
-
-## MacBook Pro 16-inch, 2021: Apple M1 Max, 64 GB LPDDR5 memory, Go 1.17.5
-
-```
-Size 1 thr 2 thr 4 thr
----------------------------------
-SMALL: 0.79 1.63 1.96
-MEDIUM: 1.03 1.18 1.20
-LARGE: 6.83 5.04 3.90
-HUGE: 10.60 7.49 5.54
-GINORM: 17.1 12.3 9.07
-```
-
-## MacBook Pro 16-inch, 2019: 2.4 Ghz 8-Core Intel Core i9, 64 GB 2667 Mhz DDR4 memory
-
-## Go 1.17.5 -- uses registers to pass args, is tiny bit faster
-
-```
-Size 1 thr 2 thr 4 thr
----------------------------------
-SMALL: 1.16 3.01 3.29
-MEDIUM: 1.51 2.09 2.00
-LARGE: 9.40 7.13 5.26
-HUGE: 17.3 12.2 9.15
-GINORM: 26.1 19.8 15.3
-```
-
-## Go 1.15.4
-
-```
-Size 1 thr 2 thr 4 thr
----------------------------------
-SMALL: 1.25 3.31 3.51
-MEDIUM: 1.59 2.26 2.07
-LARGE: 9.43 7.01 5.35
-HUGE: 18.6 12.9 9.66
-GINORM: 23.1 17.4 13.2
-```
-
-## hpc2: Dual AMD EPYC 7532 CPUs (128 threads per node), and 256 GB of RAM each, Go 1.15.6
-
-```
-Size 1 thr 2 thr 4 thr
----------------------------------
-SMALL: 1.29 4.54 5.7
-MEDIUM: 1.7 4.32 4.25
-LARGE: 11.2 13.4 10.3
-HUGE: 22.1 18.8 13.6
-GINORM: 26.6 22.6 16.9
-```
-
-## crick: Dual Intel Xeon E5-2620 V4 @ 2.10 Ghz, 64 GB RAM, Go 1.15.6
-
-```
-Size 1 thr 2 thr 4 thr
----------------------------------
-SMALL: 1.91 5.2 7.18
-MEDIUM: 2.28 3.62 5.19
-LARGE: 12.1 14.4 12.1
-HUGE: 24.0 26.5 19.2
-GINORM: 30.0 33.5 24.5
-```
-
-## blanca: Dual Intel Xeon E5-2667 V2 @3.3 Ghz, 64 GB Ram, Go 1.13.4
-
-```
-Size 1 thr 2 thr 4 thr
----------------------------------
-SMALL: 1.6 5.07 5.99
-MEDIUM: 2.04 4.64 4.68
-LARGE: 11.2 12.3 9.52
-HUGE: 21.2 21.7 15.2
-GINORM: 27.0 28.5 21.4
-```
-
diff --git a/examples/bench/bench_results.md b/examples/bench/bench_results.md
deleted file mode 100644
index 6e03b61..0000000
--- a/examples/bench/bench_results.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# Benchmark results
-
-5-layer networks, with same # of units per layer: SMALL = 25; MEDIUM = 100; LARGE = 625; HUGE = 1024; GINORM = 2048, doing full learning, with default params, including momentum, dwtnorm, and weight balance.
-
-Results are total time for 1, 2, 4 threads, on my macbook.
-
-## C++ Emergent
-
-```
-* Size 1 thr 2 thr 4 thr
----------------------------------
-* SMALL: 2.383 2.248 2.042
-* MEDIUM: 2.535 1.895 1.263
-* LARGE: 19.627 8.559 8.105
-* HUGE: 24.119 11.803 11.897
-* GINOR: 35.334 24.768 17.794
-```
-
-## Go v1.15, 8/21/2020, leabra v1.1.5
-
-Basically the same results as below, except a secs or so faster due to faster macbook pro. Layer.Act.Gbar.L = 0.2 instead of new default of 0.1 makes a *huge* difference!
-
-```
-* Size 1 thr 2 thr 4 thr
----------------------------------
-* SMALL: 1.27 3.53 3.64
-* MEDIUM: 1.61 2.31 2.09
-* LARGE: 9.56 7.48 5.44
-* HUGE: 19.17 13.3 9.62
-* GINOR: 23.61 17.94 13.24
-```
-
-```
-$ ./bench -epochs 5 -pats 20 -units 625 -threads=1
-Took 9.845 secs for 5 epochs, avg per epc: 1.969
-TimerReport: BenchNet, NThreads: 1
- Function Name Total Secs Pct
- ActFmG 1.824 18.59
- AvgMaxAct 0.09018 0.919
- AvgMaxGe 0.08463 0.8624
- CyclePost 0.002069 0.02108
- DWt 2.11 21.51
- GFmInc 0.3974 4.05
- InhibFmGeAct 0.107 1.091
- QuarterFinal 0.004373 0.04457
- SendGDelta 3.117 31.77
- WtBalFmWt 1.285e-05 0.0001309
- WtFmDWt 2.075 21.15
- Total 9.813
-```
-
-```
-$ ./bench -epochs 5 -pats 10 -units 1024 -threads=1
-Took 19.34 secs for 5 epochs, avg per epc: 3.868
-TimerReport: BenchNet, NThreads: 1
- Function Name Total Secs Pct
- ActFmG 1.639 8.483
- AvgMaxAct 0.07904 0.4091
- AvgMaxGe 0.07551 0.3909
- CyclePost 0.001287 0.006663
- DWt 3.669 18.99
- GFmInc 0.3667 1.898
- InhibFmGeAct 0.09876 0.5112
- QuarterFinal 0.004008 0.02075
- SendGDelta 10.21 52.87
- WtBalFmWt 1.2e-05 6.211e-05
- WtFmDWt 3.172 16.42
- Total 19.32
-```
-
-## Go emergent 6/2019 after a few bugfixes, etc: significantly faster!
-
-```
-* SMALL: 1.46 3.63 3.96
-* MEDIUM: 1.87 2.46 2.32
-* LARGE: 11.38 8.48 6.03
-* HUGE: 19.53 14.52 11.29
-* GINOR: 26.93 20.66 15.71
-```
-
-now really just as fast overall, if not faster, than C++!
-
-note: only tiny changes after adding IsOff check for all neuron-level computation.
-
-## Go emergent, per-layer threads, thread pool, optimized range synapse code
-
-```
-* SMALL: 1.486 4.297 4.644
-* MEDIUM: 2.864 3.312 3.037
-* LARGE: 25.09 20.06 16.88
-* HUGE: 31.39 23.85 19.53
-* GINOR: 42.18 31.29 26.06
-```
-
-also: not too much diff for wt bal off!
-
-## Go emergent, per-layer threads, thread pool
-
-```
-* SMALL: 1.2180 4.25328 4.66991
-* MEDIUM: 3.392145 3.631261 3.38302
-* LARGE: 31.27893 20.91189 17.828935
-* HUGE: 42.0238 22.64010 18.838019
-* GINOR: 65.67025 35.54374 27.56567
-```
-
-## Go emergent, per-layer threads, no thread pool (de-novo threads)
-
-```
-* SMALL: 1.2180 3.548349 4.08908
-* MEDIUM: 3.392145 3.46302 3.187831
-* LARGE: 31.27893 22.20344 18.797924
-* HUGE: 42.0238 29.00472 24.53498
-* GINOR: 65.67025 45.09239 36.13568
-```
-
-# Per Function
-
-Focusing on the LARGE case:
-
-C++: `emergent -nogui -ni -p leabra_bench.proj epochs=5 pats=20 units=625 n_threads=1`
-
-```
-BenchNet_5lay timing report:
-function time percent
-Net_Input 8.91 43.1
-Net_InInteg 0.71 3.43
-Activation 1.95 9.43
-Weight_Change 4.3 20.8
-Weight_Update 2.85 13.8
-Net_InStats 0.177 0.855
-Inhibition 0.00332 0.016
-Act_post 1.63 7.87
-Cycle_Stats 0.162 0.781
- total: 20.7
-```
-
-Go: `./bench -epochs 5 -pats 20 -units 625 -threads=1`
-
-```
-TimerReport: BenchNet, NThreads: 1
- Function Name Total Secs Pct
- ActFmG 2.121 8.223
- AvgMaxAct 0.1003 0.389
- AvgMaxGe 0.1012 0.3922
- DWt 5.069 19.65
- GeFmGeInc 0.3249 1.259
- InhibFmGeAct 0.08498 0.3295
- QuarterFinal 0.003773 0.01463
- SendGeDelta 14.36 55.67
- WtBalFmWt 0.1279 0.4957
- WtFmDWt 3.501 13.58
- Total 25.79
-```
-
-```
-TimerReport: BenchNet, NThreads: 1
- Function Name Total Secs Pct
- ActFmG 2.119 7.074
- AvgMaxAct 0.1 0.3339
- AvgMaxGe 0.102 0.3407
- DWt 5.345 17.84
- GeFmGeInc 0.3348 1.118
- InhibFmGeAct 0.0842 0.2811
- QuarterFinal 0.004 0.01351
- SendGeDelta 17.93 59.87
- WtBalFmWt 0.1701 0.568
- WtFmDWt 3.763 12.56
- Total 29.96
-```
-
-* trimmed 4+ sec from SendGeDelta by avoiding range checks using sub-slices
-* was very sensitive to size of Synapse struct
-
-
diff --git a/examples/bench/run_bench.sh b/examples/bench/run_bench.sh
deleted file mode 100755
index bbabadd..0000000
--- a/examples/bench/run_bench.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# typically run with -threads=N arg as follows:
-# $./run_bench.sh -threads=2
-
-exe=./bench
-
-echo " "
-echo "=============================================================="
-echo "SMALL Network (5 x 25 units)"
-$exe -epochs 10 -pats 100 -units 25 $*
-echo " "
-echo "=============================================================="
-echo "MEDIUM Network (5 x 100 units)"
-$exe -epochs 3 -pats 100 -units 100 $*
-echo " "
-echo "=============================================================="
-echo "LARGE Network (5 x 625 units)"
-$exe -epochs 5 -pats 20 -units 625 $*
-echo " "
-echo "=============================================================="
-echo "HUGE Network (5 x 1024 units)"
-$exe -epochs 5 -pats 10 -units 1024 $*
-echo " "
-echo "=============================================================="
-echo "GINORMOUS Network (5 x 2048 units)"
-$exe -epochs 2 -pats 10 -units 2048 $*
-# echo " "
-# echo "=============================================================="
-# echo "GAZILIOUS Network (5 x 4096 units)"
-# $exe -nogui -ni -p leabra_bench.proj epochs=1 pats=10 units=4096 $*
-
diff --git a/examples/bench/run_hardware.sh b/examples/bench/run_hardware.sh
deleted file mode 100755
index 9d12179..0000000
--- a/examples/bench/run_hardware.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Use this for generating standard results for hardware
-
-exe=./bench
-
-echo " "
-echo "Size 1 thr 2 thr 4 thr"
-echo "---------------------------------"
-echo "SMALL: "
-$exe -silent -epochs 10 -pats 100 -units 25 $*
-$exe -silent -epochs 10 -pats 100 -units 25 -threads=2 $*
-$exe -silent -epochs 10 -pats 100 -units 25 -threads=4 $*
-echo "MEDIUM: "
-$exe -silent -epochs 3 -pats 100 -units 100 $*
-$exe -silent -epochs 3 -pats 100 -units 100 -threads=2 $*
-$exe -silent -epochs 3 -pats 100 -units 100 -threads=4 $*
-echo "LARGE: "
-$exe -silent -epochs 5 -pats 20 -units 625 $*
-$exe -silent -epochs 5 -pats 20 -units 625 -threads=2 $*
-$exe -silent -epochs 5 -pats 20 -units 625 -threads=4 $*
-echo "HUGE: "
-$exe -silent -epochs 5 -pats 10 -units 1024 $*
-$exe -silent -epochs 5 -pats 10 -units 1024 -threads=2 $*
-$exe -silent -epochs 5 -pats 10 -units 1024 -threads=4 $*
-echo "GINORM: "
-$exe -silent -epochs 2 -pats 10 -units 2048 $*
-$exe -silent -epochs 2 -pats 10 -units 2048 -threads=2 $*
-$exe -silent -epochs 2 -pats 10 -units 2048 -threads=4 $*
-
diff --git a/examples/deep_fsa/README.md b/examples/deep_fsa/README.md
deleted file mode 100644
index 51196e1..0000000
--- a/examples/deep_fsa/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-This example illustrates and tests the predictive learning abilities of the `deep` leabra biologically based model. It uses a classical test of sequence learning [Reber, 1967; Cleeremans & McClelland, 1991](#references) that was explored using simple recurrent networks (SRNs) [Elman, 1990; Jordan, 1989](#references). As shown in Figure 1, sequences were generated according to a finite state automaton (FSA) grammar, as used in implicit sequence learning experiments by Reber (1967). Each node has a 50% random branching to two different other nodes, and the labels generated by node transitions are ambiguous (except for the B=begin and E=end states). Thus, many iterations through the grammar are required to infer the systematic underlying grammar, and it is actually a reasonably challenging task for SRNs, and people, to learn, providing an important validation of the power of these predictive learning mechanisms.
-
-
-
-**Figure 1:** Finite state automaton (FSA) grammar used in implicit sequential learning exerpiments (Reber, 1967) and in early simple recurrent networks (SRNs) (Cleeremans \& McClelland, 1991). It generates a sequence of letters according to the link transitioned between state nodes, with a 50\% random choice for each node of which outgoing link to follow. Each letter (except for the B=begin and E=end) appears at 2 different points in the grammar, making them fully ambiguous. This combination of randomness and ambiguity makes it challenging for a learning system to infer the true underlying nature of the grammar.
-
-
-
-
-**Figure 2:** Predictive learning model applied to the FSA grammar shown in previous figure, showing the prediction state (end of the *minus* phase, or the first 75 msec of each alpha cycle) for the first 3 steps of a sequence, after having learned the grammar, followed by the plus phase after the third step. The `Input` layer provides the 5IB drivers for the corresponding `HiddenP` pulvinar layer, and the `Targets` layer is purely for display, showing the two valid possible labels that could have been predicted. The model's prediction is scored as accurate if either or both targets are activated. Computationally, the model is similar to the SRN, where the `CT` layer that drives the prediction over the pulvinar encodes the previous time step (alpha cycle) activation state, due to the phasic bursting of the 5IB neurons that drive CT updating. Note how the CT layer in b) reflects the Hidden activation state in a), and likewise for c) reflecting b) -- this is evident because we're using one-to-one connectivity between Hidden and HiddenCT layers (which works well in general, along with full lateral connectivity within the CT layer). Thus, even though the correct answer is always present on the Input layer for each step, the CT layer is nevertheless attempting to predict this Input based on the information from the prior time step. **a)** In the first step, the B label is unambiguous and easily predicted (based on prior E context). **b)** In the 2nd step, the network correctly guesses that the T label will come next, but there is a faint activation of the other P alternative, which is also activated sometimes based on prior learning history and associated minor weight tweaks. **c)** In the 3rd step, both S and X are equally predicted. **d)** In the *plus* phase for this trial, only the X present in the Input drives HiddenP activations, and the projections from pulvinar back to the cortex convey both the minus-phase prediction and plus-phase actual input. 
You can see one neuron visibly changes is activation as a result (and all neurons experience much smaller changes), and learning in all these cortical (Hidden) layer neurons is a function of their local temporal difference between minus and plus phases.
-
-The model (Figure 2) required around 20 epochs of 25 sequences through the grammar to learn it to the point of making no prediction errors for 5 epochs in a row, to guarantee that it had completely learned it. A few steps through a sequence are shown in the figure, illustrating how the CT context layer, which drives the P pulvinar layer prediction, represents the information present on the *previous* alpha cycle time step. Thus, the network is attempting to predict the actual Input state, which then drives the pulvinar plus phase activation at the end of each alpha cycle, as shown in the last panel. On each trial, the difference between plus and minus phases locally over each cortical neuron drives its synaptic weight changes, which accumulate over trials to accurately learn to predict the sequences to the extent possible given their probabilistic nature.
-
-# References
-
-* Cleeremans, A., & McClelland, J. L. (1991). Learning the structure of event sequences. Journal of Experimental Psychology: General, 120, 235–253.
-
-* Elman, J. L. (1990). Finding structure in time. Cognitive Science, 14(2), 179–211.
-
-* Jordan, M. I. (1989). Serial Order: A Parallel, Distributed Processing Approach. In J. L. Elman & D. E. Rumelhart (Eds.), Advances in Connectionist Theory: Speech. Hillsdale, NJ: Lawrence Erlbaum Associates.
-
-* Reber, A. S. (1967). Implicit Learning of Artificial Grammars. Journal of Verbal Learning and
-Verbal Behavior, 6, 855–863.
-
diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go
deleted file mode 100644
index 2b2b1a1..0000000
--- a/examples/deep_fsa/deep_fsa.go
+++ /dev/null
@@ -1,804 +0,0 @@
-// Copyright (c) 2019, The Emergent Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// deep_fsa runs a DeepLeabra network on the classic Reber grammar
-// finite state automaton problem.
-package main
-
-//go:generate core generate -add-types
-
-import (
- "log"
- "os"
-
- "cogentcore.org/core/core"
- "cogentcore.org/core/enums"
- "cogentcore.org/core/icons"
- "cogentcore.org/core/math32/vecint"
- "cogentcore.org/core/tree"
- "cogentcore.org/lab/base/mpi"
- "cogentcore.org/lab/base/randx"
- "github.com/emer/emergent/v2/econfig"
- "github.com/emer/emergent/v2/egui"
- "github.com/emer/emergent/v2/elog"
- "github.com/emer/emergent/v2/emer"
- "github.com/emer/emergent/v2/env"
- "github.com/emer/emergent/v2/estats"
- "github.com/emer/emergent/v2/etime"
- "github.com/emer/emergent/v2/looper"
- "github.com/emer/emergent/v2/netview"
- "github.com/emer/emergent/v2/params"
- "github.com/emer/emergent/v2/paths"
- "github.com/emer/etensor/tensor/table"
- "github.com/emer/leabra/v2/leabra"
-)
-
-func main() {
- sim := &Sim{}
- sim.New()
- sim.ConfigAll()
- if sim.Config.GUI {
- sim.RunGUI()
- } else {
- sim.RunNoGUI()
- }
-}
-
-// ParamSets is the default set of parameters.
-// Base is always applied, and others can be optionally
-// selected to apply on top of that.
-var ParamSets = params.Sets{
- "Base": {
- {Sel: "Path", Desc: "norm and momentum on is critical, wt bal not as much but fine",
- Params: params.Params{
- "Path.Learn.Norm.On": "true",
- "Path.Learn.Momentum.On": "true",
- "Path.Learn.WtBal.On": "true",
- }},
- {Sel: "Layer", Desc: "using default 1.8 inhib for hidden layers",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "1.8",
- "Layer.Learn.AvgL.Gain": "1.5", // key to lower relative to 2.5
- "Layer.Act.Gbar.L": "0.1", // lower leak = better
- "Layer.Inhib.ActAvg.Fixed": "true", // simpler to have everything fixed, for replicability
- "Layer.Act.Init.Decay": "0", // essential to have all layers no decay
- }},
- {Sel: ".SuperLayer", Desc: "fix avg act",
- Params: params.Params{
- "Layer.Inhib.ActAvg.Fixed": "true",
- }},
- {Sel: ".BackPath", Desc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
- Params: params.Params{
- "Path.WtScale.Rel": "0.2",
- }},
- {Sel: ".PulvinarLayer", Desc: "standard weight is .3 here for larger distributed reps. no learn",
- Params: params.Params{
- "Layer.Pulvinar.DriveScale": "0.8", // using .8 for localist layer
- }},
- {Sel: ".CTCtxtPath", Desc: "no weight balance on CT context paths -- makes a diff!",
- Params: params.Params{
- "Path.Learn.WtBal.On": "false", // this should be true for larger DeepLeabra models -- e.g., sg..
- }},
- {Sel: ".CTFromSuper", Desc: "initial weight = 0.5 much better than 0.8",
- Params: params.Params{
- "Path.WtInit.Mean": "0.5",
- }},
- {Sel: ".Input", Desc: "input layers need more inhibition",
- Params: params.Params{
- "Layer.Inhib.Layer.Gi": "2.0",
- "Layer.Inhib.ActAvg.Init": "0.15",
- }},
- {Sel: "#HiddenPToHiddenCT", Desc: "critical to make this small so deep context dominates",
- Params: params.Params{
- "Path.WtScale.Rel": "0.05",
- }},
- {Sel: "#HiddenCTToHiddenCT", Desc: "testing",
- Params: params.Params{
- "Path.Learn.WtBal.On": "false",
- }},
- },
-}
-
-// ParamConfig has config parameters related to sim params
-type ParamConfig struct {
-
- // network parameters
- Network map[string]any
-
- // size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden1Size vecint.Vector2i `default:"{'X':7,'Y':7}" nest:"+"`
-
- // size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden2Size vecint.Vector2i `default:"{'X':7,'Y':7}" nest:"+"`
-
- // Extra Param Sheet name(s) to use (space separated if multiple).
- // must be valid name as listed in compiled-in params or loaded params
- Sheet string
-
- // extra tag to add to file names and logs saved from this run
- Tag string
-
- // user note -- describe the run params etc -- like a git commit message for the run
- Note string
-
- // Name of the JSON file to input saved parameters from.
- File string `nest:"+"`
-
- // Save a snapshot of all current param and config settings
- // in a directory named params_ (or _good if Good is true), then quit.
- // Useful for comparing to later changes and seeing multiple views of current params.
- SaveAll bool `nest:"+"`
-
- // For SaveAll, save to params_good for a known good params state.
- // This can be done prior to making a new release after all tests are passing.
- // add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+"`
-}
-
-// RunConfig has config parameters related to running the sim
-type RunConfig struct {
- // starting run number, which determines the random seed.
- // runs counts from there, can do all runs in parallel by launching
- // separate jobs with each run, runs = 1.
- Run int `default:"0"`
-
- // total number of runs to do when running Train
- NRuns int `default:"5" min:"1"`
-
- // total number of epochs per run
- NEpochs int `default:"100"`
-
- // stop run after this number of perfect, zero-error epochs.
- NZero int `default:"2"`
-
- // total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `default:"100"`
-
- // how often to run through all the test patterns, in terms of training epochs.
- // can use 0 or -1 for no testing.
- TestInterval int `default:"5"`
-
- // how frequently (in epochs) to compute PCA on hidden representations
- // to measure variance?
- PCAInterval int `default:"5"`
-
- // if non-empty, is the name of weights file to load at start
- // of first run, for testing.
- StartWts string
-}
-
-// LogConfig has config parameters related to logging data
-type LogConfig struct {
-
- // if true, save final weights after each run
- SaveWeights bool
-
- // if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `default:"true" nest:"+"`
-
- // if true, save run log to file, as .run.tsv typically
- Run bool `default:"true" nest:"+"`
-
- // if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `default:"false" nest:"+"`
-
- // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `default:"false" nest:"+"`
-
- // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `default:"false" nest:"+"`
-
- // if true, save network activation etc data from testing trials,
- // for later viewing in netview.
- NetData bool
-}
-
-// Config is a standard Sim config -- use as a starting point.
-type Config struct {
-
- // specify include files here, and after configuration,
- // it contains list of include files added.
- Includes []string
-
- // open the GUI -- does not automatically run -- if false,
- // then runs automatically and quits.
- GUI bool `default:"true"`
-
- // log debugging information
- Debug bool
-
- // InputNames are names of input letters
- InputNames []string
-
- // InputNameMap has indexes of InputNames
- InputNameMap map[string]int
-
- // parameter related configuration options
- Params ParamConfig `display:"add-fields"`
-
- // sim running related configuration options
- Run RunConfig `display:"add-fields"`
-
- // data logging related configuration options
- Log LogConfig `display:"add-fields"`
-}
-
-func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
-
-// Sim encapsulates the entire simulation model, and we define all the
-// functionality as methods on this struct. This structure keeps all relevant
-// state information organized and available without having to pass everything around
-// as arguments to methods, and provides the core GUI interface (note the view tags
-// for the fields which provide hints to how things should be displayed).
-type Sim struct {
-
- // simulation configuration parameters -- set by .toml config file and / or args
- Config Config `new-window:"+"`
-
- // the network -- click to view / edit parameters for layers, paths, etc
- Net *leabra.Network `new-window:"+" display:"no-inline"`
-
- // network parameter management
- Params emer.NetParams `display:"add-fields"`
-
- // contains looper control loops for running sim
- Loops *looper.Stacks `new-window:"+" display:"no-inline"`
-
- // contains computed statistic values
- Stats estats.Stats `new-window:"+"`
-
- // Contains all the logs and information about the logs.'
- Logs elog.Logs `new-window:"+"`
-
- // the training patterns to use
- Patterns *table.Table `new-window:"+" display:"no-inline"`
-
- // Environments
- Envs env.Envs `new-window:"+" display:"no-inline"`
-
- // leabra timing parameters and state
- Context leabra.Context `new-window:"+"`
-
- // netview update parameters
- ViewUpdate netview.ViewUpdate `display:"add-fields"`
-
- // manages all the gui elements
- GUI egui.GUI `display:"-"`
-
- // a list of random seeds to use for each run
- RandSeeds randx.Seeds `display:"-"`
-}
-
-// New creates new blank elements and initializes defaults
-func (ss *Sim) New() {
- econfig.Config(&ss.Config, "config.toml")
- ss.Config.InputNames = []string{"B", "T", "S", "X", "V", "P", "E"}
- ss.Net = leabra.NewNetwork("RA25")
- ss.Params.Config(ParamSets, ss.Config.Params.Sheet, ss.Config.Params.Tag, ss.Net)
- ss.Stats.Init()
- ss.Patterns = &table.Table{}
- ss.RandSeeds.Init(100) // max 100 runs
- ss.InitRandSeed(0)
- ss.Context.Defaults()
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// Configs
-
-// ConfigAll configures all the elements using the standard functions
-func (ss *Sim) ConfigAll() {
- ss.ConfigEnv()
- ss.ConfigNet(ss.Net)
- ss.ConfigLogs()
- ss.ConfigLoops()
- if ss.Config.Params.SaveAll {
- ss.Config.Params.SaveAll = false
- ss.Net.SaveParamsSnapshot(&ss.Params.Params, &ss.Config, ss.Config.Params.Good)
- os.Exit(0)
- }
-}
-
-func (ss *Sim) ConfigEnv() {
- // Can be called multiple times -- don't re-create
- var trn, tst *FSAEnv
- if len(ss.Envs) == 0 {
- trn = &FSAEnv{}
- tst = &FSAEnv{}
- } else {
- trn = ss.Envs.ByMode(etime.Train).(*FSAEnv)
- tst = ss.Envs.ByMode(etime.Test).(*FSAEnv)
- }
-
- if ss.Config.InputNameMap == nil {
- ss.Config.InputNameMap = make(map[string]int, len(ss.Config.InputNames))
- for i, nm := range ss.Config.InputNames {
- ss.Config.InputNameMap[nm] = i
- }
- }
-
- // note: names must be standard here!
- trn.Name = etime.Train.String()
- trn.Seq.Max = 25 // 25 sequences per epoch training
- trn.TMatReber()
-
- tst.Name = etime.Test.String()
- tst.Seq.Max = 10
- tst.TMatReber() // todo: random
-
- trn.Init(0)
- tst.Init(0)
-
- // note: names must be in place when adding
- ss.Envs.Add(trn, tst)
-}
-
-func (ss *Sim) ConfigNet(net *leabra.Network) {
- net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
-
- in := net.AddLayer2D("Input", 1, 7, leabra.InputLayer)
- hid, hidct, hidp := net.AddDeep2D("Hidden", 8, 8)
-
- hidp.Shape.CopyShape(&in.Shape)
- hidp.Drivers.Add("Input")
-
- trg := net.AddLayer2D("Targets", 1, 7, leabra.InputLayer) // just for visualization
-
- in.AddClass("Input")
- hidp.AddClass("Input")
- trg.AddClass("Input")
-
- hidct.PlaceRightOf(hid, 2)
- hidp.PlaceRightOf(in, 2)
- trg.PlaceBehind(hidp, 2)
-
- full := paths.NewFull()
- full.SelfCon = true // unclear if this makes a diff for self cons at all
-
- net.ConnectLayers(in, hid, full, leabra.ForwardPath)
-
- // for this small localist model with longer-term dependencies,
- // these additional context pathways turn out to be essential!
- // larger models in general do not require them, though it might be
- // good to check
- net.ConnectCtxtToCT(hidct, hidct, full)
- // net.LateralConnectLayer(hidct, full) // note: this does not work AT ALL -- essential to learn from t-1
- net.ConnectCtxtToCT(in, hidct, full)
-
- net.Build()
- net.Defaults()
- ss.ApplyParams()
- net.InitWeights()
-}
-
-func (ss *Sim) ApplyParams() {
- ss.Params.SetAll()
- if ss.Config.Params.Network != nil {
- ss.Params.SetNetworkMap(ss.Net, ss.Config.Params.Network)
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Init, utils
-
-// Init restarts the run, and initializes everything, including network weights
-// and resets the epoch log table
-func (ss *Sim) Init() {
- if ss.Config.GUI {
- ss.Stats.SetString("RunName", ss.Params.RunName(0)) // in case user interactively changes tag
- }
- ss.Loops.ResetCounters()
- ss.InitRandSeed(0)
- // ss.ConfigEnv() // re-config env just in case a different set of patterns was
- // selected or patterns have been modified etc
- ss.ApplyParams()
- ss.NewRun()
- ss.ViewUpdate.RecordSyns()
- ss.ViewUpdate.Update()
-}
-
-// InitRandSeed initializes the random seed based on current training run number
-func (ss *Sim) InitRandSeed(run int) {
- ss.RandSeeds.Set(run)
- ss.RandSeeds.Set(run, &ss.Net.Rand)
-}
-
-// ConfigLoops configures the control loops: Training, Testing
-func (ss *Sim) ConfigLoops() {
- ls := looper.NewStacks()
-
- trls := ss.Config.Run.NTrials
-
- ls.AddStack(etime.Train).
- AddTime(etime.Run, ss.Config.Run.NRuns).
- AddTime(etime.Epoch, ss.Config.Run.NEpochs).
- AddTime(etime.Trial, trls).
- AddTime(etime.Cycle, 100)
-
- ls.AddStack(etime.Test).
- AddTime(etime.Epoch, 1).
- AddTime(etime.Trial, trls).
- AddTime(etime.Cycle, 100)
-
- leabra.LooperStdPhases(ls, &ss.Context, ss.Net, 75, 99) // plus phase timing
- leabra.LooperSimCycleAndLearn(ls, ss.Net, &ss.Context, &ss.ViewUpdate) // std algo code
-
- ls.Stacks[etime.Train].OnInit.Add("Init", func() { ss.Init() })
-
- for m, _ := range ls.Stacks {
- stack := ls.Stacks[m]
- stack.Loops[etime.Trial].OnStart.Add("ApplyInputs", func() {
- ss.ApplyInputs()
- })
- }
-
- ls.Loop(etime.Train, etime.Run).OnStart.Add("NewRun", ss.NewRun)
-
- // Train stop early condition
- ls.Loop(etime.Train, etime.Epoch).IsDone.AddBool("NZeroStop", func() bool {
- // This is calculated in TrialStats
- stopNz := ss.Config.Run.NZero
- if stopNz <= 0 {
- stopNz = 2
- }
- curNZero := ss.Stats.Int("NZero")
- stop := curNZero >= stopNz
- return stop
- })
-
- // Add Testing
- trainEpoch := ls.Loop(etime.Train, etime.Epoch)
- trainEpoch.OnStart.Add("TestAtInterval", func() {
- if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
- // Note the +1 so that it doesn't occur at the 0th timestep.
- ss.TestAll()
- }
- })
-
- /////////////////////////////////////////////
- // Logging
-
- ls.Loop(etime.Test, etime.Epoch).OnEnd.Add("LogTestErrors", func() {
- leabra.LogTestErrors(&ss.Logs)
- })
- ls.Loop(etime.Train, etime.Epoch).OnEnd.Add("PCAStats", func() {
- trnEpc := ls.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- if ss.Config.Run.PCAInterval > 0 && trnEpc%ss.Config.Run.PCAInterval == 0 {
- leabra.PCAStats(ss.Net, &ss.Logs, &ss.Stats)
- ss.Logs.ResetLog(etime.Analyze, etime.Trial)
- }
- })
-
- ls.AddOnEndToAll("Log", func(mode, time enums.Enum) {
- ss.Log(mode.(etime.Modes), time.(etime.Times))
- })
- leabra.LooperResetLogBelow(ls, &ss.Logs)
-
- ls.Loop(etime.Train, etime.Trial).OnEnd.Add("LogAnalyze", func() {
- trnEpc := ls.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- if (ss.Config.Run.PCAInterval > 0) && (trnEpc%ss.Config.Run.PCAInterval == 0) {
- ss.Log(etime.Analyze, etime.Trial)
- }
- })
-
- ls.Loop(etime.Train, etime.Run).OnEnd.Add("RunStats", func() {
- ss.Logs.RunStats("PctCor", "FirstZero", "LastZero")
- })
-
- // Save weights to file, to look at later
- ls.Loop(etime.Train, etime.Run).OnEnd.Add("SaveWeights", func() {
- ctrString := ss.Stats.PrintValues([]string{"Run", "Epoch"}, []string{"%03d", "%05d"}, "_")
- leabra.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.Stats.String("RunName"))
- })
-
- ////////////////////////////////////////////
- // GUI
-
- if !ss.Config.GUI {
- if ss.Config.Log.NetData {
- ls.Loop(etime.Test, etime.Trial).OnEnd.Add("NetDataRecord", func() {
- ss.GUI.NetDataRecord(ss.ViewUpdate.Text)
- })
- }
- } else {
- leabra.LooperUpdateNetView(ls, &ss.ViewUpdate, ss.Net, ss.NetViewCounters)
- leabra.LooperUpdatePlots(ls, &ss.GUI)
- ls.Stacks[etime.Train].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
- ls.Stacks[etime.Test].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
- }
-
- if ss.Config.Debug {
- mpi.Println(ls.DocString())
- }
- ss.Loops = ls
-}
-
-// ApplyInputs applies input patterns from given environment.
-// It is good practice to have this be a separate method with appropriate
-// args so that it can be used for various different contexts
-// (training, testing, etc).
-func (ss *Sim) ApplyInputs() {
- ctx := &ss.Context
- net := ss.Net
- net.InitExt()
-
- ev := ss.Envs.ByMode(ctx.Mode).(*FSAEnv)
- ev.Step()
- ss.Stats.SetString("TrialName", ev.String())
-
- in := ss.Net.LayerByName("Input")
- trg := ss.Net.LayerByName("Targets")
- clrmsk, setmsk, _ := in.ApplyExtFlags()
- ns := ev.NNext.Values[0]
- for i := 0; i < ns; i++ {
- lbl := ev.NextLabels.Values[i]
- li, ok := ss.Config.InputNameMap[lbl]
- if !ok {
- log.Printf("Input label: %v not found in InputNames list of labels\n", lbl)
- continue
- }
- if i == 0 {
- in.ApplyExtValue(li, 1, clrmsk, setmsk, false)
- }
- trg.ApplyExtValue(li, 1, clrmsk, setmsk, false)
- }
-}
-
-// NewRun intializes a new run of the model, using the TrainEnv.Run counter
-// for the new run value
-func (ss *Sim) NewRun() {
- ctx := &ss.Context
- ss.InitRandSeed(ss.Loops.Loop(etime.Train, etime.Run).Counter.Cur)
- ss.Envs.ByMode(etime.Train).Init(0)
- ss.Envs.ByMode(etime.Test).Init(0)
- ctx.Reset()
- ctx.Mode = etime.Train
- ss.Net.InitWeights()
- ss.InitStats()
- ss.StatCounters()
- ss.Logs.ResetLog(etime.Train, etime.Epoch)
- ss.Logs.ResetLog(etime.Test, etime.Epoch)
-}
-
-// TestAll runs through the full set of testing items
-func (ss *Sim) TestAll() {
- ss.Envs.ByMode(etime.Test).Init(0)
- ss.Loops.ResetAndRun(etime.Test)
- ss.Loops.Mode = etime.Train // Important to reset Mode back to Train because this is called from within the Train Run.
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////
-// Stats
-
-// InitStats initializes all the statistics.
-// called at start of new run
-func (ss *Sim) InitStats() {
- ss.Stats.SetFloat("UnitErr", 0.0)
- ss.Stats.SetFloat("CorSim", 0.0)
- ss.Stats.SetString("TrialName", "")
- ss.Logs.InitErrStats() // inits TrlErr, FirstZero, LastZero, NZero
-}
-
-// StatCounters saves current counters to Stats, so they are available for logging etc
-// Also saves a string rep of them for ViewUpdate.Text
-func (ss *Sim) StatCounters() {
- ctx := &ss.Context
- mode := ctx.Mode
- ss.Loops.Stacks[mode].CountersToStats(&ss.Stats)
- // always use training epoch..
- trnEpc := ss.Loops.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- ss.Stats.SetInt("Epoch", trnEpc)
- trl := ss.Stats.Int("Trial")
- ss.Stats.SetInt("Trial", trl)
- ss.Stats.SetInt("Cycle", int(ctx.Cycle))
-}
-
-func (ss *Sim) NetViewCounters(tm etime.Times) {
- if ss.ViewUpdate.View == nil {
- return
- }
- if tm == etime.Trial {
- ss.TrialStats() // get trial stats for current di
- }
- ss.StatCounters()
- ss.ViewUpdate.Text = ss.Stats.Print([]string{"Run", "Epoch", "Trial", "TrialName", "Cycle", "UnitErr", "TrlErr", "CorSim"})
-}
-
-// TrialStats computes the trial-level statistics.
-// Aggregation is done directly from log data.
-func (ss *Sim) TrialStats() {
- inp := ss.Net.LayerByName("HiddenP")
- trg := ss.Net.LayerByName("Targets")
- ss.Stats.SetFloat("CorSim", float64(inp.CosDiff.Cos))
- sse := 0.0
- gotOne := false
- for ni := range inp.Neurons {
- inn := &inp.Neurons[ni]
- tgn := &trg.Neurons[ni]
- if tgn.Act > 0.5 {
- if inn.ActM > 0.4 {
- gotOne = true
- }
- } else {
- if inn.ActM > 0.5 {
- sse += float64(inn.ActM)
- }
- }
- }
- if !gotOne {
- sse += 1
- }
- ss.Stats.SetFloat("SSE", sse)
- ss.Stats.SetFloat("AvgSSE", sse)
- if sse > 0 {
- ss.Stats.SetFloat("TrlErr", 1)
- } else {
- ss.Stats.SetFloat("TrlErr", 0)
- }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// Logging
-
-func (ss *Sim) ConfigLogs() {
- ss.Stats.SetString("RunName", ss.Params.RunName(0)) // used for naming logs, stats, etc
-
- ss.Logs.AddCounterItems(etime.Run, etime.Epoch, etime.Trial, etime.Cycle)
- ss.Logs.AddStatStringItem(etime.AllModes, etime.AllTimes, "RunName")
- ss.Logs.AddStatStringItem(etime.AllModes, etime.Trial, "TrialName")
-
- ss.Logs.AddStatAggItem("CorSim", etime.Run, etime.Epoch, etime.Trial)
- ss.Logs.AddStatAggItem("UnitErr", etime.Run, etime.Epoch, etime.Trial)
- ss.Logs.AddErrStatAggItems("TrlErr", etime.Run, etime.Epoch, etime.Trial)
-
- ss.Logs.AddCopyFromFloatItems(etime.Train, []etime.Times{etime.Epoch, etime.Run}, etime.Test, etime.Epoch, "Tst", "CorSim", "UnitErr", "PctCor", "PctErr")
-
- ss.Logs.AddPerTrlMSec("PerTrlMSec", etime.Run, etime.Epoch, etime.Trial)
-
- layers := ss.Net.LayersByType(leabra.SuperLayer, leabra.CTLayer, leabra.TargetLayer)
- leabra.LogAddDiagnosticItems(&ss.Logs, layers, etime.Train, etime.Epoch, etime.Trial)
- leabra.LogInputLayer(&ss.Logs, ss.Net, etime.Train)
-
- leabra.LogAddPCAItems(&ss.Logs, ss.Net, etime.Train, etime.Run, etime.Epoch, etime.Trial)
-
- ss.Logs.AddLayerTensorItems(ss.Net, "Act", etime.Test, etime.Trial, "InputLayer", "TargetLayer")
-
- ss.Logs.PlotItems("CorSim", "PctCor", "FirstZero", "LastZero")
-
- ss.Logs.CreateTables()
- ss.Logs.SetContext(&ss.Stats, ss.Net)
- // don't plot certain combinations we don't use
- ss.Logs.NoPlot(etime.Train, etime.Cycle)
- ss.Logs.NoPlot(etime.Test, etime.Run)
- // note: Analyze not plotted by default
- ss.Logs.SetMeta(etime.Train, etime.Run, "LegendCol", "RunName")
-}
-
-// Log is the main logging function, handles special things for different scopes
-func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
- ctx := &ss.Context
- if mode != etime.Analyze {
- ctx.Mode = mode // Also set specifically in a Loop callback.
- }
- dt := ss.Logs.Table(mode, time)
- if dt == nil {
- return
- }
- row := dt.Rows
-
- switch {
- case time == etime.Cycle:
- return
- case time == etime.Trial:
- ss.TrialStats()
- ss.StatCounters()
- }
-
- ss.Logs.LogRow(mode, time, row) // also logs to file, etc
-}
-
-//////// GUI
-
-// ConfigGUI configures the Cogent Core GUI interface for this simulation.
-func (ss *Sim) ConfigGUI() {
- title := "Leabra Random Associator"
- ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Leabra model. See emergent on GitHub.`)
- ss.GUI.CycleUpdateInterval = 10
-
- nv := ss.GUI.AddNetView("Network")
- nv.Options.MaxRecs = 300
- nv.SetNet(ss.Net)
- ss.ViewUpdate.Config(nv, etime.AlphaCycle, etime.AlphaCycle)
- ss.GUI.ViewUpdate = &ss.ViewUpdate
-
- // nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- // nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
-
- ss.GUI.AddPlots(title, &ss.Logs)
-
- ss.GUI.FinalizeGUI(false)
-}
-
-func (ss *Sim) MakeToolbar(p *tree.Plan) {
- ss.GUI.AddLooperCtrl(p, ss.Loops)
-
- tree.Add(p, func(w *core.Separator) {})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Reset RunLog",
- Icon: icons.Reset,
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- tree.Add(p, func(w *core.Separator) {})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "New Seed",
- Icon: icons.Add,
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RandSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "README",
- Icon: icons.FileMarkdown,
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- core.TheApp.OpenURL("https://github.com/emer/leabra/blob/main/examples/deep_fsa/README.md")
- },
- })
-}
-
-func (ss *Sim) RunGUI() {
- ss.Init()
- ss.ConfigGUI()
- ss.GUI.Body.RunMainWindow()
-}
-
-func (ss *Sim) RunNoGUI() {
- if ss.Config.Params.Note != "" {
- mpi.Printf("Note: %s\n", ss.Config.Params.Note)
- }
- if ss.Config.Log.SaveWeights {
- mpi.Printf("Saving final weights per run\n")
- }
- runName := ss.Params.RunName(ss.Config.Run.Run)
- ss.Stats.SetString("RunName", runName) // used for naming logs, stats, etc
- netName := ss.Net.Name
-
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Trial, etime.Train, etime.Trial, "trl", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Epoch, etime.Train, etime.Epoch, "epc", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Run, etime.Train, etime.Run, "run", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.TestEpoch, etime.Test, etime.Epoch, "tst_epc", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.TestTrial, etime.Test, etime.Trial, "tst_trl", netName, runName)
-
- netdata := ss.Config.Log.NetData
- if netdata {
- mpi.Printf("Saving NetView data from testing\n")
- ss.GUI.InitNetData(ss.Net, 200)
- }
-
- ss.Init()
-
- mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.NRuns, ss.Config.Run.Run)
- ss.Loops.Loop(etime.Train, etime.Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.NRuns)
-
- if ss.Config.Run.StartWts != "" { // this is just for testing -- not usually needed
- ss.Loops.Step(etime.Train, 1, etime.Trial) // get past NewRun
- ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWts))
- mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWts)
- }
-
- mpi.Printf("Set NThreads to: %d\n", ss.Net.NThreads)
-
- ss.Loops.Run(etime.Train)
-
- ss.Logs.CloseLogFiles()
-
- if netdata {
- ss.GUI.SaveNetData(ss.Stats.String("RunName"))
- }
-}
diff --git a/examples/deep_fsa/fig_deepleabra_fsa_net_3steps.png b/examples/deep_fsa/fig_deepleabra_fsa_net_3steps.png
deleted file mode 100644
index bf082ae4291f10fb518f20922dca2dac46266be3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 430186
zcmd4(WmJ{j_Xdn^3@pMHNtKXB=>|nWT3TA98|lUbMM_dY8YDI$-JqhNA{`qxEz&LB
zXD*)Ke~k0xd^zuzm*F$y+3tPc>z-@IHLrQC{pNw9H1TQj(-;hfSmypc6%6Kh0tQ3q
zbCM8VX_UJC4E{Q0e_zWHgJI`K{}GfwCoF~+uR2LSbW*i5b#gUyFu}OGy0Tl?S~(gS
z+MBT3Ihe&Q2$N$l*Dx~o?x?wam>YC=QybqpSZ<}ezH^$6m4V>A%gax{6t6!BrMhE2
z`@Gw7&1ycyrd!*pq$IawuSB~5Us+;X@>K2G*>~z%m%dW-UO#(2EcDgOx!DiHG`yYi
zreVM7!sSmc;+*4~K1>QXiG<%bT@ZGp38%a2cU!GPnE)1ue%geU;^aS?(fzC+eD~ig
z7>wZM^R55)d-RQEqW}GF&-DMwm;X7qq6OkoKYXCPbm@{-i?_FTcafPApVTq*dR~37
z8T`fPHdoMik%GcXz_w;h!`1&K2{Q*r>)`cb3gIj8&o=(wxZtlgf|zF0n!WY@fVXcQ
zT^VgPwY9S|5`DDgqFA4}GQt~h#D_KzKjFAEa!268~
zyvhEkudnZ`SFf~MUOaz3Kt?PW))dVV?zEr(`?tKSKfKR=@NhRq2w608+5#@I3{W8-
z#1x;l6oz{p{^$lAJ_!2#bNc=Nub0VTOGjcg)zs3s%vv6{96frJ=h0_Ole>mSlt!+0
zdRiKtiK!{rL@8d#RpVfPkH`I=G26pW;yyk;B>Bh4}D^s22AKP3Wla-h6N|x}Cc=s;(&!5M~Pn=-6apP{P
zR8Yt2bSIC)-yfHApKLA}+6>p~?QAS++S=x!g@5{VOY+`5^O4WbVnsYlrGhD$NJ$$v
zY}GV1XMen-jdHZJvm>Sw?8Tkt8Sk1FX~A7_4M7yn>vLKrCMH>#nfFIW
zM+08JZhLv^0=Mge?%G^m(a#DxCN?&OzP>&U8-0C!JSnjOP`_VPsh8?WG
zMU?vSW5edWov+pA_O`ZOzVFMIFUjim=Iw&uV*Wjz)9KMU4Px6mRB?G4j5@nb1i;j+dvN7mNk(HH2
z(pu*S}S7V!nP&Ab6KQ&c4f^#>YI6QlRi>BhybZ5s%^m
zn||Jm3BI`IhPwSV4x?|c&ynP|XDHE(B_8Z%D66WDwe3c87?hF^SGn3Xm56w*lNmJx
zS~xUkW@a+rs%P`6Ci8yvD6Szhp8=YqI$R8G>ewSZw_BJe9CpTk`D_B$RF7%xj&A`)YoM{KXB`?klUgcHVOS
zBvcOOK@bjbm4yz7g~)gBM%&0d_1(uWDa3F}g6r88+QF3f
z+E!Y{?7Dj(vF?r09EAQfwsa5%ufvnPf%Muo`o7te&7f8{j?b>tl1A9=(`9zuO!t)u
zCQi-{Q(YC6us>sCkufpTtFfdas}Ektn8}U184|n&9g_;e0k^Rjv#e6-;U^|O-b?s@B#TK0xsi@{x2iA>^
zjZrv4GAQ<}(J8a71QXBanUVSJZ#Ig}_>1itrdoXv@zenKX@_W>*C!<=R+pETe=*L8
zwj3D*JlyV2doKh+$ZDGeX%_fX)|17$gW3eIe=-?3M=1w~!UlMcYVh4KVG$9iH0jqR
z{1|WC_&VX?>|EmWFFcWZYe4MZ588l`kTT1rA3xATjY&0*Pfl8G{Tt_uYmVY2B_*xd
zS*Rk3B!>F?l!7b*4<^KY+D_*c78cHFxhent5hZN8rM!Yd(7X5V&A)y7*6X$EGt-q7
z6vdNiU}@BrbVp+nqOalgIoiPTa{l_^;o(qYuSucdg4p^#6^d4|gX%#}0)rhqE|8dl
zmu!4|JgPP_GE(aCe}zW(VGv
zQ`68Ozi?Br_>I7|YoFa_vN8f4CtI}EAiTqtH^)LKAVu*S_2g(7xd!d-deQRo_AHRs
z?T&K1Kw(x_V3CH3Gz=()3J9|0G4Mio@Y-`y#-#DZu-0kx=qK5?=mxLBpuJriKk?
z88SC#9Vc-5^y#_%gR@Z5tG6cN8X{RVbG8$OTwC1!{k%CJ<>kIL9!=q;2)T$|zuNre
zt5lu!RGkD&=9KKs`6>bcHZ}U4%J^1em3|$7^YwD@l
z*}pnA{*5;^0EieX{5$xTqt?90R$e~v$@*NMRHF^P_~%C#du!{Q!Nt9@x;>jX9ztSr
z;}2rKwe38dTwL?}Ijj#qC4ITQcA?cdMDp!rt-YKsR|t7pcJ>z2fkoeg9jP%dNSHRY
z!@&IokYKZ^t*x!x81m$?>|&gJoMsb13oQ-GY~#V;om0Kr9~I-X!MC)*kT^(wH7hMb8m(n%
zgmR(9-<};kQS2}-i!JRdGMnAo7>=&1^G!86K}42h(_hL&NB6BmLto#vEXq-Dcz1%&
zxsl5CVF{q08T6>?{qP~1U#?CI16qqU8wt&hV7;g7FA#j23ia~#Uc+1E3ph#CK|PLp
zISI=z_TF*v#{qD{sVa$#JAF`)k&!9Lk`M+6C9=CzPtv*!dCLL=0E;>C-#7NNx-_4V5(tB$QAlWm*YzT2INU0JFb3_GY=fM1tb3IQTewGtp#(%awP
z_t;y*QPo~I1@~-jJ(%gn##L)g0!+28t;?_e7qGrC$TE$sY^ek4dC<1uv%yI{FQe%x#5olh~GXU=!a-XEa9xLm!?m0>-oHR3I
zDY7x(h%d8C?0&!Kz38c3ZlA1L00kgv{t)(4bp;
z5T{9Z&xdce=H{6Vfme;XOFcIh7#bB4gtbCMw=>oI+f${ThrKtZHS4w`)px)&YfxIh
zO!va>wXLIO<0(njKD)AX*YGQkiQiWvBO>^@T~?>s74s)w(@F>T=BU>FevvwJQ+Rff
z&gTAPOMEhxHVQb5bU*S-V%5>caZ$0eX__WXzF9v@EZd?
zJ)5F4we)lJzXAK;b1H1Bm!+^P_^_)A?%UHDkkbyJ+~=JC^X%EP?U5Ixe`ja=a}F0*
zT265S!pi;zEp2zY+?9+*VzOJGX9jK&LEpn4@e*6d(yi{`;E>Z27k4(ImYHRFe`isr
zMNUpm{ozCN@ZOyK5^5bi=udcUj-DWKnkHXMl$*v*rU}6HE?vI-5hZ?|z3ndIEYAy~
z(r%uf{lziO6Q&0QDWaaOdwQ;e-8oyW@j_^i}rqWfg%za6@{NKdi?!#
z+<`QBM4Qil9-)ZK(~IM2(Lb{OE0TpIi1^GItDe{(XcE)`3#sdX<>EvVYYG62DljDI
zj1}~~O%*k}P~4I?Iz8Q&!XxOqU>)gJxr?z;^U+8{RnQwHbAP
zA3rD@X_JY*`T)p6n@fZi%IiN?pMssl^U2ykCL+nmx@RddM}Le0@2#bop%_26AZx?q
zDYG~s$}0o4x6p!xnc31oS6p1Y2?MMOm8G{X?I)_tIV@$zLs?iO{S
zNWJgvVtL%rHQ!t$CR*aH%5-I|b`OrjO?tkg=Dcl`yKFzV#-jy*4I6PrpO=@HHD24n
zr~+zfT|3G9Ja>GzhUo(Bi+kQS7|QQ{hxAzrLtABgqqpcL}h
z4)8}ad`COzfyYhm!X0%-An`PlyN-T69#{JQ{rk&OonG;R&V@2W8R_Xfp0(Q{`KlR;
zHV$Lr(!FwpLLNn;(tUEa2Gwo_T8F*=(Ggg<6gkI(w7!JUk`MM*MC|NeMpN
z7T8jPql6D5>3=picSCNWt-pom7leCqpZE`q8=IK$uvobN{c#Sv0zkTK!7VC{!mhe~
zMwp2ytIx5Uh5}%-{neK}mjOK$t!zTPDYk|~3k`kyjI`MHJAJQZNuV=^3Xj(AJ2LSc
zYzaI3eQjxJnS&qLk`9O><2DcNc8Bs4x8TOk#@0>cI$)^2{jIfC2?|f=M(u_Owlf4^eT72>l>3nay7l?3D8?{D++
zW%UEXEBdnsFx3W+?_8M3b@)itgkLd2i^=<4feAqgM!ahbrRy
z@2?ED)GX0kV{c~zXXm@4v$inUKZ73&5nG5-xdw){|F6rrj;JBUwc$!c#A$h3UR
z(7JWJl?IN=i!Dm5Cv@i||}6wExxIZ{O~LMVgXpQJV@xSq>g`qwMEf
zi_BUz5=Fgp@Cm;A?irRB%uGxUdqc}JL_}R-^m4lAQ?Uck>ua2ns=u|`WU${cx*W}}
z*O9G`&B50$2iZN0KNvpPOYC-k@}z*;clSxgA1|nv)-M0p0Z(UB7YJ?b?C8X6s;l=!
z9M5X%@7DoDn6KUs_FBsj+xwa2?7N0F$VyB5mJLPS*6LGTO>8ry34}0n6Pm7anf88j
z1%e0J!kqKtHlG(O2Zx;lqAYb&y{iQQSlHh#>dS#m?f>=Thk0?LA(_YDchXK%ZP&5u
z(lp*`qN1X?S5qXQrCY0~*>B692i&3!$QV*wETsW7n|5Af6u%78=+++_)?0n|9pk9}5^DkeRG1%ci5Hr!-C3}*
z{|&9Nnd;j;T-|;NAraZ5&JE}-Q&Un>tns3ItDRC~*@-Xf+?F4w%DBOkm;dLjhS1(A
zqE4{bZdkMmx3aQQ;;o=<2r}$CP&>UJMLz){r0+S;
zgQY6*Gn^XoTIqR+~l$1G$
z+uMI~Ss2Iy+{nW}3@A6prQBtv>u0h@k@Fw`W)0`((A4GwX~o@p@aOwmavsYF0BAPU
z{Si#6+-5D5Yct()2?+_Z3JU7boR``S+ZI)E@$)~da+$X*rd}>IY08n+fUG6~oQliL
za>+NyFzJBZ8$XH&3Tl@Pd0?SZU4#~^x>|U?uSlB4Yv~zKhq>D%G@`myu8XC8%i6Qh
z-t<5#Y1BzgO`WG>TOkQKH5_6)`}y-DRH9xFX?!-5J32b7w})N&Z4fBQm3`S232d+J
zV5L?`u2zv$*?@!Gi;%k7!}hQ1p1OLZ0^6jvq@?6&Vq%-(gynE0QHyYMULpzT)^UEs
z6xQ1S-z3glVefi>+vpDTFZF8cSfmzFkg5tPl)A`;4
z3D|0LhhV6Y^mKH^ad9I&e;2A2HQ*s@H)d@L|UY-Ns
z0Sx7wk3^9QMGcaKi`t8w@yi$PlYg^?2SpxkM)=T~mk`RPQgfvHH~d=E6;DygWlV
zv@*G#i#3|4^b`b?Ys2W=1D1a0pF5jzWZ
zE?`bxftW(>xZP7$Ytrcp*m?%q$W(e)V2AJgxe7hSr3Z&?_iZSvH`=g}Qw4
z#TJ52Q=ts<@7sR8<1v8pTH?B>kNlK)!KI|Jvr`ojf=;Vb%1~)b9>2_6K?x1j@R2Vs
zsHlxNW1Lz=Hl~0B)Vz1^Uicmjq;fRxKyaA36w=(@q}V|_a7%x>Hj>$pNq{$~A+Pp<
z(pohSoxaCpylXMwT>pgn$|FaPtU}{(2fQE(i0wg9RFxH`L4Gm`OZ6m+&YJs1PUtXNhX&yCyUIL7k2e8Gfh*U+rsp$Tu5tgdW`TMtT
z<^UQ!U?X8zG;41y;ZIWHkq0A!>7|F7!0h3m2U>$on1&rMbD8TkbYA(a4U|3_Bv8Bm
zJ(qD|)^Xx@`|lX8HRvI%y0g{swcb^Ov2{a3L$SOzv9@)4E{9W+igN}MKxgIFwuXIB
zoEoyPC+2nJE@=|v<<$USL6{+2W_V|4V-*;E
z9n`A>-(C!k9T^?nzq+=%+7)_TVinjHRS2B2N?BdK^%ok1sj)1C
z7f_*(h=?%m-e2#xn}z+WP&k}tZV>$C?YNn`}8RW;GP0h
zq$n8s$o1GqL+)~hOgk|V6wC042kG>(-}BDK=1dw7>}?6B7+y
z2o)h{ZXHG5N|!(Y6Oz_<|4qq~y0y-?cA?qZ;ZONRgr5(8p2{6|V*mfV91&fGa+G-%
zjG53_Ae#+%RE74#OC7>sD}68!uienx!mYy>4eskbsnVjpE#Um
z7|?UqV4fzU0`(F=7QIYzax#o(pcscmU*SN_0Y?84*h`l-ARs^uZU}3Ho*Jr}F5C$%
zP*U>M=`&~4;h$itdg!O?G;D~whX*r1e{UH7#>R%GxcF08>&5fuQ()=Gj~!!#@(Uk<
zky>bCVqz5yH7u@;?r`Fpn^Re-4MR+2;6n3Y`~yCSRtfVw7}!S9iir)+RIg`J8Ik!_
z(VV7Kt2!gh$A=qt^mKD$q@%lc=gys|Z%#m~965TFO=cG2;FX^rGb`)wf6jZUE|c@Q
z(#HR%1>i;ZBtM+o@@Im%;hqJa&=T>u>FX5!(T1Bc$VA0gI?E{m9|~8|UduPYr`Wom
zmif1~=Inv9RW-7dd)cd6w
z6G_0-RYOY35AJ3hsoU~j4(H_LbQ=E^sqr?M_vfW^=M4LvT?KCm=g!-41`Y`O{b+G1
zKpu!pCT8YeSx@{7dljT6$H$ocUG>7
zJw6e23plSF8vDSX#wy4aN~|>f8{w03F>yILtnm51F136%-|+bO`L4PX;9;m0ER-q%
zPep6U%w$Ah91>(y>bfw{_M^^_mPW|_PbI*id|m6Xc7yhM3;)UUAXwgI8c
z_GSWtEIM~`bkwkKuorv~8#3VU@6RGEtSjQN3UYyCJfY95)IfNUPUvZKR}kFrU)E~?
zeo*pcpgcKGS3<_&eKQRh3N7N@yLZrPM4(ECI|h_xQUY&`eCz!~cRmUUj;$(ERaKSk
z)Hwc_BX|s`N#uGU=wSL0LH|xytO6u3>44pVrB`IvBK=$fvJEw8co6ZwZD;eia=i}B
z!LHS6%$Ws*%S>lR$RI5zXP8GP*xcYS=F@;9pB|`iFgaq*ea^3^9<2Ii{kzaJ}s%j0M`uT9S<$hSbfjcg~2$AWdd^?
zBpp_nsmk={mLb1?|Gu0*y0~a3b9ZAFr{uk}u0CAr-JNeB27}@v9S~DF(IGQUF@l*W
zvwE~b{Ja0^EbyWAymye%0L;=`G~L^0o?i66gyS+?&2K#O|G
zEn@+JKoAO$U_|>dnIA0Dd3)6Yh&x%JKs#c%%$(qVVehge9nm%eS}Q0hnCUOee{5uQ
z>B5E7p&Cyt81^4bZQ4^YPlGdZa(**h^#&HO2?w#!oY1eYQfW}OV
zeXzambA(9dA;-N@Ocb?|xrOrqqG;MQ`uoz-(gS1TH2Ml%zAuy%6xflGM_0s7W5jb2
z>wI32o?g1+qcP~JsO`C_1^v@2is80iz;R@FVHT~22KITCHUme%U~KY
z+rAoI7_a6CfCJgMl&`@qqTL`k{*)vQ%Dq`@LJ9u5H$cgF0Y@pgA55+w82{}U{Rq7t
zl?uT^mqvhjibb3AInvVg)=hf)>2Gf?pl1N#N^In#yqaW#K|M==iannU>lxn?U>*Q?
z0Ml5lWW|6MB)hn{YkCWkDPVi7a;lUCy6~{rTqw9^^Py)>FfuZ>L7SQr_Pa5dsuKw1
z_yiWH(-TH*=&E4Kz8WHoc@(&P+)F1LCQ`FkFJETb4cA5ut2+A{K-%DbiZ(I`l3)k$
za?a3E$vHs&(~60SQEY}jstdF%$ZXJ0r*;!3!6a0N$peGQ(W`Kf1Q#o$EHO!fiS^W~
zE_hixG{rpDy|-YLFaPAp6LL=DlYBgo7S1D_x_=Wk+n%DT=>_6LNlu+A*u7J_zA*v9s`16eVYCJeAEF(ndLd4o=X!59kxbP4
zlg@&lC=NO?L92r-4G+$i&gn;VH3caC(#L^w}+k3Q3hv
z4`2`ZI&3?)!{7T&cTO?GEb$n6f?!&JE6iWKfF(qJ1=*jZ^&Di$(RzO!Du}UnAZ7Xf
zg8);$4$ycC8aF|Y)lA?}a?m}18Hb$jYMId6`w)9?6>bT|1*Z{!-26Pcx|J$)o$HIk
z`OpbZfqUTVeCt&0tgT$Nx)pAPklnruyJ+z}6E456TjB7X`J6Yf^7J=vVqxqyD&Vm;
zV-g++rO%T^3R(c|*Wf!zd(cpIK&P4;_8SHaj&5E6Q-N49jeJxKvKEk&-~yCk)aHle
zY9I^}1Zo~F7c^EN7+81T$chW7Eh~qm(Fx&=M{3_?85Zm(zT%`+3+X&=AaTDO~X>62+kCas>
zluv(*L&8FLH@9MkE=a?gC^c$JYiY%RyCdRMuCT9tPzI*kQG72;gksz!PGT_Rjv%R+
zAFeYn@LGO_1VmK)_Ts3Rn_I*97e=S4KxJ6L1ecu8_5+Ym+5(Tk8@W+h#AW9Ge5eyG
zz{K=3dGI$#X;2|h7iqX{)bOLRF@rb-mP@4t=I*u%hd9WF@{wEsDHctwVPDc=o?!tx
zx!rTHEse+){Q2NXg>mkSLpAs+*Zg`KyLqTaCO6x`)|akakq(1yz4%RbHZ!98KoV9q
z7mWJ$-=>b?MiLSdi2QBJn<_ABgtn+0-F_tY9`cgn)m10C=k8Eq1!3w0eON5zm9uA)
zz?H1NzC0DhX9=W-&Dq;fO^g6nqQrp330728C<0|`TJYJv?!L-Bp_Y*ME)xf_?V#c
zOd1;PqYrUggng8cNdb6Nf>s2ns=WLmN}yeDU7R$XP?(X@0Ez~ws-1&w2(<_+A1;Rf$qhaX#
z1pXT(HlP&8s{o(1NqtR#F;@kol9URSr3;>}*l|+Pd%e#LV#_ytEk;CGm>YCKNWurE
z>PeFchf*8D8vr&L!K*^o(RZ!}J^?JH+8O+lr
zaIdFUgTTiZgK8HPEeic*b~r!_v6L6U6P}DUSipP>-L@f^ipAIWfcN!fSmHJ)8dG7~
z|Il`%8d3|C3myS>z)65eODF-}+MHzry^s_#D-S?Ne34lQGAo--DZiT1Ek?%sNLVsw
zo*?YbxfTNjb%2$Yw%*~zB-m~m)JL2|sqGLCx1(@QMW)Rvz%t?)wK13vcj1Egs!E#9
zfkKnFK+N|U*L;Tfw}N^Y6=D$#nHVT=n~UJEnAkBG&2G;XwX&uQkWdRxsomI~h_lmh
za&n@OJcAKW1xFS4RMpeVg>j%2@VEFY}!U;&IuH9A2r)%j6D)yu2;qe9$APzaYQ
zBelOd8z2_M^#LiEOeN&PNKc>s^EOn4E~G^E-gZ)e{f7i=bCwB%;XS+sAgRZF?I7ng
z1y+L_F9xL#M4_F5SGl;28i*i&t1yIT1*IUwCS+`CY6-1PR0pNPV1GUEwTJo3^aE@h
zXodR01fQLQp?Z`{0COg~_4mU6MFFIk)V-rej&Oq-xG8Vq9Ie!k&d#F4tBQL@#KvaB
zS|7Sd0$S$9gP3qwCBOy}7z`3pDJd{XC6LjdmOg+n7-SX1LAS$01KZapwX?IiX+BW?
z1UOMmum!jP7Fzp~LtB(zjt9jwAc7J~sM$g5Nb&~)P#$#(*2Yi4V4bYV
zb_(WzjI69V=&78j+olJ=#@{7+0z_FS`b|$Hdng@<0owX6z&Fhm65oFWL|yK=OOxIvL*dlKr-Qy0`|fnB>Nn&V2qZ-Cg~
z>%|#!G68l&i$re*lev3(mch-RT4B&)<&~A+0o#}ZS7g3@TlwnMt3m4_Z{8Hp#g2`Q
zF(pEXJ+%skO#l=JY&wHCNTZQKvawi-IT~PWfD_g^-#(Cb7IO_!?W0+kzNy{4`<&Ny
zAQOmtd1#A?*NIP`Rsk
zT++b8cf&O7@7P$U)Vm`X{5iP(1Bg55?U=4#zcY)=N9l%qZWiWn;J6PLRz*yHT@=+o
zrkDlz5|GZj+DPM@18I+NXM1M{Ri;=zyM!oKt-Chnm~x+8j%91Ih8r?9bTuPG?=eYh
zz^hmS$iq8
z56KK);h2%mDs2?U591t|3uK|A07KR8!G5Ky4t
zwQhsr24m-BsYSd9KzekP0X;t`vC5gQfvHBGzP-YMHVD19Cszl^4*a8G&;v9li1ZhJ
zp{1isJ6skg7)dY>m@$-*Pn|j?d;h-qYQs|?=n+N$$H+i)zV`O^|LwpsoW!t#)PaY*
z*psJ+g*?pDi-Qb;a?S463W~}ga-QtNiyuK3kAhL26<9&L$|VuYvoVm=}232hH
z*bR)XCuF|zbpVArwnXL7@e#_IyLot6ZZ3_Yjhkpq%!gL)moaC_6Xd^IFd2nOJFN^t
zNzfP1PrQa2sJ7Ah27E)))YSBaw-Av2;GtHq1=_{t??5ove@g`;&IQHGejd26b^zCr
zp`l4URy|YTxz?ZTfNZ-6QfL`o|5vX{4B=6N{ZQge_+JQ^G(|D+g~MS9%Z@aeLvMmX
zK59Z>MVJ%lvNnwD>^;cu06HUYLQvq-B7pxA^Y$C)%nVepam79iCK?ecxg2n|1GzE)
z9(4RgT|)yu4}4KFU@mQ02O@Ir%ph846o9lL!YQGlXGPY!R3k_U+S=h>Z9Xs|v4ow1
zU2X%ZHn8B2%MyWkkP0MA27SFnU!e(f-}V&Jr!nRFWPXNOGM1b%Ko^gLrIL(TU_1h@
zRASl9jHY$a8xXJ01LuS63=N|dB&_*`;VB@=nra9@nuUe2nK-EDY=ci@w22{k%r}87
zGz-*!_rZm_!IUPvar5gd%ggOh)!3>i(1fW0LKo_v90*vpS38wTU~U8fuL!UnjP2%D
zj*bG*%SADYis}Q$Vnb$851+~do>VRj;jNc1j}5}~>+qJx;g$=ofF;r78Qt+P{I7t-
z4n^7M*H@Jw^@d{E<)?gjPbCs6L1US_NV@&S2Z}4&E~FMfNgD#fhAh1VR66FMe2qdv
zC>Zj>*0tE&CJ<+Vt@$hh-imBf0}`&FA}Hp>fUmQ
zZ?WZ>)2BP?4)*LInyVCy2%c&H0ut}PMI-^C)eQvq+NeJb?{+~s0;}+#NF<65CWZg(
z@&o&}-LhpriJ3P52?84UnKqN7G>rri{jJc)I8p`k&O}L+<{UjzVDugk;mek6OV{
zJ1P!S84FbF;5w4RZjQVFOdf$|+D-NKsp>r^Fs_dpxiHb05UY85RmC1_mT32(Ou6j9
zv<5|C4-b1fh$!`CU}z1J2tVEO%1vZN5XWdspmL`FR6?0X%x1q$Q@?3NRQgz
zT3;_Q))<28NS7Nr#TK&QeX9l+$=`U(tep#T(v&v=hf3_pOrqW?c@
zo4@~GI`Y}uma_BF=Q}kxu)*uQzmmB2gEoY}i;t621+0zEig{3B%#|Rm+V?_*Ih+ea
z8|IP^dVU$u;vd=$@)9a*LA1v(NzNaH-BkeR8_>&u9<38F-83>@p7G)7wL|mkR;Wz2
zCYJTLLY07W(G6OZVlcn|@X9VI#@j&F_t#z+rajvmbqBeiKM)co#Na8QbWpm0>=$Wk
zvvU_(ChWXtbzmH9uj&LXXaZ==P|XiR?Ka{YP#kF^l{z|p2&yk7#Q*g-#%$b0^#Tsr
z#Rq(W=82=Q#SY!7}!}wsXon++>-Z#G9TM|;%G2WJ-|+%*RZ^&_CVYY5dFJtVx`Pfzg^v+
z=}jjPx;a7UndTn+4Oaq*(src$N7EXnW0*2X7Uinn1+$zaJNM*y4fwk_gg72v7cmG6
z$)B~Ae+S(uWEWrpx!dYPnfF(^jjRF({r`-dVG7Y#`P>=21}=LXiLsF+DJ%_Yx(FO+
zu?;?nY3s4H%d3S!KTzM5U*fO`R72Za35o|EI30*!GyqK7c0AJi4EgNXpz>=@4cQiO
zqbswATDItbTcRmLJpJ_j!ZHt1ANA|WY*j~#94%{QKWh3sK5&pb-*Vu5J@>80@TO7*
zEgDQ?G&2?M=4+S8f+WZC)+s8Od_flj5uNqj3V0iR$h#$b-fftl;t99Q`O8^^MtQy+EwWs4E3WN&E9OM}?qd;zTuzud8L*?p9%iV
zj0o_((fy&w*7eTU3YQ;OWC$uP@O5z)s0NyEHBD=}wcK*gjBy6K9rM%_g%q5#MF%Zr
z>Pf|{mrUpK+5jZl^nSVn$=j-gM=&Kdk+uEP`}6&04iw(-?2>%{q+2{ZiJkrU`3dKm
zo9@4p=1+3OrT>gC05sH4*w&Q6`xRfCW|bMg#DlNdc`2pDTy(Xg@^_fPqHhMfJU@<^
zDa*x27=L`MYehvX>MaA-)%IU#O4N^wqU}5@N5!ci!ytBv4iS=oT^#}cJzf6_0Rb_E
zbRbBB1`W#$2r%30;PfK25fx6ka0E&PAiUWTZV9B
zGR~~zZ~qSRdKZnt)veq)wIeZKXn4w;I2m~jNYZs2QL(Q0?_B+FJ1U8#;j
zy79$g*{>BE`|7U3?B!p_1K-X}Xikz|P_XtXt+D3zuw>p=Ih}aTMdYY`33Qz1U!ETi
zE3deQNiqcRif9i8h1j$!242djp#V{#UZs@2egbOvaR$D6t^-f}>P_vhA53o2g?~^yhNpV0Vv?Rb*AyK`K{b$Boc7=f=V_C*Ae%qZ_3J7%1Z$1a
z`VrXyI+qz?`$vwz=m=;4CG2lK&?o@sKFe3PrGWiC+^)-%sWy{N$m=5$wQ-TSmgja%
zY^{FO!0)N2!Pxpmw|zLQ2V^F_3~E9F)&T^)_LK3QWo@yIt8#}{Sn$m&+_bl@WC|Ee
ze&Z4PFw3i<5fd36Kek(KUsV|PI5I;0{=bOk6W!$d)^X!icD=>;V7}}CV@}QcryZ2L
zFiVl2KZ}E4<$hQf97^Sr?%W1W5Jn|8WYE9|5ZLG;9ECB_LfwH6u*bzSHrJUAo6V|eVp3AeAC~_nwX$m>boRE)$Aca#4GE#d?kVTgx!3aJH|dA(}+13@d*0!_@poG!lFWnv)jKth4k
zd_d#46+MRjCih{%U7&^R!b_0Y3>|Jl@mn%{23Pp}IVF!dAElt2PewtcJ=S7nCCiY1r>R!VcxH{f@nwI^dXOdk}(10yR#brU$gjZJX
zOngwD$?xb7G-LNfrv4R@M_vqftJSy^sevv0rJLWaxD_bXt1+4BXqp$At?PO83dWoi
zaRBOA4J|5w!?~qHaG(b2I2@n`#vaMAVw%?uKoB
zxy_sD9L!o@jJL6M!?az4soB4O4j|{k7XI5V|2eFbUv&|d<Y
zu=O_+=%WCQy`H
zB&BLK{E{vD)ufZJNUp8vZW5GtSyQWJeo^0rm0)nB_m{5b%hMPQ&`8pOU=NCv1$4R#
zBpC{Th^57LM@iX0U$)g685=tTHcIkt0_`w7g46OJ$+GYEAS>~nw=(Di$=^sR89okV
zFINxtx>^c4Uoo>=&FrQ9Y|$jq=VLax=+za-_$+w3^1d)kb!TJD5(+-}0{q?v;q+Jo
zdWH(ttFLmSAS-osJyzH7S$n(YASJu<$C`vziIXpM)yfd!Lix$1H|)Gha3f2tT|^!orn(GK!@xNria8aLHHOp=R#An(By`z4!?yJ-P&X)TTuVy^}MD
z900|dv(deB(W_ws>M9l5EKs7%X1u&ID?bunuwQo(=6kQ>@{jQ4LHtd=Xb&x1c1nuZ
zrHfIFEcw3|)}o?@Kj~((hyCJ=rC`7$1%qG(tJexL${s+ygJkD0#bO{9?or?yOoJj+
zC3Nu&coS%88xIm|Hb}z%GPl+)s&;n^C9i&?N!@)vXe~-|~?6D`j{pYs+IgC+vG2BT;mzTof
z#lNwnY7vPa4TKM+zj8GdmXAm2td1`)KS5tNKxO@|dHWv0OKHRODI&~;3-cj22{EBc
zM;XsnVo$d)+?xy=EYcFHU9o)CYI6Pc`STV9`gv1$s~LAOmK)a?$SqTYf`Sxy^p^uP
zN1$Qg_grU%lLyO+d>Of3QxY_HH8rDa;hY`ciwXZnyONuP@W(lE)HkmBg{tnqPp5R?ZSCnd{*U(cLY{2-HBhSkwi-fvfyADlnNNaYoJS-i;i*s@-SIM&9xhlEAetz~kB-{J4c
z8}CkDzI-`UG2Q|rH@D3IoE}))T+I;64Y`jFhJW)HUA@jv$ugtTnbi?yK45j^tclniUf5(*|xdy3eHK*wjA-`X~c>
ziCbJG4^CZc`G~iK%}hlH-ltnm!>^kQu66yRmm}*}gwdHAc_k&<@Sl&&%}+w)HUL)>
z$LM4Gx!Bs)#|k-1F${QZ}&_%2_GxAhA;b?nZU(F-!V
z4n9{Nx>%UfC-Zz8AD>s}nMx<#c40meGkkRDE#~p80`F{e@bDfN7uROU!6vCNFazkE
zyj26$%>4X!lW0A^INQoo!1cIGtO2h`%mG$dhA!v`2GkR8{i!`pqR|iqb8ABbqnBt<
zji>M1@Wv2r?bg4>rMg{K7lt<{!bet4y}ulU(0rfoTi|N>nSKN&unxVs`d8t*HHyw4-u-z18{8SlE`3-$cAq9IGf#<6QWB5
zFYhM%&2C}qwpsLG5Rvb*hAmyP1}hpB%_LzI6-+~$n@(6i{nGChVRU8WcbHa+x1JY*qzXXyJ~vo(1sv(p{)*T^5o7xs<-Cds^1~ZI
z87f_>KA#jbW5}qTWfYco`HDHwX%aJrV{<#h5Bq0ruY`z+`a4zbz9{Ry6-Xs&$n$lw
zDAeSXfW8z1$CsZLLq9Iv`zv+V?jpNs3nt*?r-aKTLk$fLrX~&tdmCf<2iy59r(4KD
zukjTQHw0HL)>6P12wj07tn{0yoQSmwh=OArcYZfF8-kuW`Jc50%lW6qZ+~T5hc|J)
zqN^Bna8Iu7%`M{~SQ(DQUbvdOvu8Oa#D@?6*j-N^8PMQ6M(irK$+uiaE^9#
z`Td2xH#p*A*Vmh*3tK&3yr-phUFxWmEm*RMeEM1P(u^jMN{G;O0^ug8Af=TUo}
zp5F}iRW(ZjSeUFxC?Jt3#Ll`@zHtzLN3^am+_hJY
zAwEFjd^a8SO$OESzXJ(jB1GTp^2)OU?Bj(`ulC;Vwzjs`u7?3;2+FURwg{X(OJ9{M
zhbVm6!d=eIyu`S;dV~FOgU4Q8lyFSt?esQq`hnoSqX6|Vm^)|fkdVdH8cB(
zp1D8#vLLOFVSX$9yKX~}^@7M04L@npO#%f&Q4_EWrKq?gV)~!L?lTm`Jc9WO>mWu
z1q;b{A3VQOje)PPdVTK(2BQ^)&hwAzEqZ8jWteml{Sv+=dg^}T8I7@--b_ib~{*z`($f?b-ZAC$50as=6H^k^H0AkD(x2p5Uvy3JFPyH|{&&2WhXJ
z{^6cikAvU{(xueYR5$p(5sS(e!VUN1U)(Gbm$`^2
zbqV*5VwT+Ex9>L-%%+^q?oHzA6gX>ILzj0M^P#wyPuDO`0NGlK4NyxQwpH`Is;Y}u{l3WQ|2w}6M+Kc)3fhlThwM!p(_gtFdz831E9gku_vh_{J?;Wm&Ie6gmkEfu
zYpua3=IL;nRE)>Qf-0X@dbu)JQ%Bn$ne%4*y~*U{>t1lo?K=ygkCgd*_hzf54c~)}
zO*oextmHkTD%p>m_fIx9?`vCIXqF(bTPj}Dat0mIzVPiQ*CJ)FpFPR#c9|FZmN9Af
zqno+}*9EJUuEi@)rkgO=h}*Qcl;yirjc^BpSL!7>xb?ecOiPKKdrP;qug3>GyCilr
zL?`=_PRc||vUj~Nv73Q%sVht9J%TgunB;!(-o2Sfs66uJWQKU#0IRw7ZXG9CE5ljo
z9W&Sd91=Xc@Bzw(wsc3uubute>a)&eJy2dS@kBI{6rCZw#l!Oo%D{aCG57^2sXx^#
zEnMBfaJ=o}saCN1qCAyd=i`@+$qf{p=H45^LXEVQp}90gKkt4Wv~ucAJdIJnyj0`k
zjy?N<$M_YVy4C8KeyCxt9l;2xGq2bA=E#$?Y!$9@H*`Jy4SfgtdW-!*`|t<8M@AIG
zGrBgsKR@hJF3AkB;Ra{?3Se*+&@T{N57=DBt!5}?iG10O6232j0v$PhpKS*fRI=&$
zwUMlhgs+jB`WCgzyjW5}f(PNt&t^|`()V{-oN5ea8`*vP7?XmD&(gCW#65D~tWhyF
zb9nvT^fqT@;dy`U7wU$a)}F?>4!hr2Tf|(M^vjf&D`sld*TZw1?k9(i^S(;V5x>dv
z^bME)JAqx_0`C_yUYc^^wx)4bWB#Al>hkDqO0_(T)!h^s!Z+9Qef5eL8Yt{?SOl;9
zgKsyfyAK5oj+6VnoV+_RIr#)+rPf2$eQE-{Y~q4PPS)ksB?dn`OL?hO1i{20KP~Wy4NBYyC1^$oW&7@WIEZmy==9$}bFgm~#{2G({3l8Q%6@-xS>9
z|D+W}>9#A>`z(=NK~C3$!4f~#y^&QyvyqbjqI{0EkC%6T1Y~#U`!X;X=;94-e_a9S
ztE%qA{+DKdlx7L|l|P6FtzAGnA-dDo8Y|mV1K&f!AFp`wc~ZV7`r3t2cfBOcFHua^
zxH^NR32lGsIfAT#=ChM9#bUjF_+|9<5cq&)b$
z;_%AbMuWCplG-EXN4Cso7VAv%#~ELJc4;u<#F#O=(h3$5yA#})ATRbH+KjN?+B-FQ
zO`3HYzg$-OzirXfw|iI2uUV^}8(U`5{`~FRvAps>rw1P$eoSsqM2euiPSL9{y-yXdAd>Q
zF?lQsocYn=GIyJrG!6HjAFnZ5#JohMJWDZfCSsD?7RJW3j6ngbvDwSOQZ`F8FXcur
zo?VNPM0&tcpb8h3=5i)m^luTa$UB~Ot*g(%h4QC!_b<6rlI&N~ZFZIp>Rxri993_H
zJ&Zsf2WrF(PjseYuhFFZ^&!R3_zmO7j3;tErM!-h@-BYQhdfvrh5XK#!3P{18Um~&
zftFQh|5}4*7&WQ<{FeX`{WJ~(?>+78q)+qgGL1<0c$|HM|jsVS#i(`$FIM<5E|1d6=^0(L|FQXM?A|j
zt^U+JZ|7Ij*W$`(c33&gNKnl^I?>cPU(P8KDaQiWm+ei_Lr+b(!f9mYiMt|uw4D$L1n{~
zDIV@iUx@ene#hqedY(aaQlo+)9}|`gd}k@FUSNccj*7w#FlO*o_4D_SNdrk!uhPvM
zaWvk2x$$nK6JV95RpO1OQk70Zd(Af#^Mn!yUcNElP~KkR9HIDHENU$9q`>~?C8vTud?Px2}wHDMJj30s=V@S;oKzyyI=eb8s)M7wlNCA`2{9k9gsoDW{PCrX
ziwoz=?_-;8!`6-b_welobtt-mYMHAWFRF4yWj@EP;jV-_jzK?BDJvR;fiNgKb^Xs`
zwTzKD)bvHvbw0kn-n^)(1OA%z1
z%fW%bLdln5N|y9SKp2%!5ZR}D+-~`wzpa&Muw&T8E>xEAH(~25YbGUY2%D5L9o!0i+si?eU(3o
z)@c949HMru>E~&YilRndc594_TTtBGG=6w8n0%(&zv~gaA%ryZg%F$0rDNHQ45Nry
z0LlqxCWw&fzDJY#RLyyiBm5$DV$hE`3N4;WBTO)1CyNeIVDJblAJ_8Q8!4nr3
z_xL*QnFkIAJQ>r9mmHJ@;R~-DaS9ZW$<7>{hbav<>lJiP(yg$K#!!)(t~;AubDIw?
zJNL|Av3&Se9q}vJH|52x{_1?rx4FrKS>oVw+QiC}?5vZwwK|e_7k1354PHN=a<&o@
zDC}DaYCumqhNveUy^*YxJ}@eM?Y%|pWep4
z8zT}P0f7b3r62_YDJMg!vd$8Mxnx%8F=Ee=F7!Fa9cAz4UwVl8wj2j1r-j?ZQlxX2
z0I_tv`v#h*DthzwuZ$J#NM_BiBq&X>-wlf7HK49Q=b1)G{Kkb!kB5BMAZXh+N7(cG
zCt4fb0&6l1kQgCO7LMHii8fT9$t|5%TpT$Jzp2wmnFWLz#G7$rfj0nF7UGjkdWtv;
zrYw2JG&$1@nfXR3wp-YjBuEiuRf?u!gLaFW*B&eRMqqaN1BaK<@7B?}xC%g@6&j>LP!3e*o;1HLnh$IC-8IFXpETyqd4D>BuN3`T!=r
zI8nFg5&kUyE%B_8J3bP>-Y}3ai!9`6ip(&l%5Dq{$T=*%d5ct~xxDK^0cuP9X4{fP
zMaDh9Ai>k>>)Us?-Wj>M%_H+5Ak@R#`(dGKMrUSz{~0PjR>Iaq=rlCZsyV=D36+!2
zoiQaoc>l=st=wWGjNE4IHInk$>5EV1Q^x8}y#XH;zdN|d_QK|R|F=HcntyXGPZ2`z#Y)(mc((30T=LxN@LH9pd*#L`5tD+zhg2fk{g2#7tz@PG6=b-9G(loi9x-@-l%p&g)hv9u@9!Nd5YWlWZ~1YxkOOI#7a$wR6AT
zQ_+StDirpPrHJ%1wG>vlfSHPK?AFDi0tEJ|I?b3EVDtBXvU758zsV~axM!*|H^Akx>^VzE!7ncMFUNhX7eph*u+@G>d4Q7)CkRFx
zb!5_JRo5FlgpDsYr(%1laKbzg8Nj0wE}GZhl@RhYO#U{>9A?qF2SHMf6NH6r_(=8A
z5J3I8fP*diz={k5G$=^4O2iiR`dSv&CHvWk?ehz%^YQ+f35iccYn=Rd_VkI|n@=2m
zQM}D*V(;$nWVYl5EKpQEB`pbKWqG=
zEsv<3k>*tz?#4I*H;vMK`?=WJA0D%L25+6ec)k>CJ(7l|i-%~GarX1Xq0WERs=iv1
zSj7?vaGMxab|f1h4IPB5TGXx$IRTHesDycU0f_u=LpgssZt-HC@8<{S$@RlOn;d$)b;suB;W(GIb}A&wr^3)
zT4y$4T29AmEq^aN45A#t3OYrizn?qaEuKVU5s26fI%6wdYRuH->T=
zP~hm0N&3joCJJwe-QBg-idVKi$XM?1W`N>ILC|6o5)5Y$SGVteFEry)4bi2;ABka-
zY}kO}WQ*a24)aX8A1i;^88RL+nYHam$YY8A6k73n(Naw#TWtOc5922Um_XNT#ccs&~H?G|$x8nF|OQ(Ysd?)P-V=m{40g+~%X{WmeNM>mQTQ
z-?==mz(Gvk{!MEkQm>TMAcGW%Plg?oGl^>+>3{7Z(A05JJ6&>gi@?)-KUasiP>>mZVs?GS_lih^vNjGP=`OnY{GiFp6TB6eGz
z<%tpjn!yqIEH-mVMt&<3L$$mR6$ZRvc>5HeTplm+2MO~zv~oSQAYpa)o^Rxv7I{u4
z^Ih)K3b%6Xi2g!cM55x?B+`!lLa)*Tk4Q@Dc?`)eu94VbO8zusBNTSSilsq|7kZ1f
zUYvSOqr2J)c5)gH*_a0gp;q(ZUvc@UuMJH1&(YUbN63>#S5MOX&zo@$a)jcmSe{c;
ztIdoc8Gk8xOB1Gf>q=X)h~1-(ZBo)nBfu8@%k9Ni3p*8o?iFW)NZt^EPea^zKxWI4
z8C=dID2S<)wCW+8{in3tTg*3a2&l*UHy5kl=BPgN^Yi-w7BGm#y28{~DMqzVMVro}
zcK@c&hQC_=_a2zY
z&Wl8++!$N;r%&s6Ut866VjnkrB_(A?)&!@6q+Q@&o|SY=9-gDwj|Ay*JYt14Y{U8
zy9LwkT(nN^R?iJuK1+rSpsclx&y6*?7~AGcu6d6I(rta|XKU6=V!RF5_VRZfi;q!B
zoo6MR8K0PUnRn2=FO{&_*F6qfxp;c6E{BoN_0CkHjVqkW)F;5c^_6E}h1-hmKYVPY
zv|r;x;`o|;XxqGPMq>@V0@iV1$;ILztPcsFrDOD`Pz;Y^jin}#maMN7*O}r+(PIUd^zqHg7dDYRXBD+4
z<0;^G{Yrz?zot()%a;z9N@DCfDj)PrH~RyR
zHha34&Gb@PwpataZUB&S-9&7*VKwzTlMlC
zpNojtSKt>9oB|@~Re;Q8z$C`+zbFbs#dO9mb#q?iN--7BxUZq79x7S%#4|p+uWm|U
zW{s3T)yXYw_FrJ#j+1`A2GdAML#(qo5Ym-MLs%~B_!ZX|@_4W^NE0bvv>dlJ^QNTJ
zVzqZ2y0x+5?D?7uemhBm$Xb83iR;6x?ZY!8Y(EJi`CMwu2Iu|Lw!Y%wNJadW8p|76^V^mo@s8<-Ljp%m4P+Ri}RD
z1psuK^<$vw{}ERl7KT1qh(+Zj`+c2a=ecQmkPyId&g#(GX0Z&OgpWj5A`$c}u;kZ%B5$lZ>
zg8LPcCW|Z~a_4s{swUrFzQ-}UM}&@kEjl9zgbY;fvr%mk@|G?GoOf$$tAH72$;n&G
zlbrnZ3;(~>Vz~XK9KcsmTIiwUI_uAK=?cOH0}P!MKUtE=Eea>h-m^
zKF*fBWR^Eo8QhQDcrh`YZNzty*XMZ+^U)k;@7_^2urrY1S(v$e`uv{jGe!z4By9ao
zHi`M&eFRy9Z`nr~Hh9CZYOA0+pq6vy0MXNFL5zqnQi6n2^3v;Gq+wZGYvlT7MJABgRRfJLt!R0c&&<
zhjf~ial!fZtsD(%(eVD{0-8qeF8TBaL3(k$g^1w23!&m6KTghmInuR)R5^6MZs)?G
zm&{Pss{*^`S$Edd3H;5xJnGxxMI8FX$e(o}?ZxowM1YUa5>`Y_2w-Sr*`(aKn%{DML4%S9kMme1V75HsfBFQ*MeZE2-O$zCKRB-tUt&Vu$ao
zV5Dfum>kFbq%4s(W&6GG-LcR`xQ6mK^Xs@W#c^AmZoOB5Ggl1Z=Ir<{G1ynUIn%3#
zRh7#gOIDyL>AKj7#zhnAiz!4;=uCUs2xW*$AM%whnBY1JtPGBJtev*%`7nhBas60!
zqZCg5wj*$scRCiLUh+Q4ifA6kwdR?%o1#s+XOKY+1ML-U>v#}jO((=1&&k4wD=Ry@
zkzFF;m5puoaSnX*d+bzIZToi*`9ss2byBN+fUJYmS{AHgopt4k(P1H|(W
z(FMiCmVY$U{Pv@!wkbR|8c<|>4fClz=dEfu7+)EaDd!%2slhnxc48*C*7V0i*wI$H
z&E+D}$|}9VykFHvv@*h0AiKtPSl~>W5huq;nXOk+qo%`$kT+xJT`{1<@S3!$
ze$Ja2yUD}U%uNXz$wbAE9G)klrh*z49G!X0qKAKu+2t~J)2kvUA_$+-MMFc8CHGw3hZTUs@M@<&Ed+NMLc*_Hr
z(W!FAOSwRwsJIG(&$tX%B9A?O&($}00xK@J@tAV(wy$jQczf}$(M~GhUs-bzqK&qY
zSz{)nFBWw9C|$fqrGJo9NtOOhQRp47@d>wFTA})R-gvDXHyIkdp&&&!HsB4EVYbEi
zPBIkYAij;_(^*Md8uwRsv6YCdsAs$4IMI3--WI=V)L>gn+h*pRPq6|E$ZQcHFk!Lx
zjV=h+*)Au4mc+Q*t;OxY8KhJE57qeCr_)cQnikxIt#pXM~<#RYGqq3t?sam_-SU56@ar^yAB*n
z_c@x2^*ZqJ%e|6Pc@_f1X|ArW&e|z*$<06BpZbXI9O`23Zk+gJ*1Rqye6quVoiH0H
z=-y@gC~SSM9xAJW3@6TM9KeicBY({yoK_(8p~-ig94Y=u&Ec(~5Us({mp6Zx^~tnm
zM@}8SJb`ki*Jo7WPsK~4nrOWK<$iEdd&sC8*&DW=#~j^WW9uni`BuDc<^8+v?)6(J
zY=j|w)?t+Ct4)D4`#+TI8Gb9_UNa3PbVtOd+m&yw|Ch_48EbP`ZS|`fKw|D%93KxW
z4!mO}uM;P^>p6j1)5FK4&JD`KTC4At^-WNMLTTMmns4XA})(I&A1~0;h4c`^Azm;4=POiM{~5GV*q_a
zps?GVqA{LW&>IhA_F_*remuEw4TXK?8d}lWJP0q%rmJ6(n?t#1K1RM;|F+?nVx3i)
zT8SpU+7b5^ukEb1_DpY~vl(7!Iah?;_O!OYvRIy!iq8wmm6TCszXo!o9U3y!AJl8}
zdp>q-$W;gpa5)*_d7i@z^yuyaX`)j09A&Ur%>+sMi;L|t`
zxS`h9>A?zMKd;5mPH0wPLd{1++}*tcV@sY>ois`
z*1at`{6Z*tESZTqp5BToCdds7E2E{rTbAciQ0aCjdrfBF|B8&
zv#e2`QFD3g!bj9E*p@9v5Qhb?d3@Nr=EV7fZp1>laUJLdNa%d*3UiAeA>0f9{RYKQ
zcUpAOj;HMkMv3Q`g6}Uk-gb}VnqVEn@gE$G4l#nMJv=a<&se;D1@n{44EYEd2-;^O
zPKOm=9=~gONSw;F#3yt}xD@SNdk#z3g#l}-G;gM?x3?g8NG?qdTRbe;o5qUtPc59J
zderkf28XcIS~zB_@MEFNr~L63JS4|Swy$?EWVV^HP)f_ncmxEj-@MPBmRbz8iQjMm
z2KkMLwA5K(bS8YzaGd;Y?e5yD2M4;~k_#%J?!H7XQhwVrW)(iYEO&y^LNLF(hyA
zU0dZAZh2BKw~x9llIs-fC`ua08mvtmpT%$X=LPotg=IHt!C;;a)ONSE=6s8numq|#
z2}i}IPGH@7gON1)y_6gIwR3lKUTK(*C^aBsm#eC$X)p{L=s3jdK@Yq4sQE}APw#&B
z!%}QP?eIe%kMi0+>&V@2~kSJR>;`Zb88afY7GH!z5|d&+;4xJg2^Y
zup&=XL|a~7253APHT589?6s`)WYVm_$}4DXC1rtNs!n*k|7gew?@lvP_d6%OEI
zFbxI%D!nmDk&!r!X=_VJHGh{pX5c$<4&^}P@|IiM8Ctj~BK_$ApXK!{wO4CZrb`h<
zwesZz(jF2-lNi^;DV&P{#t+WEJA`%R;%U*F>|L5>z0W5N2L?hW_m)_@+15`yNq
zb)GQiitLPNpwJU4&iVSgQJ192I%@-IvkcwR6lOAyh&vwI-N4V4y1X2}X;!J58K
z7Oh(yN*G(~%k3?CtarsZKJ&c+B_d=RQ=p1Qrj(f!>w@NP3;*0XDi3zhc)diCfu
zU{i4eWedO!KhqKZk~fVFg#hjqoLk4DE63Wht_}5d0b(mH`^4
zI4p7Vb{NI8(+6Jt2X<aNNzY_W`Tg+jiXw*}^8C~}E(Q8R<_450;O
z&d|V<*Ic7BHj`A~@*Pz2K7wGGOI9?)Y=$)*}$M
zVKKTbDils5uk;bCde4Frc4e$ANrpl{S+q{3h{ak)WaAyI8}-rZ?51&^T~;G>a#t2)
z4a%T)%0~@Y01lWs0TT?ki
z9{fz7cZ&Xgtq_!6<8xVbY+VDBbvPb`udA?iZ4s((tAmR+u#4&j2Fc
zX4L*$-1pr*z2bmV)Qk~(2m}(2E-7VI@|reOdX-5k<5X(=rYl8}ksC;0B|l*@6fb^y
zzv7(5If)f&zIqP3cn93=0G&Mc>zeDbA$)hSxYD@Kcvh`eiP~+YA^G2HW1xnw*F*n@XK2p2K
z{s(nQD{2(6@n!Jbw0A6Cm_Nol@7dH;5Or@e8jtD4A^NDxo2!gF{OM0qnt7)!w8Z@ijqZ%W|V?P
z%Z1H{Q4y09(b4GX=-0}wJ)WK`rVTAGta@GIpjO9=kJ46o?2rQPaw!PFPS-B30d*DA
zci!}eBDyFN+{Cf*@oBylhP~j*&ZFgo;=r<2?Ok1o0MNHj|2pB-b=$9`mp^7_BOVmF
zNXNhlx|*m!joi;oMn)#d{>I8usCPY#<%&sLIV%<9*B(=*%T%Fg
zxPMMr_RI$@JAwwIzuZfZ<=^|=`~{r!Mf7mJf#g>Kq5>Xp{tL{&*U{^%QQc@8WXzS_
z)VH^{t3m_~?CCG4tJ9>o*}8s=8t(H{4*e9e=0hoA~PD=BNu@dV@%4
z6*E;VI5DY)VAc#8%8AM)XE~J9)(&FFlh~rilY6Q`@^qm)s4t_X;W^!7{=JaePI=_T
zue9Ma>7cDxcE9P1F%62Kxzg>Yi_9uXy*1=a@4vYb1qPE)7a_d-mAY+P*GTP
zgsR}I3@2&j-#gDiY=bokDx=b8LQTOQ!f|bFFGN(ADa=6()W4}Radadya`N8p+r@6r
zCNXySI;O-wWRu%HJyKsfwrI`AA+Y|9lhqj08~UFBWfIV~c}~I6FpEv+DYI4c8ZaMO
zS-ij1dm-4CsNF2e)kfB{NjSKnNYTQQJ09vi8iwTp-8OA)hT*`!OG
z(OkgY%QX+*4+vQDb}`sH_VkK;m9zS?
z$HyJpu?Ppo(4IAyjIrkD&nbGEkGMNUWHn?#&|mVOg=(h->M0)}?tefIoZ2=YFM(y{
zNpk%lye+1=Wd;on#`5VgzOxVNLtbAONtzY8DR`dvLlypDG6UGUCj=yFJvbky_&TRS
zrFRwbDuCn$8+ZEaQB1k{9+%k+c$^oX#a
zu3HS{>+y~k(tzs+Lq{^Aa4GJ&1fOa#*9N4XUAO)X9Wfc^k|!6DFx)0fM%d|$OS>o*
zS_em+8UqANe{Y->3+2zHXpXAQ=r5nBrpS&9YUlPZeQ2^tj5U4ipQRql?6mHr(1gZogK3ou{B03gY3`Y9ntt
zt@OaZMP_IZ;(^M+#^CGtX4hjeCqn|Ei)Q*5At5q~B@H!Qw-(kvOs}XU#lDYetliX~kRLOJ%7K}Xg=J+!ZqHaK_jeTq#6UO5
z1&j?3%JE~c<^Cg#n4Uc&iWyivz2tuWJQSoEt$t^#`wGNi>N7;=fB%9EG!nU=74i~0
zAUWk2jFT}i{d;|D^-Wio1avtkKDPuV^Xy9|b$V)MbFnrVA?`s_yF%4?jT*}D-%B9r
zi1KD@5B}!g6e8f%^G{N7&B3+I)=p5+fcBnt4#;a<$C|e;Vs@LJYdSY$ZutSt#7oRi
zO`nfB(=mh8ucgPPlz-3Q6stkyUh|Tpg-Cs3j{%KGQ0w8B=fu22%_(miAIp!4WslNR
zCooVEWJj;CMvMM&C$zZGJrGX$=Sa%I$@NK#U3LaJP1Avi7#d-;Efg%Y9Ob&({K34|
zITsMFKYl;}?1F)b2@Y(Ij{C{o=Skl_=0S%COEvC(Bku5?spU#^anjm=KMFT
zARdfK?SllSDz%EVjjSey%^QNeKqgp7nmSGdZ$$74ACswnIJGk>Jae
zJAR9yTBb!Rj3Xz`2KuV@p++X|#}?g!x&rL>0YXF?(DP&Lk}QR)0h_#SI;AA>WHpVi
zMZbb;!Y8cCtgB>xGbSr3CZU0ke#+V2$o5U50VrrejMUEI
zp*$^_ta!dDPO?p$UMoA?MTp7_Hgk+*L;FTtdkbI+KyAz;H0P6~1gJ-aP@6axq1aYn
zxbcA-{*d|!
zg)JQMK-W5
z5m4tU!DEwO2M#yJr*KobMCF*_k8#~0rM}NL1wt8gZtZd|~27(cQGPtuyX0VRWx@*TM=`=j%1ALN^@c%h;GYcGC3=
zt~fA>PEm`lvODMdFlz0&<~y55O1HF}@UQ5S+}V0(mpmAJ;(xi*-&X*kDEUXUjin8zD_+oS_Cf^`XgT;V+Hy4kzqOPOVOFZF8G7DB4Pah6;$Z
z-ppOGRETvk_@R8!W}UA8g{|A;%K3Q-u0&EZ_&C}?X6=-pyJ`Fnixy|PH;b*(xe%g8
znvby=EkqZ$FNM`FAa&1;^4JGs@4_-gQc_}g2mp=07)P|;nG4i)q^bvL;{h6d6Vw_
zJV|_oF`;euV$Y4bdL8C=Tdm)`1|nQsXqNP^VuEw7c6J`TxHlPv(?Z_5Um
zTvu1kIx@~bBpwD6hf
zvZpfip*=?zvyX1G+HBn0s2(I_+@QZq!(A>}6Hs0}B=n+u$GMl=`q(+i3buUO=3(IH
zOOC_)f(WQ)>jSiqRKgU>r>aVt*w5!J&tyMcxiY^OK^tcIJVz=e4Y6ic!JA)MNdVUK
zaE!*-pCX_k2i@>~ky<^>Uok24!Had}rlv*+>a0P6mhu-qd4i9k`!)=|O154~aguB}
z3E!W3;LYb;jj241^W7klKC2imVs7y$U?Z#L9cU9{>M+pBzIp{qL{<-wlq0FAmZg-{
zg$}qK*`YN2=CR*av?V`)-Aj?5)a3LpmYD(mKvDvOj&ep>UXmxaP`O%Q5D4Ba#PF~-Ec+YZo$Y5IKF6-{F
z@RmBGc`T}&$%%vyx!ZRYcix~tJ4~HmRX)-iC9&cF
zbLIi5&dwj+l)##1Rqwk`y>xux4HK3epOioe*Vs6Wi;dN%b}Ci(@e%>(-u9-y_B9gA
zYha8!_A$ILdV>f>Z1)2X*WS-(7G95|={M$NA1C4Y7(2isWBUgB9K#Sa@W*~0dJPR03IK9zL7{8udbJ;-zj_$p7b0d}jMinIXh!{hz2dAeEg7evQ
z2M7)$!poxk4O0S8&~Sspzb7j+;;5~B;eB_lOGz~XFJ&lgtL|Mnm7ogorF2o+kBke*
zzFUI!!P8JJW_+PCwDc#=QF0M^bQ&AtfJD)K9xX>zw{`xh(!>*m>Oga{+ZPiU+cnjQ
zH8UcD3q##Izk!Vt{X=rDXnG?~Qla>ep^Xai>@TqEEC>oLt+#>QNEOpZ-11~q&HQuR
z=KCj9WfFunSJpsFEYT`P+J%RZIEAOB7{(>?@DtU8+fGd_;b{nLp_v8msar1~y@z{k
zBr9HD_T+Az8|@3a`QKn+bijYM$-kckni&nKXPH#?^iVrd-o?>MLjKLKXIFnfNA^W0
zH>`vEubT+y<^uw1G7lSkPpPRE?hR4>4Q7@6M1K~M!46`l$*>4k`$#*#PYB~k2;97T
z4#>RBlLs-hH
z-cjr2%dS7y5duZs-KK;Jlgj#O{LdpF;Y5J19v@f6-O9Y0LzHj61|)WxyoyROcT;Hq
z*;7{QnfAy3A$xD>uQ@lh0V4JOWp`gvBJ}`~?`+($3d_L5yhE;p`1o~T>)bPo$ttU>
zBkbzJAbFyJ9C}Yg)+Q;+$)}+-Adx<+BV2ovQ86!?7a)$@Fm(~uCuIaO(ah$r@*Nfm
z(!g?ia0q$7FU}FJCs&(sA)^azT6+B~McZ?@*+7<@+9dDY%IvJorC_i%7pPSqQd09a
zws!AfehJC#MBlT6&F7*|B4_D=7a3pV4Y^#0nkY5lgLnDmQF7lgw(+vD8TmAMkKI(}
z^q%pqTiYkgq1v>+Rt8TN+Kg|W>22KLayi>xmI$SVYJ#c`kPfX+wIKxyOZh(>
zKP?#OXm(Ss*zyI;22y!I#`txE1_LnQIzC|KZ>d3|7ys|`B}E_>AOxBJc3!6h6!J-o
zYam8_HD~K^-eCK;hwwvg!slft00@Gi^HWRrJ8P)JrCPvKjj8sv-c{ZHr7dw^QAv>_
zJG@dg7$;0qNbbYqyT-S%R4WN5AqgRZnXe&8C_zX9PTFPC@d*in&*hd!#2Ud+Lc?Dz
zFr9}308kHqJbrUqKt+ZGCzvgo9ivM+9wO-NXK)|MUeyPV&qyC=)X>Z
zb)dNm-kk7G*%ofY<>^h>+9WFD!A+lNy>I`mvHI~p<$XgoeQxUdqIv`MDbe+2Fi4d^
zoqcuyQpymVAL33=wr6|ktEnu-qGN;Cz@EdIu!+d^5+e_1Pcd
zK`MfM!gBzHZEbzG$;aCg;BtTj(64E#A
z$`^%GfyYJ}JI}I5EzHe(c{gO!&?1YPxqfKgygVrcc*TPqCY#?le0(KoL+GWiPzP(=IB`=0r=s9a1G~EAuzmRKqz34A
z*x5OQH%5E~r$m5i47kO4#A?k-{N>mzU3qXcwCliZdal(8+zz5+yRC4ZlT13aQ1c!L
zic;agkj|eZ-3Il?Gz!}9Y)4PaWdZ01GVOXA*hQ(;WKe6Yk1~OHc#%#}mkz9apG`JX
zAgJ_jt${-2dL?crC3fKtI{&*H#C>N7haG{Oh6@PW9~m2)S=M`mBm0}mAL-V3-U8Fg
zfI_$UQvyADB_(niSX{?sDC`LzEA~%2g9F|xN@}FRv&bZZhTdN@-vsS75&)kmm^F)6
zHW~GrHwt)iB$EvIy0Rce1Olv2z5JIy_||+3@N8Q$aUnRr1#z#gAo(VX4nJ#P`_vpX
zlwc>+>p!q%&DESpJBKYyXFvUHoSX>$n?e|$m`D^G7l(`l!r`I-{3|oWe}(@UgnK`5
z6$d9pRW^|E;rT1fW{6Uf%kd97#tAsRWz`p(j*gCckgW}o2DaSkGiSXjW1xuu!|M|=
zyWZd5MH3Lz2np82#&EV{6{=&Q%49?E&S{fXpBsBMGN<(3-A1_hb-MjTDc+V45{d**
zw6(!jz|q1OkM!*?2AXl_Enw`Xm^Jn`jxd}O0`=p_h`t>fk@Szn#j2s=*r5#$j+K3m
z{E3w@qnANug7SjDkqV+6YgBqPfgZ&$`Qz8X9Xxh5(y|o#>`?;|6z7xYUCN&^I!Yr60dcK%zV~a{Knw2TXqb4>Vs~x$`Q^o_ze}yvEraBt9c)U#~x?
zO2HSV0=bJR#+1h@3{*n4U+Bd|1JEg}mAk!<)p1TwJ9jGq*$j|-;LrkXwBex!DNZUQ
zsY498&JVcYv8sKFi;tw*7)Nx`=Fcb~-Dy=@;0bZ5L9FgW53pD?-^h>(lTR0qA5o8L
zL_;tL6s|Y+h^2dYXMmSKS!o$QzkSMoi?6*;p`}a|CF^le`eZC2EA9n%6EHU3bbc;TiQ^vB2WK%p($3M*5MZqibT~ve-CltF2pB7qaIUJ@i3w0Q
zwsG&DKf*=Go?_XOPzoLlhR1EA1e!aqS|4VbahI#bC=2>=;LoVBpl^HZkq6dL7wn)F
ze%d={P)aJS(8!uwQuK$V84H?39sCoM)0oO{l`mDwbYwtO(HyImO`Z*<&9|dT{fIkr
z(US<57fF`sQKb4I&zxfnB;Z=-Z3WN|A2;7UOHOt@etnMIYNrQev?b5C2ag^J!UEG(
zC>L5&fcQDG2rJEs^3Q92_wM0_SYTe9^qiY($_5+m{msz(sq}}jvjG^hb|AlbP`g~X
z+lE3_;fd6zvy<6tR87k=3ug5A{1Mt*9>K+6b22fa&+PCkl_K3QlQ4z);c0^OkRrBO
zd4G2g56t#<9mLnS;?BMqanijFPs{}tSk~*FE%2P1HrgVeqGH8~%4EITKaJpd-Y|W3
zGTPTb^%mVXAaLd!9k7>I;fU?V8$-31SD59fpHndi4CWD|K${9d`&5lu=R3L5RJ~-q
zDkLa0_AgM0A5E$U&^Vigjt&NhtoPRsnJb_Fd}njrOx0T#S-B(tNYy?~lQf9!0E&!o
zg%!$!9#BKCx|FYRgM-(Qnr-X=JEhvTF3@)kw+>L8x?!sVKBRA`hm)8M=+Q_`#L}!{
z#3if$Y|R{UfD)qql?X>gF)?R4X>S2Ve{gp+9GJC2rtdCwjja*$RUvc`$S@nk*tF!vAjqJTtOpB+Q;ppml8bmW5~BLpCCHT|XMz&HRLA{1q6
z`GZqa+GW|6FC=bv)#_~!)JDFWssHKMA69Fwlcp9hlZ+tr^0o}I(}=j2TA_<=%*yh)
z@2cOpcO_VQZfhI#F(k-;Z?Wc!frReSExZ-a&%5DFRc?N+7cz6ii)Y9#-a5{1*Qzco
z)FhWs=Ey{q0c*0anwL(|eCeL{#z*wv)MrbG)bDy$L9xi|Yi=r|(tW%`#A&$Ky$BVR
z*Y9>T2h7eqw(-g%=&1e7rW+ku^%I#DtmcKMSi$0rWBccQ`;Gx9$9)0-wY)X<9&5p^
zx4g*L*UTr^AA<=*&NBl=HGbGa(=NNc_~7Y7;)JSTp%wV!{*xRExomm-^4!4O~NBB}$Aumn;S|jc46sO8REih3#*t8HRt%;Aq{py;-iFG3CEL2+4wgpvn
zd*+ziLb|s%ix0$OtOU{3q=lcSp|`c@QBDR?Ornhr?wMn_*9iuSNgtohGgql4Nyg^o
z_ugv8#Hfc)Gpa1M;vRte~QDl5aJ8Y%>^&t8wQunyHd
zEa?#yrP@ZPV=BJCG^?qF0j($>p4Lrgm4?{&$0)y`qOTvnuygtVOJZbaXD{gPrlA_@
zJ-W!I{>yrw!q69?>{!^6X9
zikQrmvd1Z!phW~sY~R7TC6MU5{QklB$U{x@RDcolW$UyKhnY7G|J;9u13s0
zItJ59LePY$;)_0aX{XWM?I`TP{N(rY=t{yrTJ~{g;Gs#9VHkfS`}q;iTp~$GwmObs
zg%jBNWi`--T(KhhSA}ZV*$Kq;ZhjU(eg3`Qs&^d0Y>koAk-}hJkbi{kVLKz0d2Cby
z#h~}gkgzZ~46~T{8YN3V*Xhf*fEYWup&l;^|j@~R3z@jsQVThX!Px#F+lu#pTTJn)~93~GFt-o}|
zw!F$F|9x={K!!^fP`?B>3(L~s_4nvjcx1+b`q+I{*_@Q$<4Oi_Gq5b;Gl@3&s5oKPaXYk6M!dEiwu?H
zNeMD6&g7;9`t6^Nxoo$6|$C^1=7
zE_-ShA_zAelvE%l#iFBL`XH#&i0c)*1v1!Z2P
zEs&RVlp(QJ{D)qJ0TA)pKxF7-v7l(f*D4dX%4kTv$C<(60!Z-dfG~?nNh;H3<1uUd0N*YL}c6hKdne;}{r*
za%&xd?(J(QoB>O%ut}$F@4kD)&+I|vBx4(kl{o&3+pf2UI$Z|smF-oPr1ja$9GdiU
zn%OnzY4U&AlMWKiDf<(LQ2RbO_Jpg9APKCJ|4YE{yDF@I+Q*A!%WV6cI*Os9rqplk
zgF{2dfQjAv^S>8vD|{{cG>Jr9^n46_^nCOtp4~Trd+##IiM9v5fClH?wumT~h3%y%
zQo!26QP0|O+p(x~{Oq8#?}s0xuCPX32_4L&;xf`&+&^^vLP;$Ux9I|*Zl0mLFw-V;+mMN?ij
zb}gzZ|A$tXHor^2E9pp>zPd)rJT9(Zpbp=6XH2gpGdruq@E`}L0X$JmV_T}@72CHt
zJQYQarnBZ5g@;2t&z|&uSU@^c3Yu1u*zqXHl6sZiQhMWrmiMj4gf~|P_*=)&jD43}
zEbwO<4SzqmI6gE!(kZWznomqIj-VuFz*PJ&AAOOk{FKk|bwB`6aQ}mS0)gj0Dz35C
z%HCg<6;`rp>kNS0O_zmHSkF`N|FN<7vH+aj3&~-j()vk7Np@8Z*FQqKNDZHqnZ>
z<6)>M7k#=Z?*GyB)j?5z-``6wk|NzDxpa3UqNFs^-5?FpA<`|K0@5WQy);OJw7??L
zEdtWd{d|A(%89X
zbhC|Ivbx6)mmpO$#SzXOTPz`UdkRw6-Kqc1Bs_Gu4CSJI^ys;=Fu=;^I{jzy)aL;z
zoo;n_c!;_Wc`94i)@Dyk^mAn;Tfd^m=|i~-ah_b}Xzj+$8!n_N&(mJ$Tk^2)uhZ%!
z8gh|nMOfl#M;mIfFjTMjiDN8UzwKq`+C>e#VKS7Az4cB`OTN7v@GF-@2yD(KlM_j9J{rz3S!a5d1tZO
z&)wJGt_9krAzbw>r<6FJL*#~)x|Ig+&>#wz7nypKtIi9sVSo(LDX{SwQ-6aFH2v*NQfI&O#hh
zYOR@wd@D{v<{`PEx;&BF9sjx+im;4uC)lQX53UW}V#Dfmm)GcJI&O4?#FUY=P*Yh;
zwa)@TOdR~sU;7FWzwX1b
zggp;xoo@EnA`=_dm=k`)_VR@PJ&s9;8L45qgL<9v?}%}q36fBoQA3iW(6c(%-3L$;<~1oGvxC($5RlpqfwO{2IwQ8TxC$?-MdRQ1lJ)0opcP?~(f
z>g8IChvA2hvcthdNc5Cv$i{D{$Vc+wt>i+E}qcb<2Q?AnCV-I
zQ`BGL*1M;fk*XLw!LBnUw
zZwLo|P?PjS>2sor$a^^b)H{D0PeM9f)$6civPuheDhu^~tPg;uh+Wo@Bf-9ieD^|I
zeEGyYV9B(8v6hhzY=TPFJkR3AcA&wdazN7I?G}hy!s23;5mxs$O`Dn1dT9O&M*@-t#6B-4^eI
zf9Tv+53OH6@Hv&_>X(=h0sxr=gvJ?DwSM2uo_NN1nHL}1A7sD%k2EnvDK#jQ=Y^tmuu+DjG32S-Vp?`Q3gK5F
zHm1#8D5u-vhWh#+<$HCjFWlm@l9+_WM^J+8yA$Soev14OSsVW0WTg*PnITXvNv@2beK~)+;F5NBc)?%v>nr5?
zrTh}P&-|R=p_ACs40Qx2x66jQKiQKGqh|^l;ATpA%ZUN_qx$-g|3p9EmBfUF;eY=8
z`I!}mg&O149tZvu>C}6}F~n4wbzpirsf96iGZgfV{TCS|icVd*j@AF*UX^agiCU5@Mh$fx>?Q;fy+c8cguA8`xf)kgch
zi7>mXKIJuNa=r7{=Z9wzn`oBq?1jsBEN1Bl^X_$_dpaO@;f{6r{nhPgN6o`hcJQ
zlZjkSU36F{M%Q9CI>Q?tB^;Bmr*C1zJ#LgUdtbRic3xgJFB}xE9Q~(lNf{Adlj{zh
ztf(kjbq20hN8_%Lvb+bgj;o0ScbWw(g7Q!Y%~jZrQMp!KYpZYR_92Y|?0CWgTka3N
zt+fqsJV@qa6sT4F99ADgiQkm1jlJ!k+espiT5d6@hS!!iiPn_b6$+X%sIfNIf6{G8ud1$a6!A*&(Ts|@!H()N9O>}^q`U=O!{T#Eqyy6M=Vr#w66T97z
zodUXOjhvMZ%732P+H;m+3dI_RQd3Tbg9$uk?{_q?Iv>Jg$v=#G(3}0r+v+R)K$z(i
zs#Q5RW(tz-Fgt1;_{l>^I*d$18Y8{hkFGoat4R4TS}NNf7iP!0#i)p=@NU_@|HW)B
zS`3)igIUKf00z^QhL7MQeHt#;>1$|M%%?mVe$zb`_IK9;Rn-zmrh{!TYVf^RM@p+-
zO|jKJ^->2h9g>!%cXsB)QauOeNuCe(sCi-G+Fne=YIMZ(Zm>^Wk(&JgT+Dd=hjxn#
z+oRnm8;|NQ5Cw(;F>MHqitEuJGfI;XE^G+HfT3W{9&JL)#552_I+?)3__7x)xBQF4
zkr8nOn2&MoQ~M^-#<4-=n|1`Lg|r9TYvj|=;QZSW*uA|8;V1vYyIIlt1dH{Ec&^*Juhe$ybMLHNU70H8AfnXIa*
zF?Vnno$G3B&;n~;!$F?@KcK^bibA#A1E>HccK0ic!NSe3&>^RACIF};}0-)E1zrN#FtBBMM#(4|bTW
zj1BT6!LA@}9fu?F12VmXl5(v5a^`zk*}5d1v%dp#h2xQ;jOO=)4=a!N7e*c4?OUqL
zVI*!_`{%Ub(#fCUR)Ua#fgi5YZx8y%Nj=aH6_^-bW{rN#^?Jx04yb!ggDHvRG5XD`
zFk!N@BgD9NtEl8n^@`-`y0e*!dTi__P|q<(boH|I6L>Uj4B?!s!d8U?vr03;Gh0*J
zM1{V&G`z0_v?FI?kU3*KBL$fs8L)qhtum|;r4`^(`Y2BhNi2Aah{!~hk!Q<|Q1(k?
zv^(~;G^jA`UVp()t^j(;ZWkeTIgB19MzD}krNeu&J@5HwgQ;GCo4mG`?q11cXFnp;
z=9MVm+Ugq`KvoX$;=RSlvr(vB=-b$!6BO*NrtA*(Cwzi-_gn4Zx1Ep$5(!XBVSo1n
zkwH;|KO6n4SN+tTKPn!{kQ7g*F^+t~ZULRQxt_7)PUZ0~qiaquyKepq&hE(Dt1g;v
z5)eJkJfX)t@8d1&|4KlskLfV;zmI3htZkD-jOSBpk&S73618dvm-qWuw@iFOQKoCI
zVzDC6>9F&@U1Gl984ucQS1I#~D%veSy{&id$2?=p&J_r`v6
z`zuK13&kL)sHrCi>lo$x{;LsWa$$DgAu_`siSuUX`_E9MgLNO414I=Bb^9;MjFAgW
z7G5e`-I2p_m{LpacEh<@r})RW;pk}T!;7!GLs?9aNzuRXd`nC}8|ZG7%y1iby&oQ?
zVS4zTW^{K+BaT|2hCUD#Xw<%n$Sj73Qt@Jy@(8h%Si=)Hg>_#?>aUo6?Ws6-wEC=+
z6{*~vaKQsh^L>J254w3xVPk3j^nSk=y%^ADAN;;|`2E1Es)QIQ_3NY-&BX=QRUqUA
zSJtu8RhYk!fC?YVq_ZaunwytSK;u&X^z>7(PNRQ8h3Od
zq+#cl7(E#AmmiT$RP)F)t-S`cmxE?bn@_-7N0m9P&UcqB*XmtfCMS)QVM@u&jM^av
zJ*jc6g`FMdk|Xh%&wIy^X63%zH?$|
zXgF-i8$(}_z&bIn`TGU*#fujq(R(O;;+3+G|#G`Vb7p+z?lR23y)yO@WO581G9=-O0}q3+XUDY(u^WY
zXtBA`X~Qjiz0)^?c@Vy%VAs
zQjv2I##tHtmD1Oos>zoz!3{&@35Aj}${8G}Fs0<_(Wf9huvKQq5Q0N)Ocrw5VXxHq
zqp|mK+=Y77N+Pf3-G4!
zw=JGcukG-{)-EwM`fHS=gsrxEM+nIs;C^}8%k_bd;9~~6Q!-=3FSr3lmA>w?)tsQz
z%3zE4$w6-Qr><`$>(fR!!*;^v`os=SSLjGvG;1p=o;$fTxF6|KV5^8r2T3)_;2X^I
z^AGL?sZP66FTEf0LKh1&)9-&~=Q<}R4=?(N(4!FNq9uP7OyUvOR`^k{LcT-@;jVGxn1%$99!K*TKZf?|?
zu0pkpuW|htq{chb#u2G%xN5|1435(vK?rp@BhAlyBPf)ZJbR-&3zykD{_%l8Hqcns
z&4*lX{UFZ3?hb>iIxgr`T7AdtH?N1kC+O?iXu%2%KF?O8%luxrRCgtjs(Ck+hMlb8
zUHDw|P#|GXa$vCEa1Hahi1B4e?KVA*o6e%zRDl(%h_li*|c%Tnm_Lgk$O?$s?w
zK-V)c=mj5Ov1^En3{d&%)jCPz)=xk7*ZW>GQU44Wn&pv?r=>odI8;03eaC~A`6XPZ
zajS+Hxg;@@k>%&gEI$r-h5-bTxAIN;R1Yj~NxwMbhb8na9Q1=OnGv8TGL?=(N-mJZ~-!
z@uS5>h&&y7dM34x;8o!?`xE3m$
zXq9Z`TRNsxyD$vZB=_Xnj>s-J4?UxVg_^p@V~HB9-xiR44hQ%{WMFrKB`;?BrrX(f
zKA)Xx-9rjZgu5Dvt0x1`GD-Xq<&clFoXO{JC&+l~ayqk#Quf#we)bUw&3ootH&2PVnG4s=jQc!HUu~cj2GS)
z27};P9d&MxHV{mIbK_@qi5phF11z=$Tg%YD^$vD+BV)hp_{pP&HbY9o+$n90hu#Jf
zWoHYae5!1}{QXMU+xVnWfhTf&+%RMrB{=4ja|qS2AHYWWzMT*opCFAkl+t{*%c_?C
z2XnQM0+o8@d5XTHamPpJG6AdFYuEF=NmRePA)F$X2~Qg4p8Z#KfR`0g%#ABBTeKl1
z&i>?FWZd*B)UG13Q<^)Fhw$l)tn#7Umf{XxBYI4A
zc#=GQh&(-#%ItegNinM8IeR)Ft0z=|ad3!S)&@RRg3>Yh-~XB0IlUW|in#UuN#a_a
zru?maW$+mV?jPnK#F5m&NNSeqt(e4wl)nqtbEQh=#1gM<3O}yhRa-ZH1hW*J#G3dN
zU6#bW9BV9W%>*g#5woBVCRqmgQ`Nm^awqlpj>6>j1{Z<|`p0ZC{7KC}>xA!$0SsD-i&=>TV=x*2M@zPGnDg=#K
z&lELn2C3_w>yan$kH|_iC8gw|jFBT>M{XiYO(3Sy+l%ENXxs3Ghsj4YZ{u61yHDMw
zpJcLzjq`@>^X@1%%%h~PDDZWR^J+J+#EJE|^Jo~hyL-dLM;?M{`9d`=g|+sg}I
zZPm+>s*n4d#~uM_6T8=k0QtN4RbJ}NB-juHnp_O&Pp^TR{OV{?^}oqPUmr--LHT4!
z`>B@Nks}roOCN;irZao;G1FZ6XiPjGc`~gcT#@QqkjVnx