diff --git a/README.md b/README.md index 153fe51..e133bff 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,8 @@ In theory, you are optimizing a rapid event related design. Looking at the outpu Spoiler: your design could be broadly characterized as having the features of something other than a rapid event-related design, although the design may have not converged to the ideal form of this other type of design. +The general type of design the algorithm converged on was the block type design. Even though the output is not completely a block design, there are chunks of stimulus 0 presentations and then chunks of stimulus 1 presentations. + ## Question 1.2 @@ -59,6 +61,8 @@ Speculate on why the optimal design you have obtained is in some respects unchar You will investigate these questions in the next sections, but please answer these questions before looking at your results from part 2 & 3. +1. The lack of counterbalancing means that the algorithm won't worry about predictability of stimuli. This is why we get blocks of stimuli rather than alternating stimuli aperiodically. I would expect the stimuli to change more frequently than in this design if equal weight were given to detection efficiency and 3rd order counterbalancing. +2. Since there are only two conditions, a block design is efficient. If there were 6 conditions, I would expect a design more characteristic of a rapid event-related design because it would be hard to get the blocks of events we want to compare close to each other sequentially. # Part 2 @@ -73,10 +77,13 @@ Also change `exercise = 'part2'` on line 20 of the script. Save the python scrip Compared to the result of Part 1, does this design qualitatively seems to be more of a rapid event-related design? +Yes, it appears so. + ## Question 2.2 Are the differences between this design and Part 1 consistent with your earlier predictions? +Somewhat. 
There is less alternating than I expected towards the end (so it is still somewhat like a block design), but there is more interleaving of the stimuli towards the beginning of the design. # Part 3 @@ -133,4 +140,5 @@ This is a very good thing statistically, but it may be undesirable psychological **Q: Does the structure of this design seem desirable from both a psychological expectation and neural adaptation perspective? If not, is there a parameter in the [src.neurodesign.experiment class documentation](https://neurodesign.readthedocs.io/en/latest/genalg.html#neurodesign-design-optimisation) that might be useful to change?** +One parameter that limits the number of times a stimulus can be repeated is `maxrep`. The parameter is an integer (or `None`) indicating the maximum number of repetitions. However, for the most part, the structure of the design from Part 4 does not seem as if there is a stimulus that is presented too many times in a row. The longest run I can identify is 4 presentations in a row. Every other "block" of conditions is only one or two presentations of the stimuli. 
diff --git a/optimize_part1.py b/optimize_part1.py index 53ccc28..3f9cd1a 100755 --- a/optimize_part1.py +++ b/optimize_part1.py @@ -14,7 +14,7 @@ import pandas as pd import numpy as np -cycles = 10 # try cycles=10 for testing and cycles=5000 for real applications +cycles = 5000 # try cycles=10 for testing and cycles=5000 for real applications sims = 10 exercise = 'part1' # change this for each exercise diff --git a/optimize_part2.py b/optimize_part2.py new file mode 100755 index 0000000..f7b6973 --- /dev/null +++ b/optimize_part2.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# coding: utf-8 + +# base script for homework exercises +import warnings +warnings.filterwarnings("ignore", message="numpy.dtype size changed") + +from neurodesign import optimisation,experiment +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from scipy.stats import t +import seaborn as sns +import pandas as pd +import numpy as np + +cycles = 5000 # try cycles=10 for testing and cycles=5000 for real applications +sims = 10 + +exercise = 'part2' # change this for each exercise + +# define the experiment +EXP = experiment( + TR=2, + duration=300, + P = [.5, .5], + C = [[1.0, -1.0]], + n_stimuli = 2, + rho = 0.3, + resolution=0.1, + stim_duration=1, + ITImodel = 'exponential', + ITImin = 1, + ITImean = 4, + ITImax=30, + confoundorder=3, # this cannot be 0 + hardprob=True, + ) + +# optimize the design for detection efficiency only using GA +POP_GA = optimisation( + experiment=EXP, + weights=[0,.5,0,.5], + preruncycles = 2, + cycles = cycles, + seed=1, + outdes=5, + I=10, + folder='/tmp/', + optimisation='GA', + R = [0.5, 0.5, 0.0] + ) + +POP_GA.optimise() + +# print the best model score +print("Score: %s " % POP_GA.optima[::-1][0]) +print("N trials: %d " % len(POP_GA.bestdesign.onsets)) + + +# Let's look at the resulting experimental designs. 
+ +# this plots the columns of the X matrix convolved with the HRF +plt.figure(figsize=(10, 7)) +plt.plot(POP_GA.bestdesign.Xconv) +plt.savefig("/data/%s_Xconv.pdf" % exercise) +plt.close() + +plt.figure() +plt.plot(POP_GA.bestdesign.Xnonconv) +plt.savefig("/data/%s_X.pdf" % exercise) +plt.close() + + +# save the onsets for the best GA design + +trials = pd.DataFrame(dict(onset=POP_GA.bestdesign.onsets, trial_type=POP_GA.bestdesign.order, ITI=POP_GA.bestdesign.ITI)) +trials.to_csv('/data/%s.csv' % exercise) + +# save the onsets by conditon +# groups = trials.groupby('trial_type') +# for g in groups: +# onsets = groups.get_group(g[0]) +# onsets['onset'].to_csv('/data/best_GA_' + str(g[0]) + '.csv', index=False, header=False) + diff --git a/optimize_part3.py b/optimize_part3.py new file mode 100755 index 0000000..dd50649 --- /dev/null +++ b/optimize_part3.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# coding: utf-8 + +# base script for homework exercises +import warnings +warnings.filterwarnings("ignore", message="numpy.dtype size changed") + +from neurodesign import optimisation,experiment +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from scipy.stats import t +import seaborn as sns +import pandas as pd +import numpy as np + +cycles = 5000 # try cycles=10 for testing and cycles=5000 for real applications +sims = 10 + +exercise = 'part3' # change this for each exercise + +# define the experiment +EXP = experiment( + TR=2, + duration=300, + P = [1.0/6.0, 1.0/6.0, 1.0/6.0, 1.0/6.0, 1.0/6.0, 1.0/6.0], + C = [[1.0, -1.0, 0, 0, 0, 0]], + n_stimuli = 6, + rho = 0.3, + resolution=0.1, + stim_duration=1, + ITImodel = 'exponential', + ITImin = 1, + ITImean = 4, + ITImax=30, + confoundorder=1, # this cannot be 0 + hardprob=True, + ) + +# optimize the design for detection efficiency only using GA +POP_GA = optimisation( + experiment=EXP, + weights=[0,1,0,0], + preruncycles = 2, + cycles = cycles, + seed=1, + outdes=5, + I=10, + folder='/tmp/', + 
optimisation='GA', + R = [0.5, 0.5, 0.0] + ) + +POP_GA.optimise() + +# print the best model score +print("Score: %s " % POP_GA.optima[::-1][0]) +print("N trials: %d " % len(POP_GA.bestdesign.onsets)) + + +# Let's look at the resulting experimental designs. + +# this plots the columns of the X matrix convolved with the HRF +plt.figure(figsize=(10, 7)) +plt.plot(POP_GA.bestdesign.Xconv) +plt.savefig("/data/%s_Xconv.pdf" % exercise) +plt.close() + +plt.figure() +plt.plot(POP_GA.bestdesign.Xnonconv) +plt.savefig("/data/%s_X.pdf" % exercise) +plt.close() + + +# save the onsets for the best GA design + +trials = pd.DataFrame(dict(onset=POP_GA.bestdesign.onsets, trial_type=POP_GA.bestdesign.order, ITI=POP_GA.bestdesign.ITI)) +trials.to_csv('/data/%s.csv' % exercise) + +# save the onsets by conditon +# groups = trials.groupby('trial_type') +# for g in groups: +# onsets = groups.get_group(g[0]) +# onsets['onset'].to_csv('/data/best_GA_' + str(g[0]) + '.csv', index=False, header=False) + diff --git a/optimize_part4.py b/optimize_part4.py new file mode 100755 index 0000000..51c2365 --- /dev/null +++ b/optimize_part4.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# coding: utf-8 + +# base script for homework exercises +import warnings +warnings.filterwarnings("ignore", message="numpy.dtype size changed") + +from neurodesign import optimisation,experiment +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from scipy.stats import t +import seaborn as sns +import pandas as pd +import numpy as np + +cycles = 5000 # try cycles=10 for testing and cycles=5000 for real applications +sims = 10 + +exercise = 'part4' # change this for each exercise + +# define the experiment +EXP = experiment( + TR=2, + duration=300, + P = [1.0/5.0, 1.0/5.0, 1.0/5.0, 1.0/5.0, 1.0/5.0], + C = [[1.0, -1.0, 0, 0, 0], [0, 0, 1.0, -1.0, 0], [1.0, 1.0, -1.0, -1.0, 0]], + n_stimuli = 5, + rho = 0.3, + resolution=0.1, + stim_duration=1, + ITImodel = 'exponential', + ITImin = 1, + ITImean = 
2, + ITImax=5, + confoundorder=3, # this cannot be 0 + hardprob=True, + ) + +# optimize the design for detection efficiency only using GA +POP_GA = optimisation( + experiment=EXP, + weights=[0,.5,.5,0], + preruncycles = 2, + cycles = cycles, + seed=1, + outdes=5, + I=10, + folder='/tmp/', + optimisation='GA', + R = [0.5, 0.5, 0.0] + ) + +POP_GA.optimise() + +# print the best model score +print("Score: %s " % POP_GA.optima[::-1][0]) +print("N trials: %d " % len(POP_GA.bestdesign.onsets)) + + +# Let's look at the resulting experimental designs. + +# this plots the columns of the X matrix convolved with the HRF +plt.figure(figsize=(10, 7)) +plt.plot(POP_GA.bestdesign.Xconv) +plt.savefig("/data/%s_Xconv.pdf" % exercise) +plt.close() + +plt.figure() +plt.plot(POP_GA.bestdesign.Xnonconv) +plt.savefig("/data/%s_X.pdf" % exercise) +plt.close() + + +# save the onsets for the best GA design + +trials = pd.DataFrame(dict(onset=POP_GA.bestdesign.onsets, trial_type=POP_GA.bestdesign.order, ITI=POP_GA.bestdesign.ITI)) +trials.to_csv('/data/%s.csv' % exercise) + +# save the onsets by conditon +# groups = trials.groupby('trial_type') +# for g in groups: +# onsets = groups.get_group(g[0]) +# onsets['onset'].to_csv('/data/best_GA_' + str(g[0]) + '.csv', index=False, header=False) + diff --git a/part1.csv b/part1.csv new file mode 100755 index 0000000..a961c6b --- /dev/null +++ b/part1.csv @@ -0,0 +1,61 @@ +,ITI,onset,trial_type +0,0.0,0.0,0 +1,6.0,7.0,0 +2,3.1,11.1,0 +3,2.9000000000000004,15.0,0 +4,2.0,18.0,0 +5,1.7000000000000002,20.7,0 +6,3.7,25.4,1 +7,1.5,27.9,1 +8,2.7,31.6,1 +9,5.300000000000001,37.900000000000006,1 +10,9.8,48.7,1 +11,1.0,50.7,1 +12,1.5,53.2,1 +13,1.4000000000000001,55.6,1 +14,4.6000000000000005,61.2,1 +15,2.9000000000000004,65.10000000000001,0 +16,1.1,67.2,0 +17,4.6000000000000005,72.8,0 +18,4.6000000000000005,78.39999999999999,1 +19,1.1,80.49999999999999,1 +20,2.2,83.69999999999999,1 +21,4.0,88.69999999999999,0 +22,2.5,92.19999999999999,0 
+23,1.6,94.79999999999998,0 +24,1.1,96.89999999999998,0 +25,1.4000000000000001,99.29999999999998,0 +26,2.1,102.39999999999998,0 +27,2.9000000000000004,106.29999999999998,1 +28,2.2,109.49999999999999,1 +29,2.5,112.99999999999999,1 +30,1.3,115.29999999999998,1 +31,2.6,118.89999999999998,1 +32,1.0,120.89999999999998,1 +33,10.8,132.7,0 +34,1.1,134.79999999999998,0 +35,2.9000000000000004,138.7,0 +36,2.4000000000000004,142.1,1 +37,7.0,150.1,1 +38,1.8,152.9,0 +39,1.2000000000000002,155.1,0 +40,1.9000000000000001,158.0,0 +41,1.6,160.6,0 +42,1.1,162.7,0 +43,2.5,166.2,0 +44,2.8000000000000003,170.0,1 +45,1.4000000000000001,172.4,1 +46,2.8000000000000003,176.20000000000002,1 +47,1.6,178.8,1 +48,4.2,184.0,0 +49,1.5,186.5,0 +50,1.4000000000000001,188.9,0 +51,7.2,197.1,1 +52,1.7000000000000002,199.79999999999998,1 +53,4.800000000000001,205.6,0 +54,3.2,209.79999999999998,0 +55,3.7,214.49999999999997,0 +56,1.2000000000000002,216.69999999999996,1 +57,1.2000000000000002,218.89999999999995,1 +58,1.4000000000000001,221.29999999999995,1 +59,1.0,223.29999999999995,1 diff --git a/part1_X.pdf b/part1_X.pdf new file mode 100755 index 0000000..edd9ce7 Binary files /dev/null and b/part1_X.pdf differ diff --git a/part1_Xconv.pdf b/part1_Xconv.pdf new file mode 100755 index 0000000..faff033 Binary files /dev/null and b/part1_Xconv.pdf differ diff --git a/part2.csv b/part2.csv new file mode 100755 index 0000000..4510a7d --- /dev/null +++ b/part2.csv @@ -0,0 +1,61 @@ +,ITI,onset,trial_type +0,0.0,0.0,0 +1,6.1000000000000005,7.100000000000001,1 +2,1.6,9.700000000000001,1 +3,1.4000000000000001,12.100000000000001,1 +4,2.5,15.600000000000001,1 +5,4.4,21.0,0 +6,2.8000000000000003,24.8,0 +7,5.6000000000000005,31.4,1 +8,1.0,33.4,1 +9,1.0,35.4,1 +10,2.7,39.1,0 +11,4.4,44.5,0 +12,1.8,47.3,0 +13,4.9,53.199999999999996,1 +14,3.6,57.8,0 +15,2.0,60.8,0 +16,1.7000000000000002,63.5,0 +17,1.1,65.6,0 +18,3.2,69.8,1 +19,2.0,72.8,1 +20,3.4000000000000004,77.2,0 +21,1.0,79.2,0 +22,1.0,81.2,0 
+23,1.4000000000000001,83.60000000000001,0 +24,1.0,85.60000000000001,0 +25,4.7,91.30000000000001,1 +26,1.0,93.30000000000001,1 +27,25.1,119.4,1 +28,1.7000000000000002,122.10000000000001,1 +29,1.2000000000000002,124.30000000000001,1 +30,2.7,128.0,1 +31,4.4,133.4,0 +32,1.0,135.4,0 +33,1.3,137.70000000000002,0 +34,1.5,140.20000000000002,0 +35,2.0,143.20000000000002,0 +36,4.6000000000000005,148.8,1 +37,17.5,167.3,0 +38,6.6000000000000005,174.9,1 +39,6.4,182.3,1 +40,13.600000000000001,196.9,1 +41,1.4000000000000001,199.3,1 +42,1.0,201.3,1 +43,1.4000000000000001,203.70000000000002,1 +44,2.6,207.3,1 +45,5.1000000000000005,213.4,0 +46,3.7,218.1,0 +47,1.0,220.1,0 +48,5.300000000000001,226.4,1 +49,1.6,229.0,1 +50,14.0,244.0,0 +51,3.0,248.0,0 +52,5.7,254.7,1 +53,4.5,260.2,1 +54,10.8,272.0,0 +55,2.0,275.0,0 +56,1.0,277.0,0 +57,1.2000000000000002,279.2,0 +58,8.5,288.7,1 +59,1.8,291.5,1 diff --git a/part2_X.pdf b/part2_X.pdf new file mode 100755 index 0000000..90c0da7 Binary files /dev/null and b/part2_X.pdf differ diff --git a/part2_Xconv.pdf b/part2_Xconv.pdf new file mode 100755 index 0000000..d029a7d Binary files /dev/null and b/part2_Xconv.pdf differ diff --git a/part4.csv b/part4.csv new file mode 100644 index 0000000..3bb99b4 --- /dev/null +++ b/part4.csv @@ -0,0 +1,101 @@ +,ITI,onset,trial_type +0,0.0,0.0,3 +1,2.4000000000000004,3.4000000000000004,3 +2,1.6,6.0,3 +3,1.3,8.3,3 +4,1.9000000000000001,11.200000000000001,4 +5,1.1,13.3,4 +6,1.4000000000000001,15.700000000000003,4 +7,1.0,17.700000000000003,4 +8,1.3,20.000000000000004,0 +9,3.6,24.6,0 +10,1.5,27.1,0 +11,1.4000000000000001,29.5,0 +12,2.0,32.5,4 +13,1.5,35.0,4 +14,3.5,39.5,4 +15,1.1,41.6,4 +16,4.9,47.5,0 +17,2.9000000000000004,51.4,0 +18,1.5,53.9,0 +19,1.8,56.699999999999996,0 +20,2.8000000000000003,60.49999999999999,1 +21,1.4000000000000001,62.89999999999999,1 +22,1.9000000000000001,65.8,1 +23,1.1,67.89999999999999,1 +24,1.7000000000000002,70.6,0 +25,1.0,72.6,0 +26,1.0,74.6,0 +27,2.5,78.1,0 
+28,1.6,80.69999999999999,2 +29,2.0,83.69999999999999,2 +30,1.9000000000000001,86.6,2 +31,3.0,90.6,2 +32,1.1,92.69999999999999,4 +33,4.4,98.1,4 +34,1.2000000000000002,100.3,4 +35,4.800000000000001,106.1,4 +36,1.1,108.19999999999999,2 +37,1.5,110.69999999999999,2 +38,1.1,112.79999999999998,2 +39,3.3000000000000003,117.09999999999998,2 +40,1.7000000000000002,119.79999999999998,3 +41,1.9000000000000001,122.69999999999999,3 +42,1.9000000000000001,125.6,3 +43,1.2000000000000002,127.79999999999998,3 +44,1.2000000000000002,129.99999999999997,1 +45,3.0,133.99999999999997,1 +46,2.2,137.19999999999996,1 +47,1.1,139.29999999999995,1 +48,1.7000000000000002,141.99999999999994,1 +49,1.9000000000000001,144.89999999999995,1 +50,1.4000000000000001,147.29999999999995,1 +51,1.4000000000000001,149.69999999999996,1 +52,1.8,152.49999999999997,3 +53,2.0,155.49999999999997,3 +54,1.1,157.59999999999997,3 +55,1.2000000000000002,159.79999999999995,3 +56,3.5,164.29999999999995,4 +57,4.7,169.99999999999994,4 +58,1.0,171.99999999999994,4 +59,1.8,174.79999999999995,4 +60,3.7,179.49999999999994,1 +61,1.0,181.49999999999994,1 +62,1.2000000000000002,183.69999999999993,1 +63,2.3000000000000003,186.99999999999994,1 +64,2.3000000000000003,190.29999999999995,3 +65,3.3000000000000003,194.59999999999997,3 +66,4.7,200.29999999999995,3 +67,1.7000000000000002,202.99999999999994,3 +68,1.5,205.49999999999994,3 +69,1.7000000000000002,208.19999999999993,3 +70,1.3,210.49999999999994,3 +71,1.4000000000000001,212.89999999999995,3 +72,1.1,214.99999999999994,2 +73,2.1,218.09999999999994,2 +74,1.4000000000000001,220.49999999999994,2 +75,1.8,223.29999999999995,2 +76,1.7000000000000002,225.99999999999994,0 +77,2.2,229.19999999999993,0 +78,1.4000000000000001,231.59999999999994,0 +79,1.1,233.69999999999993,0 +80,1.6,236.29999999999993,2 +81,1.0,238.29999999999993,2 +82,1.9000000000000001,241.19999999999993,2 +83,1.1,243.29999999999993,2 +84,1.1,245.39999999999992,2 +85,1.4000000000000001,247.79999999999993,2 
+86,1.6,250.39999999999992,2 +87,1.5,252.89999999999992,2 +88,3.7,257.5999999999999,1 +89,1.8,260.3999999999999,1 +90,2.4000000000000004,263.7999999999999,1 +91,1.6,266.3999999999999,1 +92,1.5,268.8999999999999,0 +93,2.8000000000000003,272.69999999999993,0 +94,1.5,275.19999999999993,0 +95,2.6,278.79999999999995,0 +96,1.4000000000000001,281.19999999999993,4 +97,1.8,283.99999999999994,4 +98,2.3000000000000003,287.29999999999995,4 +99,2.7,290.99999999999994,4 diff --git a/part4_X.pdf b/part4_X.pdf new file mode 100644 index 0000000..f1e3f26 Binary files /dev/null and b/part4_X.pdf differ diff --git a/part4_Xconv.pdf b/part4_Xconv.pdf new file mode 100644 index 0000000..531e708 Binary files /dev/null and b/part4_Xconv.pdf differ diff --git a/sbatch_part1.sh b/sbatch_part1.sh new file mode 100755 index 0000000..37b040a --- /dev/null +++ b/sbatch_part1.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#SBATCH --mail-type=ALL +#SBATCH --mail-user=briana.oshiro@uconn.edu +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=1 +#SBATCH --mem=2gb +#SBATCH --time=10:00:00 +#SBATCH -e error_part1.log +#SBATCH -o output_part1.log +#SBATCH --job-name=part1 +#SBATCH --partition=serial +##### END OF JOB DEFINITION ##### + +module load singularity +singularity run \ +--bind /scratch/psyc5171/$USER/f19-ex3:/data \ +/scratch/psyc5171/containers/neurodesign_latest.sif \ +/data/optimize_part1.py diff --git a/sbatch_part2.sh b/sbatch_part2.sh new file mode 100755 index 0000000..62e9f41 --- /dev/null +++ b/sbatch_part2.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#SBATCH --mail-type=ALL +#SBATCH --mail-user=briana.oshiro@uconn.edu +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=1 +#SBATCH --mem=2gb +#SBATCH --time=10:00:00 +#SBATCH -e error_part2.log +#SBATCH -o output_part2.log +#SBATCH --job-name=part2 +#SBATCH --partition=serial +##### END OF JOB DEFINITION ##### + +module load singularity +singularity run \ +--bind /scratch/psyc5171/$USER/f19-ex3:/data \ 
+/scratch/psyc5171/containers/neurodesign_latest.sif \ +/data/optimize_part2.py diff --git a/sbatch_part3.sh b/sbatch_part3.sh new file mode 100755 index 0000000..4009e3e --- /dev/null +++ b/sbatch_part3.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#SBATCH --mail-type=ALL +#SBATCH --mail-user=briana.oshiro@uconn.edu +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=1 +#SBATCH --mem=2gb +#SBATCH --time=50:00:00 +#SBATCH -e error_part3.log +#SBATCH -o output_part3.log +#SBATCH --job-name=design +#SBATCH --partition=serial +##### END OF JOB DEFINITION ##### + +module load singularity +singularity run \ +--bind /scratch/psyc5171/$USER/f19-ex3:/data \ +/scratch/psyc5171/containers/neurodesign_latest.sif \ +/data/optimize_part3.py diff --git a/sbatch_part4.sh b/sbatch_part4.sh new file mode 100755 index 0000000..9edf9ed --- /dev/null +++ b/sbatch_part4.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#SBATCH --mail-type=ALL +#SBATCH --mail-user=briana.oshiro@uconn.edu +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=1 +#SBATCH --mem=2gb +#SBATCH --time=50:00:00 +#SBATCH -e error_part4.log +#SBATCH -o output_part4.log +#SBATCH --job-name=part4 +#SBATCH --partition=serial +##### END OF JOB DEFINITION ##### + +module load singularity +singularity run \ +--bind /scratch/psyc5171/$USER/f19-ex3:/data \ +/scratch/psyc5171/containers/neurodesign_latest.sif \ +/data/optimize_part4.py