From 76936cd9092c8a5636da8849cbdaaef398fc7845 Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Tue, 29 Aug 2017 00:59:29 -0400 Subject: [PATCH 1/8] Scripts for hw2 --- scripts/HW 2 afni_proc | 11 ++++++++++ scripts/hw_processing.sh | 25 +++++++++++++++++++++++ scripts/make_timings.py | 43 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+) create mode 100644 scripts/HW 2 afni_proc create mode 100644 scripts/hw_processing.sh create mode 100755 scripts/make_timings.py diff --git a/scripts/HW 2 afni_proc b/scripts/HW 2 afni_proc new file mode 100644 index 0000000..3eed144 --- /dev/null +++ b/scripts/HW 2 afni_proc @@ -0,0 +1,11 @@ +HW 2 afni_proc + +Constructed timing files using make_timings.py and timing_tool.py to drop 1st TR + +Experiment 1: + Dice Coefficient: .1978 - not good + +Experiment 2: + Without motion censoring and regression - Dice Coefficient: .2200 + With a smaller smoothing kernel - Dice Coefficient: .2745 + Using 3dREMLfit - Dice Coefficient: .1978 \ No newline at end of file diff --git a/scripts/hw_processing.sh b/scripts/hw_processing.sh new file mode 100644 index 0000000..b9113d6 --- /dev/null +++ b/scripts/hw_processing.sh @@ -0,0 +1,25 @@ +#run to generate afni_proc.py script +./cmd.ap.sub_03 + +#manually alter proc.sub_03 to correct tshift and others + +#run proc.sub_03 for test and retest files + +#in each results folder +#1. threshold statistics +3dmerge -1zscore -1thresh 1.96 -1noneg -prefix stat.thresh 'stats.sub_03+tlrc[17]' + +#2. calculate mask of intersection +#navigate back out to sub-03 folder +3dmask_tool -input group.*/subj.sub_03/sub_03.results/mask_group+tlrc.HEAD -frac 1.0 -prefix mask + +#in each results folder +#3. binarize thresholded images +3dcalc -a ./group.test/subj.sub_03/sub_03.results/stat.thresh+tlrc -prefix stat.test.bin -expr 'astep(a,.1)' +#in retest folder +3dcalc -a ./group.retest/subj.sub_03/sub_03.results/stat.thresh+tlrc -prefix stat.retest.bin -expr 'astep(a,.1)' + +#4. 
calculate dice coefficient +3ddot -mask mask+tlrc -dodice stat.test.bin+tlrc stat.retest.bin+tlrc +#found a Dice coefficient of 0.197834 - not so great + diff --git a/scripts/make_timings.py b/scripts/make_timings.py new file mode 100755 index 0000000..89e73d2 --- /dev/null +++ b/scripts/make_timings.py @@ -0,0 +1,43 @@ +#!/Users/monica/anaconda/bin/python +import re +import sys + +### CONSTRUCT TIMING FILE FOR - +#'landmark stimuli with responses vs. only detection stimuli with responses' + +#make blank lists for storing times +landmark_timing=[] +detection_timing=[] + +#Condition names +landmark=['Correct_Task','Incorrect_Task'] +detection=['Response_Control'] + +#Open event file +with open(sys.argv[1], 'r') as text: + trials=text.read() + +#Look for event onset and trial type +n=re.compile(r"^(\d+.\d+)\t\d\t\d.\d\t(\w+)",re.MULTILINE) +matches=n.finditer(trials) + +#Match trial type to conditions of interest +for m in matches: + if m.groups()[1] in landmark: + landmark_timing.append(m.groups()[0]) + elif m.groups()[1] in detection: + detection_timing.append(m.groups()[0]) + +#Convert list into tab-delimited string +landmark_timing = '\t'.join(landmark_timing) +detection_timing = '\t'.join(detection_timing) + +#Write landmark timing file +l = open('landmark_times.1D','w+') +l.write(landmark_timing) +l.close() + +#Write detection timing file +d = open('detection_times.1D','w+') +d.write(detection_timing) +d.close() \ No newline at end of file From 7c8739aa9b22bd69bb85db01b26ebdbc48d33667 Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Tue, 29 Aug 2017 23:25:49 -0400 Subject: [PATCH 2/8] Re-ran analyses using correct sub-brick --- scripts/HW 2 afni_proc | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/scripts/HW 2 afni_proc b/scripts/HW 2 afni_proc index 3eed144..4744d8c 100644 --- a/scripts/HW 2 afni_proc +++ b/scripts/HW 2 afni_proc @@ -8,4 +8,19 @@ Experiment 1: Experiment 2: Without motion censoring and regression - Dice 
Coefficient: .2200 With a smaller smoothing kernel - Dice Coefficient: .2745 - Using 3dREMLfit - Dice Coefficient: .1978 \ No newline at end of file + Using 3dREMLfit - Dice Coefficient: .1978 + +Edit (8/29/2017) +The above was done assuming sub-brick 17 was the correct statistic, but it probably wasn't. Instead, I used stats.sub_03+tlrc['L-D_GLT#0_Tstat'] + +Below is everything redone using the correct sub-brick. +Experiment 1: + Dice Coefficient: .05296 - quite bad + +Experiment 2: + Without motion censoring and regression - Dice Coefficient: .02986 - much lower + This makes sense, since leaving in bad motion volumes would decrease the similarity/reliability between the two samples + With a smaller smoothing kernel - Dice Coefficient: .05976 - not much different + I would expect a smaller smoothing kernel to result in a smaller Dice coefficient because the data would be noisier in each sample + Using 3dREMLfit - Dice Coefficient: .00205 - much much lower + \ No newline at end of file From 0145998b093c19d21001d819cbc78abe151e2d4f Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Wed, 30 Aug 2017 23:18:52 -0400 Subject: [PATCH 3/8] Changed timing files and contrast --- scripts/HW 2 afni_proc | 11 +- scripts/hw_processing.sh | 32 ++- scripts/make_timings.py | 23 +- scripts/proc.sub_03_exp1 | 502 +++++++++++++++++++++++++++++++++++++++ scripts/proc.sub_03_exp2 | 502 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 1060 insertions(+), 10 deletions(-) create mode 100755 scripts/proc.sub_03_exp1 create mode 100755 scripts/proc.sub_03_exp2 diff --git a/scripts/HW 2 afni_proc b/scripts/HW 2 afni_proc index 4744d8c..cb24cb7 100644 --- a/scripts/HW 2 afni_proc +++ b/scripts/HW 2 afni_proc @@ -23,4 +23,13 @@ Experiment 2: With a smaller smoothing kernel - Dice Coefficient: .05976 - not much different I would expect a smaller smoothing kernel to result in a smaller Dice coefficient because the data would be noisier in each sample Using 3dREMLfit - Dice Coefficient: 
.00205 - much much lower - \ No newline at end of file + +Edit (8/30/2017) +Hopefully the last attempt. Edited make_timings.py to create 3 timing files (separating landmark correct and incorrect) +Contrast: .5*landmark_correct .5*landmark_incorrect -detection + +Experiment 1: + Dice Coefficient: .1676 +Experiment 2: + With a smaller smoothing kernel - Dice Coefficient: .1359 + Lower, as expected. diff --git a/scripts/hw_processing.sh b/scripts/hw_processing.sh index b9113d6..ff5783f 100644 --- a/scripts/hw_processing.sh +++ b/scripts/hw_processing.sh @@ -1,7 +1,33 @@ -#run to generate afni_proc.py script +cd Documents/ibrain-training/hw2/sub-03/ses-test/func +chmod 755 ../../../playground/scripts/make_timings.py + +#run script to make timing files in test sample +../../../playground/scripts/make_timings.py sub-03_ses-test_task-linebisection_events.tsv +#run timing tool on each file to adjust for dropping the first TR in each run +for f in *.1D +do + timing_tool.py -timing $f -add_offset -2.5 -write_timing adj_${f} +done + +#do the same for the retest sample +cd ../../ses-retest/func +../../../playground/scripts/make_timings.py sub-03_ses-retest_task-linebisection_events.tsv +for g in *.1D +do + timing_tool.py -timing $g -add_offset -2.5 -write_timing adj_${g} +done + +#run uber_subject.py and set parameters +# change smoothing kernel to 6mm FWHM +# change motion threshold to .5 +# check regress motion derivatives +# +#run to generate afni_proc.py script for test and retest ./cmd.ap.sub_03 -#manually alter proc.sub_03 to correct tshift and others +#manually alter proc.sub_03 to add -tpattern alt+z to tshift +#-AddEdge -deoblique on -master_tlrc 3 to align +#-bout to stats bucket #run proc.sub_03 for test and retest files @@ -23,3 +49,5 @@ 3ddot -mask mask+tlrc -dodice stat.test.bin+tlrc stat.retest.bin+tlrc #found a Dice coefficient of 0.197834 - not so great +#for experiment 2, I changed the -1blur_fwhm option in proc.sub_03 + diff --git a/scripts/make_timings.py 
b/scripts/make_timings.py index 89e73d2..aa9fd08 100755 --- a/scripts/make_timings.py +++ b/scripts/make_timings.py @@ -6,11 +6,13 @@ #'landmark stimuli with responses vs. only detection stimuli with responses' #make blank lists for storing times -landmark_timing=[] +landmark_timing_c=[] +landmark_timing_ic=[] detection_timing=[] #Condition names -landmark=['Correct_Task','Incorrect_Task'] +landmark_c=['Correct_Task'] +landmark_ic=['Incorrect_Task'] detection=['Response_Control'] #Open event file @@ -23,20 +25,27 @@ #Match trial type to conditions of interest for m in matches: - if m.groups()[1] in landmark: - landmark_timing.append(m.groups()[0]) + if m.groups()[1] in landmark_c: + landmark_timing_c.append(m.groups()[0]) + elif m.groups()[1] in landmark_ic: + landmark_timing_ic.append(m.groups()[0]) elif m.groups()[1] in detection: detection_timing.append(m.groups()[0]) #Convert list into tab-delimited string -landmark_timing = '\t'.join(landmark_timing) +landmark_timing_c = '\t'.join(landmark_timing_c) +landmark_timing_ic = '\t'.join(landmark_timing_ic) detection_timing = '\t'.join(detection_timing) #Write landmark timing file -l = open('landmark_times.1D','w+') -l.write(landmark_timing) +l = open('landmark_correct_times.1D','w+') +l.write(landmark_timing_c) l.close() +l2 = open('landmark_incorrect_times.1D','w+') +l2.write(landmark_timing_ic) +l2.close() + #Write detection timing file d = open('detection_times.1D','w+') d.write(detection_timing) diff --git a/scripts/proc.sub_03_exp1 b/scripts/proc.sub_03_exp1 new file mode 100755 index 0000000..d609298 --- /dev/null +++ b/scripts/proc.sub_03_exp1 @@ -0,0 +1,502 @@ +#!/bin/tcsh -xef + +echo "auto-generated by afni_proc.py, Wed Aug 30 21:45:56 2017" +echo "(version 5.15, April 25, 2017)" +echo "execution started: `date`" + +# execute via : +# tcsh -xef proc.sub_03 |& tee output.proc.sub_03 + +# =========================== auto block: setup ============================ +# script setup + +# take note of the AFNI 
version +afni -ver + +# check that the current AFNI version is recent enough +afni_history -check_date 23 Sep 2016 +if ( $status ) then + echo "** this script requires newer AFNI binaries (than 23 Sep 2016)" + echo " (consider: @update.afni.binaries -defaults)" + exit +endif + +# the user may specify a single subject to run with +if ( $#argv > 0 ) then + set subj = $argv[1] +else + set subj = sub_03 +endif + +# assign output directory name +set output_dir = $subj.results + +# verify that the results directory does not yet exist +if ( -d $output_dir ) then + echo output dir "$subj.results" already exists + exit +endif + +# set list of runs +set runs = (`count -digits 2 1 1`) + +# create results and stimuli directories +mkdir $output_dir +mkdir $output_dir/stimuli + +# copy stim files into stimulus directory +cp \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ + $output_dir/stimuli + +# copy anatomy to results dir +3dcopy \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/anat/sub-03_ses-test_T1w.nii.gz \ + $output_dir/sub-03_ses-test_T1w + +# ============================ auto block: tcat ============================ +# apply 3dTcat to copy input dsets to results dir, while +# removing the first 1 TRs +3dTcat -prefix $output_dir/pb00.$subj.r01.tcat \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/sub-03_ses-test_task-linebisection_bold.nii.gz'[1..$]' + +# and make note of repetitions (TRs) per run +set tr_counts = ( 237 ) + +# ------------------------------------------------------- +# enter the results directory (can begin processing data) +cd $output_dir + + +# ========================== auto block: outcount ========================== +# data check: compute outlier fraction for each volume 
+touch out.pre_ss_warn.txt +foreach run ( $runs ) + 3dToutcount -automask -fraction -polort 4 -legendre \ + pb00.$subj.r$run.tcat+orig > outcount.r$run.1D + + # outliers at TR 0 might suggest pre-steady state TRs + if ( `1deval -a outcount.r$run.1D"{0}" -expr "step(a-0.4)"` ) then + echo "** TR #0 outliers: possible pre-steady state TRs in run $run" \ + >> out.pre_ss_warn.txt + endif +end + +# catenate outlier counts into a single time series +cat outcount.r*.1D > outcount_rall.1D + +# ================================= tshift ================================= +# time shift data so all slice timing is the same +foreach run ( $runs ) + 3dTshift -tzero 0 -tpattern alt+z -quintic -prefix pb01.$subj.r$run.tshift \ + pb00.$subj.r$run.tcat+orig +end + +# -------------------------------- +# extract volreg registration base +3dbucket -prefix vr_base pb01.$subj.r01.tshift+orig"[2]" + +# ================================= align ================================== +# for e2a: compute anat alignment transformation to EPI registration base +# (new anat will be intermediate, stripped, sub-03_ses-test_T1w_ns+orig) +align_epi_anat.py -anat2epi -anat sub-03_ses-test_T1w+orig \ + -save_skullstrip -suffix _al_junk \ + -epi vr_base+orig -epi_base 0 \ + -epi_strip 3dAutomask \ + -volreg off -tshift off -save_all -AddEdge -deoblique on -master_tlrc 3 + +# ================================== tlrc ================================== +# warp anatomy to standard space +@auto_tlrc -base MNI_avg152T1+tlrc -input sub-03_ses-test_T1w_ns+orig -no_ss + +# store forward transformation matrix in a text file +cat_matvec sub-03_ses-test_T1w_ns+tlrc::WARP_DATA -I > warp.anat.Xat.1D + +# ================================= volreg ================================= +# align each dset to base volume, align to anat, warp to tlrc space + +# verify that we have a +tlrc warp dataset +if ( ! 
-f sub-03_ses-test_T1w_ns+tlrc.HEAD ) then + echo "** missing +tlrc warp dataset: sub-03_ses-test_T1w_ns+tlrc.HEAD" + exit +endif + +# register and warp +foreach run ( $runs ) + # register each volume to the base + 3dvolreg -verbose -zpad 1 -base vr_base+orig \ + -1Dfile dfile.r$run.1D -prefix rm.epi.volreg.r$run \ + -cubic \ + -1Dmatrix_save mat.r$run.vr.aff12.1D \ + pb01.$subj.r$run.tshift+orig + + # create an all-1 dataset to mask the extents of the warp + 3dcalc -overwrite -a pb01.$subj.r$run.tshift+orig -expr 1 \ + -prefix rm.epi.all1 + + # catenate volreg/epi2anat/tlrc xforms + cat_matvec -ONELINE \ + sub-03_ses-test_T1w_ns+tlrc::WARP_DATA -I \ + sub-03_ses-test_T1w_al_junk_mat.aff12.1D -I \ + mat.r$run.vr.aff12.1D > mat.r$run.warp.aff12.1D + + # apply catenated xform: volreg/epi2anat/tlrc + 3dAllineate -base sub-03_ses-test_T1w_ns+tlrc \ + -input pb01.$subj.r$run.tshift+orig \ + -1Dmatrix_apply mat.r$run.warp.aff12.1D \ + -mast_dxyz 4 \ + -prefix rm.epi.nomask.r$run + + # warp the all-1 dataset for extents masking + 3dAllineate -base sub-03_ses-test_T1w_ns+tlrc \ + -input rm.epi.all1+orig \ + -1Dmatrix_apply mat.r$run.warp.aff12.1D \ + -mast_dxyz 4 -final NN -quiet \ + -prefix rm.epi.1.r$run + + # make an extents intersection mask of this run + 3dTstat -min -prefix rm.epi.min.r$run rm.epi.1.r$run+tlrc +end + +# make a single file of registration params +cat dfile.r*.1D > dfile_rall.1D + +# ---------------------------------------- +# create the extents mask: mask_epi_extents+tlrc +# (this is a mask of voxels that have valid data at every TR) +# (only 1 run, so just use 3dcopy to keep naming straight) +3dcopy rm.epi.min.r01+tlrc mask_epi_extents + +# and apply the extents mask to the EPI data +# (delete any time series with missing data) +foreach run ( $runs ) + 3dcalc -a rm.epi.nomask.r$run+tlrc -b mask_epi_extents+tlrc \ + -expr 'a*b' -prefix pb02.$subj.r$run.volreg +end + +# warp the volreg base EPI dataset to make a final version +cat_matvec -ONELINE \ + 
sub-03_ses-test_T1w_ns+tlrc::WARP_DATA -I \ + sub-03_ses-test_T1w_al_junk_mat.aff12.1D -I > \ + mat.basewarp.aff12.1D + +3dAllineate -base sub-03_ses-test_T1w_ns+tlrc \ + -input vr_base+orig \ + -1Dmatrix_apply mat.basewarp.aff12.1D \ + -mast_dxyz 4 \ + -prefix final_epi_vr_base + +# create an anat_final dataset, aligned with stats +3dcopy sub-03_ses-test_T1w_ns+tlrc anat_final.$subj + +# record final registration costs +3dAllineate -base final_epi_vr_base+tlrc -allcostX \ + -input anat_final.$subj+tlrc |& tee out.allcostX.txt + +# ----------------------------------------- +# warp anat follower datasets (affine) +3dAllineate -source sub-03_ses-test_T1w+orig \ + -master anat_final.$subj+tlrc \ + -final wsinc5 -1Dmatrix_apply warp.anat.Xat.1D \ + -prefix anat_w_skull_warped + +# ================================== blur ================================== +# blur each volume of each run +foreach run ( $runs ) + 3dmerge -1blur_fwhm 6.0 -doall -prefix pb03.$subj.r$run.blur \ + pb02.$subj.r$run.volreg+tlrc +end + +# ================================== mask ================================== +# create 'full_mask' dataset (union mask) +foreach run ( $runs ) + 3dAutomask -dilate 1 -prefix rm.mask_r$run pb03.$subj.r$run.blur+tlrc +end + +# create union of inputs, output type is byte +3dmask_tool -inputs rm.mask_r*+tlrc.HEAD -union -prefix full_mask.$subj + +# ---- create subject anatomy mask, mask_anat.$subj+tlrc ---- +# (resampled from tlrc anat) +3dresample -master full_mask.$subj+tlrc -input sub-03_ses-test_T1w_ns+tlrc \ + -prefix rm.resam.anat + +# convert to binary anat mask; fill gaps and holes +3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.anat+tlrc \ + -prefix mask_anat.$subj + +# compute overlaps between anat and EPI masks +3dABoverlap -no_automask full_mask.$subj+tlrc mask_anat.$subj+tlrc \ + |& tee out.mask_ae_overlap.txt + +# note Dice coefficient of masks, as well +3ddot -dodice full_mask.$subj+tlrc mask_anat.$subj+tlrc \ + |& tee out.mask_ae_dice.txt 
+ +# ---- create group anatomy mask, mask_group+tlrc ---- +# (resampled from tlrc base anat, MNI_avg152T1+tlrc) +3dresample -master full_mask.$subj+tlrc -prefix ./rm.resam.group \ + -input /Users/monica/abin/MNI_avg152T1+tlrc + +# convert to binary group mask; fill gaps and holes +3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.group+tlrc \ + -prefix mask_group + +# ================================= scale ================================== +# scale each voxel time series to have a mean of 100 +# (be sure no negatives creep in) +# (subject to a range of [0,200]) +foreach run ( $runs ) + 3dTstat -prefix rm.mean_r$run pb03.$subj.r$run.blur+tlrc + 3dcalc -a pb03.$subj.r$run.blur+tlrc -b rm.mean_r$run+tlrc \ + -c mask_epi_extents+tlrc \ + -expr 'c * min(200, a/b*100)*step(a)*step(b)' \ + -prefix pb04.$subj.r$run.scale +end + +# ================================ regress ================================= + +# compute de-meaned motion parameters (for use in regression) +1d_tool.py -infile dfile_rall.1D -set_nruns 1 \ + -demean -write motion_demean.1D + +# compute motion parameter derivatives (for use in regression) +1d_tool.py -infile dfile_rall.1D -set_nruns 1 \ + -derivative -demean -write motion_deriv.1D + +# create censor file motion_${subj}_censor.1D, for censoring motion +1d_tool.py -infile dfile_rall.1D -set_nruns 1 \ + -show_censor_count -censor_prev_TR \ + -censor_motion 0.5 motion_${subj} + +# note TRs that were not censored +set ktrs = `1d_tool.py -infile motion_${subj}_censor.1D \ + -show_trs_uncensored encoded` + +# ------------------------------ +# run the regression analysis +3dDeconvolve -input pb04.$subj.r*.scale+tlrc.HEAD \ + -censor motion_${subj}_censor.1D \ + -polort 4 \ + -num_stimts 15 \ + -stim_times 1 stimuli/adj_detection_times.1D 'BLOCK(16.25)' \ + -stim_label 1 detection_times \ + -stim_times 2 stimuli/adj_landmark_correct_times.1D 'BLOCK(16.25)' \ + -stim_label 2 landmark_correct_times \ + -stim_times 3 
stimuli/adj_landmark_incorrect_times.1D 'BLOCK(16.25)' \ + -stim_label 3 landmark_incorrect_times \ + -stim_file 4 motion_demean.1D'[0]' -stim_base 4 -stim_label 4 roll_01 \ + -stim_file 5 motion_demean.1D'[1]' -stim_base 5 -stim_label 5 pitch_01 \ + -stim_file 6 motion_demean.1D'[2]' -stim_base 6 -stim_label 6 yaw_01 \ + -stim_file 7 motion_demean.1D'[3]' -stim_base 7 -stim_label 7 dS_01 \ + -stim_file 8 motion_demean.1D'[4]' -stim_base 8 -stim_label 8 dL_01 \ + -stim_file 9 motion_demean.1D'[5]' -stim_base 9 -stim_label 9 dP_01 \ + -stim_file 10 motion_deriv.1D'[0]' -stim_base 10 -stim_label 10 roll_02 \ + -stim_file 11 motion_deriv.1D'[1]' -stim_base 11 -stim_label 11 pitch_02 \ + -stim_file 12 motion_deriv.1D'[2]' -stim_base 12 -stim_label 12 yaw_02 \ + -stim_file 13 motion_deriv.1D'[3]' -stim_base 13 -stim_label 13 dS_02 \ + -stim_file 14 motion_deriv.1D'[4]' -stim_base 14 -stim_label 14 dL_02 \ + -stim_file 15 motion_deriv.1D'[5]' -stim_base 15 -stim_label 15 dP_02 \ + -gltsym 'SYM: .5*landmark_correct_times .5*landmark_incorrect_times \ + -detection_times' \ + -glt_label 1 L-D \ + -fout -tout -x1D X.xmat.1D -xjpeg X.jpg \ + -x1D_uncensored X.nocensor.xmat.1D \ + -fitts fitts.$subj \ + -errts errts.${subj} \ + -bucket stats.$subj -bout + + +# if 3dDeconvolve fails, terminate the script +if ( $status != 0 ) then + echo '---------------------------------------' + echo '** 3dDeconvolve error, failing...' + echo ' (consider the file 3dDeconvolve.err)' + exit +endif + + +# display any large pairwise correlations from the X-matrix +1d_tool.py -show_cormat_warnings -infile X.xmat.1D |& tee out.cormat_warn.txt + +# -- execute the 3dREMLfit script, written by 3dDeconvolve -- +tcsh -x stats.REML_cmd + +# if 3dREMLfit fails, terminate the script +if ( $status != 0 ) then + echo '---------------------------------------' + echo '** 3dREMLfit error, failing...' + exit +endif + + +# create an all_runs dataset to match the fitts, errts, etc. 
+3dTcat -prefix all_runs.$subj pb04.$subj.r*.scale+tlrc.HEAD + +# -------------------------------------------------- +# create a temporal signal to noise ratio dataset +# signal: if 'scale' block, mean should be 100 +# noise : compute standard deviation of errts +3dTstat -mean -prefix rm.signal.all all_runs.$subj+tlrc"[$ktrs]" +3dTstat -stdev -prefix rm.noise.all errts.${subj}_REML+tlrc"[$ktrs]" +3dcalc -a rm.signal.all+tlrc \ + -b rm.noise.all+tlrc \ + -c full_mask.$subj+tlrc \ + -expr 'c*a/b' -prefix TSNR.$subj + +# --------------------------------------------------- +# compute and store GCOR (global correlation average) +# (sum of squares of global mean of unit errts) +3dTnorm -norm2 -prefix rm.errts.unit errts.${subj}_REML+tlrc +3dmaskave -quiet -mask full_mask.$subj+tlrc rm.errts.unit+tlrc \ + > gmean.errts.unit.1D +3dTstat -sos -prefix - gmean.errts.unit.1D\' > out.gcor.1D +echo "-- GCOR = `cat out.gcor.1D`" + +# --------------------------------------------------- +# compute correlation volume +# (per voxel: average correlation across masked brain) +# (now just dot product with average unit time series) +3dcalc -a rm.errts.unit+tlrc -b gmean.errts.unit.1D -expr 'a*b' -prefix rm.DP +3dTstat -sum -prefix corr_brain rm.DP+tlrc + +# create ideal files for fixed response stim types +1dcat X.nocensor.xmat.1D'[5]' > ideal_detection_times.1D +1dcat X.nocensor.xmat.1D'[6]' > ideal_landmark_correct_times.1D +1dcat X.nocensor.xmat.1D'[7]' > ideal_landmark_incorrect_times.1D + +# -------------------------------------------------------- +# compute sum of non-baseline regressors from the X-matrix +# (use 1d_tool.py to get list of regressor colums) +set reg_cols = `1d_tool.py -infile X.nocensor.xmat.1D -show_indices_interest` +3dTstat -sum -prefix sum_ideal.1D X.nocensor.xmat.1D"[$reg_cols]" + +# also, create a stimulus-only X-matrix, for easy review +1dcat X.nocensor.xmat.1D"[$reg_cols]" > X.stim.xmat.1D + +# ============================ blur estimation 
============================= +# compute blur estimates +touch blur_est.$subj.1D # start with empty file + +# create directory for ACF curve files +mkdir files_ACF + +# -- estimate blur for each run in epits -- +touch blur.epits.1D + +# restrict to uncensored TRs, per run +foreach run ( $runs ) + set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \ + -show_trs_run $run` + if ( $trs == "" ) continue + 3dFWHMx -detrend -mask full_mask.$subj+tlrc \ + -ACF files_ACF/out.3dFWHMx.ACF.epits.r$run.1D \ + all_runs.$subj+tlrc"[$trs]" >> blur.epits.1D +end + +# compute average FWHM blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.epits.1D'{0..$(2)}'\'` ) +echo average epits FWHM blurs: $blurs +echo "$blurs # epits FWHM blur estimates" >> blur_est.$subj.1D + +# compute average ACF blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.epits.1D'{1..$(2)}'\'` ) +echo average epits ACF blurs: $blurs +echo "$blurs # epits ACF blur estimates" >> blur_est.$subj.1D + +# -- estimate blur for each run in errts -- +touch blur.errts.1D + +# restrict to uncensored TRs, per run +foreach run ( $runs ) + set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \ + -show_trs_run $run` + if ( $trs == "" ) continue + 3dFWHMx -detrend -mask full_mask.$subj+tlrc \ + -ACF files_ACF/out.3dFWHMx.ACF.errts.r$run.1D \ + errts.${subj}+tlrc"[$trs]" >> blur.errts.1D +end + +# compute average FWHM blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.errts.1D'{0..$(2)}'\'` ) +echo average errts FWHM blurs: $blurs +echo "$blurs # errts FWHM blur estimates" >> blur_est.$subj.1D + +# compute average ACF blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.errts.1D'{1..$(2)}'\'` ) +echo average errts ACF blurs: $blurs +echo "$blurs # errts ACF blur estimates" >> blur_est.$subj.1D + +# -- estimate blur for each run in err_reml -- +touch blur.err_reml.1D + +# 
restrict to uncensored TRs, per run +foreach run ( $runs ) + set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \ + -show_trs_run $run` + if ( $trs == "" ) continue + 3dFWHMx -detrend -mask full_mask.$subj+tlrc \ + -ACF files_ACF/out.3dFWHMx.ACF.err_reml.r$run.1D \ + errts.${subj}_REML+tlrc"[$trs]" >> blur.err_reml.1D +end + +# compute average FWHM blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.err_reml.1D'{0..$(2)}'\'` ) +echo average err_reml FWHM blurs: $blurs +echo "$blurs # err_reml FWHM blur estimates" >> blur_est.$subj.1D + +# compute average ACF blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.err_reml.1D'{1..$(2)}'\'` ) +echo average err_reml ACF blurs: $blurs +echo "$blurs # err_reml ACF blur estimates" >> blur_est.$subj.1D + + +# ================== auto block: generate review scripts =================== + +# generate a review script for the unprocessed EPI data +gen_epi_review.py -script @epi_review.$subj \ + -dsets pb00.$subj.r*.tcat+orig.HEAD + +# generate scripts to review single subject results +# (try with defaults, but do not allow bad exit status) +gen_ss_review_scripts.py -mot_limit 0.5 -exit0 + +# ========================== auto block: finalize ========================== + +# remove temporary files +\rm -f rm.* + +# if the basic subject review script is here, run it +# (want this to be the last text output) +if ( -e @ss_review_basic ) ./@ss_review_basic |& tee out.ss_review.$subj.txt + +# return to parent directory +cd .. 
+ +echo "execution finished: `date`" + + + + +# ========================================================================== +# script generated by the command: +# +# afni_proc.py -subj_id sub_03 -script proc.sub_03 -scr_overwrite -blocks \ +# tshift align tlrc volreg blur mask scale regress -copy_anat \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/anat/sub-03_ses-test_T1w.nii.gz \ +# -tcat_remove_first_trs 1 -dsets \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/sub-03_ses-test_task-linebisection_bold.nii.gz \ +# -tlrc_base MNI_avg152T1+tlrc -volreg_align_to third -volreg_align_e2a \ +# -volreg_tlrc_warp -blur_size 6.0 -regress_stim_times \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ +# -regress_stim_labels detection_times landmark_correct_times \ +# landmark_incorrect_times -regress_basis 'BLOCK(16.25)' \ +# -regress_censor_motion 0.5 -regress_apply_mot_types demean deriv \ +# -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ +# .5*landmark_incorrect_times -detection_times' -glt_label 1 L-D \ +# -regress_reml_exec -regress_make_ideal_sum sum_ideal.1D \ +# -regress_est_blur_epits -regress_est_blur_errts -regress_run_clustsim no diff --git a/scripts/proc.sub_03_exp2 b/scripts/proc.sub_03_exp2 new file mode 100755 index 0000000..d115ba0 --- /dev/null +++ b/scripts/proc.sub_03_exp2 @@ -0,0 +1,502 @@ +#!/bin/tcsh -xef + +echo "auto-generated by afni_proc.py, Wed Aug 30 21:45:56 2017" +echo "(version 5.15, April 25, 2017)" +echo "execution started: `date`" + +# execute via : +# tcsh -xef proc.sub_03 |& tee output.proc.sub_03 + +# =========================== auto block: setup ============================ +# script setup + +# take note of the AFNI version +afni -ver + +# check 
that the current AFNI version is recent enough +afni_history -check_date 23 Sep 2016 +if ( $status ) then + echo "** this script requires newer AFNI binaries (than 23 Sep 2016)" + echo " (consider: @update.afni.binaries -defaults)" + exit +endif + +# the user may specify a single subject to run with +if ( $#argv > 0 ) then + set subj = $argv[1] +else + set subj = sub_03 +endif + +# assign output directory name +set output_dir = $subj.results + +# verify that the results directory does not yet exist +if ( -d $output_dir ) then + echo output dir "$subj.results" already exists + exit +endif + +# set list of runs +set runs = (`count -digits 2 1 1`) + +# create results and stimuli directories +mkdir $output_dir +mkdir $output_dir/stimuli + +# copy stim files into stimulus directory +cp \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ + $output_dir/stimuli + +# copy anatomy to results dir +3dcopy \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/anat/sub-03_ses-test_T1w.nii.gz \ + $output_dir/sub-03_ses-test_T1w + +# ============================ auto block: tcat ============================ +# apply 3dTcat to copy input dsets to results dir, while +# removing the first 1 TRs +3dTcat -prefix $output_dir/pb00.$subj.r01.tcat \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/sub-03_ses-test_task-linebisection_bold.nii.gz'[1..$]' + +# and make note of repetitions (TRs) per run +set tr_counts = ( 237 ) + +# ------------------------------------------------------- +# enter the results directory (can begin processing data) +cd $output_dir + + +# ========================== auto block: outcount ========================== +# data check: compute outlier fraction for each volume +touch out.pre_ss_warn.txt 
+foreach run ( $runs ) + 3dToutcount -automask -fraction -polort 4 -legendre \ + pb00.$subj.r$run.tcat+orig > outcount.r$run.1D + + # outliers at TR 0 might suggest pre-steady state TRs + if ( `1deval -a outcount.r$run.1D"{0}" -expr "step(a-0.4)"` ) then + echo "** TR #0 outliers: possible pre-steady state TRs in run $run" \ + >> out.pre_ss_warn.txt + endif +end + +# catenate outlier counts into a single time series +cat outcount.r*.1D > outcount_rall.1D + +# ================================= tshift ================================= +# time shift data so all slice timing is the same +foreach run ( $runs ) + 3dTshift -tzero 0 -tpattern alt+z -quintic -prefix pb01.$subj.r$run.tshift \ + pb00.$subj.r$run.tcat+orig +end + +# -------------------------------- +# extract volreg registration base +3dbucket -prefix vr_base pb01.$subj.r01.tshift+orig"[2]" + +# ================================= align ================================== +# for e2a: compute anat alignment transformation to EPI registration base +# (new anat will be intermediate, stripped, sub-03_ses-test_T1w_ns+orig) +align_epi_anat.py -anat2epi -anat sub-03_ses-test_T1w+orig \ + -save_skullstrip -suffix _al_junk \ + -epi vr_base+orig -epi_base 0 \ + -epi_strip 3dAutomask \ + -volreg off -tshift off -save_all -AddEdge -deoblique on -master_tlrc 3 + +# ================================== tlrc ================================== +# warp anatomy to standard space +@auto_tlrc -base MNI_avg152T1+tlrc -input sub-03_ses-test_T1w_ns+orig -no_ss + +# store forward transformation matrix in a text file +cat_matvec sub-03_ses-test_T1w_ns+tlrc::WARP_DATA -I > warp.anat.Xat.1D + +# ================================= volreg ================================= +# align each dset to base volume, align to anat, warp to tlrc space + +# verify that we have a +tlrc warp dataset +if ( ! 
-f sub-03_ses-test_T1w_ns+tlrc.HEAD ) then + echo "** missing +tlrc warp dataset: sub-03_ses-test_T1w_ns+tlrc.HEAD" + exit +endif + +# register and warp +foreach run ( $runs ) + # register each volume to the base + 3dvolreg -verbose -zpad 1 -base vr_base+orig \ + -1Dfile dfile.r$run.1D -prefix rm.epi.volreg.r$run \ + -cubic \ + -1Dmatrix_save mat.r$run.vr.aff12.1D \ + pb01.$subj.r$run.tshift+orig + + # create an all-1 dataset to mask the extents of the warp + 3dcalc -overwrite -a pb01.$subj.r$run.tshift+orig -expr 1 \ + -prefix rm.epi.all1 + + # catenate volreg/epi2anat/tlrc xforms + cat_matvec -ONELINE \ + sub-03_ses-test_T1w_ns+tlrc::WARP_DATA -I \ + sub-03_ses-test_T1w_al_junk_mat.aff12.1D -I \ + mat.r$run.vr.aff12.1D > mat.r$run.warp.aff12.1D + + # apply catenated xform: volreg/epi2anat/tlrc + 3dAllineate -base sub-03_ses-test_T1w_ns+tlrc \ + -input pb01.$subj.r$run.tshift+orig \ + -1Dmatrix_apply mat.r$run.warp.aff12.1D \ + -mast_dxyz 4 \ + -prefix rm.epi.nomask.r$run + + # warp the all-1 dataset for extents masking + 3dAllineate -base sub-03_ses-test_T1w_ns+tlrc \ + -input rm.epi.all1+orig \ + -1Dmatrix_apply mat.r$run.warp.aff12.1D \ + -mast_dxyz 4 -final NN -quiet \ + -prefix rm.epi.1.r$run + + # make an extents intersection mask of this run + 3dTstat -min -prefix rm.epi.min.r$run rm.epi.1.r$run+tlrc +end + +# make a single file of registration params +cat dfile.r*.1D > dfile_rall.1D + +# ---------------------------------------- +# create the extents mask: mask_epi_extents+tlrc +# (this is a mask of voxels that have valid data at every TR) +# (only 1 run, so just use 3dcopy to keep naming straight) +3dcopy rm.epi.min.r01+tlrc mask_epi_extents + +# and apply the extents mask to the EPI data +# (delete any time series with missing data) +foreach run ( $runs ) + 3dcalc -a rm.epi.nomask.r$run+tlrc -b mask_epi_extents+tlrc \ + -expr 'a*b' -prefix pb02.$subj.r$run.volreg +end + +# warp the volreg base EPI dataset to make a final version +cat_matvec -ONELINE \ + 
sub-03_ses-test_T1w_ns+tlrc::WARP_DATA -I \ + sub-03_ses-test_T1w_al_junk_mat.aff12.1D -I > \ + mat.basewarp.aff12.1D + +3dAllineate -base sub-03_ses-test_T1w_ns+tlrc \ + -input vr_base+orig \ + -1Dmatrix_apply mat.basewarp.aff12.1D \ + -mast_dxyz 4 \ + -prefix final_epi_vr_base + +# create an anat_final dataset, aligned with stats +3dcopy sub-03_ses-test_T1w_ns+tlrc anat_final.$subj + +# record final registration costs +3dAllineate -base final_epi_vr_base+tlrc -allcostX \ + -input anat_final.$subj+tlrc |& tee out.allcostX.txt + +# ----------------------------------------- +# warp anat follower datasets (affine) +3dAllineate -source sub-03_ses-test_T1w+orig \ + -master anat_final.$subj+tlrc \ + -final wsinc5 -1Dmatrix_apply warp.anat.Xat.1D \ + -prefix anat_w_skull_warped + +# ================================== blur ================================== +# blur each volume of each run +foreach run ( $runs ) + 3dmerge -1blur_fwhm 3.0 -doall -prefix pb03.$subj.r$run.blur \ + pb02.$subj.r$run.volreg+tlrc +end + +# ================================== mask ================================== +# create 'full_mask' dataset (union mask) +foreach run ( $runs ) + 3dAutomask -dilate 1 -prefix rm.mask_r$run pb03.$subj.r$run.blur+tlrc +end + +# create union of inputs, output type is byte +3dmask_tool -inputs rm.mask_r*+tlrc.HEAD -union -prefix full_mask.$subj + +# ---- create subject anatomy mask, mask_anat.$subj+tlrc ---- +# (resampled from tlrc anat) +3dresample -master full_mask.$subj+tlrc -input sub-03_ses-test_T1w_ns+tlrc \ + -prefix rm.resam.anat + +# convert to binary anat mask; fill gaps and holes +3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.anat+tlrc \ + -prefix mask_anat.$subj + +# compute overlaps between anat and EPI masks +3dABoverlap -no_automask full_mask.$subj+tlrc mask_anat.$subj+tlrc \ + |& tee out.mask_ae_overlap.txt + +# note Dice coefficient of masks, as well +3ddot -dodice full_mask.$subj+tlrc mask_anat.$subj+tlrc \ + |& tee out.mask_ae_dice.txt 
+ +# ---- create group anatomy mask, mask_group+tlrc ---- +# (resampled from tlrc base anat, MNI_avg152T1+tlrc) +3dresample -master full_mask.$subj+tlrc -prefix ./rm.resam.group \ + -input /Users/monica/abin/MNI_avg152T1+tlrc + +# convert to binary group mask; fill gaps and holes +3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.group+tlrc \ + -prefix mask_group + +# ================================= scale ================================== +# scale each voxel time series to have a mean of 100 +# (be sure no negatives creep in) +# (subject to a range of [0,200]) +foreach run ( $runs ) + 3dTstat -prefix rm.mean_r$run pb03.$subj.r$run.blur+tlrc + 3dcalc -a pb03.$subj.r$run.blur+tlrc -b rm.mean_r$run+tlrc \ + -c mask_epi_extents+tlrc \ + -expr 'c * min(200, a/b*100)*step(a)*step(b)' \ + -prefix pb04.$subj.r$run.scale +end + +# ================================ regress ================================= + +# compute de-meaned motion parameters (for use in regression) +1d_tool.py -infile dfile_rall.1D -set_nruns 1 \ + -demean -write motion_demean.1D + +# compute motion parameter derivatives (for use in regression) +1d_tool.py -infile dfile_rall.1D -set_nruns 1 \ + -derivative -demean -write motion_deriv.1D + +# create censor file motion_${subj}_censor.1D, for censoring motion +1d_tool.py -infile dfile_rall.1D -set_nruns 1 \ + -show_censor_count -censor_prev_TR \ + -censor_motion 0.5 motion_${subj} + +# note TRs that were not censored +set ktrs = `1d_tool.py -infile motion_${subj}_censor.1D \ + -show_trs_uncensored encoded` + +# ------------------------------ +# run the regression analysis +3dDeconvolve -input pb04.$subj.r*.scale+tlrc.HEAD \ + -censor motion_${subj}_censor.1D \ + -polort 4 \ + -num_stimts 15 \ + -stim_times 1 stimuli/adj_detection_times.1D 'BLOCK(16.25)' \ + -stim_label 1 detection_times \ + -stim_times 2 stimuli/adj_landmark_correct_times.1D 'BLOCK(16.25)' \ + -stim_label 2 landmark_correct_times \ + -stim_times 3 
stimuli/adj_landmark_incorrect_times.1D 'BLOCK(16.25)' \ + -stim_label 3 landmark_incorrect_times \ + -stim_file 4 motion_demean.1D'[0]' -stim_base 4 -stim_label 4 roll_01 \ + -stim_file 5 motion_demean.1D'[1]' -stim_base 5 -stim_label 5 pitch_01 \ + -stim_file 6 motion_demean.1D'[2]' -stim_base 6 -stim_label 6 yaw_01 \ + -stim_file 7 motion_demean.1D'[3]' -stim_base 7 -stim_label 7 dS_01 \ + -stim_file 8 motion_demean.1D'[4]' -stim_base 8 -stim_label 8 dL_01 \ + -stim_file 9 motion_demean.1D'[5]' -stim_base 9 -stim_label 9 dP_01 \ + -stim_file 10 motion_deriv.1D'[0]' -stim_base 10 -stim_label 10 roll_02 \ + -stim_file 11 motion_deriv.1D'[1]' -stim_base 11 -stim_label 11 pitch_02 \ + -stim_file 12 motion_deriv.1D'[2]' -stim_base 12 -stim_label 12 yaw_02 \ + -stim_file 13 motion_deriv.1D'[3]' -stim_base 13 -stim_label 13 dS_02 \ + -stim_file 14 motion_deriv.1D'[4]' -stim_base 14 -stim_label 14 dL_02 \ + -stim_file 15 motion_deriv.1D'[5]' -stim_base 15 -stim_label 15 dP_02 \ + -gltsym 'SYM: .5*landmark_correct_times .5*landmark_incorrect_times \ + -detection_times' \ + -glt_label 1 L-D \ + -fout -tout -x1D X.xmat.1D -xjpeg X.jpg \ + -x1D_uncensored X.nocensor.xmat.1D \ + -fitts fitts.$subj \ + -errts errts.${subj} \ + -bucket stats.$subj -bout + + +# if 3dDeconvolve fails, terminate the script +if ( $status != 0 ) then + echo '---------------------------------------' + echo '** 3dDeconvolve error, failing...' + echo ' (consider the file 3dDeconvolve.err)' + exit +endif + + +# display any large pairwise correlations from the X-matrix +1d_tool.py -show_cormat_warnings -infile X.xmat.1D |& tee out.cormat_warn.txt + +# -- execute the 3dREMLfit script, written by 3dDeconvolve -- +tcsh -x stats.REML_cmd + +# if 3dREMLfit fails, terminate the script +if ( $status != 0 ) then + echo '---------------------------------------' + echo '** 3dREMLfit error, failing...' + exit +endif + + +# create an all_runs dataset to match the fitts, errts, etc. 
+3dTcat -prefix all_runs.$subj pb04.$subj.r*.scale+tlrc.HEAD + +# -------------------------------------------------- +# create a temporal signal to noise ratio dataset +# signal: if 'scale' block, mean should be 100 +# noise : compute standard deviation of errts +3dTstat -mean -prefix rm.signal.all all_runs.$subj+tlrc"[$ktrs]" +3dTstat -stdev -prefix rm.noise.all errts.${subj}_REML+tlrc"[$ktrs]" +3dcalc -a rm.signal.all+tlrc \ + -b rm.noise.all+tlrc \ + -c full_mask.$subj+tlrc \ + -expr 'c*a/b' -prefix TSNR.$subj + +# --------------------------------------------------- +# compute and store GCOR (global correlation average) +# (sum of squares of global mean of unit errts) +3dTnorm -norm2 -prefix rm.errts.unit errts.${subj}_REML+tlrc +3dmaskave -quiet -mask full_mask.$subj+tlrc rm.errts.unit+tlrc \ + > gmean.errts.unit.1D +3dTstat -sos -prefix - gmean.errts.unit.1D\' > out.gcor.1D +echo "-- GCOR = `cat out.gcor.1D`" + +# --------------------------------------------------- +# compute correlation volume +# (per voxel: average correlation across masked brain) +# (now just dot product with average unit time series) +3dcalc -a rm.errts.unit+tlrc -b gmean.errts.unit.1D -expr 'a*b' -prefix rm.DP +3dTstat -sum -prefix corr_brain rm.DP+tlrc + +# create ideal files for fixed response stim types +1dcat X.nocensor.xmat.1D'[5]' > ideal_detection_times.1D +1dcat X.nocensor.xmat.1D'[6]' > ideal_landmark_correct_times.1D +1dcat X.nocensor.xmat.1D'[7]' > ideal_landmark_incorrect_times.1D + +# -------------------------------------------------------- +# compute sum of non-baseline regressors from the X-matrix +# (use 1d_tool.py to get list of regressor colums) +set reg_cols = `1d_tool.py -infile X.nocensor.xmat.1D -show_indices_interest` +3dTstat -sum -prefix sum_ideal.1D X.nocensor.xmat.1D"[$reg_cols]" + +# also, create a stimulus-only X-matrix, for easy review +1dcat X.nocensor.xmat.1D"[$reg_cols]" > X.stim.xmat.1D + +# ============================ blur estimation 
============================= +# compute blur estimates +touch blur_est.$subj.1D # start with empty file + +# create directory for ACF curve files +mkdir files_ACF + +# -- estimate blur for each run in epits -- +touch blur.epits.1D + +# restrict to uncensored TRs, per run +foreach run ( $runs ) + set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \ + -show_trs_run $run` + if ( $trs == "" ) continue + 3dFWHMx -detrend -mask full_mask.$subj+tlrc \ + -ACF files_ACF/out.3dFWHMx.ACF.epits.r$run.1D \ + all_runs.$subj+tlrc"[$trs]" >> blur.epits.1D +end + +# compute average FWHM blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.epits.1D'{0..$(2)}'\'` ) +echo average epits FWHM blurs: $blurs +echo "$blurs # epits FWHM blur estimates" >> blur_est.$subj.1D + +# compute average ACF blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.epits.1D'{1..$(2)}'\'` ) +echo average epits ACF blurs: $blurs +echo "$blurs # epits ACF blur estimates" >> blur_est.$subj.1D + +# -- estimate blur for each run in errts -- +touch blur.errts.1D + +# restrict to uncensored TRs, per run +foreach run ( $runs ) + set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \ + -show_trs_run $run` + if ( $trs == "" ) continue + 3dFWHMx -detrend -mask full_mask.$subj+tlrc \ + -ACF files_ACF/out.3dFWHMx.ACF.errts.r$run.1D \ + errts.${subj}+tlrc"[$trs]" >> blur.errts.1D +end + +# compute average FWHM blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.errts.1D'{0..$(2)}'\'` ) +echo average errts FWHM blurs: $blurs +echo "$blurs # errts FWHM blur estimates" >> blur_est.$subj.1D + +# compute average ACF blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.errts.1D'{1..$(2)}'\'` ) +echo average errts ACF blurs: $blurs +echo "$blurs # errts ACF blur estimates" >> blur_est.$subj.1D + +# -- estimate blur for each run in err_reml -- +touch blur.err_reml.1D + +# 
restrict to uncensored TRs, per run +foreach run ( $runs ) + set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \ + -show_trs_run $run` + if ( $trs == "" ) continue + 3dFWHMx -detrend -mask full_mask.$subj+tlrc \ + -ACF files_ACF/out.3dFWHMx.ACF.err_reml.r$run.1D \ + errts.${subj}_REML+tlrc"[$trs]" >> blur.err_reml.1D +end + +# compute average FWHM blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.err_reml.1D'{0..$(2)}'\'` ) +echo average err_reml FWHM blurs: $blurs +echo "$blurs # err_reml FWHM blur estimates" >> blur_est.$subj.1D + +# compute average ACF blur (from every other row) and append +set blurs = ( `3dTstat -mean -prefix - blur.err_reml.1D'{1..$(2)}'\'` ) +echo average err_reml ACF blurs: $blurs +echo "$blurs # err_reml ACF blur estimates" >> blur_est.$subj.1D + + +# ================== auto block: generate review scripts =================== + +# generate a review script for the unprocessed EPI data +gen_epi_review.py -script @epi_review.$subj \ + -dsets pb00.$subj.r*.tcat+orig.HEAD + +# generate scripts to review single subject results +# (try with defaults, but do not allow bad exit status) +gen_ss_review_scripts.py -mot_limit 0.5 -exit0 + +# ========================== auto block: finalize ========================== + +# remove temporary files +\rm -f rm.* + +# if the basic subject review script is here, run it +# (want this to be the last text output) +if ( -e @ss_review_basic ) ./@ss_review_basic |& tee out.ss_review.$subj.txt + +# return to parent directory +cd .. 
+ +echo "execution finished: `date`" + + + + +# ========================================================================== +# script generated by the command: +# +# afni_proc.py -subj_id sub_03 -script proc.sub_03 -scr_overwrite -blocks \ +# tshift align tlrc volreg blur mask scale regress -copy_anat \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/anat/sub-03_ses-test_T1w.nii.gz \ +# -tcat_remove_first_trs 1 -dsets \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/sub-03_ses-test_task-linebisection_bold.nii.gz \ +# -tlrc_base MNI_avg152T1+tlrc -volreg_align_to third -volreg_align_e2a \ +# -volreg_tlrc_warp -blur_size 6.0 -regress_stim_times \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ +# -regress_stim_labels detection_times landmark_correct_times \ +# landmark_incorrect_times -regress_basis 'BLOCK(16.25)' \ +# -regress_censor_motion 0.5 -regress_apply_mot_types demean deriv \ +# -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ +# .5*landmark_incorrect_times -detection_times' -glt_label 1 L-D \ +# -regress_reml_exec -regress_make_ideal_sum sum_ideal.1D \ +# -regress_est_blur_epits -regress_est_blur_errts -regress_run_clustsim no From 1a438adcb3f43f81a2edc0c23d51f6d62b8ae829 Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Thu, 31 Aug 2017 19:05:11 -0400 Subject: [PATCH 4/8] Includes timing files for all 5 conditions --- scripts/HW 2 afni_proc | 6 ++++++ scripts/make_timings.py | 21 +++++++++++++++++- scripts/proc.sub_03_exp1 | 46 ++++++++++++++++++++++++---------------- 3 files changed, 54 insertions(+), 19 deletions(-) diff --git a/scripts/HW 2 afni_proc b/scripts/HW 2 afni_proc index cb24cb7..2673a23 100644 --- a/scripts/HW 2 afni_proc +++ b/scripts/HW 2 afni_proc @@ 
-33,3 +33,9 @@ Experiment 1: Experiment 2: With a smaller smoothing kernel - Dice Coefficient: .1359 Lower, as expected. + +Edit (8/31/2017) +Edited make_timings.py to create 5 timing files for each condition. All 5 stimulus files were entered into uber_subject.py. Same contrast. + +Experiment 1: + Dice Coefficient: .1381 diff --git a/scripts/make_timings.py b/scripts/make_timings.py index aa9fd08..5307934 100755 --- a/scripts/make_timings.py +++ b/scripts/make_timings.py @@ -9,11 +9,16 @@ landmark_timing_c=[] landmark_timing_ic=[] detection_timing=[] +nr_control_timing=[] +nr_task_timing=[] #Condition names landmark_c=['Correct_Task'] landmark_ic=['Incorrect_Task'] detection=['Response_Control'] +nr_control=['No_Response_Control'] +nr_task=['No_Response_Task'] + #Open event file with open(sys.argv[1], 'r') as text: @@ -31,11 +36,17 @@ landmark_timing_ic.append(m.groups()[0]) elif m.groups()[1] in detection: detection_timing.append(m.groups()[0]) + elif m.groups()[1] in nr_control: + nr_control_timing.append(m.groups()[0]) + elif m.groups()[1] in nr_task: + nr_task_timing.append(m.groups()[0]) #Convert list into tab-delimited string landmark_timing_c = '\t'.join(landmark_timing_c) landmark_timing_ic = '\t'.join(landmark_timing_ic) detection_timing = '\t'.join(detection_timing) +nr_control_timing = '\t'.join(nr_control_timing) +nr_task_timing = '\t'.join(nr_task_timing) #Write landmark timing file l = open('landmark_correct_times.1D','w+') @@ -49,4 +60,12 @@ #Write detection timing file d = open('detection_times.1D','w+') d.write(detection_timing) -d.close() \ No newline at end of file +d.close() + +n = open('nr_control_times.1D','w+') +n.write(nr_control_timing) +n.close() + +n2 = open('nr_task_times.1D','w+') +n2.write(nr_task_timing) +n2.close() \ No newline at end of file diff --git a/scripts/proc.sub_03_exp1 b/scripts/proc.sub_03_exp1 index d609298..26c09ae 100755 --- a/scripts/proc.sub_03_exp1 +++ b/scripts/proc.sub_03_exp1 @@ -1,6 +1,6 @@ #!/bin/tcsh -xef 
-echo "auto-generated by afni_proc.py, Wed Aug 30 21:45:56 2017" +echo "auto-generated by afni_proc.py, Thu Aug 31 12:13:55 2017" echo "(version 5.15, April 25, 2017)" echo "execution started: `date`" @@ -49,6 +49,8 @@ cp /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_control_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_task_times.1D \ $output_dir/stimuli # copy anatomy to results dir @@ -105,7 +107,7 @@ align_epi_anat.py -anat2epi -anat sub-03_ses-test_T1w+orig \ -save_skullstrip -suffix _al_junk \ -epi vr_base+orig -epi_base 0 \ -epi_strip 3dAutomask \ - -volreg off -tshift off -save_all -AddEdge -deoblique on -master_tlrc 3 + -volreg off -tshift off -AddEdge -deoblique on -master_tlrc 3 # ================================== tlrc ================================== # warp anatomy to standard space @@ -280,25 +282,29 @@ set ktrs = `1d_tool.py -infile motion_${subj}_censor.1D \ 3dDeconvolve -input pb04.$subj.r*.scale+tlrc.HEAD \ -censor motion_${subj}_censor.1D \ -polort 4 \ - -num_stimts 15 \ + -num_stimts 17 \ -stim_times 1 stimuli/adj_detection_times.1D 'BLOCK(16.25)' \ -stim_label 1 detection_times \ -stim_times 2 stimuli/adj_landmark_correct_times.1D 'BLOCK(16.25)' \ -stim_label 2 landmark_correct_times \ -stim_times 3 stimuli/adj_landmark_incorrect_times.1D 'BLOCK(16.25)' \ -stim_label 3 landmark_incorrect_times \ - -stim_file 4 motion_demean.1D'[0]' -stim_base 4 -stim_label 4 roll_01 \ - -stim_file 5 motion_demean.1D'[1]' -stim_base 5 -stim_label 5 pitch_01 \ - -stim_file 6 motion_demean.1D'[2]' -stim_base 6 -stim_label 6 yaw_01 \ - -stim_file 7 motion_demean.1D'[3]' -stim_base 7 -stim_label 7 dS_01 \ - -stim_file 
8 motion_demean.1D'[4]' -stim_base 8 -stim_label 8 dL_01 \ - -stim_file 9 motion_demean.1D'[5]' -stim_base 9 -stim_label 9 dP_01 \ - -stim_file 10 motion_deriv.1D'[0]' -stim_base 10 -stim_label 10 roll_02 \ - -stim_file 11 motion_deriv.1D'[1]' -stim_base 11 -stim_label 11 pitch_02 \ - -stim_file 12 motion_deriv.1D'[2]' -stim_base 12 -stim_label 12 yaw_02 \ - -stim_file 13 motion_deriv.1D'[3]' -stim_base 13 -stim_label 13 dS_02 \ - -stim_file 14 motion_deriv.1D'[4]' -stim_base 14 -stim_label 14 dL_02 \ - -stim_file 15 motion_deriv.1D'[5]' -stim_base 15 -stim_label 15 dP_02 \ + -stim_times 4 stimuli/adj_nr_control_times.1D 'BLOCK(16.25)' \ + -stim_label 4 nr_control_times \ + -stim_times 5 stimuli/adj_nr_task_times.1D 'BLOCK(16.25)' \ + -stim_label 5 nr_task_times \ + -stim_file 6 motion_demean.1D'[0]' -stim_base 6 -stim_label 6 roll_01 \ + -stim_file 7 motion_demean.1D'[1]' -stim_base 7 -stim_label 7 pitch_01 \ + -stim_file 8 motion_demean.1D'[2]' -stim_base 8 -stim_label 8 yaw_01 \ + -stim_file 9 motion_demean.1D'[3]' -stim_base 9 -stim_label 9 dS_01 \ + -stim_file 10 motion_demean.1D'[4]' -stim_base 10 -stim_label 10 dL_01 \ + -stim_file 11 motion_demean.1D'[5]' -stim_base 11 -stim_label 11 dP_01 \ + -stim_file 12 motion_deriv.1D'[0]' -stim_base 12 -stim_label 12 roll_02 \ + -stim_file 13 motion_deriv.1D'[1]' -stim_base 13 -stim_label 13 pitch_02 \ + -stim_file 14 motion_deriv.1D'[2]' -stim_base 14 -stim_label 14 yaw_02 \ + -stim_file 15 motion_deriv.1D'[3]' -stim_base 15 -stim_label 15 dS_02 \ + -stim_file 16 motion_deriv.1D'[4]' -stim_base 16 -stim_label 16 dL_02 \ + -stim_file 17 motion_deriv.1D'[5]' -stim_base 17 -stim_label 17 dP_02 \ -gltsym 'SYM: .5*landmark_correct_times .5*landmark_incorrect_times \ -detection_times' \ -glt_label 1 L-D \ @@ -366,6 +372,8 @@ echo "-- GCOR = `cat out.gcor.1D`" 1dcat X.nocensor.xmat.1D'[5]' > ideal_detection_times.1D 1dcat X.nocensor.xmat.1D'[6]' > ideal_landmark_correct_times.1D 1dcat X.nocensor.xmat.1D'[7]' > 
ideal_landmark_incorrect_times.1D +1dcat X.nocensor.xmat.1D'[8]' > ideal_nr_control_times.1D +1dcat X.nocensor.xmat.1D'[9]' > ideal_nr_task_times.1D # -------------------------------------------------------- # compute sum of non-baseline regressors from the X-matrix @@ -493,10 +501,12 @@ echo "execution finished: `date`" # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_control_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_task_times.1D \ # -regress_stim_labels detection_times landmark_correct_times \ -# landmark_incorrect_times -regress_basis 'BLOCK(16.25)' \ -# -regress_censor_motion 0.5 -regress_apply_mot_types demean deriv \ -# -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ +# landmark_incorrect_times nr_control_times nr_task_times -regress_basis \ +# 'BLOCK(16.25)' -regress_censor_motion 0.5 -regress_apply_mot_types \ +# demean deriv -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ # .5*landmark_incorrect_times -detection_times' -glt_label 1 L-D \ # -regress_reml_exec -regress_make_ideal_sum sum_ideal.1D \ # -regress_est_blur_epits -regress_est_blur_errts -regress_run_clustsim no From 130b89a0d6fa4ac8747fee53bfc15c8e6a38556c Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Mon, 4 Sep 2017 23:09:42 -0400 Subject: [PATCH 5/8] Changed basis function to GAM --- scripts/HW 2 afni_proc | 7 +++ scripts/hw_processing.sh | 2 +- scripts/{proc.sub_03_exp1 => proc.sub_03_1} | 16 +++---- scripts/{proc.sub_03_exp2 => proc.sub_03_2} | 50 ++++++++++++--------- 4 files changed, 46 insertions(+), 29 deletions(-) rename scripts/{proc.sub_03_exp1 => proc.sub_03_1} (97%) rename scripts/{proc.sub_03_exp2 => 
proc.sub_03_2} (89%) diff --git a/scripts/HW 2 afni_proc b/scripts/HW 2 afni_proc index 2673a23..482594a 100644 --- a/scripts/HW 2 afni_proc +++ b/scripts/HW 2 afni_proc @@ -39,3 +39,10 @@ Edited make_timings.py to create 5 timing files for each condition. All 5 stimul Experiment 1: Dice Coefficient: .1381 + +Edit (9/3/2017). FINALLY. +Re-ran changing basis function from BLOCK(16.25) to GAM +Experiment 1: + Dice Coefficient: .1987 +Experiment 2: With smoothing kernel of 3mm FWHM + Dice Coefficient: .1615 diff --git a/scripts/hw_processing.sh b/scripts/hw_processing.sh index ff5783f..df0cc74 100644 --- a/scripts/hw_processing.sh +++ b/scripts/hw_processing.sh @@ -33,7 +33,7 @@ done #in each results folder #1. threshold statistics -3dmerge -1zscore -1thresh 1.96 -1noneg -prefix stat.thresh 'stats.sub_03+tlrc[17]' +3dmerge -1zscore -1thresh 1.96 -1noneg -prefix stat.thresh 'stats.sub_03+tlrc['L-D_GLT#0_Tstat']' #2. calculate mask of intersection #navigate back out to sub-03 folder diff --git a/scripts/proc.sub_03_exp1 b/scripts/proc.sub_03_1 similarity index 97% rename from scripts/proc.sub_03_exp1 rename to scripts/proc.sub_03_1 index 26c09ae..c93c612 100755 --- a/scripts/proc.sub_03_exp1 +++ b/scripts/proc.sub_03_1 @@ -1,6 +1,6 @@ #!/bin/tcsh -xef -echo "auto-generated by afni_proc.py, Thu Aug 31 12:13:55 2017" +echo "auto-generated by afni_proc.py, Mon Sep 4 21:54:18 2017" echo "(version 5.15, April 25, 2017)" echo "execution started: `date`" @@ -283,15 +283,15 @@ set ktrs = `1d_tool.py -infile motion_${subj}_censor.1D \ -censor motion_${subj}_censor.1D \ -polort 4 \ -num_stimts 17 \ - -stim_times 1 stimuli/adj_detection_times.1D 'BLOCK(16.25)' \ + -stim_times 1 stimuli/adj_detection_times.1D 'GAM' \ -stim_label 1 detection_times \ - -stim_times 2 stimuli/adj_landmark_correct_times.1D 'BLOCK(16.25)' \ + -stim_times 2 stimuli/adj_landmark_correct_times.1D 'GAM' \ -stim_label 2 landmark_correct_times \ - -stim_times 3 stimuli/adj_landmark_incorrect_times.1D 
'BLOCK(16.25)' \ + -stim_times 3 stimuli/adj_landmark_incorrect_times.1D 'GAM' \ -stim_label 3 landmark_incorrect_times \ - -stim_times 4 stimuli/adj_nr_control_times.1D 'BLOCK(16.25)' \ + -stim_times 4 stimuli/adj_nr_control_times.1D 'GAM' \ -stim_label 4 nr_control_times \ - -stim_times 5 stimuli/adj_nr_task_times.1D 'BLOCK(16.25)' \ + -stim_times 5 stimuli/adj_nr_task_times.1D 'GAM' \ -stim_label 5 nr_task_times \ -stim_file 6 motion_demean.1D'[0]' -stim_base 6 -stim_label 6 roll_01 \ -stim_file 7 motion_demean.1D'[1]' -stim_base 7 -stim_label 7 pitch_01 \ @@ -505,8 +505,8 @@ echo "execution finished: `date`" # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_task_times.1D \ # -regress_stim_labels detection_times landmark_correct_times \ # landmark_incorrect_times nr_control_times nr_task_times -regress_basis \ -# 'BLOCK(16.25)' -regress_censor_motion 0.5 -regress_apply_mot_types \ -# demean deriv -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ +# GAM -regress_censor_motion 0.5 -regress_apply_mot_types demean deriv \ +# -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ # .5*landmark_incorrect_times -detection_times' -glt_label 1 L-D \ # -regress_reml_exec -regress_make_ideal_sum sum_ideal.1D \ # -regress_est_blur_epits -regress_est_blur_errts -regress_run_clustsim no diff --git a/scripts/proc.sub_03_exp2 b/scripts/proc.sub_03_2 similarity index 89% rename from scripts/proc.sub_03_exp2 rename to scripts/proc.sub_03_2 index d115ba0..8e4b54c 100755 --- a/scripts/proc.sub_03_exp2 +++ b/scripts/proc.sub_03_2 @@ -1,6 +1,6 @@ #!/bin/tcsh -xef -echo "auto-generated by afni_proc.py, Wed Aug 30 21:45:56 2017" +echo "auto-generated by afni_proc.py, Mon Sep 4 21:54:18 2017" echo "(version 5.15, April 25, 2017)" echo "execution started: `date`" @@ -49,6 +49,8 @@ cp /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ 
/Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_control_times.1D \ + /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_task_times.1D \ $output_dir/stimuli # copy anatomy to results dir @@ -105,7 +107,7 @@ align_epi_anat.py -anat2epi -anat sub-03_ses-test_T1w+orig \ -save_skullstrip -suffix _al_junk \ -epi vr_base+orig -epi_base 0 \ -epi_strip 3dAutomask \ - -volreg off -tshift off -save_all -AddEdge -deoblique on -master_tlrc 3 + -volreg off -tshift off -AddEdge -deoblique on -master_tlrc 3 # ================================== tlrc ================================== # warp anatomy to standard space @@ -280,25 +282,29 @@ set ktrs = `1d_tool.py -infile motion_${subj}_censor.1D \ 3dDeconvolve -input pb04.$subj.r*.scale+tlrc.HEAD \ -censor motion_${subj}_censor.1D \ -polort 4 \ - -num_stimts 15 \ - -stim_times 1 stimuli/adj_detection_times.1D 'BLOCK(16.25)' \ + -num_stimts 17 \ + -stim_times 1 stimuli/adj_detection_times.1D 'GAM' \ -stim_label 1 detection_times \ - -stim_times 2 stimuli/adj_landmark_correct_times.1D 'BLOCK(16.25)' \ + -stim_times 2 stimuli/adj_landmark_correct_times.1D 'GAM' \ -stim_label 2 landmark_correct_times \ - -stim_times 3 stimuli/adj_landmark_incorrect_times.1D 'BLOCK(16.25)' \ + -stim_times 3 stimuli/adj_landmark_incorrect_times.1D 'GAM' \ -stim_label 3 landmark_incorrect_times \ - -stim_file 4 motion_demean.1D'[0]' -stim_base 4 -stim_label 4 roll_01 \ - -stim_file 5 motion_demean.1D'[1]' -stim_base 5 -stim_label 5 pitch_01 \ - -stim_file 6 motion_demean.1D'[2]' -stim_base 6 -stim_label 6 yaw_01 \ - -stim_file 7 motion_demean.1D'[3]' -stim_base 7 -stim_label 7 dS_01 \ - -stim_file 8 motion_demean.1D'[4]' -stim_base 8 -stim_label 8 dL_01 \ - -stim_file 9 motion_demean.1D'[5]' -stim_base 9 -stim_label 9 
dP_01 \ - -stim_file 10 motion_deriv.1D'[0]' -stim_base 10 -stim_label 10 roll_02 \ - -stim_file 11 motion_deriv.1D'[1]' -stim_base 11 -stim_label 11 pitch_02 \ - -stim_file 12 motion_deriv.1D'[2]' -stim_base 12 -stim_label 12 yaw_02 \ - -stim_file 13 motion_deriv.1D'[3]' -stim_base 13 -stim_label 13 dS_02 \ - -stim_file 14 motion_deriv.1D'[4]' -stim_base 14 -stim_label 14 dL_02 \ - -stim_file 15 motion_deriv.1D'[5]' -stim_base 15 -stim_label 15 dP_02 \ + -stim_times 4 stimuli/adj_nr_control_times.1D 'GAM' \ + -stim_label 4 nr_control_times \ + -stim_times 5 stimuli/adj_nr_task_times.1D 'GAM' \ + -stim_label 5 nr_task_times \ + -stim_file 6 motion_demean.1D'[0]' -stim_base 6 -stim_label 6 roll_01 \ + -stim_file 7 motion_demean.1D'[1]' -stim_base 7 -stim_label 7 pitch_01 \ + -stim_file 8 motion_demean.1D'[2]' -stim_base 8 -stim_label 8 yaw_01 \ + -stim_file 9 motion_demean.1D'[3]' -stim_base 9 -stim_label 9 dS_01 \ + -stim_file 10 motion_demean.1D'[4]' -stim_base 10 -stim_label 10 dL_01 \ + -stim_file 11 motion_demean.1D'[5]' -stim_base 11 -stim_label 11 dP_01 \ + -stim_file 12 motion_deriv.1D'[0]' -stim_base 12 -stim_label 12 roll_02 \ + -stim_file 13 motion_deriv.1D'[1]' -stim_base 13 -stim_label 13 pitch_02 \ + -stim_file 14 motion_deriv.1D'[2]' -stim_base 14 -stim_label 14 yaw_02 \ + -stim_file 15 motion_deriv.1D'[3]' -stim_base 15 -stim_label 15 dS_02 \ + -stim_file 16 motion_deriv.1D'[4]' -stim_base 16 -stim_label 16 dL_02 \ + -stim_file 17 motion_deriv.1D'[5]' -stim_base 17 -stim_label 17 dP_02 \ -gltsym 'SYM: .5*landmark_correct_times .5*landmark_incorrect_times \ -detection_times' \ -glt_label 1 L-D \ @@ -366,6 +372,8 @@ echo "-- GCOR = `cat out.gcor.1D`" 1dcat X.nocensor.xmat.1D'[5]' > ideal_detection_times.1D 1dcat X.nocensor.xmat.1D'[6]' > ideal_landmark_correct_times.1D 1dcat X.nocensor.xmat.1D'[7]' > ideal_landmark_incorrect_times.1D +1dcat X.nocensor.xmat.1D'[8]' > ideal_nr_control_times.1D +1dcat X.nocensor.xmat.1D'[9]' > ideal_nr_task_times.1D # 
-------------------------------------------------------- # compute sum of non-baseline regressors from the X-matrix @@ -493,9 +501,11 @@ echo "execution finished: `date`" # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_detection_times.1D \ # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_correct_times.1D \ # /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_landmark_incorrect_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_control_times.1D \ +# /Users/monica/Documents/ibrain-training/hw2/sub-03/ses-test/func/adj_nr_task_times.1D \ # -regress_stim_labels detection_times landmark_correct_times \ -# landmark_incorrect_times -regress_basis 'BLOCK(16.25)' \ -# -regress_censor_motion 0.5 -regress_apply_mot_types demean deriv \ +# landmark_incorrect_times nr_control_times nr_task_times -regress_basis \ +# GAM -regress_censor_motion 0.5 -regress_apply_mot_types demean deriv \ # -regress_opts_3dD -gltsym 'SYM: .5*landmark_correct_times \ # .5*landmark_incorrect_times -detection_times' -glt_label 1 L-D \ # -regress_reml_exec -regress_make_ideal_sum sum_ideal.1D \ From 62087921aced7854fb06b5d75b291fd18d69e208 Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Mon, 25 Sep 2017 15:01:16 -0400 Subject: [PATCH 6/8] Output and scripts for hw3 --- hw3/output/results_FDR_table.txt | 3 ++ hw3/output/results_table.txt | 4 +++ hw3/output/words_output.txt | 27 ++++++++++++++++ hw3/scripts/clustertable_extract.py | 21 ++++++++++++ hw3/scripts/hw3.sh | 50 +++++++++++++++++++++++++++++ 5 files changed, 105 insertions(+) create mode 100644 hw3/output/results_FDR_table.txt create mode 100644 hw3/output/results_table.txt create mode 100644 hw3/output/words_output.txt create mode 100755 hw3/scripts/clustertable_extract.py create mode 100644 hw3/scripts/hw3.sh diff --git a/hw3/output/results_FDR_table.txt b/hw3/output/results_FDR_table.txt new file mode 100644 index 0000000..7787e28 --- 
/dev/null +++ b/hw3/output/results_FDR_table.txt @@ -0,0 +1,3 @@ + k (Voxels) Z x y z +1 57299 3.2815 3.0 66.0 45.0 +Z is z-score of peak voxel. XYZ Coordinates are for peak voxel, in TLRC space (in mm) \ No newline at end of file diff --git a/hw3/output/results_table.txt b/hw3/output/results_table.txt new file mode 100644 index 0000000..77e0ad8 --- /dev/null +++ b/hw3/output/results_table.txt @@ -0,0 +1,4 @@ + k (Voxels) Z x y z +1 22282 0.2257 -42.0 87.0 -15.0 +2 19 0.023 39.0 -57.0 21.0 +Z is z-score of peak voxel. XYZ Coordinates are for peak voxel, in TLRC space (in mm) \ No newline at end of file diff --git a/hw3/output/words_output.txt b/hw3/output/words_output.txt new file mode 100644 index 0000000..eac8b4f --- /dev/null +++ b/hw3/output/words_output.txt @@ -0,0 +1,27 @@ +HISTORY_NOTE = [monica@mriusers-MBP: Sun Sep 17 21:47:25 2017] {AFNI_17.1.11:macosx_10.7_Intel_64} 3dttest++ -mask ../masks/group_mask+tlrc -prefix words -setA 'stats*+tlrc.HEAD[Words#0_Coef]'~ +TYPESTRING = 3DIM_HEAD_FUNC~ +IDCODE_STRING = AFN_PDV_VSKBMM4QFFgD3s-35A~ +IDCODE_DATE = Sun Sep 17 21:47:25 2017~ +SCENE_DATA = 2 11 1 -999 -999 -999 -999 -999 +LABEL_1 = zyxt~ +LABEL_2 = zyxt~ +DATASET_NAME = zyxt~ +ORIENT_SPECIFIC = 1 2 4 +ORIGIN = 90 126 -72 +DELTA = -3 -3 3 +IJK_TO_DICOM = -3 0 0 90 0 -3 0 126 0 0 3 -72 +IJK_TO_DICOM_REAL = -3 0 0 90 0 -3 0 126 0 0 3 -72 +BRICK_STATS = -0.248389 0.225726 -9.351179 8.191493 +DATASET_RANK = 3 2 0 0 0 0 0 0 +DATASET_DIMENSIONS = 61 73 61 0 0 +BRICK_TYPES = 3 3 +BRICK_FLOAT_FACS = 0 0 +BRICK_LABS = SetA_mean~SetA_Tstat~ +BRICK_STATAUX = 1 3 1 16 +BRICK_STATSYM = none;Ttest(16)~ +FDRCURVE_000001 = 0.000129 0.092062 0.443462 0.472502 0.506242 0.539848 0.577305 0.616368 0.659138 0.700843 0.744295 0.791323 0.839838 0.888438 0.939331 0.989339 1.040367 1.092791 1.146421 1.20042 1.254131 1.308733 1.362172 1.41752 1.470654 1.524289 1.577432 1.633076 1.686124 1.737868 1.791292 1.842088 1.892841 1.943333 1.991563 2.041249 2.089485 2.136528 2.181833 2.226235 
2.270943 2.313704 2.355185 2.396872 2.437348 2.47631 2.514298 2.547712 2.580618 2.609116 2.646286 2.675714 2.706881 2.735897 2.763161 2.78445 2.811696 2.830003 2.851635 2.876096 2.899488 2.919909 2.942753 2.959107 2.974747 2.989804 2.997524 3.014758 3.014771 3.024873 3.036207 3.036207 3.03701 3.055237 3.065696 3.070787 3.082001 3.090105 3.10342 3.105447 3.105447 3.105447 3.105447 3.105447 3.105447 3.106586 3.10774 3.110595 3.113038 3.113038 3.113038 3.11365 3.113867 3.134915 3.159632 3.18435 3.209066 3.233783 3.258501 3.269092 3.279443 3.281517 3.281517 +MDFCURVE_000001 = -6.123312 0.200753 0.999066 0.998484 0.997668 0.996029 0.994475 0.992022 0.987634 0.981551 0.973525 0.963736 0.951574 0.935694 0.914703 0.889466 0.861413 0.824368 0.783225 0.736574 0.683813 0.622429 0.551571 0.473139 0.3865 0.293849 0.201926 0.109237 0.03316 0 +TEMPLATE_SPACE = MNI~ +INT_CMAP = 0 +BYTEORDER_STRING = LSB_FIRST~ +BRICK_KEYWORDS = (null) diff --git a/hw3/scripts/clustertable_extract.py b/hw3/scripts/clustertable_extract.py new file mode 100755 index 0000000..6db597d --- /dev/null +++ b/hw3/scripts/clustertable_extract.py @@ -0,0 +1,21 @@ +#!/Users/monica/anaconda/bin/python +import argparse +import pandas as pd + +parser=argparse.ArgumentParser() +parser.add_argument('--clusterfile',action='store',type=str,help='Cluster Report file') +args=parser.parse_args() + +#read in output of 3dclust +raw=pd.read_table(args.clusterfile,skiprows=10,sep='\s{2,}') +#keep only rows with data +data=raw.drop(raw.index[[0,3,4]]) +#keep only columns of interest +table=data[['#Volume','Max Int','MI RL','MI AP','MI IS']] +#rename first column head +table=table.rename(columns={'#Volume':'k (Voxels)', 'Max Int':'Z','MI RL':'x', 'MI AP':'y', 'MI IS':'z'}) +table.append + +table.to_csv('results_table.txt',sep='\t') +with open('results_table.txt','a+') as f: + f.write('Z is z-score of peak voxel. 
XYZ Coordinates are for peak voxel, in TLRC space (in mm)') diff --git a/hw3/scripts/hw3.sh b/hw3/scripts/hw3.sh new file mode 100644 index 0000000..a056ebc --- /dev/null +++ b/hw3/scripts/hw3.sh @@ -0,0 +1,50 @@ +#HW 3 +#Monica Ly + +########## +# Part 1: group statistics +cd masks +# Create group mask +3dmask_tool -input full_mask*+tlrc.HEAD -frac 1.0 -prefix group_mask +cd ../betas +3dinfo -label stats.sub-01_REML+tlrc +3dttest++ -mask ../masks/group_mask+tlrc -prefix words -setA 'stats*+tlrc.HEAD['Words#0_Coef']' +3dcopy ~/abin/MNI_avg152T1+tlrc . +3dAttribute -all words+tlrc > words_output.txt + +########### +# Part 2: cluster correction +#Calculate average smoothness +for f in ../blurs/blur*.1D; do + 1d_tool.py -infile $f -select_rows 5 -write - >> ../blurs/blurs.acf.1D +done +1dsum -mean ../blurs/blurs.acf.1D > ../blurs/blurs_mean.1D +#0.823891 4.19962 14.369 10.4995 + +#Simulate for number of clusters that would arise randomly +3dClustSim -mask ../masks/group_mask+tlrc -acf `1dcat ../blurs/blurs_mean.1D[0:2]` \ +-both \ +-pthr .001 \ +-athr .05 \ +-iter 5000 \ +-prefix cluster \ +-cmd refit.cmd +#Smallest cluster size: 18 (NN3_2sided) + +3dcalc -a words+tlrc -b ../masks/group_mask+tlrc -expr 'a*b' -prefix words_masked +3dmerge -1zscore -prefix words_z words_masked+tlrc +3dclust -1Dformat -prefix words_clustered -savemask words_clustermask -inmask -1noneg -NN3 18 words_z+tlrc >> cluster_report.txt +#-1noneg looking for only positive clusters + +#read output of 3dclust (cluster_report.txt) into clustertable_extract.py to get pretty tables +chmod 755 ../scripts/clustertable_extract.py +../scripts/clustertable_extract.py --clusterfile cluster_report.txt +#should output results_table.txt + +############ +# Part 3: FDR Correction +3dFDR -input words+tlrc[1] -prefix words_FDR +3dcalc -a words_FDR+tlrc -b ../masks/group_mask+tlrc -expr 'a*b' -prefix words_FDR_masked +3dmerge -1zscore -prefix words_FDR_z words_FDR_masked+tlrc +3dclust -1Dformat -prefix 
words_FDR_clustered -savemask words_FDR_clustermask -inmask -1noneg -NN3 18 words_FDR_z+tlrc >> cluster_FDR_report.txt +../scripts/clustertable_extract.py --clusterfile cluster_FDR_report.txt \ No newline at end of file From 5c36f93b805f8ba5c671a23fd2a000dbad4d7414 Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Tue, 5 Dec 2017 11:32:28 -0500 Subject: [PATCH 7/8] HW4 Scripts --- hw4/scripts/batch_hw4_gpu.sh | 45 ++++++++++++++++++++++++++++++++++++ hw4/scripts/burc_wrapper.sh | 17 ++++++++++++++ hw4/scripts/mergeb0s.sh | 36 +++++++++++++++++++++++++++++ hw4/scripts/setup.sh | 18 +++++++++++++++ hw4/scripts/topups.sh | 3 +++ 5 files changed, 119 insertions(+) create mode 100755 hw4/scripts/batch_hw4_gpu.sh create mode 100755 hw4/scripts/burc_wrapper.sh create mode 100755 hw4/scripts/mergeb0s.sh create mode 100644 hw4/scripts/setup.sh create mode 100644 hw4/scripts/topups.sh diff --git a/hw4/scripts/batch_hw4_gpu.sh b/hw4/scripts/batch_hw4_gpu.sh new file mode 100755 index 0000000..15cd907 --- /dev/null +++ b/hw4/scripts/batch_hw4_gpu.sh @@ -0,0 +1,45 @@ +#!/bin/bash +#SBATCH --mail-type=ALL # Mail events (NONE, BEGIN, END, FAIL, ALL) +#SBATCH --mail-user=monica.ly@uconn.edu # Your email address +#SBATCH --nodes=1 # OpenMP requires a single node +#SBATCH --ntasks=1 # Run a single serial task +#SBATCH --cpus-per-task=8 # Number of cores to use +#SBATCH --mem=8192mb # Memory limit +#SBATCH --time=04:00:00 # Time limit hh:mm:ss +#SBATCH -e error_%A_%a.log # Standard error +#SBATCH -o output_%A_%a.log # Standard output +#SBATCH --job-name=DWIMerge # Descriptive job name +#SBATCH --partition=general + +export OMP_NUM_THREADS=8 #<= cpus-per-task +export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=8 #<= cpus-per-task +##### END OF JOB DEFINITION ##### + +#Define user paths +NETID=$USER +PROJECT=hw4 + +export DIR_BASE=/scratch/${NETID}/${PROJECT} +export DIR_RESOURCES=${DIR_BASE}/resources #ro +export DIR_DATA=${DIR_BASE}/data #rw data +export 
DIR_DATAIN=/scratch/birc_ro/ibrain_dwi/100206 #ro data +export DIR_DATAOUT=${DIR_BASE}/data_out #rw data +export SUBJECTS_DIR=${DIR_BASE}/freesurfer #rw for Freesurfer +export DIR_WORK=/work #rw /work on HPC is 40Gb local storage +export DIR_SCRATCH=${DIR_BASE}/scratch #rw shared storage +export DIR_SCRIPTS=${DIR_BASE}/scripts #ro, prepended to PATH + + +# Load modules +module load cuda/8.0.61 #for GPU/CUDA +module load matlab/2017a #matlab binaries are bound +module load singularity/2.3.1-gcc #required to run the container + +#set the matlab license path to the path inside the container +export LM_LICENSE_FILE=/bind/matlablicense/uits.lic + +#finally call the container with any arguments for the job +#wrapper will bind the appropriate paths +#environment variables are passed to the container + +./burc_wrapper.sh /bind/scripts/topups.sh diff --git a/hw4/scripts/burc_wrapper.sh b/hw4/scripts/burc_wrapper.sh new file mode 100755 index 0000000..5b42172 --- /dev/null +++ b/hw4/scripts/burc_wrapper.sh @@ -0,0 +1,17 @@ +#!/bin/bash +#Run the container +#Delete any unused bind points below +singularity run --bind $(dirname `which matlab`)/..:/bind/bin/matlab \ +--bind /apps2/matlab:/bind/matlablicense \ +--bind ${DIR_DATA}:/bind/data:rw \ +--bind ${DIR_DATAIN}:/bind/data_in \ +--bind ${DIR_DATAOUT}:/bind/data_out:rw \ +--bind ${SUBJECTS_DIR}:/bind/freesurfer:rw \ +--bind ${DIR_RESOURCES}:/bind/resources:ro \ +--bind ${DIR_SCRATCH}:/bind/scratch:rw \ +--bind ${DIR_WORK}:/bind/work:rw \ +--bind ${DIR_SCRIPTS}:/bind/scripts:ro \ +/scratch/birc_ro/containers/burc.img "$@" + +#add this line to limit access to home and /tmp +#--contain --workdir ${DIR_TMP} \ diff --git a/hw4/scripts/mergeb0s.sh b/hw4/scripts/mergeb0s.sh new file mode 100755 index 0000000..053559f --- /dev/null +++ b/hw4/scripts/mergeb0s.sh @@ -0,0 +1,36 @@ +#/bin/bash + +#If doing this in hpc +cd /bind/data_in +for direction in LR RL +do + dwi=100206_3T_DWI_dir95_$direction + #Cut off the top of the head volume 
to make the z dimension even + fslroi ${dwi}.nii.gz /bind/data/${dwi}_even.nii.gz 0 -1 0 -1 0 110 0 -1 + #echo $dwi + for index in 0 16 32 48 64 80 + do + #echo b0_${direction}_${index}.nii.gz + fslroi /bind/data/${dwi}_even.nii.gz /bind/data/b0_${direction}_${index}.nii.gz $index 1 + done + fslmerge -t /bind/data/b0merged_${direction} /bind/data/b0_${direction}_*.nii.gz +done +fslmerge -t /bind/data/b0_all /bind/data/b0merged_*.nii.gz + +#If doing this on local computer +#run from hw4 directory +# cd data_in +# for direction in LR RL +# do +# dwi=100206_3T_DWI_dir95_$direction.nii.gz +# #Cut off the top of the head volume to make the z dimension even +# fslroi ${dwi}.nii.gz ../preproc/${dwi}_even.nii.gz 0 -1 0 -1 0 110 0 -1 +# #echo $dwi +# for index in 0 16 32 48 64 80 +# do +# #echo b0_${direction}_${index}.nii.gz +# fslroi ../preproc/${dwi}_even.nii.gz ../preproc/b0_${direction}_${index}.nii.gz $index 1 +# done +# fslmerge -t ../preproc/b0merged_${direction} ../preproc/b0_${direction}_*.nii.gz +# done +# fslmerge -t ../preproc/b0_all ../preproc/b0merged_*.nii.gz diff --git a/hw4/scripts/setup.sh b/hw4/scripts/setup.sh new file mode 100644 index 0000000..ad542c8 --- /dev/null +++ b/hw4/scripts/setup.sh @@ -0,0 +1,18 @@ +#/bin/bash +### HW 4 + +#Set up HPC +#Log into HPC +# ssh mtl15001@login.storrs.hpc.uconn.edu + +# 1. Clone container repository to a directory on HPC +# Log into HPC +git clone https://github.com/bircibrain/containers +# 2. Run mk_skel.sh hw 4 to create a skeleton directory at /scratch/mtl15001/hw4 +cd containers/example_sbatch/ +./mk_skel.sh hw4 +# 3. Place your processing script in /scratch/mtl15001/hw4/scripts +cp batch_gpu.sh /scratch/mtl15001/hw4/scripts/batch_hw4_gpu.sh +# 4. Modify batch_gpu.sh to run your SLURM job. 
+sbatch batch_hw4_gpu.sh +#within batch_hw4_gpu.sh, run the job script using burc_wrapper.sh script.sh \ No newline at end of file diff --git a/hw4/scripts/topups.sh b/hw4/scripts/topups.sh new file mode 100644 index 0000000..e60fe98 --- /dev/null +++ b/hw4/scripts/topups.sh @@ -0,0 +1,3 @@ +#!/bin/bash +cd /bind/data +topup --imain=b0_all.nii.gz --datain=acqparams.txt --config=b02b0.cnf --out=topup --iout=corrected_b0 \ No newline at end of file From 723041482a613aeabe08e6d5c91a15aacb9e90dc Mon Sep 17 00:00:00 2001 From: Monica Ly Date: Tue, 5 Dec 2017 11:34:25 -0500 Subject: [PATCH 8/8] HW4 Answers --- hw4/HW4_ans | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 hw4/HW4_ans diff --git a/hw4/HW4_ans b/hw4/HW4_ans new file mode 100644 index 0000000..3b5cc4d --- /dev/null +++ b/hw4/HW4_ans @@ -0,0 +1,24 @@ +HW 4 +December 3, 2017 +Monica Ly + +Review the data +Q: How many shells (different nominal b values) are there? What are the shell b values? +A: By looking in the 100206_3T_DWI_dir95_LR.bval file, there are 4 shells: 0-5, 1000ish, 2000ish, and 3000ish. +Q: How many b0 volumes are there in each DWI Sequence? +A: 6. They show up as 5s in the .bval. Indices: 0, 16, 32, 48, 64, 80 +Q: Are the LR/RL phase encoding labels correct? +A: Yes. Looking in AFNI, there are large distortions (big chunks displaced around slice 30) that match the phase encoding labeled + +Preprocess +0. Topup needs to have even dimensions. nifti_tool -disp_hdr -infile b0_LR_0.nii.gz shows that the dims (x,y,z) are 144 168 111 + Z has odd number, so I want to extract all but the top volume first - see script: mergeb0s.sh +1. Extract the b0 volumes from each DWI sequence and combine them into a single 4D NIFTI file +See script: mergeb0s.sh +2. Estimation of the distortion field using topup. 
+EPI factor = 144 +Echo spacing = 0.78ms = 0.00078 +Read out time (for Siemens) = (EPI factor - 1) * echo spacing = (144-1)*.00078 = 0.11154 +See script: topups.sh (took 3.5 hours to run) +3. +