Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions hw3/output/results_FDR_table.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
k (Voxels) Z x y z
1 57299 3.2815 3.0 66.0 45.0
Z is z-score of peak voxel. XYZ Coordinates are for peak voxel, in TLRC space (in mm)
4 changes: 4 additions & 0 deletions hw3/output/results_table.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
k (Voxels) Z x y z
1 22282 0.2257 -42.0 87.0 -15.0
2 19 0.023 39.0 -57.0 21.0
Z is z-score of peak voxel. XYZ Coordinates are for peak voxel, in TLRC space (in mm)
27 changes: 27 additions & 0 deletions hw3/output/words_output.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
HISTORY_NOTE = [monica@mriusers-MBP: Sun Sep 17 21:47:25 2017] {AFNI_17.1.11:macosx_10.7_Intel_64} 3dttest++ -mask ../masks/group_mask+tlrc -prefix words -setA 'stats*+tlrc.HEAD[Words#0_Coef]'~
TYPESTRING = 3DIM_HEAD_FUNC~
IDCODE_STRING = AFN_PDV_VSKBMM4QFFgD3s-35A~
IDCODE_DATE = Sun Sep 17 21:47:25 2017~
SCENE_DATA = 2 11 1 -999 -999 -999 -999 -999
LABEL_1 = zyxt~
LABEL_2 = zyxt~
DATASET_NAME = zyxt~
ORIENT_SPECIFIC = 1 2 4
ORIGIN = 90 126 -72
DELTA = -3 -3 3
IJK_TO_DICOM = -3 0 0 90 0 -3 0 126 0 0 3 -72
IJK_TO_DICOM_REAL = -3 0 0 90 0 -3 0 126 0 0 3 -72
BRICK_STATS = -0.248389 0.225726 -9.351179 8.191493
DATASET_RANK = 3 2 0 0 0 0 0 0
DATASET_DIMENSIONS = 61 73 61 0 0
BRICK_TYPES = 3 3
BRICK_FLOAT_FACS = 0 0
BRICK_LABS = SetA_mean~SetA_Tstat~
BRICK_STATAUX = 1 3 1 16
BRICK_STATSYM = none;Ttest(16)~
FDRCURVE_000001 = 0.000129 0.092062 0.443462 0.472502 0.506242 0.539848 0.577305 0.616368 0.659138 0.700843 0.744295 0.791323 0.839838 0.888438 0.939331 0.989339 1.040367 1.092791 1.146421 1.20042 1.254131 1.308733 1.362172 1.41752 1.470654 1.524289 1.577432 1.633076 1.686124 1.737868 1.791292 1.842088 1.892841 1.943333 1.991563 2.041249 2.089485 2.136528 2.181833 2.226235 2.270943 2.313704 2.355185 2.396872 2.437348 2.47631 2.514298 2.547712 2.580618 2.609116 2.646286 2.675714 2.706881 2.735897 2.763161 2.78445 2.811696 2.830003 2.851635 2.876096 2.899488 2.919909 2.942753 2.959107 2.974747 2.989804 2.997524 3.014758 3.014771 3.024873 3.036207 3.036207 3.03701 3.055237 3.065696 3.070787 3.082001 3.090105 3.10342 3.105447 3.105447 3.105447 3.105447 3.105447 3.105447 3.106586 3.10774 3.110595 3.113038 3.113038 3.113038 3.11365 3.113867 3.134915 3.159632 3.18435 3.209066 3.233783 3.258501 3.269092 3.279443 3.281517 3.281517
MDFCURVE_000001 = -6.123312 0.200753 0.999066 0.998484 0.997668 0.996029 0.994475 0.992022 0.987634 0.981551 0.973525 0.963736 0.951574 0.935694 0.914703 0.889466 0.861413 0.824368 0.783225 0.736574 0.683813 0.622429 0.551571 0.473139 0.3865 0.293849 0.201926 0.109237 0.03316 0
TEMPLATE_SPACE = MNI~
INT_CMAP = 0
BYTEORDER_STRING = LSB_FIRST~
BRICK_KEYWORDS = (null)
21 changes: 21 additions & 0 deletions hw3/scripts/clustertable_extract.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#!/Users/monica/anaconda/bin/python
"""Extract a tidy cluster table from 3dclust output.

Reads the text report produced by AFNI's 3dclust, keeps the peak-voxel
columns of interest, renames them, and writes results_table.txt with a
trailing explanatory footnote.
"""
import argparse

import pandas as pd

parser = argparse.ArgumentParser()
parser.add_argument('--clusterfile', action='store', type=str,
                    help='Cluster Report file (output of 3dclust)')
args = parser.parse_args()

# Read in output of 3dclust. The first 10 lines are commentary; columns
# are separated by runs of 2+ spaces, so use a regex separator (which
# requires the python parsing engine).
raw = pd.read_table(args.clusterfile, skiprows=10, sep=r'\s{2,}',
                    engine='python')
# Keep only rows with data (drop the units row and trailing summary rows).
data = raw.drop(raw.index[[0, 3, 4]])
# Keep only columns of interest: cluster size, peak intensity, peak xyz.
table = data[['#Volume', 'Max Int', 'MI RL', 'MI AP', 'MI IS']]
# Rename column headers to friendlier labels.
# NB: the bare `table.append` statement that used to follow was a no-op
# (attribute access without a call) and has been removed.
table = table.rename(columns={'#Volume': 'k (Voxels)', 'Max Int': 'Z',
                              'MI RL': 'x', 'MI AP': 'y', 'MI IS': 'z'})

table.to_csv('results_table.txt', sep='\t')
# Append the footnote after the tab-separated table.
with open('results_table.txt', 'a+') as f:
    f.write('Z is z-score of peak voxel. XYZ Coordinates are for peak voxel, '
            'in TLRC space (in mm)')
50 changes: 50 additions & 0 deletions hw3/scripts/hw3.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
#!/bin/bash
#HW 3
#Monica Ly
# Group statistics, cluster correction, and FDR correction with AFNI.

##########
# Part 1: group statistics
cd masks || exit 1
# Create group mask: voxels present in every subject's full_mask
3dmask_tool -input full_mask*+tlrc.HEAD -frac 1.0 -prefix group_mask
cd ../betas || exit 1
# List sub-brick labels to locate the Words beta coefficient
3dinfo -label stats.sub-01_REML+tlrc
# One-sample t-test across subjects on the Words beta maps.
# The whole selector stays inside ONE pair of single quotes so the shell
# neither globs the brackets nor strips them (the original nested quotes
# left Words#0_Coef outside the quoting).
3dttest++ -mask ../masks/group_mask+tlrc -prefix words -setA 'stats*+tlrc.HEAD[Words#0_Coef]'
3dcopy ~/abin/MNI_avg152T1+tlrc .
3dAttribute -all words+tlrc > words_output.txt

###########
# Part 2: cluster correction
# Calculate average smoothness (ACF parameters) across runs
for f in ../blurs/blur*.1D; do
  1d_tool.py -infile "$f" -select_rows 5 -write - >> ../blurs/blurs.acf.1D
done
1dsum -mean ../blurs/blurs.acf.1D > ../blurs/blurs_mean.1D
#0.823891 4.19962 14.369 10.4995

# Simulate for number of clusters that would arise randomly.
# The [0:2] selector is quoted so the shell does not glob it; the $(...)
# substitution is intentionally UNQUOTED because 1dcat prints three ACF
# parameters that must reach 3dClustSim as separate words.
3dClustSim -mask ../masks/group_mask+tlrc -acf $(1dcat '../blurs/blurs_mean.1D[0:2]') \
  -both \
  -pthr .001 \
  -athr .05 \
  -iter 5000 \
  -prefix cluster \
  -cmd refit.cmd
#Smallest cluster size: 18 (NN3_2sided)

3dcalc -a words+tlrc -b ../masks/group_mask+tlrc -expr 'a*b' -prefix words_masked
3dmerge -1zscore -prefix words_z words_masked+tlrc
3dclust -1Dformat -prefix words_clustered -savemask words_clustermask -inmask -1noneg -NN3 18 words_z+tlrc >> cluster_report.txt
#-1noneg looking for only positive clusters

#read output of 3dclust (cluster_report.txt) into clustertable_extract.py to get pretty tables
chmod 755 ../scripts/clustertable_extract.py
../scripts/clustertable_extract.py --clusterfile cluster_report.txt
#should output results_table.txt

############
# Part 3: FDR Correction
# Quote the sub-brick selector so the shell does not treat [1] as a glob
3dFDR -input 'words+tlrc[1]' -prefix words_FDR
3dcalc -a words_FDR+tlrc -b ../masks/group_mask+tlrc -expr 'a*b' -prefix words_FDR_masked
3dmerge -1zscore -prefix words_FDR_z words_FDR_masked+tlrc
3dclust -1Dformat -prefix words_FDR_clustered -savemask words_FDR_clustermask -inmask -1noneg -NN3 18 words_FDR_z+tlrc >> cluster_FDR_report.txt
../scripts/clustertable_extract.py --clusterfile cluster_FDR_report.txt
24 changes: 24 additions & 0 deletions hw4/HW4_ans
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
HW 4
December 3, 2017
Monica Ly

Review the data
Q: How many shells (different nominal b values) are there? What are the shell b values?
A: By looking in the 100206_3T_DWI_dir95_LR.bval file, there are 4 shells: 0-5, 1000ish, 2000ish, and 3000ish.
Q: How many b0 volumes are there in each DWI Sequence?
A: 6. They show up as 5s in the .bval. Indices: 0, 16, 32, 48, 64, 80
Q: Are the LR/RL phase encoding labels correct?
A: Yes. Looking in AFNI, there are large distortions (big chunks displaced around slice 30) that match the phase encoding labeled

Preprocess
0. Topup needs to have even dimensions. nifti_tool -disp_hdr -infile b0_LR_0.nii.gz shows that the dims (x,y,z) are 144 168 111
Z has an odd number of slices, so I want to remove the top slice first - see script: mergeb0s.sh
1. Extract the b0 volumes from each DWI sequence and combine them into a single 4D NIFTI file
See script: mergeb0s.sh
2. Estimation of the distortion field using topup.
EPI factor = 144
Echo spacing = 0.78ms = 0.00078
Read out time (for Siemens) = (EPI factor - 1) * echo spacing = (144-1)*.00078 = 0.11154
See script: topups.sh (took 3.5 hours to run)
3.

45 changes: 45 additions & 0 deletions hw4/scripts/batch_hw4_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
#!/bin/bash
# SLURM batch script: runs topups.sh inside the BURC Singularity
# container (via burc_wrapper.sh). Submit with: sbatch batch_hw4_gpu.sh
# SBATCH directives must appear before the first executable line.
#SBATCH --mail-type=ALL # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=monica.ly@uconn.edu # Your email address
#SBATCH --nodes=1 # OpenMP requires a single node
#SBATCH --ntasks=1 # Run a single serial task
#SBATCH --cpus-per-task=8 # Number of cores to use
#SBATCH --mem=8192mb # Memory limit
# NOTE(review): SLURM --mem units are usually a single letter (K/M/G/T);
# confirm the local SLURM accepts the "mb" suffix.
#SBATCH --time=04:00:00 # Time limit hh:mm:ss
#SBATCH -e error_%A_%a.log # Standard error
#SBATCH -o output_%A_%a.log # Standard output
#SBATCH --job-name=DWIMerge # Descriptive job name
#SBATCH --partition=general

# Thread counts for OpenMP and ITK; keep in sync with --cpus-per-task.
export OMP_NUM_THREADS=8 #<= cpus-per-task
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=8 #<= cpus-per-task
##### END OF JOB DEFINITION #####

#Define user paths
NETID=$USER
PROJECT=hw4

# These DIR_* variables are consumed by burc_wrapper.sh as bind points.
export DIR_BASE=/scratch/${NETID}/${PROJECT}
export DIR_RESOURCES=${DIR_BASE}/resources #ro
export DIR_DATA=${DIR_BASE}/data #rw data
export DIR_DATAIN=/scratch/birc_ro/ibrain_dwi/100206 #ro data
export DIR_DATAOUT=${DIR_BASE}/data_out #rw data
export SUBJECTS_DIR=${DIR_BASE}/freesurfer #rw for Freesurfer
export DIR_WORK=/work #rw /work on HPC is 40Gb local storage
export DIR_SCRATCH=${DIR_BASE}/scratch #rw shared storage
export DIR_SCRIPTS=${DIR_BASE}/scripts #ro, prepended to PATH


# Load modules
module load cuda/8.0.61 #for GPU/CUDA
module load matlab/2017a #matlab binaries are bound
module load singularity/2.3.1-gcc #required to run the container

#set the matlab license path to the path inside the container
export LM_LICENSE_FILE=/bind/matlablicense/uits.lic

#finally call the container with any arguments for the job
#wrapper will bind the appropriate paths
#environment variables are passed to the container

./burc_wrapper.sh /bind/scripts/topups.sh
17 changes: 17 additions & 0 deletions hw4/scripts/burc_wrapper.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/bin/bash
# Run the BURC Singularity container with the standard bind points.
# Usage: ./burc_wrapper.sh <command> [args...]
# Expects the DIR_* / SUBJECTS_DIR environment variables to be exported
# by the calling batch script.
#Delete any unused bind points below
# Locate the matlab install root ($(command -v ...) replaces the old
# backtick `which matlab`, and quoting protects paths with spaces).
matlab_root="$(dirname "$(command -v matlab)")/.."
singularity run --bind "${matlab_root}":/bind/bin/matlab \
  --bind /apps2/matlab:/bind/matlablicense \
  --bind "${DIR_DATA}":/bind/data:rw \
  --bind "${DIR_DATAIN}":/bind/data_in \
  --bind "${DIR_DATAOUT}":/bind/data_out:rw \
  --bind "${SUBJECTS_DIR}":/bind/freesurfer:rw \
  --bind "${DIR_RESOURCES}":/bind/resources:ro \
  --bind "${DIR_SCRATCH}":/bind/scratch:rw \
  --bind "${DIR_WORK}":/bind/work:rw \
  --bind "${DIR_SCRIPTS}":/bind/scripts:ro \
  /scratch/birc_ro/containers/burc.img "$@"

#add this line to limit access to home and /tmp
#--contain --workdir ${DIR_TMP} \
36 changes: 36 additions & 0 deletions hw4/scripts/mergeb0s.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
#!/bin/bash
# (shebang fixed: was "#/bin/bash", which left the script without an
# interpreter line)
# Extract the b0 volumes from each DWI sequence (LR and RL phase
# encoding) and merge them into a single 4D file, b0_all.nii.gz, for
# input to topup.

#If doing this in hpc
cd /bind/data_in || exit 1
for direction in LR RL
do
  dwi=100206_3T_DWI_dir95_${direction}
  #Cut off the top slice of the head volume to make the z dimension even
  fslroi "${dwi}.nii.gz" "/bind/data/${dwi}_even.nii.gz" 0 -1 0 -1 0 110 0 -1
  # b0 volumes sit at these indices (near-zero entries in the .bval file)
  for index in 0 16 32 48 64 80
  do
    fslroi "/bind/data/${dwi}_even.nii.gz" "/bind/data/b0_${direction}_${index}.nii.gz" "$index" 1
  done
  # Merge the six b0s for this phase-encode direction along time
  fslmerge -t "/bind/data/b0merged_${direction}" /bind/data/b0_${direction}_*.nii.gz
done
# Concatenate both directions into one 4D file
fslmerge -t /bind/data/b0_all /bind/data/b0merged_*.nii.gz

#If doing this on local computer
#run from hw4 directory
# cd data_in
# for direction in LR RL
# do
# dwi=100206_3T_DWI_dir95_$direction.nii.gz
# #Cut off the top of the head volume to make the z dimension even
# fslroi ${dwi}.nii.gz ../preproc/${dwi}_even.nii.gz 0 -1 0 -1 0 110 0 -1
# #echo $dwi
# for index in 0 16 32 48 64 80
# do
# #echo b0_${direction}_${index}.nii.gz
# fslroi ../preproc/${dwi}_even.nii.gz ../preproc/b0_${direction}_${index}.nii.gz $index 1
# done
# fslmerge -t ../preproc/b0merged_${direction} ../preproc/b0_${direction}_*.nii.gz
# done
# fslmerge -t ../preproc/b0_all ../preproc/b0merged_*.nii.gz
# NOTE(review): the commented local variant sets dwi to a name that
# already ends in .nii.gz and then appends .nii.gz again — fix before use.
18 changes: 18 additions & 0 deletions hw4/scripts/setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
#!/bin/bash
# (shebang fixed: was "#/bin/bash", which left the script without an
# interpreter line)
### HW 4
# One-time setup on the HPC: clone the container repo, create the
# skeleton directory, and stage/submit the SLURM batch script.

#Set up HPC
#Log into HPC
# ssh mtl15001@login.storrs.hpc.uconn.edu

# 1. Clone container repository to a directory on HPC
git clone https://github.com/bircibrain/containers
# 2. Run mk_skel.sh hw4 to create a skeleton directory at /scratch/mtl15001/hw4
cd containers/example_sbatch/ || exit 1
./mk_skel.sh hw4
# 3. Place your processing script in /scratch/mtl15001/hw4/scripts
cp batch_gpu.sh /scratch/mtl15001/hw4/scripts/batch_hw4_gpu.sh
# 4. Modify batch_gpu.sh to run your SLURM job.
sbatch batch_hw4_gpu.sh
#within batch_hw4_gpu.sh, run the job script using burc_wrapper.sh script.sh
3 changes: 3 additions & 0 deletions hw4/scripts/topups.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/bash
# Estimate the susceptibility distortion field with FSL topup from the
# merged b0 volumes (b0_all.nii.gz produced by mergeb0s.sh).
# Abort if the bind-mounted data directory is missing, rather than
# running topup in the wrong cwd.
cd /bind/data || exit 1
topup --imain=b0_all.nii.gz --datain=acqparams.txt --config=b02b0.cnf --out=topup --iout=corrected_b0
48 changes: 48 additions & 0 deletions scripts/HW 2 afni_proc
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
HW 2 afni_proc

Constructed timing files using make_timings.py and timing_tool.py to drop 1st TR

Experiment 1:
Dice Coefficient: .1978 - not good

Experiment 2:
Without motion censoring and regression - Dice Coefficient: .2200
With a smaller smoothing kernel - Dice Coefficient: .2745
Using 3dREMLfit - Dice Coefficient: .1978

Edit (8/29/2017)
The above was done assuming sub-brick 17 was the correct statistic, but it probably wasn't. Instead, I used 'stats.sub_03+tlrc[L-D_GLT#0_Tstat]'

Below is everything redone using the correct sub-brick.
Experiment 1:
Dice Coefficient: .05296 - quite bad

Experiment 2:
Without motion censoring and regression - Dice Coefficient: .02986 - much lower
This makes sense, since leaving in bad motion volumes would decrease the similarity/reliability between the two samples
With a smaller smoothing kernel - Dice Coefficient: .05976 - not much different
I would expect a smaller smoothing kernel to result in a smaller Dice coefficient because the data would be noisier in each sample
Using 3dREMLfit - Dice Coefficient: .00205 - much much lower

Edit (8/30/2017)
Hopefully the last attempt. Edited make_timings.py to create 3 timing files (separating landmark correct and incorrect)
Contrast: .5*landmark_correct .5*landmark_incorrect -detection

Experiment 1:
Dice Coefficient: .1676
Experiment 2:
With a smaller smoothing kernel - Dice Coefficient: .1359
Lower, as expected.

Edit (8/31/2017)
Edited make_timings.py to create 5 timing files for each condition. All 5 stimulus files were entered into uber_subject.py. Same contrast.

Experiment 1:
Dice Coefficient: .1381

Edit (9/3/2017). FINALLY.
Re-ran changing basis function from BLOCK(16.25) to GAM
Experiment 1:
Dice Coefficient: .1987
Experiment 2: With smoothing kernel of 3mm FWHM
Dice Coefficient: .1615
53 changes: 53 additions & 0 deletions scripts/hw_processing.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
#!/bin/bash
# HW2 processing for sub-03: build timing files, adjust for the dropped
# first TR, run afni_proc (via uber_subject.py output), threshold the
# statistics, and compute the test/retest Dice coefficient.
cd Documents/ibrain-training/hw2/sub-03/ses-test/func || exit 1
chmod 755 ../../../playground/scripts/make_timings.py

#run script to make timing files in test sample
../../../playground/scripts/make_timings.py sub-03_ses-test_task-linebisection_events.tsv
#run timing tool on each file to adjust for dropping the first TR in each run
for f in *.1D
do
  timing_tool.py -timing "$f" -add_offset -2.5 -write_timing "adj_${f}"
done

#do the same for the retest sample
cd ../../ses-retest/func || exit 1
../../../playground/scripts/make_timings.py sub-03_ses-retest_task-linebisection_events.tsv
for g in *.1D
do
  timing_tool.py -timing "$g" -add_offset -2.5 -write_timing "adj_${g}"
done

#run uber_subject.py and set parameters
# change smoothing kernel to 6mm FWHM
# change motion threshold to .5
# check regress motion derivatives
#
#run to generate afni_proc.py script for test and retest
./cmd.ap.sub_03

#manually alter proc.sub_03 to add -tpattern alt+z to tshift
#-AddEdge -deoblique on -master_tlrc 3 to align
#-bout to stats bucket

#run proc.sub_03 for test and retest files

#in each results folder
#1. threshold statistics
# The whole sub-brick selector stays inside ONE pair of single quotes;
# the original nested quotes left L-D_GLT#0_Tstat outside the quoting.
3dmerge -1zscore -1thresh 1.96 -1noneg -prefix stat.thresh 'stats.sub_03+tlrc[L-D_GLT#0_Tstat]'

#2. calculate mask of intersection
#navigate back out to sub-03 folder
3dmask_tool -input group.*/subj.sub_03/sub_03.results/mask_group+tlrc.HEAD -frac 1.0 -prefix mask

#in each results folder
#3. binarize thresholded images
3dcalc -a ./group.test/subj.sub_03/sub_03.results/stat.thresh+tlrc -prefix stat.test.bin -expr 'astep(a,.1)'
#in retest folder
3dcalc -a ./group.retest/subj.sub_03/sub_03.results/stat.thresh+tlrc -prefix stat.retest.bin -expr 'astep(a,.1)'

#4. calculate dice coefficient
3ddot -mask mask+tlrc -dodice stat.test.bin+tlrc stat.retest.bin+tlrc
#found a Dice coefficient of 0.197834 - not so great

#for experiment 2, I changed the -1blur_fwhm option in proc.sub_03
Loading