Contents
classdef Analysis + --> +Analysis + + + + + + + ++\ No newline at end of file +--> +Contents
++ +++classdef Analysis ++% ANALYSIS Collection of functions (static methods) used for GLM analysis % of point process data. +% <a href="matlab:nstatOpenHelpPage('AnalysisExamples.html')">Analysis Examples</a> % -% <a href="AnalysisExamples.html">Analysis Examples</a> -% -% see also <a href="TrialExamples.html">Trial</a>, <a -% href="CovCollExamples.html">CovColl</a>, <a -% href="nstCollExamples.html">nstColl</a>,<a -% href="HistoryExamples.html">History</a> -% +% see also <a href="matlab:nstatOpenHelpPage('TrialExamples.html')">Trial</a>, <a +% href="matlab:nstatOpenHelpPage('CovCollExamples.html')">CovColl</a>, <a +% href="matlab:nstatOpenHelpPage('nstCollExamples.html')">nstColl</a>,<a +% href="matlab:nstatOpenHelpPage('HistoryExamples.html')">History</a> % % Reference page in Help browser -% <a href="Analysis.html">Analysis Reference</a> - -properties (Constant) +% <a href="matlab:nstatOpenHelpPage('Analysis.html')">Analysis Reference</a> ++nSTAT v1 Copyright (C) 2012 Massachusetts Institute of Technology Cajigas, I, Malik, WQ, Brown, EN This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
+This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+properties (Constant) colors = {'b','g','r','c','m','y','k'}; end - methods (Static) - function fitResults =RunAnalysisForNeuron(tObj,neuronNumber,configColl,makePlot,Algorithm) -% fitResults =RunAnalysisForNeuron(tObj,neuronNumber,configColl,makePlot,Algorithm) + methods (Static) + function fitResults =RunAnalysisForNeuron(tObj,neuronNumber,configColl,makePlot,Algorithm,DTCorrection,batchMode) ++% fitResults =RunAnalysisForNeuron(tObj,neuronNumber,configColl,makePlot,Algorithm) % tObj: Trial to be analyzed % neuronNumber: number of the neuron to be analyzed. Can be a % vector to specify multiple neurons to be analyzed. @@ -99,11 +144,22 @@ % BNLRCG - faster Truncated, L-2 Regularized, % Binomial Logistic Regression with Conjugate % Gradient Solver by Demba Ba (demba@mit.edu). + % DTCorrection: 0 for no DT Correction of KS plot, 1 is the + % default. % - if(nargin<5) + % batchMode: when batchMode=1 neurons with same name are fit at once rather than separetely + + if(nargin<7 || isempty(batchMode)) + batchMode = 0; %default treat each spike train separately + end + + if(nargin<6 || isempty(DTCorrection)) + DTCorrection =1; + end + if(nargin<5 || isempty(Algorithm)) Algorithm = 'GLM'; end - if(nargin<4) + if(nargin<4 || isempty(makePlot)) makePlot=1; end numNeurons = length(neuronNumber); @@ -117,43 +173,305 @@ ensHistObj=cell(numNeurons,1); AIC =zeros(numNeurons,1); BIC =zeros(numNeurons,1); + logLL =zeros(numNeurons,1); windowSize = .01; % 1/tObj.sampleRate;% for Residual Computation; spikeTraining = cell(1,numNeurons); XvalData =cell(numNeurons,1); XvalTime =cell(numNeurons,1); spikeValidation = cell(1,numNeurons); -Fit Using Training Data
if(diff(tObj.validationWindow)~=0) ++Fit Using Training Data
+if(diff(tObj.validationWindow)~=0) tObj.setTrialTimesFor('training'); end + if(batchMode==1) + display('Running in batch mode: neurons with same name are fit simultaneously'); + end + for i=1:configColl.numConfigs configColl.setConfig(tObj,i); - display(strcat('Analyzing Configuration #',num2str(i))); - - for j=1:numNeurons -display(strcat('Analyzing Configuration #',num2str(i),': Neuron #',num2str(neuronNumber(j)))); - %clear tempLabels; - %tObj.setCurrentNeuron(neuronNumber); - otherLabels = tObj.getLabelsFromMask(neuronNumber(j)); -% labels{j}{i} = horzcat('Baseline',otherLabels); % Labels change depending on presence/absense of History or ensCovHist - labels{j}{i} = otherLabels; % Labels change depending on presence/absense of History or ensCovHist - numHist{j}{i} = tObj.getNumHist; - histObj{j}{i} = tObj.history; - ensHistObj{j}{i} = tObj.ensCovHist; - [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber(j),i,Algorithm); - lambda{j}{i} = lambdaTemp; b{j}{i} = bTemp; stats{j}{i} = statsTemp; - dev(j,i) = devTemp; AIC(j,i)= AICTemp; BIC(j,i)= BICTemp; - distrib{j}{i} =distribTemp; - spikeTraining{j} = tObj.nspikeColl.getNST(neuronNumber(j)).nstCopy; - spikeTraining{j}.setName(num2str(neuronNumber(j))); -Collect the validation Data
if(diff(tObj.validationWindow)~=0) - tObj.setTrialTimesFor('validation'); - XvalData{j}{i}=tObj.getDesignMatrix(neuronNumber(j)); - XvalTime{j}{i}=tObj.covarColl.getCov(1).time; - spikeValidation{j} = tObj.nspikeColl.getNST(neuronNumber(j)).nstCopy; - spikeTraining{j}.setName(num2str(neuronNumber(j))); - tObj.setTrialTimesFor('training') +% fprintf(strcat('Analyzing Configuration #',num2str(i))); + pool = gcp('nocreate'); + if(isempty(pool)) + pools = 0; + else + pools = pool.NumWorkers; + end%matlabpool('size'); + if(pools==0) + if(batchMode==0) + fprintf(strcat('Analyzing Configuration #',num2str(i),': Neuron #')); + for j=1:numNeurons ++% fprintf(strcat('Analyzing Configuration #',num2str(i),': Neuron #',num2str(neuronNumber(j)))); + if(j==1) + fprintf('%d',neuronNumber(j)); + else + fprintf(',%d',neuronNumber(j)); + end + %clear tempLabels; + %tObj.setCurrentNeuron(neuronNumber); + otherLabels = tObj.getLabelsFromMask(neuronNumber(j)); + % labels{j}{i} = horzcat('Baseline',otherLabels); % Labels change depending on presence/absense of History or ensCovHist + labels{j}{i} = otherLabels; % Labels change depending on presence/absense of History or ensCovHist + numHist{j}{i} = tObj.getNumHist; + histObj{j}{i} = tObj.history; + ensHistObj{j}{i} = tObj.ensCovHist; + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp, distribTemp] = Analysis.GLMFit(tObj,neuronNumber(j),i,Algorithm); + lambda{j}{i} = lambdaTemp; b{j}{i} = bTemp; stats{j}{i} = statsTemp; + dev(j,i) = devTemp; AIC(j,i)= AICTemp; BIC(j,i)= BICTemp; logLL(j,i) = logLLTemp; + distrib{j}{i} =distribTemp; + spikeTraining{j} = tObj.nspikeColl.getNST(neuronNumber(j));%.nstCopy; + spikeTraining{j}.setName(num2str(neuronNumber(j))); ++Collect the validation Data
+if(diff(tObj.validationWindow)~=0) + tObj.setTrialTimesFor('validation'); + XvalData{j}{i}=tObj.getDesignMatrix(neuronNumber(j)); + XvalTime{j}{i}=tObj.covarColl.getCov(1).time; + spikeValidation{j} = tObj.nspikeColl.getNST(neuronNumber(j));%.nstCopy; + spikeTraining{j}.setName(num2str(neuronNumber(j))); + tObj.setTrialTimesFor('training') + end ++end + elseif(batchMode==1) + neuronNames=neuronNumber; % This is an index of names in the batchMode case + fprintf(strcat('Analyzing Configuration #',num2str(i),': Neuron #')); + for j=1:numNeurons ++% if(isa(neuronNames,'cell')) + % fprintf(strcat('Analyzing Configuration #',num2str(i),': Neuron #')); + % display(strcat('Analyzing Configuration #',num2str(i),': Neuron #',neuronNames{j})); + % elseif(isa(neuronNames,'char')) + % display(strcat('Analyzing Configuration #',num2str(i),': Neuron #',neuronNames)); + % elseif(isa(neuronNames,'double')) + % display(strcat('Analyzing Configuration #',num2str(i),': Neuron #',num2str(neuronNames))); + % end + if(isa(neuronNames,'cell')) + if(j==1) + fprintf('%s',neuronNames{j}); + else + fprintf(',%s',neuronNames{j}); + end + elseif(isa(neuronNames,'char')) + if(j==1) + fprintf('%s',neuronNames); + else + fprintf(',%s',neuronNames); + end + elseif(isa(neuronNames,'double')) + if(j==1) + fprintf('%d',neuronNames); + else + fprintf(',%d',neuronNames); + end + end + + %clear tempLabels; + %tObj.setCurrentNeuron(neuronNumber); + otherLabels = tObj.getLabelsFromMask(neuronNumber(j)); + % labels{j}{i} = horzcat('Baseline',otherLabels); % Labels change depending on presence/absense of History or ensCovHist + labels{j}{i} = otherLabels; % Labels change depending on presence/absense of History or ensCovHist + numHist{j}{i} = tObj.getNumHist; + histObj{j}{i} = tObj.history; + ensHistObj{j}{i} = tObj.ensCovHist; + if(isa(neuronNames,'cell')) + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber{j},i,Algorithm); + 
elseif(isa(neuronNames,'char')) + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber,i,Algorithm); + else + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber(j),i,Algorithm); + end + + lambda{j}{i} = lambdaTemp; b{j}{i} = bTemp; stats{j}{i} = statsTemp; + dev(j,i) = devTemp; AIC(j,i)= AICTemp; BIC(j,i)= BICTemp; logLL(j,i) = logLLTemp; + distrib{j}{i} =distribTemp; + if(isa(neuronNames,'cell')) + currSpikes=tObj.nspikeColl.getNST(tObj.getNeuronIndFromName(neuronNames{j})); + elseif(isa(neuronNames,'char')) + currSpikes=tObj.nspikeColl.getNST(tObj.getNeuronIndFromName(neuronNames)); + else + currSpikes=tObj.nspikeColl.getNST(neuronNames(j)); + end + + + for n=1:length(currSpikes) + if(isa(currSpikes,'cell')) + currSpikes{n} = currSpikes{n}.nstCopy; + if(isa(neuronNames,'cell')) + currSpikes{n}.setName(neuronNames{j}); + elseif(isa(neuronNames,'char')) + currSpikes{n}.setName(neuronNames); + else + currSpikes{n}.setName(neuronNames(j)); + end + + else + currSpikes = currSpikes.nstCopy; + + if(isa(neuronNames,'cell')) + currSpikes.setName(neuronNames{j}); + elseif(isa(neuronNames,'char')) + currSpikes.setName(neuronNames); + else + currSpikes.setName(neuronNames(j)); + end + + end + end + + spikeTraining{j} = currSpikes; ++Collect the validation Data
+if(diff(tObj.validationWindow)~=0) + tObj.setTrialTimesFor('validation'); + tempIndices=tObj.getNeuronIndFromName(neuronNames{j}); + currSpikes=tObj.nspikeColl.getNST(tempIndices); + tempX = []; + tempTime=[]; + for n=1:length(tempIndices) + currSpikes{n} = currSpikes{n}.nstCopy; + currSpikes{n}.setName(neuronNames{j}); + if(n==1) + tempX =tObj.getDesignMatrix(tempIndices(n)); + tempTime =tObj.covarColl.getCov(1).time; + else + tempX = [tempX; tObj.getDesignMatrix(tempIndices(n))]; + offset = max(tempTime)+1/tObj.sampeRate; + tempTime = [tempTime;(tObj.covarColl.getCov(1).time+offset)]; + end + end + spikeValidation{j} = currSpikes; + XvalData{j}{i}=tempX; + XvalTime{j}{i}=tempTime; + + tObj.setTrialTimesFor('training') + end ++end + end + fprintf('\n'); + else %use parallel toolbox + if(batchMode==0) + fprintf(strcat('Analyzing Configuration #',num2str(i),': Neuron #',num2str(neuronNumber))); + for j=1:numNeurons ++%clear tempLabels; + %tObj.setCurrentNeuron(neuronNumber); + otherLabels = tObj.getLabelsFromMask(neuronNumber(j)); + % labels{j}{i} = horzcat('Baseline',otherLabels); % Labels change depending on presence/absense of History or ensCovHist + labels{j}{i} = otherLabels; % Labels change depending on presence/absense of History or ensCovHist + numHist{j}{i} = tObj.getNumHist; + histObj{j}{i} = tObj.history; + ensHistObj{j}{i} = tObj.ensCovHist; + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber(j),i,Algorithm); + lambda{j}{i} = lambdaTemp; b{j}{i} = bTemp; stats{j}{i} = statsTemp; + dev(j,i) = devTemp; AIC(j,i)= AICTemp; BIC(j,i)= BICTemp; logLL(j,i)=logLLTemp; + distrib{j}{i} =distribTemp; + spikeTraining{j} = tObj.nspikeColl.getNST(neuronNumber(j));%.nstCopy; + spikeTraining{j}.setName(num2str(neuronNumber(j))); ++Collect the validation Data
+if(diff(tObj.validationWindow)~=0) + tObj.setTrialTimesFor('validation'); + XvalData{j}{i}=tObj.getDesignMatrix(neuronNumber(j)); + XvalTime{j}{i}=tObj.covarColl.getCov(1).time; + spikeValidation{j} = tObj.nspikeColl.getNST(neuronNumber(j));%.nstCopy; + spikeTraining{j}.setName(num2str(neuronNumber(j))); + tObj.setTrialTimesFor('training') + end ++end + elseif(batchMode==1) + neuronNames=neuronNumber; % This is an index of names in the batchMode case + fprintf(strcat('Analyzing Configuration #',num2str(i),': Neuron #',num2str(neuronNames))); + for j=1:numNeurons ++%clear tempLabels; + %tObj.setCurrentNeuron(neuronNumber); + otherLabels = tObj.getLabelsFromMask(neuronNumber(j)); + % labels{j}{i} = horzcat('Baseline',otherLabels); % Labels change depending on presence/absense of History or ensCovHist + labels{j}{i} = otherLabels; % Labels change depending on presence/absense of History or ensCovHist + numHist{j}{i} = tObj.getNumHist; + histObj{j}{i} = tObj.history; + ensHistObj{j}{i} = tObj.ensCovHist; + if(isa(neuronNames,'cell')) + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber{j},i,Algorithm); + elseif(isa(neuronNames,'char')) + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber,i,Algorithm); + else + [lambdaTemp, bTemp, devTemp, statsTemp,AICTemp,BICTemp,logLLTemp,distribTemp] = Analysis.GLMFit(tObj,neuronNumber(j),i,Algorithm); + end + + lambda{j}{i} = lambdaTemp; b{j}{i} = bTemp; stats{j}{i} = statsTemp; + dev(j,i) = devTemp; AIC(j,i)= AICTemp; BIC(j,i)= BICTemp; logLL(j,i) = logLLTemp; + distrib{j}{i} =distribTemp; + if(isa(neuronNames,'cell')) + currSpikes=tObj.nspikeColl.getNST(tObj.getNeuronIndFromName(neuronNames{j})); + elseif(isa(neuronNames,'char')) + currSpikes=tObj.nspikeColl.getNST(tObj.getNeuronIndFromName(neuronNames)); + else + currSpikes=tObj.nspikeColl.getNST(neuronNames(j)); + end + + + for n=1:length(currSpikes) + 
if(isa(currSpikes,'cell')) + currSpikes{n} = currSpikes{n}.nstCopy; + if(isa(neuronNames,'cell')) + currSpikes{n}.setName(neuronNames{j}); + elseif(isa(neuronNames,'char')) + currSpikes{n}.setName(neuronNames); + else + currSpikes{n}.setName(neuronNames(j)); + end + + else + currSpikes = currSpikes.nstCopy; + + if(isa(neuronNames,'cell')) + currSpikes.setName(neuronNames{j}); + elseif(isa(neuronNames,'char')) + currSpikes.setName(neuronNames); + else + currSpikes.setName(neuronNames(j)); + end + + end + end + + spikeTraining{j} = currSpikes; ++Collect the validation Data
+if(diff(tObj.validationWindow)~=0) + tObj.setTrialTimesFor('validation'); + tempIndices=tObj.getNeuronIndFromName(neuronNames{j}); + currSpikes=tObj.nspikeColl.getNST(tempIndices); + tempX = []; + tempTime=[]; + for n=1:length(tempIndices) + currSpikes{n} = currSpikes{n}.nstCopy; + currSpikes{n}.setName(neuronNames{j}); + if(n==1) + tempX =tObj.getDesignMatrix(tempIndices(n)); + tempTime =tObj.covarColl.getCov(1).time; + else + tempX = [tempX; tObj.getDesignMatrix(tempIndices(n))]; + offset = max(tempTime)+1/tObj.sampeRate; + tempTime = [tempTime;(tObj.covarColl.getCov(1).time+offset)]; + end + end + spikeValidation{j} = currSpikes; + XvalData{j}{i}=tempX; + XvalTime{j}{i}=tempTime; + + tObj.setTrialTimesFor('training') + end ++end end -end + fprintf('\n'); + end end @@ -174,36 +492,43 @@ % %tObj.setTrialTimesFor('training'); % end % -Store the results
fitResults =cell(length(neuronNumber),1); ++Store the results
+fitResults =cell(length(neuronNumber),1); for j=1:numNeurons -fitResults{j}=FitResult(spikeTraining{j},labels{j},numHist{j},histObj{j},ensHistObj{j},lambda{j},b{j}, dev(j,:), stats{j},AIC(j,:),BIC(j,:),configColl,XvalData{j},XvalTime{j},distrib{j}); ++fitResults{j}=FitResult(spikeTraining{j},labels{j},numHist{j},histObj{j},ensHistObj{j},lambda{j},b{j}, dev(j,:), stats{j},AIC(j,:),BIC(j,:),logLL(j,:),configColl,XvalData{j},XvalTime{j},distrib{j}); if(diff(tObj.validationWindow)~=0) tObj.setTrialTimesFor('validation'); - lambdaValidation = fitResults{j}.computeValLambda; - ValResults = FitResult(spikeValidation{j},labels{j},numHist{j},histObj{j},ensHistObj{j},lambdaValidation,b{j}, dev(j,:), stats{j},AIC(j,:),BIC(j,:),configColl,XvalData{j},XvalTime{j},distrib); + [lambdaValidation, logLLValidation] = fitResults{j}.computeValLambda; + ValResults = FitResult(spikeValidation{j},labels{j},numHist{j},histObj{j},ensHistObj{j},lambdaValidation,b{j}, dev(j,:), stats{j},AIC(j,:),BIC(j,:),logLLValidation,configColl,XvalData{j},XvalTime{j},distrib); fitResults{j}.validation = ValResults; %validation field is actually another fitResults object but with the validation data end -Process the results and compute further parameters
if(makePlot==1) ++Process the results and compute further parameters
+if(makePlot==1) scrsz = get(0,'ScreenSize'); figure('Position',[scrsz(3)*.1 scrsz(4)*.1 scrsz(3)*.8 scrsz(4)*.8]); - subplot(2,4,[1 2]); Analysis.KSPlot(fitResults{j},makePlot); %make the plot + subplot(2,4,[1 2]); Analysis.KSPlot(fitResults{j},DTCorrection,makePlot); %make the plot hold on; text(.45, .95,strcat('Neuron:',num2str(neuronNumber(j)))); subplot(2,4,3); Analysis.plotInvGausTrans(fitResults{j},makePlot); subplot(2,4,4); Analysis.plotSeqCorr(fitResults{j}); subplot(2,4,[7 8]); Analysis.plotFitResidual(fitResults{j},windowSize,makePlot); subplot(2,4,[5 6]); Analysis.plotCoeffs(fitResults{j}); else - Analysis.KSPlot(fitResults{j},makePlot); + Analysis.KSPlot(fitResults{j},DTCorrection,makePlot); Analysis.plotInvGausTrans(fitResults{j},makePlot); Analysis.plotFitResidual(fitResults{j},windowSize,makePlot); %fitResults.computePlotParams; end -end ++end if(length(neuronNumber)==1) fitResults = fitResults{1}; end -end - function fitResults = RunAnalysisForAllNeurons(tObj,configs,makePlot,Algorithm) ++end + function fitResults = RunAnalysisForAllNeurons(tObj,configs,makePlot,Algorithm,DTCorrection,batchMode) % fitResults = RunAnalysisForAllNeurons(tObj,configs,makePlot,Algorithm) % Runs the fits specifed by configs (a ConfigColl object) on % all the neurons that are unmasked in the trial tObj. @@ -217,15 +542,30 @@ % BNLRCG - faster Truncated, L-2 Regularized, % Binomial Logistic Regression with Conjugate % Gradient Solver by Demba Ba (demba@mit.edu). + % DTCorrection: 0 for no DT Correction of KS plot, 1 is the + % default. 
+ % batchMode: when batchMode=1 neurons with same name are fit at once rather than separetely - if(nargin<4) + if(nargin<6 || isempty(batchMode)) + batchMode = 0; %default treat each spike train separately + end + if(nargin<5 || isempty(DTCorrection)) + DTCorrection =1; + end + + if(nargin<4 || isempty(Algorithm)) Algorithm = 'GLM'; end - if(nargin<3) + if(nargin<3 || isempty(makePlot)) makePlot=1; %default to plotting results end - neuronIndex=tObj.getNeuronIndFromMask; + + if(batchMode==0) + neuronIndex=tObj.getNeuronIndFromMask; + elseif(batchMode==1) + neuronIndex=tObj.getUniqueNeuronNames; + end % numLoops = floor(length(neuronIndex)/4); % loopArray = cell(1,numLoops); % for k=1:numLoops @@ -237,13 +577,13 @@ % end % parfor i=1:length(neuronIndex) - fitResults = Analysis.RunAnalysisForNeuron(tObj,neuronIndex,configs,makePlot,Algorithm); + fitResults = Analysis.RunAnalysisForNeuron(tObj,neuronIndex,configs,makePlot,Algorithm,DTCorrection,batchMode); %end end - function [lambda,b, dev, stats,AIC, BIC,distribution] = GLMFit(tObj,neuronNumber,lambdaIndex,Algorithm) + function [lambda,b, dev, stats,AIC, BIC,logLL, distribution] = GLMFit(tObj,neuronNumber,lambdaIndex,Algorithm) % [lambda,b, dev, stats,AIC, BIC] = GLMFit(tObj,neuronNumber,lambdaIndex,Algorithm) % Given a trial, tObj, and a neuronNumber specifying a neuron % from the trial, extracts the design matrix X from the current @@ -268,36 +608,87 @@ % (p-values,std dev, etc.) % AIC - Akaike's information criteria for this regression. % BIC - Bayes Information Criteria for this regression. + % logLL - Log Likelihood evaluated with the fit parameters. 
if(nargin<4) Algorithm='GLM'; end + if(isa(neuronNumber,'double')) + binaryRep=tObj.nspikeColl.getNST(neuronNumber).isSigRepBinary; + indices=neuronNumber; + elseif(isa(neuronNumber,'char')) + indices=tObj.getNeuronIndFromName(neuronNumber); + binRep=zeros(size(indices)); + for i=1:length(indices) + binRep(i)=tObj.nspikeColl.getNST(indices(i)).isSigRepBinary; + end + binaryRep=prod(binRep); + elseif(isa(neuronNumber,'cell')) + indices=tObj.getNeuronIndFromName(neuronNumber{1}); + binRep=zeros(size(indices)); + for i=1:length(indices) + binRep(i)=tObj.nspikeColl.getNST(indices(i)).isSigRepBinary; + end + binaryRep=prod(binRep); + end - if(strcmp(Algorithm,'BNLRCG') && ~tObj.nspikeColl.getNST(neuronNumber).isSigRepBinary) + if(strcmp(Algorithm,'BNLRCG') && ~binaryRep) error('To use BNLRCG Algorithm, spikeTrain must have a binary representation. Increase sampleRate and try again'); end - %For a single neuron given covariates,perform the GLM fit. - y=tObj.getSpikeVector(neuronNumber); - X=tObj.getDesignMatrix(neuronNumber); - - if(tObj.nspikeColl.getNST(neuronNumber).isSigRepBinary) - distribution = 'binomial'; - linkfunction = 'logit'; - else - distribution = 'poisson'; - linkfunction = 'log'; - end + %If performing batchMode analysis, this stacks up the + %corresponding spike vectors and the design matrices + for i=1:length(indices) + if(i==1) + y=tObj.getSpikeVector(indices(i)); + X=tObj.getDesignMatrix(indices(i)); + lambdaTime = tObj.getCov(1).time; + else + y=[y; tObj.getSpikeVector(indices(i))]; + X=[X; tObj.getDesignMatrix(indices(i))]; + offset = max(lambdaTime)+1/tObj.sampleRate; + lambdaTime = [lambdaTime; (tObj.getCov(1).time +offset)]; + + end + end + %For a single neuron given covariates,perform the GLM fit. 
+% +% if(binaryRep) +% distribution = 'binomial'; +% linkfunction = 'logit'; +% else +% distribution = 'poisson'; +% linkfunction = 'log'; +% end % size(X) % size(y) + y = y(:); + if(size(X,1)~=numel(y)) + nObs = min(size(X,1),numel(y)); + X = X(1:nObs,:); + y = y(1:nObs); + if(exist('lambdaTime','var') && ~isempty(lambdaTime)) + lambdaTime = lambdaTime(1:min(numel(lambdaTime),nObs)); + end + end + if(strcmp(Algorithm,'GLM')) + distribution = 'poisson'; + linkfunction = 'log'; [b,dev,stats] = glmfit(X,y,distribution, 'link', linkfunction,'constant','off'); elseif(strcmp(Algorithm,'BNLRCG')) + distribution = 'binomial'; + linkfunction = 'logit'; rrflag=0; %ML estimation [b,dev,stats] = bnlrCG(X,y,rrflag); + else error('Algorithm not supported!'); end + b=real(b); %make sure to avoid complex coefficients ... sometimes algorithms return + %complex b with the imaginary part near zero. + %Need to explore why. For now just keep the real + %part. if(length(b)>=1) if(strcmp(distribution,'binomial')) data = exp(X*b(1:end)); @@ -310,14 +701,44 @@ end end -% size(tObj.covarColl.getCov(1).getSigRep.time) -% size(data) - lambda=Covariate(tObj.getCov(1).time,data,... - '\Lambda(t)',tObj.getCov(1).xlabelval,... - tObj.getCov(1).xunits,'Hz',strcat('\lambda_{',num2str(lambdaIndex),'}')); - AIC = 2*length(b)+dev; - BIC = length(b)*log(length(y))+dev; + + lambdaIndexStr = num2str(lambdaIndex); + + lambda=Covariate(lambdaTime,data,... + '\lambda(t)',tObj.getCov(1).xlabelval,... 
+ tObj.getCov(1).xunits,'Hz',strcat('\lambda_{',lambdaIndexStr,'}')); + mu=b; + s=stats.se; +% Mc=30; +% for c=1:Mc +% z=normrnd(0,1,length(s),1); +% bKDraw(:,c)=mu+(s.*z); +% end +% if(strcmp(distribution,'poisson')) +% lambdaDraw=exp(X*bKDraw)*(tObj.sampleRate); +% else +% lambdaDraw=exp(X*bKDraw)./(1+exp(X*bKDraw))*(tObj.sampleRate); +% end +% lambdaDraw(isinf(lambdaDraw))=0; +% alphaVal=.05; +% for k=1:length(lambdaDraw) +% [f,x] = ecdf(squeeze(lambdaDraw(k,:))); +% CIs(k,1) = x(find(f<alphaVal/2,1,'last')); +% CIs(k,2) = x(find(f>(1-alphaVal/2),1,'first')); +% end +% +% +% ciPSTHGLM = ConfidenceInterval(lambdaTime,CIs,'CI_{psth_GLM}',lambda.xlabelval,lambda.xunits,lambda.yunits); +% lambda.setConfInterval(ciPSTHGLM); + + %The deviance should be real since it a probability measure + %and therefore any imaginary part is ignored. + AIC = 2*length(b)+real(dev); + BIC = length(b)*log(length(y))+real(dev); + delta = 1/tObj.sampleRate; + logLL =sum(y.*log(data*delta)+(1-y).*(1-data*delta)); + end function handle = plotInvGausTrans(fitResults,makePlot) % handle = plotInvGausTrans(fitResults,makePlot) @@ -354,6 +775,12 @@ % intensity function. % The result is stored in fitResult. % + if(nargin<3 || isempty(makePlot)) + makePlot=1; + end + if(nargin<2 || isempty(windowSize)) + windowSize=.01; + end M = Analysis.computeFitResidual(fitResults.neuralSpikeTrain,fitResults.lambda,windowSize); fitResults.setFitResidual(M); @@ -366,23 +793,29 @@ fitResults.plotResidual; end end - function handle = KSPlot(fitResults,makePlot) + function handle = KSPlot(fitResults,DTCorrection,makePlot) %handle = KSPlot(fitResults,makePlot) % Computes the KS statistics and makes the plot. Stores % appropriate parameters in fitResults. % If validation data is also available, it does the same for % the validation data. - if(nargin <2) + % DTCorrection: 0 for no DT Correction of KS plot, 1 is the + % default. 
+ if(nargin <3) makePlot =1; %By default make the plot end + if(nargin<2) + DTCorrection = 1; + end - [Z, U, xAxis, KSSorted, ks_stat] = Analysis.computeKSStats(fitResults.neuralSpikeTrain,fitResults.lambda); + + [Z, U, xAxis, KSSorted, ks_stat] = Analysis.computeKSStats(fitResults.neuralSpikeTrain,fitResults.lambda,DTCorrection); fitResults.setKSStats(Z,U, xAxis, KSSorted, ks_stat); if(fitResults.isValDataPresent) %make sure nst is in appropriate window - [Z, U, xAxis, KSSorted, ks_stat] = Analysis.computeKSStats(fitResults.validation.neuralSpikeTrain,fitResults.validation.lambda); + [Z, U, xAxis, KSSorted, ks_stat] = Analysis.computeKSStats(fitResults.validation.neuralSpikeTrain,fitResults.validation.lambda,DTCorrection); fitResults.validation.setKSStats(Z, U, xAxis, KSSorted, ks_stat); end @@ -419,39 +852,48 @@ % for independence of the xj's. Independence of the xj's % suggests indepence of the uj's and zj's (a condition % necessary for the Time Rescaling Theorem). - U=1-exp(-Z); - U(U>=1)=.99999; %Prevent any 1 values which lead to infinity in X + U(U>=.999999)=.999999; %Prevent any 1 values which lead to infinity in X + U(U==0)=.000001; + U(U<0)=.000001; X = norminv(U,0,1); %X=erfinv(U); [~,colm] = size(X); - for i=1:colm - [c(:,i),lags] = xcov(X(:,i)); + if(~isempty(X)) + for i=1:colm + [c(:,i),lags] = xcov(X(:,i)); + end + else + [c,lags] = xcov(X); end index=find(lags==1); lags=lags(index:end); rho=c(index:end,:)./repmat(c(index-1,:),length(lags),1); n=length(X); + % Defaults to the 95% confidence intervals + % Can extend to allow selection of 95% or 99% CI confBound = 1.96/sqrt(n)*ones(length(lags),1); -% size(lags) -% size(rho) + % size(lags) + % size(rho) confBoundSig = SignalObj(lags,[confBound -confBound],'ACF[ \Phi^{-1}(u_i) ]','\Delta \tau','sec'); confBoundSig.setPlotProps({' ''r'', ''LineWidth'' ,3'},1); confBoundSig.setPlotProps({' ''r'', ''LineWidth'' ,3'},2); handle=[]; - rhoSig = SignalObj(lags,rho, 'ACF[ \Phi^-1(u_i) ]','\Delta \tau','sec'); + 
rhoSig = SignalObj(lags,rho, 'ACF[ \Phi^-1(u_i) ]','Lag \Delta \tau','sec'); plotProps = cell(1,colm); - for i=1:colm - plotProps{i}=strcat('''', '.',Analysis.colors{mod(i-1,length(Analysis.colors))+1},''''); + if(~isempty(X)) + for i=1:colm + plotProps{i}=strcat('''', '.',Analysis.colors{mod(i-1,length(Analysis.colors))+1},''''); + end + else + plotProps=strcat('''', '.',Analysis.colors{1},''''); end rhoSig.setPlotProps(plotProps); - - end - function [Z,U,xAxis,KSSorted, ks_stat] = computeKSStats(nspikeTrain,lambdaInput,dtCorrection) + function [Z,U,xAxis,KSSorted, ks_stat] = computeKSStats(nspikeObj,lambdaInput,DTCorrection) % [Z,U,xAxis,KSSorted, ks_stat] = computeKSStats(nspikeTrain,lambdaInput) % Given a neural spike train (a sequence of spike times) and a % conditional intensity function, computes the rescaled ISIs @@ -460,6 +902,8 @@ % (exponential rate 1 (according to T-R theorem) to be % uniform(0,1). % + % DTCorrection: 0 for no DT Correction of KS plot, 1 is the + % default. % nspikeTrain: a nspikeTrain object % lambdaInput: candidate conditional intensity function (a Covariate) % Z: rescaled spike times @@ -471,24 +915,46 @@ %get the relevant spike train if(nargin<3) - dtCorrection =1; + DTCorrection =1; end - nCopy =nspikeTrain.nstCopy; -% minTime = nCopy.minTime; -% maxTime = nCopy.maxTime; -% nCopy.setMinTime(minTime); -% nCopy.setMaxTime(maxTime); + if(length(nspikeObj)>1) %in batch analysis we get multiple trials + nstCollObj = nstColl(nspikeObj); + nCopy = nstCollObj.toSpikeTrain; + else + nCopy =nspikeObj.nstCopy; +% nCopy =nspikeObj; + + end + + +% minTime = nCopy.minTime; +% maxTime = nCopy.maxTime; + nCopy.resample(lambdaInput.sampleRate); + nCopy.setMinTime(lambdaInput.minTime); + nCopy.setMaxTime(lambdaInput.maxTime); + + repBin = nCopy.isSigRepBin; + if(~repBin) + lambdaInput=lambdaInput.resample(2*lambdaInput.sampleRate); + nCopy.resample(lambdaInput.sampleRate); + end - if(dtCorrection==1 && nCopy.isSigRepBin) + if(DTCorrection==1 && 
repBin) % Use DT Correction for Time Rescaling Theorem - Haslinger, Pipa and Brown (2010) - pkSignal=lambdaInput.*(1/lambdaInput.sampleRate); - pk = pkSignal.data; + pkSignal=lambdaInput; + pk = pkSignal.data.*(1/lambdaInput.sampleRate); + pk = max(pk,1e-10); spikeTrain = nCopy.getSigRep.data; + minDim = min(size(pk,1),size(spikeTrain,1)); + pk=pk(1:minDim,:); + spikeTrain=spikeTrain(1:minDim,:); + intValues=zeros(length(nCopy.getSpikeTimes)-1,lambdaInput.dimension); for i=1:lambdaInput.dimension + pk(:,i) = nanmin(nanmax(pk(:,i),0),1); temp = ksdiscrete(pk(:,i),spikeTrain,'spiketrain'); % length(temp) % length(intValues(:,i)) @@ -506,16 +972,20 @@ % lambda=tempLambda.getSigInTimeWindow(minTime,maxTime);%.dataToMatrix; lambdaPosdata = max(tempLambda.data,0); lambda = Covariate(tempLambda.time,lambdaPosdata,tempLambda.name,tempLambda.xlabelval,tempLambda.xunits,tempLambda.yunits,tempLambda.dataLabels); - lambdaInt = lambda.integral; + if(nCopy.isSigRepBin) spikeTimes = nCopy.getSpikeTimes; + spikeTimes = [0 spikeTimes]; else - nstSignal = nCopy.getSigRep; - spikeTimes=nstSignal.time(nstSignal.data~=0); - +% spikeTimes = nCopy.getSpikeTimes; +% maxBinSize=nCopy.getMaxBinSizeBinary; +% lambdaInt = lambda.resample(1/maxBinSize).integral; + nstSignal = nCopy.getSigRep; + spikeTimes=nstSignal.time(nstSignal.data~=0); + spikeTimes = [0 spikeTimes']; end @@ -537,7 +1007,7 @@ KSSorted = sort( U,'ascend' ); - N = length(KSSorted); + N = size(KSSorted,1); if(N~=0) xAxis=(([1:N]-.5)/N)'*ones(1,lambdaInput.dimension); ks_stat = max(abs(KSSorted - (([1:N]-.5)/N)'*ones(1,lambdaInput.dimension))); @@ -546,7 +1016,7 @@ xAxis=[]; end end - function M=computeFitResidual(nspikeTrain,lambda,windowSize) + function M=computeFitResidual(nspikeObj,lambda,windowSize) % M=computeFitResidual(nspikeTrain,lambda,windowSize) % Computes the Point Process residual defined in % 'A point process framework for relating neural spiking @@ -561,26 +1031,41 @@ % M: the point process residual (a Covariate 
object). % if(nargin<3 || isempty(windowSize)) - windowSize=.1; - end - windowTimes = nspikeTrain.minTime:windowSize:nspikeTrain.maxTime; - nCopy=nspikeTrain.nstCopy.getSigRep(windowSize);%tObj.getNeuron(fitResults.neuronNumber).nstCopy; - %nCopy.windowedSignal(windowTimes) - % this gets us the SUM over a window of length windowSize - % y[n] = y[n-1] + x[n] -- running sum filter - B=1; A=[1 -1]; - sumSpikes = nCopy.filter(B,A); - %spikeTimes =nCopy.getSpikeTimes'; - sumSpikesOverWindow= sumSpikes.getValueAt(windowTimes(2:end))-sumSpikes.getValueAt(windowTimes(1:(end-1))); + windowSize=.01; + end + + + if(length(nspikeObj)>1) %in batch analysis we get multiple trials + nstCollObj = nstColl(nspikeObj); + nCopy = nstCollObj.toSpikeTrain; + else + nCopy =nspikeObj.nstCopy; +% nCopy =nspikeObj; + end + + nCopy.resample(lambda.sampleRate); + nCopy.setMinTime(lambda.minTime); + nCopy.setMaxTime(lambda.maxTime); + + + sumSpikes=nCopy.getSigRep(windowSize);%tObj.getNeuron(fitResults.neuronNumber).nstCopy; +% sumSpikesOverWindow = sumSpikes.data(1:end); +% windowTimes = nCopy.minTime:windowSize:lambda.maxTime; + windowTimes = linspace(nCopy.minTime,nCopy.maxTime,length(sumSpikes.time)); lambdaInt = lambda.integral; lambdaIntVals = lambdaInt.getValueAt(windowTimes(2:end))-lambdaInt.getValueAt(windowTimes(1:(end-1))); + if(length(lambdaIntVals)==length(sumSpikes.data)) + sumSpikesOverWindow = sumSpikes.data(1:end); + elseif(length(lambdaIntVals)<length(sumSpikes.data)) + sumSpikesOverWindow = sumSpikes.data(2:end); + end Mdata=repmat(sumSpikesOverWindow,[1 lambdaInt.dimension])-lambdaIntVals; dataLabels = cell(1,lambdaInt.dimension); for i=1:lambdaInt.dimension dataLabels{i} = lambda.dataLabels{i}; end - M=Covariate(windowTimes(2:end),Mdata,'M(t_k)-Residual',lambdaInt.xlabelval, ... + M=Covariate(windowTimes(1:end),[zeros(1,size(Mdata,2));Mdata],'M(t_k)',lambdaInt.xlabelval, ... 
lambdaInt.xunits,lambdaInt.yunits,dataLabels); end @@ -634,7 +1119,72 @@ tcc = ConfigColl(tc); fitResults =Analysis.RunAnalysisForNeuron(ensembTrial,neuronNum,tcc,makePlot); end - function [fitResults,tcc] = computeHistLag(tObj,neuronNum,windowTimes,CovLabels,sampleRate,makePlot) + + function [fitResults,gammaMat,phiMat,devianceMat,sigMat] = computeGrangerCausalityMatrix(tObj, Algorithm,confidenceInterval, batchMode) + if(nargin<4 || isempty(batchMode)) + batchMode = 0; + end + if(nargin<3 || isempty(confidenceInterval)) + confidenceInterval = 0.95; + end + if(nargin<2 || isempty(Algorithm)) + Algorithm = 'GLM'; + end + + neuronNum = tObj.getNeuronIndFromMask; + covMask = tObj.covMask; + history = tObj.history; + ensCovHist = tObj.ensCovHist; + ensCovMask= tObj.ensCovMask; + sampleRate= tObj.sampleRate; + DTCorrection = 1; + makePlot=0; + clear fitResults; + gammaMat=zeros(length(neuronNum),length(neuronNum)); + for i=1:length(neuronNum) + tc{1}=TrialConfig(covMask, sampleRate, history, ensCovHist, ensCovMask); tc{1}.setName('Baseline'); + neighbors = find(ensCovMask(:,i)==1); + + for j=1:length(neighbors) + ensCovMaskTemp = ensCovMask; + ensCovMaskTemp(neighbors(j),neuronNum)=0; + + tc{2}=TrialConfig(covMask, sampleRate, history, ensCovHist, ensCovMaskTemp); tc{2}.setName([num2str(neighbors(j)) 'excluded from ' num2str(neuronNum(i))]); + tcc = ConfigColl(tc); + + fitResults{i}{j} =Analysis.RunAnalysisForNeuron(tObj,neuronNum(i),tcc,makePlot,Algorithm,DTCorrection,batchMode); + end + end + for i=1:length(neuronNum) + neighbors = find(ensCovMask(:,i)==1); + for j=1:length(neighbors) + gammaMat(neighbors(j),neuronNum(i)) = fitResults{i}{j}.logLL(2)-fitResults{i}{j}.logLL(1); + [coeffMat, labels, SEMat] = fitResults{i}{j}.getCoeffs(1); + coeffInd=strfind(labels,[num2str(neighbors(j)) ':[']); + gammaVals =coeffMat(~isempty(coeffInd)); + phiMat(neighbors(j),neuronNum(i)) = -sign(sum(gammaVals))*gammaMat(neighbors(j),neuronNum(i)); + dimDiff(neighbors(j),neuronNum(i)) = 
abs(diff(fitResults{i}{j}.numCoeffs)); + valConfInt(neighbors(j),neuronNum(i)) = chi2inv(confidenceInterval,dimDiff(neighbors(j),neuronNum(i))); + end + end + devianceMat = -2*gammaMat; + nonZeros = find(devianceMat~=0); + pVals = chi2cdf(devianceMat(nonZeros),dimDiff(nonZeros),'upper'); + + + [h, crit_p, adj_p]=fdr_bh(pVals,1-confidenceInterval,'pdep','no'); + +% sigMat = (devianceMat>valConfInt); + sigMat = zeros(size(devianceMat)); + sigMat(nonZeros) = h; + + tObj.resetEnsCovMask; + + %RunAnalysisForAllNeurons(tObj,configs,makePlot,Algorithm,DTCorrection,batchMode) + + + end + function [fitResults,tcc] = computeHistLag(tObj,neuronNum,windowTimes,CovLabels,Algorithm,batchMode,sampleRate,makePlot,histMinTimes,histMaxTimes) % [fitResults,tcc] = computeHistLag(tObj,tObj,neuronNum,windowTimes,CovLabels,sampleRate,makePlot) % For the neuron in neuronNum, runs an analysis with self % history, and no extrinsic covariates, and no ensemble history @@ -642,12 +1192,24 @@ % of windowTimes. There will be length(windowTimes) different % results (configurations) corresponding to increasing number of history % windows. 
- if(nargin<6) + if(nargin<10) + histMaxTimes =[]; + end + if(nargin<9) + histMinTimes=[]; + end + if(nargin<8) makePlot=1; end - if(nargin<5 || isempty(sampleRate)) + if(nargin<7 || isempty(sampleRate)) sampleRate = tObj.sampleRate; end + if(nargin<6 || isempty(batchMode)) + batchMode = 0; + end + if(nargin<5 || isempty(Algorithm)) + Algorithm = 'GLM'; + end if(nargin<4) CovLabels ={}; end @@ -658,29 +1220,50 @@ neuronNum = tObj.getNeuronIndFromMask; end - % tcObj=TrialConfig(covMask,sampleRate, history,minTime,maxTime) + % tcObj=TrialConfig(covMask,sampleRate, history,ensCovHist,ensCovMask,covLag,name) tc=cell(1,length(windowTimes)-1); for i=1:length(tc)+1 %use no covariates if(i==1) tc{i} = TrialConfig(CovLabels,sampleRate,[],[]); tc{i}.setName('Baseline'); else - tc{i} = TrialConfig(CovLabels,sampleRate,windowTimes(1:i)); + if(and(isempty(histMinTimes),isempty(histMaxTimes))) + tc{i} = TrialConfig(CovLabels,sampleRate,windowTimes(1:i)); tc{i}.setName(strcat('Window',num2str(i-1))); + else + hTemp = History(windowTimes(1:i),histMinTimes,histMaxTimes); + tc{i} = TrialConfig(CovLabels,sampleRate,hTemp); tc{i}.setName(strcat('Window',num2str(i-1))); + end end end + DTCorrection=1; tcc = ConfigColl(tc); - fitResults =Analysis.RunAnalysisForNeuron(tObj,neuronNum,tcc,makePlot); + + fitResults =Analysis.RunAnalysisForNeuron(tObj,neuronNum,tcc,makePlot,Algorithm,DTCorrection,batchMode); + end - function fitResults = computeHistLagForAll(tObj,windowTimes,CovLabels,sampleRate,makePlot) + function fitResults = computeHistLagForAll(tObj,windowTimes,CovLabels,Algorithm,batchMode,sampleRate,makePlot,histMinTimes,histMaxTimes) % [fitResults,tcc] = computeHistLagAll(tObj,windowTimes,CovLabels,sampleRate,makePlot) % Calls computeHistLab for each neuron in the trial that is not masked. 
- if(nargin<5) + if(nargin<9) + histMaxTimes =[]; + end + if(nargin<8) + histMinTimes=[]; + end + if(nargin<7) makePlot=1; end - if(nargin<4 || isempty(sampleRate)) + if(nargin<6 || isempty(sampleRate)) sampleRate = tObj.sampleRate; end + if(nargin<5 || isempty(batchMode)) + batchMode=0; + end + if(nargin<4 || isempty(Algorithm)) + Algorithm = 'GLM'; + end + if(nargin<3) CovLabels ={}; end @@ -691,7 +1274,7 @@ neuronIndex=tObj.getNeuronIndFromMask; fitResults = cell(1,length(neuronIndex)); for i=1:length(neuronIndex) - fitResults{i} = Analysis.computeHistLag(tObj,neuronIndex(i),windowTimes,CovLabels,sampleRate,makePlot); + fitResults{i} = Analysis.computeHistLag(tObj,neuronIndex(i),windowTimes,CovLabels,Algorithm,batchMode,sampleRate,makePlot,histMinTimes,histMaxTimes); end @@ -769,49 +1352,11 @@ cc=CovColl(cov); end - function plotStatHistData(index,spdata,svdata,sfdata) - % plotStatHistData(index,spdata,svdata,sfdata) - % Helpfer function to view historgrams of the position, velocity, - % and force data at each spike time. - % index is used to label the plot and should correspond to the - % neuron that was used to compute this data. 
- % - % position (x,y,z) array - % veloctity vx vy vz array - % force: fz, fzmag, fld, flf array - - sfdata((sfdata==0))=nan; % - fig(1)=figure('visible','off'); a1=scatterhist(spdata(:,1),spdata(:,2)); set(get(gca,'YLabel'),'String','y'); set(get(gca,'XLabel'),'String','x') - fig(2)=figure('visible','off'); a2=scatterhist(spdata(:,1),spdata(:,3)); set(get(gca,'YLabel'),'String','z'); set(get(gca,'XLabel'),'String','x') - fig(3)=figure('visible','off'); a3=scatterhist(spdata(:,2),spdata(:,3)); set(get(gca,'YLabel'),'String','z'); set(get(gca,'XLabel'),'String','y') - fig(4)=figure('visible','off'); a4=scatterhist(svdata(:,1),svdata(:,2)); set(get(gca,'YLabel'),'String','v_y'); set(get(gca,'XLabel'),'String','v_x') - fig(5)=figure('visible','off'); a5=scatterhist(svdata(:,1),svdata(:,3)); set(get(gca,'YLabel'),'String','v_z'); set(get(gca,'XLabel'),'String','v_x') - fig(6)=figure('visible','off'); a6=scatterhist(svdata(:,2),svdata(:,3)); set(get(gca,'YLabel'),'String','v_z'); set(get(gca,'XLabel'),'String','v_y') - fig(7)=figure('visible','off'); a7=scatterhist(sfdata(:,1),sfdata(:,3)); set(get(gca,'YLabel'),'String','f_{ld}'); set(get(gca,'XLabel'),'String','f_z') - fig(8)=figure('visible','off'); a8=scatterhist(sfdata(:,1),sfdata(:,4)); set(get(gca,'YLabel'),'String','f_{lf}'); set(get(gca,'XLabel'),'String','f_z') - fig(9)=figure('visible','off'); a9=scatterhist(sfdata(:,3),sfdata(:,4)); set(get(gca,'YLabel'),'String','f_{lf}'); set(get(gca,'XLabel'),'String','f_{ld}') - - - h=figure(index+100); - scrsz = get(0,'ScreenSize'); - - set(h,'Name',strcat('Neuron #',num2str(index)),'Position',[scrsz(3)*.1 scrsz(4)*.1 scrsz(3)*.8 scrsz(4)*.8]); - u1 = uipanel(h,'position',[.0 .66 .33 .33]); set(a1,'parent',u1); - u2 = uipanel(h,'position',[.33 .66 .33 .33]); set(a2,'parent',u2); - u3 = uipanel(h,'position',[.66 .66 .33 .33]); set(a3,'parent',u3); - u4 = uipanel(h,'position',[.0 .33 .33 .33]); set(a4,'parent',u4); - u5 = uipanel(h,'position',[.33 .33 .33 .33]); 
set(a5,'parent',u5); - u6 = uipanel(h,'position',[.66 .33 .33 .33]); set(a6,'parent',u6); - u7 = uipanel(h,'position',[.0 0 .33 .33]); set(a7,'parent',u7); - u8 = uipanel(h,'position',[.33 0 .33 .33]); set(a8,'parent',u8); - u9 = uipanel(h,'position',[.66 0 .33 .33]); set(a9,'parent',u9); - close(fig); - end - - end - + end ++end @@ -865,7 +1410,7 @@ % X = [ones(rows,1), X]; % CG parameters cgmax = 30; - cgeps = 1e-3; + cgeps = 1e-6; % LR parameters lrmax = 100; @@ -886,17 +1431,25 @@ devnew = 0; devdiff = abs(devnew - devold); - +% B=[]; while (i < lrmax && devdiff > lreps) % Do CG -> beta_new, i.e. solve for beta_new: XtWX*beta_new = XtWz(beta_old) using CG A = X'.*W*X; b = X'.*W*z; + % A needs to be positive definite so any zero or negative + % eigenvalues are set to machine precision. + [vec,val]=eig(A); val(val<=0)=eps; + A=vec*val*vec'; + + if(any(isnan(b))) + b(isnan(b))=0; + end if rrflag == 1 A = A + lambda*eye(size(A)); end - beta_new = cgs(A,b,cgeps,cgmax,[],[],beta_old); + [beta_new, flag] = cgs(A,b,cgeps,cgmax,[],[],beta_old); beta_old = beta_new; n = X*beta_old; @@ -910,7 +1463,7 @@ i = i+1; - +% B=[B,beta_new]; end % Compute a few statistics @@ -1034,6 +1587,10 @@ % check that those indicies are in [1:length(pk)]; + if(isempty(spikeindicies)) + rst = pk; + return; + end if spikeindicies(1)<1; error('There is at least one spike with index less than 0'); end; @@ -1048,8 +1605,7 @@ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % initialize random number generator - s = RandStream('mt19937ar','Seed', sum(100*clock)); - RandStream.setDefaultStream(s); + rng('shuffle','twister'); %rand('twister',sum(100*clock)); % make the qk's @@ -1072,8 +1628,9 @@ delta=-(1/qk(ind2))*log(1-rand()*(1-exp(-qk(ind2)))); - total=total+qk(ind2)*delta; - + if(delta~=0) + total=total+qk(ind2)*delta; + end rst(r)=total; rstold(r)=sum(qk(ind1+1:ind2)); @@ -1097,30 +1654,281 @@ varargout{4}=sort(rstold); end -