R Stuff Chapter 3


nll.ht=function(par,y)
{
#par=c(d,g)
#y=c(h,m,f,c)
d=par[1]
g=par[2]
p=1:4 # reserve space
p[1]=d+(1-d)*g #probability of a hit
p[2]=1-p[1] # probability of a miss
p[3]=g # probability of a false alarm
p[4] = 1-p[3] #probability of a correct rejection
return(-sum(y*log(p)))
}

y=c(75,25,30,20)
par=c(.5,.5)
#check likelihood
nll.ht(par,y)

optim(par,nll.ht,y=y)
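#sketch: store the fit in an object so its pieces can be pulled out later
#(the name 'fit' is just a convenient label, not required by optim)
fit=optim(par,nll.ht,y=y)
fit$par    #maximum-likelihood estimates of d and g
fit$value  #negative log likelihood at the minimum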

#look at log likelihood

d=seq(.01,.99,.01)
g=seq(.01,.99,.01)

ll=matrix(nrow=length(d),ncol=length(g))
for (i in 1:length(d))
for (j in 1:length(g))
ll[i,j]=-nll.ht(c(d[i],g[j]),y)

contour(d,g,ll,nlevels=100)
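#optional sketch: mark the maximum-likelihood estimate on the contour
#(uses the 'fit' object from the optim sketch above)
points(fit$par[1],fit$par[2],pch=19,col='red')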

#refit, asking optim for the hessian so we can get standard errors
fit=optim(par,nll.ht,y=y,hessian=T)
se=sqrt(diag(solve(fit$hessian)))

cov=solve(fit$hessian)
cov[1,2]/sqrt(cov[1,1]*cov[2,2])
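
#sketch: approximate 95% Wald confidence intervals from the standard errors above
cbind(lower=fit$par-1.96*se,upper=fit$par+1.96*se)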

1. Fun with plots

dat=read.table(url('http://www.missouri.edu/~rouderj/lk12.dat'))
colnames(dat)=c('sub','trial','blk','trialb','setI','set','pos','true','est',
'p2','a2','p3','a3','p4','a4','p5','a5','p6','a6')

table(dat$set)

choose1= (dat$set==1)
choose6= (dat$set==6)

#look at set size 1
plot(dat$true[choose1],dat$est[choose1])
myblue=rgb(0,0,1,.2)
plot(dat$true[choose1],dat$est[choose1],col=myblue)
plot(dat$true[choose1],dat$est[choose1],col=myblue,pch=20)
plot(dat$true[choose1],dat$est[choose1]+rnorm(sum(choose1),0,1),col=myblue,pch=20)
abline(0,1)
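
#a possible alternative to the hand-rolled normal jitter above: R's built-in
#jitter(), here adding uniform noise of half-width 1
plot(dat$true[choose1],jitter(dat$est[choose1],amount=1),col=myblue,pch=20)
abline(0,1)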

#what about Set Size 6?
plot(dat$true[choose6],dat$est[choose6]+rnorm(sum(choose6),0,1),col=myblue,pch=20)

#what is going on? let's look at a subset of the data
choose=dat$set==6 & dat$true>50
layout(matrix(nrow=2,ncol=1,c(1,2)),heights=c(1,3))
par(mar=c(0,4,1,1))
hist(dat$est[choose],col='lightblue',xlab="",main="",ylab="",
axes=F,xlim=c(-75,75),breaks=seq(-75,75,10),prob=T)

#model
est=seq(-70,70,.1)
mem=.75*dnorm(est,40,15)
guess=.25*(dnorm(est,-30,10)/2+dnorm(est,30,10)/2)
lines(est,mem,col='darkgreen')
lines(est,guess,col='darkred')
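
#optional sketch: overlay the implied overall density, the sum of the two
#weighted components drawn above
lines(est,mem+guess,lty=2)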

#strip chart to show what each individual does
par(mar=c(4,4,0,1),mgp=c(2,1,0),cex=1.3)
stripchart(dat$est[choose]~dat$sub[choose],xlim=c(-75,75),axes=F,
method='stack',pch=20,cex=.8,offset=.2,xlab="Estimated Angle")
axis(1,at=seq(-75,75,25))
abline(h=1:24,col=grey(.4))

#alternative representations of bimodality?
library(geneplotter)
smoothScatter(dat$true[choose6],dat$est[choose6])
smoothScatter(dat$true[choose6],dat$est[choose6],bandwidth=c(2.5,2.5))

#add contours?
library(KernSmooth)
hist2d=bkde2D(cbind(dat$true[choose6],dat$est[choose6]),bandwidth=5)
contour(hist2d$x1,hist2d$x2,hist2d$fhat,add=T)

image(hist2d$x1,hist2d$x2,hist2d$fhat)
image(hist2d$x1,hist2d$x2,hist2d$fhat,col=topo.colors(128))

hist2d=bkde2D(cbind(dat$true[choose6],dat$est[choose6]),bandwidth=10)
persp(hist2d$x1,hist2d$x2,hist2d$fhat,theta=40,shade=.2,col='yellow',border=NA,phi=30)
persp(hist2d$x1,hist2d$x2,hist2d$fhat,theta=70,shade=.2,col='yellow',border=NA,phi=60)

plot(dat$true[choose6],dat$est[choose6]+rnorm(sum(choose6),0,1),col=myblue,pch=20,cex=2.5)

2. Binomial Across Two Conditions

#Coding, First Shot, Follows Notation in Chapters 1 and 2
#p: p[1] is hit, p[2] is fa,
#y[1] is hit, y[2] is fa
#N[1] is for signal, N[2] is for noise
nll.ugly=function(p,y,N){
-y[1]*log(p[1])-(N[1]-y[1])*log(1-p[1]) +
-y[2]*log(p[2])-(N[2]-y[2])*log(1-p[2])}
y=c(75,30)
N=c(100,50)
#
start=c(.5,.5)
optim(start,nll.ugly,y=y,N=N)

#Coding, Second Shot, new notation
#par: par[1] is hit, par[2] is fa,
#y[1] is hit, y[2] is miss, y[3] is fa, y[4] is CR
nll.clear=function(par,y){ #N is no longer needed: y carries all four counts
p=1:4
p[1]=par[1]
p[2]=1-p[1]
p[3]=par[2]
p[4]=1-p[3]
-sum(y*log(p))}

y=c(75,25,30,20)
optim(start,nll.clear,y=y)
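
#quick sanity check (sketch): both codings should reach essentially the same
#minimized negative log likelihood for the same counts
optim(start,nll.ugly,y=c(75,30),N=c(100,50))$value
optim(start,nll.clear,y=c(75,25,30,20))$value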

3. High Threshold Model Across Multiple Conditions

#negative log likelihood of high-threshold model, 1 condition.
#par=c(d,g)
#y=c(h,m,f,c)
nll.condition=function(par,y)
{
d=par[1]
g=par[2]
p=1:4 # reserve space
p[1]=d+(1-d)*g #probability of a hit
p[2]=1-p[1] # probability of a miss
p[3]=g # probability of a false alarm
p[4] = 1-p[3] #probability of a correct rejection
return(-sum(y*log(p)))
}

y=c(75,25,30,20)
par=c(.5,.5)
optim(par,nll.condition,y=y)

#Model 1, free d and free g
#negative log likelihood for Model 1:
#assign par4=d1,g1,d2,g2
#assign y8=(h1,m1,f1,c1,h2,m2,f2,c2)
nll.1=function(par4,y8)
{
nll.condition(par4[1:2],y8[1:4])+ #condition 1
nll.condition(par4[3:4],y8[5:8]) #condition 2
}

#negative log likelihood for Model 2:
#assign par3=d,g1,g2
#assign y8=(h1,m1,f1,c1,h2,m2,f2,c2)
nll.2=function(par3,y8)
{
nll.condition(par3[1:2],y8[1:4])+
nll.condition(par3[c(1,3)],y8[5:8])
}

#negative log likelihood for Model 3:
#par3=d1,d2,g
#y8=(h1,m1,f1,c1,h2,m2,f2,c2)
nll.3=function(par3,y8)
{
nll.condition(par3[c(1,3)],y8[1:4])+
nll.condition(par3[2:3],y8[5:8])
}

dat=c(40,10,30,20,15,35,2,48)
#Model 1
par=c(.5,.5,.5,.5) #starting values
mod1=optim(par,nll.1,y8=dat,hessian=T)
#Model 2
par=c(.5,.5,.5) #starting values
mod2=optim(par,nll.2,y8=dat,hessian=T)
#Model 3
par=c(.5,.5,.5) #starting values
mod3=optim(par,nll.3,y8=dat,hessian=T)
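
#sketch: collect the minimized negative log likelihoods and parameter counts
#before running the likelihood-ratio tests below
data.frame(model=c('M1','M2','M3'),
nll=c(mod1$value,mod2$value,mod3$value),
npar=c(length(mod1$par),length(mod2$par),length(mod3$par)))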

#test of constant detection M1 vs M2
-2*(mod1$value-mod2$value) #invariance of detection

#test of constant guessing rate M1 vs M3
-2*(mod1$value-mod3$value) #effect of condition on guessing
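
#sketch: p-values for the two tests above, each compared to a chi-square
#distribution with 1 df (the models differ by one parameter)
1-pchisq(-2*(mod1$value-mod2$value),df=1)
1-pchisq(-2*(mod1$value-mod3$value),df=1)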

#graphical assessment
parMat=matrix(mod1$par,nrow=2,ncol=2,byrow=T)
xpos=barplot(parMat,beside=T,ylim=c(0,.8),legend.text=c('Tone-Present Bias','Tone-Absent Bias'),
names.arg=c('Detection','Guessing Bias'),space=c(.1,.3))
se=matrix(sqrt(diag(solve(mod1$hessian))),nrow=2,ncol=2,byrow=T)
errbar(xpos,parMat,se,.1) #errbar() is defined at the top of Section 4 below

4. More Fun With Graphs -- Stephanie Wade's Data

dat=read.table(url('http://www.missouri.edu/~rouderj/wade.dat'),header=T)

#error bar function
errbar=function(x,y,height,width,lty=1){
arrows(x,y,x,y+height,angle=90,length=width,lty=lty)
arrows(x,y,x,y-height,angle=90,length=width,lty=lty)
}

#look at some things about the data
table(dat$drug)
size=as.vector(table(dat$drug))
summary(dat$post)
summary(dat$pre)
my.names=c('Saline','Sz','Sz+Low','Sz+High')

#default analysis, strikes me as not the most appropriate analysis
pre.mean=tapply(dat$pre,dat$drug,mean)
post.mean=tapply(dat$post,dat$drug,mean)
latency.mean=rbind(pre.mean,post.mean)
xpos=barplot(latency.mean,beside=T,names.arg=my.names)
#error bars?
preCI=qt(.975,size-1)*tapply(dat$pre,dat$drug,sd)/sqrt(size)
postCI=qt(.975,size-1)*tapply(dat$post,dat$drug,sd)/sqrt(size)
errbar(xpos,latency.mean,rbind(preCI,postCI),.1)
#redo graph with more y-space
xpos=barplot(latency.mean,beside=T,names.arg=my.names,ylim=c(0,120))
errbar(xpos,latency.mean,rbind(preCI,postCI),.1)
#ANOVA
latency=c(dat$pre,dat$post)
sub=as.factor(rep(dat$sub,2)) #treat as categorical, not numeric
drug=as.factor(rep(dat$drug,2)) #ditto
day=as.factor(rep(1:2,each=45))
summary(aov(latency~drug*day+Error(sub/day))) #I will teach the syntax later.
#quick check, isolate learning
learn=dat$post-dat$pre
barplot(tapply(learn,dat$drug,mean),names.arg=my.names)
summary(aov(learn~as.factor(dat$drug)))
#Seriously consider a nonparametric alternative: Friedman or Mann-Whitney
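#one possible sketch along these lines: a Kruskal-Wallis test (the rank-based
#analogue of the one-way ANOVA on the learning scores just above)
kruskal.test(learn~as.factor(dat$drug))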

#Alternative Analysis
hist(latency)
hist(log(latency))

#look at all data
matplot(log(rbind(dat$pre,dat$post)),typ='l',col=dat$drug,lty=1,lwd=2)
#Break out into 4 graphs
dev.off()
x11(width=8,height=4)
par(mfrow=c(1,4),mar=c(4,4,1,1))
for (d in 1:4)
{
datMat=t(cbind(dat$pre[dat$drug==d],dat$post[dat$drug==d]))
matplot(log(datMat),typ='l',col=d,lty=1,lwd=2,ylim=c(-2,5.2))
}

# different representation, group by drug
dev.off()
o=order(dat$drug)
dat=dat[o,]
datMat=cbind(dat$pre,dat$post)
matplot(log(datMat))
abline(v=c(12.5,23.5,33.5)) #separators between the four drug groups

#yet another representation
range=c(-2,5.2)
plot(log(dat$pre),log(dat$post),pch=20,col=dat$drug,cex=2,
xlim=range,ylim=range)
abline(0,1)

#I am not convinced there are any real learning effects.
learn=log10(dat$post/dat$pre)
boxplot(learn~dat$drug)
#alternative w/o log
boxplot((dat$post-dat$pre)~dat$drug)

#AOV
dat$drug=as.factor(dat$drug)
summary(aov(learn~dat$drug))
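#sketch: the MSE used for the model-based error bars below can also be pulled
#from the fit object instead of being typed in by hand ('fit.aov' and 'MSE.fit'
#are just convenient labels)
fit.aov=aov(learn~dat$drug)
MSE.fit=deviance(fit.aov)/df.residual(fit.aov) #residual sum of squares over residual df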
m=tapply(learn,dat$drug,mean)

#graph
stripchart(learn~dat$drug,vertical=T,xlim=c(.5,4.5),axes=F,
ylim=log10(c(.5,100)),pch=20,cex=1.2,col=rgb(0,0,1,.5),
ylab="Lerning Quotient")
points(1:4,m,pch=22,bg=rgb(1,0,0,.5),cex=2)
vals=c(.5,1,2,5,10,20,50,100)

MSE=.2126
size=table(dat$drug)
ci=sqrt(MSE/size)*qt(.975,sum(size))
#these are model-based error bars, so they are a touch aggressive
ciNon=qt(.975,size-1)*tapply(learn,dat$drug,sd)/sqrt(size)
#based on each group's own sd rather than the pooled MSE, less aggressive

#errbar(1:4,m,ci,.1)
errbar(1:4,m,ciNon,.1)
axis(1,at=1:4,labels=c("Saline","Sz","Sz+Low","Sz+High"))
axis(2,at=log10(vals),labels=vals)

#graph to show learning vs initial scores
#doesn't explain why Sz+High is so low.
plot(log10(dat$pre),learn,col=dat$drug,pch=c('0','S','L','H')[dat$drug],axes=F,
ylab="Learning Quotient",xlab="Initial Latency (sec)",ylim=log10(c(.5,100)))
axis(2,at=log10(vals),labels=vals)
xvals=c(.1,.2,.5,1,2,5,10,20)
axis(1,at=log10(xvals),labels=xvals)
lines(lowess(log10(dat$pre),learn))
