library(MASS) ## fitdistr() is used in exercises 4 and 6

## Exercise 1

## Bifurcation
## via the binomial
## negative log-likelihood of a binomial observation
llbinom=function(p,N=1,obs){-sum(dbinom(x=obs,prob=p,size=N,log=T))}
vals=seq(0,0.12,by=0.001)
ll.bif=sapply(vals,llbinom,N=150,obs=7)
plot(ll.bif-min(ll.bif)~vals,type="l")
abline(h=2,lty=2,col="red")
range(vals[ll.bif-min(ll.bif)<=2])
## via the normal

## Canker
## via the binomial
vals=seq(from=0,to=0.01,by=0.0001)
ll.can=sapply(vals,llbinom,N=250,obs=0)
plot(ll.can-min(ll.can)~vals,type="l")
abline(h=2,lty=2,col="red")
range(vals[ll.can-min(ll.can)<=2])

## Drought
## via the binomial
vals=seq(from=0.01,to=0.1,by=0.001)
ll.arv=sapply(vals,llbinom,N=300,obs=10)
plot(ll.arv-min(ll.arv)~vals,type="l")
abline(h=2,lty=2,col="red")
range(vals[ll.arv-min(ll.arv)<=2])

## Exercise 2
## H1: a single proportion for the three groups pooled
h1=-dbinom(x=1044+711+934,size=1146+797+1081,prob=2689/3024,log=T)
## H2: a separate proportion for each group, each at its observed value
h2.1=-dbinom(x=1044,size=1146,prob=1044/1146,log=T)
h2.2=-dbinom(x=711,size=797,prob=711/797,log=T)
h2.3=-dbinom(x=934,size=1081,prob=934/1081,log=T)

## likelihood profiles
## same binomial negative log-likelihood, now without a default N
llbinom=function(p,N,obs){-sum(dbinom(x=obs,prob=p,size=N,log=T))}
ex2.vals=seq(0.84,0.92,by=0.01)
h2.1.p=sapply(ex2.vals,llbinom,N=1146,obs=1044)
h2.2.p=sapply(ex2.vals,llbinom,N=797,obs=711)
h2.3.p=sapply(ex2.vals,llbinom,N=1081,obs=934)
plot(h2.1.p-min(h2.1.p)~ex2.vals,type="l")
points(h2.2.p-min(h2.2.p)~ex2.vals,type="l")
points(h2.3.p-min(h2.3.p)~ex2.vals,type="l")
abline(h=2,lty=2,col="red")

## Exercise 3
## weighted mean and weighted standard deviation across the three groups
m.pond=(1.717*1044+1.175*711+0.696*934)/(1044+711+934)
sd.pond=(1.965*1044+1.319*711+0.963*934)/(1044+711+934)
## H1: one common mean, common standard error
ex3.h1=-sum(dnorm(x=c(1.717,1.175,0.696),mean=m.pond,
                  sd=sd.pond/sqrt(1044+711+934),log=T))
## H2: a separate mean per group, same standard error as H1
ex3.h2.1=-dnorm(x=1.717,mean=1.717,sd=sd.pond/sqrt(1044+711+934),log=T)
ex3.h2.2=-dnorm(x=1.175,mean=1.175,sd=sd.pond/sqrt(1044+711+934),log=T)
ex3.h2.3=-dnorm(x=0.696,mean=0.696,sd=sd.pond/sqrt(1044+711+934),log=T)
ex3.h2=ex3.h2.1+ex3.h2.2+ex3.h2.3
## H3: a separate mean per group, each with its own sd and sample size
ex3.h3.1=-dnorm(x=1.717,mean=1.717,sd=1.965/sqrt(1044),log=T)
ex3.h3.2=-dnorm(x=1.175,mean=1.175,sd=1.319/sqrt(711),log=T)
ex3.h3.3=-dnorm(x=0.696,mean=0.696,sd=0.963/sqrt(934),log=T)
ex3.h3=ex3.h3.1+ex3.h3.2+ex3.h3.3

## Exercise 4
ex4=c( 9, 5, 7, 7, 4, 3, 5, 0,
       3, 5, 4, 6, 8, 6, 2, 7,
       2, 4, 5, 5, 4, 8, 7, 6,
       3, 3, 4, 2, 7, 7, 9, 7,
      10, 9, 7,15, 6, 6, 7, 6,
       2, 5, 6, 5, 5, 6, 5, 6,
       5, 7,10,11, 6,11, 9, 8,
       3, 9, 2, 1, 6, 9, 3, 6)
## empirical frequencies vs. Poisson and negative binomial densities
plot(table(ex4)/length(ex4))
points(x=0:15,y=dpois(x=0:15,lambda=mean(ex4)),type="l",col="red")
## method-of-moments estimate of the negative binomial size parameter
ka=(mean(ex4))^2/(var(ex4)-mean(ex4))
points(x=0:15,y=dnbinom(x=0:15,mu=mean(ex4),size=ka),type="l",col="blue")
ex4.pois=fitdistr(ex4,"poisson")
ex4.nbin=fitdistr(ex4,"negative binomial",start=list(mu=mean(ex4),size=ka))
ex4.pois
ex4.nbin
ka
-logLik(ex4.pois)
-logLik(ex4.nbin)

## likelihood profile for the Poisson mean
ex4.vals=seq(4,7,by=0.05)
llpois=function(media,valores){-sum(dpois(x=valores,lambda=media,log=T))}
ex4.LL=sapply(ex4.vals,llpois,valores=ex4)
plot(ex4.LL-min(ex4.LL)~ex4.vals,type="l")
abline(h=2,lty=2,col="red")
## plausibility interval
range(ex4.vals[ex4.LL-min(ex4.LL)<=2])
## confidence interval via the normal approximation
t.test(ex4)

## Exercise 5
ex5=c(11, 9,13, 3, 3,11,14, 5,
       6, 9,10,134,12, 6,10,11,
      10,10,11,13,10, 6, 6,10,
       8,10, 8,10,12,10,17, 9,
      12,10,12, 5,12,10,12, 9,
      10,10, 8,16,10,13,12, 9,
       8,12,13, 9,14,11, 7, 8,
       9,13,10,13,15,21,13, 9)

## Exercise 6
ex6=read.csv("diametro-arvores.csv")
hist(ex6$dap,prob=T)
qqnorm(ex6$dap); qqline(ex6$dap)
ex6.norm=fitdistr(ex6$dap,"normal")
ex6.exp=fitdistr(ex6$dap,"exponential")
ex6.weib=fitdistr(ex6$dap,"weibull")
ex6.norm
ex6.exp
ex6.weib
-logLik(ex6.norm)
-logLik(ex6.exp)
-logLik(ex6.weib)
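## Sketch (not in the original script): since the three candidate models have
## different numbers of parameters, they could also be compared with AIC();
## this works on fitdistr objects because MASS provides a logLik method.
AIC(ex6.norm,ex6.exp,ex6.weib)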
cnorm=as.numeric(coef(ex6.norm))
cexp=as.numeric(coef(ex6.exp))
cweib=as.numeric(coef(ex6.weib))
## histogram with the three fitted densities overlaid
hist(ex6$dap,prob=T)
curve(dnorm(x,mean=cnorm[1],sd=cnorm[2]),add=T,col="blue")
curve(dexp(x,rate=cexp),add=T)
curve(dweibull(x,shape=cweib[1],scale=cweib[2]),add=T,col="red")

## Exercise 7
## mean diameter per sampling point
ex7=aggregate(ex6$dap,list(ponto=ex6$ponto),mean)
hist(ex7$x,prob=T)
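## Sketch (not in the original script): a normal density with the sample mean
## and sd of the per-point means could be overlaid on this histogram, mirroring
## the overlays used for the raw diameters in exercise 6.
curve(dnorm(x,mean=mean(ex7$x),sd=sd(ex7$x)),add=T,col="blue")
qqnorm(ex7$x); qqline(ex7$x)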