
loadfrench <- function(zipfile, txtfile, skip, nrows) {

  require(xts)

  #my.url is the location of the zip file with the data
  my.url <- paste("http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/",
    zipfile, ".zip", sep="")

  #my.tempfile is the temp file set up for the zip file;
  #file.path() keeps the paths portable across operating systems
  my.tempfile <- file.path(tempdir(), "frenchzip.zip")

  #my.usefile is the name of the txt file extracted from the zip
  my.usefile <- file.path(tempdir(), paste(txtfile, ".txt", sep=""))

  download.file(my.url, my.tempfile, method="auto",
    quiet=FALSE, mode="wb", cacheOK=TRUE)

  #note the argument is junkpaths (plural); junkpath is an error
  unzip(my.tempfile, exdir=tempdir(), junkpaths=TRUE)

  #read space-delimited text file extracted from zip
  french <- read.table(file=my.usefile,
    header=TRUE, sep="",
    as.is=TRUE,
    skip=skip, nrows=nrows)

  #get dates ready for the xts index; row names are yyyymm,
  #so rebuild them as yyyy-mm-01
  datestoformat <- rownames(french)
  datestoformat <- paste(substr(datestoformat, 1, 4),
    substr(datestoformat, 5, 6), "01", sep="-")

  #get xts for analysis
  french_xts <- as.xts(french, order.by=as.Date(datestoformat))

  #divide by 100 to get percent
  french_xts <- french_xts / 100

  #replace missing data, denoted by -99.99 in the raw file
  #(-0.9999 after dividing by 100), with 0; index cell by cell
  #so valid observations sharing a row or column are untouched
  coredata(french_xts)[coredata(french_xts) < -0.99] <- 0

  return(french_xts)
}
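
#example of a single call (the same call the loop below makes for each
#region); skip and nrows depend on each file's header and footer layout,
#so verify them against the raw txt file before reusing this elsewhere
#global <- loadfrench(zipfile="Global_25_Portfolios_ME_BE-ME",
#                     txtfile="Global_25_Portfolios_ME_BE-ME",
#                     skip=21, nrows=266)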

filenames <- c("Global_25_Portfolios_ME_BE-ME",
  "Europe_25_Portfolios_ME_BE-ME",
  "Japan_25_Portfolios_ME_BE-ME",
  "Asia_Pacific_ex_Japan_25_Portfolios_ME_BE-ME",
  "North_America_25_Portfolios_ME_BE-ME")

#loop through the filenames to load the file for each region;
#each region is assigned to the first four characters of its
#filename (Glob, Euro, Japa, Asia, Nort)
for (i in 1:length(filenames)) {
  assign(substr(filenames[i], 1, 4),
    loadfrench(zipfile=filenames[i], txtfile=filenames[i],
      skip=21, nrows=266))
}
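
#quick sanity check (optional, added for illustration): each of the
#five region objects should now be a 266-row, 25-column monthly xts
#sapply(substr(filenames, 1, 4), function(x) dim(get(x)))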

#merge the data into one xts object for ease of reference and use;
#columns 21:25 are the biggest-cap quintile across the five
#valuation (BE/ME) buckets
big <- get(substr(filenames[1], 1, 4))[, 21:25]
colnames(big) <- paste(substr(filenames[1], 1, 4), ".",
  c("expensive", 2:4, "cheap"), sep="")
#also set up an equal weight of the big caps to explore
#the regions without the valuation split
big.ew <- as.xts(apply(big, MARGIN=1, FUN=mean), order.by=index(big))
colnames(big.ew) <- substr(filenames[1], 1, 4)

for (i in 2:length(filenames)) {
  temp <- get(substr(filenames[i], 1, 4))[, 21:25]
  colnames(temp) <- paste(substr(filenames[i], 1, 4), ".",
    c("expensive", 2:4, "cheap"), sep="")
  big <- merge(big, temp)
  temp.ew <- as.xts(apply(temp, MARGIN=1, FUN=mean), order.by=index(temp))
  colnames(temp.ew) <- substr(filenames[i], 1, 4)
  big.ew <- merge(big.ew, temp.ew)
}
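
#optional check (illustrative): big should hold 25 columns
#(5 regions x 5 valuation buckets) and big.ew one column per region
#dim(big); head(big.ew)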

#use the equal-weighted big cap
portfolio <- big.ew #change to big to see the full 5x5 detail

require(fPortfolio)

#do a frontier plot for 1990-1999, for 2000-current, and then
#for the full series
frontier <- list(portfolioFrontier(as.timeSeries(portfolio["::1999", ])),
  portfolioFrontier(as.timeSeries(portfolio["2000::", ])),
  portfolioFrontier(as.timeSeries(portfolio)))
datelabels <- c("1990-1999", "2000-2012", "1990-2012")
#get colors with topo.colors for the three frontiers;
#use the first 3 of the 4 supplied, in reverse order
colors <- topo.colors(4)[3:1]

#collect each period's minimum-variance point as we plot, both for
#labeling and for the parallel coordinates chart at the end
minvar.points <- list()

for(i in 1:3) {
  #add=TRUE for i > 1 overlays the later frontiers on the first plot
  frontierPlot(frontier[[i]], pch=19, xlim=c(0, 0.10), ylim=c(0, 0.015),
    title=FALSE, col=c(colors[i], colors[i]), add=(i > 1))
  #minvariancePoints() draws the point and invisibly returns its
  #coordinates, so store them for reuse
  minvar.points[[i]] <- minvariancePoints(frontier[[i]], pch=19, col="red")
  #tangencyPoints(frontier[[i]], pch=19, col="blue")
  #tangencyLines(frontier[[i]], pch=19, col="blue")
  #equalWeightsPoints(frontier[[i]], pch=15, col="grey")
  singleAssetPoints(frontier[[i]], pch=19, cex=1, col=colors[i])
  #twoAssetsLines(frontier[[i]], lty=3, col="grey")
  #sharpeRatioLines(frontier[[i]], col="orange", lwd=2)
  #legend("topleft", legend=colnames(portfolio), pch=19,
  #  col=topo.colors(10), cex=0.65)

  #label each asset at its risk/return coordinates
  stats <- getStatistics(frontier[[i]])
  text(y=stats$mean, x=sqrt(diag(stats$Cov)), labels=names(stats$mean),
    pos=4, col=colors[i], cex=0.7)

  #set up function adapted from fPortfolio's equalWeightsPoints
  #to also label the equal-weight point
  equalLabel <- function(object, return=c("mean", "mu"),
    risk=c("Cov", "Sigma", "CVaR", "VaR"), auto=TRUE, ...)
  {
    return = match.arg(return)
    risk = match.arg(risk)
    data = getSeries(object)
    spec = getSpec(object)
    constraints = getConstraints(object)
    numberOfAssets = getNAssets(object)
    setWeights(spec) = rep(1/numberOfAssets, times=numberOfAssets)
    ewPortfolio = feasiblePortfolio(data, spec, constraints)
    assets = frontierPoints(ewPortfolio, return=return, risk=risk,
      auto=auto)
    text(assets, labels="Equal-Weight", pos=4, ...)
    invisible(assets)
  }
  #equalLabel(frontier[[i]], cex=0.7, col="grey")

  #label each frontier with its dates at the minimum-variance point,
  #reusing the stored coordinates rather than calling
  #minvariancePoints() again (which would replot the point)
  text(x=minvar.points[[i]][, 1],
    y=minvar.points[[i]][, 2],
    labels=datelabels[i], col=colors[i], pos=2)
}

title(main="Global Biggest Cap Efficient Frontier",
  xlab="Risk (cov)", ylab="Monthly Return")
mtext(side=3,
  text="source: http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html",
  font=3, cex=0.8)

#parallel coordinates of each period's minimum-variance point
#might also be interesting; parcoord() comes from MASS
require(MASS)
minvar <- as.data.frame(do.call(rbind, minvar.points))
rownames(minvar) <- datelabels
parcoord(minvar, col=colors)

#might be nice to do an animated gif or parallel coordinates
#of the weights or risk/return
weightsPlot(frontier[[3]])
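
#rough sketch of the parallel-coordinates-of-weights idea above
#(illustrative, not from the original post): getWeights() extracts the
#weight matrix along the full-period frontier, one row per portfolio
#wts <- getWeights(frontier[[3]])
#parcoord(wts, col=topo.colors(NROW(wts)))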