Tag Archives: ggplot

The previous post ended with the creation of a matrix that contains an unlisted version of the selection criteria for each fund & model combination. From this object let’s create three data frames which we then manipulate and use as data inputs to the ggplot function.

[6] Make a facet plot of Selection Criteria across non-problematic funds

Here we simply make a dodged barchart plot for both AIC and SBC criteria across the 9 remaining non-problematic funds.

```#ggplot across all funds
# Long-format data frame of AIC/SBC values, one row per (Fund, Model,
# criterion). `temp`, `subfund.names` and `n.models` come from the code at
# the end of the previous post.
df.1 <- data.frame(Fund = rep(subfund.names, n.models),
                   AIC = as.numeric(temp[, 1]),
                   SBC = as.numeric(temp[, 2]),
                   Model = temp[, 5],
                   stringsAsFactors = TRUE)  # TRUE, not T (T is reassignable)
df.1 <- gather(df.1, Selection.Criteria, values, -c(Fund, Model))

# Shared y-range so every facet is drawn on a comparable scale.
miny <- min(df.1[, 'values'])
maxy <- max(df.1[, 'values'])

# Dodged barchart of both criteria, one facet per non-problematic fund.
all.plot <- ggplot(data = df.1) +
  geom_bar(aes(x = Model, y = values, fill = Selection.Criteria),
           stat = "identity", position = 'dodge') +
  scale_fill_brewer(palette = 'Set1') +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        legend.position = 'bottom',
        legend.key.size = unit(0.2, "cm"),
        legend.key.width = unit(2, 'cm'),
        legend.direction = 'horizontal') +
  labs(title = 'Comparison of Selection Information for Lognormal, Autoregressive, \nand Regime Switching Models across Funds') +
  facet_wrap(~Fund, shrink = TRUE) +
  coord_cartesian(ylim = c(miny, maxy + 1))

```

l

Interpretation of the selection criteria is straightforward: the higher the value, the better the model. Although model differences are not very significant within each fund, the values for the selection criteria do show some variation across funds (thankfully). Models seem to do particularly well for the Equity.Market.Neutral and Fixed.Income.Arbitrage cases.

To gain a better view of how individual models performed,let’s take a look at the results for a particular fund & selection criterion combination.

[7] Snap plot function for the chosen fund and selection criterion

The FundSnap() function provides [1] a dodged barchart plot of models ranked according to the selected criterion (AIC,SBC) in descending order as well as [2]  a tabulation of numeric values (Likelihood,degree of freedom,AIC,SBC) with maximum/minimum values highlighted.

```########################################################################################
# Fund snap across models
########################################################################################

FundSnap <- function(fund, df.obj, order.by) {
  # Snapshot for one fund: [1] a barchart of models ranked by the chosen
  # selection criterion (order.by = 'AIC' or 'SBC') in descending order and
  # [2] a table of Likelihood/DF/AIC/SBC values with the rows holding the
  # maximal / minimal criterion value highlighted.
  #
  # Args:
  #   fund     : fund name as it appears in df.obj$Fund
  #   df.obj   : data frame with columns Fund, DF, Likelihood, AIC, SBC, Model
  #   order.by : name of the criterion column to rank by ('AIC' or 'SBC')

  e <- environment()  # lets ggplot resolve function-local variables in aes()

  # Table data: all criteria for the chosen fund.
  tbl.df <- df.obj %>%
    filter(Fund == fund) %>%
    select(Model, DF, Likelihood, AIC, SBC)

  # Plot data: fund rows sorted by the chosen criterion, descending.
  # Column-name subsetting with [[ ]] replaces the original eval(parse(...)).
  plot.df <- df.obj %>% filter(Fund == fund)
  plot.df <- plot.df[order(-plot.df[[order.by]]), c('Fund', 'Model', order.by)]

  #plot
  crit <- plot.df[[order.by]]
  miny <- min(crit)
  maxy <- max(crit)

  snap.plot <- ggplot(data = plot.df, environment = e) + theme_tufte() +
    geom_bar(aes(x = reorder(Model, -crit), y = crit, fill = crit),
             stat = "identity", position = 'dodge', alpha = 0.4) +
    scale_fill_continuous(low = "lightgreen", high = "darkgreen") +
    coord_cartesian(ylim = c(miny - 1, maxy + 1)) +
    theme(legend.position = 'none',
          legend.key.size = unit(0.2, "cm"),
          legend.key.width = unit(2, 'cm'),
          legend.direction = 'horizontal',
          axis.text.x = element_text(angle = 90, hjust = 1),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    labs(title = paste('Snapshot for : ', fund, ' & ', order.by, '\nacross models', sep = '')) +
    xlab('Models') +
    ylab('Values')

  #table
  tbl.df$Model <- as.character(tbl.df$Model)
  tbl.df$Likelihood <- round(tbl.df$Likelihood, 2)
  tbl.df$AIC <- round(tbl.df$AIC, 2)
  tbl.df$SBC <- round(tbl.df$SBC, 2)

  # Rows to highlight: best (max) and worst (min) value of the criterion.
  crit.col <- tbl.df[[order.by]]
  row.max.idx <- which(crit.col == max(crit.col))
  row.min.idx <- which(crit.col == min(crit.col))

  # hl.row.which was `row.idx` in the original — an undefined name that only
  # survived because hlt.row.exist=F meant it was never evaluated. NULL here.
  debug.spec <- ggTableSpec(columns.exist = T, columns.txt = colnames(tbl.df),
                            columns.font = rep('bold', ncol(tbl.df)),
                            columns.col = 'blue', columns.fill = 'grey',
                            columns.alpha = 0.7,
                            rows.exist = F, rows.txt = paste('row', 1:10, sep = ''),
                            rows.font = rep('bold', 10), rows.col = 'green',
                            rows.fill = 'grey', rows.alpha = 0.7,
                            data.obj = tbl.df,
                            hlt.col.exist = T,
                            hl.col.which = c(row.min.idx, row.max.idx),
                            hl.col.fill = c('lightgreen', 'darkgreen'),
                            hl.col.alpha = c(0.4, 0.4),
                            hlt.row.exist = F, hl.row.which = NULL,
                            hl.row.fill = 'skyblue', hl.row.alpha = 0.45)
  tbl.plot <- ggTableDrawer(debug.spec)

  # Stack the barchart above the table (custom layOut helper).
  layOut(
    list(snap.plot, 1, 1),
    list(tbl.plot, 2, 1)
  )
}
########################################################################################

```

ll

Implementation is simple once we have the proper data frame in place:

```#ggplot for specific funds
# One row per fund & model combination with every stored statistic;
# `temp` is the matrix built at the end of the previous post.
df.2 <- data.frame(Fund = rep(subfund.names, n.models),
                   DF = as.numeric(temp[, 3]),
                   Likelihood = as.numeric(temp[, 4]),
                   AIC = as.numeric(temp[, 1]),
                   SBC = as.numeric(temp[, 2]),
                   Model = temp[, 5],
                   stringsAsFactors = TRUE)  # TRUE, not T
# Example: snapshot of the Emerging.Markets fund ranked by SBC.
FundSnap(fund = 'Emerging.Markets', df.obj = df.2, order.by = 'SBC')

```

l

Above shows the case of the Emerging.Markets & SBC combination.The AR.GARCH model provides the maximal SBC value while the ILN.Reg3 model is least useful here.

[8] Percentage of funds for which a model has maximal AIC

The final plot is a bar chart plot with text annotations showing how often a particular model is the best performer across the remaining non problematic funds.

```#Percentages
# Percentage of funds for which each model attains the maximal AIC.
df.3 <- data.frame(Fund = rep(subfund.names, n.models),
                   AIC = as.numeric(temp[, 1]),
                   Model = temp[, 5],
                   stringsAsFactors = TRUE)  # TRUE, not T

# NOTE(review): apply() over rows coerces the data frame to a character
# matrix, so max(x[-1]) compares strings, not numbers. This reproduces the
# post's published behaviour but deserves a numeric reimplementation —
# verify before reusing.
col.idx <- apply(df.3, 1, function(x) which(x == max(x[-1]), arr.ind = TRUE))
col.names <- colnames(df.3)[col.idx]

# NOTE(review): length(df.3) is the number of COLUMNS (3), not the number of
# funds — presumably intended as the percentage denominator; confirm.
df.percentages <- df.3 %>%
  mutate(Best.Model = col.names) %>%
  group_by(Best.Model) %>%
  summarise(count = n() / length(df.3) * 100)

# Barchart of best-performer percentages with in-bar text labels.
perc.plot <- ggplot(data = df.percentages) +
  geom_bar(aes(x = Best.Model, y = count, fill = Best.Model),
           stat = "identity", position = 'dodge') +
  scale_fill_brewer(palette = 'Set1') +
  theme(axis.text.x = element_blank(),
        legend.position = 'bottom',
        legend.key.size = unit(0.2, "cm"),
        legend.key.width = unit(2, 'cm'),
        legend.direction = 'horizontal') +
  labs(title = 'Best performing models') +
  geom_text(aes(x = Best.Model, y = count / 2,
                label = paste(round(count, 2), '%', sep = ''),
                fontface = rep('bold', 4)), size = rel(4)) +
  xlab('') +
  ylab('Percentage')
```

ll

It appears that the AR.GARCH and ILN.Reg3 models provide the highest AIC values a third of the time. The simplest model, the independent log-normal model, fares relatively badly in comparison. Complicated models do seem to work better on our data.

Continuing with the list of tasks defined in the previous post, let’s load the edhec data set,fit each of the nine models to each of the funds and store results in a nested list object :

[4] Load data and store results

For each model and fund combination, I store the following information :

1. The Log-Likelihood value
2. The Degree of freedom
3. The AIC criterion
4. The SBC criterion

The code follows :

```########################################################################################
########################################################################################

# Monthly log returns from the raw edhec csv (first column holds the dates).
edhec.xts <- xts(order.by = seq(as.Date('1997-01-31'), as.Date('2014-07-31'), by = '1 month'),
                 orig.csv[, -1])
edhec.xts <- log(edhec.xts + 1)

########################################################################################
# Nested Lists and Regression results
########################################################################################

n.funds <- ncol(edhec.xts)
fund.names <- names(edhec.xts)
n.obs <- nrow(edhec.xts)

n.models <- 9

# Copy the StatComp() summary vector into the named fields read downstream.
AddStatFields <- function(model.obj, stat) {
  model.obj$Stat <- stat
  model.obj$LogLik <- stat['model.loglik']
  model.obj$df <- stat['model.df']
  model.obj$AIC <- stat['model.aic']
  model.obj$SBC <- stat['model.sbc']
  model.obj
}

# Fit a rugarch ARMA/GARCH spec and return the higher-is-better selection
# statistics (loglik - penalty), with k the parameter count for the spec.
FitGarchStats <- function(garch.order, arma.order, logret, k, n.obs) {
  spec <- ugarchspec(variance.model = list(garchOrder = garch.order),
                     mean.model = list(armaOrder = arma.order))
  fit <- ugarchfit(spec = spec, data = logret, solver.control = list(trace = 1))
  ll <- likelihood(fit)
  list(LogLik = ll, df = k, AIC = ll - k, SBC = ll - 0.5 * k * log(n.obs))
}

# edhec.ret[[fund]] holds the cleaned return series plus, per model, the
# log-likelihood, degrees of freedom, AIC and SBC.
edhec.ret <- list()
for (i in seq_len(n.funds)) {
  edhec.ret[[i]] <- list()
  names(edhec.ret)[i] <- fund.names[i]
  edhec.ret[[i]]$Ret.xts <- edhec.xts[, i]
  # Clean outliers before fitting (Boudt method from PerformanceAnalytics).
  logret <- Return.clean(edhec.ret[[i]]$Ret.xts, method = 'boudt')

  #Independent Log-normal model (intercept-only regression)
  iln.fit <- lm(logret ~ 1)
  edhec.ret[[i]]$ILN <- AddStatFields(iln.fit, StatComp(iln.fit, n.obs))

  #Autoregressive model AR(1)
  ar1.fit <- arima(x = logret, order = c(1, 0, 0))
  edhec.ret[[i]]$AR1 <- AddStatFields(ar1.fit, StatComp(ar1.fit, n.obs))

  #ARCH, AR-ARCH, GARCH and AR-GARCH models; the parameter counts
  #(3, 4, 4, 5) match the original hand-coded degrees of freedom.
  edhec.ret[[i]]$ARCH     <- FitGarchStats(c(1, 0), c(0, 0), logret, 3, n.obs)
  edhec.ret[[i]]$AR.ARCH  <- FitGarchStats(c(1, 0), c(1, 0), logret, 4, n.obs)
  edhec.ret[[i]]$GARCH    <- FitGarchStats(c(1, 1), c(0, 0), logret, 4, n.obs)
  edhec.ret[[i]]$AR.GARCH <- FitGarchStats(c(1, 1), c(1, 0), logret, 5, n.obs)

  #ILN 2 regimes (depmixS4); formula built by name instead of eval(parse(...))
  model.spec <- depmix(as.formula(paste(fund.names[i], '~ 1')), nstates = 2, data = logret)
  model.fit <- fit(model.spec)
  edhec.ret[[i]]$ILN.Reg2 <- AddStatFields(list(), StatComp(model.fit, n.obs))

  #ILN 3 regimes
  model.spec <- depmix(as.formula(paste(fund.names[i], '~ 1')), nstates = 3, data = logret)
  model.fit <- fit(model.spec)
  edhec.ret[[i]]$ILN.Reg3 <- AddStatFields(list(), StatComp(model.fit, n.obs))

  #AR-Regime switch model: regress returns on their own lag.
  #(The original `2:n.obs-1` parses as (2:n.obs)-1 = 1:(n.obs-1), i.e. the lag.)
  temp.df <- data.frame(logret[2:n.obs], logret[1:(n.obs - 1)])
  names(temp.df) <- c('left', 'right')

  model.spec <- depmix(left ~ right, nstates = 2, data = temp.df)
  model.fit <- fit(model.spec)
  edhec.ret[[i]]$AR.Reg2 <- AddStatFields(list(), StatComp(model.fit, n.obs))

}
```

l
To give an idea of the output,consider the case of CTA.Global fund and the Autoregressive-Regime switching model :

The StatComp() function used to calculate these values is simply :

```########################################################################################
# Calculating Stats for Comparison across models
########################################################################################

StatComp <- function(fitted.model, n.obs) {
  # Selection statistics for a fitted model, using the higher-is-better
  # convention (log-likelihood minus a parameter penalty).
  #
  # Args:
  #   fitted.model : any model object with a logLik() method
  #   n.obs        : number of observations, used in the SBC penalty
  # Returns a named numeric vector: model.df, model.loglik, model.aic, model.sbc.
  ll.obj <- logLik(fitted.model)
  k <- attr(ll.obj, which = 'df')     # number of estimated parameters
  ll <- as.numeric(ll.obj)

  c(model.df = k,
    model.loglik = ll,
    model.aic = ll - k,
    model.sbc = ll - 0.5 * k * log(n.obs))
}
```

ll
Once we have fitted each of the nine models to the data for each of the 13 funds in the edhec dataset,let’s summarise this information in a new list object by saving across models.

[5] Saving across models and making data ggplot friendly

The purpose of this step in the process is to summarise the data contained in the edhec.ret list object above into something that can be conveniently used in ggplot functions. Problematic funds are subsequently identified and deleted.

```# Saving across models & removing problematic funds
# Collapse the nested edhec.ret object into one matrix per model and flag
# fund/model combinations whose fit failed ("problematic" funds).
model.names <- names(edhec.ret[[1]])[-1]   # drop the Ret.xts element
model.list <- list()
problems <- NULL

for(i in 1:n.models){
model.list[[i]] <- list()
names(model.list)[i] <- model.names[i]
# Bind per-fund results for model i into a funds x statistics matrix.
# NOTE(review): eval(parse(...)) could simply be x[[model.names[i]]];
# also `\$` looks like an HTML-export artifact for `$` — verify the raw source.
model.list[[i]] <- do.call(rbind,lapply(edhec.ret,function(x) eval(parse(text=paste('x\$',model.names[i],sep='')))))[,c('AIC','SBC','df','LogLik')]
# NOTE(review): is.na(x == 0) is equivalent to is.na(x); the `== 0` has no
# effect. Negative entries are also treated as problematic — confirm intent.
problems <- c(problems,which(is.na(model.list[[i]]==0) | model.list[[i]]<0))
model.list[[i]] <- cbind(model.list[[i]],Model=rep(model.names[i],n.funds))
}

# NOTE(review): the index surgery below (drop indices > 13, then drop the 5th
# survivor) is specific to this particular data run, per the surrounding text
# ("For me the problematic funds are..."). It will not generalize — confirm
# against your own fit failures before reuse.
problem.funds <- unique(problems)[-which(problems>13)][-5]
model.list<-lapply(model.list,function(x) x[-problem.funds,])
subfund.names <- rownames(model.list[[1]])

# Unlisted matrix version of model.list; feeds df.1/df.2/df.3 later.
temp <- apply(do.call(rbind,model.list),2,unlist)
```

ll
By problematic funds, i mean those for which the following error messages occurred in the fitting process:

For me the problematic funds are the ones corresponding to the following indices :

Ultimately,our model.list object looks like this :

This is the first element of the list. It continues this way for all other models listed in the previous post. The last line of code creates a matrix called temp which contains the unlisted version of the data contained in model.list. In the next post we will use this temp object to create three data frames, each of which we will manipulate using the tidyR package to produce the desired data structure for ggplotting purposes.

Now that we have (Gods willing) aligned fund returns,regime indices and state probabilities by time,we can finally turn to the fifth issue in the previously defined list and examine in-sample performance along the following lines :

1. Standard-deviation-Scaled difference across state means
2. Cumulative returns and drawdowns when states are known versus unknown
3. Event-state mean vs non-event-state mean for the chosen fund across all economic regimes.

[5] In-Sample Performance

To every element in the Aligned list object from the end of the previous post,
l

```#[7.3] Fund Performance : In-sample
# In-sample performance (scaled mean differences etc.) for every aligned
# regime data frame. lapply(X, f) replaces lapply(X, function(x) f(x)),
# and the redundant empty-list pre-assignments are dropped: lapply's
# result overwrote them immediately.
Regimes <- lapply(Aligned, FundPerf)

# One scaled-mean-difference ggplot per regime variable.
plot.list <- lapply(Regimes, FundMeanPlot)
```

l
I applied the custom FundPerf() function to calculate among other things, the scaled differences in means.
l

```#########################################################################################
# Fund Performance
#########################################################################################
FundPerf <- function(aligned.list.element){
  # In-sample performance for one regime variable: event-state vs
  # non-event-state mean returns per fund, scaled by each fund's
  # full-sample standard deviation.
  #
  # Args:
  #   aligned.list.element : data frame whose first column is the regime
  #     index, followed by state-map columns (Normal.Class, Event.Class)
  #     and fund return columns (Convertible.Arbitrage:Funds.Of.Funds).
  # Returns a list: Aligned.df, Event.df, NonEvent.df, FundPerf.df.

  # The first column is the regime/turbulence index — select it by position
  # instead of the original eval(parse(text = names(...)[1])).
  aligned.sub <- select(aligned.list.element, 1, Normal.Class:Funds.Of.Funds)

  # Full-sample sd per fund; the first three columns are the regime index
  # and the two state maps, not returns.
  std.smpl <- apply(aligned.sub[, -c(1:3)], 2, sd)

  # Returns zeroed outside the event state; means over event months only.
  event.ret <- select(aligned.sub, Convertible.Arbitrage:Funds.Of.Funds) * aligned.sub[, 'Event.Class']
  event.mean <- colMeans(event.ret[which(aligned.sub[, 'Event.Class'] == 1), ])

  # Same for the normal (non-event) state.
  nonevent.ret <- select(aligned.sub, Convertible.Arbitrage:Funds.Of.Funds) * aligned.sub[, 'Normal.Class']
  nonevent.mean <- colMeans(nonevent.ret[which(aligned.sub[, 'Normal.Class'] == 1), ])

  result.df <- data.frame(Fund = colnames(aligned.list.element[-c(1:5)]),
                          EventMean = event.mean,
                          NonEventMean = nonevent.mean,
                          Stdev = std.smpl)
  # Scaled difference in means — the paper's performance score.
  fund.result <- mutate(result.df, Perf = (event.mean - nonevent.mean) / std.smpl)

  return(list(Aligned.df = aligned.sub,
              Event.df = event.ret,
              NonEvent.df = nonevent.ret,
              FundPerf.df = fund.result))
}
#########################################################################################
```

l
To give a sense of the output that this function returns. the following shows a snapshot of the returned list object for the Regimes\$Growth case :

• A subset of the original aligned data frame now containing only the time series of the regime variable index,state maps and fund returns.

•  A data frame which contains the time series of fund returns multiplied by a vector (of zeros and ones) corresponding to the event state.

• A data frame which contains the time series of fund returns multiplied by a vector (of zeros and ones) corresponding to the normal state.

• A data frame which contains the fund names,the event mean,the non event mean and the final scaled value (given in the paper as event mean minus non event mean divided by standard deviation)

The FundMeanPlot() function is then used to generate a ggplot on the basis of this output  :
l

```#########################################################################################
# Fund Performance event/non-event mean plot
#########################################################################################
FundMeanPlot <- function(regime.obj){
  # Horizontal barchart of the scaled difference in means (Perf) per fund
  # for one regime variable, as produced by FundPerf().

  localenv <- environment()  # lets ggplot resolve local vars in aes()

  dfdata <- regime.obj$FundPerf.df
  # First column name of the aligned frame is the regime variable's name.
  regime.name <- names(regime.obj$Aligned.df)[1]

  # Rounded y-limits with a small margin either side.
  min.ret <- round(min(dfdata[, 'Perf']), 1)
  max.ret <- round(max(dfdata[, 'Perf']), 1)

  ggp <- ggplot(dfdata, aes(x = Fund, y = Perf), environment = localenv) +
    theme_economist(base_size = 5) +
    # Map fill to the Perf column itself; the original `dfdata$Perf` bypassed
    # ggplot's data masking and can silently misalign rows.
    geom_bar(stat = 'identity', position = position_dodge(), aes(fill = Perf)) +
    scale_y_continuous(limits = c(min.ret - 0.2, max.ret + 0.2),
                       breaks = seq(min.ret - 0.2, max.ret + 0.2, length = 5)) +
    labs(x = '',
         title = paste('Scaled Difference in Means for', regime.name, 'regime', sep = ' '),
         y = 'Values') +
    theme(legend.position = "none",
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    coord_flip()

  return(ggp)
}
#########################################################################################
```

l
In addition to the in-sample scaled performance score calculated above,I also tried to look at how cumulative returns varied depending on one’s knowledge of the estimated states in each regime. I simply multiplied the chosen fund return by the state map for each economic regime variable (so that fund returns are multiplied by 1 in the case of a normal state and 0 of an event state),calculated the cumulative return as well as drawdowns for that series (using the PerformanceAnalytics package) and compared the results to the case where states are unknown.

I don’t know if these steps make sense or if my implementation is correct but that is what i did in any case. Although the data for this is already available from previous code segments,I purposefully started afresh so I can toy with it more easily (eg.multiplying by -1 in the event state as opposed to 0). Here is the code to create the data frame to be passed to the custom CumuCalc() function.
l

```#[7.3.2] Comparing Cumulative returns when states are known vs unknown

n.regimes <- num.turbs
regimes.names <- names.turbs
fund.names <- names(edhec.xts)

# Nested list keyed fund -> regime; each leaf holds the raw and
# state-adjusted return series plus cumulative-return/drawdown plots,
# enabling access like CompareCumu$Short.Selling$Inflation.
CompareCumu <- list()
for (i in seq_len(n.funds)) {

  CompareCumu[[i]] <- list()
  names(CompareCumu)[i] <- fund.names[i]

  for (j in seq_len(n.regimes)) {
    CompareCumu[[i]][[j]] <- list()
    names(CompareCumu[[i]])[[j]] <- regimes.names[j]

    CompareCumu[[i]][[j]]$name.str <- paste(regimes.names[j], '::', fund.names[i], 'Fund')
    CompareCumu[[i]][[j]]$data.orig <- data.frame(
      TimeIdx    = as.Date(rownames(Regimes[[j]]$Aligned.df)),
      FundReturn = Regimes[[j]]$Aligned.df[, fund.names[i]],
      State      = Regimes[[j]]$Aligned.df$Normal.Class
    )
    temp.store <- CompareCumu[[i]][[j]]$data.orig

    # State.new keeps the event-state multiplier explicit (0 here) so it is
    # easy to toy with alternatives such as -1 in the event state.
    CompareCumu[[i]][[j]]$data.new <- mutate(temp.store,
                                             State.new = ifelse(State == 1, 1, 0),
                                             FundReturn.new = State.new * FundReturn)
    CompareCumu[[i]][[j]]$plot <- CumuCalc(CompareCumu[[i]][[j]])
  }
}
```

l
The most important issue here is the call to the CumuCalc() function, which returns a ggplot of cumulative returns and drawdowns for the chosen fund/regime combination. The nested list structure of the CompareCumu variable allows us to [1] loop over all funds and [2] within each fund loop over all regime variables, simplifying data access to a command like CompareCumu\$Short.Selling\$Inflation, for example.

For the sake of completeness,the code for that function is given here :
l

```#########################################################################################
# Cumulative  Calculation and ggplots saved
#########################################################################################

CumuCalc <- function(fund.df){
  # Cumulative returns and drawdowns for one fund/regime combination,
  # comparing the raw series ('Original') with the state-filtered series
  # ('Markov', returns zeroed in the event state).
  #
  # Args:
  #   fund.df : one CompareCumu leaf, with $data.new holding TimeIdx,
  #             FundReturn and FundReturn.new columns.
  # Returns list(ggp1, ggp2): cumulative-return and drawdown ggplots.
  #
  # Note: the original also built an unused label data frame (lbl.df) and a
  # local `fund.names` that shadowed the global of the same name; both removed.

  tindex <- as.Date(fund.df$data.new$TimeIdx)

  orig.ret <- fund.df$data.new[, 'FundReturn']
  orig.cumu.ret <- cumprod(orig.ret + 1) - 1

  state.ret <- fund.df$data.new[, 'FundReturn.new']
  state.cumu.ret <- cumprod(state.ret + 1) - 1

  # Drawdowns() comes from PerformanceAnalytics.
  orig.dd <- Drawdowns(orig.ret)
  state.dd <- Drawdowns(state.ret)

  ggplot.df <- data.frame(TimeIdx      = tindex,
                          Ret.Original = orig.cumu.ret,
                          Ret.Markov   = state.cumu.ret,
                          DD.Original  = orig.dd,
                          DD.Markov    = state.dd,
                          stringsAsFactors = FALSE)

  # Cumulative return comparison.
  ggp1 <- ggplot(data = ggplot.df, aes(x = TimeIdx)) +
    theme_economist(base_size = 5) +
    geom_line(aes(x = TimeIdx, y = Ret.Original, colour = 'Original'), size = 1) +
    geom_line(aes(x = TimeIdx, y = Ret.Markov, colour = 'Markov'), size = 1) +
    scale_colour_manual("", breaks = c('Markov', 'Original'), values = c('skyblue4', 'cyan')) +
    labs(x = '', y = 'Values') +
    theme(legend.position = c(0.3, 1),
          legend.key.size = unit(0.2, "cm"),
          legend.key.width = unit(2, 'cm'),
          legend.direction = 'horizontal',
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank())

  # Drawdown comparison (legend suppressed; shared with ggp1 visually).
  ggp2 <- ggplot(data = ggplot.df, aes(x = TimeIdx)) +
    theme_economist(base_size = 5) +
    geom_line(aes(x = TimeIdx, y = DD.Original, colour = 'Original'), size = 1) +
    geom_line(aes(x = TimeIdx, y = DD.Markov, colour = 'Markov'), size = 1) +
    scale_colour_manual("Legend : ", breaks = c('Markov', 'Original'), values = c('skyblue4', 'cyan')) +
    labs(x = 'Time', y = 'Values') +
    theme(legend.position = 'none',
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank())

  return(list(ggp1, ggp2))

}
####################################################################################################
```

l
This function simply returns a list object with two elements, ggplots of cumulative returns and drawdowns for the chosen fund and regime combination when states are known versus when they are unknown.

A final issue I tried to address,so as to make the dashboard less empty, was to compare the event-state mean with its non event counterpart for the chosen fund and economic regime variable. Once again,although all the necessary data frames are already in place,I dealt with this issue separately to ease my pain :
l

```#[7.3.4] Facet Plot Date frame and graph

# Per-fund data frames of event / non-event mean returns across all
# regimes, reshaped long for the dodged-barchart facet plot.
Facet.obj <- list()
for (i in seq_len(n.funds)) {
  Facet.obj[[i]] <- list()
  names(Facet.obj)[i] <- fund.names[i]

  # Stack fund i's (EventMean, NonEventMean) pair for every regime.
  values.matrix <- NULL
  for (j in seq_len(n.regimes)) {
    temp <- select(Regimes[[j]]$FundPerf.df, c(Fund, EventMean:NonEventMean))
    filtered <- filter(temp, Fund == fund.names[i])[, -1]
    values.matrix <- rbind(values.matrix, filtered)
  }

  Facet.obj[[i]]$df <- data.frame(Regime = regimes.names,
                                  Event = values.matrix[, 1],
                                  Normal = values.matrix[, 2])

  # Long format: one row per (Regime, State) with the mean in 'Mean'.
  Facet.obj[[i]]$reshaped.df <- gather(Facet.obj[[i]]$df, variable, value, -Regime)
  colnames(Facet.obj[[i]]$reshaped.df)[-1] <- c('State', 'Mean')
}

# lapply(X, f) replaces the redundant anonymous wrapper.
FacetPlot.list <- lapply(Facet.obj, FacetPlot)
```

l
The FacetPlot() function called at the end there simply returns a ggplot of ‘dodged’ barcharts of average returns across regimes and states for the chosen fund.
l

```#########################################################################################
# Facet Plotter
#########################################################################################

FacetPlot <- function(plot.obj){
  # Dodged barchart of average returns across regimes and states for one
  # fund, flipped so regimes read as horizontal bars.
  plotdf <- plot.obj$reshaped.df

  # Build the bar layer separately to keep the chain readable.
  bar.layer <- geom_bar(data = plotdf,
                        aes(x = Regime, y = Mean, fill = State),
                        stat = "identity", position = 'dodge')

  bare.theme <- theme(axis.ticks = element_blank(),
                      legend.position = 'none',
                      axis.title = element_blank(),
                      axis.text.x = element_blank(),
                      panel.grid.major = element_blank(),
                      panel.grid.minor = element_blank())

  ggbb <- ggplot(plotdf) +
    theme_economist(base_size = 5) +
    bar.layer +
    scale_fill_brewer(palette = 'Set1') +
    labs(title = 'Average returns across\nRegimes & States') +
    coord_flip() +
    bare.theme

  return(ggbb)
}
#########################################################################################
```

ll
Across these three posts,we have gathered all the inputs necessary for our final dashboard.Let’s save these into a single list and pass it to the DashboardPlot() function in the next post.
l

```#[7.3.3] Dashboard
# Collect every input the DashboardPlot() call in the next post needs.
dashboard.list <- list(FundRet.list=edhec.ret,   # nested per-fund model fits
Turbulence.plot=Turbulence.plot,   # turbulence/regime index plot
Turbulence1.tbl=Turbulence1.tbl,   # turbulence summary table (1)
Turbulence2.tbl=Turbulence2.tbl,   # turbulence summary table (2)
plot.list=plot.list,               # scaled mean-difference plots per regime
CompareCumu=CompareCumu,           # cumulative return / drawdown comparisons
FacetPlot=FacetPlot.list,          # per-fund average-return facet plots
FacetObj=Facet.obj,                # underlying facet data frames
MarkovPlots=MarkovEstPlots)        # Markov estimation plots from an earlier post
```

l