# Load libraries for Neural Networks and Deep Learning
# These require installing and accessing Python and several libraries, which can be a major headache
# If they are not installed, and you don't want to spend several hours at the command line fixing things,
# try running this code in a Kaggle environment instead: everything there is preinstalled
library(keras) # Simple syntax for fitting neural networks
library(tensorflow) # The machinery behind the simple Keras code: a library and tools for building neural networks
library(reticulate) # Access to Python, which is the language TensorFlow is written in
# Yes, we are going three layers of languages deep, from R through Keras and TensorFlow to Python, to run this code
# Subsequent code examples *show* but do not run the Keras code for each step:
# initialize, define models (feedforward, feedforward with dropout or L1 regularization,
# recurrent, and convolutional + recurrent), train, display results.
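# The feature matrices and labels used below (trainfeat, y_train, valfeat, y_val) are not
# defined in this script. A minimal sketch with synthetic stand-ins, assuming 59 numeric
# features and a binary outcome, so the feedforward blocks below can actually run:
n_train <- 1000; n_val <- 200
trainfeat <- matrix(rnorm(n_train * 59), nrow = n_train, ncol = 59) # synthetic features
y_train <- rbinom(n_train, size = 1, prob = 0.5)                    # synthetic 0/1 labels
valfeat <- matrix(rnorm(n_val * 59), nrow = n_val, ncol = 59)
y_val <- rbinom(n_val, size = 1, prob = 0.5)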
ffmodel <- keras_model_sequential() %>%
  layer_dense(units = 32, activation = "relu", input_shape = c(59)) %>%
  layer_dense(units = 32, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")
ffmodel %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)
ffhistory <- ffmodel %>% fit(
  x = trainfeat, y = y_train,
  epochs = 20, batch_size = 10,
  validation_data = list(valfeat, y_val)
)
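# A minimal sketch: the history object returned by fit() can be passed straight to
# plot(), which the keras package supports for training histories; this produces
# the loss and accuracy curves referenced below
plot(ffhistory)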
# [Figure: Loss and Accuracy in Training and Validation Data]
ffmodel5 <- keras_model_sequential() %>%
  layer_dense(units = 30, activation = "relu", input_shape = c(59)) %>%
  layer_dropout(rate = 0.5) %>% # 50% chance of dropping each neuron in each batch
  layer_dense(units = 30, activation = "relu") %>%
  layer_dropout(rate = 0.5) %>% # Add dropout again
  layer_dense(units = 1, activation = "sigmoid")
ffmodel6 <- keras_model_sequential() %>%
  layer_dense(units = 64, kernel_regularizer = regularizer_l1(0.001), # L1 penalty shrinks weights toward zero
              activation = "relu", input_shape = c(59)) %>%
  layer_dense(units = 64, kernel_regularizer = regularizer_l1(0.001),
              activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")
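# The regularized variants are compiled and trained exactly like ffmodel above.
# A minimal sketch for the dropout model, reusing the same compile arguments and
# the synthetic data defined earlier (ffhistory5 is a name introduced here, not
# one from the source):
ffmodel5 %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)
ffhistory5 <- ffmodel5 %>% fit(
  x = trainfeat, y = y_train,
  epochs = 20, batch_size = 10,
  validation_data = list(valfeat, y_val)
)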
# data (not defined in this script) is assumed to be a [timesteps x features] matrix;
# dim(data)[[-1]] picks out its last dimension, the number of features per timestep
recmodellstm <- keras_model_sequential() %>%
  layer_lstm(units = 32, dropout = 0.2, recurrent_dropout = 0.2,
             input_shape = list(NULL, dim(data)[[-1]])) %>% # NULL: sequences of any length
  layer_dense(units = 1) # linear output for regression
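# A minimal sketch of compiling the recurrent model. The linear output unit and
# the MSE figure at the end of this script imply a regression setup, so mean
# squared error is the natural loss (an assumption, since the compile call is
# not shown in the source):
recmodellstm %>% compile(
  optimizer = "rmsprop",
  loss = "mse"
)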
crmodel2 <- keras_model_sequential() %>%
  layer_conv_1d(filters = 32, kernel_size = 5, kernel_regularizer = regularizer_l1(0.001),
                activation = "relu", input_shape = list(NULL, dim(data)[[-1]])) %>% # convolutions extract local patterns
  layer_max_pooling_1d(pool_size = 3) %>% # downsample the sequence by a factor of 3
  layer_conv_1d(filters = 32, kernel_size = 5, kernel_regularizer = regularizer_l1(0.001),
                activation = "relu") %>%
  layer_gru(units = 32, kernel_regularizer = regularizer_l1(0.001), dropout = 0.1, recurrent_dropout = 0.5) %>% # GRU processes the shortened sequence
  layer_dense(units = 1)
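# As with the LSTM model, a minimal compile sketch under the same regression
# assumption (the actual training call is not shown in the source):
crmodel2 %>% compile(
  optimizer = "rmsprop",
  loss = "mse"
)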
# [Figure: Training and Validation MSE]