sessionInfo()
## R version 4.1.2 (2021-11-01)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 20.04.3 LTS
## 
## Matrix products: default
## BLAS:   /usr/lib/x86_64-linux-gnu/atlas/libblas.so.3.10.3
## LAPACK: /usr/lib/x86_64-linux-gnu/atlas/liblapack.so.3.10.3
## 
## locale:
##  [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C              
##  [3] LC_TIME=en_US.UTF-8        LC_COLLATE=en_US.UTF-8    
##  [5] LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
##  [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                 
##  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
## [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## loaded via a namespace (and not attached):
##  [1] digest_0.6.28   R6_2.5.1        jsonlite_1.7.2  magrittr_2.0.1 
##  [5] evaluate_0.14   rlang_0.4.12    stringi_1.7.5   jquerylib_0.1.4
##  [9] bslib_0.3.1     rmarkdown_2.11  tools_4.1.2     stringr_1.4.0  
## [13] xfun_0.27       yaml_2.2.1      fastmap_1.1.0   compiler_4.1.2 
## [17] htmltools_0.5.2 knitr_1.36      sass_0.4.0
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ──
## ✓ ggplot2 3.3.5     ✓ purrr   0.3.4
## ✓ tibble  3.1.5     ✓ dplyr   1.0.7
## ✓ tidyr   1.1.4     ✓ stringr 1.4.0
## ✓ readr   2.1.0     ✓ forcats 0.5.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()

In this lab, we apply neural networks to handwritten digit recognition.

Data

We use the MNIST database (Modified National Institute of Standards and Technology database), a large collection of \(28 \times 28\) handwritten digit images that is commonly used for training and testing machine learning algorithms.

You can prepare the data with the following code:

library(keras)
mnist <- dataset_mnist(path = "mnist.npz")
## Loaded Tensorflow version 2.7.0
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y

Training set:

dim(x_train)
## [1] 60000    28    28
dim(y_train)
## [1] 60000

Let’s take a look at the first 10 images in the training set.

# Arrange the 10 digits in a 2 x 5 grid, titled with their labels
par(mfrow = c(2, 5), mar = c(0, 0, 2, 0))
for (i in 1:10) {
  image(t(x_train[i, 28:1, ]), useRaster = TRUE, axes = FALSE,
        col = grey(seq(0, 1, length = 256)), main = y_train[i])
}

Vectorize \(28 \times 28\) images into \(784\)-vectors and scale entries to \([0, 1]\):

# reshape
x_train <- array_reshape(x_train, c(nrow(x_train), 784))
x_test <- array_reshape(x_test, c(nrow(x_test), 784))
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
dim(x_train)
## [1] 60000   784
dim(x_test)
## [1] 10000   784
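# Quick sanity check (optional): after rescaling, every entry should lie in [0, 1]
range(x_train)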

Encode \(y\) as a binary class (one-hot) matrix:

y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
dim(y_train)
## [1] 60000    10
dim(y_test)
## [1] 10000    10
head(y_train)
##      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
## [1,]    0    0    0    0    0    1    0    0    0     0
## [2,]    1    0    0    0    0    0    0    0    0     0
## [3,]    0    0    0    0    1    0    0    0    0     0
## [4,]    0    1    0    0    0    0    0    0    0     0
## [5,]    0    0    0    0    0    0    0    0    0     1
## [6,]    0    0    1    0    0    0    0    0    0     0
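
Each row is a one-hot encoding: a 1 in column \(j\) marks the digit \(j - 1\). As a quick sketch (not required for the pipeline below), the encoding can be inverted with which.max():

# which.max() returns the 1-based column index of the 1, so subtracting 1
# recovers the digit; the first six should be 5 0 4 1 9 2, matching the
# rows printed above
apply(head(y_train), 1, which.max) - 1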

Q0

Fit a multinomial logit regression model to the training set and test its accuracy on the test set. Plot the first 10 digits in the test set and compare them with their predicted values.

mlogit <- keras_model_sequential() 
mlogit %>% 
  layer_dense(units = 10, activation = 'softmax', input_shape = c(784))
summary(mlogit)
## Model: "sequential"
## ________________________________________________________________________________
##  Layer (type)                       Output Shape                    Param #     
## ================================================================================
##  dense (Dense)                      (None, 10)                      7850        
##                                                                                 
## ================================================================================
## Total params: 7,850
## Trainable params: 7,850
## Non-trainable params: 0
## ________________________________________________________________________________
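# Sanity check on the parameter count: 784 input weights for each of the
# 10 output units (784 x 10 = 7,840) plus 10 biases gives 7,850.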
# compile model
mlogit %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = optimizer_rmsprop(),
  metrics = c('accuracy')
)
# fit model
mlogit_history <- mlogit %>% fit(
  x_train, y_train, 
  epochs = 20, batch_size = 128, 
  validation_split = 0.2
)
# Evaluate model performance on the test data:
mlogit %>% evaluate(x_test, y_test)
##     loss accuracy 
## 0.270597 0.925100

Generate predictions on new data:

y_predict <- mlogit %>% predict(x_test) %>% k_argmax() %>% as.array()
# Plot the first 10 test digits, titled with their predicted labels
par(mfrow = c(2, 5), mar = c(0, 0, 2, 0))
for (i in 1:10) {
  image(t(mnist$test$x[i, 28:1, ]), useRaster = TRUE, axes = FALSE,
        col = grey(seq(0, 1, length = 256)), main = y_predict[i])
}
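
Beyond eyeballing ten digits, a confusion matrix summarizes where the model errs; this is a quick sketch using base R’s table():

# Cross-tabulate predicted vs. true labels; off-diagonal counts are errors
table(predicted = y_predict, actual = mnist$test$y)
# Overall test accuracy computed directly from the predictions
mean(y_predict == mnist$test$y)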

Q2

Fit a multi-layer neural network and perform the task in Q0 again.

You can refer to this example code: https://tensorflow.rstudio.com/guide/keras/examples/mnist_mlp/
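
For reference, here is a minimal sketch loosely following the linked example; the layer widths (256, 128), dropout rates (0.4, 0.3), and epoch count are illustrative choices to tune, not prescribed values.

mlp <- keras_model_sequential()
mlp %>%
  layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>%
  layer_dropout(rate = 0.4) %>%                # regularize the hidden layers
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = 10, activation = 'softmax')
mlp %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = optimizer_rmsprop(),
  metrics = c('accuracy')
)
mlp %>% fit(
  x_train, y_train,
  epochs = 20, batch_size = 128,
  validation_split = 0.2
)
mlp %>% evaluate(x_test, y_test)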

Q3

Fit a convolutional neural network and perform the same task as in Q0.

You can refer to this example code: https://tensorflow.rstudio.com/guide/keras/examples/mnist_cnn/
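
For reference, a minimal sketch loosely following the linked example. A CNN consumes image tensors rather than flattened vectors, so the raw arrays are reshaped to (n, 28, 28, 1) first; the filter counts, dropout rates, and epoch count are again illustrative choices.

# Reshape the raw arrays to (n, 28, 28, 1) and rescale to [0, 1]
x_train_img <- array_reshape(mnist$train$x, c(nrow(mnist$train$x), 28, 28, 1)) / 255
x_test_img  <- array_reshape(mnist$test$x,  c(nrow(mnist$test$x), 28, 28, 1)) / 255
cnn <- keras_model_sequential()
cnn %>%
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = 'relu',
                input_shape = c(28, 28, 1)) %>%
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = 'relu') %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(rate = 0.25) %>%
  layer_flatten() %>%                          # back to a vector for the dense head
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 10, activation = 'softmax')
cnn %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = optimizer_rmsprop(),
  metrics = c('accuracy')
)
cnn %>% fit(
  x_train_img, y_train,
  epochs = 12, batch_size = 128,
  validation_split = 0.2
)
cnn %>% evaluate(x_test_img, y_test)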

Q4

Summarize the prediction accuracy and runtime differences among these models.
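
One way to collect the runtime numbers is to wrap each fit() call in system.time(), as in this sketch for the Q0 model (mlp and cnn stand for whatever you named your Q2 and Q3 models):

# Wall-clock training time for one model; repeat for mlp and cnn
mlogit_time <- system.time(
  mlogit %>% fit(x_train, y_train, epochs = 20, batch_size = 128,
                 validation_split = 0.2)
)
mlogit_time["elapsed"]                # training time in seconds
mlogit %>% evaluate(x_test, y_test)   # test loss and accuracy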