We use the following packages:

library(class)
library(mvtnorm)
library(dplyr)
library(magrittr)
library(ggplot2)

  1. Reproduce the data from slide 18 twice, but now instead of \(\pm 1.5\) use adjustments of \(\pm .5\) and \(\pm 2.5\), respectively.

First we create the data:

set.seed(123)
sigma <- matrix(c(1, .5, .5, 1), 2, 2)
sim.data <- rmvnorm(n = 100, 
                    mean = c(5, 5), 
                    sigma = sigma)
colnames(sim.data) <- c("x1", "x2")
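
As a quick sanity check (not part of the exercise), we can verify that the sample moments are close to the population values we fed to rmvnorm():

colMeans(sim.data) # should be close to c(5, 5)
cov(sim.data)      # should be close to sigma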

Then we add a class label to each observation. The labels themselves are assigned at random; the actual clustering arises from the adjustments below:

sim.data <- 
  sim.data %>%
  as_tibble %>%
  mutate(class = sample(c("A", "B", "C"), size = 100, replace = TRUE))
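
Because the labels are drawn completely at random, the three classes will be roughly, but not exactly, equal in size. We can check the realized counts:

table(sim.data$class)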

Then we apply the adjustments to the data:

sim.data.half <- 
  sim.data %>%
  mutate(x2 = case_when(class == "A" ~ x2 + .5,
                        class == "B" ~ x2 - .5,
                        class == "C" ~ x2 + .5),
         x1 = case_when(class == "A" ~ x1 - .5,
                        class == "B" ~ x1 - 0,
                        class == "C" ~ x1 + .5))
sim.data.twohalf <- 
  sim.data %>%
  mutate(x2 = case_when(class == "A" ~ x2 + 2.5,
                        class == "B" ~ x2 - 2.5,
                        class == "C" ~ x2 + 2.5),
         x1 = case_when(class == "A" ~ x1 - 2.5,
                        class == "B" ~ x1 - 0,
                        class == "C" ~ x1 + 2.5))
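
Before fitting anything, it helps to see how far apart the classes are. A minimal plotting sketch for the .5 version (the 2.5 version is analogous):

# scatterplot of the adjusted data, coloured by class
sim.data.half %>%
  ggplot(aes(x1, x2, colour = class)) +
  geom_point() +
  ggtitle("Simulated data \n Adjustment = .5")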

  1. Add a column to the data sets that indicates a Train (25%) and a Test (75%) part.
set.seed(123)
sim.data.half %<>% 
  mutate(set = sample(c("Train", "Test"), size=100, prob = c(.25, .75), replace=TRUE))
sim.data.twohalf %<>% 
  mutate(set = sample(c("Train", "Test"), size=100, prob = c(.25, .75), replace=TRUE))
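
Because sample() assigns the labels with probabilities rather than fixed counts, the realized split is only approximately 25/75, and the two data sets receive different assignments because the second call continues the random-number stream. We can inspect the realized sizes:

table(sim.data.half$set)
table(sim.data.twohalf$set)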

  1. Fit the K-NN model to both data sets. Use k = 3. For the first model with the .5 adjustment:
# create training and test sets
train.half <- subset(sim.data.half, set == "Train", select = c(x1, x2))
class.half <- subset(sim.data.half, set == "Train", select = class)
test.half <- subset(sim.data.half, set == "Test", select = c(x1, x2))

# run k-NN model
fit.knn.half <- knn(train = train.half,
                    test = test.half,
                    cl = as.matrix(class.half),
                    k = 3)

Then for the model with the 2.5 adjustment:

# create training and test sets
train.twohalf <- subset(sim.data.twohalf, set == "Train", select = c(x1, x2))
class.twohalf <- subset(sim.data.twohalf, set == "Train", select = class)
test.twohalf <- subset(sim.data.twohalf, set == "Test", select = c(x1, x2))

# run k-NN model
fit.knn.twohalf <- knn(train = train.twohalf,
                       test = test.twohalf,
                       cl = as.matrix(class.twohalf),
                       k = 3)

  1. What is the percentage of correct predictions for each model? For the .5 adjustment data:
class.test.half <- subset(sim.data.half, set == "Test", select = class) %>%
  as.matrix()
correct.half <- fit.knn.half == class.test.half
mean(correct.half)
## [1] 0.5479452

and for the 2.5 adjustment data:

class.test.twohalf <- subset(sim.data.twohalf, set == "Test", select = class) %>%
  as.matrix()
correct.twohalf <- fit.knn.twohalf == class.test.twohalf

mean(correct.twohalf)
## [1] 0.9875

The model based on the 2.5 adjustment data performs much better. But in those data the classes are also far more clearly separated.
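
To see where the .5 model goes wrong, we can cross-tabulate the predictions against the observed test classes:

# confusion matrix: predicted (rows) vs. observed (columns)
table(predicted = fit.knn.half, observed = class.test.half)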


  1. Plot the false and correct predictions for both models. For the .5 adjustment data:
cbind(test.half, correct.half) %>%
  ggplot(aes(x1, x2,  colour = correct.half)) +
  geom_point() +
  scale_colour_manual(values = c("red", "black")) + 
  ggtitle("K-NN classification \n Adjustment = .5")

We can see many mistakes for this model, but then again, there is not much clustering of values to detect.

For the 2.5 adjustment data this changes:

cbind(test.twohalf, correct.twohalf) %>%
  ggplot(aes(x1, x2,  colour = correct.twohalf)) +
  geom_point() +
  scale_colour_manual(values = c("red", "black")) + 
  ggtitle("K-NN classification \n Adjustment = 2.5")

The clusters are visibly separated. It is quite difficult to misclassify values based on their three closest neighbors, except for values that lie somewhere in between two (or more) clusters. Now we make only one misclassification, given that we have used set.seed(123); different seeds may yield different data and, hence, different results. The misclassified value is indeed one of those in-between values: it lies between two clusters.
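
To inspect that point directly, one option (a sketch using the objects defined above) is to filter the test data on the correctness indicator:

# show the misclassified test observation(s)
cbind(test.twohalf, correct.twohalf) %>%
  filter(!correct.twohalf)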


  1. Write a function that determines the optimum k with respect to classification error. Have the function return the optimal k, the corresponding (maximum) proportion of correct predictions, and a data frame with the proportion correct for every k considered:
Optimize.knn <- function(train.set, test.set, train.class, test.class, min = 1, max = NULL) {
  if (is.null(max)) {
    max <- nrow(train.set) # largest feasible k: all training observations
  }
  if (!is.matrix(train.class)) {
    train.class <- as.matrix(train.class)
  }
  output <- list() # list to store the fitted models in
  for (i in min:max) {
    output[[i - min + 1]] <- knn(train = train.set,
                                 test = test.set,
                                 cl = train.class,
                                 k = i)
  }
  compare <- function(x) mean(x == test.class) # proportion correct
  correct <- data.frame(k = min:max,
                        p.correct = sapply(output, compare))
  # which.max() returns the first maximum, i.e. the smallest optimal k
  optimum <- correct$k[which.max(correct$p.correct)]
  result <- list(optimum.k = optimum,
                 max.p.correct = max(correct$p.correct),
                 results = correct)
  return(result)
}

  1. Execute your function twice: once for the data set based on the .5 adjustment and once for the data set based on the 2.5 adjustment. Does the previously used k=3 yield the optimal classification prediction?
Optimize.knn(train.half, test.half, class.half, class.test.half)
## $optimum.k
## [1] 6
## 
## $max.p.correct
## [1] 0.5753425
## 
## $results
##     k p.correct
## 1   1 0.5205479
## 2   2 0.4931507
## 3   3 0.5479452
## 4   4 0.5205479
## 5   5 0.5342466
## 6   6 0.5753425
## 7   7 0.5616438
## 8   8 0.5342466
## 9   9 0.5342466
## 10 10 0.5616438
## 11 11 0.5479452
## 12 12 0.4383562
## 13 13 0.4520548
## 14 14 0.4109589
## 15 15 0.3698630
## 16 16 0.3561644
## 17 17 0.3424658
## 18 18 0.3424658
## 19 19 0.3424658
## 20 20 0.3424658
## 21 21 0.3424658
## 22 22 0.3424658
## 23 23 0.3424658
## 24 24 0.3424658
## 25 25 0.3424658
## 26 26 0.3424658
## 27 27 0.3424658
Optimize.knn(train.twohalf, test.twohalf, class.twohalf, class.test.twohalf)
## $optimum.k
## [1] 1
## 
## $max.p.correct
## [1] 0.9875
## 
## $results
##     k p.correct
## 1   1    0.9875
## 2   2    0.9875
## 3   3    0.9875
## 4   4    0.9875
## 5   5    0.9875
## 6   6    0.8625
## 7   7    0.7125
## 8   8    0.6500
## 9   9    0.6375
## 10 10    0.6125
## 11 11    0.6125
## 12 12    0.6125
## 13 13    0.6125
## 14 14    0.5750
## 15 15    0.5375
## 16 16    0.4875
## 17 17    0.3500
## 18 18    0.2125
## 19 19    0.2125
## 20 20    0.2125
So, no: for the .5 adjustment data k = 3 is not optimal; k = 6 yields the highest proportion correct (.575 versus .548 for k = 3). For the 2.5 adjustment data k = 3 is among the optima: k = 1 through 5 all reach .9875. Note also that for the largest values of k the model effectively predicts the majority training class for every test point, which is why the proportion correct levels off at a constant value.

We can also store the result for later use. Note that the table below differs slightly from the one above: when the k nearest neighbors produce a tied vote, knn() breaks the tie at random, so repeated calls can give slightly different proportions.

the2.5knn <- Optimize.knn(train.twohalf, test.twohalf, class.twohalf, class.test.twohalf)
the2.5knn$results
##     k p.correct
## 1   1    0.9875
## 2   2    0.9875
## 3   3    0.9875
## 4   4    0.9875
## 5   5    0.9875
## 6   6    0.9000
## 7   7    0.7125
## 8   8    0.6750
## 9   9    0.6375
## 10 10    0.6125
## 11 11    0.6125
## 12 12    0.6125
## 13 13    0.6125
## 14 14    0.5625
## 15 15    0.5125
## 16 16    0.5000
## 17 17    0.3625
## 18 18    0.2125
## 19 19    0.2125
## 20 20    0.2125
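
Because of this random tie-breaking, a minimal sketch for making the table reproducible is to fix the seed immediately before the call:

# fixing the seed right before the call makes the random
# tie-breaking in knn() reproducible
set.seed(123)
Optimize.knn(train.twohalf, test.twohalf, class.twohalf, class.test.twohalf)$results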

End of Practical