capstone
April 25, 2024
[37]: import random
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg # for loading in images
%matplotlib inline
[50]: training_im_list_ronaldo = []
for file in glob.glob("CelebPhotos/training/ronaldo/*"):
    # Read in the image
    im = mpimg.imread(file)
    # Append the image with its class label (0 = ronaldo)
    training_im_list_ronaldo.append([im, 0])
[51]: training_im_list_musk = []
for file in glob.glob("CelebPhotos/training/musk/*"):
    # Read in the image
    im = mpimg.imread(file)
    # Append the image with its class label (1 = musk)
    training_im_list_musk.append([im, 1])
[52]: training_im_list_oprah = []
for file in glob.glob("CelebPhotos/training/oprah/*"):
    # Read in the image
    im = mpimg.imread(file)
    # Append the image with its class label (2 = oprah)
    training_im_list_oprah.append([im, 2])
[53]: test_im_list_oprah = []
for file in glob.glob("CelebPhotos/test/oprah/*"):
    # Read in the image
    im = mpimg.imread(file)
    # Append the image with its class label (2 = oprah)
    test_im_list_oprah.append([im, 2])
[54]: test_im_list_musk = []
for file in glob.glob("CelebPhotos/test/musk/*"):
    # Read in the image
    im = mpimg.imread(file)
    # Append the image with its class label (1 = musk)
    test_im_list_musk.append([im, 1])
[55]: test_im_list_ronaldo = []
for file in glob.glob("CelebPhotos/test/ronaldo/*"):
    # Read in the image
    im = mpimg.imread(file)
    # Append the image with its class label (0 = ronaldo)
    test_im_list_ronaldo.append([im, 0])
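The six cells above repeat the same read-and-label pattern. A helper along the lines of the sketch below could replace them; load_labeled_images is a hypothetical name, and it assumes the same CelebPhotos directory layout and labels used above (0 = ronaldo, 1 = musk, 2 = oprah).

[ ]: # Sketch only: one helper for the repeated read-and-label loops above.
# load_labeled_images is a hypothetical helper, not part of the original run.
def load_labeled_images(pattern, label):
    labeled = []
    for file in glob.glob(pattern):
        im = mpimg.imread(file)      # read the image
        labeled.append([im, label])  # pair it with its class label
    return labeled

# Example usage:
# training_im_list_ronaldo = load_labeled_images("CelebPhotos/training/ronaldo/*", 0)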
[56]: test_im_list_combined = test_im_list_ronaldo + test_im_list_musk + test_im_list_oprah
[57]: training_im_list_combined = training_im_list_ronaldo + training_im_list_musk + training_im_list_oprah
[58]: random.shuffle(test_im_list_combined)
random.shuffle(training_im_list_combined)
[59]: # This function takes in an RGB image and returns a new, standardized version
def standardize_input(image):
    image_crop = np.copy(image)
    row_crop = 7
    col_crop = 8
    # Crop a fixed border off each side of the image
    image_crop = image_crop[row_crop:-row_crop, col_crop:-col_crop]
    # Resize so that all "standard" images are the same size
    standard_im = cv2.resize(image_crop, (32, 32))
    return standard_im
[60]: !pip install opencv-python
import cv2 # OpenCV, provides the cv2.resize used in standardize_input
Requirement already satisfied: opencv-python in /opt/conda/lib/python3.10/site-
packages (4.9.0.80)
Requirement already satisfied: numpy>=1.21.2 in /opt/conda/lib/python3.10/site-
packages (from opencv-python) (1.23.5)
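With cv2 available, a quick sanity check (assuming at least one ronaldo training image was loaded) confirms that standardize_input returns 32x32 RGB arrays:

[ ]: # Sanity-check sketch: verify the standardized image shape.
sample = standardize_input(training_im_list_ronaldo[0][0])
print(sample.shape)  # expected: (32, 32, 3)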
[61]: X_test = []
y_test = []
X_train = []
y_train = []
# Standardize every test image and collect images and labels separately
for im in test_im_list_combined:
    standardized = standardize_input(im[0])
    X_test.append(standardized)
    y_test.append(im[1])
# Standardize every training image and collect images and labels separately
for im in training_im_list_combined:
    standardized = standardize_input(im[0])
    X_train.append(standardized)
    y_train.append(im[1])
[62]: import tensorflow as tf
X_test = np.array(X_test)
y_test = np.array(y_test)
X_train = np.array(X_train)
# Reshape the data so it works with KNN, since KNN only accepts 2-dimensional
# datasets; flattening each image converts the 4-dim dataset into a 2-dim dataset.
## KNN AREA ##
n_samples, dim1, dim2, dim3 = X_train.shape
X_train_knn = X_train.reshape(n_samples, dim1 * dim2 * dim3)
n_samples, dim1, dim2, dim3 = X_test.shape
X_test_knn = X_test.reshape(n_samples, dim1 * dim2 * dim3)
## TF Multilayer NN ##
X_train_tf = tf.convert_to_tensor(X_train)
y_train_tf = np.array(y_train)
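X_train_knn and X_test_knn are prepared here, but no KNN classifier is fitted in this section. A minimal sketch using scikit-learn's KNeighborsClassifier (the import and the choice of k=3 are assumptions, not part of the recorded run):

[ ]: # KNN sketch on the flattened data; n_neighbors=3 is an assumed value.
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train_knn, y_train)
print("KNN test accuracy:", knn.score(X_test_knn, y_test))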
[63]: X_train.shape
[63]: (21, 32, 32, 3)
[64]: X_train_knn.shape
# flattened from 4-D to 2-D so it can be fed into the KNN algorithm
[64]: (21, 3072)
[65]: model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(32, 32, 3)),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(10)
])
[66]: model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
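The very large training losses below are consistent with raw 0-255 pixel values flowing straight into the dense layers. One common remedy, not applied in the recorded run, is to rescale inputs to [0, 1]; a minimal sketch of that variant:

[ ]: # Sketch only: the same architecture with inputs rescaled to [0, 1]
# (an assumed variant, not the model trained below).
scaled_model = tf.keras.Sequential([
    tf.keras.layers.Rescaling(1./255, input_shape=(32, 32, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(10)
])
scaled_model.compile(optimizer='adam',
                     loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                     metrics=['accuracy'])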
[19]: training_history = model.fit(X_train_tf, y_train_tf, epochs=50)
Epoch 1/50
1/1 [==============================] - 1s 549ms/step - loss: 278.4537 -
accuracy: 0.0000e+00
Epoch 2/50
1/1 [==============================] - 0s 11ms/step - loss: 136.8713 - accuracy:
0.4286
Epoch 3/50
1/1 [==============================] - 0s 11ms/step - loss: 873.1324 - accuracy:
0.3810
Epoch 4/50
1/1 [==============================] - 0s 9ms/step - loss: 633.8701 - accuracy:
0.6190
Epoch 5/50
1/1 [==============================] - 0s 9ms/step - loss: 947.1122 - accuracy:
0.3333
Epoch 6/50
1/1 [==============================] - 0s 9ms/step - loss: 461.8854 - accuracy:
0.3333
Epoch 7/50
1/1 [==============================] - 0s 9ms/step - loss: 93.0297 - accuracy:
0.6667
Epoch 8/50
1/1 [==============================] - 0s 41ms/step - loss: 117.3218 - accuracy:
0.7143
Epoch 9/50
1/1 [==============================] - 0s 9ms/step - loss: 351.9328 - accuracy:
0.4762
Epoch 10/50
1/1 [==============================] - 0s 9ms/step - loss: 395.5494 - accuracy:
0.5238
Epoch 11/50
1/1 [==============================] - 0s 9ms/step - loss: 298.4268 - accuracy:
0.5714
Epoch 12/50
1/1 [==============================] - 0s 9ms/step - loss: 268.7654 - accuracy:
0.6667
Epoch 13/50
1/1 [==============================] - 0s 9ms/step - loss: 232.7682 - accuracy:
0.6667
Epoch 14/50
1/1 [==============================] - 0s 8ms/step - loss: 132.1676 - accuracy:
0.7143
Epoch 15/50
1/1 [==============================] - 0s 10ms/step - loss: 24.2420 - accuracy:
0.8571
Epoch 16/50
1/1 [==============================] - 0s 9ms/step - loss: 14.2222 - accuracy:
0.9524
Epoch 17/50
1/1 [==============================] - 0s 9ms/step - loss: 45.7453 - accuracy:
0.8571
Epoch 18/50
1/1 [==============================] - 0s 8ms/step - loss: 80.2530 - accuracy:
0.7619
Epoch 19/50
1/1 [==============================] - 0s 7ms/step - loss: 77.2863 - accuracy:
0.8095
Epoch 20/50
1/1 [==============================] - 0s 9ms/step - loss: 56.7919 - accuracy:
0.9048
Epoch 21/50
1/1 [==============================] - 0s 15ms/step - loss: 42.7763 - accuracy:
0.9524
Epoch 22/50
1/1 [==============================] - 0s 28ms/step - loss: 61.6861 - accuracy:
0.8571
Epoch 23/50
1/1 [==============================] - 0s 9ms/step - loss: 83.4838 - accuracy:
0.7143
Epoch 24/50
1/1 [==============================] - 0s 9ms/step - loss: 46.2261 - accuracy:
0.8571
Epoch 25/50
1/1 [==============================] - 0s 9ms/step - loss: 22.9200 - accuracy:
0.9524
Epoch 26/50
1/1 [==============================] - 0s 8ms/step - loss: 19.5285 - accuracy:
0.9524
Epoch 27/50
1/1 [==============================] - 0s 8ms/step - loss: 14.8376 - accuracy:
0.9524
Epoch 28/50
1/1 [==============================] - 0s 38ms/step - loss: 8.9528 - accuracy:
0.9524
Epoch 29/50
1/1 [==============================] - 0s 9ms/step - loss: 1.9692 - accuracy:
0.9524
Epoch 30/50
1/1 [==============================] - 0s 9ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 31/50
1/1 [==============================] - 0s 8ms/step - loss: 1.9414 - accuracy:
0.9524
Epoch 32/50
1/1 [==============================] - 0s 8ms/step - loss: 3.1183 - accuracy:
0.9524
Epoch 33/50
1/1 [==============================] - 0s 9ms/step - loss: 4.9481 - accuracy:
0.9524
Epoch 34/50
1/1 [==============================] - 0s 8ms/step - loss: 4.3794 - accuracy:
0.9524
Epoch 35/50
1/1 [==============================] - 0s 39ms/step - loss: 1.5810 - accuracy:
0.9524
Epoch 36/50
1/1 [==============================] - 0s 9ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 37/50
1/1 [==============================] - 0s 9ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 38/50
1/1 [==============================] - 0s 9ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 39/50
1/1 [==============================] - 0s 10ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 40/50
1/1 [==============================] - 0s 10ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 41/50
1/1 [==============================] - 0s 38ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 42/50
1/1 [==============================] - 0s 9ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 43/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 44/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 45/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 46/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 47/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 48/50
1/1 [==============================] - 0s 40ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 49/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
Epoch 50/50
1/1 [==============================] - 0s 8ms/step - loss: 0.0000e+00 -
accuracy: 1.0000
[20]: print(training_history.history['loss'])
[278.45367431640625, 136.87132263183594, 873.1324462890625, 633.8700561523438,
947.1122436523438, 461.8853759765625, 93.02970123291016, 117.3218002319336,
351.93280029296875, 395.54937744140625, 298.42681884765625, 268.7654113769531,
232.76820373535156, 132.16763305664062, 24.242021560668945, 14.222150802612305,
45.745330810546875, 80.25296020507812, 77.28632354736328, 56.791893005371094,
42.776309967041016, 61.68613815307617, 83.48382568359375, 46.22612762451172,
22.9200496673584, 19.528507232666016, 14.837599754333496, 8.952764511108398,
1.969249963760376, 0.0, 1.94140625, 3.1183383464813232, 4.948056221008301,
4.379383087158203, 1.5809617042541504, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
[21]: test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
print('\nTest accuracy:', test_acc)
1/1 - 0s - loss: 321.6483 - accuracy: 0.5556 - 148ms/epoch - 148ms/step
Test accuracy: 0.5555555820465088
[22]: probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])
[23]: predictions = probability_model.predict(X_test)
1/1 [==============================] - 0s 86ms/step
[24]: predictions[0]
[24]: array([1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
[25]: np.argmax(predictions[0])
[25]: 0
[26]: y_test[0]
[26]: 0
[27]: class_names = ['ronaldo','musk','oprah']
[28]: def plot_image(i, predictions_array, true_label, img):
    true_label, img = true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)

def plot_value_array(i, predictions_array, true_label):
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
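plot_value_array draws 10 bars because the model has 10 output units, even though only three classes exist. A variant that shows only the real classes (a sketch, assuming the extra output units can simply be dropped from the display):

[ ]: # Sketch: show bars only for the three real classes.
def plot_value_array_classes(i, predictions_array, true_label):
    true_label = true_label[i]
    n = len(class_names)
    plt.grid(False)
    plt.xticks(range(n), class_names, rotation=45)
    plt.yticks([])
    thisplot = plt.bar(range(n), predictions_array[:n], color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array[:n])
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')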
[29]: i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], y_test, X_test)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], y_test)
plt.show()
[30]: # Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions[i], y_test, X_test)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions[i], y_test)
plt.tight_layout()
plt.show()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
Cell In[30], line 9
7 for i in range(num_images):
8 plt.subplot(num_rows, 2*num_cols, 2*i+1)
----> 9 plot_image(i, predictions[i], y_test, X_test)
10 plt.subplot(num_rows, 2*num_cols, 2*i+2)
11 plot_value_array(i, predictions[i], y_test)
IndexError: index 9 is out of bounds for axis 0 with size 9
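The IndexError above occurs because the grid asks for num_rows*num_cols = 15 panels while the test set holds only 9 images. A minimal fix is to clamp the loop to the number of available predictions:

[ ]: # Sketch of a fix: never plot more images than there are predictions.
num_images = min(num_rows * num_cols, len(predictions))
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions[i], y_test, X_test)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions[i], y_test)
plt.tight_layout()
plt.show()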
[31]: epoch = len(training_history.history.get('loss',[]))
# Draw Model Accuracy
plt.figure(2,figsize=(6,4))
plt.plot(range(epoch),training_history.history.get('accuracy'))
#plt.plot(range(epoch),training_history.history.get('val_acc'))
plt.xlabel('# Epochs')
plt.ylabel('Accuracy')
plt.title('Model Accuracy')
plt.grid(True)
plt.legend(['train','validation'],loc=4)
plt.style.use(['classic'])
# Draw Model Loss
plt.figure(1,figsize=(6,4))
plt.plot(range(epoch),training_history.history.get('loss'))
#plt.plot(range(epoch),training_history.history.get('val_loss'))
plt.xlabel('# Epochs')
plt.ylabel('Loss')
plt.title('Model Loss')
plt.grid(True)
plt.legend(['train','validation'], loc=4)
plt.style.use(['classic'])
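The commented-out validation curves above have no data because model.fit was called without a validation set, and in TF 2 the history keys are 'val_accuracy' / 'val_loss' rather than 'val_acc'. A sketch of how they could be populated (validation_split=0.2 is an assumed choice, and quite coarse for 21 training images):

[ ]: # Sketch only: retrain with a held-out split so validation curves exist.
training_history = model.fit(X_train_tf, y_train_tf, epochs=50, validation_split=0.2)
# plt.plot(range(epoch), training_history.history.get('val_accuracy'))
# plt.plot(range(epoch), training_history.history.get('val_loss'))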