breastcancer
October 1, 2023
[369]: import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
import random
Working on the dataset
[370]: BCD = sklearn.datasets.load_breast_cancer()
[371]: print(BCD)
{'data': array([[1.799e+01, 1.038e+01, 1.228e+02, …, 2.654e-01, 4.601e-01, 1.189e-01],
       [2.057e+01, 1.777e+01, 1.329e+02, …, 1.860e-01, 2.750e-01, 8.902e-02],
       [1.969e+01, 2.125e+01, 1.300e+02, …, 2.430e-01, 3.613e-01, 8.758e-02],
       …,
       [1.660e+01, 2.808e+01, 1.083e+02, …, 1.418e-01, 2.218e-01, 7.820e-02],
       [2.060e+01, 2.933e+01, 1.401e+02, …, 2.650e-01, 4.087e-01, 1.240e-01],
       [7.760e+00, 2.454e+01, 4.792e+01, …, 0.000e+00, 2.871e-01, 7.039e-02]]),
 'target': array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …, 0, 0, 0, 0, 0, 0, 1]),
 'frame': None,
 'target_names': array(['malignant', 'benign'], dtype='<U9'),
 'DESCR': '.. _breast_cancer_dataset:\n\nBreast cancer wisconsin (diagnostic) dataset\n…',
 'feature_names': array(['mean radius', 'mean texture', 'mean perimeter', 'mean area',
       'mean smoothness', 'mean compactness', 'mean concavity',
       'mean concave points', 'mean symmetry', 'mean fractal dimension',
       'radius error', 'texture error', 'perimeter error', 'area error',
       'smoothness error', 'compactness error', 'concavity error',
       'concave points error', 'symmetry error', 'fractal dimension error',
       'worst radius', 'worst texture', 'worst perimeter', 'worst area',
       'worst smoothness', 'worst compactness', 'worst concavity',
       'worst concave points', 'worst symmetry', 'worst fractal dimension'],
      dtype='<U23'),
 'filename': 'breast_cancer.csv',
 'data_module': 'sklearn.datasets.data'}
(Output truncated. The full target array and DESCR string are omitted here; DESCR documents 569
instances with 30 numeric predictive attributes (the mean, standard error and "worst" value of
ten cell-nucleus features computed from digitized fine-needle-aspirate images), no missing
values, and a class distribution of 212 malignant / 357 benign, from the UCI ML Breast Cancer
Wisconsin (Diagnostic) dataset by Wolberg, Street and Mangasarian.)
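The loader returns a scikit-learn Bunch whose data, target and feature_names fields are combined into a DataFrame in the next cell. Depending on the scikit-learn version, the same frame can also be obtained in one step; a minimal sketch, assuming scikit-learn 0.23 or newer (where the as_frame argument exists):

# Sketch: load the dataset directly as a DataFrame (optional alternative to building it by hand).
bunch = sklearn.datasets.load_breast_cancer(as_frame=True)
frame = bunch.frame.rename(columns={'target': 'Diagnosis'})  # 30 feature columns plus the label
print(frame.shape)  # expected: (569, 31)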
[372]: DF_BCD = pd.DataFrame(BCD.data, columns=BCD.feature_names)
DF_BCD['Diagnosis'] = BCD.target
DF_BCD.head()
[372]: mean radius mean texture mean perimeter mean area mean smoothness \
0 17.99 10.38 122.80 1001.0 0.11840
1 20.57 17.77 132.90 1326.0 0.08474
2 19.69 21.25 130.00 1203.0 0.10960
3 11.42 20.38 77.58 386.1 0.14250
4 20.29 14.34 135.10 1297.0 0.10030
mean compactness mean concavity mean concave points mean symmetry \
0 0.27760 0.3001 0.14710 0.2419
1 0.07864 0.0869 0.07017 0.1812
2 0.15990 0.1974 0.12790 0.2069
3 0.28390 0.2414 0.10520 0.2597
4 0.13280 0.1980 0.10430 0.1809
mean fractal dimension … worst texture worst perimeter worst area \
0 0.07871 … 17.33 184.60 2019.0
1 0.05667 … 23.41 158.80 1956.0
2 0.05999 … 25.53 152.50 1709.0
3 0.09744 … 26.50 98.87 567.7
4 0.05883 … 16.67 152.20 1575.0
worst smoothness worst compactness worst concavity worst concave points \
0 0.1622 0.6656 0.7119 0.2654
1 0.1238 0.1866 0.2416 0.1860
2 0.1444 0.4245 0.4504 0.2430
3 0.2098 0.8663 0.6869 0.2575
4 0.1374 0.2050 0.4000 0.1625
worst symmetry worst fractal dimension Diagnosis
0 0.4601 0.11890 0
1 0.2750 0.08902 0
2 0.3613 0.08758 0
3 0.6638 0.17300 0
4 0.2364 0.07678 0
[5 rows x 31 columns]
[373]: DF_BCD.tail()
[373]: mean radius mean texture mean perimeter mean area mean smoothness \
564 21.56 22.39 142.00 1479.0 0.11100
565 20.13 28.25 131.20 1261.0 0.09780
566 16.60 28.08 108.30 858.1 0.08455
567 20.60 29.33 140.10 1265.0 0.11780
568 7.76 24.54 47.92 181.0 0.05263
mean compactness mean concavity mean concave points mean symmetry \
564 0.11590 0.24390 0.13890 0.1726
565 0.10340 0.14400 0.09791 0.1752
566 0.10230 0.09251 0.05302 0.1590
567 0.27700 0.35140 0.15200 0.2397
568 0.04362 0.00000 0.00000 0.1587
mean fractal dimension … worst texture worst perimeter worst area \
564 0.05623 … 26.40 166.10 2027.0
565 0.05533 … 38.25 155.00 1731.0
566 0.05648 … 34.12 126.70 1124.0
567 0.07016 … 39.42 184.60 1821.0
568 0.05884 … 30.37 59.16 268.6
worst smoothness worst compactness worst concavity \
564 0.14100 0.21130 0.4107
565 0.11660 0.19220 0.3215
566 0.11390 0.30940 0.3403
567 0.16500 0.86810 0.9387
568 0.08996 0.06444 0.0000
worst concave points worst symmetry worst fractal dimension Diagnosis
564 0.2216 0.2060 0.07115 0
565 0.1628 0.2572 0.06637 0
566 0.1418 0.2218 0.07820 0
567 0.2650 0.4087 0.12400 0
568 0.0000 0.2871 0.07039 1
[5 rows x 31 columns]
[374]: DF_BCD.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 569 entries, 0 to 568
Data columns (total 31 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 mean radius 569 non-null float64
1 mean texture 569 non-null float64
2 mean perimeter 569 non-null float64
3 mean area 569 non-null float64
4 mean smoothness 569 non-null float64
5 mean compactness 569 non-null float64
6 mean concavity 569 non-null float64
7 mean concave points 569 non-null float64
8 mean symmetry 569 non-null float64
9 mean fractal dimension 569 non-null float64
10 radius error 569 non-null float64
11 texture error 569 non-null float64
12 perimeter error 569 non-null float64
13 area error 569 non-null float64
14 smoothness error 569 non-null float64
15 compactness error 569 non-null float64
16 concavity error 569 non-null float64
17 concave points error 569 non-null float64
18 symmetry error 569 non-null float64
19 fractal dimension error 569 non-null float64
20 worst radius 569 non-null float64
21 worst texture 569 non-null float64
22 worst perimeter 569 non-null float64
23 worst area 569 non-null float64
24 worst smoothness 569 non-null float64
25 worst compactness 569 non-null float64
26 worst concavity 569 non-null float64
27 worst concave points 569 non-null float64
28 worst symmetry 569 non-null float64
29 worst fractal dimension 569 non-null float64
30 Diagnosis 569 non-null int64
dtypes: float64(30), int64(1)
memory usage: 137.9 KB
[375]: DF_BCD['Diagnosis'].value_counts()
[375]: 1 357
0 212
Name: Diagnosis, dtype: int64
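Note the label encoding: per target_names above, 0 corresponds to malignant and 1 to benign, so these counts match the stated class distribution of 212 malignant and 357 benign. A quick check (sketch):

# Sketch: confirm how the integer labels map onto the class names.
for code, name in enumerate(BCD.target_names):
    print(code, '->', name)  # 0 -> malignant, 1 -> benign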
[376]: DF_BCD.groupby('Diagnosis').mean()
[376]: mean radius mean texture mean perimeter mean area \
Diagnosis
0 17.462830 21.604906 115.365377 978.376415
1 12.146524 17.914762 78.075406 462.790196
mean smoothness mean compactness mean concavity \
Diagnosis
0 0.102898 0.145188 0.160775
1 0.092478 0.080085 0.046058
mean concave points mean symmetry mean fractal dimension … \
Diagnosis …
0 0.087990 0.192909 0.062680 …
1 0.025717 0.174186 0.062867 …
worst radius worst texture worst perimeter worst area \
Diagnosis
0 21.134811 29.318208 141.370330 1422.286321
1 13.379801 23.515070 87.005938 558.899440
worst smoothness worst compactness worst concavity \
Diagnosis
0 0.144845 0.374824 0.450606
1 0.124959 0.182673 0.166238
worst concave points worst symmetry worst fractal dimension
Diagnosis
0 0.182237 0.323468 0.091530
1 0.074444 0.270246 0.079442
[2 rows x 30 columns]
Splitting the dataset for training and testing
[378]: BCD_FEATURES = DF_BCD.drop(columns='Diagnosis', axis=1)
BCD_TARGET = DF_BCD['Diagnosis']
BCDF_TRAIN, BCDF_TEST, BCDT_TRAIN, BCDT_TEST = train_test_split(BCD_FEATURES, BCD_TARGET, test_size=0.2, random_state=0)
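Because the two classes are imbalanced (212 malignant vs 357 benign), the split could optionally preserve that ratio in both subsets. A hedged variant of the same call, using the stratify argument (not what this notebook used):

# Sketch: a stratified version of the split above.
BCDF_TRAIN, BCDF_TEST, BCDT_TRAIN, BCDT_TEST = train_test_split(
    BCD_FEATURES, BCD_TARGET, test_size=0.2, random_state=0, stratify=BCD_TARGET)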
Standardizing the data
[379]: scaler = StandardScaler()
BCDF_TRAIN_std = scaler.fit_transform(BCDF_TRAIN)
BCDF_TEST_std = scaler.transform(BCDF_TEST)
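StandardScaler rescales each feature to zero mean and unit variance using statistics computed on the training split only; the test split is transformed with those same statistics to avoid leakage. A quick sanity check (sketch):

# Sketch: the standardized training features should have roughly 0 mean and 1 standard deviation.
print(np.round(BCDF_TRAIN_std.mean(axis=0), 3))  # close to 0 for all 30 columns
print(np.round(BCDF_TRAIN_std.std(axis=0), 3))   # close to 1 for all 30 columns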
Constructing the neural network
[380]: random.seed(0)
np.random.seed(0)
tf.random.set_seed(0)
[381]: # Create a Sequential model
model = tf.keras.models.Sequential([
tf.keras.layers.Input(shape=(30,)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dropout(0.2), # Add dropout for regularization
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation='sigmoid')
])
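The network is a small fully connected classifier: 30 inputs, a 64-unit ReLU layer, dropout, a 32-unit ReLU layer, dropout, and a single sigmoid output giving the probability of class 1 (benign). Its size can be confirmed with model.summary(); a sketch of the expected parameter count:

# Sketch: inspect the model defined above.
model.summary()
# Dense(64): 30*64 + 64 = 1984 weights
# Dense(32): 64*32 + 32 = 2080 weights
# Dense(1):  32*1  + 1  =   33 weights
# Total trainable parameters: 4097 (the dropout layers add none)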
Compiling the neural network
[382]: model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
[383]: # Use early stopping to prevent overfitting
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
Training the model
[384]: history = model.fit(BCDF_TRAIN_std, BCDT_TRAIN, validation_split=0.1, batch_size=32, epochs=50, callbacks=[early_stopping], verbose=1)
Epoch 1/50
13/13 [==============================] - 1s 19ms/step - loss: 0.5774 - accuracy:
0.7017 - val_loss: 0.4539 - val_accuracy: 0.8913
Epoch 2/50
13/13 [==============================] - 0s 6ms/step - loss: 0.3592 - accuracy:
0.8631 - val_loss: 0.3140 - val_accuracy: 0.9348
Epoch 3/50
13/13 [==============================] - 0s 6ms/step - loss: 0.2552 - accuracy:
0.9291 - val_loss: 0.2312 - val_accuracy: 0.9565
Epoch 4/50
13/13 [==============================] - 0s 4ms/step - loss: 0.2001 - accuracy:
0.9315 - val_loss: 0.1805 - val_accuracy: 0.9565
Epoch 5/50
13/13 [==============================] - 0s 6ms/step - loss: 0.1687 - accuracy:
0.9438 - val_loss: 0.1499 - val_accuracy: 0.9565
Epoch 6/50
13/13 [==============================] - 0s 7ms/step - loss: 0.1414 - accuracy:
0.9487 - val_loss: 0.1313 - val_accuracy: 0.9565
Epoch 7/50
13/13 [==============================] - 0s 5ms/step - loss: 0.1181 - accuracy:
0.9633 - val_loss: 0.1182 - val_accuracy: 0.9565
Epoch 8/50
13/13 [==============================] - 0s 12ms/step - loss: 0.1086 - accuracy:
0.9731 - val_loss: 0.1081 - val_accuracy: 0.9565
Epoch 9/50
13/13 [==============================] - 0s 8ms/step - loss: 0.0953 - accuracy:
0.9804 - val_loss: 0.1029 - val_accuracy: 0.9565
Epoch 10/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0904 - accuracy:
0.9731 - val_loss: 0.0984 - val_accuracy: 0.9565
Epoch 11/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0812 - accuracy:
0.9780 - val_loss: 0.0926 - val_accuracy: 0.9565
Epoch 12/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0733 - accuracy:
0.9853 - val_loss: 0.0864 - val_accuracy: 0.9565
Epoch 13/50
13/13 [==============================] - 0s 7ms/step - loss: 0.0787 - accuracy:
0.9853 - val_loss: 0.0874 - val_accuracy: 0.9565
Epoch 14/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0639 - accuracy:
0.9878 - val_loss: 0.0851 - val_accuracy: 0.9565
Epoch 15/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0641 - accuracy:
0.9731 - val_loss: 0.0876 - val_accuracy: 0.9565
Epoch 16/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0600 - accuracy:
0.9829 - val_loss: 0.0891 - val_accuracy: 0.9565
Epoch 17/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0609 - accuracy:
0.9804 - val_loss: 0.0858 - val_accuracy: 0.9565
Epoch 18/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0582 - accuracy:
0.9829 - val_loss: 0.0890 - val_accuracy: 0.9565
Epoch 19/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0464 - accuracy:
0.9853 - val_loss: 0.0838 - val_accuracy: 0.9565
Epoch 20/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0643 - accuracy:
0.9780 - val_loss: 0.0822 - val_accuracy: 0.9565
Epoch 21/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0511 - accuracy:
0.9878 - val_loss: 0.0773 - val_accuracy: 0.9565
Epoch 22/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0461 - accuracy:
0.9878 - val_loss: 0.0768 - val_accuracy: 0.9565
Epoch 23/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0623 - accuracy:
0.9829 - val_loss: 0.0756 - val_accuracy: 0.9565
Epoch 24/50
13/13 [==============================] - 0s 8ms/step - loss: 0.0483 - accuracy:
0.9853 - val_loss: 0.0716 - val_accuracy: 0.9565
Epoch 25/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0453 - accuracy:
0.9829 - val_loss: 0.0736 - val_accuracy: 0.9783
Epoch 26/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0423 - accuracy:
0.9829 - val_loss: 0.0720 - val_accuracy: 0.9783
Epoch 27/50
13/13 [==============================] - 0s 7ms/step - loss: 0.0567 - accuracy:
0.9804 - val_loss: 0.0722 - val_accuracy: 0.9565
Epoch 28/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0417 - accuracy:
0.9853 - val_loss: 0.0714 - val_accuracy: 0.9565
Epoch 29/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0435 - accuracy:
0.9804 - val_loss: 0.0688 - val_accuracy: 0.9565
Epoch 30/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0400 - accuracy:
0.9902 - val_loss: 0.0705 - val_accuracy: 0.9565
Epoch 31/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0400 - accuracy:
0.9878 - val_loss: 0.0673 - val_accuracy: 0.9565
Epoch 32/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0312 - accuracy:
0.9829 - val_loss: 0.0666 - val_accuracy: 0.9565
Epoch 33/50
13/13 [==============================] - 0s 7ms/step - loss: 0.0286 - accuracy:
0.9927 - val_loss: 0.0667 - val_accuracy: 0.9565
Epoch 34/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0348 - accuracy:
0.9878 - val_loss: 0.0626 - val_accuracy: 0.9565
Epoch 35/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0353 - accuracy:
0.9878 - val_loss: 0.0676 - val_accuracy: 0.9565
Epoch 36/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0309 - accuracy:
0.9878 - val_loss: 0.0708 - val_accuracy: 0.9565
Epoch 37/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0332 - accuracy:
0.9902 - val_loss: 0.0676 - val_accuracy: 0.9565
Epoch 38/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0280 - accuracy:
0.9878 - val_loss: 0.0708 - val_accuracy: 0.9565
Epoch 39/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0256 - accuracy:
0.9902 - val_loss: 0.0748 - val_accuracy: 0.9565
Epoch 40/50
13/13 [==============================] - 0s 4ms/step - loss: 0.0304 - accuracy:
0.9902 - val_loss: 0.0739 - val_accuracy: 0.9565
Epoch 41/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0291 - accuracy:
0.9902 - val_loss: 0.0706 - val_accuracy: 0.9565
Epoch 42/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0257 - accuracy:
0.9927 - val_loss: 0.0666 - val_accuracy: 0.9565
Epoch 43/50
13/13 [==============================] - 0s 5ms/step - loss: 0.0262 - accuracy:
0.9878 - val_loss: 0.0712 - val_accuracy: 0.9565
Epoch 44/50
13/13 [==============================] - 0s 6ms/step - loss: 0.0267 - accuracy:
0.9951 - val_loss: 0.0750 - val_accuracy: 0.9565
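Training stopped after epoch 44 rather than the full 50 epochs: the lowest validation loss in the log (0.0626) occurred at epoch 34, and with patience=10 and no further improvement the callback halted ten epochs later; because restore_best_weights=True, the weights were rolled back to that best epoch. The same information can be read back from the history object; a sketch:

# Sketch: locate the best-validation-loss epoch that early stopping restored.
best_epoch = int(np.argmin(history.history['val_loss'])) + 1
print('Best epoch:', best_epoch, 'val_loss:', min(history.history['val_loss']))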
[385]: # Visualize training history
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training data', 'Validation data'], loc='lower right')
plt.show()
[386]: plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training data', 'Validation data'], loc='upper right')
plt.show()
[387]: # Evaluate the model on the test set
loss, accuracy = model.evaluate(BCDF_TEST_std, BCDT_TEST)
print(f'Test Loss: {loss}')
print(f'Test Accuracy: {accuracy * 100:.2f}%')
4/4 [==============================] - 0s 9ms/step - loss: 0.0712 - accuracy:
0.9474
Test Loss: 0.07115145772695541
Test Accuracy: 94.74%
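Accuracy alone can hide which class the errors fall on, which matters here because false negatives (malignant tumors predicted as benign) are the costly mistakes. A hedged sketch of a per-class breakdown on the same test split, using scikit-learn's metrics:

# Sketch: per-class precision/recall, thresholding the sigmoid output at 0.5.
from sklearn.metrics import classification_report, confusion_matrix

test_probs = model.predict(BCDF_TEST_std)
test_preds = (test_probs >= 0.5).astype(int).ravel()
print(confusion_matrix(BCDT_TEST, test_preds))  # rows: true 0/1, columns: predicted 0/1
print(classification_report(BCDT_TEST, test_preds, target_names=['malignant', 'benign']))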
Building the predictive system
[388]: # Make predictions for a sample input
input_data = np.array([20.57, 17.77, 132.90, 1326.0, 0.08474, 0.07864, 0.0869, 0.07017,
                       0.1812, 0.05667, 0.5435, 0.7339, 3.398, 74.08, 0.005225, 0.01308,
                       0.01860, 0.01340, 0.01389, 0.003532, 24.99, 23.41, 158.80, 1956.0,
                       0.1238, 0.1866, 0.2416, 0.1860, 0.2750, 0.08902])
input_data_reshaped = input_data.reshape(1, -1)
input_data_std = scaler.transform(input_data_reshaped)
/usr/local/lib/python3.10/dist-packages/sklearn/base.py:439: UserWarning: X does
not have valid feature names, but StandardScaler was fitted with feature names
warnings.warn(
[389]: prediction = model.predict(input_data_std)
1/1 [==============================] - 0s 252ms/step
[390]: if prediction[0][0] < 0.5:
print('The tumor is Malignant')
else:
print('The tumor is Benign')
The tumor is Malignant
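The UserWarning above appears because the scaler was fitted on a DataFrame (with column names) but was then given a bare NumPy array. One way to avoid it is to wrap the sample in a DataFrame carrying the same feature names before transforming; a sketch:

# Sketch: pass the sample with feature names so StandardScaler sees the columns it was fitted on.
input_df = pd.DataFrame([input_data], columns=BCD.feature_names)
input_df_std = scaler.transform(input_df)
prediction = model.predict(input_df_std)
print('The tumor is Benign' if prediction[0][0] >= 0.5 else 'The tumor is Malignant')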