# -*- coding: utf-8 -*-
"""lethality.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1nD5rS8EG9hQ-mDOgnLKVY_n5xy1ktdEN
"""
!pip install ultralytics
from google.colab import drive
drive.mount('/content/drive')
from ultralytics import YOLO
import os
import shutil
from random import choice

# arrays to store image / annotation file names
imgs = []
xmls = []
# setup dir names
trainPath = '/content/drive/MyDrive/weaponsdataset/images/train'
valPath = '/content/drive/MyDrive/weaponsdataset/images/val'
crsPath = '/content/drive/MyDrive/weapons'  # dir where images and annotations are stored

# setup split ratios (val takes whatever remains in the origin dir after the train split)
train_ratio = 0.8
val_ratio = 0.2
# total count of images (each image has a matching .txt annotation, so halve the file count)
totalImgCount = len(os.listdir(crsPath)) // 2
# sort files into the corresponding arrays
for (dirname, dirs, files) in os.walk(crsPath):
    for filename in files:
        if filename.endswith('.txt'):
            xmls.append(filename)
        else:
            imgs.append(filename)
#counting range for cycles
countForTrain = int(len(imgs)*train_ratio)
countForVal = int(len(imgs)*val_ratio)
print("Total number of images: ", len(imgs))
print("training images: ",countForTrain)
print("Validation images: ",countForVal)
trainimagePath = '/content/drive/MyDrive/weaponsdataset/images/train'
trainlabelPath = '/content/drive/MyDrive/weaponsdataset/labels/train'
valimagePath = '/content/drive/MyDrive/weaponsdataset/images/val'
vallabelPath = '/content/drive/MyDrive/weaponsdataset/labels/val'
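# The copy loops below assume these train/val folders already exist on Drive.
# If they might not, creating them up front avoids a FileNotFoundError
# (a small precaution, not part of the original split logic):
for p in (trainimagePath, trainlabelPath, valimagePath, vallabelPath):
    os.makedirs(p, exist_ok=True)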
# cycle for train dir
for x in range(countForTrain):
    fileJpg = choice(imgs)  # name of a random image from the origin dir
    fileXml = os.path.splitext(fileJpg)[0] + '.txt'  # name of the corresponding annotation file

    # copy both files into the train dirs (swap shutil.copy for shutil.move to move instead)
    shutil.copy(os.path.join(crsPath, fileJpg), os.path.join(trainimagePath, fileJpg))
    shutil.copy(os.path.join(crsPath, fileXml), os.path.join(trainlabelPath, fileXml))

    # remove files from the arrays so they are not picked again
    imgs.remove(fileJpg)
    xmls.remove(fileXml)
# cycle for val dir
for x in range(countForVal):
    fileJpg = choice(imgs)  # name of a random image from the origin dir
    fileXml = os.path.splitext(fileJpg)[0] + '.txt'  # name of the corresponding annotation file

    # copy both files into the val dirs (swap shutil.copy for shutil.move to move instead)
    shutil.copy(os.path.join(crsPath, fileJpg), os.path.join(valimagePath, fileJpg))
    shutil.copy(os.path.join(crsPath, fileXml), os.path.join(vallabelPath, fileXml))

    # remove files from the arrays so they are not picked again
    imgs.remove(fileJpg)
    xmls.remove(fileXml)
#rest of files will be validation files, so rename origin dir to val dir
#os.rename(crsPath, valPath)
#shutil.move(crsPath, valPath)
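# The split above is consumed by YOLO through the dataset YAML passed to
# model.train() below. data250.yaml is expected to follow the standard
# Ultralytics dataset format; a sketch of its likely contents (the paths and
# class names shown here are assumptions, not read from the actual file):
#
#   path: /content/drive/MyDrive/weaponsdataset
#   train: images/train
#   val: images/val
#   names:
#     0: weapon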
# Define the model
model = YOLO("yolov8n.pt")
# Use the model
model.train(data='/content/drive/MyDrive/data250.yaml', epochs=100)
model.save('/content/drive/MyDrive/final_weapons.pt')
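# Quick sanity check of the trained detector on one validation image.
# A minimal sketch, assuming the split above copied at least one image into
# valimagePath; the 0.25 confidence threshold is an arbitrary choice.
sample_img = os.path.join(valimagePath, os.listdir(valimagePath)[0])
results = model.predict(sample_img, conf=0.25)
print(results[0].boxes)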
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, Dropout, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import os
#TENSORFLOW MODEL
IMG_SIZE = (224, 224)
BATCH_SIZE = 32
EPOCHS = 20
NUM_CLASSES = 7
dataset_dir = "/content/drive/MyDrive/weapons" # Replace with your dataset path
train_dir = os.path.join(dataset_dir, "train")
val_dir = os.path.join(dataset_dir, "valid")
test_dir = os.path.join(dataset_dir, "test")
# Data Augmentation and Generators
train_datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode="nearest"
)
val_test_datagen = ImageDataGenerator(rescale=1.0 / 255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical"
)
val_generator = val_test_datagen.flow_from_directory(
    val_dir,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical"
)
test_generator = val_test_datagen.flow_from_directory(
    test_dir,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    shuffle=False
)
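# Sanity check: flow_from_directory assigns class indices from the alphabetical
# order of the sub-folder names, so it is worth confirming the mapping and that
# the class count matches NUM_CLASSES.
print(train_generator.class_indices)
assert train_generator.num_classes == NUM_CLASSES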
# Model Definition
base_model = MobileNetV2(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

# Freeze the base model layers
base_model.trainable = False

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.3)(x)
x = Dense(128, activation="relu")(x)
x = Dropout(0.3)(x)
output = Dense(NUM_CLASSES, activation="softmax")(x)

model = Model(inputs=base_model.input, outputs=output)

# Compile the model
model.compile(optimizer=Adam(learning_rate=0.001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# Model Training
history = model.fit(
    train_generator,
    epochs=EPOCHS,
    validation_data=val_generator
)
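# A small sketch for visualising the first training phase; history.history
# holds the "accuracy" / "val_accuracy" series because the model was compiled
# with metrics=["accuracy"].
import matplotlib.pyplot as plt
plt.plot(history.history["accuracy"], label="train acc")
plt.plot(history.history["val_accuracy"], label="val acc")
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.legend()
plt.show()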
# Fine-Tuning (unfreeze the base model and retrain with a lower learning rate)
base_model.trainable = True
model.compile(optimizer=Adam(learning_rate=0.0001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
history_fine = model.fit(
    train_generator,
    epochs=10,
    validation_data=val_generator
)
# Evaluate on Test Data
test_loss, test_accuracy = model.evaluate(test_generator)
print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
# Class Labels
class_labels = list(train_generator.class_indices.keys())
print("Class Labels:", class_labels)
# Predict on Test Data
predictions = model.predict(test_generator)
predicted_classes = tf.argmax(predictions, axis=1)
true_classes = test_generator.classes
# Confusion Matrix
from sklearn.metrics import classification_report, confusion_matrix
print("Classification Report:\n")
print(classification_report(true_classes, predicted_classes, target_names=class_labels))
print("Confusion Matrix:\n")
print(confusion_matrix(true_classes, predicted_classes))
# Save the Model
model.save("weapon_detection_model.h5")