EX.NO: RANDOM FOREST ALGORITHM
DATE:
PROGRAM:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from matplotlib.colors import ListedColormap
# Generate a 2-feature synthetic dataset; random_state added for reproducibility
X, y = make_classification(n_samples=500, n_features=2, n_informative=2, n_redundant=0,
                           n_clusters_per_class=1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Standardize the two features so both axes are on a comparable scale
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Train a random forest of 100 trees
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
# Build a mesh over the feature space and predict the class of every grid point
# to visualize the decision boundary
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                     np.arange(y_min, y_max, 0.01))
Z = rf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure(figsize=(10, 8))
cmap_background = ListedColormap(['#FFAAAA', '#AAAAFF'])
cmap_points = ListedColormap(['#FF0000', '#0000FF'])
plt.contourf(xx, yy, Z, alpha=0.8, cmap=cmap_background)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cmap_points, edgecolors='k', s=50)
plt.title("Random Forest Decision Boundary")
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.show()
accuracy = rf.score(X_test, y_test)
print(f"Test Accuracy: {accuracy * 100:.2f}%")
OUTPUT:
RESULT:
EX.NO: SUPPORT VECTOR MACHINE (SVM)
DATE:
PROGRAM:
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
iris = datasets.load_iris()
X = iris.data[:, :2] # use only the first two features for easier visualization
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = SVC(kernel='linear')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# All samples colored by class; training points ringed in black, test points in red
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.scatter(X_train[:, 0], X_train[:, 1], s=100, edgecolors='k', facecolors='none')
plt.scatter(X_test[:, 0], X_test[:, 1], s=100, edgecolors='r', facecolors='none')
plt.title('SVM Classification')
plt.show()
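Note: a supplementary, self-contained sketch (illustrative only, not part of the prescribed program) comparing the linear kernel used above with an RBF kernel on the same two iris features:

# Supplementary sketch: linear vs. RBF kernel SVM on the first two iris features.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

for kernel in ('linear', 'rbf'):
    clf = SVC(kernel=kernel)
    clf.fit(X_train, y_train)
    # n_support_ gives the number of support vectors chosen for each class
    print(f"{kernel} kernel: test accuracy = {clf.score(X_test, y_test):.2f}, "
          f"support vectors per class = {clf.n_support_}")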
OUTPUT:
RESULT:
EX.NO: APRIORI ALGORITHM
DATE:
PROGRAM:
from mlxtend.frequent_patterns import apriori, association_rules
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd
transactions = [
    ['milk', 'bread', 'butter'],
    ['bread', 'butter'],
    ['milk', 'bread'],
    ['milk', 'butter'],
    ['bread'],
    ['milk', 'bread', 'butter']
]
# One-hot encode the transactions into a boolean item matrix
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)
# Mine itemsets appearing in at least 50% of transactions,
# then keep rules with confidence of at least 0.7
frequent_itemsets = apriori(df, min_support=0.5, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
print("Frequent Itemsets:")
print(frequent_itemsets)
print("\nAssociation Rules:")
print(rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']])
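Note: to connect the library output to the definitions, the following self-contained sketch (illustrative only, not part of the prescribed program) recomputes support, confidence, and lift for one rule, {butter} -> {bread}, by direct counting over the same six transactions:

# Supplementary sketch: verifying one rule's metrics by direct counting.
transactions = [
    ['milk', 'bread', 'butter'],
    ['bread', 'butter'],
    ['milk', 'bread'],
    ['milk', 'butter'],
    ['bread'],
    ['milk', 'bread', 'butter'],
]
n = len(transactions)

def support(itemset):
    # Fraction of transactions that contain every item in the itemset
    return sum(set(itemset) <= set(t) for t in transactions) / n

# Rule: {butter} -> {bread}
sup_both = support(['butter', 'bread'])   # 3/6 = 0.50
conf = sup_both / support(['butter'])     # 0.50 / (4/6) = 0.75
lift = conf / support(['bread'])          # 0.75 / (5/6) = 0.90
print(f"support={sup_both:.2f}, confidence={conf:.2f}, lift={lift:.2f}")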
OUTPUT:
RESULT: