Bridge The Gap
import networkx as nx
import matplotlib.pyplot as plt
def create_network():
  # Create an empty graph
  G = nx.Graph()
  # Add nodes
  G.add_nodes_from(range(1, 11))
  # Add edges
  edges = [(1, 2), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7), (4, 8), (5, 9), (6, 10)]
  G.add_edges_from(edges)
  return G
def segment_network(G):
  # Perform network segmentation
  # For demonstration, let's divide the network into two segments based on node attributes
  segment_1 = [n for n, d in G.nodes(data=True) if d.get('segment') == 1]
  segment_2 = [n for n, d in G.nodes(data=True) if d.get('segment') == 2]
  return segment_1, segment_2
def main():
  # Create a network
  G = create_network()
  # Assign segments to nodes (for demonstration purposes)
  for node in G.nodes():
     if node % 2 == 0:
        G.nodes[node]['segment'] = 1
     else:
        G.nodes[node]['segment'] = 2
  # Segment the network
  segment_1, segment_2 = segment_network(G)
  # Print the segments
  print("Segment 1:", segment_1)
  print("Segment 2:", segment_2)
  # Draw the network for visualization
  pos = nx.spring_layout(G)
  nx.draw(G, pos, with_labels=True, node_color='skyblue', node_size=500)
  plt.title("Network Segmentation")
  plt.show()
if __name__ == "__main__":
   main()
Output
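To make the two segments visible in the plot, each segment can be drawn in its own color. A minimal sketch, assuming G, pos, segment_1, and segment_2 as defined in main() above (the colors are arbitrary choices):

# Hypothetical extension: draw each segment in a different color.
nx.draw_networkx_nodes(G, pos, nodelist=segment_1, node_color='skyblue', node_size=500)
nx.draw_networkx_nodes(G, pos, nodelist=segment_2, node_color='salmon', node_size=500)
nx.draw_networkx_edges(G, pos)
nx.draw_networkx_labels(G, pos)
plt.title("Network Segmentation (colored by segment)")
plt.show()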
Assignment 1
Server.py
from xmlrpc.server import SimpleXMLRPCServer
def factorial(n):
  if n == 0:
     return 1
  else:
     return n * factorial(n-1)
server = SimpleXMLRPCServer(('localhost', 8000))
server.register_function(factorial, 'calculate_factorial')
print("Server is ready to accept RPC calls...")
server.serve_forever()
Output
Client.py
import xmlrpc.client
def main():
  server = xmlrpc.client.ServerProxy('http://localhost:8000')
  n = int(input("Enter the number to calculate factorial: "))
  result = server.calculate_factorial(n)
  print(f"The factorial of {n} is: {result}")
if __name__ == "__main__":
   main()
Output
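Server.py must be running before Client.py is started; the client's calculate_factorial call is forwarded over XML-RPC to the registered factorial function. A minimal non-interactive sketch of the same call (the value 5 is an arbitrary example):

# Hypothetical quick test, assuming Server.py is already listening on port 8000.
import xmlrpc.client
proxy = xmlrpc.client.ServerProxy('http://localhost:8000')
print(proxy.calculate_factorial(5))  # expected output: 120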
Assignment 2
Server.py
import Pyro4
@Pyro4.expose
class StringConcatenator:
   def concatenate(self, str1, str2):
     return str1 + str2
daemon = Pyro4.Daemon()
uri = daemon.register(StringConcatenator)
print("Server URI:", uri)
daemon.requestLoop()
Output
Client.py
import Pyro4
uri = input("Enter the URI of the server: ")
concatenator = Pyro4.Proxy(uri)
str1 = input("Enter the first string: ")
str2 = input("Enter the second string: ")
result = concatenator.concatenate(str1, str2)
print("Concatenated string:", result)
Output
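The client expects the exact URI printed by Server.py (of the form PYRO:obj_...@localhost:port), which changes on every run because the daemon picks a random port and object id. A minimal sketch of a server variant with a predictable URI; the port 9090 and the object id "string.concat" are assumptions chosen here:

# Hypothetical variant of the server registration with a fixed port and object id.
daemon = Pyro4.Daemon(port=9090)
uri = daemon.register(StringConcatenator, objectId="string.concat")
print("Server URI:", uri)  # PYRO:string.concat@localhost:9090
daemon.requestLoop()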
Assignment 3
Text_File.txt
Hello Hello how are you!
Char_Count_Mr.py
from mrjob.job import MRJob
class MRCharCount(MRJob):
  def mapper(self, _, line):
    for char in line.strip():
       yield char, 1
  def reducer(self, char, counts):
    yield char, sum(counts)
if __name__ == '__main__':
   MRCharCount.run()
Output
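Assuming mrjob is installed (pip install mrjob), the job runs locally with:

python Char_Count_Mr.py Text_File.txt

Each (character, count) pair is printed on its own line. Note that interior spaces are counted too, since strip() only removes whitespace at the ends of each line.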
Word_Count_Mr.py
from mrjob.job import MRJob
import re
WORD_REGEXP = re.compile(r"[\w']+")
class MRWordCount(MRJob):
  def mapper(self, _, line):
    for word in WORD_REGEXP.findall(line):
       yield word.lower(), 1
  def reducer(self, word, counts):
    yield word, sum(counts)
if __name__ == '__main__':
   MRWordCount.run()
Output
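The word count job runs the same way (python Word_Count_Mr.py Text_File.txt). The regex [\w']+ splits each line into words and lower() folds case, so the sample file yields ("hello", 2) among its pairs. With mrjob, the identical script can also be submitted to a Hadoop cluster via the -r hadoop runner flag.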
Assignment 4
import random
class LoadBalancer:
    def __init__(self, servers):
        self.servers = servers

    def round_robin(self):
        # Cycle through the servers in a fixed order
        server_index = 0
        while True:
            yield self.servers[server_index]
            server_index = (server_index + 1) % len(self.servers)

    def random_selection(self):
        # Pick a server uniformly at random for each request
        while True:
            yield random.choice(self.servers)

    def least_connection(self):
        # Always pick the server currently handling the fewest connections
        while True:
            least_loaded = min(self.servers, key=lambda s: s.connections)
            least_loaded.connections += 1
            yield least_loaded

class Server:
    def __init__(self, name):
        self.name = name
        self.connections = 0
def simulate_requests(load_balancer, num_requests):
  print("Simulating {} requests...\n".format(num_requests))
  for i in range(num_requests):
      server = next(load_balancer)
      print("Request {} handled by Server {}".format(i+1, server.name))
  print("\nSimulation complete.")
if __name__ == "__main__":
    server1 = Server("Server1")
    server2 = Server("Server2")
    server3 = Server("Server3")
    servers = [server1, server2, server3]
    lb = LoadBalancer(servers)
    # Choose which load balancing algorithm to use
    # load_balancer = lb.round_robin()
    # load_balancer = lb.random_selection()
    load_balancer = lb.least_connection()
    simulate_requests(load_balancer, 10)
Output
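least_connection() increments a server's connection count on every assignment, but nothing ever decrements it, so the simulation models connections that stay open. If requests are expected to finish, a release step is needed; a minimal sketch (finish_request is a hypothetical helper, not part of the code above):

# Hypothetical helper: call when a request completes to free its connection slot.
def finish_request(server):
    server.connections = max(0, server.connections - 1)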
Assignment 5
import random
import numpy as np
# Define the objective function (fitness function)
def objective_function(x):
  # Example: Sphere function
  return sum([(i**2) for i in x])
# Generate random antibodies
def generate_antibodies(num_antibodies, num_dimensions, search_space):
  antibodies = []
  for _ in range(num_antibodies):
     antibody = [random.uniform(search_space[i][0], search_space[i][1]) for i in range(num_dimensions)]
     antibodies.append(antibody)
  return antibodies
# Clone operation
def clone(antibodies, num_clones, clone_factor):
  clones = []
  for antibody in antibodies:
     clones += [antibody] * int(num_clones * (1 / (1 + objective_function(antibody)*clone_factor)))
  return clones
# Hypermutation operation
def hypermutate(clones, mutation_rate, search_space):
  mutated_clones = []
  for clone in clones:
     mutated_clone = []
     for gene in range(len(clone)):
        if random.random() < mutation_rate:
            mutated_gene = clone[gene] + random.uniform(-0.5, 0.5) * (search_space[gene][1] - search_space[gene][0])
           if mutated_gene < search_space[gene][0]:
              mutated_gene = search_space[gene][0]
           elif mutated_gene > search_space[gene][1]:
              mutated_gene = search_space[gene][1]
           mutated_clone.append(mutated_gene)
        else:
           mutated_clone.append(clone[gene])
     mutated_clones.append(mutated_clone)
  return mutated_clones
# Select the best antibodies for the next generation
def select_antibodies(antibodies, clones, num_antibodies):
  combined_population = antibodies + clones
  combined_population.sort(key=lambda x: objective_function(x))
  return combined_population[:num_antibodies]
# Clonal Selection Algorithm
def clonal_selection_algorithm(num_antibodies, num_dimensions, search_space, num_generations,
                               num_clones, clone_factor, mutation_rate):
    antibodies = generate_antibodies(num_antibodies, num_dimensions, search_space)
    for generation in range(num_generations):
        clones = clone(antibodies, num_clones, clone_factor)
        mutated_clones = hypermutate(clones, mutation_rate, search_space)
        antibodies = select_antibodies(antibodies, mutated_clones, num_antibodies)
        # Print the best antibody in the current generation
        best_antibody = min(antibodies, key=lambda x: objective_function(x))
        print(f"Generation {generation + 1}: Best Antibody - {best_antibody}, "
              f"Fitness - {objective_function(best_antibody)}")
    return min(antibodies, key=lambda x: objective_function(x))
# Example usage
if __name__ == "__main__":
    num_antibodies = 50
    num_dimensions = 3
    search_space = [(-5, 5)] * num_dimensions  # Example search space, adjust as needed
    num_generations = 100
    num_clones = 10
    clone_factor = 0.1
    mutation_rate = 0.1
    best_solution = clonal_selection_algorithm(num_antibodies, num_dimensions, search_space,
                                               num_generations, num_clones, clone_factor, mutation_rate)
    print("Best Solution:", best_solution)
    print("Objective Value:", objective_function(best_solution))
Output
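Every run prints a different trajectory because antibody generation, cloning, and hypermutation are all randomized; for the sphere function the best antibody should approach the origin with fitness near 0. Seeding the generator makes a run reproducible; a minimal sketch (the seed value 42 is arbitrary):

# Hypothetical reproducibility tweak: place before the clonal_selection_algorithm call.
random.seed(42)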
Assignment 6
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import vgg19
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras import Model
import matplotlib.pyplot as plt
# Define paths to content and style images
content_path = "C:/Users/shrey/OneDrive/Desktop/Code/Ass6/content_image.jpg"
style_path = "C:/Users/shrey/OneDrive/Desktop/Code/Ass6/style_image.jpg"
# Define image dimensions
width, height = load_img(content_path).size
img_size = (height, width)
# Load and preprocess images
def load_and_preprocess_image(path):
  img = load_img(path, target_size=img_size)
  img = img_to_array(img)
  img = np.expand_dims(img, axis=0)
  img = vgg19.preprocess_input(img)
  return img
# De-process and display the generated image
def deprocess_img(processed_img):
  x = processed_img.copy()
  if len(x.shape) == 4:
     x = np.squeeze(x, 0)
  x[:, :, 0] += 103.939
  x[:, :, 1] += 116.779
  x[:, :, 2] += 123.68
  x = x[:, :, ::-1]
  x = np.clip(x, 0, 255).astype('uint8')
  return x
# Load and preprocess content and style images
content_image = load_and_preprocess_image(content_path)
style_image = load_and_preprocess_image(style_path)
# Display content and style images
plt.subplot(1, 2, 1)
plt.imshow(deprocess_img(content_image))
plt.title('Content Image')
plt.subplot(1, 2, 2)
plt.imshow(deprocess_img(style_image))
plt.title('Style Image')
plt.show()
# Define a VGG19 model for feature extraction
vgg = vgg19.VGG19(include_top=False, weights='imagenet')
vgg.trainable = False
# Get the outputs from intermediate layers
content_layers = ['block5_conv2']
style_layers = [
   'block1_conv1',
   'block2_conv1',
   'block3_conv1',
   'block4_conv1',
   'block5_conv1',
]
content_outputs = [vgg.get_layer(name).output for name in content_layers]
style_outputs = [vgg.get_layer(name).output for name in style_layers]
model_outputs = content_outputs + style_outputs
# Build model
model = Model(inputs=vgg.input, outputs=model_outputs)
# Calculate content and style representations
def get_feature_representations(model, content_path, style_path):
  content_image = load_and_preprocess_image(content_path)
  style_image = load_and_preprocess_image(style_path)
  content_outputs = model(content_image)
  style_outputs = model(style_image)
  content_features = [layer[0] for layer in content_outputs[:len(content_layers)]]
  # Ensure style features have the same shape as content features
  style_features = [tf.expand_dims(layer[0], axis=0) for layer in style_outputs[len(content_layers):]]
  return content_features, style_features
content_features, style_features = get_feature_representations(model, content_path, style_path)
# Define and build the Gram matrix
def gram_matrix(input_tensor):
  result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)
  input_shape = tf.shape(input_tensor)
  num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
  return result / num_locations
# Define the style loss function
def style_loss(style, generated):
  style_gram = gram_matrix(style)
  generated_gram = gram_matrix(generated)
  return tf.reduce_mean(tf.square(style_gram - generated_gram))
# Define the content loss function
def content_loss(content, generated):
  return tf.reduce_mean(tf.square(content - generated))
# Define total variation loss
def total_variation_loss(image):
  x_var = tf.square(image[:, :-1, :-1, :] - image[:, 1:, :-1, :])
  y_var = tf.square(image[:, :-1, :-1, :] - image[:, :-1, 1:, :])
  return tf.reduce_mean(x_var + y_var)
# Define weights for content, style, and total variation loss
content_weight = 1e3
style_weight = 1e-2
total_variation_weight = 30
# Define optimizer
optimizer = tf.optimizers.Adam(learning_rate=5, beta_1=0.99, epsilon=1e-1)
# Generate the target image with neural style transfer
target_image = tf.Variable(content_image, dtype=tf.float32)
@tf.function()
def train_step(image):
    with tf.GradientTape() as tape:
        outputs = model(image)
        content_loss_val = 0
        style_loss_val = 0
        content_features_gen = outputs[:len(content_layers)]
        style_features_gen = outputs[len(content_layers):]
        for c, c_gen in zip(content_features, content_features_gen):
            content_loss_val += content_loss(c, c_gen)
        for s, s_gen in zip(style_features, style_features_gen):
            style_loss_val += style_loss(s, s_gen)
        content_loss_val *= content_weight / len(content_layers)
        style_loss_val *= style_weight / len(style_layers)
        total_variation_loss_val = total_variation_loss(image) * total_variation_weight
        total_loss = content_loss_val + style_loss_val + total_variation_loss_val
    grad = tape.gradient(total_loss, image)
    optimizer.apply_gradients([(grad, image)])
    image.assign(tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0))
# Number of optimization steps
num_iterations = 1000
# Optimization loop
for i in range(num_iterations):
   train_step(target_image)
# Display the final generated image
plt.imshow(deprocess_img(target_image.numpy()))
plt.title('Generated Image')
plt.show()
Output
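The generated image only appears after all 1000 steps complete. The loop can also report progress and save the result; a minimal sketch, where the 100-step interval and the filename stylized.png are assumptions:

# Hypothetical progress/saving variant of the optimization loop above.
for i in range(num_iterations):
    train_step(target_image)
    if (i + 1) % 100 == 0:
        print(f"Step {i + 1}/{num_iterations}")
plt.imsave("stylized.png", deprocess_img(target_image.numpy()))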
Assignment 7
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Load Iris dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Artificial Immune System (AIS) Classifier
class AISClassifier:
    def __init__(self, n_antibodies=10, n_features=None):
        self.n_antibodies = n_antibodies
        self.n_features = n_features
        self.antibodies = None
        self.labels = None

    def fit(self, X_train, y_train):
        self.n_features = X_train.shape[1] if self.n_features is None else self.n_features
        # Antibodies are random points in feature space with randomly assigned labels
        self.antibodies = np.random.rand(self.n_antibodies, self.n_features)
        self.labels = np.random.choice(np.unique(y_train), size=self.n_antibodies)

    def predict(self, X_test):
        predictions = []
        for x in X_test:
            # Classify by the label of the nearest antibody (Euclidean distance)
            distances = np.linalg.norm(self.antibodies - x, axis=1)
            closest_idx = np.argmin(distances)
            predictions.append(self.labels[closest_idx])
        return predictions
# Train AIS classifier
ais_clf = AISClassifier(n_antibodies=10)
ais_clf.fit(X_train, y_train)
# Make predictions
y_pred = ais_clf.predict(X_test)
# Calculate accuracy
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
Output
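Because fit() assigns random positions and random labels to the antibodies, accuracy hovers around chance (roughly 1/3 for the three Iris classes). A common refinement is to label each antibody with the class of its nearest training sample; a minimal sketch of such a fit() variant (an assumption, not part of the assignment code):

# Hypothetical fit() variant: random antibody positions scaled to the data range,
# each labeled by its closest training point rather than a random class.
def fit(self, X_train, y_train):
    self.n_features = X_train.shape[1] if self.n_features is None else self.n_features
    lo, hi = X_train.min(axis=0), X_train.max(axis=0)
    self.antibodies = lo + np.random.rand(self.n_antibodies, self.n_features) * (hi - lo)
    nearest = np.argmin(
        np.linalg.norm(X_train[None, :, :] - self.antibodies[:, None, :], axis=2), axis=1)
    self.labels = y_train[nearest]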
Assignment 8
import random
import numpy as np
from deap import base, creator, tools, algorithms
# Step 1: Define the Fitness and Individual Classes
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
# Step 2: Initialize the Toolbox
toolbox = base.Toolbox()
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=5)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Step 3: Define the Evaluation Function
def evalOneMax(individual):
  return sum(individual),
# Step 4: Register the Evaluation Function with the Toolbox
toolbox.register("evaluate", evalOneMax)
# Step 5: Define the Operators
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
# Step 6: Define the Main Loop of the Evolutionary Algorithm
def main():
  pop = toolbox.population(n=300)
  hof = tools.HallOfFame(1)
  stats = tools.Statistics(lambda ind: ind.fitness.values)
  stats.register("avg", np.mean)
  stats.register("std", np.std)
  stats.register("min", np.min)
  stats.register("max", np.max)
  pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40,
                       stats=stats, halloffame=hof, verbose=True)
  return pop, log, hof
# Step 7: Run the Algorithm
if __name__ == "__main__":
   pop, log, hof = main()
   print("Best individual is: %s\nwith fitness: %s" % (hof[0], hof[0].fitness.values))
Output
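One caveat: mutFlipBit is intended for binary genomes; applied to the random floats here it negates each selected gene to 0.0 or 1.0, which still drives the minimized sum toward zero but is not a true real-valued mutation. For real-valued genes, a Gaussian mutation is the usual choice; a minimal sketch (sigma=0.1 is an arbitrary choice):

# Hypothetical operator swap for real-valued genomes.
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=0.1, indpb=0.05)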
Assignment 10
import numpy as np
# Parameters
num_cities = 10
num_ants = 100
num_iterations = 1000
alpha = 1
beta = 2
evaporation_rate = 0.1
# Initialize pheromone matrix with small positive values
pheromone_matrix = np.ones((num_cities, num_cities))
# Initialize distance matrix (example with random distances)
distance_matrix = np.random.randint(1, 100, size=(num_cities, num_cities))
np.fill_diagonal(distance_matrix, 0) # No distance from a city to itself
# Track the best tour found across all iterations
best_tour = None
best_tour_length = np.inf
for iteration in range(num_iterations):
    # Ant movement and pheromone update
    for ant in range(num_ants):
        tour = []
        visited = set()
        current_city = np.random.randint(0, num_cities)
        tour.append(current_city)
        visited.add(current_city)
        for _ in range(num_cities - 1):
            next_city_probabilities = []
            for city in range(num_cities):
                if city not in visited:
                    # Probability of moving to city: pheromone^alpha * (1/distance)^beta
                    distance = distance_matrix[current_city][city]
                    pheromone = pheromone_matrix[current_city][city]
                    probability = (pheromone ** alpha) * ((1 / distance) ** beta)
                    next_city_probabilities.append(probability)
                else:
                    next_city_probabilities.append(0)
            # Normalize probabilities (as an array, so element-wise division works)
            next_city_probabilities = np.array(next_city_probabilities, dtype=float)
            next_city_probabilities /= next_city_probabilities.sum()
            # Select next city
            next_city = np.random.choice(num_cities, p=next_city_probabilities)
            tour.append(next_city)
            visited.add(next_city)
            current_city = next_city
        # Update pheromone matrix along this ant's tour
        tour_length = sum(distance_matrix[tour[i]][tour[i+1]] for i in range(len(tour)-1))
        for i in range(len(tour)-1):
            pheromone_matrix[tour[i]][tour[i+1]] += 1 / tour_length
            pheromone_matrix[tour[i+1]][tour[i]] += 1 / tour_length
        # Keep the best tour seen so far
        if tour_length < best_tour_length:
            best_tour = list(tour)
            best_tour_length = tour_length
    # Evaporation
    pheromone_matrix *= (1 - evaporation_rate)
print("Best tour length:", best_tour_length)
print("Best tour:", best_tour)
Output
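Note that the tour length above sums only the edges along the visited sequence; a closed TSP tour would also include the edge back to the starting city. A minimal sketch of the adjusted length (an assumption about the intended objective):

# Hypothetical closed-tour length: add the return edge to the start city.
tour_length = sum(distance_matrix[tour[i]][tour[i + 1]] for i in range(len(tour) - 1))
tour_length += distance_matrix[tour[-1]][tour[0]]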
Content Beyond Syllabus
Output
Simulation