diff --git a/Clustering Techniques/Affinity_Propagation_Clustering_Algorithm.py b/Clustering Techniques/Affinity_Propagation_Clustering_Algorithm.py
new file mode 100644
index 0000000..f111ff9
--- /dev/null
+++ b/Clustering Techniques/Affinity_Propagation_Clustering_Algorithm.py
@@ -0,0 +1,37 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import AffinityPropagation
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+model = AffinityPropagation(damping=0.7)
+
+# train the model
+model.fit(training_data)
+
+# assign each data point to a cluster
+result = model.predict(training_data)
+
+# get all of the unique clusters
+clusters = unique(result)
+
+# plot the clusters
+for cluster in clusters:
+ # get data points that fall in this cluster
+ index = where(result == cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the plot
+pyplot.show()
diff --git a/Clustering Techniques/Agglomerative_Clustering_Algorithm.py b/Clustering Techniques/Agglomerative_Clustering_Algorithm.py
new file mode 100644
index 0000000..9786ab7
--- /dev/null
+++ b/Clustering Techniques/Agglomerative_Clustering_Algorithm.py
@@ -0,0 +1,34 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import AgglomerativeClustering
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+agglomerative_model = AgglomerativeClustering(n_clusters=2)
+
+# assign each data point to a cluster
+agglomerative_result = agglomerative_model.fit_predict(training_data)
+
+# get all of the unique clusters
+agglomerative_clusters = unique(agglomerative_result)
+
+# plot the clusters
+for agglomerative_cluster in agglomerative_clusters:
+ # get data points that fall in this cluster
+    index = where(agglomerative_result == agglomerative_cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the Agglomerative Hierarchy plot
+pyplot.show()
diff --git a/Clustering Techniques/Birch_Algorithm.py b/Clustering Techniques/Birch_Algorithm.py
new file mode 100644
index 0000000..c209678
--- /dev/null
+++ b/Clustering Techniques/Birch_Algorithm.py
@@ -0,0 +1,37 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import Birch
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+birch_model = Birch(threshold=0.03, n_clusters=2)
+
+# train the model
+birch_model.fit(training_data)
+
+# assign each data point to a cluster
+birch_result = birch_model.predict(training_data)
+
+# get all of the unique clusters
+birch_clusters = unique(birch_result)
+
+# plot the BIRCH clusters
+for birch_cluster in birch_clusters:
+ # get data points that fall in this cluster
+    index = where(birch_result == birch_cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the BIRCH plot
+pyplot.show()
diff --git a/Clustering Techniques/DBScan_Model.py b/Clustering Techniques/DBScan_Model.py
new file mode 100644
index 0000000..06b9af2
--- /dev/null
+++ b/Clustering Techniques/DBScan_Model.py
@@ -0,0 +1,37 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import DBSCAN
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+dbscan_model = DBSCAN(eps=0.25, min_samples=9)
+
+# train the model and assign each data point to a cluster
+# (DBSCAN has no separate predict step, so fit_predict is used)
+dbscan_result = dbscan_model.fit_predict(training_data)
+
+# get all of the unique clusters
+dbscan_clusters = unique(dbscan_result)
+
+# plot the DBSCAN clusters
+for dbscan_cluster in dbscan_clusters:
+    # get data points that fall in this cluster
+    index = where(dbscan_result == dbscan_cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the DBSCAN plot
+pyplot.show()
diff --git a/Clustering Techniques/Gaussian_Mixture_Model.py b/Clustering Techniques/Gaussian_Mixture_Model.py
new file mode 100644
index 0000000..edfcd26
--- /dev/null
+++ b/Clustering Techniques/Gaussian_Mixture_Model.py
@@ -0,0 +1,37 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.mixture import GaussianMixture
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+gaussian_model = GaussianMixture(n_components=2)
+
+# train the model
+gaussian_model.fit(training_data)
+
+# assign each data point to a cluster
+gaussian_result = gaussian_model.predict(training_data)
+
+# get all of the unique clusters
+gaussian_clusters = unique(gaussian_result)
+
+# plot the Gaussian Mixture clusters
+for gaussian_cluster in gaussian_clusters:
+    # get data points that fall in this cluster
+    index = where(gaussian_result == gaussian_cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the Gaussian Mixture plot
+pyplot.show()
diff --git a/Clustering Techniques/Mean_Shift_Clustering_Algorithm.py b/Clustering Techniques/Mean_Shift_Clustering_Algorithm.py
new file mode 100644
index 0000000..230c67b
--- /dev/null
+++ b/Clustering Techniques/Mean_Shift_Clustering_Algorithm.py
@@ -0,0 +1,34 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import MeanShift
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+mean_model = MeanShift()
+
+# assign each data point to a cluster
+mean_result = mean_model.fit_predict(training_data)
+
+# get all of the unique clusters
+mean_clusters = unique(mean_result)
+
+# plot the Mean-Shift clusters
+for mean_cluster in mean_clusters:
+ # get data points that fall in this cluster
+ index = where(mean_result == mean_cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the Mean-Shift plot
+pyplot.show()
diff --git a/Clustering Techniques/Optics_Algorithm.py b/Clustering Techniques/Optics_Algorithm.py
new file mode 100644
index 0000000..c198fa3
--- /dev/null
+++ b/Clustering Techniques/Optics_Algorithm.py
@@ -0,0 +1,34 @@
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import OPTICS
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+ n_samples=1000,
+ n_features=2,
+ n_informative=2,
+ n_redundant=0,
+ n_clusters_per_class=1,
+ random_state=4
+)
+
+# define the model
+optics_model = OPTICS(eps=0.75, min_samples=10)
+
+# assign each data point to a cluster
+optics_result = optics_model.fit_predict(training_data)
+
+# get all of the unique clusters
+optics_clusters = unique(optics_result)
+
+# plot the OPTICS clusters
+for optics_cluster in optics_clusters:
+    # get data points that fall in this cluster
+    index = where(optics_result == optics_cluster)
+ # make the plot
+ pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the OPTICS plot
+pyplot.show()
diff --git a/Graphs/BellMan_Ford_Algorithm.py b/Graphs/BellMan_Ford_Algorithm.py
new file mode 100644
index 0000000..0d305e3
--- /dev/null
+++ b/Graphs/BellMan_Ford_Algorithm.py
@@ -0,0 +1,53 @@
+# The Bellman-Ford Algorithm is a single-source shortest-path algorithm used to
+# find the shortest paths from a source node to all other nodes in a weighted
+# graph, even when the graph contains negative edge weights. It works by
+# iteratively relaxing the edges of the graph, and a final pass over the edges
+# lets it detect negative weight cycles.
+
+# Class to represent a graph
+
+class Graph:
+
+ def __init__(self, vertices):
+ self.V = vertices # No. of vertices
+ self.graph = []
+
+ # function to add an edge to graph
+ def addEdge(self, u, v, w):
+ self.graph.append([u, v, w])
+
+ # utility function used to print the solution
+ def printArr(self, dist):
+ print("Vertex Distance from Source")
+ for i in range(self.V):
+ print("{0}\t\t{1}".format(i, dist[i]))
+
+ # The main function that finds shortest distances from src to
+ # all other vertices using Bellman-Ford algorithm. The function
+ # also detects negative weight cycle
+ def BellmanFord(self, src):
+
+ # Step 1: Initialize distances from src to all other vertices
+ # as INFINITE
+ dist = [float("Inf")] * self.V
+ dist[src] = 0
+
+ # Step 2: Relax all edges |V| - 1 times. A simple shortest
+ # path from src to any other vertex can have at-most |V| - 1
+ # edges
+ for _ in range(self.V - 1):
+ # Update dist value and parent index of the adjacent vertices of
+ # the picked vertex. Consider only those vertices which are still in
+ # queue
+ for u, v, w in self.graph:
+ if dist[u] != float("Inf") and dist[u] + w < dist[v]:
+ dist[v] = dist[u] + w
+
+ # Step 3: check for negative-weight cycles. The above step
+ # guarantees shortest distances if graph doesn't contain
+ # negative weight cycle. If we get a shorter path, then there
+ # is a cycle.
+
+ for u, v, w in self.graph:
+ if dist[u] != float("Inf") and dist[u] + w < dist[v]:
+ print("Graph contains negative weight cycle")
+ return
+
+ # print all distance
+ self.printArr(dist)
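+
+
+# Example usage (a minimal sketch, not part of the original file): a small
+# 5-vertex graph with negative edge weights but no negative cycle; the edge
+# weights are chosen purely for illustration.
+if __name__ == "__main__":
+    g = Graph(5)
+    g.addEdge(0, 1, -1)
+    g.addEdge(0, 2, 4)
+    g.addEdge(1, 2, 3)
+    g.addEdge(1, 3, 2)
+    g.addEdge(1, 4, 2)
+    g.addEdge(3, 2, 5)
+    g.addEdge(3, 1, 1)
+    g.addEdge(4, 3, -3)
+    g.BellmanFord(0)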
\ No newline at end of file
diff --git a/Graphs/Floyd_Warshall_Algorithm.py b/Graphs/Floyd_Warshall_Algorithm.py
new file mode 100644
index 0000000..1a65876
--- /dev/null
+++ b/Graphs/Floyd_Warshall_Algorithm.py
@@ -0,0 +1,78 @@
+# The Floyd-Warshall Algorithm is a dynamic programming algorithm used to find
+# the shortest paths between all pairs of nodes in a weighted graph. It works for
+# directed or undirected graphs with positive or negative edge weights (as long
+# as there are no negative cycles) and is particularly valuable when you need to
+# compute and store all shortest paths in a graph. The algorithm runs in O(n^3)
+# time and uses an O(n^2) distance matrix, making it suitable for small to
+# moderately sized graphs.
+
+
+# Number of vertices in the graph
+V = 4
+
+# Define infinity as the large
+# enough value. This value will be
+# used for vertices not connected to each other
+INF = 99999
+
+# Solves all pair shortest path
+# via Floyd Warshall Algorithm
+
+
+def floydWarshall(graph):
+ """ dist[][] will be the output
+ matrix that will finally
+ have the shortest distances
+ between every pair of vertices """
+ """ initializing the solution matrix
+ same as input graph matrix
+ OR we can say that the initial
+ values of shortest distances
+ are based on shortest paths considering no
+ intermediate vertices """
+
+ dist = list(map(lambda i: list(map(lambda j: j, i)), graph))
+
+ """ Add all vertices one by one
+ to the set of intermediate
+ vertices.
+ ---> Before start of an iteration,
+ we have shortest distances
+ between all pairs of vertices
+ such that the shortest
+ distances consider only the
+ vertices in the set
+ {0, 1, 2, .. k-1} as intermediate vertices.
+ ----> After the end of a
+ iteration, vertex no. k is
+ added to the set of intermediate
+ vertices and the
+ set becomes {0, 1, 2, .. k}
+ """
+ for k in range(V):
+
+ # pick all vertices as source one by one
+ for i in range(V):
+
+ # Pick all vertices as destination for the
+ # above picked source
+ for j in range(V):
+
+ # If vertex k is on the shortest path from
+ # i to j, then update the value of dist[i][j]
+ dist[i][j] = min(dist[i][j],
+ dist[i][k] + dist[k][j]
+ )
+ printSolution(dist)
+
+# A utility function to print the solution
+
+
+def printSolution(dist):
+ print("Following matrix shows the shortest distances\
+between every pair of vertices")
+ for i in range(V):
+ for j in range(V):
+ if (dist[i][j] == INF):
+ print("%7s" % ("INF"), end=" ")
+ else:
+ print("%7d\t" % (dist[i][j]), end=' ')
+ if j == V-1:
+ print()
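+
+
+# Example usage (a minimal sketch, not part of the original file): the classic
+# 4-vertex example graph, with INF marking pairs that have no direct edge.
+if __name__ == "__main__":
+    graph = [[0, 5, INF, 10],
+             [INF, 0, 3, INF],
+             [INF, INF, 0, 1],
+             [INF, INF, INF, 0]]
+    floydWarshall(graph)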
diff --git a/Graphs/Tarjan's_Algorithm.py b/Graphs/Tarjan's_Algorithm.py
new file mode 100644
index 0000000..cc88b01
--- /dev/null
+++ b/Graphs/Tarjan's_Algorithm.py
@@ -0,0 +1,82 @@
+""" "defaultdict" in Python
+ is a dictionary-like container from the collections
+module that provides a default value for keys that do not exist."""
+
+from collections import defaultdict
+
+# Function to run Tarjan's algorithm
+def tarjan(graph):
+
+ index = 0
+ stack = []
+ components = []
+
+ # Track visited and index for each node
+ indexes = {}
+ lowlinks = {}
+
+ def strongconnect(node):
+
+ # Set the depth index for this node to the smallest unused index
+ nonlocal index
+ indexes[node] = index
+ lowlinks[node] = index
+ index += 1
+ stack.append(node)
+
+        # Consider successors of `node`; .get avoids inserting missing keys
+        # into a defaultdict while its keys are being iterated over
+        successors = graph.get(node, [])
+ for successor in successors:
+ if successor not in indexes:
+ # Successor has not yet been visited; recurse on it
+ strongconnect(successor)
+ lowlinks[node] = min(lowlinks[node], lowlinks[successor])
+ elif successor in stack:
+ # Successor is in the stack, hence in the current SCC
+ lowlinks[node] = min(lowlinks[node], indexes[successor])
+
+ # If `node` is a root node, pop the stack and generate an SCC
+ if lowlinks[node] == indexes[node]:
+ connected_component = []
+
+ while True:
+ successor = stack.pop()
+ connected_component.append(successor)
+ if successor == node:
+ break
+ components.append(connected_component)
+
+ for node in graph:
+ if node not in indexes:
+ strongconnect(node)
+
+ return components
+
+# Accept dynamic input for the graph
+graph = defaultdict(list)
+num_nodes = int(input("Enter the number of nodes: "))
+for i in range(num_nodes):
+    line = input(f"Enter the successors of node {i} (space-separated, blank for none): ")
+    graph[i] = list(map(int, line.split()))
+
+print("Strongly Connected Components:")
+print(tarjan(graph))
+
+
+
+""" Explanation:->
+
+1) Tarjan's algorithm performs a DFS on the graph to find strongly connected components.
+
+2) It maintains an index (incremented for each visited node), a stack of visited nodes, and a lowlink value for each node (lowest index reachable from that node).
+
+3) When visiting a node, if any successor is in the stack, the lowlink is updated to be the minimum of its current value and the successor's index.
+
+4) If the lowlink of a node equals its own index, it is a root node and the current stack represents an SCC. This SCC is popped from the stack and added to the final components list.
+
+5) After Tarjan's algorithm finishes, the components list contains all the SCCs in the graph."""
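+
+# A small fixed example (illustrative, not part of the original file):
+# the graph below has two strongly connected components, {0, 1, 2} and {3, 4}.
+example_graph = {0: [1], 1: [2], 2: [0], 3: [4], 4: [3]}
+print("Example graph SCCs:", tarjan(example_graph))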
\ No newline at end of file
diff --git a/Graphs/travelling_salesman_problem_solver.py b/Graphs/Travelling_Salesman_Problem.py
similarity index 100%
rename from Graphs/travelling_salesman_problem_solver.py
rename to Graphs/Travelling_Salesman_Problem.py
diff --git a/Math/Prefix_Sum.py b/Math/Prefix_Sum.py
new file mode 100644
index 0000000..4bfa726
--- /dev/null
+++ b/Math/Prefix_Sum.py
@@ -0,0 +1,25 @@
+# Function to calculate the prefix sum of a list using list comprehension
+def calculatePrefixSum(arr):
+ prefixSum = [sum(arr[:i + 1]) for i in range(len(arr))]
+ return prefixSum
+
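+# A linear-time alternative (an illustrative sketch, not part of the original):
+# itertools.accumulate keeps a running total instead of re-summing each prefix,
+# so it runs in O(n) rather than O(n^2).
+def calculatePrefixSumLinear(arr):
+    from itertools import accumulate
+    return list(accumulate(arr))
+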
+def main():
+ # Input the list size
+ n = int(input("Enter the size of the list: "))
+
+ if n <= 0:
+ print("List size must be a positive integer.")
+ return 1 # Exit with an error code
+
+ # Input the elements of the list
+ print(f"Enter {n} elements of the list:")
+ arr = [int(input()) for _ in range(n)]
+
+ # Calculate the prefix sum
+ prefixSum = calculatePrefixSum(arr)
+
+ # Display the prefix sum
+ print("Prefix Sum:", prefixSum)
+
+if __name__ == "__main__":
+ main()
diff --git a/Math/kadane's_algorithm.py b/Math/kadane's_algorithm.py
new file mode 100644
index 0000000..dcc8858
--- /dev/null
+++ b/Math/kadane's_algorithm.py
@@ -0,0 +1,17 @@
+def max_subarray_sum(nums):
+ # Initialize variables to keep track of the maximum subarray sum
+
+ max_ending_here = nums[0] # Maximum sum ending at the current position
+ max_so_far = nums[0] # Maximum sum seen so far
+
+ # Iterate through the array, starting from the second element
+ for i in range(1, len(nums)):
+ # Calculate the maximum sum ending at the current position by considering whether it's better to
+ # start a new subarray or extend the previous one.
+ max_ending_here = max(nums[i], max_ending_here + nums[i])
+
+ # Update the maximum sum seen so far by comparing it with the maximum sum ending at the current position.
+ max_so_far = max(max_so_far, max_ending_here)
+
+ # The 'max_so_far' variable now contains the maximum subarray sum.
+ return max_so_far
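+
+
+# Example usage (a small illustrative sketch, not part of the original snippet)
+if __name__ == "__main__":
+    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
+    print("Maximum subarray sum:", max_subarray_sum(nums))  # [4, -1, 2, 1] -> 6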
\ No newline at end of file
diff --git a/Math/sieve_of_eratosthenes.py b/Math/sieve_of_eratosthenes.py
new file mode 100644
index 0000000..88cf490
--- /dev/null
+++ b/Math/sieve_of_eratosthenes.py
@@ -0,0 +1,30 @@
+"""Sieve Of Eratosthenes:
+The sieve of eratosthenes is one of the most efficient way to find all
+the prime numbers upto the number `n`
+
+for more reference(https://www.geeksforgeeks.org/sieve-of-eratosthenes/)
+"""
+
+#importing `math` module which will be used later
+import math
+
+#specify upto where you have to find the prime numbers
+n = int(input("Enter the range : "))
+
+#`arr` is a boolean list that contains `n+1` `False` entries
+arr = [False]*(n+1)
+
+#loop upto the square root of the range `n`
+for i in range(2,int(math.sqrt(n))+1):
+ if arr[i] == False:
+ for j in range(i*i, n+1, i):
+            #marking the entry `True` for every index that is a multiple of `i`
+ arr[j] = True
+
+#after the loop exits, every entry that is still `False` corresponds
+#to a prime number
+
+#printing all the prime numbers
+for i in range(2, n+1):
+    if arr[i] == False:
+        print(i)
diff --git a/Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py b/Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py
new file mode 100644
index 0000000..c6e19d3
--- /dev/null
+++ b/Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py
@@ -0,0 +1,117 @@
+class Q:
+ queue = []
+ MaxSize = 0
+ currSize = 0
+
+ def createQueue(self, size):
+ Q.MaxSize = size
+ Q.currSize = 0
+ for i in range(0, Q.MaxSize):
+ Q.queue.append(0)
+ print('\nQueue created of size: ', len(Q.queue))
+ print(Q.queue)
+
+ def enqueue(self, e):
+ Q.currSize += 1
+ Q.queue[Q.currSize-1] = e
+ Q.shiftUp(Q.currSize-1)
+ print(e, 'enqueued in Queue')
+ print('')
+
+ def dequeue(self):
+ temp = Q.queue[0]
+ Q.currSize -= 1
+ Q.queue[0] = Q.queue[Q.currSize]
+ Q.shiftDown(0)
+ print(temp, 'dequeued from Queue')
+ print('')
+
+ def isFull(self):
+ if Q.currSize == Q.MaxSize:
+ return True
+ else:
+ return False
+
+ def isEmpty(self):
+ if Q.currSize == 0:
+ return True
+ else:
+ return False
+
+ def printQueue(self):
+ print('Position', '\tData')
+ for i in range(Q.currSize):
+ print(i+1,'\t\t',Q.queue[i])
+
+    @staticmethod
+    def shiftUp(i):
+ parent = (i - 1) // 2
+ while (i > 0 and Q.queue[parent] < Q.queue[i]) :
+
+ # Swap parent and current node
+ (Q.queue[i], Q.queue[parent]) = (Q.queue[parent], Q.queue[i]) # swap
+
+ # Update i to parent of i
+ i = parent
+ parent = (i - 1) // 2
+
+    @staticmethod
+    def shiftDown(i):
+ largest = i # Initialize largest as root
+ l = 2 * i + 1 # left = 2*i + 1
+ r = 2 * i + 2 # right = 2*i + 2
+
+ # See if left child of root exists and is
+ # greater than root
+
+ if l < Q.currSize and Q.queue[i] < Q.queue[l]:
+ largest = l
+
+ # See if right child of root exists and is
+ # greater than root
+
+ if r < Q.currSize and Q.queue[largest] < Q.queue[r]:
+ largest = r
+
+ # Change root, if needed
+
+ if largest != i:
+ (Q.queue[i], Q.queue[largest]) = (Q.queue[largest], Q.queue[i]) # swap
+ Q.shiftDown(largest)
+
+
+
+# Main Code:
+
+o = Q()
+o.createQueue(int(input('Enter size of the queue: ')))
+
+while True:
+ print('------------')
+ print('1.Enqueue\n2.Dequeue\n3.Print\n0.Exit')
+ print('------------')
+
+ ch = int(input('\nEnter your choice: '))
+
+ if ch == 1:
+ if o.isFull() != True:
+ data = int(input('\nEnter data to be enqueued: '))
+ o.enqueue(data)
+ else:
+ print('\nQueue is full..\n')
+
+ elif ch == 2:
+ if o.isEmpty() != True:
+ o.dequeue()
+ else:
+ print('\nQueue is empty..\n')
+
+ elif ch == 3:
+ if o.isEmpty() != True:
+ o.printQueue()
+ else:
+ print('\nQueue is empty..\n')
+
+ elif ch == 0:
+ break
+
+ else:
+ print('\nWrong Input..\nEnter the correct choice..!!\n')
\ No newline at end of file
diff --git a/README.md b/README.md
index 7ca53f4..e24dec8 100644
--- a/README.md
+++ b/README.md
@@ -56,18 +56,20 @@ This project follows the [MIT LICENSE](https://choosealicense.com/licenses/mit/)
## Contributors ✨
-
-
+
+
+
+
Connect with me
-
   
-
   
-
   
-
   
-
   
-
(Back to top)
+

   
+

   
+

   
+

   
+

   
+
(Back to top)
diff --git a/Regression Techniques/Bayesian_Regression.py b/Regression Techniques/Bayesian_Regression.py
new file mode 100644
index 0000000..09c97a2
--- /dev/null
+++ b/Regression Techniques/Bayesian_Regression.py
@@ -0,0 +1,106 @@
+#Import the necessary libraries
+import torch
+import pyro
+import pyro.distributions as dist
+from pyro.infer import SVI, Trace_ELBO, Predictive
+from pyro.optim import Adam
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+
+# Generate some sample data
+torch.manual_seed(0)
+X = torch.linspace(0, 10, 100)
+true_slope = 2
+true_intercept = 1
+Y = true_intercept + true_slope * X + torch.randn(100)
+
+# Define the Bayesian regression model
+def model(X, Y):
+ # Priors for the parameters
+ slope = pyro.sample("slope", dist.Normal(0, 10))
+ intercept = pyro.sample("intercept", dist.Normal(0, 10))
+ sigma = pyro.sample("sigma", dist.HalfNormal(1))
+
+ # Expected value of the outcome
+ mu = intercept + slope * X
+
+ # Likelihood (sampling distribution) of the observations
+ with pyro.plate("data", len(X)):
+ pyro.sample("obs", dist.Normal(mu, sigma), obs=Y)
+
+# Run Bayesian inference using SVI (Stochastic Variational Inference)
+def guide(X, Y):
+ # Approximate posterior distributions for the parameters
+ slope_loc = pyro.param("slope_loc", torch.tensor(0.0))
+ slope_scale = pyro.param("slope_scale", torch.tensor(1.0),
+ constraint=dist.constraints.positive)
+ intercept_loc = pyro.param("intercept_loc", torch.tensor(0.0))
+ intercept_scale = pyro.param("intercept_scale", torch.tensor(1.0),
+ constraint=dist.constraints.positive)
+ sigma_loc = pyro.param("sigma_loc", torch.tensor(1.0),
+ constraint=dist.constraints.positive)
+
+ # Sample from the approximate posterior distributions
+ slope = pyro.sample("slope", dist.Normal(slope_loc, slope_scale))
+ intercept = pyro.sample("intercept", dist.Normal(intercept_loc,
+ intercept_scale))
+ sigma = pyro.sample("sigma", dist.HalfNormal(sigma_loc))
+
+# Initialize the SVI and optimizer
+optim = Adam({"lr": 0.01})
+svi = SVI(model, guide, optim, loss=Trace_ELBO())
+
+# Run the inference loop
+num_iterations = 1000
+for i in range(num_iterations):
+ loss = svi.step(X, Y)
+ if (i + 1) % 100 == 0:
+ print(f"Iteration {i + 1}/{num_iterations} - Loss: {loss}")
+
+# Obtain posterior samples using Predictive
+predictive = Predictive(model, guide=guide, num_samples=1000)
+posterior = predictive(X, Y)
+
+# Extract the parameter samples
+slope_samples = posterior["slope"]
+intercept_samples = posterior["intercept"]
+sigma_samples = posterior["sigma"]
+
+# Compute the posterior means
+slope_mean = slope_samples.mean()
+intercept_mean = intercept_samples.mean()
+sigma_mean = sigma_samples.mean()
+
+# Print the estimated parameters
+print("Estimated Slope:", slope_mean.item())
+print("Estimated Intercept:", intercept_mean.item())
+print("Estimated Sigma:", sigma_mean.item())
+
+
+# Create subplots
+fig, axs = plt.subplots(1, 3, figsize=(15, 5))
+
+# Plot the posterior distribution of the slope
+sns.kdeplot(slope_samples, fill=True, ax=axs[0])
+axs[0].set_title("Posterior Distribution of Slope")
+axs[0].set_xlabel("Slope")
+axs[0].set_ylabel("Density")
+
+# Plot the posterior distribution of the intercept
+sns.kdeplot(intercept_samples, fill=True, ax=axs[1])
+axs[1].set_title("Posterior Distribution of Intercept")
+axs[1].set_xlabel("Intercept")
+axs[1].set_ylabel("Density")
+
+# Plot the posterior distribution of sigma
+sns.kdeplot(sigma_samples, fill=True, ax=axs[2])
+axs[2].set_title("Posterior Distribution of Sigma")
+axs[2].set_xlabel("Sigma")
+axs[2].set_ylabel("Density")
+
+# Adjust the layout
+plt.tight_layout()
+
+# Show the plot
+plt.show()
diff --git a/Regression Techniques/Isotonic_Regression.py b/Regression Techniques/Isotonic_Regression.py
new file mode 100644
index 0000000..b2fc215
--- /dev/null
+++ b/Regression Techniques/Isotonic_Regression.py
@@ -0,0 +1,23 @@
+import numpy as np
+from sklearn.isotonic import IsotonicRegression
+import matplotlib.pyplot as plt
+from matplotlib.collections import LineCollection
+
+# Sample data: a noisy, roughly increasing trend (assumed here, since the
+# original snippet did not define `x`, `y` or `n`)
+n = 20
+x = np.arange(n)
+y = np.random.randint(0, 20, size=n) + 10 * np.log1p(np.arange(n))
+
+ir = IsotonicRegression()  # create an instance of the IsotonicRegression class
+
+# Fit isotonic regression model
+y_ir = ir.fit_transform(x, y)  # fit the model and transform the data
+print('Isotonic Regression Predictions :\n', y_ir)
+
+# Create line segments joining each observation to its isotonic prediction,
+# to visualise the difference between actual and fitted values
+lines = [[[i, y[i]], [i, y_ir[i]]] for i in range(n)]
+lc = LineCollection(lines)
+
+plt.plot(x, y_ir, '-', markersize=10, label='isotonic regression')
+
+plt.gca().add_collection(lc)
+plt.legend() # add a legend
+
+plt.title("Isotonic Regression")
+plt.show()
diff --git a/Regression Techniques/Lasso_Regression.py b/Regression Techniques/Lasso_Regression.py
new file mode 100644
index 0000000..7b9626b
--- /dev/null
+++ b/Regression Techniques/Lasso_Regression.py
@@ -0,0 +1,140 @@
+# Importing libraries
+
+import numpy as np
+
+import pandas as pd
+
+from sklearn.model_selection import train_test_split
+
+import matplotlib.pyplot as plt
+
+# Lasso Regression
+
+class LassoRegression() :
+
+ def __init__( self, learning_rate, iterations, l1_penality ) :
+
+ self.learning_rate = learning_rate
+
+ self.iterations = iterations
+
+ self.l1_penality = l1_penality
+
+ # Function for model training
+
+ def fit( self, X, Y ) :
+
+ # no_of_training_examples, no_of_features
+
+ self.m, self.n = X.shape
+
+ # weight initialization
+
+ self.W = np.zeros( self.n )
+
+ self.b = 0
+
+ self.X = X
+
+ self.Y = Y
+
+ # gradient descent learning
+
+ for i in range( self.iterations ) :
+
+ self.update_weights()
+
+ return self
+
+ # Helper function to update weights in gradient descent
+
+ def update_weights( self ) :
+
+ Y_pred = self.predict( self.X )
+
+ # calculate gradients
+
+ dW = np.zeros( self.n )
+
+ for j in range( self.n ) :
+
+ if self.W[j] > 0 :
+
+ dW[j] = ( - ( 2 * ( self.X[:, j] ).dot( self.Y - Y_pred ) )
+
+ + self.l1_penality ) / self.m
+
+ else :
+
+ dW[j] = ( - ( 2 * ( self.X[:, j] ).dot( self.Y - Y_pred ) )
+
+ - self.l1_penality ) / self.m
+
+
+ db = - 2 * np.sum( self.Y - Y_pred ) / self.m
+
+ # update weights
+
+ self.W = self.W - self.learning_rate * dW
+
+ self.b = self.b - self.learning_rate * db
+
+ return self
+
+ # Hypothetical function h( x )
+
+ def predict( self, X ) :
+
+ return X.dot( self.W ) + self.b
+
+
+def main() :
+
+ # Importing dataset
+
+ df = pd.read_csv( "salary_data.csv" )
+
+ X = df.iloc[:, :-1].values
+
+ Y = df.iloc[:, 1].values
+
+ # Splitting dataset into train and test set
+
+ X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size = 1 / 3, random_state = 0 )
+
+ # Model training
+
+ model = LassoRegression( iterations = 1000, learning_rate = 0.01, l1_penality = 500 )
+
+ model.fit( X_train, Y_train )
+
+ # Prediction on test set
+
+ Y_pred = model.predict( X_test )
+
+ print( "Predicted values ", np.round( Y_pred[:3], 2 ) )
+
+ print( "Real values ", Y_test[:3] )
+
+ print( "Trained W ", round( model.W[0], 2 ) )
+
+ print( "Trained b ", round( model.b, 2 ) )
+
+ # Visualization on test set
+
+ plt.scatter( X_test, Y_test, color = 'blue' )
+
+ plt.plot( X_test, Y_pred, color = 'orange' )
+
+ plt.title( 'Salary vs Experience' )
+
+ plt.xlabel( 'Years of Experience' )
+
+ plt.ylabel( 'Salary' )
+
+ plt.show()
+
+
+if __name__ == "__main__" :
+
+ main()
diff --git a/Regression Techniques/Least_Angle_Regression.py b/Regression Techniques/Least_Angle_Regression.py
new file mode 100644
index 0000000..f5d3593
--- /dev/null
+++ b/Regression Techniques/Least_Angle_Regression.py
@@ -0,0 +1,25 @@
+# Importing modules that are required
+
+from sklearn.datasets import fetch_california_housing
+from sklearn.linear_model import LassoLars
+from sklearn.metrics import r2_score
+from sklearn.model_selection import train_test_split
+
+# Loading dataset (the Boston housing dataset was removed from recent
+# scikit-learn releases, so the California housing dataset is used instead)
+dataset = fetch_california_housing()
+X = dataset.data
+y = dataset.target
+
+# Splitting training and testing data
+X_train, X_test, y_train, y_test = train_test_split(X, y,
+ test_size = 0.15, random_state = 42)
+
+# Creating and fitting the regressor
+regressor = LassoLars(alpha = 0.1)
+regressor.fit(X_train, y_train)
+
+
+# Evaluating model
+prediction = regressor.predict(X_test)
+
+print(f"r2 Score of test set : {r2_score(y_test, prediction)}")
diff --git a/Regression Techniques/Linear_Regression.py b/Regression Techniques/Linear_Regression.py
new file mode 100644
index 0000000..e2904fe
--- /dev/null
+++ b/Regression Techniques/Linear_Regression.py
@@ -0,0 +1,49 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def mse(coef, x, y):
+ return np.mean((np.dot(x, coef) - y)**2)/2
+
+
+def gradients(coef, x, y):
+ return np.mean(x.transpose()*(np.dot(x, coef) - y), axis=1)
+
+
+def multilinear_regression(coef, x, y, lr, b1=0.9, b2=0.999, epsilon=1e-8):
+ prev_error = 0
+ m_coef = np.zeros(coef.shape)
+ v_coef = np.zeros(coef.shape)
+ moment_m_coef = np.zeros(coef.shape)
+ moment_v_coef = np.zeros(coef.shape)
+ t = 0
+
+ while True:
+ error = mse(coef, x, y)
+ if abs(error - prev_error) <= epsilon:
+ break
+ prev_error = error
+ grad = gradients(coef, x, y)
+ t += 1
+ m_coef = b1 * m_coef + (1-b1)*grad
+ v_coef = b2 * v_coef + (1-b2)*grad**2
+ moment_m_coef = m_coef / (1-b1**t)
+ moment_v_coef = v_coef / (1-b2**t)
+
+        delta = ((lr / (moment_v_coef**0.5 + 1e-8)) *
+                 (b1 * moment_m_coef + (1-b1)*grad/(1-b1**t)))
+
+ coef = np.subtract(coef, delta)
+ return coef
+
+
+# Sample data (assumed here, since the original snippet does not define x or y):
+# a design matrix with a bias column and two random features, plus a noisy
+# linear target
+np.random.seed(0)
+x = np.hstack([np.ones((200, 1)), np.random.rand(200, 2)])
+y = 2 + 3 * x[:, 1] + 5 * x[:, 2] + 0.1 * np.random.randn(200)
+
+coef = np.array([0.0, 0.0, 0.0])
+c = multilinear_regression(coef, x, y, 1e-1)
+fig = plt.figure()
+ax = fig.add_subplot(projection='3d')
+
+ax.scatter(x[:, 1], x[:, 2], y, label='y',
+ s=5, color="dodgerblue")
+
+ax.scatter(x[:, 1], x[:, 2], c[0] + c[1]*x[:, 1] + c[2]*x[:, 2],
+ label='regression', s=5, color="orange")
+
+ax.view_init(45, 0)
+ax.legend()
+plt.show()
diff --git a/Regression Techniques/Logistic_Regression.py b/Regression Techniques/Logistic_Regression.py
new file mode 100644
index 0000000..807d40a
--- /dev/null
+++ b/Regression Techniques/Logistic_Regression.py
@@ -0,0 +1,20 @@
+# import the necessary libraries
+from sklearn.datasets import load_breast_cancer
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import accuracy_score
+# load the breast cancer dataset
+X, y = load_breast_cancer(return_X_y=True)
+# split the train and test dataset
+X_train, X_test,\
+ y_train, y_test = train_test_split(X, y,
+ test_size=0.20,
+ random_state=23)
+# LogisticRegression
+clf = LogisticRegression(random_state=0)
+clf.fit(X_train, y_train)
+# Prediction
+y_pred = clf.predict(X_test)
+
+acc = accuracy_score(y_test, y_pred)
+print("Logistic Regression model accuracy (in %):", acc*100)
diff --git a/Regression Techniques/Polynomial_Regression.py b/Regression Techniques/Polynomial_Regression.py
new file mode 100644
index 0000000..d56f243
--- /dev/null
+++ b/Regression Techniques/Polynomial_Regression.py
@@ -0,0 +1,137 @@
+# Importing libraries
+
+import numpy as np
+
+import math
+
+import matplotlib.pyplot as plt
+
+# Univariate Polynomial Regression
+
+class PolynomialRegression() :
+
+ def __init__( self, degree, learning_rate, iterations ) :
+
+ self.degree = degree
+
+ self.learning_rate = learning_rate
+
+ self.iterations = iterations
+
+ # function to transform X
+
+ def transform( self, X ) :
+
+ # initialize X_transform
+
+ X_transform = np.ones( ( self.m, 1 ) )
+
+ j = 0
+
+ for j in range( self.degree + 1 ) :
+
+ if j != 0 :
+
+ x_pow = np.power( X, j )
+
+ # append x_pow to X_transform
+
+ X_transform = np.append( X_transform, x_pow.reshape( -1, 1 ), axis = 1 )
+
+ return X_transform
+
+ # function to normalize X_transform
+
+ def normalize( self, X ) :
+
+ X[:, 1:] = ( X[:, 1:] - np.mean( X[:, 1:], axis = 0 ) ) / np.std( X[:, 1:], axis = 0 )
+
+ return X
+
+ # model training
+
+ def fit( self, X, Y ) :
+
+ self.X = X
+
+ self.Y = Y
+
+ self.m, self.n = self.X.shape
+
+ # weight initialization
+
+ self.W = np.zeros( self.degree + 1 )
+
+ # transform X for polynomial h( x ) = w0 * x^0 + w1 * x^1 + w2 * x^2 + ........+ wn * x^n
+
+ X_transform = self.transform( self.X )
+
+ # normalize X_transform
+
+ X_normalize = self.normalize( X_transform )
+
+ # gradient descent learning
+
+ for i in range( self.iterations ) :
+
+ h = self.predict( self.X )
+
+ error = h - self.Y
+
+ # update weights
+
+ self.W = self.W - self.learning_rate * ( 1 / self.m ) * np.dot( X_normalize.T, error )
+
+ return self
+
+ # predict
+
+ def predict( self, X ) :
+
+ # transform X for polynomial h( x ) = w0 * x^0 + w1 * x^1 + w2 * x^2 + ........+ wn * x^n
+
+ X_transform = self.transform( X )
+
+ X_normalize = self.normalize( X_transform )
+
+        return np.dot( X_normalize, self.W )
+
+
+# Driver code
+
+def main() :
+
+ # Create dataset
+
+ X = np.array( [ [1], [2], [3], [4], [5], [6], [7] ] )
+
+ Y = np.array( [ 45000, 50000, 60000, 80000, 110000, 150000, 200000 ] )
+
+ # model training
+
+    model = PolynomialRegression( degree = 2, learning_rate = 0.01, iterations = 500 )
+
+ model.fit( X, Y )
+
+ # Prediction on training set
+
+ Y_pred = model.predict( X )
+
+ # Visualization
+
+ plt.scatter( X, Y, color = 'blue' )
+
+ plt.plot( X, Y_pred, color = 'orange' )
+
+ plt.title( 'X vs Y' )
+
+ plt.xlabel( 'X' )
+
+ plt.ylabel( 'Y' )
+
+ plt.show()
+
+
+if __name__ == "__main__" :
+
+ main()
diff --git a/Regression Techniques/Quantile_Regression.py b/Regression Techniques/Quantile_Regression.py
new file mode 100644
index 0000000..7429810
--- /dev/null
+++ b/Regression Techniques/Quantile_Regression.py
@@ -0,0 +1,47 @@
+# Python program to visualize quantile regression
+
+# Importing libraries
+import numpy as np
+import pandas as pd
+import statsmodels.api as sm
+import statsmodels.formula.api as smf
+import matplotlib.pyplot as plt
+
+np.random.seed(0)
+
+# Number of rows
+rows = 20
+
+# Constructing Distance column
+Distance = np.random.uniform(1, 10, rows)
+
+# Constructing Emission column
+Emission = 40 + Distance + np.random.normal(loc=0,
+ scale=.25*Distance,
+ size=20)
+
+# Creating a dataset
+df = pd.DataFrame({'Distance': Distance,
+ 'Emission': Emission})
+
+# Fit the model
+model = smf.quantreg('Emission ~ Distance',
+ df).fit(q=0.7)
+
+# define figure and axis
+fig, ax = plt.subplots(figsize=(10, 8))
+
+# get y values
+y_line = lambda a, b: a + b * Distance
+y = y_line(model.params['Intercept'],
+ model.params['Distance'])
+
+# Plotting data points with the help
+# of the quantile regression equation
+ax.plot(Distance, y, color='black')
+ax.scatter(Distance, Emission, alpha=.3)
+ax.set_xlabel('Distance Traveled', fontsize=20)
+ax.set_ylabel('Emission Generated', fontsize=20)
+
+# Save the plot
+fig.savefig('quantile_regression.png')
diff --git a/Regression Techniques/Ridge_Regression.py b/Regression Techniques/Ridge_Regression.py
new file mode 100644
index 0000000..4f3df50
--- /dev/null
+++ b/Regression Techniques/Ridge_Regression.py
@@ -0,0 +1,91 @@
+# Importing libraries
+
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+import matplotlib.pyplot as plt
+
+# Ridge Regression
+
+class RidgeRegression() :
+
+ def __init__( self, learning_rate, iterations, l2_penality ) :
+
+ self.learning_rate = learning_rate
+ self.iterations = iterations
+ self.l2_penality = l2_penality
+
+ # Function for model training
+ def fit( self, X, Y ) :
+
+ # no_of_training_examples, no_of_features
+ self.m, self.n = X.shape
+
+ # weight initialization
+ self.W = np.zeros( self.n )
+
+ self.b = 0
+ self.X = X
+ self.Y = Y
+
+ # gradient descent learning
+
+ for i in range( self.iterations ) :
+ self.update_weights()
+ return self
+
+ # Helper function to update weights in gradient descent
+
+ def update_weights( self ) :
+ Y_pred = self.predict( self.X )
+
+ # calculate gradients
+ dW = ( - ( 2 * ( self.X.T ).dot( self.Y - Y_pred ) ) +
+ ( 2 * self.l2_penality * self.W ) ) / self.m
+ db = - 2 * np.sum( self.Y - Y_pred ) / self.m
+
+ # update weights
+ self.W = self.W - self.learning_rate * dW
+ self.b = self.b - self.learning_rate * db
+ return self
+
+ # Hypothetical function h( x )
+ def predict( self, X ) :
+ return X.dot( self.W ) + self.b
+
+# Driver code
+
+def main() :
+
+ # Importing dataset
+ df = pd.read_csv( "salary_data.csv" )
+ X = df.iloc[:, :-1].values
+ Y = df.iloc[:, 1].values
+
+ # Splitting dataset into train and test set
+    X_train, X_test, Y_train, Y_test = train_test_split( X, Y,
+                                                          test_size = 1 / 3, random_state = 0 )
+
+ # Model training
+ model = RidgeRegression( iterations = 1000,
+ learning_rate = 0.01, l2_penality = 1 )
+ model.fit( X_train, Y_train )
+
+ # Prediction on test set
+ Y_pred = model.predict( X_test )
+ print( "Predicted values ", np.round( Y_pred[:3], 2 ) )
+ print( "Real values ", Y_test[:3] )
+ print( "Trained W ", round( model.W[0], 2 ) )
+ print( "Trained b ", round( model.b, 2 ) )
+
+ # Visualization on test set
+ plt.scatter( X_test, Y_test, color = 'blue' )
+ plt.plot( X_test, Y_pred, color = 'orange' )
+ plt.title( 'Salary vs Experience' )
+ plt.xlabel( 'Years of Experience' )
+ plt.ylabel( 'Salary' )
+ plt.show()
+
+if __name__ == "__main__" :
+ main()
diff --git a/Regression Techniques/Simple_Linear_Regression.py b/Regression Techniques/Simple_Linear_Regression.py
new file mode 100644
index 0000000..0f6da43
--- /dev/null
+++ b/Regression Techniques/Simple_Linear_Regression.py
@@ -0,0 +1,54 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+def estimate_coef(x, y):
+ # number of observations/points
+ n = np.size(x)
+
+ # mean of x and y vector
+ m_x = np.mean(x)
+ m_y = np.mean(y)
+
+ # calculating cross-deviation and deviation about x
+ SS_xy = np.sum(y*x) - n*m_y*m_x
+ SS_xx = np.sum(x*x) - n*m_x*m_x
+
+ # calculating regression coefficients
+ b_1 = SS_xy / SS_xx
+ b_0 = m_y - b_1*m_x
+
+ return (b_0, b_1)
+
+def plot_regression_line(x, y, b):
+ # plotting the actual points as scatter plot
+ plt.scatter(x, y, color = "m",
+ marker = "o", s = 30)
+
+ # predicted response vector
+ y_pred = b[0] + b[1]*x
+
+ # plotting the regression line
+ plt.plot(x, y_pred, color = "g")
+
+ # putting labels
+ plt.xlabel('x')
+ plt.ylabel('y')
+
+ # function to show plot
+ plt.show()
+
+def main():
+ # observations / data
+ x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ y = np.array([1, 3, 2, 5, 7, 8, 8, 9, 10, 12])
+
+ # estimating coefficients
+ b = estimate_coef(x, y)
+ print("Estimated coefficients:\nb_0 = {} \
+ \nb_1 = {}".format(b[0], b[1]))
+
+ # plotting regression line
+ plot_regression_line(x, y, b)
+
+if __name__ == "__main__":
+ main()
diff --git a/Regression Techniques/Stepwise_Regression.py b/Regression Techniques/Stepwise_Regression.py
new file mode 100644
index 0000000..3c91866
--- /dev/null
+++ b/Regression Techniques/Stepwise_Regression.py
@@ -0,0 +1,48 @@
+import pandas as pd
+import numpy as np
+from sklearn import linear_model
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import accuracy_score
+from mlxtend.feature_selection import SequentialFeatureSelector
+
+# Define the array of data
+data = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]])
+
+# Convert the array into a dataframe
+df = pd.DataFrame(data)
+
+# Select the features and target
+X = df.iloc[:, :-1]
+y = df.iloc[:, -1]
+
+# Perform stepwise regression
+sfs = SequentialFeatureSelector(linear_model.LogisticRegression(),
+ k_features=3,
+ forward=True,
+ scoring='accuracy',
+ cv=None)
+selected_features = sfs.fit(X, y)
+
+# Create a dataframe with only the selected features
+selected_columns = list(selected_features.k_feature_idx_)
+df_selected = df[selected_columns]
+
+# Split the data into train and test sets
+X_train, X_test,\
+ y_train, y_test = train_test_split(
+ df_selected, y,
+ test_size=0.3,
+ random_state=42)
+
+# Fit a logistic regression model using the selected features
+logreg = linear_model.LogisticRegression()
+logreg.fit(X_train, y_train)
+
+# Make predictions using the test set
+y_pred = logreg.predict(X_test)
+
+# Evaluate the model performance
+print("Predictions:", y_pred)
+print("Accuracy:", accuracy_score(y_test, y_pred))
+
diff --git a/Searching Techniques/Boyer_Moore_Algorithm.py b/Searching Techniques/Boyer_Moore_Algorithm.py
new file mode 100644
index 0000000..1b2df13
--- /dev/null
+++ b/Searching Techniques/Boyer_Moore_Algorithm.py
@@ -0,0 +1,74 @@
+"""Program for Bad Character Heuristic
+of Boyer Moore String Matching Algorithm"""
+
+
+NO_OF_CHARS = 256
+
+def badCharHeuristic(string, size):
+ '''
+ The preprocessing function for
+ Boyer Moore's bad character heuristic
+ '''
+ # Initialize all occurrences as -1
+ badChar = [-1] * NO_OF_CHARS
+
+ # Fill the actual value of the last occurrence
+ for i in range(size):
+ badChar[ord(string[i])] = i
+
+ # Return the initialized list
+ return badChar
+
+def search(txt, pat):
+ '''
+ A pattern searching function that uses the Bad Character
+ Heuristic of the Boyer Moore Algorithm
+ '''
+ m = len(pat)
+ n = len(txt)
+
+ # Create the bad character list by calling
+ # the preprocessing function badCharHeuristic()
+ # for the given pattern
+ badChar = badCharHeuristic(pat, m)
+
+ # s is the shift of the pattern with respect to the text
+ s = 0
+ while s <= n - m:
+ j = m - 1
+
+ # Keep reducing index j of the pattern while
+ # characters of the pattern and text are matching
+ # at this shift s
+ while j >= 0 and pat[j] == txt[s + j]:
+ j -= 1
+
+ # If the pattern is present at the current shift,
+ # then index j will become -1 after the above loop
+ if j < 0:
+ print("Pattern occurs at shift =", s)
+
+ '''
+ Shift the pattern so that the next character in the text
+ aligns with the last occurrence of it in the pattern.
+ The condition s+m < n is necessary for the case when
+ the pattern occurs at the end of the text
+ '''
+ s += (m - badChar[ord(txt[s + m])] if s + m < n else 1)
+ else:
+ '''
+ Shift the pattern so that the bad character in the text
+ aligns with the last occurrence of it in the pattern. The
+ max function is used to make sure that we get a positive
+ shift. We may get a negative shift if the last occurrence
+ of the bad character in the pattern is on the right side of the
+ current character.
+ '''
+ s += max(1, j - badChar[ord(txt[s + j])])
+
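+# Example (an illustrative call, not part of the original file): this prints
+# "Pattern occurs at shift = 4".
+search("ABAAABCD", "ABC")
+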
+while True:
+ txt = input('Enter the text (or press Enter to exit): ')
+ if not txt:
+ break
+ pat = input('Enter the pattern to search for: ')
+ search(txt, pat)
diff --git a/Searching Techniques/Naive_Pattern_Searching.py b/Searching Techniques/Naive_Pattern_Searching.py
new file mode 100644
index 0000000..cc955c3
--- /dev/null
+++ b/Searching Techniques/Naive_Pattern_Searching.py
@@ -0,0 +1,39 @@
+# Python3 program for Naive Pattern
+# Searching algorithm
+
+
+def search(pat, txt):
+ M = len(pat)
+ N = len(txt)
+
+    # A loop to slide pat[] one by one
+ for i in range(N - M + 1):
+ j = 0
+
+        # For current index i, check
+        # for pattern match
+ while(j < M):
+ if (txt[i + j] != pat[j]):
+ break
+ j += 1
+
+ if (j == M):
+ print("Pattern found at index ", i)
+
+
+# Driver's Code
+if __name__ == '__main__':
+ txt = "AABAACAADAABAAABAA"
+ pat = "AABA"
+
+ # Function call
+    print('Below is an example of the Naive Pattern Searching Algorithm\n')
+ print('It is being implemented for the following text and pattern: \n')
+ print(' Text = "AABAACAADAABAAABAA" pattern = "AABA"')
+
+ search(pat, txt)
+ #try it yourself
+ print('\nNow try it yourself\n')
+ txt = input("Enter the text: ")
+ pat = input("Enter the pattern: ")
+ search(pat, txt)
\ No newline at end of file
diff --git a/Sorting Techniques/Tim_Sort.py b/Sorting Techniques/Tim_Sort.py
new file mode 100644
index 0000000..f66b307
--- /dev/null
+++ b/Sorting Techniques/Tim_Sort.py
@@ -0,0 +1,110 @@
+# Python : Timsort algorithm
+####################################################################################################################
+# TimSort is a hybrid sorting algorithm that combines the strengths of merge sort and insertion sort.
+# It is designed to efficiently sort a wide range of real-world data types. It maintains the relative order of equal elements in the sorted output.
+# Divide into Runs: TimSort starts by dividing the input array into small, already sorted subsequences called "runs."
+# Merge Runs: It then merges these runs together using a combination of merge sort and insertion sort. This merging process optimizes performance, especially for data with pre-existing order.
+####################################################################################################################
+
+
+MIN_MERGE = 32
+
+
+def calcMinRun(n):
+ """Returns the minimum length of a run from 23 - 64 so that
+ the len(array)/minrun is less than or equal to a power of 2.
+
+ e.g. 1=>1, ..., 63=>63, 64=>32, 65=>33,
+ ..., 127=>64, 128=>32, ...
+ """
+ r = 0
+ while n >= MIN_MERGE:
+ r |= n & 1
+ n >>= 1
+ return n + r
+
+
+# This function sorts array from left index to
+# to right index which is of size atmost RUN
+def insertionSort(arr, left, right):
+ for i in range(left + 1, right + 1):
+ j = i
+ while j > left and arr[j] < arr[j - 1]:
+ arr[j], arr[j - 1] = arr[j - 1], arr[j]
+ j -= 1
+
+
+# Merge function merges the sorted runs
+def merge(arr, l, m, r):
+
+ # original array is broken in two parts
+ # left and right array
+ len1, len2 = m - l + 1, r - m
+ left, right = [], []
+ for i in range(0, len1):
+ left.append(arr[l + i])
+ for i in range(0, len2):
+ right.append(arr[m + 1 + i])
+
+ i, j, k = 0, 0, l
+
+ # after comparing, we merge those two array
+ # in larger sub array
+ while i < len1 and j < len2:
+ if left[i] <= right[j]:
+ arr[k] = left[i]
+ i += 1
+
+ else:
+ arr[k] = right[j]
+ j += 1
+
+ k += 1
+
+ # Copy remaining elements of left, if any
+ while i < len1:
+ arr[k] = left[i]
+ k += 1
+ i += 1
+
+ # Copy remaining element of right, if any
+ while j < len2:
+ arr[k] = right[j]
+ k += 1
+ j += 1
+
+
+# Iterative Timsort function to sort the
+# array[0...n-1] (similar to merge sort)
+def timSort(arr):
+ n = len(arr)
+ minRun = calcMinRun(n)
+
+ # Sort individual subarrays of size RUN
+ for start in range(0, n, minRun):
+ end = min(start + minRun - 1, n - 1)
+ insertionSort(arr, start, end)
+
+ # Start merging from size RUN (or 32). It will merge
+ # to form size 64, then 128, 256 and so on ....
+ size = minRun
+ while size < n:
+
+ # Pick starting point of left sub array. We
+ # are going to merge arr[left..left+size-1]
+ # and arr[left+size, left+2*size-1]
+ # After every merge, we increase left by 2*size
+ for left in range(0, n, 2 * size):
+
+ # Find ending point of left sub array
+ # mid+1 is starting point of right sub array
+ mid = min(n - 1, left + size - 1)
+ right = min((left + 2 * size - 1), (n - 1))
+
+ # Merge sub array arr[left.....mid] &
+ # arr[mid+1....right]
+ if mid < right:
+ merge(arr, left, mid, right)
+
+ size = 2 * size
+
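+# Example usage (an illustrative sketch, not part of the original file)
+if __name__ == "__main__":
+    arr = [-2, 7, 15, -14, 0, 15, 0, 7, -7, -4, -13, 5, 8, -14, 12]
+    timSort(arr)
+    print("Sorted array:", arr)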
diff --git a/Tree/AVL_tree.py b/Tree/AVL_tree.py
new file mode 100644
index 0000000..b4a8ed8
--- /dev/null
+++ b/Tree/AVL_tree.py
@@ -0,0 +1,146 @@
+class TreeNode:
+ def __init__(self, key):
+ self.key = key
+ self.left = None
+ self.right = None
+ self.height = 1
+
+
+class AVLTree:
+ def insert(self, root, key):
+ if not root:
+ return TreeNode(key)
+ if key < root.key:
+ root.left = self.insert(root.left, key)
+ else:
+ root.right = self.insert(root.right, key)
+ root.height = 1 + max(self.get_height(root.left), self.get_height(root.right))
+ return self.balance(root)
+
+ def delete(self, root, key):
+ if not root:
+ return root
+ if key < root.key:
+ root.left = self.delete(root.left, key)
+ elif key > root.key:
+ root.right = self.delete(root.right, key)
+ else:
+ if not root.left:
+ temp = root.right
+ root = None
+ return temp
+ elif not root.right:
+ temp = root.left
+ root = None
+ return temp
+ temp = self.get_min_value_node(root.right)
+ root.key = temp.key
+ root.right = self.delete(root.right, temp.key)
+ root.height = 1 + max(self.get_height(root.left), self.get_height(root.right))
+ return self.balance(root)
+
+ def get_height(self, node):
+ if not node:
+ return 0
+ return node.height
+
+ def get_balance(self, node):
+ if not node:
+ return 0
+ return self.get_height(node.left) - self.get_height(node.right)
+
+ def balance(self, node):
+ if not node:
+ return node
+ balance = self.get_balance(node)
+ if balance > 1:
+ if self.get_balance(node.left) < 0:
+ node.left = self.left_rotate(node.left)
+ return self.right_rotate(node)
+ if balance < -1:
+ if self.get_balance(node.right) > 0:
+ node.right = self.right_rotate(node.right)
+ return self.left_rotate(node)
+ return node
+
+ def left_rotate(self, z):
+ y = z.right
+ T2 = y.left
+ y.left = z
+ z.right = T2
+ z.height = 1 + max(self.get_height(z.left), self.get_height(z.right))
+ y.height = 1 + max(self.get_height(y.left), self.get_height(y.right))
+ return y
+
+ def right_rotate(self, y):
+ x = y.left
+ T2 = x.right
+ x.right = y
+ y.left = T2
+ y.height = 1 + max(self.get_height(y.left), self.get_height(y.right))
+ x.height = 1 + max(self.get_height(x.left), self.get_height(x.right))
+ return x
+
+ def get_min_value_node(self, node):
+ if node is None or node.left is None:
+ return node
+ return self.get_min_value_node(node.left)
+
+ def inorder_traversal(self, root):
+ if root:
+ self.inorder_traversal(root.left)
+ print(root.key, end=" ")
+ self.inorder_traversal(root.right)
+
+# Example usage:
+if __name__ == "__main__":
+ avl_tree = AVLTree()
+ root = None
+ keys = [9, 5, 10, 0, 6, 11, -1, 1, 2]
+ for key in keys:
+ root = avl_tree.insert(root, key)
+
+ print("Inorder Traversal of AVL tree:")
+ avl_tree.inorder_traversal(root)
+
+ key_to_delete = 10
+ root = avl_tree.delete(root, key_to_delete)
+ print("\nAfter deleting", key_to_delete)
+ avl_tree.inorder_traversal(root)
+
+class AVLTreeMenu:
+ def __init__(self):
+ self.avl_tree = AVLTree()
+ self.root = None
+
+ def display_menu(self):
+ print("AVL Tree Menu:")
+ print("1. Insert a key")
+ print("2. Delete a key")
+ print("3. Display the AVL tree")
+ print("4. Exit")
+
+ def run(self):
+ while True:
+ self.display_menu()
+ choice = input("Enter your choice: ")
+ if choice == "1":
+ key = int(input("Enter the key to insert: "))
+ self.root = self.avl_tree.insert(self.root, key)
+ print(f"Key {key} inserted.")
+ elif choice == "2":
+ key = int(input("Enter the key to delete: "))
+ self.root = self.avl_tree.delete(self.root, key)
+ print(f"Key {key} deleted.")
+ elif choice == "3":
+ print("Inorder Traversal of AVL tree:")
+ self.avl_tree.inorder_traversal(self.root)
+ elif choice == "4":
+ print("Exiting the AVL Tree Menu.")
+ break
+ else:
+ print("Invalid choice. Please enter a valid option.")
+
+if __name__ == "__main__":
+ avl_tree_menu = AVLTreeMenu()
+ avl_tree_menu.run()
diff --git a/Trie/Menu_Driven_Code_for_Tries.py b/Tree/Menu_Driven_Code_for_Tries.py
similarity index 100%
rename from Trie/Menu_Driven_Code_for_Tries.py
rename to Tree/Menu_Driven_Code_for_Tries.py
diff --git a/Tree/Morris_Traversal.py b/Tree/Morris_Traversal.py
new file mode 100644
index 0000000..46d3c91
--- /dev/null
+++ b/Tree/Morris_Traversal.py
@@ -0,0 +1,70 @@
+# Morris Traversal
+# Time Complexity = O(n)
+# Space Complexity = O(1) (Main advantage of using this traversal. Uses only constant space)
+# 1
+# / \
+# / \
+# 2 3
+# / \
+# / \
+# 4 5
+# \
+# \
+# 6
+#
+# Output --> 4 2 5 6 1 3
+
+class TreeNode:
+ def __init__(self, val=0, left=None, right=None):
+ self.val = val
+ self.left = left
+ self.right = right
+
+# Morris-inorder traversal
+def Morris_Traversal(root):
+ morris = []
+ cur = root
+
+ while cur:
+ if cur.left is None:
+ morris.append(cur.val)
+ cur = cur.right
+ else:
+ temp = cur.left
+ while temp.right and temp.right != cur:
+ temp = temp.right
+
+ if temp.right is None:
+ temp.right = cur
+ cur = cur.left
+ else:
+ temp.right = None
+ morris.append(cur.val)
+ cur = cur.right
+
+ return morris
+
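+# For instance (illustrative), the tree drawn in the header comment can be built
+# and traversed like this:
+#   root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5, right=TreeNode(6))), TreeNode(3))
+#   Morris_Traversal(root)  # -> [4, 2, 5, 6, 1, 3]
+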
+if __name__ == '__main__':
+ print("\033c", end='', flush=True)
+ # Input tree elements
+ root_val = int(input("Enter the value for the root: "))
+ root = TreeNode(root_val)
+
+ print('\n')
+ queue = [root]
+ while queue:
+ current = queue.pop(0)
+ left_val = int(input(f"Enter the value for the left child of {current.val} (Enter -1 for no child): "))
+ if left_val != -1:
+ current.left = TreeNode(left_val)
+ queue.append(current.left)
+ right_val = int(input(f"Enter the value for the right child of {current.val} (Enter -1 for no child): "))
+ if right_val != -1:
+ current.right = TreeNode(right_val)
+ queue.append(current.right)
+ print('\n')
+
+ # Morris Traversal starts
+ morris = Morris_Traversal(root)
+ print(' '.join([str(i) for i in morris]))
+