From 5c925445a7c1a202be49a481b2c71ee5675b0fae Mon Sep 17 00:00:00 2001
From: ConradKash
Date: Mon, 16 Oct 2023 20:20:55 +0300
Subject: [PATCH 01/44] naive_search_pattern algorithm

---
 .../Naive_Pattern_Searching.py | 36 +++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 Pattern Searching Algorithm/Naive_Pattern_Searching.py

diff --git a/Pattern Searching Algorithm/Naive_Pattern_Searching.py b/Pattern Searching Algorithm/Naive_Pattern_Searching.py
new file mode 100644
index 0000000..16e3dea
--- /dev/null
+++ b/Pattern Searching Algorithm/Naive_Pattern_Searching.py
+# Python3 program for the Naive Pattern
+# Searching algorithm
+
+
+def search(pat, txt):
+    M = len(pat)
+    N = len(txt)
+
+    # A loop to slide pat[] one by one
+    for i in range(N - M + 1):
+        j = 0
+
+        # For current index i, check
+        # for a pattern match
+        while(j < M):
+            if (txt[i + j] != pat[j]):
+                break
+            j += 1
+
+        if (j == M):
+            print("Pattern found at index ", i)
+
+
+# Driver Code
+if __name__ == '__main__':
+    txt = "AABAACAADAABAAABAA"
+    pat = "AABA"
+
+    # Function call
+    search(pat, txt)
+    #try it yourself
+    txt = input("Enter the text: ")
+    pat = input("Enter the pattern: ")
+
+
+    search(pat, txt)

From 1ce90b6f0c16045a3ec2eb3cee81ed868fa25baf Mon Sep 17 00:00:00 2001
From: pankaj kumar
Date: Mon, 16 Oct 2023 23:13:59 +0530
Subject: [PATCH 02/44] I have written full code of Tarjan's Algorithm inside
 graph folder with proper explanations.

---
 Graphs/Tarjan's_Algorithm.py | 82 ++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)
 create mode 100644 Graphs/Tarjan's_Algorithm.py

diff --git a/Graphs/Tarjan's_Algorithm.py b/Graphs/Tarjan's_Algorithm.py
new file mode 100644
index 0000000..cf9a356
--- /dev/null
+++ b/Graphs/Tarjan's_Algorithm.py
+""" "defaultdict" in Python
+ is a dictionary-like container from the collections
+module that provides a default value for keys that do not exist."""
+
+from collections import defaultdict
+
+# Function to run Tarjan's algorithm
+def tarjan(graph):
+
+    index = 0
+    stack = []
+    components = []
+
+    # Track visited and index for each node
+    indexes = {}
+    lowlinks = {}
+
+    def strongconnect(node):
+
+        # Set the depth index for this node to the smallest unused index
+        nonlocal index
+        indexes[node] = index
+        lowlinks[node] = index
+        index += 1
+        stack.append(node)
+
+        # Consider successors of `node` (a node with no adjacency entry
+        # simply has no successors)
+        try:
+            successors = graph[node]
+        except KeyError:
+
+            successors = []
+        for successor in successors:
+            if successor not in indexes:
+                # Successor has not yet been visited; recurse on it
+                strongconnect(successor)
+                lowlinks[node] = min(lowlinks[node],lowlinks[successor])
+            elif successor in stack:
+                # Successor is in stack, hence in current SCC
+                lowlinks[node] = min(lowlinks[node],indexes[successor])
+
+        # If `node` is a root node, pop the stack and generate an SCC
+        if lowlinks[node] == indexes[node]:
+            connected_component = []
+
+            while True:
+                successor = stack.pop()
+                connected_component.append(successor)
+                if successor == node: break
+            components.append(connected_component)
+
+    for node in graph:
+        if node not in indexes:
+            strongconnect(node)
+
+    return components
+
+# Sample graph
+graph = {
+    0: [1],
+    1: [2],
+    2: [0, 3],
+    3: [4],
+    4: [5],
+    5: [3, 6],
+    6: []
+}
+
+print(tarjan(graph))
+
+
+""" Explanation:->
+
+1) Tarjan's algorithm performs a DFS on the graph to find strongly connected components.
+ +2) It maintains an index (incremented for each visited node), a stack of visited nodes, and a lowlink value for each node (lowest index reachable from that node). + +3) When visiting a node, if any successor is in the stack, the lowlink is updated to be the minimum of its current value and the successor's index. + +4) If the lowlink of a node equals its own index, it is a root node and the current stack represents an SCC. This SCC is popped from the stack and added to the final components list. + +5) After Tarjan's finishes, the components list contains all the SCCs in the graph.""" \ No newline at end of file From 8fa7a8e14dcde637d70f042dd5bd908d7292519e Mon Sep 17 00:00:00 2001 From: Achintya Bhat Date: Tue, 17 Oct 2023 11:38:13 +0530 Subject: [PATCH 03/44] algo/code: Priority queue using max heap --- ..._for_Priority_Queue_Heap_implementation.py | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py diff --git a/Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py b/Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py new file mode 100644 index 0000000..c6e19d3 --- /dev/null +++ b/Queue/Menu_Driven_Code_for_Priority_Queue_Heap_implementation.py @@ -0,0 +1,117 @@ +class Q: + queue = [] + MaxSize = 0 + currSize = 0 + + def createQueue(self, size): + Q.MaxSize = size + Q.currSize = 0 + for i in range(0, Q.MaxSize): + Q.queue.append(0) + print('\nQueue created of size: ', len(Q.queue)) + print(Q.queue) + + def enqueue(self, e): + Q.currSize += 1 + Q.queue[Q.currSize-1] = e + Q.shiftUp(Q.currSize-1) + print(e, 'enqueued in Queue') + print('') + + def dequeue(self): + temp = Q.queue[0] + Q.currSize -= 1 + Q.queue[0] = Q.queue[Q.currSize] + Q.shiftDown(0) + print(temp, 'dequeued from Queue') + print('') + + def isFull(self): + if Q.currSize == Q.MaxSize: + return True + else: + return False + + def isEmpty(self): + if Q.currSize == 0: + return True + else: + return False + + def printQueue(self): + print('Position', '\tData') + for i in range(Q.currSize): + print(i+1,'\t\t',Q.queue[i]) + + def shiftUp(i) : + parent = (i - 1) // 2 + while (i > 0 and Q.queue[parent] < Q.queue[i]) : + + # Swap parent and current node + (Q.queue[i], Q.queue[parent]) = (Q.queue[parent], Q.queue[i]) # swap + + # Update i to parent of i + i = parent + parent = (i - 1) // 2 + + def shiftDown(i): + largest = i # Initialize largest as root + l = 2 * i + 1 # left = 2*i + 1 + r = 2 * i + 2 # right = 2*i + 2 + + # See if left child of root exists and is + # greater than root + + if l < Q.currSize and Q.queue[i] < Q.queue[l]: + largest = l + + # See if right child of root exists and is + # greater than root + + if r < Q.currSize and Q.queue[largest] < Q.queue[r]: + largest = r + + # Change root, if needed + + if largest != i: + (Q.queue[i], Q.queue[largest]) = (Q.queue[largest], Q.queue[i]) # swap + Q.shiftDown(largest) + + + +# Main Code: + +o = Q() +o.createQueue(int(input('Enter size of the queue: '))) + +while True: + print('------------') + print('1.Enqueue\n2.Dequeue\n3.Print\n0.Exit') + print('------------') + + ch = int(input('\nEnter your choice: ')) + + if ch == 1: + if o.isFull() != True: + data = int(input('\nEnter data to be enqueued: ')) + o.enqueue(data) + else: + print('\nQueue is full..\n') + + elif ch == 2: + if o.isEmpty() != True: + o.dequeue() + else: + print('\nQueue is empty..\n') + + elif ch == 3: + if o.isEmpty() != True: + o.printQueue() + else: + print('\nQueue is empty..\n') + + elif 
ch == 0:
+        break
+
+    else:
+        print('\nWrong Input..\nEnter the correct choice..!!\n')
\ No newline at end of file

From 71f7b22b70df7f0bda1b02b01115a33e0bcf0df4 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 15:52:52 +0530
Subject: [PATCH 04/44] added Gaussain-Mixture-Model.py

---
 .../Gaussain-Mixture-Model.py | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 Clustering Techniques/Gaussain-Mixture-Model.py

diff --git a/Clustering Techniques/Gaussain-Mixture-Model.py b/Clustering Techniques/Gaussain-Mixture-Model.py
new file mode 100644
index 0000000..edfcd26
--- /dev/null
+++ b/Clustering Techniques/Gaussain-Mixture-Model.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.mixture import GaussianMixture
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+gaussian_model = GaussianMixture(n_components=2)
+
+# train the model
+gaussian_model.fit(training_data)
+
+# assign each data point to a cluster
+gaussian_result = gaussian_model.predict(training_data)
+
+# get all of the unique clusters
+gaussian_clusters = unique(gaussian_result)
+
+# plot the Gaussian Mixture clusters
+for gaussian_cluster in gaussian_clusters:
+    # get data points that fall in this cluster
+    index = where(gaussian_result == gaussian_cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the Gaussian Mixture plot
+pyplot.show()

From c7ca7c810f14cebd70322a657987b0e112e182e5 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 16:04:23 +0530
Subject: [PATCH 05/44] added DB-Scan-Model.py

---
 Clustering Techniques/DBSCAN-Model.py | 37 +++++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 Clustering Techniques/DBSCAN-Model.py

diff --git a/Clustering Techniques/DBSCAN-Model.py b/Clustering Techniques/DBSCAN-Model.py
new file mode 100644
index 0000000..06b9af2
--- /dev/null
+++ b/Clustering Techniques/DBSCAN-Model.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import DBSCAN
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+dbscan_model = DBSCAN(eps=0.25, min_samples=9)
+
+# train the model and assign each data point to a cluster
+# (DBSCAN has no separate predict method, so fit_predict is used)
+dbscan_result = dbscan_model.fit_predict(training_data)
+
+# get all of the unique clusters
+dbscan_clusters = unique(dbscan_result)
+
+# plot the DBSCAN clusters
+for dbscan_cluster in dbscan_clusters:
+    # get data points that fall in this cluster
+    index = where(dbscan_result == dbscan_cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the DBSCAN plot
+pyplot.show()

From d9030315d0a90202dca82b02a84bf60844d2368f Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 16:15:06 +0530
Subject: [PATCH 06/44] added Birch-Model.py

---
 Clustering Techniques/BIRCH-Algorithm.py | 37 ++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 Clustering Techniques/BIRCH-Algorithm.py

diff --git a/Clustering Techniques/BIRCH-Algorithm.py b/Clustering Techniques/BIRCH-Algorithm.py
new file mode 100644
index 0000000..c209678
--- /dev/null
+++ b/Clustering Techniques/BIRCH-Algorithm.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import Birch
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+birch_model = Birch(threshold=0.03, n_clusters=2)
+
+# train the model
+birch_model.fit(training_data)
+
+# assign each data point to a cluster
+birch_result = birch_model.predict(training_data)
+
+# get all of the unique clusters
+birch_clusters = unique(birch_result)
+
+# plot the BIRCH clusters
+for birch_cluster in birch_clusters:
+    # get data points that fall in this cluster
+    index = where(birch_result == birch_cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the BIRCH plot
+pyplot.show()

From 327205948907bde7f28f9279c1c840c1d225db91 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 17:47:33 +0530
Subject: [PATCH 07/44] added Affinity-Propagation-Clustering-Algorithm.py

---
 ...finity-Propagation-Clustering-ALgorithm.py | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 Clustering Techniques/Affinity-Propagation-Clustering-ALgorithm.py

diff --git a/Clustering Techniques/Affinity-Propagation-Clustering-ALgorithm.py b/Clustering Techniques/Affinity-Propagation-Clustering-ALgorithm.py
new file mode 100644
index 0000000..f111ff9
--- /dev/null
+++ b/Clustering Techniques/Affinity-Propagation-Clustering-ALgorithm.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import AffinityPropagation
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+model = AffinityPropagation(damping=0.7)
+
+# train the model
+model.fit(training_data)
+
+# assign each data point to a cluster
+result = model.predict(training_data)
+
+# get all of the unique clusters
+clusters = unique(result)
+
+# plot the clusters
+for cluster in clusters:
+    # get data points that fall in this cluster
+    index = where(result == cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the plot
+pyplot.show()

From 7cbc2f3260498cb0ef48e47f6b17453384d6a56a Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 18:09:33 +0530
Subject: [PATCH 08/44] added Mean-Shift-Clustering-Algorithm.py

---
 .../Mean-Shift-Clustering-algorithm.py | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 Clustering Techniques/Mean-Shift-Clustering-algorithm.py

diff --git a/Clustering Techniques/Mean-Shift-Clustering-algorithm.py b/Clustering Techniques/Mean-Shift-Clustering-algorithm.py
new file mode 100644
index 0000000..230c67b
--- /dev/null
+++ b/Clustering Techniques/Mean-Shift-Clustering-algorithm.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import MeanShift
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+mean_model = MeanShift()
+
+# assign each data point to a cluster
+mean_result = mean_model.fit_predict(training_data)
+
+# get all of the unique clusters
+mean_clusters = unique(mean_result)
+
+# plot the Mean-Shift clusters
+for mean_cluster in mean_clusters:
+    # get data points that fall in this cluster
+    index = where(mean_result == mean_cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the Mean-Shift plot
+pyplot.show()

From b4dba196dffe26780ad17fba304bcbe8d99f942c Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 18:24:41 +0530
Subject: [PATCH 09/44] added OPTICS-algorithm.py

---
 Clustering Techniques/OPTICS-algorithm.py | 34 +++++++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 Clustering Techniques/OPTICS-algorithm.py

diff --git a/Clustering Techniques/OPTICS-algorithm.py b/Clustering Techniques/OPTICS-algorithm.py
new file mode 100644
index 0000000..c198fa3
--- /dev/null
+++ b/Clustering Techniques/OPTICS-algorithm.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import OPTICS
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+optics_model = OPTICS(eps=0.75, min_samples=10)
+
+# assign each data point to a cluster
+optics_result = optics_model.fit_predict(training_data)
+
+# get all of the unique clusters
+optics_clusters = unique(optics_result)
+
+# plot the OPTICS clusters
+for optics_cluster in optics_clusters:
+    # get data points that fall in this cluster
+    index = where(optics_result == optics_cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the OPTICS plot
+pyplot.show()

From 56d3e9eed7fbb5c71b7f50b6c50a4db3b3370888 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 18:56:39 +0530
Subject: [PATCH 10/44] added Agglomerative-Clustering-Algorithm.py

---
 .../Agglomerative-Clustering-Algorithm.py | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 Clustering Techniques/Agglomerative-Clustering-Algorithm.py

diff --git a/Clustering Techniques/Agglomerative-Clustering-Algorithm.py b/Clustering Techniques/Agglomerative-Clustering-Algorithm.py
new file mode 100644
index 0000000..9786ab7
--- /dev/null
+++ b/Clustering Techniques/Agglomerative-Clustering-Algorithm.py
+from numpy import unique
+from numpy import where
+from matplotlib import pyplot
+from sklearn.datasets import make_classification
+from sklearn.cluster import AgglomerativeClustering
+
+# initialize the data set we'll work with
+training_data, _ = make_classification(
+    n_samples=1000,
+    n_features=2,
+    n_informative=2,
+    n_redundant=0,
+    n_clusters_per_class=1,
+    random_state=4
+)
+
+# define the model
+agglomerative_model = AgglomerativeClustering(n_clusters=2)
+
+# assign each data point to a cluster
+agglomerative_result = agglomerative_model.fit_predict(training_data)
+
+# get all of the unique clusters
+agglomerative_clusters = unique(agglomerative_result)
+
+# plot the clusters
+for agglomerative_cluster in agglomerative_clusters:
+    # get data points that fall in this cluster
+    index = where(agglomerative_result == agglomerative_cluster)
+    # make the plot
+    pyplot.scatter(training_data[index, 0], training_data[index, 1])
+
+# show the Agglomerative Hierarchy plot
+pyplot.show()

From 46b945e67887776d9a5bc2e5938523f410e5d413 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 19:08:18 +0530
Subject: [PATCH 11/44] added Regression folder and added Simple Linear
 Regression

---
 .../simple-linear-regression.py | 54 +++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 Regression-Techniques/simple-linear-regression.py

diff --git a/Regression-Techniques/simple-linear-regression.py b/Regression-Techniques/simple-linear-regression.py
new file mode 100644
index 0000000..0f6da43
--- /dev/null
+++ b/Regression-Techniques/simple-linear-regression.py
+import numpy as np
+import matplotlib.pyplot as plt
+
+def estimate_coef(x, y):
+    # number of observations/points
+    n = np.size(x)
+
+    # mean of x and y vector
+    m_x = np.mean(x)
+    m_y = np.mean(y)
+
+    # calculating cross-deviation and deviation about x
+    SS_xy = np.sum(y*x) - n*m_y*m_x
+    SS_xx = np.sum(x*x) - n*m_x*m_x
+
+    # calculating regression coefficients
+    b_1 = SS_xy / SS_xx
+    b_0 = m_y - b_1*m_x
+
+    return (b_0, b_1)
+
+def plot_regression_line(x, y, b):
+    # plotting the actual points as scatter plot
+    plt.scatter(x, y, color = "m",
+                marker = "o", s = 30)
+
+    # predicted response vector
+    y_pred = b[0] + b[1]*x
+
+    # plotting the regression line
+    plt.plot(x, y_pred, color = "g")
+
+    # putting labels
+    plt.xlabel('x')
+    plt.ylabel('y')
+
+    # function to show plot
+    plt.show()
+
+def main():
+    # observations / data
+    x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    y = np.array([1, 3, 2, 5, 7, 8, 8, 9, 10, 12])
+
+    # estimating coefficients
+    b = estimate_coef(x, y)
+    print("Estimated coefficients:\nb_0 = {} \
+          \nb_1 = {}".format(b[0], b[1]))
+
+    # plotting regression line
+    plot_regression_line(x, y, b)
+
+if __name__ == "__main__":
+    main()

From f53094a1f4d7dfad2b8d40e011060335d2fcb638 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 19:18:03 +0530
Subject: [PATCH 12/44] added Regression folder and added Multiple Linear
 Regression

---
 .../Multiple-Linear-Regression.py | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 Regression-Techniques/Multiple-Linear-Regression.py

diff --git a/Regression-Techniques/Multiple-Linear-Regression.py b/Regression-Techniques/Multiple-Linear-Regression.py
new file mode 100644
index 0000000..e2904fe
--- /dev/null
+++ b/Regression-Techniques/Multiple-Linear-Regression.py
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def mse(coef, x, y):
+    return np.mean((np.dot(x, coef) - y)**2)/2
+
+
+def gradients(coef, x, y):
+    return np.mean(x.transpose()*(np.dot(x, coef) - y), axis=1)
+
+
+def multilinear_regression(coef, x, y, lr, b1=0.9, b2=0.999, epsilon=1e-8):
+    prev_error = 0
+    m_coef = np.zeros(coef.shape)
+    v_coef = np.zeros(coef.shape)
+    moment_m_coef = np.zeros(coef.shape)
+    moment_v_coef = np.zeros(coef.shape)
+    t = 0
+
+    while True:
+        error = mse(coef, x, y)
+        if abs(error - prev_error) <= epsilon:
+            break
+        prev_error = error
+        grad = gradients(coef, x, y)
+        t += 1
+        m_coef = b1 * m_coef + (1-b1)*grad
+        v_coef = b2 * v_coef + (1-b2)*grad**2
+        moment_m_coef = m_coef / (1-b1**t)
+        moment_v_coef = v_coef / (1-b2**t)
+
+        # as in the Adam update, the small constant belongs in the denominator
+        delta = ((lr / (moment_v_coef**0.5 + 1e-8)) *
+                 (b1 * moment_m_coef + (1-b1)*grad/(1-b1**t)))
+
+        coef = np.subtract(coef, delta)
+    return coef
+
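+# NOTE: the driver below assumes a design matrix `x` whose first column is all
+# ones (the intercept term) and a target vector `y`, neither of which the
+# original snippet defines. A minimal sketch with synthetic data; the true
+# coefficients 1, 2, 3 and the noise scale are arbitrary assumptions:
+np.random.seed(0)
+x = np.hstack((np.ones((100, 1)), np.random.rand(100, 2)))
+y = np.dot(x, np.array([1.0, 2.0, 3.0])) + np.random.normal(0, 0.1, 100)
+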
+coef = np.array([0, 0, 0])
+c = multilinear_regression(coef, x, y, 1e-1)
+fig = plt.figure()
+ax = fig.add_subplot(projection='3d')
+
+ax.scatter(x[:, 1], x[:, 2], y, label='y',
+           s=5, color="dodgerblue")
+
+ax.scatter(x[:, 1], x[:, 2], c[0] + c[1]*x[:, 1] + c[2]*x[:, 2],
+           label='regression', s=5, color="orange")
+
+ax.view_init(45, 0)
+ax.legend()
+plt.show()

From 0e0e6fa54e01e033df6ca7be29934d80727888fa Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 19:23:44 +0530
Subject: [PATCH 13/44] added Regression folder and added Polynomial
 Regression

---
 .../Polynomial-Regression.py | 137 ++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100644 Regression-Techniques/Polynomial-Regression.py

diff --git a/Regression-Techniques/Polynomial-Regression.py b/Regression-Techniques/Polynomial-Regression.py
new file mode 100644
index 0000000..d56f243
--- /dev/null
+++ b/Regression-Techniques/Polynomial-Regression.py
+# Importing libraries
+import numpy as np
+import math
+import matplotlib.pyplot as plt
+
+# Univariate Polynomial Regression
+class PolynomialRegression() :
+
+    def __init__( self, degree, learning_rate, iterations ) :
+        self.degree = degree
+        self.learning_rate = learning_rate
+        self.iterations = iterations
+
+    # function to transform X
+    def transform( self, X ) :
+        # initialize X_transform
+        X_transform = np.ones( ( X.shape[0], 1 ) )
+
+        for j in range( self.degree + 1 ) :
+            if j != 0 :
+                x_pow = np.power( X, j )
+                # append x_pow to X_transform
+                X_transform = np.append( X_transform, x_pow.reshape( -1, 1 ), axis = 1 )
+
+        return X_transform
+
+    # function to normalize X_transform
+    def normalize( self, X ) :
+        X[:, 1:] = ( X[:, 1:] - np.mean( X[:, 1:], axis = 0 ) ) / np.std( X[:, 1:], axis = 0 )
+        return X
+
+    # model training
+    def fit( self, X, Y ) :
+        self.X = X
+        self.Y = Y
+        self.m, self.n = self.X.shape
+
+        # weight initialization
+        self.W = np.zeros( self.degree + 1 )
+
+        # transform X for polynomial h( x ) = w0 * x^0 + w1 * x^1 + w2 * x^2 + ........+ wn * x^n
+        X_transform = self.transform( self.X )
+
+        # normalize X_transform
+        X_normalize = self.normalize( X_transform )
+
+        # gradient descent learning
+        for i in range( self.iterations ) :
+            h = self.predict( self.X )
+            error = h - self.Y
+
+            # update weights
+            self.W = self.W - self.learning_rate * ( 1 / self.m ) * np.dot( X_normalize.T, error )
+
+        return self
+
+    # predict
+    def predict( self, X ) :
+        # transform X for polynomial h( x ) = w0 * x^0 + w1 * x^1 + w2 * x^2 + ........+ wn * x^n
+        X_transform = self.transform( X )
+        X_normalize = self.normalize( X_transform )
+
+        # the weights were learned on normalized features, so predict on them too
+        return np.dot( X_normalize, self.W )
+
+
+# Driver code
+def main() :
+
+    # Create dataset
+    X = np.array( [ [1], [2], [3], [4], [5], [6], [7] ] )
+    Y = np.array( [ 45000, 50000, 60000, 80000, 110000, 150000, 200000 ] )
+
+    # model training
+    model = PolynomialRegression( degree = 2, learning_rate = 0.01, iterations = 500 )
+    model.fit( X, Y )
+
+    # Prediction on training set
+    Y_pred = model.predict( X )
+
+    # Visualization
+    plt.scatter( X, Y, color = 'blue' )
+    plt.plot( X, Y_pred, color = 'orange' )
+    plt.title( 'X vs Y' )
+    plt.xlabel( 'X' )
+    plt.ylabel( 'Y' )
+    plt.show()
+
+
+if __name__ == "__main__" :
+    main()

From ede7bd7f4416ff4d48db9d65f5f420ba230df9e6 Mon Sep 17 00:00:00 2001
From: Aditya D
Date: Tue, 17 Oct 2023 19:29:27 +0530
Subject: [PATCH 14/44] added Regression folder and added Bayesian
Regression --- Regression-Techniques/Bayesian-Regression.py | 106 +++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 Regression-Techniques/Bayesian-Regression.py diff --git a/Regression-Techniques/Bayesian-Regression.py b/Regression-Techniques/Bayesian-Regression.py new file mode 100644 index 0000000..09c97a2 --- /dev/null +++ b/Regression-Techniques/Bayesian-Regression.py @@ -0,0 +1,106 @@ +#Import the necessary libraries +import torch +import pyro +import pyro.distributions as dist +from pyro.infer import SVI, Trace_ELBO, Predictive +from pyro.optim import Adam +import matplotlib.pyplot as plt +import seaborn as sns + + +# Generate some sample data +torch.manual_seed(0) +X = torch.linspace(0, 10, 100) +true_slope = 2 +true_intercept = 1 +Y = true_intercept + true_slope * X + torch.randn(100) + +# Define the Bayesian regression model +def model(X, Y): + # Priors for the parameters + slope = pyro.sample("slope", dist.Normal(0, 10)) + intercept = pyro.sample("intercept", dist.Normal(0, 10)) + sigma = pyro.sample("sigma", dist.HalfNormal(1)) + + # Expected value of the outcome + mu = intercept + slope * X + + # Likelihood (sampling distribution) of the observations + with pyro.plate("data", len(X)): + pyro.sample("obs", dist.Normal(mu, sigma), obs=Y) + +# Run Bayesian inference using SVI (Stochastic Variational Inference) +def guide(X, Y): + # Approximate posterior distributions for the parameters + slope_loc = pyro.param("slope_loc", torch.tensor(0.0)) + slope_scale = pyro.param("slope_scale", torch.tensor(1.0), + constraint=dist.constraints.positive) + intercept_loc = pyro.param("intercept_loc", torch.tensor(0.0)) + intercept_scale = pyro.param("intercept_scale", torch.tensor(1.0), + constraint=dist.constraints.positive) + sigma_loc = pyro.param("sigma_loc", torch.tensor(1.0), + constraint=dist.constraints.positive) + + # Sample from the approximate posterior distributions + slope = pyro.sample("slope", dist.Normal(slope_loc, slope_scale)) + intercept = pyro.sample("intercept", dist.Normal(intercept_loc, + intercept_scale)) + sigma = pyro.sample("sigma", dist.HalfNormal(sigma_loc)) + +# Initialize the SVI and optimizer +optim = Adam({"lr": 0.01}) +svi = SVI(model, guide, optim, loss=Trace_ELBO()) + +# Run the inference loop +num_iterations = 1000 +for i in range(num_iterations): + loss = svi.step(X, Y) + if (i + 1) % 100 == 0: + print(f"Iteration {i + 1}/{num_iterations} - Loss: {loss}") + +# Obtain posterior samples using Predictive +predictive = Predictive(model, guide=guide, num_samples=1000) +posterior = predictive(X, Y) + +# Extract the parameter samples +slope_samples = posterior["slope"] +intercept_samples = posterior["intercept"] +sigma_samples = posterior["sigma"] + +# Compute the posterior means +slope_mean = slope_samples.mean() +intercept_mean = intercept_samples.mean() +sigma_mean = sigma_samples.mean() + +# Print the estimated parameters +print("Estimated Slope:", slope_mean.item()) +print("Estimated Intercept:", intercept_mean.item()) +print("Estimated Sigma:", sigma_mean.item()) + + +# Create subplots +fig, axs = plt.subplots(1, 3, figsize=(15, 5)) + +# Plot the posterior distribution of the slope +sns.kdeplot(slope_samples, shade=True, ax=axs[0]) +axs[0].set_title("Posterior Distribution of Slope") +axs[0].set_xlabel("Slope") +axs[0].set_ylabel("Density") + +# Plot the posterior distribution of the intercept +sns.kdeplot(intercept_samples, shade=True, ax=axs[1]) +axs[1].set_title("Posterior Distribution of Intercept") +axs[1].set_xlabel("Intercept") 
+axs[1].set_ylabel("Density") + +# Plot the posterior distribution of sigma +sns.kdeplot(sigma_samples, shade=True, ax=axs[2]) +axs[2].set_title("Posterior Distribution of Sigma") +axs[2].set_xlabel("Sigma") +axs[2].set_ylabel("Density") + +# Adjust the layout +plt.tight_layout() + +# Show the plot +plt.show() From 705c6a283cf632ceaa25d0586d7affa76926e2ca Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 19:35:10 +0530 Subject: [PATCH 15/44] added Quantile Regression --- Regression-Techniques/Quantile-Regression.py | 47 ++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 Regression-Techniques/Quantile-Regression.py diff --git a/Regression-Techniques/Quantile-Regression.py b/Regression-Techniques/Quantile-Regression.py new file mode 100644 index 0000000..7429810 --- /dev/null +++ b/Regression-Techniques/Quantile-Regression.py @@ -0,0 +1,47 @@ +# Python program to visualize quantile regression + +# Importing libraries +import numpy as np +import pandas as pd +import statsmodels.api as sm +import statsmodels.formula.api as smf +import matplotlib.pyplot as plt + +np.random.seed(0) + +# Number of rows +rows = 20 + +# Constructing Distance column +Distance = np.random.uniform(1, 10, rows) + +# Constructing Emission column +Emission = 40 + Distance + np.random.normal(loc=0, + scale=.25*Distance, + size=20) + +# Creating a dataset +df = pd.DataFrame({'Distance': Distance, + 'Emission': Emission}) + +# #fit the model +model = smf.quantreg('Emission ~ Distance', + df).fit(q=0.7) + +# define figure and axis +fig, ax = plt.subplots(figsize=(10, 8)) + +# get y values +y_line = lambda a, b: a + Distance +y = y_line(model.params['Intercept'], + model.params['Distance']) + +# Plotting data points with the help +# pf quantile regression equation +ax.plot(Distance, y, color='black') +ax.scatter(Distance, Emission, alpha=.3) +ax.set_xlabel('Distance Traveled', fontsize=20) +ax.set_ylabel('Emission Generated', fontsize=20) + +# Save the plot +fig.savefig('quantile_regression.png') From c5905e6826a238cd2754d1e88d37ef5296635c67 Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 19:40:11 +0530 Subject: [PATCH 16/44] added Isotonic Regression --- Regression-Techniques/Isotonic-Regression.py | 23 ++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 Regression-Techniques/Isotonic-Regression.py diff --git a/Regression-Techniques/Isotonic-Regression.py b/Regression-Techniques/Isotonic-Regression.py new file mode 100644 index 0000000..b2fc215 --- /dev/null +++ b/Regression-Techniques/Isotonic-Regression.py @@ -0,0 +1,23 @@ +from sklearn.isotonic import IsotonicRegression +import matplotlib.pyplot as plt +from matplotlib.collections import LineCollection + +ir = IsotonicRegression() # create an instance of the IsotonicRegression class + +# Fit isotonic regression model +y_ir = ir.fit_transform(x, y) # fit the model and transform the data +print('Isotonic Regression Predictions :\n',y_ir) + +# Create LineCollection for the isotonic regression line +lines = [[[i, y_ir[i]] for i in range(n)]] + +# Line to measure the difference between actual and target value +lc = LineCollection(lines) + +plt.plot(x, y_ir, '-', markersize=10, label='isotonic regression') + +plt.gca().add_collection(lc) +plt.legend() # add a legend + +plt.title("Isotonic Regression") +plt.show() From bddbc37c893a06a9308d540a47cb7a52ca1b0637 Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 19:45:30 +0530 Subject: [PATCH 17/44] added Stepwise Regression --- 
Regression-Techniques/Stepwise-Regression.py | 48 ++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 Regression-Techniques/Stepwise-Regression.py diff --git a/Regression-Techniques/Stepwise-Regression.py b/Regression-Techniques/Stepwise-Regression.py new file mode 100644 index 0000000..3c91866 --- /dev/null +++ b/Regression-Techniques/Stepwise-Regression.py @@ -0,0 +1,48 @@ +import pandas as pd +import numpy as np +from sklearn import linear_model +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score +from mlxtend.feature_selection import SequentialFeatureSelector + +# Define the array of data +data = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]]) + +# Convert the array into a dataframe +df = pd.DataFrame(data) + +# Select the features and target +X = df.iloc[:, :-1] +y = df.iloc[:, -1] + +# Perform stepwise regression +sfs = SequentialFeatureSelector(linear_model.LogisticRegression(), + k_features=3, + forward=True, + scoring='accuracy', + cv=None) +selected_features = sfs.fit(X, y) + +# Create a dataframe with only the selected features +selected_columns = [0, 1, 2, 3] +df_selected = df[selected_columns] + +# Split the data into train and test sets +X_train, X_test,\ + y_train, y_test = train_test_split( + df_selected, y, + test_size=0.3, + random_state=42) + +# Fit a logistic regression model using the selected features +logreg = linear_model.LogisticRegression() +logreg.fit(X_train, y_train) + +# Make predictions using the test set +y_pred = logreg.predict(X_test) + +# Evaluate the model performance +print(y_pred) + From ede7bd7f4416ff4d48db9d65f5f420ba230df9e6 Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 19:51:46 +0530 Subject: [PATCH 18/44] added Least Angle Regression --- .../Least-Angle-Regression.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 Regression-Techniques/Least-Angle-Regression.py diff --git a/Regression-Techniques/Least-Angle-Regression.py b/Regression-Techniques/Least-Angle-Regression.py new file mode 100644 index 0000000..f5d3593 --- /dev/null +++ b/Regression-Techniques/Least-Angle-Regression.py @@ -0,0 +1,25 @@ +# Importing modules that are required + +from sklearn.datasets import load_boston +from sklearn.linear_model import LassoLars +from sklearn.metrics import r2_score +from sklearn.model_selection import train_test_split + +# Loading dataset +dataset = load_boston() +X = dataset.data +y = dataset.target + +# Splitting training and testing data +X_train, X_test, y_train, y_test = train_test_split(X, y, + test_size = 0.15, random_state = 42) + +# Creating and fitting the regressor +regressor = LassoLars(alpha = 0.1) +regressor.fit(X_train, y_train) + + +# Evaluating model +prediction = regressor.predict(X_test) + +print(f"r2 Score of test set : {r2_score(y_test, prediction)}") From de7c9843fd7236795779b6a1f19debc86c0ae866 Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 19:54:42 +0530 Subject: [PATCH 19/44] added Logistic Regression --- Regression-Techniques/Logistic-Regression.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 Regression-Techniques/Logistic-Regression.py diff --git a/Regression-Techniques/Logistic-Regression.py b/Regression-Techniques/Logistic-Regression.py new file mode 100644 index 0000000..807d40a --- /dev/null +++ b/Regression-Techniques/Logistic-Regression.py @@ -0,0 +1,20 @@ +# import the necessary libraries +from sklearn.datasets import load_breast_cancer +from 
sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score +# load the breast cancer dataset +X, y = load_breast_cancer(return_X_y=True) +# split the train and test dataset +X_train, X_test,\ + y_train, y_test = train_test_split(X, y, + test_size=0.20, + random_state=23) +# LogisticRegression +clf = LogisticRegression(random_state=0) +clf.fit(X_train, y_train) +# Prediction +y_pred = clf.predict(X_test) + +acc = accuracy_score(y_test, y_pred) +print("Logistic Regression model accuracy (in %):", acc*100) From 37be03e24671f35f5021fd2aa7dcd743e21f22e2 Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 19:57:40 +0530 Subject: [PATCH 20/44] added Lasso Regression --- Regression-Techniques/Lasso-Regression.py | 140 ++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 Regression-Techniques/Lasso-Regression.py diff --git a/Regression-Techniques/Lasso-Regression.py b/Regression-Techniques/Lasso-Regression.py new file mode 100644 index 0000000..7b9626b --- /dev/null +++ b/Regression-Techniques/Lasso-Regression.py @@ -0,0 +1,140 @@ +# Importing libraries + +import numpy as np + +import pandas as pd + +from sklearn.model_selection import train_test_split + +import matplotlib.pyplot as plt + +# Lasso Regression + +class LassoRegression() : + + def __init__( self, learning_rate, iterations, l1_penality ) : + + self.learning_rate = learning_rate + + self.iterations = iterations + + self.l1_penality = l1_penality + + # Function for model training + + def fit( self, X, Y ) : + + # no_of_training_examples, no_of_features + + self.m, self.n = X.shape + + # weight initialization + + self.W = np.zeros( self.n ) + + self.b = 0 + + self.X = X + + self.Y = Y + + # gradient descent learning + + for i in range( self.iterations ) : + + self.update_weights() + + return self + + # Helper function to update weights in gradient descent + + def update_weights( self ) : + + Y_pred = self.predict( self.X ) + + # calculate gradients + + dW = np.zeros( self.n ) + + for j in range( self.n ) : + + if self.W[j] > 0 : + + dW[j] = ( - ( 2 * ( self.X[:, j] ).dot( self.Y - Y_pred ) ) + + + self.l1_penality ) / self.m + + else : + + dW[j] = ( - ( 2 * ( self.X[:, j] ).dot( self.Y - Y_pred ) ) + + - self.l1_penality ) / self.m + + + db = - 2 * np.sum( self.Y - Y_pred ) / self.m + + # update weights + + self.W = self.W - self.learning_rate * dW + + self.b = self.b - self.learning_rate * db + + return self + + # Hypothetical function h( x ) + + def predict( self, X ) : + + return X.dot( self.W ) + self.b + + +def main() : + + # Importing dataset + + df = pd.read_csv( "salary_data.csv" ) + + X = df.iloc[:, :-1].values + + Y = df.iloc[:, 1].values + + # Splitting dataset into train and test set + + X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size = 1 / 3, random_state = 0 ) + + # Model training + + model = LassoRegression( iterations = 1000, learning_rate = 0.01, l1_penality = 500 ) + + model.fit( X_train, Y_train ) + + # Prediction on test set + + Y_pred = model.predict( X_test ) + + print( "Predicted values ", np.round( Y_pred[:3], 2 ) ) + + print( "Real values ", Y_test[:3] ) + + print( "Trained W ", round( model.W[0], 2 ) ) + + print( "Trained b ", round( model.b, 2 ) ) + + # Visualization on test set + + plt.scatter( X_test, Y_test, color = 'blue' ) + + plt.plot( X_test, Y_pred, color = 'orange' ) + + plt.title( 'Salary vs Experience' ) + + plt.xlabel( 'Years of Experience' ) + + plt.ylabel( 
'Salary' ) + + plt.show() + + +if __name__ == "__main__" : + + main() From f0237235996276aa9d95db8f4b6226218bcfd42f Mon Sep 17 00:00:00 2001 From: Aditya D Date: Tue, 17 Oct 2023 20:00:14 +0530 Subject: [PATCH 21/44] added Ridge Regression --- Regression-Techniques/Ridge-Regression.py | 91 +++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 Regression-Techniques/Ridge-Regression.py diff --git a/Regression-Techniques/Ridge-Regression.py b/Regression-Techniques/Ridge-Regression.py new file mode 100644 index 0000000..4f3df50 --- /dev/null +++ b/Regression-Techniques/Ridge-Regression.py @@ -0,0 +1,91 @@ +# Importing libraries + +import numpy as np +import pandas as pd +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt + +# Ridge Regression + +class RidgeRegression() : + + def __init__( self, learning_rate, iterations, l2_penality ) : + + self.learning_rate = learning_rate + self.iterations = iterations + self.l2_penality = l2_penality + + # Function for model training + def fit( self, X, Y ) : + + # no_of_training_examples, no_of_features + self.m, self.n = X.shape + + # weight initialization + self.W = np.zeros( self.n ) + + self.b = 0 + self.X = X + self.Y = Y + + # gradient descent learning + + for i in range( self.iterations ) : + self.update_weights() + return self + + # Helper function to update weights in gradient descent + + def update_weights( self ) : + Y_pred = self.predict( self.X ) + + # calculate gradients + dW = ( - ( 2 * ( self.X.T ).dot( self.Y - Y_pred ) ) + + ( 2 * self.l2_penality * self.W ) ) / self.m + db = - 2 * np.sum( self.Y - Y_pred ) / self.m + + # update weights + self.W = self.W - self.learning_rate * dW + self.b = self.b - self.learning_rate * db + return self + + # Hypothetical function h( x ) + def predict( self, X ) : + return X.dot( self.W ) + self.b + +# Driver code + +def main() : + + # Importing dataset + df = pd.read_csv( "salary_data.csv" ) + X = df.iloc[:, :-1].values + Y = df.iloc[:, 1].values + + # Splitting dataset into train and test set + X_train, X_test, Y_train, Y_test = train_test_split( X, Y, + + test_size = 1 / 3, random_state = 0 ) + + # Model training + model = RidgeRegression( iterations = 1000, + learning_rate = 0.01, l2_penality = 1 ) + model.fit( X_train, Y_train ) + + # Prediction on test set + Y_pred = model.predict( X_test ) + print( "Predicted values ", np.round( Y_pred[:3], 2 ) ) + print( "Real values ", Y_test[:3] ) + print( "Trained W ", round( model.W[0], 2 ) ) + print( "Trained b ", round( model.b, 2 ) ) + + # Visualization on test set + plt.scatter( X_test, Y_test, color = 'blue' ) + plt.plot( X_test, Y_pred, color = 'orange' ) + plt.title( 'Salary vs Experience' ) + plt.xlabel( 'Years of Experience' ) + plt.ylabel( 'Salary' ) + plt.show() + +if __name__ == "__main__" : + main() From 0496d0e15b9d6a3d9da01e96d81339e6606a65fa Mon Sep 17 00:00:00 2001 From: Himanshu Agarwal Date: Tue, 17 Oct 2023 20:19:41 +0530 Subject: [PATCH 22/44] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7ca53f4..369bbdf 100644 --- a/README.md +++ b/README.md @@ -56,8 +56,8 @@ This project follows the [MIT LICENSE](https://choosealicense.com/licenses/mit/) ## Contributors ✨ - - + +
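
The gradient-descent RidgeRegression class in PATCH 21 above can be sanity-checked against scikit-learn's closed-form solver. A minimal sketch, assuming scikit-learn is installed and the same salary_data.csv layout used by the patch; the penalty scaling differs slightly between the two, so expect close but not identical coefficients:

import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split

df = pd.read_csv("salary_data.csv")
X = df.iloc[:, :-1].values
Y = df.iloc[:, 1].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1/3, random_state=0)

# alpha plays the role of l2_penality in the hand-rolled class
reference = Ridge(alpha=1.0)
reference.fit(X_train, Y_train)
print("sklearn W", np.round(reference.coef_, 2), "b", round(reference.intercept_, 2))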
From 0760095d3a4ce8722cabd6fab2707cafb987b36b Mon Sep 17 00:00:00 2001 From: Himanshu Agarwal Date: Tue, 17 Oct 2023 20:21:38 +0530 Subject: [PATCH 23/44] [UPDATED] Directory Structure --- ...-ALgorithm.py => Affinity_Propagation_Clustering_Algorithm.py} | 0 ...stering-Algorithm.py => Agglomerative_Clustering_Algorithm.py} | 0 Clustering Techniques/{BIRCH-Algorithm.py => Birch_Algorithm.py} | 0 Clustering Techniques/{DBSCAN-Model.py => DBScan_Model.py} | 0 .../{Gaussain-Mixture-Model.py => Gaussain_Mixture_Model.py} | 0 ...Clustering-algorithm.py => Mean_Shift_Clustering_Algorithm.py} | 0 .../{OPTICS-algorithm.py => Optics_Algorithm.py} | 0 ..._salesman_problem_solver.py => Travelling_Salesman_Problem.py} | 0 .../Bayesian_Regression.py | 0 .../Isotonic_Regression.py | 0 .../Lasso_Regression.py | 0 .../Least_Angle_Regression.py | 0 .../Linear_Regression.py | 0 .../Logistic_Regression.py | 0 .../Polynomial_Regression.py | 0 .../Quantile_Regression.py | 0 .../Ridge_Regression.py | 0 .../Simple_Linear_Regression.py | 0 .../Stepwise_Regression.py | 0 {Trie => Tree}/Menu_Driven_Code_for_Tries.py | 0 20 files changed, 0 insertions(+), 0 deletions(-) rename Clustering Techniques/{Affinity-Propagation-Clustering-ALgorithm.py => Affinity_Propagation_Clustering_Algorithm.py} (100%) rename Clustering Techniques/{Agglomerative-Clustering-Algorithm.py => Agglomerative_Clustering_Algorithm.py} (100%) rename Clustering Techniques/{BIRCH-Algorithm.py => Birch_Algorithm.py} (100%) rename Clustering Techniques/{DBSCAN-Model.py => DBScan_Model.py} (100%) rename Clustering Techniques/{Gaussain-Mixture-Model.py => Gaussain_Mixture_Model.py} (100%) rename Clustering Techniques/{Mean-Shift-Clustering-algorithm.py => Mean_Shift_Clustering_Algorithm.py} (100%) rename Clustering Techniques/{OPTICS-algorithm.py => Optics_Algorithm.py} (100%) rename Graphs/{travelling_salesman_problem_solver.py => Travelling_Salesman_Problem.py} (100%) rename Regression-Techniques/Bayesian-Regression.py => Regression Techniques/Bayesian_Regression.py (100%) rename Regression-Techniques/Isotonic-Regression.py => Regression Techniques/Isotonic_Regression.py (100%) rename Regression-Techniques/Lasso-Regression.py => Regression Techniques/Lasso_Regression.py (100%) rename Regression-Techniques/Least-Angle-Regression.py => Regression Techniques/Least_Angle_Regression.py (100%) rename Regression-Techniques/Multiple-Linear-Regression.py => Regression Techniques/Linear_Regression.py (100%) rename Regression-Techniques/Logistic-Regression.py => Regression Techniques/Logistic_Regression.py (100%) rename Regression-Techniques/Polynomial-Regression.py => Regression Techniques/Polynomial_Regression.py (100%) rename Regression-Techniques/Quantile-Regression.py => Regression Techniques/Quantile_Regression.py (100%) rename Regression-Techniques/Ridge-Regression.py => Regression Techniques/Ridge_Regression.py (100%) rename Regression-Techniques/simple-linear-regression.py => Regression Techniques/Simple_Linear_Regression.py (100%) rename Regression-Techniques/Stepwise-Regression.py => Regression Techniques/Stepwise_Regression.py (100%) rename {Trie => Tree}/Menu_Driven_Code_for_Tries.py (100%) diff --git a/Clustering Techniques/Affinity-Propagation-Clustering-ALgorithm.py b/Clustering Techniques/Affinity_Propagation_Clustering_Algorithm.py similarity index 100% rename from Clustering Techniques/Affinity-Propagation-Clustering-ALgorithm.py rename to Clustering Techniques/Affinity_Propagation_Clustering_Algorithm.py diff --git a/Clustering 
Techniques/Agglomerative-Clustering-Algorithm.py b/Clustering Techniques/Agglomerative_Clustering_Algorithm.py similarity index 100% rename from Clustering Techniques/Agglomerative-Clustering-Algorithm.py rename to Clustering Techniques/Agglomerative_Clustering_Algorithm.py diff --git a/Clustering Techniques/BIRCH-Algorithm.py b/Clustering Techniques/Birch_Algorithm.py similarity index 100% rename from Clustering Techniques/BIRCH-Algorithm.py rename to Clustering Techniques/Birch_Algorithm.py diff --git a/Clustering Techniques/DBSCAN-Model.py b/Clustering Techniques/DBScan_Model.py similarity index 100% rename from Clustering Techniques/DBSCAN-Model.py rename to Clustering Techniques/DBScan_Model.py diff --git a/Clustering Techniques/Gaussain-Mixture-Model.py b/Clustering Techniques/Gaussain_Mixture_Model.py similarity index 100% rename from Clustering Techniques/Gaussain-Mixture-Model.py rename to Clustering Techniques/Gaussain_Mixture_Model.py diff --git a/Clustering Techniques/Mean-Shift-Clustering-algorithm.py b/Clustering Techniques/Mean_Shift_Clustering_Algorithm.py similarity index 100% rename from Clustering Techniques/Mean-Shift-Clustering-algorithm.py rename to Clustering Techniques/Mean_Shift_Clustering_Algorithm.py diff --git a/Clustering Techniques/OPTICS-algorithm.py b/Clustering Techniques/Optics_Algorithm.py similarity index 100% rename from Clustering Techniques/OPTICS-algorithm.py rename to Clustering Techniques/Optics_Algorithm.py diff --git a/Graphs/travelling_salesman_problem_solver.py b/Graphs/Travelling_Salesman_Problem.py similarity index 100% rename from Graphs/travelling_salesman_problem_solver.py rename to Graphs/Travelling_Salesman_Problem.py diff --git a/Regression-Techniques/Bayesian-Regression.py b/Regression Techniques/Bayesian_Regression.py similarity index 100% rename from Regression-Techniques/Bayesian-Regression.py rename to Regression Techniques/Bayesian_Regression.py diff --git a/Regression-Techniques/Isotonic-Regression.py b/Regression Techniques/Isotonic_Regression.py similarity index 100% rename from Regression-Techniques/Isotonic-Regression.py rename to Regression Techniques/Isotonic_Regression.py diff --git a/Regression-Techniques/Lasso-Regression.py b/Regression Techniques/Lasso_Regression.py similarity index 100% rename from Regression-Techniques/Lasso-Regression.py rename to Regression Techniques/Lasso_Regression.py diff --git a/Regression-Techniques/Least-Angle-Regression.py b/Regression Techniques/Least_Angle_Regression.py similarity index 100% rename from Regression-Techniques/Least-Angle-Regression.py rename to Regression Techniques/Least_Angle_Regression.py diff --git a/Regression-Techniques/Multiple-Linear-Regression.py b/Regression Techniques/Linear_Regression.py similarity index 100% rename from Regression-Techniques/Multiple-Linear-Regression.py rename to Regression Techniques/Linear_Regression.py diff --git a/Regression-Techniques/Logistic-Regression.py b/Regression Techniques/Logistic_Regression.py similarity index 100% rename from Regression-Techniques/Logistic-Regression.py rename to Regression Techniques/Logistic_Regression.py diff --git a/Regression-Techniques/Polynomial-Regression.py b/Regression Techniques/Polynomial_Regression.py similarity index 100% rename from Regression-Techniques/Polynomial-Regression.py rename to Regression Techniques/Polynomial_Regression.py diff --git a/Regression-Techniques/Quantile-Regression.py b/Regression Techniques/Quantile_Regression.py similarity index 100% rename from 
Regression-Techniques/Quantile-Regression.py
rename to Regression Techniques/Quantile_Regression.py
diff --git a/Regression-Techniques/Ridge-Regression.py b/Regression Techniques/Ridge_Regression.py
similarity index 100%
rename from Regression-Techniques/Ridge-Regression.py
rename to Regression Techniques/Ridge_Regression.py
diff --git a/Regression-Techniques/simple-linear-regression.py b/Regression Techniques/Simple_Linear_Regression.py
similarity index 100%
rename from Regression-Techniques/simple-linear-regression.py
rename to Regression Techniques/Simple_Linear_Regression.py
diff --git a/Regression-Techniques/Stepwise-Regression.py b/Regression Techniques/Stepwise_Regression.py
similarity index 100%
rename from Regression-Techniques/Stepwise-Regression.py
rename to Regression Techniques/Stepwise_Regression.py
diff --git a/Trie/Menu_Driven_Code_for_Tries.py b/Tree/Menu_Driven_Code_for_Tries.py
similarity index 100%
rename from Trie/Menu_Driven_Code_for_Tries.py
rename to Tree/Menu_Driven_Code_for_Tries.py

From 4b4e0b3ae40f15d88dc6ff4c74059baa6a001dd1 Mon Sep 17 00:00:00 2001
From: ConradKash
Date: Tue, 17 Oct 2023 21:26:28 +0300
Subject: [PATCH 24/44] naive_search_pattern algorithm

---
 .../Naive_Pattern_Searching.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/Pattern Searching Algorithm/Naive_Pattern_Searching.py b/Pattern Searching Algorithm/Naive_Pattern_Searching.py
index 16e3dea..cc955c3 100644
--- a/Pattern Searching Algorithm/Naive_Pattern_Searching.py
+++ b/Pattern Searching Algorithm/Naive_Pattern_Searching.py
         if (txt[i + j] != pat[j]):
             break
         j += 1
-
+
     if (j == M):
         print("Pattern found at index ", i)

     pat = "AABA"

     # Function call
+    print('Below is an example of the Naive Pattern Searching Algorithm\n')
+    print('It is being implemented for the following text and pattern: \n')
+    print(' Text = "AABAACAADAABAAABAA" pattern = "AABA"')
+
    search(pat, txt)
    #try it yourself
+    print('\nNow try it yourself\n')
    txt = input("Enter the text: ")
    pat = input("Enter the pattern: ")
-
-
-    search(pat, txt)
+    search(pat, txt)
\ No newline at end of file

From b93f55d81a2cc0da11cc4b8ee93f99f3b521c733 Mon Sep 17 00:00:00 2001
From: pankaj kumar
Date: Wed, 18 Oct 2023 00:09:35 +0530
Subject: [PATCH 25/44] I have created for dynamic inputs.. Please review it..
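
For reference, a sample run of the new dynamic input (the values below simply
re-enter the old hard-coded sample graph; with them, the script should print
the components shown at the end):

    Enter the number of nodes: 7
    Enter the successors of node 0: 1
    Enter the successors of node 1: 2
    Enter the successors of node 2: 0 3
    Enter the successors of node 3: 4
    Enter the successors of node 4: 5
    Enter the successors of node 5: 3 6
    Enter the successors of node 6:
    Strongly Connected Components:
    [[6], [5, 4, 3], [2, 1, 0]]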
---
 Graphs/Tarjan's_Algorithm.py | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/Graphs/Tarjan's_Algorithm.py b/Graphs/Tarjan's_Algorithm.py
index cf9a356..cc88b01 100644
--- a/Graphs/Tarjan's_Algorithm.py
+++ b/Graphs/Tarjan's_Algorithm.py
 is a dictionary-like container from the collections
module that provides a default value for keys that do not exist."""

-from collections import defaultdict
+from collections import defaultdict

 # Function to run Tarjan's algorithm
 def tarjan(graph):

         # Consider successors of `node`
         try:
             successors = graph[node]
         except KeyError:
-
+
             successors = []
         for successor in successors:
             if successor not in indexes:
                 # Successor has not yet been visited; recurse on it
                 strongconnect(successor)
-                lowlinks[node] = min(lowlinks[node],lowlinks[successor])
+                lowlinks[node] = min(lowlinks[node], lowlinks[successor])
             elif successor in stack:
-                # Successor is in stack, hence in current SCC
-                lowlinks[node] = min(lowlinks[node],indexes[successor])
+                # Successor is in the stack, hence in the current SCC
+                lowlinks[node] = min(lowlinks[node], indexes[successor])

         # If `node` is a root node, pop the stack and generate an SCC
         if lowlinks[node] == indexes[node]:
             connected_component = []

             while True:
                 successor = stack.pop()
                 connected_component.append(successor)
-                if successor == node: break
+                if successor == node:
+                    break
             components.append(connected_component)

     for node in graph:
         if node not in indexes:
             strongconnect(node)

     return components

-# Sample graph
-graph = {
-    0: [1],
-    1: [2],
-    2: [0, 3],
-    3: [4],
-    4: [5],
-    5: [3, 6],
-    6: []
-}
+# Accept dynamic input for the graph: nodes are numbered 0..n-1, and each
+# node's successors are entered as space-separated integers (blank for none)
+graph = defaultdict(list)
+num_nodes = int(input("Enter the number of nodes: "))
+for i in range(num_nodes):
+    successors = list(map(int, input(f"Enter the successors of node {i}: ").split()))
+    graph[i] = successors

+print("Strongly Connected Components:")
 print(tarjan(graph))


 """ Explanation:->

 1) Tarjan's algorithm performs a DFS on the graph to find strongly connected components.

From 5e86ab62f886cd09db0c283d609f4161dccca5dd Mon Sep 17 00:00:00 2001
From: ConradKash
Date: Wed, 18 Oct 2023 00:10:19 +0300
Subject: [PATCH 26/44] naive_search_pattern algorithm

---
 .../Naive_Pattern_Searching.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename {Pattern Searching Algorithm => Searching Techniques}/Naive_Pattern_Searching.py (100%)

diff --git a/Pattern Searching Algorithm/Naive_Pattern_Searching.py b/Searching Techniques/Naive_Pattern_Searching.py
similarity index 100%
rename from Pattern Searching Algorithm/Naive_Pattern_Searching.py
rename to Searching Techniques/Naive_Pattern_Searching.py

From 0bbeebb82eaff897e29932c471fa6c9c7f52154d Mon Sep 17 00:00:00 2001
From: Avdhesh-Varshney <114330097+Avdhesh-Varshney@users.noreply.github.com>
Date: Wed, 18 Oct 2023 10:53:17 +0530
Subject: [PATCH 27/44] Morris Traversal

---
 Tree/Morris_Traversal.py | 70 ++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 Tree/Morris_Traversal.py

diff --git a/Tree/Morris_Traversal.py b/Tree/Morris_Traversal.py
new file mode 100644
index 0000000..46d3c91
--- /dev/null
+++ b/Tree/Morris_Traversal.py
+# Morris Traversal
+# Time Complexity = O(n)
+# Space Complexity = O(1) (main advantage of this traversal: it uses only constant extra space)
+#            1
+#          /   \
+#         /     \
+#        2       3
+#       / \
+#      /   \
+#     4     5
+#            \
+#             \
+#              6
+#
+# Output --> 4 2 5 6 1 3

+class TreeNode:
+    def __init__(self, val=0, left=None, right=None):
+        self.val = val
+        self.left = left
+        self.right = right
+
+# Morris-inorder traversal
+def Morris_Traversal(root):
+    morris = []
+    cur = root
+
+    while cur:
+        if cur.left is None:
+            morris.append(cur.val)
+            cur = cur.right
+        else:
+            # find the inorder predecessor of cur
+            temp = cur.left
+            while temp.right and temp.right != cur:
+                temp = temp.right
+
+            if temp.right is None:
+                # make a temporary thread back to cur, then descend left
+                temp.right = cur
+                cur = cur.left
+            else:
+                # thread already exists: remove it, visit cur, go right
+                temp.right = None
+                morris.append(cur.val)
+                cur = cur.right
+
+    return morris
+
+if __name__ == '__main__':
+    # clear the terminal screen (ANSI escape sequence)
+    print("\033c", end='', flush=True)
+    # Input tree elements
+    root_val = int(input("Enter the value for the root: "))
+    root = TreeNode(root_val)
+
+    print('\n')
+    queue = [root]
+    while queue:
+        current = queue.pop(0)
+        left_val = int(input(f"Enter the value for the left child of {current.val} (Enter -1 for no child): "))
+        if left_val != -1:
+            current.left = TreeNode(left_val)
+            queue.append(current.left)
+        right_val = int(input(f"Enter the value for the right child of {current.val} (Enter -1 for no child): "))
+        if right_val != -1:
+            current.right = TreeNode(right_val)
+            queue.append(current.right)
+    print('\n')
+
+    # Morris Traversal starts
+    morris = Morris_Traversal(root)
+    print(' '.join([str(i) for i in morris]))

From 5c48d9f6b87504c7750549701861f72b90952bd3 Mon Sep 17 00:00:00 2001
From: Tanushree <60938591+aggarwal-tanushree@users.noreply.github.com>
Date: Fri, 20 Oct 2023 17:37:16 +0200
Subject: [PATCH 28/44] algo: Added TimSort Algorithm

---
 Sorting Techniques/Tim_Sort.py | 110 +++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 Sorting Techniques/Tim_Sort.py

diff --git a/Sorting Techniques/Tim_Sort.py b/Sorting Techniques/Tim_Sort.py
new file mode 100644
index 0000000..f66b307
--- /dev/null
+++ b/Sorting Techniques/Tim_Sort.py
+# Python : Timsort algorithm
+####################################################################################################################
+# TimSort is a hybrid sorting algorithm that combines the strengths of merge sort and insertion sort.
+# It is designed to efficiently sort a wide range of real-world data types. It maintains the relative order of equal elements in the sorted output.
+# Divide into Runs: TimSort starts by dividing the input array into small, already sorted subsequences called "runs."
+# Merge Runs: It then merges these runs together using a combination of merge sort and insertion sort. This merging process optimizes performance, especially for data with pre-existing order.
+####################################################################################################################


+MIN_MERGE = 32


+def calcMinRun(n):
+    """Returns the minimum length of a run, between 16 and 32 here
+    (MIN_MERGE // 2 and MIN_MERGE), so that len(array)/minrun is less
+    than or equal to a power of 2.

+    e.g. 1=>1, ..., 31=>31, 32=>16, 33=>17,
+    ..., 63=>32, 64=>16, ...
From 5c48d9f6b87504c7750549701861f72b90952bd3 Mon Sep 17 00:00:00 2001
From: Tanushree <60938591+aggarwal-tanushree@users.noreply.github.com>
Date: Fri, 20 Oct 2023 17:37:16 +0200
Subject: [PATCH 28/44] algo: Added TimSort Algorithm

---
 Sorting Techniques/Tim_Sort.py | 110 +++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 Sorting Techniques/Tim_Sort.py

diff --git a/Sorting Techniques/Tim_Sort.py b/Sorting Techniques/Tim_Sort.py
new file mode 100644
index 0000000..f66b307
--- /dev/null
+++ b/Sorting Techniques/Tim_Sort.py
@@ -0,0 +1,110 @@
+# Python : Timsort algorithm
+####################################################################################################################
+# TimSort is a hybrid sorting algorithm that combines the strengths of merge sort and insertion sort.
+# It is designed to efficiently sort a wide range of real-world data types. It maintains the relative order of equal elements in the sorted output.
+# Divide into Runs: TimSort starts by dividing the input array into small, already sorted subsequences called "runs."
+# Merge Runs: It then merges these runs together using a combination of merge sort and insertion sort. This merging process optimizes performance, especially for data with pre-existing order.
+####################################################################################################################
+
+
+MIN_MERGE = 32
+
+
+def calcMinRun(n):
+    """Returns the minimum length of a run, in the range 16 to 32,
+    so that len(array)/minrun is less than or equal to a power of 2.
+
+    e.g. 1=>1, ..., 31=>31, 32=>16, 33=>17,
+    ..., 63=>32, 64=>16, 65=>17, ...
+    """
+    r = 0
+    while n >= MIN_MERGE:
+        r |= n & 1
+        n >>= 1
+    return n + r
+
+
+# This function sorts the array from the left index to
+# the right index, which is of size at most RUN
+def insertionSort(arr, left, right):
+    for i in range(left + 1, right + 1):
+        j = i
+        while j > left and arr[j] < arr[j - 1]:
+            arr[j], arr[j - 1] = arr[j - 1], arr[j]
+            j -= 1
+
+
+# Merge function merges the sorted runs
+def merge(arr, l, m, r):
+
+    # original array is broken into two parts:
+    # the left and right arrays
+    len1, len2 = m - l + 1, r - m
+    left, right = [], []
+    for i in range(0, len1):
+        left.append(arr[l + i])
+    for i in range(0, len2):
+        right.append(arr[m + 1 + i])
+
+    i, j, k = 0, 0, l
+
+    # after comparing, we merge those two arrays
+    # into the larger sub array
+    while i < len1 and j < len2:
+        if left[i] <= right[j]:
+            arr[k] = left[i]
+            i += 1
+
+        else:
+            arr[k] = right[j]
+            j += 1
+
+        k += 1
+
+    # Copy remaining elements of left, if any
+    while i < len1:
+        arr[k] = left[i]
+        k += 1
+        i += 1
+
+    # Copy remaining elements of right, if any
+    while j < len2:
+        arr[k] = right[j]
+        k += 1
+        j += 1
+
+
+# Iterative Timsort function to sort the
+# array[0...n-1] (similar to merge sort)
+def timSort(arr):
+    n = len(arr)
+    minRun = calcMinRun(n)
+
+    # Sort individual subarrays of size RUN
+    for start in range(0, n, minRun):
+        end = min(start + minRun - 1, n - 1)
+        insertionSort(arr, start, end)
+
+    # Start merging from size RUN (or 32). It will merge
+    # to form size 64, then 128, 256 and so on ....
+    size = minRun
+    while size < n:
+
+        # Pick starting point of left sub array. We
+        # are going to merge arr[left..left+size-1]
+        # and arr[left+size, left+2*size-1]
+        # After every merge, we increase left by 2*size
+        for left in range(0, n, 2 * size):
+
+            # Find ending point of left sub array
+            # mid+1 is starting point of right sub array
+            mid = min(n - 1, left + size - 1)
+            right = min((left + 2 * size - 1), (n - 1))
+
+            # Merge sub array arr[left.....mid] &
+            # arr[mid+1....right]
+            if mid < right:
+                merge(arr, left, mid, right)
+
+        size = 2 * size
+
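Tim_Sort.py is committed without a driver; a small one (illustrative only, assuming the functions above) could look like this:

if __name__ == '__main__':
    arr = [-2, 7, 15, -14, 0, 15, 0, 7, -7, -4, -13, 5, 8, -14, 12]
    timSort(arr)  # sorts the list in place
    print(arr)    # [-14, -14, -13, -7, -4, -2, 0, 0, 5, 7, 7, 8, 12, 15, 15]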
From e992049f758031c3fb53d6685c8882f3704be17c Mon Sep 17 00:00:00 2001
From: pankaj kumar
Date: Sat, 21 Oct 2023 22:14:29 +0530
Subject: [PATCH 29/44] I have implemented the Boyer-Moore algorithm inside
 Searching Techniques with dynamic inputs and a proper explanation. Please
 review it.

---
 Searching Techniques/Boyer_Moore_Algorithm.py | 74 +++++++++++++++++++
 1 file changed, 74 insertions(+)
 create mode 100644 Searching Techniques/Boyer_Moore_Algorithm.py

diff --git a/Searching Techniques/Boyer_Moore_Algorithm.py b/Searching Techniques/Boyer_Moore_Algorithm.py
new file mode 100644
index 0000000..1b2df13
--- /dev/null
+++ b/Searching Techniques/Boyer_Moore_Algorithm.py
@@ -0,0 +1,74 @@
+"""Program for Bad Character Heuristic
+of Boyer Moore String Matching Algorithm"""
+
+
+NO_OF_CHARS = 256
+
+def badCharHeuristic(string, size):
+    '''
+    The preprocessing function for
+    Boyer Moore's bad character heuristic
+    '''
+    # Initialize all occurrences as -1
+    badChar = [-1] * NO_OF_CHARS
+
+    # Fill the actual value of the last occurrence
+    for i in range(size):
+        badChar[ord(string[i])] = i
+
+    # Return the initialized list
+    return badChar
+
+def search(txt, pat):
+    '''
+    A pattern searching function that uses the Bad Character
+    Heuristic of the Boyer Moore Algorithm
+    '''
+    m = len(pat)
+    n = len(txt)
+
+    # Create the bad character list by calling
+    # the preprocessing function badCharHeuristic()
+    # for the given pattern
+    badChar = badCharHeuristic(pat, m)
+
+    # s is the shift of the pattern with respect to the text
+    s = 0
+    while s <= n - m:
+        j = m - 1
+
+        # Keep reducing index j of the pattern while
+        # characters of the pattern and text are matching
+        # at this shift s
+        while j >= 0 and pat[j] == txt[s + j]:
+            j -= 1
+
+        # If the pattern is present at the current shift,
+        # then index j will become -1 after the above loop
+        if j < 0:
+            print("Pattern occurs at shift =", s)
+
+            '''
+            Shift the pattern so that the next character in the text
+            aligns with the last occurrence of it in the pattern.
+            The condition s+m < n is necessary for the case when
+            the pattern occurs at the end of the text
+            '''
+            s += (m - badChar[ord(txt[s + m])] if s + m < n else 1)
+        else:
+            '''
+            Shift the pattern so that the bad character in the text
+            aligns with the last occurrence of it in the pattern. The
+            max function is used to make sure that we get a positive
+            shift. We may get a negative shift if the last occurrence
+            of the bad character in the pattern is on the right side of the
+            current character.
+            '''
+            s += max(1, j - badChar[ord(txt[s + j])])
+
+while True:
+    txt = input('Enter the text (or press Enter to exit): ')
+    if not txt:
+        break
+    pat = input('Enter the pattern to search for: ')
+    search(txt, pat)

From 289decf9613f065597cf6e37e4e6492fd7a5d76f Mon Sep 17 00:00:00 2001
From: Varun Singh
Date: Sat, 21 Oct 2023 23:54:20 +0530
Subject: [PATCH 30/44] add: sieve_of_eratosthenes.py

---
 Math/sieve_of_eratosthenes.py | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 Math/sieve_of_eratosthenes.py

diff --git a/Math/sieve_of_eratosthenes.py b/Math/sieve_of_eratosthenes.py
new file mode 100644
index 0000000..88cf490
--- /dev/null
+++ b/Math/sieve_of_eratosthenes.py
@@ -0,0 +1,30 @@
+"""Sieve Of Eratosthenes:
+The sieve of Eratosthenes is one of the most efficient ways to find all
+the prime numbers up to the number `n`
+
+for more reference (https://www.geeksforgeeks.org/sieve-of-eratosthenes/)
+"""
+
+# importing the `math` module, whose sqrt() is used below
+import math
+
+# specify up to where you have to find the prime numbers
+n = int(input("Enter the range : "))
+
+# `arr` is a boolean list with `n+1` entries; `False` means "assumed prime"
+arr = [False]*(n+1)
+
+# loop up to the square root of the range `n`
+for i in range(2, int(math.sqrt(n))+1):
+    if arr[i] == False:
+        for j in range(i*i, n+1, i):
+            # mark every multiple of `i` as composite (`True`)
+            arr[j] = True
+
+# after the loop exits, every index from 2 to `n` whose entry
+# is still `False` is a prime number
+
+# printing all the prime numbers from 2 to n (inclusive)
+for i in range(2, n+1):
+    if arr[i] == False:
+        print(i)
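A quick, hypothetical sanity check of the sieve above:

# Enter the range : 30
# -> prints 2 3 5 7 11 13 17 19 23 29 (one prime per line)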
From 6b0c4ab49c4865e5f904d717f022cb5c0f67cde9 Mon Sep 17 00:00:00 2001
From: Prateek
Date: Mon, 23 Oct 2023 13:05:42 +0530
Subject: [PATCH 31/44] algo: Added Dijkstra's Algorithm

---
 Graphs/Dijkstra's_Algorithm.py | 60 ++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)
 create mode 100644 Graphs/Dijkstra's_Algorithm.py

diff --git a/Graphs/Dijkstra's_Algorithm.py b/Graphs/Dijkstra's_Algorithm.py
new file mode 100644
index 0000000..ffbd4fc
--- /dev/null
+++ b/Graphs/Dijkstra's_Algorithm.py
@@ -0,0 +1,60 @@
+# Dijkstra's Algorithm is a widely used graph algorithm designed to find the
+# shortest path from a source node to all other nodes in a weighted graph. It
+# was developed by Dutch computer scientist Edsger W. Dijkstra in 1956. The
+# algorithm is particularly effective when all edge weights are non-negative.
+
+def Dijkstra(Graph, _s, _d):
+    row = len(Graph)
+    col = len(Graph[0])
+    dist = [float("Inf")] * row
+    Blackened = [0] * row
+    pathlength = [0] * row
+    parent = [-1] * row
+    dist[_s] = 0
+    for count in range(row-1):
+        u = MinDistance(dist, Blackened)
+
+        # if MinDistance() returns INFINITY, then the graph is not
+        # connected and we have traversed all of the vertices in the
+        # connected component of the source vertex, so it can reduce
+        # the time complexity sometimes
+        # In a directed graph, it means that the source vertex
+        # is not a root
+        if u == float("Inf"):
+            break
+        else:
+
+            # Mark the vertex as Blackened
+            Blackened[u] = 1
+            for v in range(row):
+                if Blackened[v] == 0 and Graph[u][v] and dist[u]+Graph[u][v] < dist[v]:
+                    parent[v] = u
+                    pathlength[v] = pathlength[parent[v]]+1
+                    dist[v] = dist[u]+Graph[u][v]
+                elif Blackened[v] == 0 and Graph[u][v] and dist[u]+Graph[u][v] == dist[v] and pathlength[u]+1 < pathlength[v]:
+                    parent[v] = u
+                    pathlength[v] = pathlength[u] + 1
+    if dist[_d] != float("Inf"):
+
+        # Printing the path
+        PrintPath(parent, _d)
+    else:
+        print("There is no path between vertex ", _s, "to vertex ", _d)
+
+# Function to print the path
+
+def PrintPath(parent, _d):
+    if parent[_d] == -1:
+        print(_d, end='')
+        return
+    PrintPath(parent, parent[_d])
+    print("->", _d, end='')
+
+
+def MinDistance(dist, Blackened):
+    min = float("Inf")
+    for v in range(len(dist)):
+        if not Blackened[v] and dist[v] < min:
+            min = dist[v]
+            Min_index = v
+    return float("Inf") if min == float("Inf") else Min_index

From 0f038d412f7c4d45819bcd8506a568328acfaf5b Mon Sep 17 00:00:00 2001
From: Prateek
Date: Mon, 23 Oct 2023 13:35:22 +0530
Subject: [PATCH 32/44] algo: Added Floyd Warshall Algorithm

---
 Graphs/Floyd_Warshall_Algorithm.py | 77 ++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 Graphs/Floyd_Warshall_Algorithm.py

diff --git a/Graphs/Floyd_Warshall_Algorithm.py b/Graphs/Floyd_Warshall_Algorithm.py
new file mode 100644
index 0000000..239b07e
--- /dev/null
+++ b/Graphs/Floyd_Warshall_Algorithm.py
@@ -0,0 +1,77 @@
+# The Floyd-Warshall Algorithm is a dynamic programming algorithm used to find
+# the shortest paths between all pairs of nodes in a weighted graph. It works
+# for directed or undirected graphs with positive or negative edge weights and
+# is particularly valuable when you need to compute and store all shortest
+# paths in a graph. The algorithm has a time and space complexity of O(n^3),
+# making it suitable for small to moderately sized graphs.
+
+
+# Number of vertices in the graph
+V = 4
+
+# Define infinity as a large
+# enough value. This value will be
+# used for vertices not connected to each other
+INF = 99999
+
+# Solves all pair shortest path
+# via Floyd Warshall Algorithm
+
+
+def floydWarshall(graph):
+    """ dist[][] will be the output
+    matrix that will finally
+    have the shortest distances
+    between every pair of vertices """
+    """ initializing the solution matrix
+    same as input graph matrix
+    OR we can say that the initial
+    values of shortest distances
+    are based on shortest paths considering no
+    intermediate vertices """
+
+    dist = list(map(lambda i: list(map(lambda j: j, i)), graph))
+
+    """ Add all vertices one by one
+    to the set of intermediate
+    vertices.
+    ---> Before the start of an iteration,
+    we have the shortest distances
+    between all pairs of vertices
+    such that the shortest
+    distances consider only the
+    vertices in the set
+    {0, 1, 2, .. k-1} as intermediate vertices.
+    ----> After the end of an
+    iteration, vertex no. k is
+    added to the set of intermediate
+    vertices and the
+    set becomes {0, 1, 2, .. k}
+    """
+    for k in range(V):
+
+        # pick all vertices as source one by one
+        for i in range(V):
+
+            # Pick all vertices as destination for the
+            # above picked source
+            for j in range(V):
+
+                # If vertex k is on the shortest path from
+                # i to j, then update the value of dist[i][j]
+                dist[i][j] = min(dist[i][j],
+                                dist[i][k] + dist[k][j]
+                                )
+    printSolution(dist)
+
+
+# A utility function to print the solution
+def printSolution(dist):
+    print("Following matrix shows the shortest distances \
+between every pair of vertices")
+    for i in range(V):
+        for j in range(V):
+            if (dist[i][j] == INF):
+                print("%7s" % ("INF"), end=" ")
+            else:
+                print("%7d\t" % (dist[i][j]), end=' ')
+            if j == V-1:
+                print()

From c22bd8e2f8c926940789cd6e4618f2067a1b7883 Mon Sep 17 00:00:00 2001
From: Prateek
Date: Mon, 23 Oct 2023 13:57:15 +0530
Subject: [PATCH 33/44] algo: Added Floyd Warshall Algorithm

---
 Graphs/Floyd_Warshall_Algorithm.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/Graphs/Floyd_Warshall_Algorithm.py b/Graphs/Floyd_Warshall_Algorithm.py
index 239b07e..1a65876 100644
--- a/Graphs/Floyd_Warshall_Algorithm.py
+++ b/Graphs/Floyd_Warshall_Algorithm.py
@@ -57,13 +57,14 @@ def floydWarshall(graph):
 
                 # If vertex k is on the shortest path from
                 # i to j, then update the value of dist[i][j]
-                dist[i][j] = min(dist[i][j],
-                                dist[i][k] + dist[k][j]
-                                )
+                dist[i][j] = min(dist[i][j],
+                                 dist[i][k] + dist[k][j]
+                                 )
     printSolution(dist)
 
-
 # A utility function to print the solution
+
+
 def printSolution(dist):
     print("Following matrix shows the shortest distances \
 between every pair of vertices")
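Floyd_Warshall_Algorithm.py ships without a driver. An illustrative one, reusing the file's V = 4 and INF (the adjacency matrix below is an assumption, not part of the commit):

if __name__ == '__main__':
    graph = [[0,   5,   INF, 10],
             [INF, 0,   3,   INF],
             [INF, INF, 0,   1],
             [INF, INF, INF, 0]]
    floydWarshall(graph)  # first printed row: 0 5 8 9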
From 13f71ef946df7d43d05d9c5c928b527cc74555dd Mon Sep 17 00:00:00 2001
From: Prateek
Date: Mon, 23 Oct 2023 14:13:57 +0530
Subject: [PATCH 34/44] algo: Added Floyd Warshall Algorithm

---
 Graphs/Dijkstra's_Algorithm.py | 60 ----------------------------------
 1 file changed, 60 deletions(-)
 delete mode 100644 Graphs/Dijkstra's_Algorithm.py

diff --git a/Graphs/Dijkstra's_Algorithm.py b/Graphs/Dijkstra's_Algorithm.py
deleted file mode 100644
index ffbd4fc..0000000
--- a/Graphs/Dijkstra's_Algorithm.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Dijkstra's Algorithm is a widely used graph algorithm designed to find the
-# shortest path from a source node to all other nodes in a weighted graph. It
-# was developed by Dutch computer scientist Edsger W. Dijkstra in 1956. The
-# algorithm is particularly effective when all edge weights are non-negative.
-
-def Dijkstra(Graph, _s, _d):
-    row = len(Graph)
-    col = len(Graph[0])
-    dist = [float("Inf")] * row
-    Blackened = [0] * row
-    pathlength = [0] * row
-    parent = [-1] * row
-    dist[_s] = 0
-    for count in range(row-1):
-        u = MinDistance(dist, Blackened)
-
-        # if MinDistance() returns INFINITY, then the graph is not
-        # connected and we have traversed all of the vertices in the
-        # connected component of the source vertex, so it can reduce
-        # the time complexity sometimes
-        # In a directed graph, it means that the source vertex
-        # is not a root
-        if u == float("Inf"):
-            break
-        else:
-
-            # Mark the vertex as Blackened
-            Blackened[u] = 1
-            for v in range(row):
-                if Blackened[v] == 0 and Graph[u][v] and dist[u]+Graph[u][v] < dist[v]:
-                    parent[v] = u
-                    pathlength[v] = pathlength[parent[v]]+1
-                    dist[v] = dist[u]+Graph[u][v]
-                elif Blackened[v] == 0 and Graph[u][v] and dist[u]+Graph[u][v] == dist[v] and pathlength[u]+1 < pathlength[v]:
-                    parent[v] = u
-                    pathlength[v] = pathlength[u] + 1
-    if dist[_d] != float("Inf"):
-
-        # Printing the path
-        PrintPath(parent, _d)
-    else:
-        print("There is no path between vertex ", _s, "to vertex ", _d)
-
-# Function to print the path
-
-def PrintPath(parent, _d):
-    if parent[_d] == -1:
-        print(_d, end='')
-        return
-    PrintPath(parent, parent[_d])
-    print("->", _d, end='')
-
-
-def MinDistance(dist, Blackened):
-    min = float("Inf")
-    for v in range(len(dist)):
-        if not Blackened[v] and dist[v] < min:
-            min = dist[v]
-            Min_index = v
-    return float("Inf") if min == float("Inf") else Min_index

From bcfd1487604e528fa1bcd2d730ca00c1ca0e9769 Mon Sep 17 00:00:00 2001
From: Prateek
Date: Tue, 24 Oct 2023 14:46:25 +0530
Subject: [PATCH 35/44] algo: Added Bellman Ford Algorithm

---
 Graphs/BellMan_Ford_Algorithm.py | 53 ++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 Graphs/BellMan_Ford_Algorithm.py

diff --git a/Graphs/BellMan_Ford_Algorithm.py b/Graphs/BellMan_Ford_Algorithm.py
new file mode 100644
index 0000000..0d305e3
--- /dev/null
+++ b/Graphs/BellMan_Ford_Algorithm.py
@@ -0,0 +1,53 @@
+# The Bellman-Ford Algorithm is a single-source, shortest-path algorithm used to find the shortest paths from a source node to all other nodes in a weighted graph, even when the graph contains negative edge weights. It works by iteratively relaxing the edges in the graph, ensuring that it can detect and handle negative weight cycles.
+
+# Class to represent a graph
+
+class Graph:
+
+    def __init__(self, vertices):
+        self.V = vertices  # No. of vertices
+        self.graph = []
+
+    # function to add an edge to graph
+    def addEdge(self, u, v, w):
+        self.graph.append([u, v, w])
+
+    # utility function used to print the solution
+    def printArr(self, dist):
+        print("Vertex Distance from Source")
+        for i in range(self.V):
+            print("{0}\t\t{1}".format(i, dist[i]))
+
+    # The main function that finds shortest distances from src to
+    # all other vertices using the Bellman-Ford algorithm. The function
+    # also detects a negative weight cycle
+    def BellmanFord(self, src):
+
+        # Step 1: Initialize distances from src to all other vertices
+        # as INFINITE
+        dist = [float("Inf")] * self.V
+        dist[src] = 0
+
+        # Step 2: Relax all edges |V| - 1 times. A simple shortest
+        # path from src to any other vertex can have at most |V| - 1
+        # edges
+        for _ in range(self.V - 1):
+            # Relax every edge: if the path through u is shorter,
+            # update the tentative distance of v
+            for u, v, w in self.graph:
+                if dist[u] != float("Inf") and dist[u] + w < dist[v]:
+                    dist[v] = dist[u] + w
+
+        # Step 3: check for negative-weight cycles. The above step
+        # guarantees shortest distances if graph doesn't contain
+        # negative weight cycle. If we get a shorter path, then there
+        # is a cycle.
+
+        for u, v, w in self.graph:
+            if dist[u] != float("Inf") and dist[u] + w < dist[v]:
+                print("Graph contains negative weight cycle")
+                return
+
+        # print all distance
+        self.printArr(dist)
\ No newline at end of file
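BellMan_Ford_Algorithm.py defines the Graph class but never instantiates it. A minimal, illustrative driver (the edge list below is an assumption) might be:

g = Graph(5)
for u, v, w in [(0, 1, -1), (0, 2, 4), (1, 2, 3), (1, 3, 2),
                (1, 4, 2), (3, 2, 5), (3, 1, 1), (4, 3, -3)]:
    g.addEdge(u, v, w)
g.BellmanFord(0)  # prints distances 0, -1, 2, -2, 1 for vertices 0..4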
From 6103b1a9e0879f6238e00f2e790c33497f48d20a Mon Sep 17 00:00:00 2001
From: Radhey644
Date: Wed, 25 Oct 2023 22:47:43 +0530
Subject: [PATCH 36/44] Prefix Sum code added

---
 Array/Prefix_Sum.cpp | 46 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 Array/Prefix_Sum.cpp

diff --git a/Array/Prefix_Sum.cpp b/Array/Prefix_Sum.cpp
new file mode 100644
index 0000000..8d34455
--- /dev/null
+++ b/Array/Prefix_Sum.cpp
@@ -0,0 +1,46 @@
+#include <iostream>
+#include <vector>
+
+// Function to calculate the prefix sum of an array
+std::vector<int> calculatePrefixSum(const std::vector<int>& arr) {
+    int n = arr.size();
+    std::vector<int> prefixSum(n, 0);
+
+    prefixSum[0] = arr[0];
+    for (int i = 1; i < n; i++) {
+        prefixSum[i] = prefixSum[i - 1] + arr[i];
+    }
+
+    return prefixSum;
+}
+
+int main() {
+    // Input the array size
+    int n;
+    std::cout << "Enter the size of the array: ";
+    std::cin >> n;
+
+    if (n <= 0) {
+        std::cout << "Array size must be a positive integer." << std::endl;
+        return 1; // Exit with an error code
+    }
+
+    // Input the elements of the array
+    std::vector<int> arr(n);
+    std::cout << "Enter " << n << " elements of the array: ";
+    for (int i = 0; i < n; i++) {
+        std::cin >> arr[i];
+    }
+
+    // Calculate the prefix sum
+    std::vector<int> prefixSum = calculatePrefixSum(arr);
+
+    // Display the prefix sum
+    std::cout << "Prefix Sum: ";
+    for (int i = 0; i < n; i++) {
+        std::cout << prefixSum[i] << " ";
+    }
+    std::cout << std::endl;
+
+    return 0;
+}

From 6291240693cf7c381081067f3e9db18a483d5b32 Mon Sep 17 00:00:00 2001
From: ydvmudit07
Date: Wed, 25 Oct 2023 23:07:40 +0530
Subject: [PATCH 37/44] kadane's algorithm code

---
 Algoritm/kadane's_algorithm.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
 create mode 100644 Algoritm/kadane's_algorithm.py

diff --git a/Algoritm/kadane's_algorithm.py b/Algoritm/kadane's_algorithm.py
new file mode 100644
index 0000000..826f6d9
--- /dev/null
+++ b/Algoritm/kadane's_algorithm.py
@@ -0,0 +1,16 @@
+def max_subarray_sum(nums):
+    # Initialize variables to keep track of the maximum subarray sum
+
+    max_ending_here = nums[0]  # Maximum sum ending at the current position
+    max_so_far = nums[0]  # Maximum sum seen so far
+
+    # Iterate through the array, starting from the second element
+    for i in range(1, len(nums)):
+        # Calculate the maximum sum ending at the current position by considering whether it's better to start a new subarray or extend the previous one.
+        max_ending_here = max(nums[i], max_ending_here + nums[i])
+
+        # Update the maximum sum seen so far by comparing it with the maximum sum ending at the current position.
+        max_so_far = max(max_so_far, max_ending_here)
+
+    # The 'max_so_far' variable now contains the maximum subarray sum.
+    return max_so_far
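A quick illustrative check of max_subarray_sum:

print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6 (subarray [4, -1, 2, 1])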
From 0198111707bce5c19afd7b27ab6ba3af16c576e6 Mon Sep 17 00:00:00 2001
From: swayam patil
Date: Thu, 26 Oct 2023 09:04:48 +0530
Subject: [PATCH 38/44] Create AVL_tree.py

---
 Tree/AVL_tree.py | 146 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 146 insertions(+)
 create mode 100644 Tree/AVL_tree.py

diff --git a/Tree/AVL_tree.py b/Tree/AVL_tree.py
new file mode 100644
index 0000000..b4a8ed8
--- /dev/null
+++ b/Tree/AVL_tree.py
@@ -0,0 +1,146 @@
+class TreeNode:
+    def __init__(self, key):
+        self.key = key
+        self.left = None
+        self.right = None
+        self.height = 1
+
+
+class AVLTree:
+    def insert(self, root, key):
+        if not root:
+            return TreeNode(key)
+        if key < root.key:
+            root.left = self.insert(root.left, key)
+        else:
+            root.right = self.insert(root.right, key)
+        root.height = 1 + max(self.get_height(root.left), self.get_height(root.right))
+        return self.balance(root)
+
+    def delete(self, root, key):
+        if not root:
+            return root
+        if key < root.key:
+            root.left = self.delete(root.left, key)
+        elif key > root.key:
+            root.right = self.delete(root.right, key)
+        else:
+            if not root.left:
+                temp = root.right
+                root = None
+                return temp
+            elif not root.right:
+                temp = root.left
+                root = None
+                return temp
+            temp = self.get_min_value_node(root.right)
+            root.key = temp.key
+            root.right = self.delete(root.right, temp.key)
+        root.height = 1 + max(self.get_height(root.left), self.get_height(root.right))
+        return self.balance(root)
+
+    def get_height(self, node):
+        if not node:
+            return 0
+        return node.height
+
+    def get_balance(self, node):
+        if not node:
+            return 0
+        return self.get_height(node.left) - self.get_height(node.right)
+
+    def balance(self, node):
+        if not node:
+            return node
+        balance = self.get_balance(node)
+        if balance > 1:
+            if self.get_balance(node.left) < 0:
+                node.left = self.left_rotate(node.left)
+            return self.right_rotate(node)
+        if balance < -1:
+            if self.get_balance(node.right) > 0:
+                node.right = self.right_rotate(node.right)
+            return self.left_rotate(node)
+        return node
+
+    def left_rotate(self, z):
+        y = z.right
+        T2 = y.left
+        y.left = z
+        z.right = T2
+        z.height = 1 + max(self.get_height(z.left), self.get_height(z.right))
+        y.height = 1 + max(self.get_height(y.left), self.get_height(y.right))
+        return y
+
+    def right_rotate(self, y):
+        x = y.left
+        T2 = x.right
+        x.right = y
+        y.left = T2
+        y.height = 1 + max(self.get_height(y.left), self.get_height(y.right))
+        x.height = 1 + max(self.get_height(x.left), self.get_height(x.right))
+        return x
+
+    def get_min_value_node(self, node):
+        if node is None or node.left is None:
+            return node
+        return self.get_min_value_node(node.left)
+
+    def inorder_traversal(self, root):
+        if root:
+            self.inorder_traversal(root.left)
+            print(root.key, end=" ")
+            self.inorder_traversal(root.right)
+
+# Example usage:
+if __name__ == "__main__":
+    avl_tree = AVLTree()
+    root = None
+    keys = [9, 5, 10, 0, 6, 11, -1, 1, 2]
+    for key in keys:
+        root = avl_tree.insert(root, key)
+
+    print("Inorder Traversal of AVL tree:")
+    avl_tree.inorder_traversal(root)
+
+    key_to_delete = 10
+    root = avl_tree.delete(root, key_to_delete)
+    print("\nAfter deleting", key_to_delete)
+    avl_tree.inorder_traversal(root)
+
+class AVLTreeMenu:
+    def __init__(self):
+        self.avl_tree = AVLTree()
+        self.root = None
+
+    def display_menu(self):
+        print("AVL Tree Menu:")
+        print("1. Insert a key")
+        print("2. Delete a key")
+        print("3. Display the AVL tree")
+        print("4. Exit")
+
+    def run(self):
+        while True:
+            self.display_menu()
+            choice = input("Enter your choice: ")
+            if choice == "1":
+                key = int(input("Enter the key to insert: "))
+                self.root = self.avl_tree.insert(self.root, key)
+                print(f"Key {key} inserted.")
+            elif choice == "2":
+                key = int(input("Enter the key to delete: "))
+                self.root = self.avl_tree.delete(self.root, key)
+                print(f"Key {key} deleted.")
+            elif choice == "3":
+                print("Inorder Traversal of AVL tree:")
+                self.avl_tree.inorder_traversal(self.root)
+            elif choice == "4":
+                print("Exiting the AVL Tree Menu.")
+                break
+            else:
+                print("Invalid choice. Please enter a valid option.")
+
+if __name__ == "__main__":
+    avl_tree_menu = AVLTreeMenu()
+    avl_tree_menu.run()

From 5a772a900f8241b8ae5838d76da01c02a0c8e1a9 Mon Sep 17 00:00:00 2001
From: Radhey644
Date: Thu, 26 Oct 2023 10:51:12 +0530
Subject: [PATCH 39/44] Code converted in the python format

---
 Array/Prefix_Sum.py | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 Array/Prefix_Sum.py

diff --git a/Array/Prefix_Sum.py b/Array/Prefix_Sum.py
new file mode 100644
index 0000000..4bfa726
--- /dev/null
+++ b/Array/Prefix_Sum.py
@@ -0,0 +1,25 @@
+# Function to calculate the prefix sum of a list using a list comprehension
+def calculatePrefixSum(arr):
+    prefixSum = [sum(arr[:i + 1]) for i in range(len(arr))]
+    return prefixSum
+
+def main():
+    # Input the list size
+    n = int(input("Enter the size of the list: "))
+
+    if n <= 0:
+        print("List size must be a positive integer.")
+        return 1  # Exit with an error code
+
+    # Input the elements of the list
+    print(f"Enter {n} elements of the list:")
+    arr = [int(input()) for _ in range(n)]
+
+    # Calculate the prefix sum
+    prefixSum = calculatePrefixSum(arr)
+
+    # Display the prefix sum
+    print("Prefix Sum:", prefixSum)
+
+if __name__ == "__main__":
+    main()

From 00e403dc799c07afcd70ebbdd996f524f34e708b Mon Sep 17 00:00:00 2001
From: Radhey644
Date: Thu, 26 Oct 2023 10:51:42 +0530
Subject: [PATCH 40/44] Code converted in the python format

---
 Array/Prefix_Sum.cpp | 46 --------------------------------------------
 1 file changed, 46 deletions(-)
 delete mode 100644 Array/Prefix_Sum.cpp

diff --git a/Array/Prefix_Sum.cpp b/Array/Prefix_Sum.cpp
deleted file mode 100644
index 8d34455..0000000
--- a/Array/Prefix_Sum.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-#include <iostream>
-#include <vector>
-
-// Function to calculate the prefix sum of an array
-std::vector<int> calculatePrefixSum(const std::vector<int>& arr) {
-    int n = arr.size();
-    std::vector<int> prefixSum(n, 0);
-
-    prefixSum[0] = arr[0];
-    for (int i = 1; i < n; i++) {
-        prefixSum[i] = prefixSum[i - 1] + arr[i];
-    }
-
-    return prefixSum;
-}
-
-int main() {
-    // Input the array size
-    int n;
-    std::cout << "Enter the size of the array: ";
-    std::cin >> n;
-
-    if (n <= 0) {
-        std::cout << "Array size must be a positive integer." << std::endl;
-        return 1; // Exit with an error code
-    }
-
-    // Input the elements of the array
-    std::vector<int> arr(n);
-    std::cout << "Enter " << n << " elements of the array: ";
-    for (int i = 0; i < n; i++) {
-        std::cin >> arr[i];
-    }
-
-    // Calculate the prefix sum
-    std::vector<int> prefixSum = calculatePrefixSum(arr);
-
-    // Display the prefix sum
-    std::cout << "Prefix Sum: ";
-    for (int i = 0; i < n; i++) {
-        std::cout << prefixSum[i] << " ";
-    }
-    std::cout << std::endl;
-
-    return 0;
-}

From df4fa822fd1c881d1df11b103bc39392b0a08273 Mon Sep 17 00:00:00 2001
From: ydvmudit07
Date: Thu, 26 Oct 2023 14:37:18 +0530
Subject: [PATCH 41/44] file directory changed

---
 {Algoritm => Math}/kadane's_algorithm.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
 rename {Algoritm => Math}/kadane's_algorithm.py (78%)

diff --git a/Algoritm/kadane's_algorithm.py b/Math/kadane's_algorithm.py
similarity index 78%
rename from Algoritm/kadane's_algorithm.py
rename to Math/kadane's_algorithm.py
index 826f6d9..dcc8858 100644
--- a/Algoritm/kadane's_algorithm.py
+++ b/Math/kadane's_algorithm.py
@@ -6,11 +6,12 @@ def max_subarray_sum(nums):
 
     # Iterate through the array, starting from the second element
     for i in range(1, len(nums)):
-        # Calculate the maximum sum ending at the current position by considering whether it's better to start a new subarray or extend the previous one.
+        # Calculate the maximum sum ending at the current position by considering whether it's better to
+        # start a new subarray or extend the previous one.
         max_ending_here = max(nums[i], max_ending_here + nums[i])
 
-        # Update the maximum sum seen so far by comparing it with the maximum sum ending at the current position.
+        # Update the maximum sum seen so far by comparing it with the maximum sum ending at the current position.
         max_so_far = max(max_so_far, max_ending_here)
 
     # The 'max_so_far' variable now contains the maximum subarray sum.
-    return max_so_far
+    return max_so_far
\ No newline at end of file

From 88650ef88bc33aa5efc11a15441ede02dc61dab6 Mon Sep 17 00:00:00 2001
From: Radhey644
Date: Thu, 26 Oct 2023 20:18:58 +0530
Subject: [PATCH 42/44] File location changed

---
 {Array => Math}/Prefix_Sum.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename {Array => Math}/Prefix_Sum.py (100%)

diff --git a/Array/Prefix_Sum.py b/Math/Prefix_Sum.py
similarity index 100%
rename from Array/Prefix_Sum.py
rename to Math/Prefix_Sum.py

From 3483b051c7a8300abbab45c83f06fd932f5d6da9 Mon Sep 17 00:00:00 2001
From: Himanshu Agarwal
Date: Mon, 8 Jan 2024 14:06:09 +0530
Subject: [PATCH 43/44] Update README.md

---
 README.md | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 369bbdf..1d49648 100644
--- a/README.md
+++ b/README.md
@@ -60,14 +60,19 @@
 This project follows the [MIT LICENSE](https://choosealicense.com/licenses/mit/)
 
+## 🪪 License
+This project follows the [MIT LICENSE](https://choosealicense.com/licenses/mit/).
+
+

Connect with me

- Github     - LinkedIn     - Instagram     - Facebook     - Gmail    -

(Back to top)

+ Github     + LinkedIn     + Twitter     + Instagram     + Gmail    +

(Back to top)

From 721d4ba6439b55f352b1c587f8ae873314a7ad7c Mon Sep 17 00:00:00 2001 From: Himanshu Agarwal Date: Mon, 8 Jan 2024 14:06:34 +0530 Subject: [PATCH 44/44] Update README.md --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 1d49648..e24dec8 100644 --- a/README.md +++ b/README.md @@ -60,9 +60,6 @@ This project follows the [MIT LICENSE](https://choosealicense.com/licenses/mit/) -## 🪪 License -This project follows the [MIT LICENSE](https://choosealicense.com/licenses/mit/). -