# ---- Cartooning.py ----
# Real-time webcam cartoonizer: smooths colors with pyramid down/up plus
# bilateral filtering, then masks the result with inverted Canny edges
# whose thresholds are driven by two trackbars.
import cv2
import numpy as np

thresh1 = 0  # lower Canny threshold (set by trackbar)
thresh2 = 0  # upper Canny threshold (set by trackbar)


def th1(value):
    """Trackbar callback: update the lower Canny threshold."""
    global thresh1
    thresh1 = value


def th2(value):
    """Trackbar callback: update the upper Canny threshold."""
    global thresh2
    thresh2 = value


cam = cv2.VideoCapture(0)

cv2.namedWindow("Track")
cv2.createTrackbar("Thresh1", "Track", 0, 255, th1)
cv2.createTrackbar("thres2", "Track", 0, 255, th2)

numDown = 2       # pyramid down/up steps
numBilateral = 2  # bilateral-filter passes


def cartoonizing(frame):
    """Return a cartoonized version of *frame* (BGR image).

    Downsamples, smooths with repeated bilateral filtering, upsamples
    back, then ANDs the smoothed image with an inverted edge mask.
    """
    # BUG FIX: the original called cv2.pyrDown(frame) inside the loop, so
    # only one downsample ever happened while pyrUp ran numDown times,
    # producing an image 2x the input size.  Chain on ``img`` instead.
    img = frame
    for _ in range(numDown):
        img = cv2.pyrDown(img)
    for _ in range(numBilateral):
        img = cv2.bilateralFilter(img, 3, 3, 7)
    for _ in range(numDown):
        img = cv2.pyrUp(img)

    cv2.imshow("Bilateral", cv2.resize(img, (700, 500)))

    canny = cv2.Canny(frame, thresh1, thresh2)
    # Invert: edges become black lines on a white mask.
    _, threshold = cv2.threshold(canny, 150, 255, cv2.THRESH_BINARY_INV)

    # pyrDown/pyrUp can drift a pixel or two from the input size, so
    # resize the edge mask to match ``img`` exactly before masking.
    (h, w, _) = img.shape
    img_edge = cv2.resize(threshold, (w, h))
    img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)

    cv2.imshow("Edges", threshold)
    return cv2.bitwise_and(img, img_edge)


while True:
    ret, frame = cam.read()
    if not ret:  # BUG FIX: guard against failed capture reads
        break
    frame = cv2.flip(frame, 1)
    frame = cv2.resize(frame, (700, 500))
    end = cartoonizing(frame)
    end = cv2.resize(end, (700, 500))
    cv2.imshow("End", end)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()  # BUG FIX: windows were never destroyed


# ---- ImageStiching.py ----
# Stitch two overlapping photos: detect features, match them, estimate a
# homography with RANSAC, and warp one image onto the other.
import cv2
import numpy as np

img1 = input("Enter the path of the first image: ")
img2 = input("Enter the path of the second image: ")

train = cv2.imread(r"{}".format(img1))
query = cv2.imread(r"{}".format(img2))
train_RGB = cv2.cvtColor(train, cv2.COLOR_BGR2RGB)
query_RGB = cv2.cvtColor(query, cv2.COLOR_BGR2RGB)
train_gray = cv2.cvtColor(train_RGB, cv2.COLOR_RGB2GRAY)
query_gray = cv2.cvtColor(query_RGB, cv2.COLOR_RGB2GRAY)

# BUG FIX: the original resized the *color* images into the *_gray names,
# so feature detection ran on 3-channel data.  Resize the grayscale images.
query_gray = cv2.resize(query_gray, (500, 300))
train_gray = cv2.resize(train_gray, (500, 300))
query = cv2.resize(query, (500, 300))
train = cv2.resize(train, (500, 300))

feature_extraction_algo = 'sift'
feature_to_match = 'bf'


def select_descriptor(image, method=None):
    """Create the requested feature descriptor and run it on *image*.

    Returns ``(keypoints, features)``.  Raises ValueError on an unknown
    method (the original used ``assert``, which is stripped under -O).
    """
    if method == 'sift':
        descriptor = cv2.SIFT_create()
    elif method == 'surf':
        # NOTE(review): SURF is patented and only in opencv-contrib builds
        # -- confirm it is available before selecting this method.
        descriptor = cv2.SURF_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    else:
        raise ValueError(
            "Please define a descriptor method. Accepted values are "
            "'sift', 'surf', 'orb', 'brisk'")
    return descriptor.detectAndCompute(image, None)


keypoints_train, feature_train = select_descriptor(train_gray, feature_extraction_algo)
keypoints_query, feature_query = select_descriptor(query_gray, feature_extraction_algo)

cv2.imshow("Image 1", train)
cv2.imshow("Image 2", query)


def create_matching_object(method, crossCheck):
    """Return a BFMatcher with the right norm for the descriptor type."""
    if method in ('sift', 'surf'):
        return cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    # BUG FIX: original passed ``crossCheck == crossCheck`` (always True)
    # instead of the keyword assignment, so knn matching with
    # crossCheck=False silently got a cross-checking matcher.
    return cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)


def keypoints_matching(feature_train, feature_query, method):
    """Brute-force match with cross-checking; best (closest) matches first."""
    bf = create_matching_object(method, True)
    best_matches = bf.match(feature_train, feature_query)
    raw_matches = sorted(best_matches, key=lambda m: m.distance)
    print("Raw Matches with Brute Force", len(raw_matches))
    return raw_matches


def keypoints_matching_knn(feature_train, feature_query, ratio, method):
    """k-NN match filtered with Lowe's ratio test.

    NOTE(review): the diff was corrupted here ("if m.distance4:"); this is
    the standard Lowe ratio-test reconstruction -- confirm against the
    upstream source.
    """
    bf = create_matching_object(method, False)
    raw_matches = bf.knnMatch(feature_train, feature_query, k=2)
    print("Raw Matches with Knn", len(raw_matches))

    knn_matches = []
    for m, n in raw_matches:
        if m.distance < n.distance * ratio:
            knn_matches.append(m)
    return knn_matches


def homography_Stiching(keypoints_train_image, keypoints_query_image, reprojthreshhold):
    """Estimate the train->query homography from cross-checked matches.

    NOTE(review): the function header was lost in the corrupted diff;
    reconstructed from its call site and the surviving body fragment.
    Returns ``(matches, H, status)`` or None when too few matches exist.
    """
    matches = keypoints_matching(feature_train, feature_query, feature_extraction_algo)
    if len(matches) > 4:
        # BUG FIX: findHomography needs point coordinates, not KeyPoint
        # objects -- take ``.pt`` from each keypoint.
        points_train = np.float32(
            [keypoints_train_image[m.queryIdx].pt for m in matches])
        points_query = np.float32(
            [keypoints_query_image[m.trainIdx].pt for m in matches])
        (H, status) = cv2.findHomography(points_train, points_query,
                                         cv2.RANSAC, reprojthreshhold)
        return (matches, H, status)
    return None


M = homography_Stiching(keypoints_train, keypoints_query, 4)
if M is None:
    # BUG FIX: the original printed 'Error' and then fell through to
    # unpack None, crashing with a TypeError.  Exit cleanly instead.
    raise SystemExit('Error: not enough matches to estimate a homography')
(matches, Homography_Matrix, status) = M
print(Homography_Matrix)
width = query.shape[1] + train.shape[1]
print(width)
height = max(query.shape[0], train.shape[0])
print(height)

result = cv2.warpPerspective(train, Homography_Matrix, (width, height))
print(result)

# Paste the query image over the warped train image.
result[0:query.shape[0], 0:query.shape[1]] = query

cv2.imshow("Stich", result)
cv2.waitKey(0)
cv2.destroyAllWindows()


# ---- Tracking.py ----
# HSV color tracker: trackbars select an HSV range; regions of the webcam
# feed inside that range are boxed in red (loop continues on next line).
import cv2
import numpy as np

cam = cv2.VideoCapture(0)

hueHigh = 0
hueLow = 0
satHigh = 0
satLow = 0
valHigh = 0
valLow = 0


def hueh(val):
    """Trackbar callback: upper hue bound."""
    global hueHigh
    hueHigh = val


def huel(val):
    """Trackbar callback: lower hue bound."""
    global hueLow
    hueLow = val


def sath(val):
    """Trackbar callback: upper saturation bound."""
    global satHigh
    satHigh = val


def satl(val):
    """Trackbar callback: lower saturation bound."""
    global satLow
    satLow = val


def valh(val):
    """Trackbar callback: upper value bound."""
    global valHigh
    valHigh = val


def vall(val):
    """Trackbar callback: lower value bound."""
    global valLow
    valLow = val


cv2.namedWindow('My frame')
cv2.createTrackbar('hueL', 'My frame', 0, 180, huel)
cv2.createTrackbar('hueH', 'My frame', 0, 180, hueh)

cv2.createTrackbar('satL', 'My frame', 0, 255, satl)
cv2.createTrackbar('satH', 'My frame', 0, 255, sath)

cv2.createTrackbar('valL', 'My frame', 0, 255, vall)
cv2.createTrackbar('valH', 'My frame', 0, 255, valh)


while True:
    ret, frame = cam.read()
    if not ret:  # BUG FIX: guard against failed capture reads
        break
    frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Current trackbar-selected HSV window.
    lowerBound = np.array([hueLow, satLow, valLow])
    upperBound = np.array([hueHigh, satHigh, valHigh])

    myMask = cv2.inRange(frameHSV, lowerBound, upperBound)

    # Box every sufficiently large in-range region.
    contours, _ = cv2.findContours(myMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) >= 1000:
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)

    mySelection = cv2.bitwise_and(frame, frame, mask=myMask)
    cv2.imshow("My Selection", mySelection)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()  # BUG FIX: windows were never destroyed


# ---- extract_text_from_pdf.py ----
# Extract and print the text of the first page of 'example.pdf'
# (the file must be in the working directory).
import PyPDF2

# BUG FIX: the original never closed the file handle; use a context manager.
with open('example.pdf', 'rb') as pdfFileObj:
    # NOTE(review): PdfFileReader/getPage/extractText are the legacy
    # PyPDF2 1.x API (removed in 3.x in favour of PdfReader, .pages[0]
    # and .extract_text()) -- confirm the installed PyPDF2 version.
    pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
    pageObj = pdfReader.getPage(0)
    texts = pageObj.extractText()
print(texts)


# ---- filter_text.py ----
# Filter Text
# Import re
# module (continuation of the comment split across the collapsed diff)
import re

# Sample input containing things shaped like credit-card numbers.
string = """a string we are using to filter specific items.
perhaps we would like to match credit card numbers
mistakenly entered into the user input. 4444 3232 1010 8989
and perhaps another? 9191 0232 9999 1111"""

# Four groups of four digits, each digit optionally followed by whitespace.
# BUG FIX: raw string -- '\s' inside a plain literal is an invalid escape
# sequence (a SyntaxWarning on modern Python).
pattern = r'(([0-9](\s+)?){4}){4}'

# Search for the first occurrence of the pattern in the input.
found = re.search(pattern, string)
print(found)
# Report whether anything card-shaped was found.
if found:
    print("Found a credit card number!")
else:
    print("No credit card numbers present in input")


# ---- img_comparator_tool/README.md (begins mid-line in the original) ----
# # Welcome to Image Comparator Tool
# This is a tool for comparing two Images and getting their difference
# image as output
 + +# Version +1.0.0 + +# Motivation and Description +We.Contribute -> You.Leverage ; You.Contribute -> We.Leverage ; All -> Grow + +# Languages and Libraries used +Python and cv2
# ---- img_comparator_tool/README.md (continued) ----
#
# Installation
# ============
# Deploy the folder structure to the required location.
#
# Execution
# =========
# Call ``python3 img_comparator_tool.py``.
#   (fixed: the README referenced a non-existent ``image_comparision.py``;
#   the script shipped in this folder is ``img_comparator_tool.py``)
#
# FAQ
# ===
# mail us - acharjeerishi99@gmail.com
#
# (binary file img_comparator_tool/image2.jpg added in the original diff)


# ---- img_comparator_tool/img_comparator_tool.py ----
# This tool compares two images and colors their differences red.
import cv2


def img_comparator(imPath1, imPath2):
    """Compare the images at *imPath1* and *imPath2*.

    Returns a human-readable status string.  When the images differ,
    writes ``difference.png`` with the differing pixels painted red.
    """
    img1 = cv2.imread(imPath1)
    img2 = cv2.imread(imPath2)

    if img1 is None or img2 is None:
        return "Images weren't loaded successfully!!"

    # Compare at a common fixed size.
    img1 = cv2.resize(img1, (300, 300))
    img2 = cv2.resize(img2, (300, 300))

    # BUG FIX: cv2.subtract saturates negative results to zero, so pixels
    # where img2 is brighter than img1 were invisible to the comparison;
    # absdiff is symmetric and catches differences in both directions.
    diff = cv2.absdiff(img1, img2)
    b, g, r = cv2.split(diff)
    if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
        return "The images are identical"

    # Build a mask of differing pixels (Otsu picks the threshold) ...
    conv_hsv_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(
        conv_hsv_gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU
    )
    # ... and paint those pixels red in the diff and both inputs.
    diff[mask != 255] = [0, 0, 255]
    img1[mask != 255] = [0, 0, 255]
    img2[mask != 255] = [0, 0, 255]

    cv2.imwrite("difference.png", diff)
    return "The images are different!!"
if __name__ == "__main__":
    # Demo run; guarded so importing this module has no side effects.
    img1 = "python_img.jpg"
    img2 = "image2.jpg"
    print(img_comparator(img1, img2))

# (binary file img_comparator_tool/python_img.jpg added in the original diff)


# ---- random_pwd_gen.py ----
# Generate a strong random password.
import string
import secrets

# This script generates an 18-character password.
word_length = 18
# Letters, digits, and a little punctuation.
components = [string.ascii_letters, string.digits, "!@#$%&"]
# Flatten the component strings into one list of candidate characters.
chars = [item for clist in components for item in clist]


def generate_password():
    """Return a ``word_length``-character random password.

    BUG FIX: uses ``secrets.choice`` instead of ``random.choice`` --
    the ``random`` module is not cryptographically secure and must not
    be used for passwords or tokens.
    """
    return "".join(secrets.choice(chars) for _ in range(word_length))


# Output a generated password.
print(generate_password())


# ---- text_process_pandoc.py ----
# Convert example.md to example.pdf via pandoc.
import pandoc

# BUG FIX: the original ``open(...).read()`` leaked the file handle.
with open("example.md", "r", encoding="utf-8") as fh:
    in_file = fh.read()
pandoc.write(in_file, file="example.pdf", format="pdf")