From 8c6574f00a94e5c25751ca8290efb0a1abb8d45f Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Wed, 30 Oct 2019 19:50:22 -0400 Subject: [PATCH 01/30] travis test --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 877dbee9ade2..0c7c9fd0e1c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,5 +11,4 @@ script: - mypy --ignore-missing-imports . - pytest . --doctest-modules after_success: - - scripts/build_directory_md.py > DIRECTORY.md - - cat DIRECTORY.md + - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md From cc5503500365d7728b522cb78c90ee849cf0d3b0 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Wed, 30 Oct 2019 20:44:50 -0400 Subject: [PATCH 02/30] travis pull ID test --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 877dbee9ade2..7aaf813c92a5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,5 +11,6 @@ script: - mypy --ignore-missing-imports . - pytest . --doctest-modules after_success: - - scripts/build_directory_md.py > DIRECTORY.md + - scripts/build_directory_md.py 2&>1 tee DIRECTORY.md - cat DIRECTORY.md + - echo $TRAVIS_PULL_REQUEST \ No newline at end of file From 5f12b5ffe231ac6c7d2edafcc0d1c416e1e736eb Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Wed, 30 Oct 2019 20:53:18 -0400 Subject: [PATCH 03/30] get pr branch test --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7aaf813c92a5..34f0cbfd64ac 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,4 +13,4 @@ script: after_success: - scripts/build_directory_md.py 2&>1 tee DIRECTORY.md - cat DIRECTORY.md - - echo $TRAVIS_PULL_REQUEST \ No newline at end of file + - echo $TRAVIS_PULL_REQUEST_BRANCH \ No newline at end of file From abbf642730f10df650cc805bcc72412071f8df93 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Wed, 30 Oct 2019 21:07:31 -0400 Subject: [PATCH 04/30] retry pr build --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index accf69d1b1a3..d82b8caf966b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,4 +11,6 @@ script: - mypy --ignore-missing-imports . - pytest . --doctest-modules after_success: - - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md \ No newline at end of file + - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md + - echo $TRAVIS_PULL_REQUEST + - echo $TRAVIS_PULL_REQUEST_BRANCH \ No newline at end of file From 22a12c938d8f54006035a43b191b91ff4130a2e6 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Wed, 30 Oct 2019 21:39:15 -0400 Subject: [PATCH 05/30] test pushing back - probable git error for origin 'not found' --- .travis.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index d82b8caf966b..5997c61d69c6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,5 +12,7 @@ script: - pytest . --doctest-modules after_success: - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - - echo $TRAVIS_PULL_REQUEST - - echo $TRAVIS_PULL_REQUEST_BRANCH \ No newline at end of file + - echo $TRAVIS_PULL_REQUEST && echo $TRAVIS_PULL_REQUEST_BRANCH + - git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH + - git checkout $TRAVIS_PULL_REQUEST_BRANCH + - git push origin $TRAVIS_PULL_REQUEST_BRANCH \ No newline at end of file From c46312974c5cb9428d633129889facd849e8dc2a Mon Sep 17 00:00:00 2001 From: "Marvin M. 
Michum" Date: Wed, 30 Oct 2019 23:10:42 -0400 Subject: [PATCH 06/30] github auth? --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 5997c61d69c6..d68d089c91e7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,7 @@ script: after_success: - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - echo $TRAVIS_PULL_REQUEST && echo $TRAVIS_PULL_REQUEST_BRANCH + - composer config github-oauth.github.com ${gh_token} - git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH - git checkout $TRAVIS_PULL_REQUEST_BRANCH - git push origin $TRAVIS_PULL_REQUEST_BRANCH \ No newline at end of file From 6391d72cb4f0dfd333e265f0f1d3869186ccc9bf Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 00:16:22 -0400 Subject: [PATCH 07/30] add .sh --- .travis.yml | 5 +---- scripts/push_directory_md.sh | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 scripts/push_directory_md.sh diff --git a/.travis.yml b/.travis.yml index d68d089c91e7..cb7d1872cc23 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,4 @@ script: after_success: - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - echo $TRAVIS_PULL_REQUEST && echo $TRAVIS_PULL_REQUEST_BRANCH - - composer config github-oauth.github.com ${gh_token} - - git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH - - git checkout $TRAVIS_PULL_REQUEST_BRANCH - - git push origin $TRAVIS_PULL_REQUEST_BRANCH \ No newline at end of file + - scripts/push_directory_md.sh \ No newline at end of file diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh new file mode 100644 index 000000000000..f0e15631dff0 --- /dev/null +++ b/scripts/push_directory_md.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Source: https://gist.github.com/willprice/e07efd73fb7f13f917ea + +setup_git() { + git config --global user.email "${gh_email}" + git config --global user.name "${gh_user}" +} + +commit_website_files() { + git checkout $TRAVIS_PULL_REQUEST_BRANCH + git add DIRECTORY.md + git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" +} + +upload_files() { + git remote add origin https://${gh_token}@github.com/$TRAVIS_REPO_SLUG > /dev/null 2>&1 + git push --quiet --set-upstream origin $TRAVIS_PULL_REQUEST_BRANCH +} + +setup_git +commit_website_files +upload_files \ No newline at end of file From b5f73055e20520dd822b0ba7482254494b3cf393 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 13:02:00 -0400 Subject: [PATCH 08/30] chmod --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index cb7d1872cc23..8915df39c152 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,7 @@ cache: pip before_install: pip install --upgrade pip setuptools install: pip install -r requirements.txt before_script: + - chmod 644 scripts/push_directory_md.sh - black --check . || true - flake8 . --count --select=E9,F4,F63,F7,F82 --show-source --statistics script: From ac50a2c8b3a6c5dbb8d6810d4c486fac03b2e738 Mon Sep 17 00:00:00 2001 From: "Marvin M. 
Michum" Date: Thu, 31 Oct 2019 14:25:15 -0400 Subject: [PATCH 09/30] add index update for permission fix --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8915df39c152..2113c352bd2f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,6 @@ cache: pip before_install: pip install --upgrade pip setuptools install: pip install -r requirements.txt before_script: - - chmod 644 scripts/push_directory_md.sh - black --check . || true - flake8 . --count --select=E9,F4,F63,F7,F82 --show-source --statistics script: @@ -14,4 +13,5 @@ script: after_success: - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - echo $TRAVIS_PULL_REQUEST && echo $TRAVIS_PULL_REQUEST_BRANCH + - git update-index --add --chmod=+x scripts/build_directory_md.py - scripts/push_directory_md.sh \ No newline at end of file From d895a005d240991e9d24c8afa598b2bb6c951d21 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 14:32:32 -0400 Subject: [PATCH 10/30] run sh for script --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2113c352bd2f..cd89738a0d87 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,8 @@ language: python python: 3.7 cache: pip -before_install: pip install --upgrade pip setuptools +before_install: + - pip install --upgrade pip setuptools install: pip install -r requirements.txt before_script: - black --check . || true @@ -13,5 +14,4 @@ script: after_success: - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - echo $TRAVIS_PULL_REQUEST && echo $TRAVIS_PULL_REQUEST_BRANCH - - git update-index --add --chmod=+x scripts/build_directory_md.py - - scripts/push_directory_md.sh \ No newline at end of file + - sh scripts/push_directory_md.sh \ No newline at end of file From 0ac923eeb7dbf1070c99b6bf26e5e44971eb75f8 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 14:55:58 -0400 Subject: [PATCH 11/30] add all --- scripts/push_directory_md.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh index f0e15631dff0..8a7aed27ff76 100644 --- a/scripts/push_directory_md.sh +++ b/scripts/push_directory_md.sh @@ -9,13 +9,13 @@ setup_git() { commit_website_files() { git checkout $TRAVIS_PULL_REQUEST_BRANCH - git add DIRECTORY.md + git add . git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" } upload_files() { git remote add origin https://${gh_token}@github.com/$TRAVIS_REPO_SLUG > /dev/null 2>&1 - git push --quiet --set-upstream origin $TRAVIS_PULL_REQUEST_BRANCH + git push --set-upstream origin $TRAVIS_PULL_REQUEST_BRANCH } setup_git From bbe8bd31c71539705544d29a5a62b97e99436aa3 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 15:04:29 -0400 Subject: [PATCH 12/30] add pull directory --- scripts/push_directory_md.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh index 8a7aed27ff76..c1b21fb7595a 100644 --- a/scripts/push_directory_md.sh +++ b/scripts/push_directory_md.sh @@ -15,7 +15,7 @@ commit_website_files() { upload_files() { git remote add origin https://${gh_token}@github.com/$TRAVIS_REPO_SLUG > /dev/null 2>&1 - git push --set-upstream origin $TRAVIS_PULL_REQUEST_BRANCH + git push origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH } setup_git From dd4dd4e365027cd7d34bd317dc4a775e5f8a2970 Mon Sep 17 00:00:00 2001 From: "Marvin M. 
Michum" Date: Thu, 31 Oct 2019 15:10:22 -0400 Subject: [PATCH 13/30] fetch pr branch --- scripts/push_directory_md.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh index c1b21fb7595a..c88a90c519a1 100644 --- a/scripts/push_directory_md.sh +++ b/scripts/push_directory_md.sh @@ -8,6 +8,7 @@ setup_git() { } commit_website_files() { + git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH git checkout $TRAVIS_PULL_REQUEST_BRANCH git add . git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" @@ -15,7 +16,7 @@ commit_website_files() { upload_files() { git remote add origin https://${gh_token}@github.com/$TRAVIS_REPO_SLUG > /dev/null 2>&1 - git push origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH + git push origin $TRAVIS_PULL_REQUEST_BRANCH } setup_git From 78b7dabf3ab253c3e458f4df5234d0a2676f06dd Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 15:21:00 -0400 Subject: [PATCH 14/30] swap placement of adding commits --- scripts/push_directory_md.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh index c88a90c519a1..5ee84d467591 100644 --- a/scripts/push_directory_md.sh +++ b/scripts/push_directory_md.sh @@ -7,10 +7,10 @@ setup_git() { git config --global user.name "${gh_user}" } -commit_website_files() { +commit_directory_file() { + git add . git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH git checkout $TRAVIS_PULL_REQUEST_BRANCH - git add . git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" } @@ -20,5 +20,5 @@ upload_files() { } setup_git -commit_website_files +commit_directory_file upload_files \ No newline at end of file From 92df8b941391bb14faa43b39397ba08bd059ce13 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 15:52:00 -0400 Subject: [PATCH 15/30] rotate --- scripts/push_directory_md.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh index 5ee84d467591..0e834d9f0ef6 100644 --- a/scripts/push_directory_md.sh +++ b/scripts/push_directory_md.sh @@ -9,9 +9,12 @@ setup_git() { commit_directory_file() { git add . + git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" +} + +fetch_get() { git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH git checkout $TRAVIS_PULL_REQUEST_BRANCH - git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" } upload_files() { @@ -19,6 +22,7 @@ upload_files() { git push origin $TRAVIS_PULL_REQUEST_BRANCH } -setup_git commit_directory_file +setup_git +fetch_get upload_files \ No newline at end of file From bb63b2f75b2ed14b6953db428427ea8734bd25f0 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 16:11:51 -0400 Subject: [PATCH 16/30] quit trying to update Travis --- .travis.yml | 4 +--- scripts/push_directory_md.sh | 28 ---------------------------- 2 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 scripts/push_directory_md.sh diff --git a/.travis.yml b/.travis.yml index cd89738a0d87..cbde13ea3f17 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,6 +12,4 @@ script: - mypy --ignore-missing-imports . - pytest . 
--doctest-modules after_success: - - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - - echo $TRAVIS_PULL_REQUEST && echo $TRAVIS_PULL_REQUEST_BRANCH - - sh scripts/push_directory_md.sh \ No newline at end of file + - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md \ No newline at end of file diff --git a/scripts/push_directory_md.sh b/scripts/push_directory_md.sh deleted file mode 100644 index 0e834d9f0ef6..000000000000 --- a/scripts/push_directory_md.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -# Source: https://gist.github.com/willprice/e07efd73fb7f13f917ea - -setup_git() { - git config --global user.email "${gh_email}" - git config --global user.name "${gh_user}" -} - -commit_directory_file() { - git add . - git commit --message "Travis build: $TRAVIS_BUILD_NUMBER" -} - -fetch_get() { - git fetch origin pull/$TRAVIS_PULL_REQUEST/head:$TRAVIS_PULL_REQUEST_BRANCH - git checkout $TRAVIS_PULL_REQUEST_BRANCH -} - -upload_files() { - git remote add origin https://${gh_token}@github.com/$TRAVIS_REPO_SLUG > /dev/null 2>&1 - git push origin $TRAVIS_PULL_REQUEST_BRANCH -} - -commit_directory_file -setup_git -fetch_get -upload_files \ No newline at end of file From b28fdf6f1eaa92c27bfacc8820d0eb5faa11a2ae Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 19:42:50 -0400 Subject: [PATCH 17/30] formatting leftovers --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index cbde13ea3f17..accf69d1b1a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,7 @@ language: python python: 3.7 cache: pip -before_install: - - pip install --upgrade pip setuptools +before_install: pip install --upgrade pip setuptools install: pip install -r requirements.txt before_script: - black --check . || true From a5233f9599d7f4927b50dfcbb18c7cf6f9abd4da Mon Sep 17 00:00:00 2001 From: "Marvin M. 
Michum" Date: Thu, 31 Oct 2019 20:22:32 -0400 Subject: [PATCH 18/30] testing out init action --- .github/workflows/autoDIR.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/autoDIR.yml diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml new file mode 100644 index 000000000000..e6fa9297fc5b --- /dev/null +++ b/.github/workflows/autoDIR.yml @@ -0,0 +1,17 @@ +name: autoDIR +on: [pull request] +jobs: + build: + runs-on: ubuntu-latest + strategy: + max-parallel: 1 + matrix: + python-version: [3.7] + steps: + - uses: actions/checkout@v1 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Build DIRECTORY + run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md \ No newline at end of file From d0ad618b1af07f4650b88b3378163fa18072bab3 Mon Sep 17 00:00:00 2001 From: autoblack Date: Fri, 1 Nov 2019 00:25:08 +0000 Subject: [PATCH 19/30] fixup: Format Python code with psf/black --- ciphers/deterministic_miller_rabin.py | 30 +- ciphers/diffie.py | 15 +- .../binary_tree/basic_binary_tree.py | 4 +- data_structures/binary_tree/treap.py | 1 - data_structures/heap/min_heap.py | 7 +- .../linked_list/doubly_linked_list.py | 4 +- ...longest_increasing_subsequence_o(nlogn).py | 2 + dynamic_programming/max_sub_array.py | 1 + file_transfer/send_file.py | 4 +- maths/factorial_python.py | 2 +- maths/factorial_recursive.py | 2 +- maths/gaussian.py | 2 +- maths/perfect_square.py | 2 +- neural_network/gan.py | 331 +++++++----- neural_network/input_data.py | 470 +++++++++--------- other/least_recently_used.py | 10 +- project_euler/problem_20/sol4.py | 2 +- web_programming/get_imdbtop.py | 6 +- 18 files changed, 513 insertions(+), 382 deletions(-) diff --git a/ciphers/deterministic_miller_rabin.py b/ciphers/deterministic_miller_rabin.py index 37845d6c9b41..e604a7b84166 100644 --- a/ciphers/deterministic_miller_rabin.py +++ b/ciphers/deterministic_miller_rabin.py @@ -41,19 +41,21 @@ def miller_rabin(n, allow_probable=False): "A return value of True indicates a probable prime." 
) # array bounds provided by analysis - bounds = [2_047, - 1_373_653, - 25_326_001, - 3_215_031_751, - 2_152_302_898_747, - 3_474_749_660_383, - 341_550_071_728_321, - 1, - 3_825_123_056_546_413_051, - 1, - 1, - 318_665_857_834_031_151_167_461, - 3_317_044_064_679_887_385_961_981] + bounds = [ + 2_047, + 1_373_653, + 25_326_001, + 3_215_031_751, + 2_152_302_898_747, + 3_474_749_660_383, + 341_550_071_728_321, + 1, + 3_825_123_056_546_413_051, + 1, + 1, + 318_665_857_834_031_151_167_461, + 3_317_044_064_679_887_385_961_981, + ] primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(bounds, 1): @@ -131,5 +133,5 @@ def test_miller_rabin(): # upper limit for probabilistic test -if __name__ == '__main__': +if __name__ == "__main__": test_miller_rabin() diff --git a/ciphers/diffie.py b/ciphers/diffie.py index 6b0cca1f45e6..c349aaa2f3b8 100644 --- a/ciphers/diffie.py +++ b/ciphers/diffie.py @@ -1,8 +1,8 @@ def find_primitive(n): for r in range(1, n): li = [] - for x in range(n-1): - val = pow(r,x,n) + for x in range(n - 1): + val = pow(r, x, n) if val in li: break li.append(val) @@ -11,16 +11,15 @@ def find_primitive(n): if __name__ == "__main__": - q = int(input('Enter a prime number q: ')) + q = int(input("Enter a prime number q: ")) a = find_primitive(q) - a_private = int(input('Enter private key of A: ')) + a_private = int(input("Enter private key of A: ")) a_public = pow(a, a_private, q) - b_private = int(input('Enter private key of B: ')) + b_private = int(input("Enter private key of B: ")) b_public = pow(a, b_private, q) a_secret = pow(b_public, a_private, q) b_secret = pow(a_public, b_private, q) - print('The key value generated by A is: ', a_secret) - print('The key value generated by B is: ', b_secret) - + print("The key value generated by A is: ", a_secret) + print("The key value generated by B is: ", b_secret) diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 6b7de7803704..4257a8e3c5b3 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -22,7 +22,7 @@ def display(tree): # In Order traversal of the tree def depth_of_tree( - tree + tree, ): # This is the recursive function to find the depth of binary tree. if tree is None: return 0 @@ -36,7 +36,7 @@ def depth_of_tree( def is_full_binary_tree( - tree + tree, ): # This functions returns that is it full binary tree or not? 
if tree is None: return True diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index b603eec3ef3c..6bc2403f7102 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -172,7 +172,6 @@ def main(): args = input() print("good by!") - if __name__ == "__main__": diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 6184d83be774..e68853837faa 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -77,9 +77,10 @@ def sift_down(self, idx, array): if smallest != idx: array[idx], array[smallest] = array[smallest], array[idx] - self.idx_of_element[array[idx]], self.idx_of_element[ - array[smallest] - ] = ( + ( + self.idx_of_element[array[idx]], + self.idx_of_element[array[smallest]], + ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 38fff867b416..2a95a004587c 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -23,9 +23,7 @@ def insertHead(self, x): def deleteHead(self): temp = self.head self.head = self.head.next # oldHead <--> 2ndElement(head) - self.head.previous = ( - None - ) # oldHead --> 2ndElement(head) nothing pointing at it so the old head will be removed + self.head.previous = None # oldHead --> 2ndElement(head) nothing pointing at it so the old head will be removed if self.head is None: self.tail = None # if empty linked list return temp diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py index 4b06e0d885f2..46790a5a8d41 100644 --- a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py +++ b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py @@ -6,6 +6,7 @@ ############################# from typing import List + def CeilIndex(v, l, r, key): while r - l > 1: m = (l + r) // 2 @@ -49,4 +50,5 @@ def LongestIncreasingSubsequenceLength(v: List[int]) -> int: if __name__ == "__main__": import doctest + doctest.testmod() diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py index f7c8209718ef..7350eaf373cb 100644 --- a/dynamic_programming/max_sub_array.py +++ b/dynamic_programming/max_sub_array.py @@ -75,6 +75,7 @@ def max_sub_array(nums: List[int]) -> int: import time import matplotlib.pyplot as plt from random import randint + inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] tim = [] for i in inputs: diff --git a/file_transfer/send_file.py b/file_transfer/send_file.py index ebc075a30ad4..6494114a9072 100644 --- a/file_transfer/send_file.py +++ b/file_transfer/send_file.py @@ -2,8 +2,8 @@ import socket # Import socket module ONE_CONNECTION_ONLY = ( - True - ) # Set this to False if you wish to continuously accept connections + True # Set this to False if you wish to continuously accept connections + ) filename = "mytext.txt" port = 12312 # Reserve a port for your service. 
diff --git a/maths/factorial_python.py b/maths/factorial_python.py index b9adfdbaeaff..46688261af56 100644 --- a/maths/factorial_python.py +++ b/maths/factorial_python.py @@ -28,7 +28,7 @@ def factorial(input_number: int) -> int: return result -if __name__ == '__main__': +if __name__ == "__main__": import doctest doctest.testmod() diff --git a/maths/factorial_recursive.py b/maths/factorial_recursive.py index 013560b28b42..4f7074d16587 100644 --- a/maths/factorial_recursive.py +++ b/maths/factorial_recursive.py @@ -24,7 +24,7 @@ def factorial(n: int) -> int: return 1 if n == 0 or n == 1 else n * factorial(n - 1) -if __name__ == '__main__': +if __name__ == "__main__": import doctest doctest.testmod() diff --git a/maths/gaussian.py b/maths/gaussian.py index e5f55dfaffd1..ffea20fb2ba1 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -50,7 +50,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: >>> gaussian(2523, mu=234234, sigma=3425) 0.0 """ - return 1 / sqrt(2 * pi * sigma ** 2) * exp(-(x - mu) ** 2 / 2 * sigma ** 2) + return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / 2 * sigma ** 2) if __name__ == "__main__": diff --git a/maths/perfect_square.py b/maths/perfect_square.py index 9b868c5de98a..3e7a1c07a75f 100644 --- a/maths/perfect_square.py +++ b/maths/perfect_square.py @@ -21,7 +21,7 @@ def perfect_square(num: int) -> bool: return math.sqrt(num) * math.sqrt(num) == num -if __name__ == '__main__': +if __name__ == "__main__": import doctest doctest.testmod() diff --git a/neural_network/gan.py b/neural_network/gan.py index edfff420547b..76f46314c4ba 100644 --- a/neural_network/gan.py +++ b/neural_network/gan.py @@ -7,28 +7,42 @@ random_numer = 42 np.random.seed(random_numer) + + def ReLu(x): - mask = (x>0) * 1.0 - return mask *x + mask = (x > 0) * 1.0 + return mask * x + + def d_ReLu(x): - mask = (x>0) * 1.0 + mask = (x > 0) * 1.0 return mask + def arctan(x): return np.arctan(x) + + def d_arctan(x): return 1 / (1 + x ** 2) + def log(x): - return 1 / ( 1+ np.exp(-1*x)) + return 1 / (1 + np.exp(-1 * x)) + + def d_log(x): return log(x) * (1 - log(x)) + def tanh(x): return np.tanh(x) + + def d_tanh(x): return 1 - np.tanh(x) ** 2 + def plot(samples): fig = plt.figure(figsize=(4, 4)) gs = gridspec.GridSpec(4, 4) @@ -36,104 +50,140 @@ def plot(samples): for i, sample in enumerate(samples): ax = plt.subplot(gs[i]) - plt.axis('off') + plt.axis("off") ax.set_xticklabels([]) ax.set_yticklabels([]) - ax.set_aspect('equal') - plt.imshow(sample.reshape(28, 28), cmap='Greys_r') + ax.set_aspect("equal") + plt.imshow(sample.reshape(28, 28), cmap="Greys_r") return fig - # 1. Load Data and declare hyper -print('--------- Load Data ----------') -mnist = input_data.read_data_sets('MNIST_data', one_hot=False) +print("--------- Load Data ----------") +mnist = input_data.read_data_sets("MNIST_data", one_hot=False) temp = mnist.test images, labels = temp.images, temp.labels -images, labels = shuffle(np.asarray(images),np.asarray(labels)) +images, labels = shuffle(np.asarray(images), np.asarray(labels)) num_epoch = 10 learing_rate = 0.00009 G_input = 100 -hidden_input,hidden_input2,hidden_input3 = 128,256,346 -hidden_input4,hidden_input5,hidden_input6 = 480,560,686 +hidden_input, hidden_input2, hidden_input3 = 128, 256, 346 +hidden_input4, hidden_input5, hidden_input6 = 480, 560, 686 - -print('--------- Declare Hyper Parameters ----------') +print("--------- Declare Hyper Parameters ----------") # 2. Declare Weights -D_W1 = np.random.normal(size=(784,hidden_input),scale=(1. 
/ np.sqrt(784 / 2.))) *0.002 +D_W1 = ( + np.random.normal(size=(784, hidden_input), scale=(1.0 / np.sqrt(784 / 2.0))) * 0.002 +) # D_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 D_b1 = np.zeros(hidden_input) -D_W2 = np.random.normal(size=(hidden_input,1),scale=(1. / np.sqrt(hidden_input / 2.))) *0.002 +D_W2 = ( + np.random.normal(size=(hidden_input, 1), scale=(1.0 / np.sqrt(hidden_input / 2.0))) + * 0.002 +) # D_b2 = np.random.normal(size=(1),scale=(1. / np.sqrt(1 / 2.))) *0.002 D_b2 = np.zeros(1) -G_W1 = np.random.normal(size=(G_input,hidden_input),scale=(1. / np.sqrt(G_input / 2.))) *0.002 +G_W1 = ( + np.random.normal(size=(G_input, hidden_input), scale=(1.0 / np.sqrt(G_input / 2.0))) + * 0.002 +) # G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 G_b1 = np.zeros(hidden_input) -G_W2 = np.random.normal(size=(hidden_input,hidden_input2),scale=(1. / np.sqrt(hidden_input / 2.))) *0.002 +G_W2 = ( + np.random.normal( + size=(hidden_input, hidden_input2), scale=(1.0 / np.sqrt(hidden_input / 2.0)) + ) + * 0.002 +) # G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 G_b2 = np.zeros(hidden_input2) -G_W3 = np.random.normal(size=(hidden_input2,hidden_input3),scale=(1. / np.sqrt(hidden_input2 / 2.))) *0.002 +G_W3 = ( + np.random.normal( + size=(hidden_input2, hidden_input3), scale=(1.0 / np.sqrt(hidden_input2 / 2.0)) + ) + * 0.002 +) # G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 G_b3 = np.zeros(hidden_input3) -G_W4 = np.random.normal(size=(hidden_input3,hidden_input4),scale=(1. / np.sqrt(hidden_input3 / 2.))) *0.002 +G_W4 = ( + np.random.normal( + size=(hidden_input3, hidden_input4), scale=(1.0 / np.sqrt(hidden_input3 / 2.0)) + ) + * 0.002 +) # G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 G_b4 = np.zeros(hidden_input4) -G_W5 = np.random.normal(size=(hidden_input4,hidden_input5),scale=(1. / np.sqrt(hidden_input4 / 2.))) *0.002 +G_W5 = ( + np.random.normal( + size=(hidden_input4, hidden_input5), scale=(1.0 / np.sqrt(hidden_input4 / 2.0)) + ) + * 0.002 +) # G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 G_b5 = np.zeros(hidden_input5) -G_W6 = np.random.normal(size=(hidden_input5,hidden_input6),scale=(1. / np.sqrt(hidden_input5 / 2.))) *0.002 +G_W6 = ( + np.random.normal( + size=(hidden_input5, hidden_input6), scale=(1.0 / np.sqrt(hidden_input5 / 2.0)) + ) + * 0.002 +) # G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002 G_b6 = np.zeros(hidden_input6) -G_W7 = np.random.normal(size=(hidden_input6,784),scale=(1. / np.sqrt(hidden_input6 / 2.))) *0.002 +G_W7 = ( + np.random.normal( + size=(hidden_input6, 784), scale=(1.0 / np.sqrt(hidden_input6 / 2.0)) + ) + * 0.002 +) # G_b2 = np.random.normal(size=(784),scale=(1. / np.sqrt(784 / 2.))) *0.002 G_b7 = np.zeros(784) # 3. 
For Adam Optimzier -v1,m1 = 0,0 -v2,m2 = 0,0 -v3,m3 = 0,0 -v4,m4 = 0,0 +v1, m1 = 0, 0 +v2, m2 = 0, 0 +v3, m3 = 0, 0 +v4, m4 = 0, 0 -v5,m5 = 0,0 -v6,m6 = 0,0 -v7,m7 = 0,0 -v8,m8 = 0,0 -v9,m9 = 0,0 -v10,m10 = 0,0 -v11,m11 = 0,0 -v12,m12 = 0,0 +v5, m5 = 0, 0 +v6, m6 = 0, 0 +v7, m7 = 0, 0 +v8, m8 = 0, 0 +v9, m9 = 0, 0 +v10, m10 = 0, 0 +v11, m11 = 0, 0 +v12, m12 = 0, 0 -v13,m13 = 0,0 -v14,m14 = 0,0 +v13, m13 = 0, 0 +v14, m14 = 0, 0 -v15,m15 = 0,0 -v16,m16 = 0,0 +v15, m15 = 0, 0 +v16, m16 = 0, 0 -v17,m17 = 0,0 -v18,m18 = 0,0 +v17, m17 = 0, 0 +v18, m18 = 0, 0 -beta_1,beta_2,eps = 0.9,0.999,0.00000001 +beta_1, beta_2, eps = 0.9, 0.999, 0.00000001 -print('--------- Started Training ----------') +print("--------- Started Training ----------") for iter in range(num_epoch): random_int = np.random.randint(len(images) - 5) - current_image = np.expand_dims(images[random_int],axis=0) + current_image = np.expand_dims(images[random_int], axis=0) # Func: Generate The first Fake Data - Z = np.random.uniform(-1., 1., size=[1, G_input]) + Z = np.random.uniform(-1.0, 1.0, size=[1, G_input]) Gl1 = Z.dot(G_W1) + G_b1 Gl1A = arctan(Gl1) Gl2 = Gl1A.dot(G_W2) + G_b2 @@ -164,38 +214,38 @@ def plot(samples): Dl2_fA = log(Dl2_f) # Func: Cost D - D_cost = -np.log(Dl2_rA) + np.log(1.0- Dl2_fA) + D_cost = -np.log(Dl2_rA) + np.log(1.0 - Dl2_fA) # Func: Gradient - grad_f_w2_part_1 = 1/(1.0- Dl2_fA) - grad_f_w2_part_2 = d_log(Dl2_f) - grad_f_w2_part_3 = Dl1_fA - grad_f_w2 = grad_f_w2_part_3.T.dot(grad_f_w2_part_1 * grad_f_w2_part_2) + grad_f_w2_part_1 = 1 / (1.0 - Dl2_fA) + grad_f_w2_part_2 = d_log(Dl2_f) + grad_f_w2_part_3 = Dl1_fA + grad_f_w2 = grad_f_w2_part_3.T.dot(grad_f_w2_part_1 * grad_f_w2_part_2) grad_f_b2 = grad_f_w2_part_1 * grad_f_w2_part_2 - grad_f_w1_part_1 = (grad_f_w2_part_1 * grad_f_w2_part_2).dot(D_W2.T) - grad_f_w1_part_2 = d_ReLu(Dl1_f) - grad_f_w1_part_3 = current_fake_data - grad_f_w1 = grad_f_w1_part_3.T.dot(grad_f_w1_part_1 * grad_f_w1_part_2) - grad_f_b1 = grad_f_w1_part_1 * grad_f_w1_part_2 + grad_f_w1_part_1 = (grad_f_w2_part_1 * grad_f_w2_part_2).dot(D_W2.T) + grad_f_w1_part_2 = d_ReLu(Dl1_f) + grad_f_w1_part_3 = current_fake_data + grad_f_w1 = grad_f_w1_part_3.T.dot(grad_f_w1_part_1 * grad_f_w1_part_2) + grad_f_b1 = grad_f_w1_part_1 * grad_f_w1_part_2 - grad_r_w2_part_1 = - 1/Dl2_rA - grad_r_w2_part_2 = d_log(Dl2_r) - grad_r_w2_part_3 = Dl1_rA - grad_r_w2 = grad_r_w2_part_3.T.dot(grad_r_w2_part_1 * grad_r_w2_part_2) - grad_r_b2 = grad_r_w2_part_1 * grad_r_w2_part_2 + grad_r_w2_part_1 = -1 / Dl2_rA + grad_r_w2_part_2 = d_log(Dl2_r) + grad_r_w2_part_3 = Dl1_rA + grad_r_w2 = grad_r_w2_part_3.T.dot(grad_r_w2_part_1 * grad_r_w2_part_2) + grad_r_b2 = grad_r_w2_part_1 * grad_r_w2_part_2 - grad_r_w1_part_1 = (grad_r_w2_part_1 * grad_r_w2_part_2).dot(D_W2.T) - grad_r_w1_part_2 = d_ReLu(Dl1_r) - grad_r_w1_part_3 = current_image - grad_r_w1 = grad_r_w1_part_3.T.dot(grad_r_w1_part_1 * grad_r_w1_part_2) - grad_r_b1 = grad_r_w1_part_1 * grad_r_w1_part_2 + grad_r_w1_part_1 = (grad_r_w2_part_1 * grad_r_w2_part_2).dot(D_W2.T) + grad_r_w1_part_2 = d_ReLu(Dl1_r) + grad_r_w1_part_3 = current_image + grad_r_w1 = grad_r_w1_part_3.T.dot(grad_r_w1_part_1 * grad_r_w1_part_2) + grad_r_b1 = grad_r_w1_part_1 * grad_r_w1_part_2 - grad_w1 =grad_f_w1 + grad_r_w1 - grad_b1 =grad_f_b1 + grad_r_b1 + grad_w1 = grad_f_w1 + grad_r_w1 + grad_b1 = grad_f_b1 + grad_r_b1 - grad_w2 =grad_f_w2 + grad_r_w2 - grad_b2 =grad_f_b2 + grad_r_b2 + grad_w2 = grad_f_w2 + grad_r_w2 + grad_b2 = grad_f_b2 + grad_r_b2 # ---- Update Gradient ---- m1 = 
beta_1 * m1 + (1 - beta_1) * grad_w1 @@ -210,14 +260,22 @@ def plot(samples): m4 = beta_1 * m4 + (1 - beta_1) * grad_b2 v4 = beta_2 * v4 + (1 - beta_2) * grad_b2 ** 2 - D_W1 = D_W1 - (learing_rate / (np.sqrt(v1 /(1-beta_2) ) + eps)) * (m1/(1-beta_1)) - D_b1 = D_b1 - (learing_rate / (np.sqrt(v2 /(1-beta_2) ) + eps)) * (m2/(1-beta_1)) + D_W1 = D_W1 - (learing_rate / (np.sqrt(v1 / (1 - beta_2)) + eps)) * ( + m1 / (1 - beta_1) + ) + D_b1 = D_b1 - (learing_rate / (np.sqrt(v2 / (1 - beta_2)) + eps)) * ( + m2 / (1 - beta_1) + ) - D_W2 = D_W2 - (learing_rate / (np.sqrt(v3 /(1-beta_2) ) + eps)) * (m3/(1-beta_1)) - D_b2 = D_b2 - (learing_rate / (np.sqrt(v4 /(1-beta_2) ) + eps)) * (m4/(1-beta_1)) + D_W2 = D_W2 - (learing_rate / (np.sqrt(v3 / (1 - beta_2)) + eps)) * ( + m3 / (1 - beta_1) + ) + D_b2 = D_b2 - (learing_rate / (np.sqrt(v4 / (1 - beta_2)) + eps)) * ( + m4 / (1 - beta_1) + ) # Func: Forward Feed for G - Z = np.random.uniform(-1., 1., size=[1, G_input]) + Z = np.random.uniform(-1.0, 1.0, size=[1, G_input]) Gl1 = Z.dot(G_W1) + G_b1 Gl1A = arctan(Gl1) Gl2 = Gl1A.dot(G_W2) + G_b2 @@ -244,7 +302,9 @@ def plot(samples): G_cost = -np.log(Dl2_A) # Func: Gradient - grad_G_w7_part_1 = ((-1/Dl2_A) * d_log(Dl2).dot(D_W2.T) * (d_ReLu(Dl1))).dot(D_W1.T) + grad_G_w7_part_1 = ((-1 / Dl2_A) * d_log(Dl2).dot(D_W2.T) * (d_ReLu(Dl1))).dot( + D_W1.T + ) grad_G_w7_part_2 = d_log(Gl7) grad_G_w7_part_3 = Gl6A grad_G_w7 = grad_G_w7_part_3.T.dot(grad_G_w7_part_1 * grad_G_w7_part_1) @@ -254,31 +314,31 @@ def plot(samples): grad_G_w6_part_2 = d_ReLu(Gl6) grad_G_w6_part_3 = Gl5A grad_G_w6 = grad_G_w6_part_3.T.dot(grad_G_w6_part_1 * grad_G_w6_part_2) - grad_G_b6 = (grad_G_w6_part_1 * grad_G_w6_part_2) + grad_G_b6 = grad_G_w6_part_1 * grad_G_w6_part_2 grad_G_w5_part_1 = (grad_G_w6_part_1 * grad_G_w6_part_2).dot(G_W6.T) grad_G_w5_part_2 = d_tanh(Gl5) grad_G_w5_part_3 = Gl4A grad_G_w5 = grad_G_w5_part_3.T.dot(grad_G_w5_part_1 * grad_G_w5_part_2) - grad_G_b5 = (grad_G_w5_part_1 * grad_G_w5_part_2) + grad_G_b5 = grad_G_w5_part_1 * grad_G_w5_part_2 grad_G_w4_part_1 = (grad_G_w5_part_1 * grad_G_w5_part_2).dot(G_W5.T) grad_G_w4_part_2 = d_ReLu(Gl4) grad_G_w4_part_3 = Gl3A grad_G_w4 = grad_G_w4_part_3.T.dot(grad_G_w4_part_1 * grad_G_w4_part_2) - grad_G_b4 = (grad_G_w4_part_1 * grad_G_w4_part_2) + grad_G_b4 = grad_G_w4_part_1 * grad_G_w4_part_2 grad_G_w3_part_1 = (grad_G_w4_part_1 * grad_G_w4_part_2).dot(G_W4.T) grad_G_w3_part_2 = d_arctan(Gl3) grad_G_w3_part_3 = Gl2A grad_G_w3 = grad_G_w3_part_3.T.dot(grad_G_w3_part_1 * grad_G_w3_part_2) - grad_G_b3 = (grad_G_w3_part_1 * grad_G_w3_part_2) + grad_G_b3 = grad_G_w3_part_1 * grad_G_w3_part_2 grad_G_w2_part_1 = (grad_G_w3_part_1 * grad_G_w3_part_2).dot(G_W3.T) grad_G_w2_part_2 = d_ReLu(Gl2) grad_G_w2_part_3 = Gl1A grad_G_w2 = grad_G_w2_part_3.T.dot(grad_G_w2_part_1 * grad_G_w2_part_2) - grad_G_b2 = (grad_G_w2_part_1 * grad_G_w2_part_2) + grad_G_b2 = grad_G_w2_part_1 * grad_G_w2_part_2 grad_G_w1_part_1 = (grad_G_w2_part_1 * grad_G_w2_part_2).dot(G_W2.T) grad_G_w1_part_2 = d_arctan(Gl1) @@ -329,29 +389,57 @@ def plot(samples): m18 = beta_1 * m18 + (1 - beta_1) * grad_G_b7 v18 = beta_2 * v18 + (1 - beta_2) * grad_G_b7 ** 2 - G_W1 = G_W1 - (learing_rate / (np.sqrt(v5 /(1-beta_2) ) + eps)) * (m5/(1-beta_1)) - G_b1 = G_b1 - (learing_rate / (np.sqrt(v6 /(1-beta_2) ) + eps)) * (m6/(1-beta_1)) - - G_W2 = G_W2 - (learing_rate / (np.sqrt(v7 /(1-beta_2) ) + eps)) * (m7/(1-beta_1)) - G_b2 = G_b2 - (learing_rate / (np.sqrt(v8 /(1-beta_2) ) + eps)) * (m8/(1-beta_1)) - - G_W3 = G_W3 - (learing_rate 
/ (np.sqrt(v9 /(1-beta_2) ) + eps)) * (m9/(1-beta_1)) - G_b3 = G_b3 - (learing_rate / (np.sqrt(v10 /(1-beta_2) ) + eps)) * (m10/(1-beta_1)) - - G_W4 = G_W4 - (learing_rate / (np.sqrt(v11 /(1-beta_2) ) + eps)) * (m11/(1-beta_1)) - G_b4 = G_b4 - (learing_rate / (np.sqrt(v12 /(1-beta_2) ) + eps)) * (m12/(1-beta_1)) - - G_W5 = G_W5 - (learing_rate / (np.sqrt(v13 /(1-beta_2) ) + eps)) * (m13/(1-beta_1)) - G_b5 = G_b5 - (learing_rate / (np.sqrt(v14 /(1-beta_2) ) + eps)) * (m14/(1-beta_1)) - - G_W6 = G_W6 - (learing_rate / (np.sqrt(v15 /(1-beta_2) ) + eps)) * (m15/(1-beta_1)) - G_b6 = G_b6 - (learing_rate / (np.sqrt(v16 /(1-beta_2) ) + eps)) * (m16/(1-beta_1)) - - G_W7 = G_W7 - (learing_rate / (np.sqrt(v17 /(1-beta_2) ) + eps)) * (m17/(1-beta_1)) - G_b7 = G_b7 - (learing_rate / (np.sqrt(v18 /(1-beta_2) ) + eps)) * (m18/(1-beta_1)) + G_W1 = G_W1 - (learing_rate / (np.sqrt(v5 / (1 - beta_2)) + eps)) * ( + m5 / (1 - beta_1) + ) + G_b1 = G_b1 - (learing_rate / (np.sqrt(v6 / (1 - beta_2)) + eps)) * ( + m6 / (1 - beta_1) + ) + + G_W2 = G_W2 - (learing_rate / (np.sqrt(v7 / (1 - beta_2)) + eps)) * ( + m7 / (1 - beta_1) + ) + G_b2 = G_b2 - (learing_rate / (np.sqrt(v8 / (1 - beta_2)) + eps)) * ( + m8 / (1 - beta_1) + ) + + G_W3 = G_W3 - (learing_rate / (np.sqrt(v9 / (1 - beta_2)) + eps)) * ( + m9 / (1 - beta_1) + ) + G_b3 = G_b3 - (learing_rate / (np.sqrt(v10 / (1 - beta_2)) + eps)) * ( + m10 / (1 - beta_1) + ) + + G_W4 = G_W4 - (learing_rate / (np.sqrt(v11 / (1 - beta_2)) + eps)) * ( + m11 / (1 - beta_1) + ) + G_b4 = G_b4 - (learing_rate / (np.sqrt(v12 / (1 - beta_2)) + eps)) * ( + m12 / (1 - beta_1) + ) + + G_W5 = G_W5 - (learing_rate / (np.sqrt(v13 / (1 - beta_2)) + eps)) * ( + m13 / (1 - beta_1) + ) + G_b5 = G_b5 - (learing_rate / (np.sqrt(v14 / (1 - beta_2)) + eps)) * ( + m14 / (1 - beta_1) + ) + + G_W6 = G_W6 - (learing_rate / (np.sqrt(v15 / (1 - beta_2)) + eps)) * ( + m15 / (1 - beta_1) + ) + G_b6 = G_b6 - (learing_rate / (np.sqrt(v16 / (1 - beta_2)) + eps)) * ( + m16 / (1 - beta_1) + ) + + G_W7 = G_W7 - (learing_rate / (np.sqrt(v17 / (1 - beta_2)) + eps)) * ( + m17 / (1 - beta_1) + ) + G_b7 = G_b7 - (learing_rate / (np.sqrt(v18 / (1 - beta_2)) + eps)) * ( + m18 / (1 - beta_1) + ) # --- Print Error ---- - #print("Current Iter: ",iter, " Current D cost:",D_cost, " Current G cost: ", G_cost,end='\r') + # print("Current Iter: ",iter, " Current D cost:",D_cost, " Current G cost: ", G_cost,end='\r') if iter == 0: learing_rate = learing_rate * 0.01 @@ -359,12 +447,20 @@ def plot(samples): learing_rate = learing_rate * 0.01 # ---- Print to Out put ---- - if iter%10 == 0: - - print("Current Iter: ",iter, " Current D cost:",D_cost, " Current G cost: ", G_cost,end='\r') - print('--------- Show Example Result See Tab Above ----------') - print('--------- Wait for the image to load ---------') - Z = np.random.uniform(-1., 1., size=[16, G_input]) + if iter % 10 == 0: + + print( + "Current Iter: ", + iter, + " Current D cost:", + D_cost, + " Current G cost: ", + G_cost, + end="\r", + ) + print("--------- Show Example Result See Tab Above ----------") + print("--------- Wait for the image to load ---------") + Z = np.random.uniform(-1.0, 1.0, size=[16, G_input]) Gl1 = Z.dot(G_W1) + G_b1 Gl1A = arctan(Gl1) @@ -384,8 +480,19 @@ def plot(samples): current_fake_data = log(Gl7) fig = plot(current_fake_data) - fig.savefig('Click_Me_{}.png'.format(str(iter).zfill(3)+"_Ginput_"+str(G_input)+ \ - "_hiddenone"+str(hidden_input) + "_hiddentwo"+str(hidden_input2) + "_LR_" + str(learing_rate) - ), bbox_inches='tight') -#for 
complete explanation visit https://towardsdatascience.com/only-numpy-implementing-gan-general-adversarial-networks-and-adam-optimizer-using-numpy-with-2a7e4e032021 + fig.savefig( + "Click_Me_{}.png".format( + str(iter).zfill(3) + + "_Ginput_" + + str(G_input) + + "_hiddenone" + + str(hidden_input) + + "_hiddentwo" + + str(hidden_input2) + + "_LR_" + + str(learing_rate) + ), + bbox_inches="tight", + ) +# for complete explanation visit https://towardsdatascience.com/only-numpy-implementing-gan-general-adversarial-networks-and-adam-optimizer-using-numpy-with-2a7e4e032021 # -- end code -- diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 983063f0b72d..5e6c433aa97d 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -34,20 +34,20 @@ from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated -_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) +_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ -DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/' +DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/" def _read32(bytestream): - dt = numpy.dtype(numpy.uint32).newbyteorder('>') - return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] + dt = numpy.dtype(numpy.uint32).newbyteorder(">") + return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] -@deprecated(None, 'Please use tf.data to implement this functionality.') +@deprecated(None, "Please use tf.data to implement this functionality.") def _extract_images(f): - """Extract the images into a 4D uint8 numpy array [index, y, x, depth]. + """Extract the images into a 4D uint8 numpy array [index, y, x, depth]. Args: f: A file object that can be passed into a gzip reader. @@ -59,34 +59,35 @@ def _extract_images(f): ValueError: If the bytestream does not start with 2051. 
""" - print('Extracting', f.name) - with gzip.GzipFile(fileobj=f) as bytestream: - magic = _read32(bytestream) - if magic != 2051: - raise ValueError('Invalid magic number %d in MNIST image file: %s' % - (magic, f.name)) - num_images = _read32(bytestream) - rows = _read32(bytestream) - cols = _read32(bytestream) - buf = bytestream.read(rows * cols * num_images) - data = numpy.frombuffer(buf, dtype=numpy.uint8) - data = data.reshape(num_images, rows, cols, 1) - return data - - -@deprecated(None, 'Please use tf.one_hot on tensors.') + print("Extracting", f.name) + with gzip.GzipFile(fileobj=f) as bytestream: + magic = _read32(bytestream) + if magic != 2051: + raise ValueError( + "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) + ) + num_images = _read32(bytestream) + rows = _read32(bytestream) + cols = _read32(bytestream) + buf = bytestream.read(rows * cols * num_images) + data = numpy.frombuffer(buf, dtype=numpy.uint8) + data = data.reshape(num_images, rows, cols, 1) + return data + + +@deprecated(None, "Please use tf.one_hot on tensors.") def _dense_to_one_hot(labels_dense, num_classes): - """Convert class labels from scalars to one-hot vectors.""" - num_labels = labels_dense.shape[0] - index_offset = numpy.arange(num_labels) * num_classes - labels_one_hot = numpy.zeros((num_labels, num_classes)) - labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 - return labels_one_hot + """Convert class labels from scalars to one-hot vectors.""" + num_labels = labels_dense.shape[0] + index_offset = numpy.arange(num_labels) * num_classes + labels_one_hot = numpy.zeros((num_labels, num_classes)) + labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 + return labels_one_hot -@deprecated(None, 'Please use tf.data to implement this functionality.') +@deprecated(None, "Please use tf.data to implement this functionality.") def _extract_labels(f, one_hot=False, num_classes=10): - """Extract the labels into a 1D uint8 numpy array [index]. + """Extract the labels into a 1D uint8 numpy array [index]. Args: f: A file object that can be passed into a gzip reader. @@ -99,37 +100,43 @@ def _extract_labels(f, one_hot=False, num_classes=10): Raises: ValueError: If the bystream doesn't start with 2049. """ - print('Extracting', f.name) - with gzip.GzipFile(fileobj=f) as bytestream: - magic = _read32(bytestream) - if magic != 2049: - raise ValueError('Invalid magic number %d in MNIST label file: %s' % - (magic, f.name)) - num_items = _read32(bytestream) - buf = bytestream.read(num_items) - labels = numpy.frombuffer(buf, dtype=numpy.uint8) - if one_hot: - return _dense_to_one_hot(labels, num_classes) - return labels + print("Extracting", f.name) + with gzip.GzipFile(fileobj=f) as bytestream: + magic = _read32(bytestream) + if magic != 2049: + raise ValueError( + "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) + ) + num_items = _read32(bytestream) + buf = bytestream.read(num_items) + labels = numpy.frombuffer(buf, dtype=numpy.uint8) + if one_hot: + return _dense_to_one_hot(labels, num_classes) + return labels class _DataSet(object): - """Container class for a _DataSet (deprecated). + """Container class for a _DataSet (deprecated). THIS CLASS IS DEPRECATED. """ - @deprecated(None, 'Please use alternatives such as official/mnist/_DataSet.py' - ' from tensorflow/models.') - def __init__(self, - images, - labels, - fake_data=False, - one_hot=False, - dtype=dtypes.float32, - reshape=True, - seed=None): - """Construct a _DataSet. 
+ @deprecated( + None, + "Please use alternatives such as official/mnist/_DataSet.py" + " from tensorflow/models.", + ) + def __init__( + self, + images, + labels, + fake_data=False, + one_hot=False, + dtype=dtypes.float32, + reshape=True, + seed=None, + ): + """Construct a _DataSet. one_hot arg is used only if fake_data is true. `dtype` can be either `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into @@ -146,101 +153,105 @@ def __init__(self, reshape: Bool. If True returned images are returned flattened to vectors. seed: The random seed to use. """ - seed1, seed2 = random_seed.get_seed(seed) - # If op level seed is not set, use whatever graph level seed is returned - numpy.random.seed(seed1 if seed is None else seed2) - dtype = dtypes.as_dtype(dtype).base_dtype - if dtype not in (dtypes.uint8, dtypes.float32): - raise TypeError('Invalid image dtype %r, expected uint8 or float32' % - dtype) - if fake_data: - self._num_examples = 10000 - self.one_hot = one_hot - else: - assert images.shape[0] == labels.shape[0], ( - 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape)) - self._num_examples = images.shape[0] - - # Convert shape from [num examples, rows, columns, depth] - # to [num examples, rows*columns] (assuming depth == 1) - if reshape: - assert images.shape[3] == 1 - images = images.reshape(images.shape[0], - images.shape[1] * images.shape[2]) - if dtype == dtypes.float32: - # Convert from [0, 255] -> [0.0, 1.0]. - images = images.astype(numpy.float32) - images = numpy.multiply(images, 1.0 / 255.0) - self._images = images - self._labels = labels - self._epochs_completed = 0 - self._index_in_epoch = 0 - - @property - def images(self): - return self._images - - @property - def labels(self): - return self._labels - - @property - def num_examples(self): - return self._num_examples - - @property - def epochs_completed(self): - return self._epochs_completed - - def next_batch(self, batch_size, fake_data=False, shuffle=True): - """Return the next `batch_size` examples from this data set.""" - if fake_data: - fake_image = [1] * 784 - if self.one_hot: - fake_label = [1] + [0] * 9 - else: - fake_label = 0 - return [fake_image for _ in xrange(batch_size) - ], [fake_label for _ in xrange(batch_size)] - start = self._index_in_epoch - # Shuffle for the first epoch - if self._epochs_completed == 0 and start == 0 and shuffle: - perm0 = numpy.arange(self._num_examples) - numpy.random.shuffle(perm0) - self._images = self.images[perm0] - self._labels = self.labels[perm0] - # Go to the next epoch - if start + batch_size > self._num_examples: - # Finished epoch - self._epochs_completed += 1 - # Get the rest examples in this epoch - rest_num_examples = self._num_examples - start - images_rest_part = self._images[start:self._num_examples] - labels_rest_part = self._labels[start:self._num_examples] - # Shuffle the data - if shuffle: - perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) - self._images = self.images[perm] - self._labels = self.labels[perm] - # Start next epoch - start = 0 - self._index_in_epoch = batch_size - rest_num_examples - end = self._index_in_epoch - images_new_part = self._images[start:end] - labels_new_part = self._labels[start:end] - return numpy.concatenate((images_rest_part, images_new_part), - axis=0), numpy.concatenate( - (labels_rest_part, labels_new_part), axis=0) - else: - self._index_in_epoch += batch_size - end = self._index_in_epoch - return self._images[start:end], self._labels[start:end] - - -@deprecated(None, 'Please write 
your own downloading logic.') + seed1, seed2 = random_seed.get_seed(seed) + # If op level seed is not set, use whatever graph level seed is returned + numpy.random.seed(seed1 if seed is None else seed2) + dtype = dtypes.as_dtype(dtype).base_dtype + if dtype not in (dtypes.uint8, dtypes.float32): + raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) + if fake_data: + self._num_examples = 10000 + self.one_hot = one_hot + else: + assert ( + images.shape[0] == labels.shape[0] + ), "images.shape: %s labels.shape: %s" % (images.shape, labels.shape) + self._num_examples = images.shape[0] + + # Convert shape from [num examples, rows, columns, depth] + # to [num examples, rows*columns] (assuming depth == 1) + if reshape: + assert images.shape[3] == 1 + images = images.reshape( + images.shape[0], images.shape[1] * images.shape[2] + ) + if dtype == dtypes.float32: + # Convert from [0, 255] -> [0.0, 1.0]. + images = images.astype(numpy.float32) + images = numpy.multiply(images, 1.0 / 255.0) + self._images = images + self._labels = labels + self._epochs_completed = 0 + self._index_in_epoch = 0 + + @property + def images(self): + return self._images + + @property + def labels(self): + return self._labels + + @property + def num_examples(self): + return self._num_examples + + @property + def epochs_completed(self): + return self._epochs_completed + + def next_batch(self, batch_size, fake_data=False, shuffle=True): + """Return the next `batch_size` examples from this data set.""" + if fake_data: + fake_image = [1] * 784 + if self.one_hot: + fake_label = [1] + [0] * 9 + else: + fake_label = 0 + return ( + [fake_image for _ in xrange(batch_size)], + [fake_label for _ in xrange(batch_size)], + ) + start = self._index_in_epoch + # Shuffle for the first epoch + if self._epochs_completed == 0 and start == 0 and shuffle: + perm0 = numpy.arange(self._num_examples) + numpy.random.shuffle(perm0) + self._images = self.images[perm0] + self._labels = self.labels[perm0] + # Go to the next epoch + if start + batch_size > self._num_examples: + # Finished epoch + self._epochs_completed += 1 + # Get the rest examples in this epoch + rest_num_examples = self._num_examples - start + images_rest_part = self._images[start : self._num_examples] + labels_rest_part = self._labels[start : self._num_examples] + # Shuffle the data + if shuffle: + perm = numpy.arange(self._num_examples) + numpy.random.shuffle(perm) + self._images = self.images[perm] + self._labels = self.labels[perm] + # Start next epoch + start = 0 + self._index_in_epoch = batch_size - rest_num_examples + end = self._index_in_epoch + images_new_part = self._images[start:end] + labels_new_part = self._labels[start:end] + return ( + numpy.concatenate((images_rest_part, images_new_part), axis=0), + numpy.concatenate((labels_rest_part, labels_new_part), axis=0), + ) + else: + self._index_in_epoch += batch_size + end = self._index_in_epoch + return self._images[start:end], self._labels[start:end] + + +@deprecated(None, "Please write your own downloading logic.") def _maybe_download(filename, work_directory, source_url): - """Download the data from source url, unless it's already here. + """Download the data from source url, unless it's already here. Args: filename: string, name of the file in the directory. @@ -250,83 +261,90 @@ def _maybe_download(filename, work_directory, source_url): Returns: Path to resulting file. 
""" - if not gfile.Exists(work_directory): - gfile.MakeDirs(work_directory) - filepath = os.path.join(work_directory, filename) - if not gfile.Exists(filepath): - urllib.request.urlretrieve(source_url, filepath) - with gfile.GFile(filepath) as f: - size = f.size() - print('Successfully downloaded', filename, size, 'bytes.') - return filepath - - -@deprecated(None, 'Please use alternatives such as:' - ' tensorflow_datasets.load(\'mnist\')') -def read_data_sets(train_dir, - fake_data=False, - one_hot=False, - dtype=dtypes.float32, - reshape=True, - validation_size=5000, - seed=None, - source_url=DEFAULT_SOURCE_URL): - if fake_data: - - def fake(): - return _DataSet([], [], - fake_data=True, - one_hot=one_hot, - dtype=dtype, - seed=seed) - - train = fake() - validation = fake() - test = fake() - return _Datasets(train=train, validation=validation, test=test) - - if not source_url: # empty string check - source_url = DEFAULT_SOURCE_URL - - train_images_file = 'train-images-idx3-ubyte.gz' - train_labels_file = 'train-labels-idx1-ubyte.gz' - test_images_file = 't10k-images-idx3-ubyte.gz' - test_labels_file = 't10k-labels-idx1-ubyte.gz' - - local_file = _maybe_download(train_images_file, train_dir, - source_url + train_images_file) - with gfile.Open(local_file, 'rb') as f: - train_images = _extract_images(f) - - local_file = _maybe_download(train_labels_file, train_dir, - source_url + train_labels_file) - with gfile.Open(local_file, 'rb') as f: - train_labels = _extract_labels(f, one_hot=one_hot) - - local_file = _maybe_download(test_images_file, train_dir, - source_url + test_images_file) - with gfile.Open(local_file, 'rb') as f: - test_images = _extract_images(f) - - local_file = _maybe_download(test_labels_file, train_dir, - source_url + test_labels_file) - with gfile.Open(local_file, 'rb') as f: - test_labels = _extract_labels(f, one_hot=one_hot) - - if not 0 <= validation_size <= len(train_images): - raise ValueError( - 'Validation size should be between 0 and {}. 
Received: {}.'.format( - len(train_images), validation_size)) - - validation_images = train_images[:validation_size] - validation_labels = train_labels[:validation_size] - train_images = train_images[validation_size:] - train_labels = train_labels[validation_size:] - - options = dict(dtype=dtype, reshape=reshape, seed=seed) + if not gfile.Exists(work_directory): + gfile.MakeDirs(work_directory) + filepath = os.path.join(work_directory, filename) + if not gfile.Exists(filepath): + urllib.request.urlretrieve(source_url, filepath) + with gfile.GFile(filepath) as f: + size = f.size() + print("Successfully downloaded", filename, size, "bytes.") + return filepath + + +@deprecated( + None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" +) +def read_data_sets( + train_dir, + fake_data=False, + one_hot=False, + dtype=dtypes.float32, + reshape=True, + validation_size=5000, + seed=None, + source_url=DEFAULT_SOURCE_URL, +): + if fake_data: - train = _DataSet(train_images, train_labels, **options) - validation = _DataSet(validation_images, validation_labels, **options) - test = _DataSet(test_images, test_labels, **options) + def fake(): + return _DataSet( + [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed + ) + + train = fake() + validation = fake() + test = fake() + return _Datasets(train=train, validation=validation, test=test) + + if not source_url: # empty string check + source_url = DEFAULT_SOURCE_URL + + train_images_file = "train-images-idx3-ubyte.gz" + train_labels_file = "train-labels-idx1-ubyte.gz" + test_images_file = "t10k-images-idx3-ubyte.gz" + test_labels_file = "t10k-labels-idx1-ubyte.gz" + + local_file = _maybe_download( + train_images_file, train_dir, source_url + train_images_file + ) + with gfile.Open(local_file, "rb") as f: + train_images = _extract_images(f) + + local_file = _maybe_download( + train_labels_file, train_dir, source_url + train_labels_file + ) + with gfile.Open(local_file, "rb") as f: + train_labels = _extract_labels(f, one_hot=one_hot) + + local_file = _maybe_download( + test_images_file, train_dir, source_url + test_images_file + ) + with gfile.Open(local_file, "rb") as f: + test_images = _extract_images(f) + + local_file = _maybe_download( + test_labels_file, train_dir, source_url + test_labels_file + ) + with gfile.Open(local_file, "rb") as f: + test_labels = _extract_labels(f, one_hot=one_hot) + + if not 0 <= validation_size <= len(train_images): + raise ValueError( + "Validation size should be between 0 and {}. 
Received: {}.".format( + len(train_images), validation_size + ) + ) + + validation_images = train_images[:validation_size] + validation_labels = train_labels[:validation_size] + train_images = train_images[validation_size:] + train_labels = train_labels[validation_size:] + + options = dict(dtype=dtype, reshape=reshape, seed=seed) + + train = _DataSet(train_images, train_labels, **options) + validation = _DataSet(validation_images, validation_labels, **options) + test = _DataSet(test_images, test_labels, **options) - return _Datasets(train=train, validation=validation, test=test) + return _Datasets(train=train, validation=validation, test=test) diff --git a/other/least_recently_used.py b/other/least_recently_used.py index 2932e9c185e8..e1b5ab5bd380 100644 --- a/other/least_recently_used.py +++ b/other/least_recently_used.py @@ -2,12 +2,13 @@ import sys from collections import deque + class LRUCache: """ Page Replacement Algorithm, Least Recently Used (LRU) Caching.""" - dq_store = object() # Cache store of keys - key_reference_map = object() # References of the keys in cache - _MAX_CAPACITY: int = 10 # Maximum capacity of cache + dq_store = object() # Cache store of keys + key_reference_map = object() # References of the keys in cache + _MAX_CAPACITY: int = 10 # Maximum capacity of cache @abstractmethod def __init__(self, n: int): @@ -19,7 +20,7 @@ def __init__(self, n: int): if not n: LRUCache._MAX_CAPACITY = sys.maxsize elif n < 0: - raise ValueError('n should be an integer greater than 0.') + raise ValueError("n should be an integer greater than 0.") else: LRUCache._MAX_CAPACITY = n @@ -51,6 +52,7 @@ def display(self): for k in self.dq_store: print(k) + if __name__ == "__main__": lru_cache = LRUCache(4) lru_cache.refer(1) diff --git a/project_euler/problem_20/sol4.py b/project_euler/problem_20/sol4.py index 50ebca5a0bf7..4c597220f09b 100644 --- a/project_euler/problem_20/sol4.py +++ b/project_euler/problem_20/sol4.py @@ -27,7 +27,7 @@ def solution(n): """ fact = 1 result = 0 - for i in range(1,n + 1): + for i in range(1, n + 1): fact *= i for j in str(fact): diff --git a/web_programming/get_imdbtop.py b/web_programming/get_imdbtop.py index 95fbeba7a772..522e423b4eab 100644 --- a/web_programming/get_imdbtop.py +++ b/web_programming/get_imdbtop.py @@ -3,8 +3,10 @@ def imdb_top(imdb_top_n): - base_url = (f"https://www.imdb.com/search/title?title_type=" - f"feature&sort=num_votes,desc&count={imdb_top_n}") + base_url = ( + f"https://www.imdb.com/search/title?title_type=" + f"feature&sort=num_votes,desc&count={imdb_top_n}" + ) source = BeautifulSoup(requests.get(base_url).content, "html.parser") for m in source.findAll("div", class_="lister-item mode-advanced"): print("\n" + m.h3.a.text) # movie's name From 8bd4163d5e95246dc26ecc39f325c9692bf99a72 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 20:25:21 -0400 Subject: [PATCH 20/30] fix action typo - missing underscore in trigger --- .github/workflows/autoDIR.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index e6fa9297fc5b..3cb234d4dfbe 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -1,5 +1,5 @@ name: autoDIR -on: [pull request] +on: [pull_request] jobs: build: runs-on: ubuntu-latest From 0da1e279dd9469e6962b870050ef1feab02992db Mon Sep 17 00:00:00 2001 From: "Marvin M. 
Michum" Date: Thu, 31 Oct 2019 20:47:14 -0400 Subject: [PATCH 21/30] add push to git --- .github/workflows/autoDIR.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 3cb234d4dfbe..6267d4438d4e 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -14,4 +14,12 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Build DIRECTORY - run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md \ No newline at end of file + run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md + - name: Push DIRECTORY + run: | + git config --global user.name 'mrvnmchm' + git config --global user.email 'mrvnmchm@gmail.com' + git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY + git checkout $GITHUB_HEAD_REF + git commit -am "Update: DIRECTORY.md" + git push \ No newline at end of file From 4cc8013e8ed8036fb6dce523daeecda255e65e74 Mon Sep 17 00:00:00 2001 From: mrvnmchm Date: Fri, 1 Nov 2019 00:49:56 +0000 Subject: [PATCH 22/30] Update: DIRECTORY.md --- DIRECTORY.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index e2d74d39828f..b241fca84223 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -8,6 +8,7 @@ * [Newton Forward Interpolation](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_forward_interpolation.py) * [Newton Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_method.py) * [Newton Raphson Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_raphson_method.py) + * [Secant Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/secant_method.py) ## Backtracking * [All Combinations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_combinations.py) @@ -36,8 +37,11 @@ * [Brute Force Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/brute_force_caesar_cipher.py) * [Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/caesar_cipher.py) * [Cryptomath Module](https://github.com/TheAlgorithms/Python/blob/master/ciphers/cryptomath_module.py) + * [Deterministic Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/ciphers/deterministic_miller_rabin.py) + * [Diffie](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie.py) * [Elgamal Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/elgamal_key_generator.py) * [Hill Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/hill_cipher.py) + * [Mixed Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mixed_keyword_cypher.py) * [Morse Code Implementation](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code_implementation.py) * [Onepad Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/onepad_cipher.py) * [Playfair Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/playfair_cipher.py) @@ -149,6 +153,7 @@ * [Longest Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/matrix_chain_order.py) * [Max Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sub_array.py) + * [Max Sum Contigous 
Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contigous_subsequence.py) * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py) * [Rod Cutting](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/rod_cutting.py) * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py) @@ -228,13 +233,16 @@ * [Abs Min](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_min.py) * [Average Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mean.py) * [Average Median](https://github.com/TheAlgorithms/Python/blob/master/maths/average_median.py) + * [Average Mode](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mode.py) * [Basic Maths](https://github.com/TheAlgorithms/Python/blob/master/maths/basic_maths.py) * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation.py) + * [Binomial Coefficient](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_coefficient.py) * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py) * [Explicit Euler](https://github.com/TheAlgorithms/Python/blob/master/maths/explicit_euler.py) * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py) * [Factorial Python](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_python.py) * [Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_recursive.py) + * [Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/factors.py) * [Fermat Little Theorem](https://github.com/TheAlgorithms/Python/blob/master/maths/fermat_little_theorem.py) * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci.py) * [Fibonacci Sequence Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci_sequence_recursion.py) @@ -257,9 +265,11 @@ * [Mobius Function](https://github.com/TheAlgorithms/Python/blob/master/maths/mobius_function.py) * [Modular Exponential](https://github.com/TheAlgorithms/Python/blob/master/maths/modular_exponential.py) * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/maths/newton_raphson.py) + * [Perfect Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py) * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py) * [Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_check.py) * [Prime Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_factors.py) + * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_numbers.py) * [Prime Sieve Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_sieve_eratosthenes.py) * [Qr Decomposition](https://github.com/TheAlgorithms/Python/blob/master/maths/qr_decomposition.py) * [Quadratic Equations Complex Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/quadratic_equations_complex_numbers.py) @@ -293,6 +303,8 @@ ## Neural Network * [Back Propagation Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/convolution_neural_network.py) + * 
[Gan](https://github.com/TheAlgorithms/Python/blob/master/neural_network/gan.py) + * [Input Data](https://github.com/TheAlgorithms/Python/blob/master/neural_network/input_data.py) * [Perceptron](https://github.com/TheAlgorithms/Python/blob/master/neural_network/perceptron.py) ## Other @@ -309,12 +321,14 @@ * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py) * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py) * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py) + * [Least Recently Used](https://github.com/TheAlgorithms/Python/blob/master/other/least_recently_used.py) * [Linear Congruential Generator](https://github.com/TheAlgorithms/Python/blob/master/other/linear_congruential_generator.py) * [Magicdiamondpattern](https://github.com/TheAlgorithms/Python/blob/master/other/magicdiamondpattern.py) * [Nested Brackets](https://github.com/TheAlgorithms/Python/blob/master/other/nested_brackets.py) * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/other/palindrome.py) * [Password Generator](https://github.com/TheAlgorithms/Python/blob/master/other/password_generator.py) * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/other/primelib.py) + * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py) * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/other/sierpinski_triangle.py) * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py) * [Two Sum](https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py) @@ -390,6 +404,7 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_20/sol1.py) * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_20/sol2.py) * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_20/sol3.py) + * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_20/sol4.py) * Problem 21 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_21/sol1.py) * Problem 22 @@ -404,6 +419,9 @@ * Problem 25 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_25/sol1.py) * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_25/sol2.py) + * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_25/sol3.py) + * Problem 27 + * [Problem 27 Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_27/problem_27_sol1.py) * Problem 28 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_28/sol1.py) * Problem 29 @@ -412,6 +430,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_31/sol1.py) * Problem 32 * [Sol32](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_32/sol32.py) + * Problem 33 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_33/sol1.py) * Problem 36 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_36/sol1.py) * Problem 40 @@ -471,6 +491,7 @@ * [Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py) * [Random Normal Distribution Quicksort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_normal_distribution_quicksort.py) * [Random Pivot Quick 
Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_pivot_quick_sort.py) + * [Recursive-Quick-Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive-quick-sort.py) * [Selection Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/selection_sort.py) * [Shell Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/shell_sort.py) * [Stooge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/stooge_sort.py) @@ -494,3 +515,4 @@ ## Web Programming * [Crawl Google Results](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_results.py) + * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) From c1963876613f18a3d7a4e7b6988e66490506659e Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 21:07:42 -0400 Subject: [PATCH 23/30] one step --- .github/workflows/autoDIR.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 6267d4438d4e..c4705032b683 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -13,10 +13,9 @@ jobs: uses: actions/setup-python@v1 with: python-version: ${{ matrix.python-version }} - - name: Build DIRECTORY - run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - - name: Push DIRECTORY + - name: Update DIRECTORY run: | + scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md git config --global user.name 'mrvnmchm' git config --global user.email 'mrvnmchm@gmail.com' git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY From 92987098c50b1ea892f1e9864fb529905282bde1 Mon Sep 17 00:00:00 2001 From: "Marvin M. Michum" Date: Thu, 31 Oct 2019 21:14:35 -0400 Subject: [PATCH 24/30] add, add --- .github/workflows/autoDIR.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index c4705032b683..1d15e6305055 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -20,5 +20,6 @@ jobs: git config --global user.email 'mrvnmchm@gmail.com' git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY git checkout $GITHUB_HEAD_REF + git add DIRECTORY.md git commit -am "Update: DIRECTORY.md" git push \ No newline at end of file From c1fa1342837d9db27a8dd4cf41ddc5fa1675d73d Mon Sep 17 00:00:00 2001 From: "Marvin M. 
Michum" Date: Thu, 31 Oct 2019 22:45:07 -0400 Subject: [PATCH 25/30] conditional pushing --- .github/workflows/autoDIR.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 1d15e6305055..529b9b307cf0 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -8,18 +8,22 @@ jobs: matrix: python-version: [3.7] steps: + - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v1 with: python-version: ${{ matrix.python-version }} - name: Update DIRECTORY + run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md run: | scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - git config --global user.name 'mrvnmchm' - git config --global user.email 'mrvnmchm@gmail.com' + git config --global user.name 'autoDIR' + git config --global user.email 'mrvnmchm@users.noreply.github.com' git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY git checkout $GITHUB_HEAD_REF - git add DIRECTORY.md git commit -am "Update: DIRECTORY.md" + - name: commit + if: success() + run: | git push \ No newline at end of file From 717a62c44c9e493537e55c5132f89414ef6b79ba Mon Sep 17 00:00:00 2001 From: mrvnmchm Date: Thu, 31 Oct 2019 23:00:58 -0400 Subject: [PATCH 26/30] fix multiple runs --- .github/workflows/autoDIR.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 529b9b307cf0..49cc6d5d819c 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -15,7 +15,6 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Update DIRECTORY - run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md run: | scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md git config --global user.name 'autoDIR' @@ -23,7 +22,7 @@ jobs: git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY git checkout $GITHUB_HEAD_REF git commit -am "Update: DIRECTORY.md" - - name: commit + - name: Push DIRECTORY if: success() run: | git push \ No newline at end of file From 70ade0e5d768519f7b58be97776f9acd40eab318 Mon Sep 17 00:00:00 2001 From: mrvnmchm Date: Thu, 31 Oct 2019 23:04:18 -0400 Subject: [PATCH 27/30] clean exit --- .github/workflows/autoDIR.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 49cc6d5d819c..6e9f398ee092 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -24,5 +24,7 @@ jobs: git commit -am "Update: DIRECTORY.md" - name: Push DIRECTORY if: success() - run: | - git push \ No newline at end of file + run: git push + - name: If no change, exit clean + if: failure() + run: exit 0 \ No newline at end of file From 126b3a4f6d7f0ae43463fc5bc9af3716cc178f16 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 1 Nov 2019 04:05:50 +0100 Subject: [PATCH 28/30] Travis CI: Write & print DIRECTORY.md on one line --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index accf69d1b1a3..0c7c9fd0e1c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,4 +11,4 @@ script: - mypy --ignore-missing-imports . - pytest . 
--doctest-modules after_success: - - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md \ No newline at end of file + - scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md From c3d70ae695531fd0b957a72b785ba002d0dc7814 Mon Sep 17 00:00:00 2001 From: mrvnmchm Date: Thu, 31 Oct 2019 23:13:21 -0400 Subject: [PATCH 29/30] check before commit --- .github/workflows/autoDIR.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 6e9f398ee092..68193407a655 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -21,10 +21,12 @@ jobs: git config --global user.email 'mrvnmchm@users.noreply.github.com' git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY git checkout $GITHUB_HEAD_REF - git commit -am "Update: DIRECTORY.md" + git diff-files --quiet - name: Push DIRECTORY if: success() - run: git push + run: | + git commit -am "Update: DIRECTORY.md" + git push - name: If no change, exit clean if: failure() run: exit 0 \ No newline at end of file From 161b6924087fcff0cc250e18886fca727d3576c1 Mon Sep 17 00:00:00 2001 From: mrvnmchm Date: Thu, 31 Oct 2019 23:19:20 -0400 Subject: [PATCH 30/30] reverse order --- .github/workflows/autoDIR.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/autoDIR.yml b/.github/workflows/autoDIR.yml index 68193407a655..c2d42687887a 100644 --- a/.github/workflows/autoDIR.yml +++ b/.github/workflows/autoDIR.yml @@ -21,12 +21,9 @@ jobs: git config --global user.email 'mrvnmchm@users.noreply.github.com' git remote set-url origin https://x-access-token:${{ secrets.gh_token }}@github.com/$GITHUB_REPOSITORY git checkout $GITHUB_HEAD_REF - git diff-files --quiet + git diff-files --quiet - name: Push DIRECTORY - if: success() + if: failure() run: | git commit -am "Update: DIRECTORY.md" - git push - - name: If no change, exit clean - if: failure() - run: exit 0 \ No newline at end of file + git push \ No newline at end of file
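
A note on the change-detection logic in patches 29 and 30: git diff-files --quiet exits 0 when the tracked files in the working tree match the index and exits non-zero when the regenerated DIRECTORY.md differs, so the "Update DIRECTORY" step deliberately ends on that check and the "Push DIRECTORY" step is gated on if: failure() so that it commits and pushes only when there is actually something to commit. Below is a minimal standalone sketch of the same branch logic, assuming DIRECTORY.md has already been regenerated and that the git user, remote URL, and branch are configured as in the workflow; it is an illustration of the pattern, not part of the patches themselves.

#!/bin/sh
# Sketch only: mirrors the conditional-push pattern used by autoDIR.yml after patch 30.
# Assumes DIRECTORY.md was just regenerated and git user/remote/branch are already set up.
if git diff-files --quiet; then
    # Exit status 0: tracked files are unchanged, so there is nothing to push.
    echo "DIRECTORY.md is up to date; skipping push."
else
    # Non-zero exit status: DIRECTORY.md changed; commit the tracked change and push it back.
    git commit -am "Update: DIRECTORY.md"
    git push
fi

One trade-off of the if: failure() gate is that a run which needs to update DIRECTORY.md surfaces as a failed "Update DIRECTORY" step in the Actions UI; the sketch above expresses the same condition directly without relying on step failure.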