From 1193036f4e53b335e1eba5d6aeef0a9bcd7c958d Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 02:26:39 +0530
Subject: [PATCH 1/8] fetch anime

---
 web_programming/fetch_anime_and_play.py | 199 ++++++++++++++++++++++++
 1 file changed, 199 insertions(+)
 create mode 100644 web_programming/fetch_anime_and_play.py

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
new file mode 100644
index 000000000000..1c95f3bc9a3b
--- /dev/null
+++ b/web_programming/fetch_anime_and_play.py
@@ -0,0 +1,199 @@
+from types import NoneType
+from urllib.error import HTTPError
+from xml.dom import NotFoundErr
+import requests
+from bs4 import BeautifulSoup, NavigableString
+
+from fake_useragent import UserAgent
+
+
+BASE_URL = "https://ww1.gogoanime2.org"
+
+
+def search_scraper(anime_name: str) -> list:
+
+    """[summary]
+
+    This function will take an url and
+    return list of anime after scraping the site.
+
+    >>> type(search_scraper("demon_slayer"))
+    <class 'list'>
+
+    Raises:
+        e: [Raises exception on failure]
+
+    Returns:
+        [list]: [List of animes]
+    """
+
+    try:
+
+        # concat the name to form the search url.
+        search_url = f"{BASE_URL}/search/{anime_name}"
+        response = requests.get(
+            search_url, headers={"UserAgent": UserAgent().chrome}
+        )  # request the url.
+
+        # Is the response ok?
+        response.raise_for_status()
+
+        # parse with soup.
+        soup = BeautifulSoup(response.text, "html.parser")
+
+        # get list of anime
+        items_ul = soup.find("ul", {"class": "items"})
+        items_li = items_ul.children
+
+        # for each anime, insert to list. the name and url.
+        anime_list = []
+        for li in items_li:
+            if not isinstance(li, NavigableString):
+                anime_url, anime_title = li.find("a")["href"], li.find("a")["title"]
+                anime_list.append(
+                    {
+                        "title": anime_title,
+                        "url": anime_url,
+                    }
+                )
+
+        return anime_list
+
+    except (requests.exceptions.RequestException, HTTPError, TypeError) as e:
+        raise e
+
+
+def search_anime_episode_list(episode_endpoint: str) -> list:
+
+    """[summary]
+
+    This function will take an url and
+    return list of episodes after scraping the site
+    for an url.
+
+    >>> type(search_anime_episode_list("/anime/kimetsu-no-yaiba"))
+    <class 'list'>
+
+    Raises:
+        e: [description]
+
+    Returns:
+        [list]: [List of episodes]
+    """
+
+    try:
+
+        request_url = f"{BASE_URL}{episode_endpoint}"
+        response = requests.get(
+            url=request_url, headers={"UserAgent": UserAgent().chrome}
+        )
+        soup = BeautifulSoup(response.text, "html.parser")
+
+        # With this id. get the episode list.
+        episode_page_ul = soup.find("ul", {"id": "episode_related"})
+        episode_page_li = episode_page_ul.children
+
+        episode_list = []
+        for children in episode_page_li:
+            try:
+                if not isinstance(children, NavigableString):
+                    episode_list.append(
+                        {
+                            "title": children.find(
+                                "div", {"class": "name"}
+                            ).text.replace(" ", ""),
+                            "url": children.find("a")["href"],
+                        }
+                    )
+            except (KeyError, NotFoundErr, TypeError):
+                pass
+
+        return episode_list
+
+    except (requests.exceptions.RequestException) as e:
+        raise e
+
+
+def get_anime_episode(episode_endpoint: str) -> list:
+
+    """[summary]
+
+    Get click url and download url from episode url
+
+    Raises:
+        e: [description]
+
+    Returns:
+        [list]: [List of download and watch url]
+    """
+
+    try:
+
+        episode_page_url = f"{BASE_URL}{episode_endpoint}"
+
+        response = requests.get(
+            url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
+        )
+        soup = BeautifulSoup(response.text, "lxml")
+
+        episode_url = soup.find("iframe", {"id": "playerframe"})["src"]
+        download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8"
+        return [f"{BASE_URL}{episode_url}", f"${BASE_URL}{download_url}"]
+
+    except (
+        KeyError,
+        NotFoundErr,
+        TypeError,
+        requests.exceptions.RequestException,
+    ) as e:
+        raise e
+
+
+if __name__ == "__main__":
+
+    try:
+
+        anime_name = input("Enter anime name: ").split()
+        anime_list = search_scraper(anime_name)
+        print("\n")
+
+        if len(anime_list) == 0:
+            print("No anime found with this name")
+        else:
+
+            print(f"Found {len(anime_list)} results: ")
+            for (i, anime) in enumerate(anime_list):
+                anime_title = anime["title"]
+                print((f"{i+1}. {anime_title}"))
+
+            anime_choice = int(
+                input("\nPlease choose from the following list: ").strip()
+            )
+            chosen_anime = anime_list[anime_choice - 1]
+            print(
+                "You chose {0}. Searching for episodes...".format(chosen_anime["title"])
+            )
+
+            episode_list = search_anime_episode_list(chosen_anime["url"])
+            if len(episode_list) == 0:
+                print("No episode found for this anime")
+            else:
+                print(f"Found {len(episode_list)} results: ")
+                for (i, episode) in enumerate(episode_list):
+                    print(("{0}. {1}").format(i + 1, episode["title"]))
+
+                episode_choice = int(
+                    input("\nChoose an episode by serial no: ").strip()
+                )
+                chosen_episode = episode_list[episode_choice - 1]
+                print("You chose {0}. Searching...".format(chosen_episode["title"]))
+
+                episode_url, download_url = get_anime_episode(chosen_episode["url"])
+                print(
+                    "\nTo watch, ctrl+click on {0}. To download, ctrl+click on {1}".format(
+                        episode_url, download_url
+                    )
+                )
+
+    except (ValueError, IndexError, TypeError) as e:
+        raise e

From 30c2d6c5906cf91afc4827e1157e74655001b25b Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 02:30:44 +0530
Subject: [PATCH 2/8] formatted code

---
 web_programming/fetch_anime_and_play.py | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index 1c95f3bc9a3b..eaa92ea49d96 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -1,12 +1,11 @@
 from types import NoneType
 from urllib.error import HTTPError
 from xml.dom import NotFoundErr
+
 import requests
 from bs4 import BeautifulSoup, NavigableString
-
 from fake_useragent import UserAgent
 
-
 BASE_URL = "https://ww1.gogoanime2.org"
 
 
@@ -20,6 +19,9 @@ def search_scraper(anime_name: str) -> list:
     >>> type(search_scraper("demon_slayer"))
     <class 'list'>
 
+    Args:
+        anime_name (str): [Name of anime]
+
     Raises:
         e: [Raises exception on failure]
 
@@ -74,6 +76,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
     >>> type(search_anime_episode_list("/anime/kimetsu-no-yaiba"))
     <class 'list'>
 
+    Args:
+        episode_endpoint (str): [Endpoint of episode]
+
     Raises:
         e: [description]
 
@@ -120,6 +125,9 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
     Get click url and download url from episode url
 
+    Args:
+        episode_endpoint (str): [Endpoint of episode]
+
     Raises:
         e: [description]
 
@@ -164,14 +172,14 @@ def get_anime_episode(episode_endpoint: str) -> list:
             print(f"Found {len(anime_list)} results: ")
             for (i, anime) in enumerate(anime_list):
                 anime_title = anime["title"]
-                print((f"{i+1}. {anime_title}"))
+                print(f"{i+1}. {anime_title}")
 
             anime_choice = int(
                 input("\nPlease choose from the following list: ").strip()
             )
             chosen_anime = anime_list[anime_choice - 1]
             print(
-                "You chose {0}. Searching for episodes...".format(chosen_anime["title"])
+                "You chose {}. Searching for episodes...".format(chosen_anime["title"])
             )
 
             episode_list = search_anime_episode_list(chosen_anime["url"])
@@ -180,17 +188,17 @@ def get_anime_episode(episode_endpoint: str) -> list:
             else:
                 print(f"Found {len(episode_list)} results: ")
                 for (i, episode) in enumerate(episode_list):
-                    print(("{0}. {1}").format(i + 1, episode["title"]))
+                    print(("{}. {}").format(i + 1, episode["title"]))
 
                 episode_choice = int(
                     input("\nChoose an episode by serial no: ").strip()
                 )
                 chosen_episode = episode_list[episode_choice - 1]
-                print("You chose {0}. Searching...".format(chosen_episode["title"]))
+                print("You chose {}. Searching...".format(chosen_episode["title"]))
 
                 episode_url, download_url = get_anime_episode(chosen_episode["url"])
                 print(
-                    "\nTo watch, ctrl+click on {0}. To download, ctrl+click on {1}".format(
+                    "\nTo watch, ctrl+click on {}. To download, ctrl+click on {}".format(
                         episode_url, download_url
                     )
                 )

From c24a55073674179446112953fdd4db5bcdcdd7d8 Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 02:37:42 +0530
Subject: [PATCH 3/8] fix format errors

---
 web_programming/fetch_anime_and_play.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index eaa92ea49d96..749d4509177a 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -1,4 +1,3 @@
-from types import NoneType
 from urllib.error import HTTPError
 from xml.dom import NotFoundErr
 
@@ -146,7 +145,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
         episode_url = soup.find("iframe", {"id": "playerframe"})["src"]
         download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8"
-        return [f"{BASE_URL}{episode_url}", f"${BASE_URL}{download_url}"]
+        return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"]
 
     except (
         KeyError,
@@ -161,7 +160,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
     try:
 
-        anime_name = input("Enter anime name: ").split()
+        anime_name = input("Enter anime name: ").strip()
         anime_list = search_scraper(anime_name)
         print("\n")
 
@@ -197,11 +196,8 @@ def get_anime_episode(episode_endpoint: str) -> list:
                 print("You chose {}. Searching...".format(chosen_episode["title"]))
 
                 episode_url, download_url = get_anime_episode(chosen_episode["url"])
-                print(
-                    "\nTo watch, ctrl+click on {}. To download, ctrl+click on {}".format(
-                        episode_url, download_url
-                    )
-                )
+                print(f"\nTo watch, ctrl+click on {episode_url}.")
+                print(f"To download, ctrl+click on {download_url}.")
 
     except (ValueError, IndexError, TypeError) as e:
         raise e

From bb3e821628fccfcf3465defb6bc0498def5a2f98 Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 02:47:51 +0530
Subject: [PATCH 4/8] fix bot reviews

---
 web_programming/fetch_anime_and_play.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index 749d4509177a..675e5a6de803 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -124,6 +124,9 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
     Get click url and download url from episode url
 
+    >>> type(get_anime_episode("/watch/kimetsu-no-yaiba/1"))
+    <class 'list'>
+
     Args:
         episode_endpoint (str): [Endpoint of episode]
 
@@ -177,9 +180,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
                 input("\nPlease choose from the following list: ").strip()
             )
             chosen_anime = anime_list[anime_choice - 1]
-            print(
-                "You chose {}. Searching for episodes...".format(chosen_anime["title"])
-            )
+            print(f"You chose {chosen_anime['title']}. Searching for episodes...")
 
             episode_list = search_anime_episode_list(chosen_anime["url"])
             if len(episode_list) == 0:
@@ -187,13 +188,13 @@ def get_anime_episode(episode_endpoint: str) -> list:
                 print("No episode found for this anime")
             else:
                 print(f"Found {len(episode_list)} results: ")
                 for (i, episode) in enumerate(episode_list):
-                    print(("{}. {}").format(i + 1, episode["title"]))
+                    print(f"{i+1}. {episode['title']}")
 
                 episode_choice = int(
                     input("\nChoose an episode by serial no: ").strip()
                 )
                 chosen_episode = episode_list[episode_choice - 1]
-                print("You chose {}. Searching...".format(chosen_episode["title"]))
+                print(f"You chose {chosen_episode['title']}. Searching...")

From cdfffbc8679234d8407efc1157c4e8dcfdd2059e Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 03:08:17 +0530
Subject: [PATCH 5/8] pr review fixes

---
 web_programming/fetch_anime_and_play.py | 105 ++++++++++++-----------
 1 file changed, 53 insertions(+), 52 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index 675e5a6de803..df2ed43ae4a3 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -1,9 +1,9 @@
-from urllib.error import HTTPError
 from xml.dom import NotFoundErr
 
 import requests
 from bs4 import BeautifulSoup, NavigableString
 from fake_useragent import UserAgent
+from requests.exceptions import RequestException
 
 BASE_URL = "https://ww1.gogoanime2.org"
 
@@ -28,28 +28,31 @@ def search_scraper(anime_name: str) -> list:
         [list]: [List of animes]
     """
 
-    try:
+    # concat the name to form the search url.
+    search_url = f"{BASE_URL}/search/{anime_name}"
 
-        # concat the name to form the search url.
-        search_url = f"{BASE_URL}/search/{anime_name}"
+    try:
         response = requests.get(
             search_url, headers={"UserAgent": UserAgent().chrome}
         )  # request the url.
 
         # Is the response ok?
         response.raise_for_status()
+    except RequestException as e:
+        raise e
 
-        # parse with soup.
-        soup = BeautifulSoup(response.text, "html.parser")
+    # parse with soup.
+    soup = BeautifulSoup(response.text, "html.parser")
 
-        # get list of anime
-        items_ul = soup.find("ul", {"class": "items"})
-        items_li = items_ul.children
+    # get list of anime
+    items_ul = soup.find("ul", {"class": "items"})
+    items_li = items_ul.children
 
-        # for each anime, insert to list. the name and url.
-        anime_list = []
-        for li in items_li:
-            if not isinstance(li, NavigableString):
+    # for each anime, insert to list. the name and url.
+    anime_list = []
+    for li in items_li:
+        if not isinstance(li, NavigableString):
+            try:
                 anime_url, anime_title = li.find("a")["href"], li.find("a")["title"]
                 anime_list.append(
                     {
                         "title": anime_title,
                         "url": anime_url,
                     }
                 )
+            except (NotFoundErr, KeyError):
+                pass
 
-        return anime_list
-
-    except (requests.exceptions.RequestException, HTTPError, TypeError) as e:
-        raise e
+    return anime_list
@@ -85,37 +87,38 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
         [list]: [List of episodes]
     """
 
-    try:
+    request_url = f"{BASE_URL}{episode_endpoint}"
 
-        request_url = f"{BASE_URL}{episode_endpoint}"
+    try:
         response = requests.get(
             url=request_url, headers={"UserAgent": UserAgent().chrome}
         )
-        soup = BeautifulSoup(response.text, "html.parser")
+        response.raise_for_status()
+    except RequestException as e:
+        raise e
 
-        # With this id. get the episode list.
-        episode_page_ul = soup.find("ul", {"id": "episode_related"})
-        episode_page_li = episode_page_ul.children
+    soup = BeautifulSoup(response.text, "html.parser")
 
-        episode_list = []
-        for children in episode_page_li:
-            try:
-                if not isinstance(children, NavigableString):
-                    episode_list.append(
-                        {
-                            "title": children.find(
-                                "div", {"class": "name"}
-                            ).text.replace(" ", ""),
-                            "url": children.find("a")["href"],
-                        }
-                    )
-            except (KeyError, NotFoundErr, TypeError):
-                pass
+    # With this id. get the episode list.
+    episode_page_ul = soup.find("ul", {"id": "episode_related"})
+    episode_page_li = episode_page_ul.children
 
+    episode_list = []
+    for children in episode_page_li:
+        try:
+            if not isinstance(children, NavigableString):
+                episode_list.append(
+                    {
+                        "title": children.find("div", {"class": "name"}).text.replace(
+                            " ", ""
+                        ),
+                        "url": children.find("a")["href"],
+                    }
+                )
+        except (KeyError, NotFoundErr):
+            pass
 
-        return episode_list
+    return episode_list
 
-    except (requests.exceptions.RequestException) as e:
-        raise e
 
 def get_anime_episode(episode_endpoint: str) -> list:
@@ -137,27 +140,25 @@ def get_anime_episode(episode_endpoint: str) -> list:
         [list]: [List of download and watch url]
     """
 
-    try:
-
-        episode_page_url = f"{BASE_URL}{episode_endpoint}"
+    episode_page_url = f"{BASE_URL}{episode_endpoint}"
 
+    try:
         response = requests.get(
             url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
         )
-        soup = BeautifulSoup(response.text, "lxml")
+    except RequestException as e:
+        raise e
 
+    soup = BeautifulSoup(response.text, "html.parser")
+
+    try:
         episode_url = soup.find("iframe", {"id": "playerframe"})["src"]
         download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8"
-        return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"]
-
-    except (
-        KeyError,
-        NotFoundErr,
-        TypeError,
-        requests.exceptions.RequestException,
-    ) as e:
+    except (KeyError, NotFoundErr) as e:
         raise e
+
+    return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"]
 
 
 if __name__ == "__main__":

From d5f33c56116522f76a4c757a26a6cf5e4c56ba0e Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 03:23:00 +0530
Subject: [PATCH 6/8] remove unused exception

---
 web_programming/fetch_anime_and_play.py | 33 +++++++++++----------------------
 1 file changed, 11 insertions(+), 22 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index df2ed43ae4a3..c82ad25640d4 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -3,7 +3,6 @@
 import requests
 from bs4 import BeautifulSoup, NavigableString
 from fake_useragent import UserAgent
-from requests.exceptions import RequestException
 
 BASE_URL = "https://ww1.gogoanime2.org"
 
@@ -31,15 +30,12 @@ def search_scraper(anime_name: str) -> list:
     # concat the name to form the search url.
     search_url = f"{BASE_URL}/search/{anime_name}"
 
-    try:
-        response = requests.get(
-            search_url, headers={"UserAgent": UserAgent().chrome}
-        )  # request the url.
+    response = requests.get(
+        search_url, headers={"UserAgent": UserAgent().chrome}
+    )  # request the url.
 
-        # Is the response ok?
-        response.raise_for_status()
-    except RequestException as e:
-        raise e
+    # Is the response ok?
+    response.raise_for_status()
 
     # parse with soup.
     soup = BeautifulSoup(response.text, "html.parser")
 
@@ -89,13 +85,8 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
 
     request_url = f"{BASE_URL}{episode_endpoint}"
 
-    try:
-        response = requests.get(
-            url=request_url, headers={"UserAgent": UserAgent().chrome}
-        )
-        response.raise_for_status()
-    except RequestException as e:
-        raise e
+    response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome})
+    response.raise_for_status()
 
     soup = BeautifulSoup(response.text, "html.parser")
 
@@ -142,12 +133,10 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
     episode_page_url = f"{BASE_URL}{episode_endpoint}"
 
-    try:
-        response = requests.get(
-            url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
-        )
-    except RequestException as e:
-        raise e
+    response = requests.get(
+        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
+    )
+    response.raise_for_status()
 
     soup = BeautifulSoup(response.text, "html.parser")
 

From 20d1b451af8bd9df3238c768ecf3e37d5aeb995c Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 03:31:28 +0530
Subject: [PATCH 7/8] change var name

---
 web_programming/fetch_anime_and_play.py | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index c82ad25640d4..c0195afe0660 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -41,15 +41,18 @@ def search_scraper(anime_name: str) -> list:
     soup = BeautifulSoup(response.text, "html.parser")
 
     # get list of anime
-    items_ul = soup.find("ul", {"class": "items"})
-    items_li = items_ul.children
+    anime_ul = soup.find("ul", {"class": "items"})
+    anime_li = anime_ul.children
 
     # for each anime, insert to list. the name and url.
     anime_list = []
-    for li in items_li:
-        if not isinstance(li, NavigableString):
+    for anime in anime_li:
+        if not isinstance(anime, NavigableString):
             try:
-                anime_url, anime_title = li.find("a")["href"], li.find("a")["title"]
+                anime_url, anime_title = (
+                    anime.find("a")["href"],
+                    anime.find("a")["title"],
+                )
                 anime_list.append(
                     {
                         "title": anime_title,
@@ -95,15 +98,15 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
     episode_page_li = episode_page_ul.children
 
     episode_list = []
-    for children in episode_page_li:
+    for episode in episode_page_li:
         try:
-            if not isinstance(children, NavigableString):
+            if not isinstance(episode, NavigableString):
                 episode_list.append(
                     {
-                        "title": children.find("div", {"class": "name"}).text.replace(
+                        "title": episode.find("div", {"class": "name"}).text.replace(
                             " ", ""
                         ),
-                        "url": children.find("a")["href"],
+                        "url": episode.find("a")["href"],
                     }
                 )

From 9ee4e4e670c7b8a83e21dc119c241c850a3a7c26 Mon Sep 17 00:00:00 2001
From: saptarshi1996
Date: Wed, 2 Feb 2022 03:34:10 +0530
Subject: [PATCH 8/8] fix comments

---
 web_programming/fetch_anime_and_play.py | 67 ++++++++++++--------------
 1 file changed, 29 insertions(+), 38 deletions(-)

diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py
index c0195afe0660..e11948d0ae78 100644
--- a/web_programming/fetch_anime_and_play.py
+++ b/web_programming/fetch_anime_and_play.py
@@ -11,7 +11,7 @@ def search_scraper(anime_name: str) -> list:
 
     """[summary]
 
-    This function will take an url and
+    Take an url and
     return list of anime after scraping the site.
     >>> type(search_scraper("demon_slayer"))
     <class 'list'>
 
     Args:
         anime_name (str): [Name of anime]
@@ -69,7 +69,7 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
 
     """[summary]
 
-    This function will take an url and
+    Take an url and
     return list of episodes after scraping the site
     for an url.
 
@@ -154,44 +154,35 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
 if __name__ == "__main__":
 
-    try:
-
-        anime_name = input("Enter anime name: ").strip()
-        anime_list = search_scraper(anime_name)
-        print("\n")
+    anime_name = input("Enter anime name: ").strip()
+    anime_list = search_scraper(anime_name)
+    print("\n")
 
-        if len(anime_list) == 0:
-            print("No anime found with this name")
-        else:
+    if len(anime_list) == 0:
+        print("No anime found with this name")
+    else:
 
-            print(f"Found {len(anime_list)} results: ")
-            for (i, anime) in enumerate(anime_list):
-                anime_title = anime["title"]
-                print(f"{i+1}. {anime_title}")
+        print(f"Found {len(anime_list)} results: ")
+        for (i, anime) in enumerate(anime_list):
+            anime_title = anime["title"]
+            print(f"{i+1}. {anime_title}")
 
-            anime_choice = int(
-                input("\nPlease choose from the following list: ").strip()
-            )
-            chosen_anime = anime_list[anime_choice - 1]
-            print(f"You chose {chosen_anime['title']}. Searching for episodes...")
+        anime_choice = int(input("\nPlease choose from the following list: ").strip())
+        chosen_anime = anime_list[anime_choice - 1]
+        print(f"You chose {chosen_anime['title']}. Searching for episodes...")
 
-            episode_list = search_anime_episode_list(chosen_anime["url"])
-            if len(episode_list) == 0:
-                print("No episode found for this anime")
-            else:
-                print(f"Found {len(episode_list)} results: ")
-                for (i, episode) in enumerate(episode_list):
-                    print(f"{i+1}. {episode['title']}")
+        episode_list = search_anime_episode_list(chosen_anime["url"])
+        if len(episode_list) == 0:
+            print("No episode found for this anime")
+        else:
+            print(f"Found {len(episode_list)} results: ")
+            for (i, episode) in enumerate(episode_list):
+                print(f"{i+1}. {episode['title']}")
 
-                episode_choice = int(
-                    input("\nChoose an episode by serial no: ").strip()
-                )
-                chosen_episode = episode_list[episode_choice - 1]
-                print(f"You chose {chosen_episode['title']}. Searching...")
+            episode_choice = int(input("\nChoose an episode by serial no: ").strip())
+            chosen_episode = episode_list[episode_choice - 1]
+            print(f"You chose {chosen_episode['title']}. Searching...")
 
-                episode_url, download_url = get_anime_episode(chosen_episode["url"])
-                print(f"\nTo watch, ctrl+click on {episode_url}.")
-                print(f"To download, ctrl+click on {download_url}.")
-
-    except (ValueError, IndexError, TypeError) as e:
-        raise e
+            episode_url, download_url = get_anime_episode(chosen_episode["url"])
+            print(f"\nTo watch, ctrl+click on {episode_url}.")
+            print(f"To download, ctrl+click on {download_url}.")
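
A minimal, non-interactive usage sketch of the three functions this series adds. This is a sketch only: it assumes the file is importable as web_programming.fetch_anime_and_play, that the site is reachable, and the search term "naruto" is purely illustrative; the function names and return shapes come from the patches above.

    # Hypothetical driver for the functions added by this patch series.
    from web_programming.fetch_anime_and_play import (
        get_anime_episode,
        search_anime_episode_list,
        search_scraper,
    )

    # search_scraper returns a list of {"title": ..., "url": ...} dicts.
    anime_list = search_scraper("naruto")
    if anime_list:
        # Each "url" is a site endpoint such as "/anime/...".
        episode_list = search_anime_episode_list(anime_list[0]["url"])
        if episode_list:
            # get_anime_episode returns [watch_url, download_url].
            episode_url, download_url = get_anime_episode(episode_list[0]["url"])
            print(f"Watch: {episode_url}")
            print(f"Download: {download_url}")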