"""A small terminal RSS reader: fetches feeds, shows unread posts one at a
time, and can scrape the full article text on request."""

import json
import os
import sys
import time

import feedparser
import requests
from bs4 import BeautifulSoup


def slowprint(text: str = "Type a string in", delay_time: float = 0.05):
    """Print text one character at a time for a typewriter effect."""
    for character in text:
        sys.stdout.write(character)
        sys.stdout.flush()
        time.sleep(delay_time)


def slow2print(text: str = "Type a string in", delay_time: float = 0.02):
    """Faster variant of slowprint, used for long article bodies."""
    slowprint(text, delay_time)


def get_feeds(file):
    """Load feed URLs from a file, creating (and optionally seeding) it if needed."""
    if not os.path.exists(file):
        open(file, 'w').close()
        print(f'\033[1;32;40mCreated a new file: {file}\033[0m')
    with open(file, 'r') as f:
        feeds = [line.strip() for line in f if line.strip()]
    if not feeds:
        print('\033[1;32;40mThe file is \033[0m\033[5;37;40mempty\033[0;37;40m'
              '\033[1;32;40m. Would you like to add a feed? (\033[0m'
              '\033[1;37;40my\033[0;37;40m\033[1;32;40m/\033[0m'
              '\033[1;37;40mn\033[0;37;40m\033[1;32;40m)\033[0m ')
        if input().lower() == 'y':
            feed = input('\033[1;32;40mPlease enter the feed URL: \033[0m')
            with open(file, 'a') as f:
                f.write(feed + '\n')
            feeds.append(feed)
    return feeds


def get_posts(feed):
    """Fetch and parse a feed, returning its entries."""
    return feedparser.parse(feed).entries


def get_unread_posts(posts, read_posts):
    """Filter out posts already seen; fall back to the link when an entry has no id."""
    return [post for post in posts if post.get('id', post.link) not in read_posts]


def save_read_posts(read_posts, file='read_posts.json'):
    """Persist the list of read post ids as JSON."""
    with open(file, 'w') as f:
        json.dump(read_posts, f)


def load_read_posts(file='read_posts.json'):
    """Load previously read post ids; start fresh if the file does not exist yet."""
    try:
        with open(file, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return []


def clean_html(html):
    """Strip <a> tags (keeping their text) and return the plain text of a summary."""
    soup = BeautifulSoup(html, 'html.parser')
    for a in soup.find_all('a'):
        a.unwrap()
    return soup.get_text()


def scrape_page(url):
    """Fetch a page and return the text of every <p> inside its <article>, if any."""
    page = requests.get(url, timeout=10)
    soup = BeautifulSoup(page.content, 'html.parser')
    article_text = ''
    article = soup.find('article')
    if article:
        for paragraph in article.find_all('p'):
            article_text += paragraph.get_text() + '\n'
    return article_text


def main():
    feeds = get_feeds('feeds.txt')
    read_posts = load_read_posts()
    while True:
        for feed in feeds:
            posts = get_posts(feed)
            unread_posts = get_unread_posts(posts, read_posts)
            # Show oldest first, since feeds list newest entries first.
            for post in unread_posts[::-1]:
                print(f'\033[1;32;40mTitle: \033[0m\033[0;37;42m{post.title}\033[0m \n')
                if hasattr(post, 'summary'):
                    slowprint(f' \033[1;32;40mSummary: \033[0m\033[3;37;40m{clean_html(post.summary)}\033[0;37;40m \n')
                slowprint('\033[1;36;40mDo you want to continue to the next post? (\033[0m'
                          '\033[1;37;40my\033[0;37;40m\033[1;36;40m/\033[0m'
                          '\033[1;37;40mn\033[0;37;40m\033[1;36;40m), press "\033[0m'
                          '\033[1;37;40mm\033[0;37;40m\033[1;36;40m" to \033[0m'
                          '\033[5;37;40mread more\033[0;37;40m \n')
                user_input = input().lower()
                if user_input == 'm':
                    slow2print(scrape_page(post.link))
                elif user_input != 'y':
                    return
                # Mark the post as read and save immediately so progress survives exits.
                read_posts.append(post.get('id', post.link))
                save_read_posts(read_posts)
        slowprint('\033[1;36;40mNo new posts available. Checking again in \033[0m'
                  '\033[5;37;40m10 seconds\033[0;37;40m\033[1;36;40m...\033[0m ')
        time.sleep(10)


if __name__ == '__main__':
    slowprint("RandomBoo Presents \n")
    time.sleep(1)
    main()