import feedparser
import time
import json
import os
import random
import math

import requests
from bs4 import BeautifulSoup


def slow2print(text: str = "Type a string in", delay_time: float = 0.05,
               mistake_frequency: int = 30):
    """Type `text` to the terminal one character at a time, occasionally
    'mistyping' a word or two and backspacing over the mistake."""
    words = text.split(' ')
    for i, word in enumerate(words):
        # Introduce a mistake approximately every `mistake_frequency` words
        if i % mistake_frequency == 0 and i != 0:
            # Append one or two random words from the text as the mistake
            mistake_word = word + ' ' + ' '.join(
                random.choice(words) for _ in range(random.randint(1, 2)))
            for character in mistake_word:
                print(character, end='', flush=True)
                time.sleep(delay_time)
            time.sleep(2)  # pause for 2 seconds after making the mistake
            # Delete the extra characters one by one at half speed,
            # leaving the original word in place
            for _ in range(len(mistake_word) - len(word)):
                print('\b \b', end='', flush=True)  # backspace over one character
                time.sleep(delay_time * 2)
            print(' ', end='', flush=True)  # ensure a space after each word
        else:
            # Print the correct word
            for character in word:
                print(character, end='', flush=True)
                time.sleep(delay_time)
            print(' ', end='', flush=True)  # ensure a space after each word


def slow2bold(text: str = "Type a string in", delay_time: float = 0.05,
              mistake_frequency: int = 30):
    """Like slow2print, but renders the first half of each word in bold."""
    words = text.split(' ')
    for i, word in enumerate(words):
        # Introduce a mistake approximately every `mistake_frequency` words
        if i % mistake_frequency == 0 and i != 0:
            # Append one or two random words from the text as the mistake
            mistake_word = word + ' ' + ' '.join(
                random.choice(words) for _ in range(random.randint(1, 2)))
            for character in mistake_word:
                print(character, end='', flush=True)
                time.sleep(delay_time)
            time.sleep(2)  # pause for 2 seconds after making the mistake
            # Delete the extra characters one by one at half speed,
            # leaving the original word in place
            for _ in range(len(mistake_word) - len(word)):
                print('\b \b', end='', flush=True)  # backspace over one character
                time.sleep(delay_time * 2)
            print(' ', end='', flush=True)  # ensure a space after each word
        else:
            # Print the correct word with its first half in bold
            mid = math.ceil(len(word) / 2)
            first_half = '\033[1;37;40m' + word[:mid] + '\033[0;37;40m'
            second_half = word[mid:]
            for character in first_half + second_half:
                print(character, end='', flush=True)
                time.sleep(delay_time)
            print(' ', end='', flush=True)  # ensure a space after each word


slow2print("RandomBoo Presents \n")
time.sleep(1)


def get_feeds(file):
    """Read feed URLs (one per line) from `file`, creating it if needed and
    offering to add a first feed when the file is empty."""
    if not os.path.exists(file):
        open(file, 'w').close()
        print(f'\033[1;32;40mCreated a new file: {file}\033[0m')
    with open(file, 'r') as f:
        feeds = [line.strip() for line in f if line.strip()]
    if not feeds:
        print('\033[1;32;40mThe file is \033[0m\033[5;37;40mempty\033[0;37;40m'
              '\033[1;32;40m. Would you like to add a feed? '
              '(\033[0m\033[1;37;40my\033[0;37;40m\033[1;32;40m/\033[0m'
              '\033[1;37;40mn\033[0;37;40m\033[1;32;40m)\033[0m ')
        if input().lower() == 'y':
            feed = input('\033[1;32;40mPlease enter the feed URL: \033[0m')
            with open(file, 'a') as f:
                f.write(feed + '\n')
            feeds.append(feed)
    return feeds


def get_posts(feed):
    return feedparser.parse(feed).entries


def get_unread_posts(posts, read_posts):
    # Fall back to the entry's link when it carries no id
    return [post for post in posts if post.get('id', post.link) not in read_posts]


def save_read_posts(read_posts, file='read_posts.json'):
    with open(file, 'w') as f:
        json.dump(read_posts, f)


def load_read_posts(file='read_posts.json'):
    try:
        with open(file, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return []


def clean_html(html):
    """Strip markup from a feed summary, unwrapping links so only their
    text remains."""
    soup = BeautifulSoup(html, 'html.parser')
    for a in soup.find_all('a'):
        a.unwrap()
    return soup.get_text()


def scrape_page(url):
    """Fetch `url` and return the text of the <p> tags inside its first
    <article> element, or an empty string on failure."""
    try:
        page = requests.get(url, timeout=10)
        page.raise_for_status()
    except requests.RequestException:
        return ''
    soup = BeautifulSoup(page.content, 'html.parser')
    article_text = ''
    article = soup.find('article')
    if article:
        for paragraph in article.find_all('p'):
            article_text += paragraph.get_text() + '\n'
    return article_text


def main():
    feeds = get_feeds('feeds.txt')
    read_posts = load_read_posts()
    while True:
        for feed in feeds:
            posts = get_posts(feed)
            unread_posts = get_unread_posts(posts, read_posts)
            # Oldest first, so posts appear in publication order
            for post in unread_posts[::-1]:
                print(f'\033[1;32;40mTitle: \033[0m\033[0;37;42m{post.title}\033[0m \n')
                if hasattr(post, 'summary'):
                    slow2print(f' \033[1;32;40mSummary: \033[0m\033[3;37;40m'
                               f'{clean_html(post.summary)}\033[0;37;40m \n\n')
                page_content = scrape_page(post.link)
                slow2bold(page_content)
                # Persist progress after every post so a restart resumes cleanly
                read_posts.append(post.get('id', post.link))
                save_read_posts(read_posts)
        slow2print('\033[1;36;40mNo new posts available. Checking again in '
                   '\033[0m\033[5;37;40m10 seconds\033[0;37;40m'
                   '\033[1;36;40m...\033[0m ')
        time.sleep(10)


if __name__ == '__main__':
    main()
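
# ---------------------------------------------------------------------------
# Usage sketch (the script filename below is an assumption; the data files
# are the ones the code itself creates and reads):
#
#   $ python rss_reader.py        # hypothetical filename
#
# feeds.txt holds one RSS/Atom URL per line, e.g. https://example.com/feed.xml
# (illustrative URL). On first run the script creates feeds.txt and offers to
# add a feed interactively. IDs of shown posts are recorded in
# read_posts.json, so a restart only types out entries not displayed before.
# ---------------------------------------------------------------------------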