import requests
from bs4 import BeautifulSoup
from urllib.parse import quote


def query_external_website(base_url, query):
    """Fetch a page at base_url + query and return its title, first plain paragraph, and full link."""
    try:
        page = requests.get(base_url + quote(query))
        soup = BeautifulSoup(page.content, "html.parser")
        title = soup.find(id="firstHeading").text
        mainContentElement = soup.find(id="mw-content-text")

        # Follow a MediaWiki-style redirect page to its target article.
        if "This page is a redirect" in mainContentElement.text:
            redirectLink = mainContentElement.find(class_="redirectMsg").find_all("a")[0]["href"]
            return query_external_website(base_url, redirectLink)

        # Take the first paragraph that has no class attribute (the lead paragraph).
        content = next(
            (paragraph for paragraph in mainContentElement.select("p") if not paragraph.has_attr("class")),
            None,
        )
        if content is None:
            raise Exception("Can't parse search result :(")

        return (
            "\nTITLE:\n" + title
            + "\n\nCONTENT:\n" + content.text
            + "\n\nFULL LINK:\n" + base_url + quote(query)
        )
    except Exception as e:
        # Surface the error message as text so the caller always receives a string.
        return str(e)


def getDmzTasks(url):
    """List all elements with class "task" on the given page, with their index, title, and link."""
    try:
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        tasks = soup.find_all(class_="task")
        result = "\nActive tasks:\n"
        for task in tasks:
            taskIndex = task.select("div")[0].text
            taskTitle = task.select("div")[1].text
            result += (
                taskIndex + " " + taskTitle + " | "
                + "Link: " + url + task.find("a")["href"][1:] + "\n"
            )
        return result
    except Exception as e:
        return str(e)
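For context, a minimal usage sketch of the two helpers above. The MediaWiki-specific selectors (firstHeading, mw-content-text, redirectMsg) suggest a Wikipedia-style target; both URLs below are hypothetical placeholders, not values taken from the original code.

# Assumes query_external_website and getDmzTasks from the listing above are in scope.
if __name__ == "__main__":
    # Hypothetical Wikipedia-style base URL; the query is URL-quoted and appended.
    print(query_external_website("https://en.wikipedia.org/wiki/", "Python_(programming_language)"))

    # Hypothetical internal task-board URL exposing elements with class "task".
    print(getDmzTasks("http://dmz.example/tasks/"))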