Compare commits
4 Commits
834934fccd
...
scraper_fu
Author | SHA1 | Date | |
---|---|---|---|
9c63ada6e5 | |||
9c76ea38ce | |||
79f9c3ec02 | |||
dd371e3326
|
@@ -6,6 +6,8 @@ def processmsg(msg, rcpt):
|
|||||||
return msg.replace("youtube.com", "iv.datura.network")
|
return msg.replace("youtube.com", "iv.datura.network")
|
||||||
elif msg.startswith("!wiki"):
|
elif msg.startswith("!wiki"):
|
||||||
return sf.query_external_website("https://en.wikipedia.org/wiki/", msg.split(" ")[1])
|
return sf.query_external_website("https://en.wikipedia.org/wiki/", msg.split(" ")[1])
|
||||||
|
elif "good bot" in msg:
|
||||||
|
return "^_^"
|
||||||
|
|
||||||
def command(msg, rcpt):
|
def command(msg, rcpt):
|
||||||
if msg.startswith("!help"):
|
if msg.startswith("!help"):
|
||||||
|
@@ -2,8 +2,11 @@ import requests
|
|||||||
from bs4 import BeautifulSoup
|
from bs4 import BeautifulSoup
|
||||||
|
|
||||||
def query_external_website(base_url, query):
    """Fetch ``base_url + query`` and return a plain-text summary of the page.

    Tailored to MediaWiki (Wikipedia) page structure: the title is taken from
    the ``span.mw-page-title-main`` element and the content from the third
    ``<p>`` inside ``#mw-content-text``.

    Args:
        base_url: URL prefix, e.g. "https://en.wikipedia.org/wiki/".
        query: Page name appended to the base URL.

    Returns:
        A formatted string with title, content paragraph and the full link,
        or a fallback message if the page cannot be fetched or parsed.
    """
    try:
        # Timeout so a dead/slow server can't hang the bot forever
        # (the original call had no timeout at all).
        page = requests.get(base_url + query, timeout=10)
        # Non-2xx responses raise HTTPError and fall into the handler below,
        # same observable outcome as the old code's AttributeError path.
        page.raise_for_status()
        soup = BeautifulSoup(page.content, "html.parser")
        title = soup.find("span", class_="mw-page-title-main").text
        # p[2]: the first two paragraphs are usually empty/infobox filler
        # on Wikipedia articles — TODO confirm this holds for all targets.
        content = soup.find(id="mw-content-text").select("p")[2].text
        return (
            "\nTITLE:\n" + title
            + "\n\nCONTENT:\n" + content
            + "\n\nFULL LINK:\n" + base_url + query
        )
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; still best-effort for callers.
        return "Can't parse search result :("
|
||||||
|
Reference in New Issue
Block a user