Compare commits
10 Commits
e89201cb4b
...
scraper_fu
Author | SHA1 | Date
---|---|---
9c63ada6e5 | |||
9c76ea38ce | |||
79f9c3ec02 | |||
834934fccd | |||
dd371e3326
|
|||
252a7fbdba | |||
70b2c9e322 | |||
4bb860b818
|
|||
bc2e7422f5
|
|||
c2286ae0a6
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,2 +1,3 @@
|
||||
config.ini
|
||||
venv/
|
||||
__pycache__
|
||||
|
13
functions.py
13
functions.py
@@ -1,3 +1,4 @@
|
||||
import ollama
|
||||
import scraper_functions as sf
|
||||
|
||||
def processmsg(msg, rcpt):
|
||||
@@ -5,7 +6,17 @@ def processmsg(msg, rcpt):
|
||||
return msg.replace("youtube.com", "iv.datura.network")
|
||||
elif msg.startswith("!wiki"):
|
||||
return sf.query_external_website("https://en.wikipedia.org/wiki/", msg.split(" ")[1])
|
||||
elif "good bot" in msg:
|
||||
return "^_^"
|
||||
|
||||
def command(msg, rcpt):
    """Dispatch a chatbot command message and return the reply text.

    Parameters:
        msg:  raw message text, expected to start with "!".
        rcpt: reply recipient (unused here; kept so callers' signature
              expectations are unchanged).

    Returns:
        The reply string for a recognized command, or None (implicitly)
        for unrecognized input so the bot stays silent.
    """
    if msg.startswith("!help"):
        # Build the help page one entry per line.
        help_lines = [
            "chatbot commands:",
            "!help Show this help page",
            "!ai [message] Ask llama2",
        ]
        return "\n".join(help_lines)
    elif msg.startswith("!ai"):
        # Forward everything after the "!ai " prefix to the LLM service.
        # NOTE(review): network call — assumes the ollama host is reachable.
        client = ollama.Client(host='https://ollama.krov.dmz.rs')
        response = client.chat(
            model='llama2-uncensored:latest',
            messages=[{'role': 'user', 'content': msg[4:]}],
        )
        return response['message']['content']
    # Unknown command: fall through and return None (no reply).
|
||||
|
||||
|
@@ -1,3 +1,4 @@
|
||||
slixmpp
|
||||
ollama
|
||||
requests
|
||||
beautifulsoup4
|
||||
|
@@ -2,8 +2,11 @@ import requests
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
def query_external_website(base_url, query):
    """Fetch `base_url + query` and return a short TITLE/CONTENT summary.

    Tailored to MediaWiki page markup: the title is taken from the
    `span.mw-page-title-main` element and the content from the third
    paragraph inside `#mw-content-text`.

    Parameters:
        base_url: URL prefix, e.g. "https://en.wikipedia.org/wiki/".
        query:    page name appended verbatim to base_url.

    Returns:
        A formatted summary string, or an apology string when the page
        cannot be fetched or parsed.
    """
    try:
        # Timeout keeps a hung server from blocking the bot indefinitely.
        page = requests.get(base_url + query, timeout=10)
        soup = BeautifulSoup(page.content, "html.parser")
        title = soup.find("span", class_="mw-page-title-main").text
        content = soup.find(id="mw-content-text").select("p")[2].text
        return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content + "\n\nFULL LINK:\n" + base_url + query
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any fetch/parse failure yields a friendly reply.
        return "Can't parse search result :("
|
||||
|
Reference in New Issue
Block a user