30 Commits

Author SHA1 Message Date
t3xhno
97d613df58 Added tasks integration 2024-02-06 21:17:49 +01:00
t3xhno
1e56a84a4c Added required positional argument placeholder 2024-02-06 21:05:02 +01:00
c011383f0e fix wiki commands 2024-02-06 20:51:31 +01:00
544f8052e9 fix wiki placing it into command functions 2024-02-06 20:48:08 +01:00
t3xhno
161abdf32e Better wiki redirect 2024-02-06 11:39:10 +01:00
a256bc277d Better heading selector 2024-02-06 03:46:17 +01:00
d4d14806db Raise exception on None content 2024-02-06 03:37:26 +01:00
6df3c82a7e Get first available paragraph from query 2024-02-06 03:34:43 +01:00
41e38ef80f Correct url link from multiword wiki query 2024-02-06 02:58:45 +01:00
0813460e8b Merge branch 'master' of ssh://gitea.dmz.rs:2222/Decentrala/chatbot 2024-02-06 02:49:46 +01:00
4ca01a868d Multiword wiki query added 2024-02-06 02:49:40 +01:00
1795a87c4b add wiki to help 2024-02-06 02:47:01 +01:00
9704474c29 Merge branch 'master' of ssh://gitea.dmz.rs:2222/Decentrala/chatbot 2024-02-06 02:38:46 +01:00
9c63ada6e5 Added try catch for scraper functions 2024-02-06 02:37:19 +01:00
5b4ae05582 Merge pull request 'scraper_functions' (#1) from scraper_functions into master
Reviewed-on: #1
2024-02-06 01:24:57 +00:00
9c76ea38ce Merge branch 'master' into scraper_functions
Merged master,resolved conflicts
2024-02-06 02:22:44 +01:00
79f9c3ec02 Resolvedconflicts 2024-02-06 02:22:34 +01:00
834934fccd Added scraper function 2024-02-06 02:21:53 +01:00
dd371e3326 add good bot 2024-02-06 02:13:36 +01:00
252a7fbdba Merge branch 'master' of ssh://gitea.dmz.rs:2222/Decentrala/chatbot
Conflicts resolved
2024-02-06 02:08:13 +01:00
70b2c9e322 Resolvedconflicts 2024-02-06 02:08:08 +01:00
4bb860b818 add newlines to help 2024-02-06 02:07:47 +01:00
bc2e7422f5 add ollama to help 2024-02-06 02:05:31 +01:00
e89201cb4b Added scraper functions (wikipedia,for now) 2024-02-06 02:04:47 +01:00
c2286ae0a6 add ollama command 2024-02-06 02:00:41 +01:00
a8b9850be5 Merge branch 'master' of ssh://gitea.dmz.rs:2222/Decentrala/chatbot 2024-02-06 01:05:09 +01:00
6b3a3853c3 add help command 2024-02-06 01:03:54 +01:00
08f958ff1e Updated README 2024-02-06 00:56:07 +01:00
21357a9b71 Removed config.ini from repo 2024-02-06 00:54:20 +01:00
ab863f03a3 Added example config.ini 2024-02-06 00:52:32 +01:00
6 changed files with 68 additions and 4 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
config.ini
venv/
__pycache__

View File

@@ -7,3 +7,6 @@ sudo apt install python3-slixmpp
## Install dependencies with pip
pip install -r requirements.txt
## Setup
Create `config.ini` based on the `config.ini.example`, with your credentials

View File

@@ -1,5 +1,29 @@
import ollama
import scraper_functions as sf
def processmsg(msg, rcpt):
    """Transform an incoming chat message into the bot's reply.

    Args:
        msg:  raw message text received from the room/user.
        rcpt: recipient identifier (currently unused by the branches below,
              kept for interface compatibility with callers).

    Returns:
        The reply string, or None when no rule matches (caller is expected
        to treat a falsy result as "no reply").
    """
    # Rewrite YouTube links to the privacy-friendly Invidious mirror.
    if "youtube.com/watch" in msg:
        return msg.replace("youtube.com", "iv.datura.network")
    # "!"-prefixed messages are bot commands; delegate to the dispatcher.
    elif msg.startswith("!"):
        return command(msg, "")
    elif "good bot" in msg:
        return "^_^"
def command(msg, rcpt):
    """Dispatch a "!"-prefixed chatbot command.

    Args:
        msg:  full command text, e.g. "!wiki Python".
        rcpt: recipient identifier (unused by current commands, kept for
              interface compatibility).

    Returns:
        Reply string for the recognized command, or None for an unknown one.
    """
    if msg.startswith("!help"):
        response = "chatbot commands:" + "\n"
        response += "!help Show this help page" + "\n"
        response += "!ai [message] Ask llama2" + "\n"
        response += "!wiki [message] Ask wiki"
        return response
    elif msg.startswith("!ai"):
        # msg[4:] strips the "!ai " prefix; the remainder is the prompt.
        client = ollama.Client(host='https://ollama.krov.dmz.rs')
        response = client.chat(model='llama2-uncensored:latest', messages=[{'role': 'user', 'content': f'{msg[4:]}'}])
        return response['message']['content']
    elif msg.startswith("!wiki"):
        # Guard: a bare "!wiki" has no space, so split(" ", 1) would yield a
        # single element and the original 2-target unpack raised ValueError.
        parts = msg.split(" ", 1)
        if len(parts) < 2 or not parts[1].strip():
            return "Usage: !wiki [message]"
        return sf.query_external_website("https://en.wikipedia.org", "/wiki/" + parts[1])
    elif msg.startswith("!tasks"):
        content = sf.getDmzTasks()
        return content

View File

@@ -1 +1,4 @@
slixmpp
ollama
requests
beautifulsoup4

33
scraper_functions.py Normal file
View File

@@ -0,0 +1,33 @@
import requests
from bs4 import BeautifulSoup
from urllib.parse import quote
def query_external_website(base_url, query):
    """Fetch a MediaWiki-style page and return a plain-text summary.

    Args:
        base_url: site root, e.g. "https://en.wikipedia.org".
        query:    path component, e.g. "/wiki/Python"; it is percent-encoded
                  before the request.

    Returns:
        A formatted string with the page title, the first unclassed
        paragraph of the article body, and the full link — or an error
        message string when fetching/parsing fails.
    """
    try:
        # Build the encoded URL once; it is needed for both the request
        # and the "FULL LINK" footer of the reply.
        url = base_url + quote(query)
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        title = soup.find(id="firstHeading").text
        mainContentElement = soup.find(id="mw-content-text")
        # MediaWiki soft redirects render a stub page; follow the first
        # link inside the redirect notice instead of summarizing the stub.
        if "This page is a redirect" in mainContentElement.text:
            redirectLink = mainContentElement.find(class_="redirectMsg").find_all("a")[0]["href"]
            return query_external_website(base_url, redirectLink)
        # The first <p> without a class attribute is the lead paragraph;
        # classed paragraphs are infobox/maintenance chrome.
        content = next((paragraph for paragraph in mainContentElement.select("p") if not paragraph.has_attr("class")), None)
        if content is None:
            raise Exception("Can't parse search result :(")
        return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content.text + "\n\nFULL LINK:\n" + url
    except Exception as e:
        # Return a string, not the exception object, so every code path
        # yields the same type for the chat layer to send.
        return str(e)
def getDmzTasks(url="https://todo.dmz.rs/"):
    """Scrape the DMZ task board and list the active tasks.

    Args:
        url: task-board URL; defaults to the DMZ instance so existing
             zero-argument callers are unaffected.

    Returns:
        A newline-separated "index title" listing prefixed with a header,
        or an error message string when fetching/parsing fails.
    """
    try:
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        tasks = soup.find_all(class_="task")
        result = "\nActive tasks:\n"
        for task in tasks:
            # Each task card carries two <div>s: [0] index, [1] title.
            taskIndex = task.select("div")[0].text
            taskTitle = task.select("div")[1].text
            result += taskIndex + " " + taskTitle + "\n"
        return result
    except Exception as e:
        # Return a string, not the exception object, matching the
        # string return type of the success path.
        return str(e)