21 Commits

Author SHA1 Message Date
328329eb9a Refinements to the weather function
- only letters are allowed in the city name
- also added an English variant, `!weather`
2024-02-07 17:47:36 -05:00
63044c545f Added a weather function
Usage: !vreme <city_name>
- uses the wttr.in API
- returns a single line, which is easier than parsing JSON
- also reacts to `!prognoza`
2024-02-07 17:21:54 -05:00
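
The "single line" mentioned in this commit is wttr.in's ?format=3 response, which the diff below requests verbatim; a minimal sketch of the call, assuming wttr.in's usual one-line output shape (the city is just an example):

    import requests

    # format=3 makes wttr.in answer with a single plain-text line,
    # e.g. "Beograd: ⛅️ +5°C", so there is no JSON to parse
    resp = requests.get("https://wttr.in/Beograd?format=3")
    print(resp.content.decode("utf-8").strip())
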
t3xhno
5382b876e2 Added !tasks to !help 2024-02-06 22:18:44 +01:00
t3xhno
3d80517a6f Add link to tasks that have users assigned 2024-02-06 22:08:43 +01:00
t3xhno
d08e8199f8 Added more info for dmz tasks 2024-02-06 22:06:06 +01:00
t3xhno
adb4a25d25 Formatting 2024-02-06 21:27:55 +01:00
t3xhno
dab4e41de0 Added link to tasks 2024-02-06 21:24:04 +01:00
t3xhno
97d613df58 Added tasks integration 2024-02-06 21:17:49 +01:00
t3xhno
1e56a84a4c Added required positional argument placeholder 2024-02-06 21:05:02 +01:00
c011383f0e fix wiki commands 2024-02-06 20:51:31 +01:00
544f8052e9 fix wiki placing it into command functions 2024-02-06 20:48:08 +01:00
t3xhno
161abdf32e Better wiki redirect 2024-02-06 11:39:10 +01:00
a256bc277d Better heading selector 2024-02-06 03:46:17 +01:00
d4d14806db Raise exception on None content 2024-02-06 03:37:26 +01:00
6df3c82a7e Get first available paragraph from query 2024-02-06 03:34:43 +01:00
41e38ef80f Correct url link from multiword wiki query 2024-02-06 02:58:45 +01:00
0813460e8b Merge branch 'master' of ssh://gitea.dmz.rs:2222/Decentrala/chatbot 2024-02-06 02:49:46 +01:00
4ca01a868d Multiword wiki query added 2024-02-06 02:49:40 +01:00
1795a87c4b add wiki to help 2024-02-06 02:47:01 +01:00
9704474c29 Merge branch 'master' of ssh://gitea.dmz.rs:2222/Decentrala/chatbot 2024-02-06 02:38:46 +01:00
5b4ae05582 Merge pull request 'scraper_functions' (#1) from scraper_functions into master
Reviewed-on: #1
2024-02-06 01:24:57 +00:00
2 changed files with 67 additions and 10 deletions

View File

@@ -4,8 +4,8 @@ import scraper_functions as sf
 def processmsg(msg, rcpt):
     if "youtube.com/watch" in msg:
         return msg.replace("youtube.com", "iv.datura.network")
-    elif msg.startswith("!wiki"):
-        return sf.query_external_website("https://en.wikipedia.org/wiki/", msg.split(" ")[1])
+    elif msg.startswith("!"):
+        return command(msg, "")
     elif "good bot" in msg:
         return "^_^"
@@ -13,10 +13,22 @@ def command(msg, rcpt):
     if msg.startswith("!help"):
         response = "chatbot commands:" + "\n"
         response += "!help Show this help page" + "\n"
-        response += "!ai [message] Ask llama2"
+        response += "!ai [message] Ask llama2" + "\n"
+        response += "!wiki [message] Ask wiki\n"
+        response += "!tasks Show active tasks from the taskmanager\n"
+        response += "!vreme [city] | !prognoza [city] | !weather [city] Show weather for [city]\n"
         return response
     elif msg.startswith("!ai"):
         client = ollama.Client(host='https://ollama.krov.dmz.rs')
         response = client.chat(model='llama2-uncensored:latest', messages=[{'role':'user','content':f'{msg[4:]}'}])
         return(response['message']['content'])
+    elif msg.startswith("!wiki"):
+        cmd, query = msg.split(" ", 1)
+        return sf.query_external_website("https://en.wikipedia.org", "/wiki/" + query)
+    elif msg.startswith("!tasks"):
+        content = sf.getDmzTasks("https://todo.dmz.rs/")
+        return content
+    elif msg.startswith("!vreme") or msg.startswith("!prognoza") or msg.startswith("!weather"):
+        _, query = msg.split(" ", 1)
+        return sf.get_weather(query)
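
The first hunk above turns processmsg into a thin dispatcher: every "!"-prefixed message is forwarded to command(). A minimal sketch of the resulting flow, assuming the file is imported as a module named bot (this view does not show the file's name):

    # hypothetical usage; "bot" is an assumed module name for the file above
    import bot

    bot.processmsg("!help", "")           # falls through to command()
    bot.processmsg("!vreme Beograd", "")  # -> sf.get_weather("Beograd")

Note that a bare "!vreme" with no argument would raise ValueError at the tuple unpacking, because msg.split(" ", 1) then yields only one element.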

View File (scraper_functions.py)

@@ -1,12 +1,57 @@
 import requests
 from bs4 import BeautifulSoup
+from urllib.parse import quote
+
+def getSoup(base_url, query = ""):
+    page = requests.get(base_url + quote(query))
+    soup = BeautifulSoup(page.content, "html.parser")
+    return soup
 
 def query_external_website(base_url, query):
     try:
-        page = requests.get(base_url + query)
-        soup = BeautifulSoup(page.content, "html.parser")
-        title = soup.find("span", class_="mw-page-title-main").text
-        content = soup.find(id="mw-content-text").select("p")[2].text
-        return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content + "\n\nFULL LINK:\n" + base_url + query
-    except:
-        return "Can't parse search result :("
+        soup = getSoup(base_url, query)
+        title = soup.find(id="firstHeading").text
+        mainContentElement = soup.find(id="mw-content-text")
+        if "This page is a redirect" in mainContentElement.text:
+            redirectLink = mainContentElement.find(class_="redirectMsg").find_all("a")[0]["href"]
+            return query_external_website(base_url, redirectLink)
+        content = next((paragraph for paragraph in mainContentElement.select("p") if not paragraph.has_attr("class")), None)
+        if content == None:
+            raise Exception("Can't parse search result :(")
+        return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content.text + "\n\nFULL LINK:\n" + base_url + quote(query)
+    except Exception as e:
+        return e
+
+def getDmzTasks(url):
+    try:
+        soup = getSoup(url)
+        tasks = soup.find_all(class_="task")
+        result = "\nActive tasks:\n"
+        for task in tasks:
+            taskIndex = task.select("div")[0].text
+            taskTitle = task.select("div")[1].text
+            result += taskIndex + " " + taskTitle
+            taskSoup = getSoup(url + task.find("a")["href"][1:])
+            description = taskSoup.find("main").select("section")[0].find("p").text
+            result += "\n\tDescription:\n" + "\t\t" + description + "\n"
+            result += "\tAssigned users:\n" + "\t\t"
+            assignedUsers = taskSoup.find_all(class_="user-info-wrap")
+            if len(assignedUsers) == 0:
+                result += "None! Be the first :)\n"
+                result += "\tLink: " + url + task.find("a")["href"][1:] + "\n\n"
+                continue
+            usersList = ""
+            for user in assignedUsers:
+                usersList += user.find("div").text.split(": ")[1] + ", "
+            result += usersList[:-2] + "\n"
+            result += "\tLink: " + url + task.find("a")["href"][1:] + "\n\n"
+        return result
+    except Exception as e:
+        return e
+
+def get_weather(city:str) -> str:
+    url = f"https://wttr.in/{city}?format=3"
+    if not city.isalpha():
+        return "no such city"
+    resp = requests.get(url)
+    return resp.content.decode("utf-8").strip()
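
The isalpha() guard in get_weather matches the follow-up commit's "only letters are allowed in the city name", and it is stricter than it may look: str.isalpha() is False for any string containing a space or digit, so multi-word city names never reach wttr.in. A quick illustration:

    # str.isalpha() rejects spaces and digits, so these return "no such city"
    "Beograd".isalpha()     # True  -> request is made
    "Novi Sad".isalpha()    # False -> "no such city"
    "Paris15".isalpha()     # False -> "no such city"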