Compare commits

...

23 commits
0.2.2 ... main

Author SHA1 Message Date
Alexey Skobkin 61b3bd00c3 Merge pull request 'Raising the limit of history container size' (#31) from skobkin-patch-1-history-limit into main
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #31
2024-10-30 16:13:24 +00:00
Alexey Skobkin 40b20b1b50 Raising the limit of history container size
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-10-30 16:12:39 +00:00
Alexey Skobkin 519b4f40bb Merge pull request 'Chat history tweaks, handlers priority fix, system prompt clarification' (#28) from feature_chat_history into main
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #28
2024-10-27 23:06:45 +00:00
Alexey Skobkin de5165f5ec
Presenting chat history as 'system' messages. Presenting bot replies as 'assistant' messages. Tweaking system prompt.
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-10-28 02:04:35 +03:00
Alexey Skobkin edf2158d29
Fixing handlers priority. 2024-10-28 01:55:17 +03:00
Alexey Skobkin 5d616dd31e Merge pull request 'Adding in-memory chat history support, removing inline queries and some tweaks' (#27) from feature_chat_history into main
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #27
2024-10-27 21:38:51 +00:00
Alexey Skobkin 1f9583cb29
#26 Adding in-memory chat history support. Removing inline queries. Refactoring stats and message processing a bit. Also changing LLM request context building a bit. Also adding alias for summarization and some other small changes.
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-10-28 00:35:35 +03:00
Alexey Skobkin 8326333bcf REAMDE.md fix (#25)
All checks were successful
continuous-integration/drone/push Build is passing
REAMDE.md fix

Co-authored-by: Alexey Skobkin <skobkin-ru@ya.ru>
Reviewed-on: #25
2024-08-16 00:59:04 +00:00
Alexey Skobkin f28670834d Model configuration and small prompt improvements (#24)
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
- Model can be configured using ENV
- Summarization prompt improved
- Single text request prompt improved
- Chat context prompt improved

Co-authored-by: Alexey Skobkin <skobkin-ru@ya.ru>
Reviewed-on: #24
2024-08-16 00:47:07 +00:00
Alexey Skobkin 82c4d953d4 Merge pull request 'Rolling back summarizing to Mistral' (#23) from change-summarize-model-mistral into main
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #23
2024-05-06 00:29:55 +00:00
Alexey Skobkin d65f61db82 Rolling back summarizing to Mistral
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-05-06 00:28:41 +00:00
Alexey Skobkin c4665338db Changing default model to LLaMa3 (#22)
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone/tag Build is passing
Changing default model to LLaMa3

Reviewed-on: #22
2024-05-06 00:15:36 +00:00
Alexey Skobkin db92c01131 Merge pull request 'URL scheme whitelist and Inline Queries.' (#21) from fix_chat_type_middleware_nil_pointer into main
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #21
2024-03-12 22:20:06 +00:00
Alexey Skobkin 7bb5c65d59
Closes #14. Adding inline queries. Also small refactoring of context prompt based on RequestContext.
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-03-13 01:18:01 +03:00
Alexey Skobkin ca005a9370
Extracting request context creation to separate file. 2024-03-13 00:32:52 +03:00
Alexey Skobkin d890faf461
Fix #20 disallowing any URL except http:// and https://. Extracting helper methods to separate file. 2024-03-13 00:32:18 +03:00
Alexey Skobkin 993c71ca71 Merge pull request 'Fixing a bunch of bugs and making some improvements' (#19) from fix_chat_type_middleware_nil_pointer into main
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #19
2024-03-12 20:08:50 +00:00
Alexey Skobkin 3fa7c2434f
Fix #17. Implementing slog-based logger for telego and passing it into the library.
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-03-12 23:05:52 +03:00
Alexey Skobkin d3c0bc28f1
Fix #18. Escaping underscore symbols to avoid Telegram's Bot API "Markdown" (v1) parser errors. 2024-03-12 23:01:05 +03:00
Alexey Skobkin 38fcd1a5a9
Adding API URL suffix to the README.md. 2024-03-12 22:13:32 +03:00
Alexey Skobkin bfacbb9f98
Tweaking summarizer prompt a bit. 2024-03-12 22:12:58 +03:00
Alexey Skobkin a2a37d0256
Trying to fix #13. Adding logging to the middleware. 2024-03-12 22:10:34 +03:00
Alexey Skobkin 5f55cec0e2
fix #15 slog usage. Also adding more logging. 2024-03-12 22:07:22 +03:00
14 changed files with 583 additions and 141 deletions

View file

@ -13,4 +13,11 @@ WORKDIR /app
COPY --from=builder /build/app .
# Do not forget "/v1" in the end
ENV OPENAI_API_BASE_URL="" \
OPENAI_API_TOKEN="" \
TELEGRAM_TOKEN="" \
MODEL_TEXT_REQUEST="llama3.1:8b-instruct-q6_K" \
MODEL_SUMMARIZE_REQUEST="llama3.1:8b-instruct-q6_K"
CMD ["/app/app"]

View file

@ -8,8 +8,10 @@
```shell
docker run \
-e OLLAMA_TOKEN=123 \
-e OLLAMA_BASE_URL=http://ollama.tld:11434 \
-e OPENAI_API_TOKEN=123 \
-e OPENAI_API_BASE_URL=http://ollama.localhost:11434/v1 \
-e TELEGRAM_TOKEN=12345 \
-e MODEL_TEXT_REQUEST=llama3.1:8b-instruct-q6_K
-e MODEL_SUMMARIZE_REQUEST=mistral-nemo:12b-instruct-2407-q4_K_M
skobkin/telegram-llm-bot
```

View file

@ -6,7 +6,6 @@ import (
th "github.com/mymmrac/telego/telegohandler"
tu "github.com/mymmrac/telego/telegoutil"
"log/slog"
"net/url"
"strings"
"telegram-ollama-reply-bot/extractor"
"telegram-ollama-reply-bot/llm"
@ -19,47 +18,75 @@ var (
ErrHandlerInit = errors.New("cannot initialize handler")
)
type BotInfo struct {
Id int64
Username string
Name string
}
type Bot struct {
api *telego.Bot
llm *llm.LlmConnector
extractor *extractor.Extractor
stats *stats.Stats
models ModelSelection
history map[int64]*MessageRingBuffer
profile BotInfo
markdownV1Replacer *strings.Replacer
}
func NewBot(api *telego.Bot, llm *llm.LlmConnector, extractor *extractor.Extractor) *Bot {
func NewBot(
api *telego.Bot,
llm *llm.LlmConnector,
extractor *extractor.Extractor,
models ModelSelection,
) *Bot {
return &Bot{
api: api,
llm: llm,
extractor: extractor,
stats: stats.NewStats(),
models: models,
history: make(map[int64]*MessageRingBuffer),
profile: BotInfo{0, "", ""},
markdownV1Replacer: strings.NewReplacer(
// https://core.telegram.org/bots/api#markdown-style
"_", "\\_",
//"*", "\\*",
//"`", "\\`",
//"[", "\\[",
),
}
}
func (b *Bot) Run() error {
botUser, err := b.api.GetMe()
if err != nil {
slog.Error("Cannot retrieve api user", err)
slog.Error("Cannot retrieve api user", "error", err)
return ErrGetMe
}
slog.Info("Running api as", map[string]any{
"id": botUser.ID,
"username": botUser.Username,
"name": botUser.FirstName,
"is_bot": botUser.IsBot,
})
slog.Info("Running api as", "id", botUser.ID, "username", botUser.Username, "name", botUser.FirstName, "is_bot", botUser.IsBot)
b.profile = BotInfo{
Id: botUser.ID,
Username: botUser.Username,
Name: botUser.FirstName,
}
updates, err := b.api.UpdatesViaLongPolling(nil)
if err != nil {
slog.Error("Cannot get update channel", err)
slog.Error("Cannot get update channel", "error", err)
return ErrUpdatesChannel
}
bh, err := th.NewBotHandler(b.api, updates)
if err != nil {
slog.Error("Cannot initialize bot handler", err)
slog.Error("Cannot initialize bot handler", "error", err)
return ErrHandlerInit
}
@ -68,42 +95,60 @@ func (b *Bot) Run() error {
defer b.api.StopLongPolling()
// Middlewares
bh.Use(b.chatHistory)
bh.Use(b.chatTypeStatsCounter)
// Handlers
// Command handlers
bh.Handle(b.startHandler, th.CommandEqual("start"))
bh.Handle(b.heyHandler, th.CommandEqual("hey"))
bh.Handle(b.summarizeHandler, th.CommandEqual("summarize"))
bh.Handle(b.summarizeHandler, th.Or(th.CommandEqual("summarize"), th.CommandEqual("s")))
bh.Handle(b.statsHandler, th.CommandEqual("stats"))
bh.Handle(b.helpHandler, th.CommandEqual("help"))
bh.Handle(b.textMessageHandler, th.AnyMessageWithText())
bh.Start()
return nil
}
func (b *Bot) heyHandler(bot *telego.Bot, update telego.Update) {
slog.Info("/hey")
func (b *Bot) textMessageHandler(bot *telego.Bot, update telego.Update) {
slog.Debug("/any-message")
b.stats.HeyRequest()
message := update.Message
parts := strings.SplitN(update.Message.Text, " ", 2)
userMessage := "Hey!"
if len(parts) == 2 {
userMessage = parts[1]
switch {
// Mentions
case b.isMentionOfMe(update):
slog.Info("/any-message", "type", "mention")
b.processMention(message)
// Replies
case b.isReplyToMe(update):
slog.Info("/any-message", "type", "reply")
b.processMention(message)
// Private chat
case b.isPrivateWithMe(update):
slog.Info("/any-message", "type", "private")
b.processMention(message)
default:
slog.Debug("/any-message", "info", "Message is not mention, reply or private chat. Skipping.")
}
}
chatID := tu.ID(update.Message.Chat.ID)
func (b *Bot) processMention(message *telego.Message) {
b.stats.Mention()
slog.Info("/mention", "chat", message.Chat.ID)
chatID := tu.ID(message.Chat.ID)
b.sendTyping(chatID)
requestContext := b.createLlmRequestContext(update)
requestContext := b.createLlmRequestContextFromMessage(message)
llmReply, err := b.llm.HandleSingleRequest(userMessage, llm.ModelMistralUncensored, requestContext)
llmReply, err := b.llm.HandleChatMessage(message.Text, b.models.TextRequestModel, requestContext)
if err != nil {
slog.Error("Cannot get reply from LLM connector")
_, _ = b.api.SendMessage(b.reply(update.Message, tu.Message(
_, _ = b.api.SendMessage(b.reply(message, tu.Message(
chatID,
"LLM request error. Try again later.",
)))
@ -111,24 +156,27 @@ func (b *Bot) heyHandler(bot *telego.Bot, update telego.Update) {
return
}
slog.Debug("Got completion. Going to send.", llmReply)
slog.Debug("Got completion. Going to send.", "llm-completion", llmReply)
message := tu.Message(
reply := tu.Message(
chatID,
llmReply,
b.escapeMarkdownV1Symbols(llmReply),
).WithParseMode("Markdown")
_, err = bot.SendMessage(b.reply(update.Message, message))
_, err = b.api.SendMessage(b.reply(message, reply))
if err != nil {
slog.Error("Can't send reply message", err)
slog.Error("Can't send reply message", "error", err)
b.trySendReplyError(update.Message)
b.trySendReplyError(message)
return
}
b.saveBotReplyToHistory(message, llmReply)
}
func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
slog.Info("/summarize", update.Message.Text)
slog.Info("/summarize", "message-text", update.Message.Text)
b.stats.SummarizeRequest()
@ -136,7 +184,7 @@ func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
b.sendTyping(chatID)
args := strings.Split(update.Message.Text, " ")
args := strings.SplitN(update.Message.Text, " ", 2)
if len(args) < 2 {
_, _ = bot.SendMessage(tu.Message(
@ -149,9 +197,8 @@ func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
return
}
_, err := url.ParseRequestURI(args[1])
if err != nil {
slog.Error("Provided URL is not valid", args[1])
if !isValidAndAllowedUrl(args[1]) {
slog.Error("Provided text is not a valid URL", "text", args[1])
_, _ = b.api.SendMessage(b.reply(update.Message, tu.Message(
chatID,
@ -163,10 +210,10 @@ func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
article, err := b.extractor.GetArticleFromUrl(args[1])
if err != nil {
slog.Error("Cannot retrieve an article using extractor", err)
slog.Error("Cannot retrieve an article using extractor", "error", err)
}
llmReply, err := b.llm.Summarize(article.Text, llm.ModelMistralUncensored)
llmReply, err := b.llm.Summarize(article.Text, b.models.SummarizeModel)
if err != nil {
slog.Error("Cannot get reply from LLM connector")
@ -178,17 +225,17 @@ func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
return
}
slog.Debug("Got completion. Going to send.", llmReply)
slog.Debug("Got completion. Going to send.", "llm-completion", llmReply)
message := tu.Message(
chatID,
llmReply,
b.escapeMarkdownV1Symbols(llmReply),
).WithParseMode("Markdown")
_, err = bot.SendMessage(b.reply(update.Message, message))
if err != nil {
slog.Error("Can't send reply message", err)
slog.Error("Can't send reply message", "error", err)
b.trySendReplyError(update.Message)
}
@ -206,10 +253,12 @@ func (b *Bot) helpHandler(bot *telego.Bot, update telego.Update) {
"Instructions:\r\n"+
"/hey <text> - Ask something from LLM\r\n"+
"/summarize <link> - Summarize text from the provided link\r\n"+
"/help - Show this help",
"/s <link> - Shorter version\r\n"+
"/help - Show this help\r\n\r\n"+
"Mention bot or reply to it's message to communicate with it",
)))
if err != nil {
slog.Error("Cannot send a message", err)
slog.Error("Cannot send a message", "error", err)
b.trySendReplyError(update.Message)
}
@ -228,7 +277,7 @@ func (b *Bot) startHandler(bot *telego.Bot, update telego.Update) {
"Check out /help to learn how to use this bot.",
)))
if err != nil {
slog.Error("Cannot send a message", err)
slog.Error("Cannot send a message", "error", err)
b.trySendReplyError(update.Message)
}
@ -249,63 +298,12 @@ func (b *Bot) statsHandler(bot *telego.Bot, update telego.Update) {
"```",
)).WithParseMode("Markdown"))
if err != nil {
slog.Error("Cannot send a message", err)
slog.Error("Cannot send a message", "error", err)
b.trySendReplyError(update.Message)
}
}
func (b *Bot) createLlmRequestContext(update telego.Update) llm.RequestContext {
message := update.Message
rc := llm.RequestContext{}
if message == nil {
return rc
}
user := message.From
if user != nil {
rc.User = llm.UserContext{
Username: user.Username,
FirstName: user.FirstName,
LastName: user.LastName,
IsPremium: user.IsPremium,
}
}
chat := message.Chat
rc.Chat = llm.ChatContext{
Title: chat.Title,
Description: chat.Description,
Type: chat.Type,
}
return rc
}
func (b *Bot) reply(originalMessage *telego.Message, newMessage *telego.SendMessageParams) *telego.SendMessageParams {
return newMessage.WithReplyParameters(&telego.ReplyParameters{
MessageID: originalMessage.MessageID,
})
}
func (b *Bot) sendTyping(chatId telego.ChatID) {
slog.Debug("Setting 'typing' chat action")
err := b.api.SendChatAction(tu.ChatAction(chatId, "typing"))
if err != nil {
slog.Error("Cannot set chat action", err)
}
}
func (b *Bot) trySendReplyError(message *telego.Message) {
if message == nil {
return
}
_, _ = b.api.SendMessage(b.reply(message, tu.Message(
tu.ID(message.Chat.ID),
"Error occurred while trying to send reply.",
)))
func (b *Bot) escapeMarkdownV1Symbols(input string) string {
return b.markdownV1Replacer.Replace(input)
}

93
bot/helpers.go Normal file
View file

@ -0,0 +1,93 @@
package bot
import (
"github.com/mymmrac/telego"
tu "github.com/mymmrac/telego/telegoutil"
"log/slog"
"net/url"
"slices"
"strings"
)
var (
	// allowedUrlSchemes lists the only URL schemes accepted by
	// isValidAndAllowedUrl; anything else (ftp:, file:, javascript:, …)
	// is rejected.
	allowedUrlSchemes = []string{"http", "https"}
)
// reply configures newMessage to be sent as a reply to originalMessage
// and returns the same params value for call chaining.
func (b *Bot) reply(originalMessage *telego.Message, newMessage *telego.SendMessageParams) *telego.SendMessageParams {
	replyParams := telego.ReplyParameters{MessageID: originalMessage.MessageID}

	return newMessage.WithReplyParameters(&replyParams)
}
// sendTyping shows the "typing…" indicator in the given chat.
// A failure is logged and otherwise ignored: a missing indicator must
// never abort request processing.
func (b *Bot) sendTyping(chatId telego.ChatID) {
	slog.Debug("Setting 'typing' chat action")

	if err := b.api.SendChatAction(tu.ChatAction(chatId, "typing")); err != nil {
		slog.Error("Cannot set chat action", "error", err)
	}
}
// trySendReplyError makes a best-effort attempt to tell the user that
// sending a reply failed. A nil message is tolerated, and the send error
// itself is deliberately discarded — there is nothing more we can do.
func (b *Bot) trySendReplyError(message *telego.Message) {
	if message == nil {
		return
	}

	notice := tu.Message(
		tu.ID(message.Chat.ID),
		"Error occurred while trying to send reply.",
	)

	_, _ = b.api.SendMessage(b.reply(message, notice))
}
// isMentionOfMe reports whether the update's message text mentions the
// bot by its "@username" handle anywhere in the text.
func (b *Bot) isMentionOfMe(update telego.Update) bool {
	message := update.Message
	if message == nil {
		return false
	}

	mention := "@" + b.profile.Username

	return strings.Contains(message.Text, mention)
}
// isReplyToMe reports whether the update's message is a direct reply to
// one of this bot's own messages.
//
// Cleanup: the original re-checked replyToMessage != nil in the final
// return expression even though that was already established above;
// the chain is collapsed into a single guard clause.
func (b *Bot) isReplyToMe(update telego.Update) bool {
	message := update.Message
	if message == nil || message.ReplyToMessage == nil || message.ReplyToMessage.From == nil {
		return false
	}

	// The replied-to author must be this bot itself.
	return message.ReplyToMessage.From.ID == b.profile.Id
}
// isPrivateWithMe reports whether the update's message was sent in a
// one-on-one (private) chat with the bot.
func (b *Bot) isPrivateWithMe(update telego.Update) bool {
	if update.Message == nil {
		return false
	}

	return update.Message.Chat.Type == telego.ChatTypePrivate
}
// isValidAndAllowedUrl reports whether text parses as a URL whose scheme
// is on the allow-list (http/https). Rejections are logged at debug level.
func isValidAndAllowedUrl(text string) bool {
	parsed, err := url.ParseRequestURI(text)
	if err != nil {
		slog.Debug("Provided text is not an URL", "text", text)

		return false
	}

	scheme := strings.ToLower(parsed.Scheme)
	if slices.Contains(allowedUrlSchemes, scheme) {
		return true
	}

	slog.Debug("Provided URL has disallowed scheme", "scheme", parsed.Scheme, "allowed-schemes", allowedUrlSchemes)

	return false
}

24
bot/logger.go Normal file
View file

@ -0,0 +1,24 @@
package bot
import (
"fmt"
"log/slog"
)
// Logger adapts slog to the printf-style logging interface expected by
// telego, prepending a fixed prefix to every message.
type Logger struct {
	prefix string // prepended verbatim to each formatted log line
}

// NewLogger returns a Logger that prefixes every message with prefix.
func NewLogger(prefix string) Logger {
	return Logger{
		prefix: prefix,
	}
}

// msg renders one log line: the prefix followed by the printf-formatted
// message. Kept separate so formatting is testable without emitting logs.
func (l Logger) msg(format string, args ...any) string {
	// BUG FIX: args must be expanded with "...". The original Errorf
	// passed the slice as a single operand to Sprintf, and Debugf used
	// fmt.Sprint, which does not interpret format verbs at all.
	return l.prefix + fmt.Sprintf(format, args...)
}

// Debugf logs a printf-formatted message at debug level.
func (l Logger) Debugf(format string, args ...any) {
	slog.Debug(l.msg(format, args...))
}

// Errorf logs a printf-formatted message at error level.
func (l Logger) Errorf(format string, args ...any) {
	slog.Error(l.msg(format, args...))
}

93
bot/message_history.go Normal file
View file

@ -0,0 +1,93 @@
package bot
import (
"github.com/mymmrac/telego"
"log/slog"
)
// HistoryLength caps how many messages are remembered per chat.
const HistoryLength = 150

// Message is one entry of the in-memory chat history.
type Message struct {
	Name string
	Text string
	IsMe bool
}

// MessageRingBuffer stores the most recent messages of a single chat,
// evicting the oldest entry once its capacity is reached.
type MessageRingBuffer struct {
	messages []Message
	capacity int
}

// NewMessageBuffer returns an empty buffer holding at most capacity messages.
func NewMessageBuffer(capacity int) *MessageRingBuffer {
	return &MessageRingBuffer{
		messages: make([]Message, 0, capacity),
		capacity: capacity,
	}
}

// Push appends element to the buffer, evicting the oldest message first
// when the buffer is already full.
func (b *MessageRingBuffer) Push(element Message) {
	if full := len(b.messages) >= b.capacity; full {
		b.messages = b.messages[1:] // drop the oldest entry
	}

	b.messages = append(b.messages, element)
}

// GetAll returns all buffered messages, oldest first.
func (b *MessageRingBuffer) GetAll() []Message {
	return b.messages
}
// saveChatMessageToHistory records an incoming user message in the
// per-chat history buffer, creating the buffer on first use.
func (b *Bot) saveChatMessageToHistory(message *telego.Message) {
	chatId := message.Chat.ID

	// Robustness fix: message.From may be nil (e.g. channel posts,
	// anonymous group admins); the original dereferenced it blindly.
	// TODO confirm against telego's Message docs which updates omit From.
	fromId := int64(0)
	fromName := ""
	if message.From != nil {
		fromId = message.From.ID
		fromName = message.From.FirstName
	}

	slog.Info(
		"history-message-save",
		"chat", chatId,
		"from_id", fromId,
		"from_name", fromName,
		"text", message.Text,
	)

	if _, ok := b.history[chatId]; !ok {
		b.history[chatId] = NewMessageBuffer(HistoryLength)
	}

	b.history[chatId].Push(Message{
		Name: fromName,
		Text: message.Text,
		IsMe: false,
	})
}
// saveBotReplyToHistory records the bot's own reply text in the history
// buffer of the chat the original message came from, creating the buffer
// on first use.
func (b *Bot) saveBotReplyToHistory(message *telego.Message, reply string) {
	chatId := message.Chat.ID

	// Robustness fix: message.From may be nil (e.g. channel posts,
	// anonymous group admins); the original dereferenced it blindly
	// for logging. TODO confirm against telego's Message docs.
	toId := int64(0)
	toName := ""
	if message.From != nil {
		toId = message.From.ID
		toName = message.From.FirstName
	}

	slog.Info(
		"history-reply-save",
		"chat", chatId,
		"to_id", toId,
		"to_name", toName,
		"text", reply,
	)

	if _, ok := b.history[chatId]; !ok {
		b.history[chatId] = NewMessageBuffer(HistoryLength)
	}

	b.history[chatId].Push(Message{
		Name: b.profile.Username,
		Text: reply,
		IsMe: true,
	})
}
// getChatHistory returns the recorded messages for chatId, oldest first,
// or an empty slice when the chat has no history yet.
//
// Idiom fix: a single map lookup instead of the original's lookup-then-
// index pair.
func (b *Bot) getChatHistory(chatId int64) []Message {
	buffer, ok := b.history[chatId]
	if !ok {
		return make([]Message, 0)
	}

	return buffer.GetAll()
}

View file

@ -3,21 +3,48 @@ package bot
import (
"github.com/mymmrac/telego"
"github.com/mymmrac/telego/telegohandler"
"log/slog"
)
func (b *Bot) chatTypeStatsCounter(bot *telego.Bot, update telego.Update, next telegohandler.Handler) {
message := update.Message
if message == nil {
slog.Info("stats-middleware: update has no message. skipping.")
next(bot, update)
return
}
switch message.Chat.Type {
case telego.ChatTypeGroup, telego.ChatTypeSupergroup:
b.stats.GroupRequest()
if b.isMentionOfMe(update) || b.isReplyToMe(update) {
slog.Info("stats-middleware: counting message chat type in stats", "type", message.Chat.Type)
b.stats.GroupRequest()
}
case telego.ChatTypePrivate:
slog.Info("stats-middleware: counting message chat type in stats", "type", message.Chat.Type)
b.stats.PrivateRequest()
}
next(bot, update)
}
func (b *Bot) chatHistory(bot *telego.Bot, update telego.Update, next telegohandler.Handler) {
message := update.Message
if message == nil {
slog.Info("chat-history-middleware: update has no message. skipping.")
next(bot, update)
return
}
slog.Info("chat-history-middleware: saving message to history for", "chat_id", message.Chat.ID)
b.saveChatMessageToHistory(message)
next(bot, update)
}

6
bot/models.go Normal file
View file

@ -0,0 +1,6 @@
package bot
// ModelSelection holds the LLM model identifiers chosen via environment
// configuration (MODEL_TEXT_REQUEST / MODEL_SUMMARIZE_REQUEST in main).
type ModelSelection struct {
	TextRequestModel string // model used for chat/mention text requests
	SummarizeModel   string // model used for /summarize requests
}

68
bot/request_context.go Normal file
View file

@ -0,0 +1,68 @@
package bot
import (
"github.com/mymmrac/telego"
"log/slog"
"telegram-ollama-reply-bot/llm"
)
// createLlmRequestContextFromMessage builds the llm.RequestContext for
// one incoming Telegram message: the sender's profile, the chat metadata
// and the recorded chat history. A nil message yields an Empty context.
func (b *Bot) createLlmRequestContextFromMessage(message *telego.Message) llm.RequestContext {
	rc := llm.RequestContext{
		Empty: true,
	}

	if message == nil {
		slog.Debug("request context creation problem: no message provided. returning empty context.", "request-context", rc)

		return rc
	}

	rc.Empty = false

	if user := message.From; user != nil {
		rc.User = llm.UserContext{
			Username:  user.Username,
			FirstName: user.FirstName,
			LastName:  user.LastName,
			IsPremium: user.IsPremium,
		}
	}

	// TODO: implement retrieval of chat description
	chat := message.Chat

	rc.Chat = llm.ChatContext{
		Title: chat.Title,
		// TODO: fill when ChatFullInfo retrieved
		//Description: chat.Description,
		Type:    chat.Type,
		History: historyToLlmMessages(b.getChatHistory(chat.ID)),
	}

	slog.Debug("request context created", "request-context", rc)

	return rc
}
// historyToLlmMessages converts buffered chat messages into the
// llm.ChatMessage representation used by the LLM connector. An empty
// history yields an empty (non-nil) slice, as before.
//
// BUG FIX: the original never copied msg.IsMe, yet llm.ChatMessage has
// an IsMe field and the connector branches on it to present bot replies
// with the assistant role — with IsMe always false that branch was dead.
func historyToLlmMessages(history []Message) []llm.ChatMessage {
	// Pre-size once; the len==0 case falls out naturally, so the
	// original's special-cased empty branch is unnecessary.
	result := make([]llm.ChatMessage, 0, len(history))

	for _, msg := range history {
		result = append(result, llm.ChatMessage{
			Name: msg.Name,
			Text: msg.Text,
			IsMe: msg.IsMe,
		})
	}

	return result
}

View file

@ -3,6 +3,7 @@ package extractor
import (
"errors"
"github.com/advancedlogic/GoOse"
"log/slog"
)
var (
@ -28,12 +29,18 @@ type Article struct {
}
func (e *Extractor) GetArticleFromUrl(url string) (Article, error) {
slog.Info("extractor: requested extraction from URL ", "url", url)
article, err := e.goose.ExtractFromURL(url)
if err != nil {
slog.Error("extractor: failed extracting from URL", "url", url)
return Article{}, ErrExtractFailed
}
slog.Debug("extractor: article extracted", "article", article)
return Article{
Title: article.Title,
Text: article.CleanedText,

View file

@ -5,13 +5,13 @@ import (
"errors"
"github.com/sashabaranov/go-openai"
"log/slog"
"strconv"
"strings"
)
var (
ErrLlmBackendRequestFailed = errors.New("llm back-end request failed")
ErrNoChoices = errors.New("no choices in LLM response")
ModelMistralUncensored = "dolphin-mistral"
)
type LlmConnector struct {
@ -29,20 +29,47 @@ func NewConnector(baseUrl string, token string) *LlmConnector {
}
}
func (l *LlmConnector) HandleSingleRequest(text string, model string, requestContext RequestContext) (string, error) {
func (l *LlmConnector) HandleChatMessage(text string, model string, requestContext RequestContext) (string, error) {
systemPrompt := "You're a bot in the Telegram chat.\n" +
"You're using a free model called \"" + model + "\".\n\n" +
requestContext.Prompt()
historyLength := len(requestContext.Chat.History)
if historyLength > 0 {
systemPrompt += "\nYou have access to last " + strconv.Itoa(historyLength) + "messages in this chat."
}
req := openai.ChatCompletionRequest{
Model: model,
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleSystem,
Content: "You're a bot in the Telegram chat. " +
"You're using a free model called \"" + model + "\". " +
"You see only messages addressed to you using commands due to privacy settings. " +
requestContext.Prompt(),
Role: openai.ChatMessageRoleSystem,
Content: systemPrompt,
},
},
}
if historyLength > 0 {
for _, msg := range requestContext.Chat.History {
var msgRole string
var msgText string
if msg.IsMe {
msgRole = openai.ChatMessageRoleAssistant
msgText = msg.Text
} else {
msgRole = openai.ChatMessageRoleSystem
msgText = "User " + msg.Name + " said:\n" + msg.Text
}
req.Messages = append(req.Messages, openai.ChatCompletionMessage{
Role: msgRole,
Content: msgText,
})
}
}
req.Messages = append(req.Messages, openai.ChatCompletionMessage{
Role: openai.ChatMessageRoleUser,
Content: text,
@ -50,15 +77,15 @@ func (l *LlmConnector) HandleSingleRequest(text string, model string, requestCon
resp, err := l.client.CreateChatCompletion(context.Background(), req)
if err != nil {
slog.Error("LLM back-end request failed", err)
slog.Error("llm: LLM back-end request failed", "error", err)
return "", ErrLlmBackendRequestFailed
}
slog.Debug("Received LLM back-end response", resp)
slog.Debug("llm: Received LLM back-end response", "response", resp)
if len(resp.Choices) < 1 {
slog.Error("LLM back-end reply has no choices")
slog.Error("llm: LLM back-end reply has no choices")
return "", ErrNoChoices
}
@ -72,9 +99,11 @@ func (l *LlmConnector) Summarize(text string, model string) (string, error) {
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleSystem,
Content: "You are a short digest editor. Summarize the text you received " +
"as a list of bullet points with most important facts from the text. " +
"If possible, use the same language as the original text.",
Content: "You're a text shortener. Give a very brief summary of the main facts " +
"point by point. Format them as a list of bullet points each starting with \"-\". " +
"Avoid any commentaries and value judgement on the matter. " +
"If possible, respond in the same language as the original text." +
"Do not use any non-ASCII characters.",
},
},
}
@ -86,18 +115,56 @@ func (l *LlmConnector) Summarize(text string, model string) (string, error) {
resp, err := l.client.CreateChatCompletion(context.Background(), req)
if err != nil {
slog.Error("LLM back-end request failed", err)
slog.Error("llm: LLM back-end request failed", "error", err)
return "", ErrLlmBackendRequestFailed
}
slog.Debug("Received LLM back-end response", resp)
slog.Debug("llm: Received LLM back-end response", resp)
if len(resp.Choices) < 1 {
slog.Error("LLM back-end reply has no choices")
slog.Error("llm: LLM back-end reply has no choices")
return "", ErrNoChoices
}
return resp.Choices[0].Message.Content, nil
}
func (l *LlmConnector) GetModels() []string {
var result []string
models, err := l.client.ListModels(context.Background())
if err != nil {
slog.Error("llm: Model list request failed", "error", err)
return result
}
slog.Info("Model list retrieved", "models", models)
for _, model := range models.Models {
result = append(result, model.ID)
}
return result
}
func (l *LlmConnector) HasModel(id string) bool {
model, err := l.client.GetModel(context.Background(), id)
if err != nil {
slog.Error("llm: Model request failed", "error", err)
}
slog.Debug("llm: Returned model", "model", model)
if model.ID != "" {
return true
}
return false
}
func quoteMessage(text string) string {
return "> " + strings.ReplaceAll(text, "\n", "\n> ")
}

View file

@ -1,8 +1,9 @@
package llm
type RequestContext struct {
User UserContext
Chat ChatContext
Empty bool
User UserContext
Chat ChatContext
}
type UserContext struct {
@ -16,28 +17,48 @@ type ChatContext struct {
Title string
Description string
Type string
History []ChatMessage
}
type ChatMessage struct {
Name string
Text string
IsMe bool
}
func (c RequestContext) Prompt() string {
prompt := "The type of chat you're in is \"" + c.Chat.Type + "\". "
if c.Empty {
return ""
}
prompt := ""
prompt += "The type of chat you're in is \"" + c.Chat.Type + "\". "
if c.Chat.Type == "group" || c.Chat.Type == "supergroup" {
prompt += "Please consider that there are several users in this chat type who may discuss several unrelated " +
"topics. Try to respond only about the topic you were asked about and only to the user who asked you, " +
"but keep in mind another chat history. "
}
if c.Chat.Title != "" {
prompt += "Chat is called \"" + c.Chat.Title + "\". "
prompt += "\nChat is called \"" + c.Chat.Title + "\". "
}
if c.Chat.Description != "" {
prompt += "Chat description is \"" + c.Chat.Description + "\". "
}
prompt += "According to their profile, first name of the user who wrote you is \"" + c.User.FirstName + "\". "
prompt += "\nProfile of the user who mentioned you in the chat:" +
"First name: \"" + c.User.FirstName + "\"\n"
if c.User.Username != "" {
prompt += "Their username is @" + c.User.Username + ". "
prompt += "Username: @" + c.User.Username + ".\n"
}
if c.User.LastName != "" {
prompt += "Their last name is \"" + c.User.LastName + "\". "
}
if c.User.IsPremium {
prompt += "They have Telegram Premium subscription. "
prompt += "Last name: \"" + c.User.LastName + "\"\n"
}
//if c.User.IsPremium {
// prompt += "Telegram Premium subscription: active."
//}
return prompt
}

31
main.go
View file

@ -12,25 +12,44 @@ import (
)
func main() {
ollamaToken := os.Getenv("OLLAMA_TOKEN")
ollamaBaseUrl := os.Getenv("OLLAMA_BASE_URL")
apiToken := os.Getenv("OPENAI_API_TOKEN")
apiBaseUrl := os.Getenv("OPENAI_API_BASE_URL")
models := bot.ModelSelection{
TextRequestModel: os.Getenv("MODEL_TEXT_REQUEST"),
SummarizeModel: os.Getenv("MODEL_SUMMARIZE_REQUEST"),
}
slog.Info("Selected", "models", models)
telegramToken := os.Getenv("TELEGRAM_TOKEN")
llmc := llm.NewConnector(ollamaBaseUrl, ollamaToken)
llmc := llm.NewConnector(apiBaseUrl, apiToken)
slog.Info("Checking models availability")
for _, model := range []string{models.TextRequestModel, models.SummarizeModel} {
if !llmc.HasModel(model) {
slog.Error("Model not unavailable", "model", model)
os.Exit(1)
}
}
slog.Info("All needed models are available")
ext := extractor.NewExtractor()
telegramApi, err := tg.NewBot(telegramToken, tg.WithDefaultLogger(false, true))
telegramApi, err := tg.NewBot(telegramToken, tg.WithLogger(bot.NewLogger("telego: ")))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
botService := bot.NewBot(telegramApi, llmc, ext)
botService := bot.NewBot(telegramApi, llmc, ext, models)
err = botService.Run()
if err != nil {
slog.Error("Running bot finished with an error", err)
slog.Error("Running bot finished with an error", "error", err)
os.Exit(1)
}
}

View file

@ -13,8 +13,9 @@ type Stats struct {
GroupRequests uint64
PrivateRequests uint64
InlineQueries uint64
HeyRequests uint64
Mentions uint64
SummarizeRequests uint64
}
@ -24,8 +25,9 @@ func NewStats() *Stats {
GroupRequests: 0,
PrivateRequests: 0,
InlineQueries: 0,
HeyRequests: 0,
Mentions: 0,
SummarizeRequests: 0,
}
}
@ -36,16 +38,18 @@ func (s *Stats) MarshalJSON() ([]byte, error) {
GroupRequests uint64 `json:"group_requests"`
PrivateRequests uint64 `json:"private_requests"`
InlineQueries uint64 `json:"inline_queries"`
HeyRequests uint64 `json:"hey_requests"`
Mentions uint64 `json:"mentions"`
SummarizeRequests uint64 `json:"summarize_requests"`
}{
Uptime: time.Now().Sub(s.RunningSince).String(),
GroupRequests: s.GroupRequests,
PrivateRequests: s.PrivateRequests,
InlineQueries: s.InlineQueries,
HeyRequests: s.HeyRequests,
Mentions: s.Mentions,
SummarizeRequests: s.SummarizeRequests,
})
}
@ -59,6 +63,12 @@ func (s *Stats) String() string {
return string(data)
}
func (s *Stats) InlineQuery() {
s.mu.Lock()
defer s.mu.Unlock()
s.InlineQueries++
}
func (s *Stats) GroupRequest() {
s.mu.Lock()
defer s.mu.Unlock()
@ -71,10 +81,10 @@ func (s *Stats) PrivateRequest() {
s.PrivateRequests++
}
func (s *Stats) HeyRequest() {
func (s *Stats) Mention() {
s.mu.Lock()
defer s.mu.Unlock()
s.HeyRequests++
s.Mentions++
}
func (s *Stats) SummarizeRequest() {