Compare commits

..

No commits in common. "main" and "0.3.2" have entirely different histories.
main ... 0.3.2

12 changed files with 189 additions and 393 deletions

View file

@@ -13,11 +13,4 @@ WORKDIR /app
COPY --from=builder /build/app . COPY --from=builder /build/app .
# Do not forget "/v1" in the end
ENV OPENAI_API_BASE_URL="" \
OPENAI_API_TOKEN="" \
TELEGRAM_TOKEN="" \
MODEL_TEXT_REQUEST="llama3.1:8b-instruct-q6_K" \
MODEL_SUMMARIZE_REQUEST="llama3.1:8b-instruct-q6_K"
CMD ["/app/app"] CMD ["/app/app"]

View file

@@ -8,10 +8,8 @@
```shell ```shell
docker run \ docker run \
-e OPENAI_API_TOKEN=123 \ -e OLLAMA_TOKEN=123 \
-e OPENAI_API_BASE_URL=http://ollama.localhost:11434/v1 \ -e OLLAMA_BASE_URL=http://ollama.localhost:11434/v1 \
-e TELEGRAM_TOKEN=12345 \ -e TELEGRAM_TOKEN=12345 \
-e MODEL_TEXT_REQUEST=llama3.1:8b-instruct-q6_K
-e MODEL_SUMMARIZE_REQUEST=mistral-nemo:12b-instruct-2407-q4_K_M
skobkin/telegram-llm-bot skobkin/telegram-llm-bot
``` ```

View file

@@ -18,38 +18,21 @@ var (
ErrHandlerInit = errors.New("cannot initialize handler") ErrHandlerInit = errors.New("cannot initialize handler")
) )
type BotInfo struct {
Id int64
Username string
Name string
}
type Bot struct { type Bot struct {
api *telego.Bot api *telego.Bot
llm *llm.LlmConnector llm *llm.LlmConnector
extractor *extractor.Extractor extractor *extractor.Extractor
stats *stats.Stats stats *stats.Stats
models ModelSelection
history map[int64]*MessageRingBuffer
profile BotInfo
markdownV1Replacer *strings.Replacer markdownV1Replacer *strings.Replacer
} }
func NewBot( func NewBot(api *telego.Bot, llm *llm.LlmConnector, extractor *extractor.Extractor) *Bot {
api *telego.Bot,
llm *llm.LlmConnector,
extractor *extractor.Extractor,
models ModelSelection,
) *Bot {
return &Bot{ return &Bot{
api: api, api: api,
llm: llm, llm: llm,
extractor: extractor, extractor: extractor,
stats: stats.NewStats(), stats: stats.NewStats(),
models: models,
history: make(map[int64]*MessageRingBuffer),
profile: BotInfo{0, "", ""},
markdownV1Replacer: strings.NewReplacer( markdownV1Replacer: strings.NewReplacer(
// https://core.telegram.org/bots/api#markdown-style // https://core.telegram.org/bots/api#markdown-style
@@ -71,12 +54,6 @@ func (b *Bot) Run() error {
slog.Info("Running api as", "id", botUser.ID, "username", botUser.Username, "name", botUser.FirstName, "is_bot", botUser.IsBot) slog.Info("Running api as", "id", botUser.ID, "username", botUser.Username, "name", botUser.FirstName, "is_bot", botUser.IsBot)
b.profile = BotInfo{
Id: botUser.ID,
Username: botUser.Username,
Name: botUser.FirstName,
}
updates, err := b.api.UpdatesViaLongPolling(nil) updates, err := b.api.UpdatesViaLongPolling(nil)
if err != nil { if err != nil {
slog.Error("Cannot get update channel", "error", err) slog.Error("Cannot get update channel", "error", err)
@@ -95,60 +72,133 @@ func (b *Bot) Run() error {
defer b.api.StopLongPolling() defer b.api.StopLongPolling()
// Middlewares // Middlewares
bh.Use(b.chatHistory)
bh.Use(b.chatTypeStatsCounter) bh.Use(b.chatTypeStatsCounter)
// Command handlers // Command handlers
bh.Handle(b.startHandler, th.CommandEqual("start")) bh.Handle(b.startHandler, th.CommandEqual("start"))
bh.Handle(b.summarizeHandler, th.Or(th.CommandEqual("summarize"), th.CommandEqual("s"))) bh.Handle(b.heyHandler, th.CommandEqual("hey"))
bh.Handle(b.summarizeHandler, th.CommandEqual("summarize"))
bh.Handle(b.statsHandler, th.CommandEqual("stats")) bh.Handle(b.statsHandler, th.CommandEqual("stats"))
bh.Handle(b.helpHandler, th.CommandEqual("help")) bh.Handle(b.helpHandler, th.CommandEqual("help"))
bh.Handle(b.textMessageHandler, th.AnyMessageWithText())
// Inline query handlers
bh.Handle(b.inlineHandler, th.AnyInlineQuery())
bh.Start() bh.Start()
return nil return nil
} }
func (b *Bot) textMessageHandler(bot *telego.Bot, update telego.Update) { func (b *Bot) inlineHandler(bot *telego.Bot, update telego.Update) {
slog.Debug("/any-message") iq := update.InlineQuery
slog.Info("inline query received", "query", iq.Query)
message := update.Message slog.Debug("query", "query", iq)
switch { if len(iq.Query) < 3 {
// Mentions return
case b.isMentionOfMe(update): }
slog.Info("/any-message", "type", "mention")
b.processMention(message) b.stats.InlineQuery()
// Replies
case b.isReplyToMe(update): queryParts := strings.SplitN(iq.Query, " ", 2)
slog.Info("/any-message", "type", "reply")
b.processMention(message) if len(queryParts) < 1 {
// Private chat slog.Debug("Empty query. Skipping.")
case b.isPrivateWithMe(update):
slog.Info("/any-message", "type", "private") return
b.processMention(message) }
default:
slog.Debug("/any-message", "info", "Message is not mention, reply or private chat. Skipping.") var response *telego.AnswerInlineQueryParams
switch isValidAndAllowedUrl(queryParts[0]) {
case true:
slog.Info("Inline /summarize request", "url", queryParts[0])
b.stats.SummarizeRequest()
article, err := b.extractor.GetArticleFromUrl(queryParts[0])
if err != nil {
slog.Error("Cannot retrieve an article using extractor", "error", err)
}
llmReply, err := b.llm.Summarize(article.Text, llm.ModelLlama3Uncensored )
if err != nil {
slog.Error("Cannot get reply from LLM connector")
b.trySendInlineQueryError(iq, "LLM request error. Try again later.")
return
}
slog.Debug("Got completion. Going to send.", "llm-completion", llmReply)
response = tu.InlineQuery(
iq.ID,
tu.ResultArticle(
"reply_"+iq.ID,
"Summary for "+queryParts[0],
tu.TextMessage(b.escapeMarkdownV1Symbols(llmReply)).WithParseMode("Markdown"),
),
)
case false:
b.stats.HeyRequest()
slog.Info("Inline /hey request", "text", iq.Query)
requestContext := createLlmRequestContextFromUpdate(update)
llmReply, err := b.llm.HandleSingleRequest(iq.Query, llm.ModelLlama3Uncensored, requestContext)
if err != nil {
slog.Error("Cannot get reply from LLM connector")
b.trySendInlineQueryError(iq, "LLM request error. Try again later.")
return
}
slog.Debug("Got completion. Going to send.", "llm-completion", llmReply)
response = tu.InlineQuery(
iq.ID,
tu.ResultArticle(
"reply_"+iq.ID,
"LLM reply to\""+iq.Query+"\"",
tu.TextMessage(b.escapeMarkdownV1Symbols(llmReply)).WithParseMode("Markdown"),
),
)
}
err := bot.AnswerInlineQuery(response)
if err != nil {
slog.Error("Can't answer to inline query", "error", err)
b.trySendInlineQueryError(iq, "Couldn't send intended reply, sorry")
} }
} }
func (b *Bot) processMention(message *telego.Message) { func (b *Bot) heyHandler(bot *telego.Bot, update telego.Update) {
b.stats.Mention() slog.Info("/hey", "message-text", update.Message.Text)
slog.Info("/mention", "chat", message.Chat.ID) b.stats.HeyRequest()
chatID := tu.ID(message.Chat.ID) parts := strings.SplitN(update.Message.Text, " ", 2)
userMessage := "Hey!"
if len(parts) == 2 {
userMessage = parts[1]
}
chatID := tu.ID(update.Message.Chat.ID)
b.sendTyping(chatID) b.sendTyping(chatID)
requestContext := b.createLlmRequestContextFromMessage(message) requestContext := createLlmRequestContextFromUpdate(update)
llmReply, err := b.llm.HandleChatMessage(message.Text, b.models.TextRequestModel, requestContext) llmReply, err := b.llm.HandleSingleRequest(userMessage, llm.ModelLlama3Uncensored, requestContext)
if err != nil { if err != nil {
slog.Error("Cannot get reply from LLM connector") slog.Error("Cannot get reply from LLM connector")
_, _ = b.api.SendMessage(b.reply(message, tu.Message( _, _ = b.api.SendMessage(b.reply(update.Message, tu.Message(
chatID, chatID,
"LLM request error. Try again later.", "LLM request error. Try again later.",
))) )))
@@ -158,21 +208,17 @@ func (b *Bot) processMention(message *telego.Message) {
slog.Debug("Got completion. Going to send.", "llm-completion", llmReply) slog.Debug("Got completion. Going to send.", "llm-completion", llmReply)
reply := tu.Message( message := tu.Message(
chatID, chatID,
b.escapeMarkdownV1Symbols(llmReply), b.escapeMarkdownV1Symbols(llmReply),
).WithParseMode("Markdown") ).WithParseMode("Markdown")
_, err = b.api.SendMessage(b.reply(message, reply)) _, err = bot.SendMessage(b.reply(update.Message, message))
if err != nil { if err != nil {
slog.Error("Can't send reply message", "error", err) slog.Error("Can't send reply message", "error", err)
b.trySendReplyError(message) b.trySendReplyError(update.Message)
return
} }
b.saveBotReplyToHistory(message, llmReply)
} }
func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) { func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
@@ -213,7 +259,7 @@ func (b *Bot) summarizeHandler(bot *telego.Bot, update telego.Update) {
slog.Error("Cannot retrieve an article using extractor", "error", err) slog.Error("Cannot retrieve an article using extractor", "error", err)
} }
llmReply, err := b.llm.Summarize(article.Text, b.models.SummarizeModel) llmReply, err := b.llm.Summarize(article.Text, llm.ModelMistralUncensored)
if err != nil { if err != nil {
slog.Error("Cannot get reply from LLM connector") slog.Error("Cannot get reply from LLM connector")
@@ -253,9 +299,7 @@ func (b *Bot) helpHandler(bot *telego.Bot, update telego.Update) {
"Instructions:\r\n"+ "Instructions:\r\n"+
"/hey <text> - Ask something from LLM\r\n"+ "/hey <text> - Ask something from LLM\r\n"+
"/summarize <link> - Summarize text from the provided link\r\n"+ "/summarize <link> - Summarize text from the provided link\r\n"+
"/s <link> - Shorter version\r\n"+ "/help - Show this help",
"/help - Show this help\r\n\r\n"+
"Mention bot or reply to it's message to communicate with it",
))) )))
if err != nil { if err != nil {
slog.Error("Cannot send a message", "error", err) slog.Error("Cannot send a message", "error", err)

View file

@@ -39,40 +39,19 @@ func (b *Bot) trySendReplyError(message *telego.Message) {
))) )))
} }
func (b *Bot) isMentionOfMe(update telego.Update) bool { func (b *Bot) trySendInlineQueryError(iq *telego.InlineQuery, text string) {
if update.Message == nil { if iq == nil {
return false return
} }
return strings.Contains(update.Message.Text, "@"+b.profile.Username) _ = b.api.AnswerInlineQuery(tu.InlineQuery(
} iq.ID,
tu.ResultArticle(
func (b *Bot) isReplyToMe(update telego.Update) bool { string("error_"+iq.ID),
message := update.Message "Error: "+text,
tu.TextMessage(text),
if message == nil { ),
return false ))
}
if message.ReplyToMessage == nil {
return false
}
if message.ReplyToMessage.From == nil {
return false
}
replyToMessage := message.ReplyToMessage
return replyToMessage != nil && replyToMessage.From.ID == b.profile.Id
}
func (b *Bot) isPrivateWithMe(update telego.Update) bool {
message := update.Message
if message == nil {
return false
}
return message.Chat.Type == telego.ChatTypePrivate
} }
func isValidAndAllowedUrl(text string) bool { func isValidAndAllowedUrl(text string) bool {

View file

@@ -1,93 +0,0 @@
package bot
import (
"github.com/mymmrac/telego"
"log/slog"
)
const HistoryLength = 150
type Message struct {
Name string
Text string
IsMe bool
}
type MessageRingBuffer struct {
messages []Message
capacity int
}
func NewMessageBuffer(capacity int) *MessageRingBuffer {
return &MessageRingBuffer{
messages: make([]Message, 0, capacity),
capacity: capacity,
}
}
func (b *MessageRingBuffer) Push(element Message) {
if len(b.messages) >= b.capacity {
b.messages = b.messages[1:]
}
b.messages = append(b.messages, element)
}
func (b *MessageRingBuffer) GetAll() []Message {
return b.messages
}
func (b *Bot) saveChatMessageToHistory(message *telego.Message) {
chatId := message.Chat.ID
slog.Info(
"history-message-save",
"chat", chatId,
"from_id", message.From.ID,
"from_name", message.From.FirstName,
"text", message.Text,
)
_, ok := b.history[chatId]
if !ok {
b.history[chatId] = NewMessageBuffer(HistoryLength)
}
b.history[chatId].Push(Message{
Name: message.From.FirstName,
Text: message.Text,
IsMe: false,
})
}
func (b *Bot) saveBotReplyToHistory(message *telego.Message, reply string) {
chatId := message.Chat.ID
slog.Info(
"history-reply-save",
"chat", chatId,
"to_id", message.From.ID,
"to_name", message.From.FirstName,
"text", reply,
)
_, ok := b.history[chatId]
if !ok {
b.history[chatId] = NewMessageBuffer(HistoryLength)
}
b.history[chatId].Push(Message{
Name: b.profile.Username,
Text: reply,
IsMe: true,
})
}
func (b *Bot) getChatHistory(chatId int64) []Message {
_, ok := b.history[chatId]
if !ok {
return make([]Message, 0)
}
return b.history[chatId].GetAll()
}

View file

@@ -10,41 +10,21 @@ func (b *Bot) chatTypeStatsCounter(bot *telego.Bot, update telego.Update, next t
message := update.Message message := update.Message
if message == nil { if message == nil {
slog.Info("stats-middleware: update has no message. skipping.") slog.Info("chat-type-middleware: update has no message. skipping.")
next(bot, update) next(bot, update)
return return
} }
slog.Info("chat-type-middleware: counting message chat type in stats", "type", message.Chat.Type)
switch message.Chat.Type { switch message.Chat.Type {
case telego.ChatTypeGroup, telego.ChatTypeSupergroup: case telego.ChatTypeGroup, telego.ChatTypeSupergroup:
if b.isMentionOfMe(update) || b.isReplyToMe(update) { b.stats.GroupRequest()
slog.Info("stats-middleware: counting message chat type in stats", "type", message.Chat.Type)
b.stats.GroupRequest()
}
case telego.ChatTypePrivate: case telego.ChatTypePrivate:
slog.Info("stats-middleware: counting message chat type in stats", "type", message.Chat.Type)
b.stats.PrivateRequest() b.stats.PrivateRequest()
} }
next(bot, update) next(bot, update)
} }
func (b *Bot) chatHistory(bot *telego.Bot, update telego.Update, next telegohandler.Handler) {
message := update.Message
if message == nil {
slog.Info("chat-history-middleware: update has no message. skipping.")
next(bot, update)
return
}
slog.Info("chat-history-middleware: saving message to history for", "chat_id", message.Chat.ID)
b.saveChatMessageToHistory(message)
next(bot, update)
}

View file

@@ -1,6 +0,0 @@
package bot
type ModelSelection struct {
TextRequestModel string
SummarizeModel string
}

View file

@@ -6,20 +6,33 @@ import (
"telegram-ollama-reply-bot/llm" "telegram-ollama-reply-bot/llm"
) )
func (b *Bot) createLlmRequestContextFromMessage(message *telego.Message) llm.RequestContext { func createLlmRequestContextFromUpdate(update telego.Update) llm.RequestContext {
message := update.Message
iq := update.InlineQuery
rc := llm.RequestContext{ rc := llm.RequestContext{
Empty: true, Empty: true,
Inline: false,
} }
if message == nil { switch {
case message == nil && iq == nil:
slog.Debug("request context creation problem: no message provided. returning empty context.", "request-context", rc) slog.Debug("request context creation problem: no message provided. returning empty context.", "request-context", rc)
return rc return rc
case iq != nil:
rc.Inline = true
} }
rc.Empty = false rc.Empty = false
user := message.From var user *telego.User
if rc.Inline {
user = &iq.From
} else {
user = message.From
}
if user != nil { if user != nil {
rc.User = llm.UserContext{ rc.User = llm.UserContext{
@@ -30,39 +43,16 @@ func (b *Bot) createLlmRequestContextFromMessage(message *telego.Message) llm.Re
} }
} }
// TODO: implement retrieval of chat description if !rc.Inline {
chat := message.Chat chat := message.Chat
rc.Chat = llm.ChatContext{
history := b.getChatHistory(chat.ID) Title: chat.Title,
Description: chat.Description,
rc.Chat = llm.ChatContext{ Type: chat.Type,
Title: chat.Title, }
// TODO: fill when ChatFullInfo retrieved
//Description: chat.Description,
Type: chat.Type,
History: historyToLlmMessages(history),
} }
slog.Debug("request context created", "request-context", rc) slog.Debug("request context created", "request-context", rc)
return rc return rc
} }
func historyToLlmMessages(history []Message) []llm.ChatMessage {
length := len(history)
if length > 0 {
result := make([]llm.ChatMessage, 0, length)
for _, msg := range history {
result = append(result, llm.ChatMessage{
Name: msg.Name,
Text: msg.Text,
})
}
return result
}
return make([]llm.ChatMessage, 0)
}

View file

@@ -5,13 +5,14 @@ import (
"errors" "errors"
"github.com/sashabaranov/go-openai" "github.com/sashabaranov/go-openai"
"log/slog" "log/slog"
"strconv"
"strings"
) )
var ( var (
ErrLlmBackendRequestFailed = errors.New("llm back-end request failed") ErrLlmBackendRequestFailed = errors.New("llm back-end request failed")
ErrNoChoices = errors.New("no choices in LLM response") ErrNoChoices = errors.New("no choices in LLM response")
ModelMistralUncensored = "dolphin-mistral:7b-v2.8-q4_K_M"
ModelLlama3Uncensored = "dolphin-llama3:8b-v2.9-q4_K_M"
) )
type LlmConnector struct { type LlmConnector struct {
@@ -29,15 +30,13 @@ func NewConnector(baseUrl string, token string) *LlmConnector {
} }
} }
func (l *LlmConnector) HandleChatMessage(text string, model string, requestContext RequestContext) (string, error) { func (l *LlmConnector) HandleSingleRequest(text string, model string, requestContext RequestContext) (string, error) {
systemPrompt := "You're a bot in the Telegram chat.\n" + systemPrompt := "You're a bot in the Telegram chat. " +
"You're using a free model called \"" + model + "\".\n\n" + "You're using a free model called \"" + model + "\". " +
requestContext.Prompt() "You see only messages addressed to you using commands due to privacy settings."
historyLength := len(requestContext.Chat.History) if !requestContext.Empty {
systemPrompt += " " + requestContext.Prompt()
if historyLength > 0 {
systemPrompt += "\nYou have access to last " + strconv.Itoa(historyLength) + "messages in this chat."
} }
req := openai.ChatCompletionRequest{ req := openai.ChatCompletionRequest{
@@ -50,26 +49,6 @@ func (l *LlmConnector) HandleChatMessage(text string, model string, requestConte
}, },
} }
if historyLength > 0 {
for _, msg := range requestContext.Chat.History {
var msgRole string
var msgText string
if msg.IsMe {
msgRole = openai.ChatMessageRoleAssistant
msgText = msg.Text
} else {
msgRole = openai.ChatMessageRoleSystem
msgText = "User " + msg.Name + " said:\n" + msg.Text
}
req.Messages = append(req.Messages, openai.ChatCompletionMessage{
Role: msgRole,
Content: msgText,
})
}
}
req.Messages = append(req.Messages, openai.ChatCompletionMessage{ req.Messages = append(req.Messages, openai.ChatCompletionMessage{
Role: openai.ChatMessageRoleUser, Role: openai.ChatMessageRoleUser,
Content: text, Content: text,
@@ -100,10 +79,9 @@ func (l *LlmConnector) Summarize(text string, model string) (string, error) {
{ {
Role: openai.ChatMessageRoleSystem, Role: openai.ChatMessageRoleSystem,
Content: "You're a text shortener. Give a very brief summary of the main facts " + Content: "You're a text shortener. Give a very brief summary of the main facts " +
"point by point. Format them as a list of bullet points each starting with \"-\". " + "point by point. Format them as a list of bullet points. " +
"Avoid any commentaries and value judgement on the matter. " + "Avoid any commentaries and value judgement on the matter. " +
"If possible, respond in the same language as the original text." + "If possible, use the same language as the original text.",
"Do not use any non-ASCII characters.",
}, },
}, },
} }
@@ -130,41 +108,3 @@ func (l *LlmConnector) Summarize(text string, model string) (string, error) {
return resp.Choices[0].Message.Content, nil return resp.Choices[0].Message.Content, nil
} }
func (l *LlmConnector) GetModels() []string {
var result []string
models, err := l.client.ListModels(context.Background())
if err != nil {
slog.Error("llm: Model list request failed", "error", err)
return result
}
slog.Info("Model list retrieved", "models", models)
for _, model := range models.Models {
result = append(result, model.ID)
}
return result
}
func (l *LlmConnector) HasModel(id string) bool {
model, err := l.client.GetModel(context.Background(), id)
if err != nil {
slog.Error("llm: Model request failed", "error", err)
}
slog.Debug("llm: Returned model", "model", model)
if model.ID != "" {
return true
}
return false
}
func quoteMessage(text string) string {
return "> " + strings.ReplaceAll(text, "\n", "\n> ")
}

View file

@@ -1,9 +1,10 @@
package llm package llm
type RequestContext struct { type RequestContext struct {
Empty bool Empty bool
User UserContext Inline bool
Chat ChatContext User UserContext
Chat ChatContext
} }
type UserContext struct { type UserContext struct {
@@ -17,13 +18,6 @@ type ChatContext struct {
Title string Title string
Description string Description string
Type string Type string
History []ChatMessage
}
type ChatMessage struct {
Name string
Text string
IsMe bool
} }
func (c RequestContext) Prompt() string { func (c RequestContext) Prompt() string {
@@ -32,33 +26,29 @@ func (c RequestContext) Prompt() string {
} }
prompt := "" prompt := ""
if !c.Inline {
prompt += "The type of chat you're in is \"" + c.Chat.Type + "\". "
prompt += "The type of chat you're in is \"" + c.Chat.Type + "\". " if c.Chat.Title != "" {
prompt += "Chat is called \"" + c.Chat.Title + "\". "
if c.Chat.Type == "group" || c.Chat.Type == "supergroup" { }
prompt += "Please consider that there are several users in this chat type who may discuss several unrelated " + if c.Chat.Description != "" {
"topics. Try to respond only about the topic you were asked about and only to the user who asked you, " + prompt += "Chat description is \"" + c.Chat.Description + "\". "
"but keep in mind another chat history. " }
} else {
prompt += "You're responding to inline query, so you're not in the chat right now. "
} }
if c.Chat.Title != "" { prompt += "According to their profile, first name of the user who wrote you is \"" + c.User.FirstName + "\". "
prompt += "\nChat is called \"" + c.Chat.Title + "\". "
}
if c.Chat.Description != "" {
prompt += "Chat description is \"" + c.Chat.Description + "\". "
}
prompt += "\nProfile of the user who mentioned you in the chat:" +
"First name: \"" + c.User.FirstName + "\"\n"
if c.User.Username != "" { if c.User.Username != "" {
prompt += "Username: @" + c.User.Username + ".\n" prompt += "Their username is @" + c.User.Username + ". "
} }
if c.User.LastName != "" { if c.User.LastName != "" {
prompt += "Last name: \"" + c.User.LastName + "\"\n" prompt += "Their last name is \"" + c.User.LastName + "\". "
}
if c.User.IsPremium {
prompt += "They have Telegram Premium subscription. "
} }
//if c.User.IsPremium {
// prompt += "Telegram Premium subscription: active."
//}
return prompt return prompt
} }

27
main.go
View file

@@ -12,31 +12,12 @@ import (
) )
func main() { func main() {
apiToken := os.Getenv("OPENAI_API_TOKEN") ollamaToken := os.Getenv("OLLAMA_TOKEN")
apiBaseUrl := os.Getenv("OPENAI_API_BASE_URL") ollamaBaseUrl := os.Getenv("OLLAMA_BASE_URL")
models := bot.ModelSelection{
TextRequestModel: os.Getenv("MODEL_TEXT_REQUEST"),
SummarizeModel: os.Getenv("MODEL_SUMMARIZE_REQUEST"),
}
slog.Info("Selected", "models", models)
telegramToken := os.Getenv("TELEGRAM_TOKEN") telegramToken := os.Getenv("TELEGRAM_TOKEN")
llmc := llm.NewConnector(apiBaseUrl, apiToken) llmc := llm.NewConnector(ollamaBaseUrl, ollamaToken)
slog.Info("Checking models availability")
for _, model := range []string{models.TextRequestModel, models.SummarizeModel} {
if !llmc.HasModel(model) {
slog.Error("Model not unavailable", "model", model)
os.Exit(1)
}
}
slog.Info("All needed models are available")
ext := extractor.NewExtractor() ext := extractor.NewExtractor()
telegramApi, err := tg.NewBot(telegramToken, tg.WithLogger(bot.NewLogger("telego: "))) telegramApi, err := tg.NewBot(telegramToken, tg.WithLogger(bot.NewLogger("telego: ")))
@@ -45,7 +26,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
botService := bot.NewBot(telegramApi, llmc, ext, models) botService := bot.NewBot(telegramApi, llmc, ext)
err = botService.Run() err = botService.Run()
if err != nil { if err != nil {

View file

@@ -15,7 +15,7 @@ type Stats struct {
PrivateRequests uint64 PrivateRequests uint64
InlineQueries uint64 InlineQueries uint64
Mentions uint64 HeyRequests uint64
SummarizeRequests uint64 SummarizeRequests uint64
} }
@@ -27,7 +27,7 @@ func NewStats() *Stats {
PrivateRequests: 0, PrivateRequests: 0,
InlineQueries: 0, InlineQueries: 0,
Mentions: 0, HeyRequests: 0,
SummarizeRequests: 0, SummarizeRequests: 0,
} }
} }
@@ -40,7 +40,7 @@ func (s *Stats) MarshalJSON() ([]byte, error) {
PrivateRequests uint64 `json:"private_requests"` PrivateRequests uint64 `json:"private_requests"`
InlineQueries uint64 `json:"inline_queries"` InlineQueries uint64 `json:"inline_queries"`
Mentions uint64 `json:"mentions"` HeyRequests uint64 `json:"hey_requests"`
SummarizeRequests uint64 `json:"summarize_requests"` SummarizeRequests uint64 `json:"summarize_requests"`
}{ }{
Uptime: time.Now().Sub(s.RunningSince).String(), Uptime: time.Now().Sub(s.RunningSince).String(),
@@ -49,7 +49,7 @@ func (s *Stats) MarshalJSON() ([]byte, error) {
PrivateRequests: s.PrivateRequests, PrivateRequests: s.PrivateRequests,
InlineQueries: s.InlineQueries, InlineQueries: s.InlineQueries,
Mentions: s.Mentions, HeyRequests: s.HeyRequests,
SummarizeRequests: s.SummarizeRequests, SummarizeRequests: s.SummarizeRequests,
}) })
} }
@@ -81,10 +81,10 @@ func (s *Stats) PrivateRequest() {
s.PrivateRequests++ s.PrivateRequests++
} }
func (s *Stats) Mention() { func (s *Stats) HeyRequest() {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
s.Mentions++ s.HeyRequests++
} }
func (s *Stats) SummarizeRequest() { func (s *Stats) SummarizeRequest() {