telegram-ollama-reply-bot/llm/llm.go

package llm

import (
	"context"
	"errors"
	"log/slog"
	"slices"
	"strconv"

	"github.com/sashabaranov/go-openai"
)

var (
	ErrLlmBackendRequestFailed = errors.New("llm back-end request failed")
	ErrNoChoices               = errors.New("no choices in LLM response")
)
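
// LlmConnector talks to an OpenAI-compatible LLM back-end (such as Ollama's
// OpenAI-compatible API) through a go-openai client.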
type LlmConnector struct {
	client *openai.Client
}
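
// NewConnector builds an LlmConnector for the OpenAI-compatible endpoint at
// baseUrl, authenticating requests with token.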
func NewConnector(baseUrl string, token string) *LlmConnector {
	config := openai.DefaultConfig(token)
	config.BaseURL = baseUrl

	client := openai.NewClientWithConfig(config)

	return &LlmConnector{
		client: client,
	}
}
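
// HandleChatMessage sends userMessage to the given model along with a system
// prompt and any chat history carried in requestContext, and returns the
// model's reply text.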
func (l *LlmConnector) HandleChatMessage(userMessage ChatMessage, model string, requestContext RequestContext) (string, error) {
	systemPrompt := "You're a bot in the Telegram chat.\n" +
		"You're using a free model called \"" + model + "\".\n\n" +
		requestContext.Prompt()

	historyLength := len(requestContext.Chat.History)

	if historyLength > 0 {
		systemPrompt += "\nYou have access to the last " + strconv.Itoa(historyLength) + " messages in this chat."
	}

	req := openai.ChatCompletionRequest{
		Model: model,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleSystem,
				Content: systemPrompt,
			},
		},
	}

	for _, msg := range requestContext.Chat.History {
		req.Messages = append(req.Messages, chatMessageToOpenAiChatCompletionMessage(msg))
	}

	req.Messages = append(req.Messages, chatMessageToOpenAiChatCompletionMessage(userMessage))

	resp, err := l.client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		slog.Error("llm: LLM back-end request failed", "error", err)

		return "", ErrLlmBackendRequestFailed
	}

	slog.Debug("llm: Received LLM back-end response", "response", resp)

	if len(resp.Choices) < 1 {
		slog.Error("llm: LLM back-end reply has no choices")

		return "", ErrNoChoices
	}

	return resp.Choices[0].Message.Content, nil
}
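
// Summarize asks the given model to condense text into a brief bullet-point
// summary and returns the result.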
func (l *LlmConnector) Summarize(text string, model string) (string, error) {
	req := openai.ChatCompletionRequest{
		Model: model,
		Messages: []openai.ChatCompletionMessage{
			{
				Role: openai.ChatMessageRoleSystem,
Content: "You're a text shortener. Give a very brief summary of the main facts " +
"point by point. Format them as a list of bullet points each starting with \"-\". " +
"Avoid any commentaries and value judgement on the matter. " +
"If possible, respond in the same language as the original text." +
"Do not use any non-ASCII characters.",
},
},
}
	req.Messages = append(req.Messages, openai.ChatCompletionMessage{
		Role:    openai.ChatMessageRoleUser,
		Content: text,
	})

	resp, err := l.client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		slog.Error("llm: LLM back-end request failed", "error", err)

		return "", ErrLlmBackendRequestFailed
	}

	slog.Debug("llm: Received LLM back-end response", "response", resp)

	if len(resp.Choices) < 1 {
		slog.Error("llm: LLM back-end reply has no choices")

		return "", ErrNoChoices
	}

	return resp.Choices[0].Message.Content, nil
}
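
// HasAllModels reports whether every model in modelIds is available on the
// back-end, along with a per-model lookup map.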
func (l *LlmConnector) HasAllModels(modelIds []string) (bool, map[string]bool) {
	searchResult := make(map[string]bool, len(modelIds))
	for _, modelId := range modelIds {
		searchResult[modelId] = false
	}

	modelList, err := l.client.ListModels(context.Background())
	if err != nil {
		slog.Error("llm: Model list request failed", "error", err)

		return false, searchResult
	}

	slog.Info("llm: Returned model list", "models", modelList)
	slog.Info("llm: Checking for requested models", "requested", modelIds)

	for _, model := range modelList.Models {
		if slices.Contains(modelIds, model.ID) {
			searchResult[model.ID] = true
		}
	}

	for _, v := range searchResult {
		if !v {
			return false, searchResult
		}
	}

	return true, searchResult
}