package ollama
import (
	"context"

	eino_ollama "github.com/cloudwego/eino-ext/components/model/ollama"
	eino_model "github.com/cloudwego/eino/components/model"
	"github.com/cloudwego/eino/schema"

	"ai_scheduler/internal/domain/llm"
)

// Adapter is a stateless bridge between the domain llm contract and the
// eino-ext Ollama chat model; all configuration arrives per call via
// llm.Options, so the zero value is fully usable.
type Adapter struct{}
func New() *Adapter { return &Adapter{} }
|
|
|
|
func (a *Adapter) Generate(ctx context.Context, input []*schema.Message, opts llm.Options) (*schema.Message, error) {
|
|
cm, err := eino_ollama.NewChatModel(ctx, &eino_ollama.ChatModelConfig{
|
|
BaseURL: opts.Endpoint,
|
|
Timeout: opts.Timeout,
|
|
Model: opts.Model,
|
|
Options: &eino_ollama.Options{Temperature: opts.Temperature, NumPredict: opts.MaxTokens},
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
var mopts []eino_model.Option
|
|
if opts.Temperature != 0 {
|
|
mopts = append(mopts, eino_model.WithTemperature(opts.Temperature))
|
|
}
|
|
if opts.MaxTokens > 0 {
|
|
mopts = append(mopts, eino_model.WithMaxTokens(opts.MaxTokens))
|
|
}
|
|
if opts.Model != "" {
|
|
mopts = append(mopts, eino_model.WithModel(opts.Model))
|
|
}
|
|
if opts.TopP != 0 {
|
|
mopts = append(mopts, eino_model.WithTopP(opts.TopP))
|
|
}
|
|
if len(opts.Stop) > 0 {
|
|
mopts = append(mopts, eino_model.WithStop(opts.Stop))
|
|
}
|
|
return cm.Generate(ctx, input, mopts...)
|
|
}
|
|
|
|
func (a *Adapter) Stream(ctx context.Context, input []*schema.Message, opts llm.Options) (*schema.StreamReader[*schema.Message], error) {
|
|
cm, err := eino_ollama.NewChatModel(ctx, &eino_ollama.ChatModelConfig{
|
|
BaseURL: opts.Endpoint,
|
|
Timeout: opts.Timeout,
|
|
Model: opts.Model,
|
|
Options: &eino_ollama.Options{Temperature: opts.Temperature, NumPredict: opts.MaxTokens},
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
var mopts []eino_model.Option
|
|
if opts.Temperature != 0 {
|
|
mopts = append(mopts, eino_model.WithTemperature(opts.Temperature))
|
|
}
|
|
if opts.MaxTokens > 0 {
|
|
mopts = append(mopts, eino_model.WithMaxTokens(opts.MaxTokens))
|
|
}
|
|
if opts.Model != "" {
|
|
mopts = append(mopts, eino_model.WithModel(opts.Model))
|
|
}
|
|
if opts.TopP != 0 {
|
|
mopts = append(mopts, eino_model.WithTopP(opts.TopP))
|
|
}
|
|
if len(opts.Stop) > 0 {
|
|
mopts = append(mopts, eino_model.WithStop(opts.Stop))
|
|
}
|
|
return cm.Stream(ctx, input, mopts...)
|
|
}
|