import os

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    # App
    APP_TITLE: str = "LightRAG Knowledge Base API"
    APP_VERSION: str = "1.0"
    HOST: str = "0.0.0.0"
    PORT: int = 9600

    # Data
    DATA_DIR: str = "./index_data"

    # LLM (Text) - vLLM
    LLM_BINDING: str = "vllm"  # ollama, vllm, openai
    LLM_BINDING_HOST: str = "http://192.168.6.115:8002/v1"  # vLLM OpenAI API base
    LLM_MODEL: str = "qwen2.5-7b-awq"
    LLM_KEY: str = "EMPTY"  # vLLM default key

    # LLM (Vision) - vLLM
    VL_BINDING_HOST: str = "http://192.168.6.115:8001/v1"
    VL_MODEL: str = "qwen2.5-vl-3b-awq"
    VL_KEY: str = "EMPTY"

    # Embedding - TEI
    EMBEDDING_BINDING: str = "tei"  # ollama, tei, openai
    EMBEDDING_BINDING_HOST: str = "http://192.168.6.115:8003"  # TEI usually exposes /embed
    EMBEDDING_MODEL: str = "BAAI/bge-m3"  # model id in TEI
    EMBEDDING_KEY: str = "EMPTY"

    # Rerank - TEI
    RERANK_ENABLED: bool = True
    RERANK_BINDING_HOST: str = "http://192.168.6.115:8004"
    RERANK_MODEL: str = "BAAI/bge-reranker-v2-m3"
    RERANK_KEY: str = "EMPTY"

    # RAG Config
    EMBEDDING_DIM: int = 1024
    MAX_TOKEN_SIZE: int = 8192
    MAX_RAG_INSTANCES: int = 3  # maximum number of active RAG instances
    COSINE_THRESHOLD: float = 0.4  # similarity threshold for vector retrieval

    # Admin & Security
    ADMIN_TOKEN: str = "fzy"

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
        extra = "ignore"  # ignore extra environment variables


settings = Settings()
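

# Usage sketch (not part of the original file): with pydantic-settings (pydantic v2),
# each field above is resolved from the process environment first, then the .env file
# declared in Config, then the class default. A hypothetical .env could read:
#
#   LLM_BINDING_HOST=http://127.0.0.1:8002/v1
#   RERANK_ENABLED=false
#   ADMIN_TOKEN=change-me
#
# Running this module directly prints the resolved configuration for local debugging
# (secrets included, so do not use it in production logs).
if __name__ == "__main__":
    for name, value in settings.model_dump().items():
        print(f"{name} = {value!r}")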