fix: use ollama cloud models in production
parent a155cb1e01
commit 88ed4ff714
@@ -5,9 +5,12 @@ server:
 ollama:
   base_url: "http://172.17.0.1:11434"
-  model: "qwen3:8b"
-  generate_model: "qwen3:8b"
-  mapping_model: "qwen3:8b"
+  # model: "qwen3:8b"
+  # generate_model: "qwen3:8b"
+  # mapping_model: "qwen3:8b"
+  model: "qwen3-coder:480b-cloud"
+  generate_model: "qwen3-coder:480b-cloud"
+  mapping_model: "deepseek-v3.2:cloud"
   vl_model: "qwen2.5vl:3b"
   timeout: "120s"
   level: "info"
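For reference, after this commit the ollama block of the config would read roughly as below. This is a minimal sketch assembled from the diff hunk: the indentation, the grouping of the trailing keys (timeout, level) under ollama, and the enclosing server: section are assumptions, not shown in full in the diff. The local qwen3:8b entries are kept as comments, which makes rolling back to the local models a matter of swapping the comment markers.

ollama:
  base_url: "http://172.17.0.1:11434"
  # model: "qwen3:8b"
  # generate_model: "qwen3:8b"
  # mapping_model: "qwen3:8b"
  model: "qwen3-coder:480b-cloud"
  generate_model: "qwen3-coder:480b-cloud"
  mapping_model: "deepseek-v3.2:cloud"
  vl_model: "qwen2.5vl:3b"
  timeout: "120s"
  level: "info"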