226 lines
6.4 KiB
Python
226 lines
6.4 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Ollama 服务测试
|
|
"""
|
|
|
|
import asyncio
|
|
import sys
|
|
import os
|
|
|
|
# Add the project root directory to the Python path so `src` imports resolve.
|
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
|
|
from src.services.ollama_service import OllamaService
|
|
|
|
|
|
async def test_ollama_health():
    """Check the Ollama service health endpoint and report its details."""
    print("🧪 测试 Ollama 健康状态")
    print("-" * 30)

    service = OllamaService()

    try:
        healthy = await service.health_check()
        # Report the boolean verdict first, then the detailed payload.
        print("✅ Ollama 服务健康" if healthy else "❌ Ollama 服务不健康")

        details = await service.get_health_info()
        print(f"📊 健康信息: {details}")

    except Exception as e:
        print(f"❌ 健康检查异常: {e}")
async def test_list_models():
    """List the installed models and print each name with its size in MB."""
    print("\n🧪 测试获取模型列表")
    print("-" * 30)

    service = OllamaService()

    try:
        models = await service.list_models()
        print(f"✅ 获取到 {len(models)} 个模型:")

        for entry in models:
            model_name = entry.get('name', 'unknown')
            byte_size = entry.get('size', 0)
            # Convert bytes to MB; treat a missing/zero size as 0.
            megabytes = byte_size / (1024 * 1024) if byte_size > 0 else 0
            print(f"  📦 {model_name} ({megabytes:.1f} MB)")

    except Exception as e:
        print(f"❌ 获取模型列表异常: {e}")
async def test_simple_chat():
    """Exercise a single non-streaming chat round-trip."""
    print("\n🧪 测试简单聊天")
    print("-" * 30)

    service = OllamaService()

    try:
        conversation = [{"role": "user", "content": "你好,请简单介绍一下你自己"}]

        print("📝 用户: 你好,请简单介绍一下你自己")
        print("🤖 AI: ", end="", flush=True)

        reply = await service.chat(conversation)
        print(reply)
        print("✅ 简单聊天测试通过")

    except Exception as e:
        print(f"❌ 简单聊天异常: {e}")
async def test_streaming_chat():
    """Exercise the streaming chat API and count the chunks received."""
    print("\n🧪 测试流式聊天")
    print("-" * 30)

    service = OllamaService()

    try:
        conversation = [{"role": "user", "content": "请用一句话介绍人工智能"}]

        print("📝 用户: 请用一句话介绍人工智能")
        print("🤖 AI: ", end="", flush=True)

        received = 0
        # Echo each chunk as it arrives so output streams to the console.
        async for piece in service.chat_streaming(conversation):
            print(piece, end="", flush=True)
            received += 1

        print(f"\n✅ 流式聊天测试通过,收到 {received} 个数据块")

    except Exception as e:
        print(f"❌ 流式聊天异常: {e}")
async def test_router_model():
    """Run the intent-analysis (router) model over a few sample utterances."""
    print("\n🧪 测试路由模型")
    print("-" * 30)

    service = OllamaService()

    sample_inputs = (
        "我想查询订单 ORD12345",
        "你好,今天天气怎么样?",
        "帮我退货",
        "谢谢你的帮助",
    )

    try:
        for utterance in sample_inputs:
            print(f"📝 用户: {utterance}")

            analysis = await service.analyze_intent(utterance)
            intent = analysis.get('intent', 'unknown')
            confidence = analysis.get('confidence', 0.0)

            print(f"🎯 意图: {intent} (置信度: {confidence:.2f})")
            print("-" * 20)

        print("✅ 路由模型测试通过")

    except Exception as e:
        print(f"❌ 路由模型异常: {e}")
async def test_model_availability():
    """Report whether the chat and router models are installed locally.

    Prints an availability line per model and, when a model is missing,
    the `ollama pull` command needed to fetch it.
    """
    print("\n🧪 测试模型可用性")
    print("-" * 30)

    service = OllamaService()

    try:
        # Chat model
        chat_available = await service.is_model_available(service.chat_model)
        chat_icon = "✅" if chat_available else "❌"
        print(f"{chat_icon} 聊天模型 ({service.chat_model}): {'可用' if chat_available else '不可用'}")

        # Router model
        router_available = await service.is_model_available(service.router_model)
        router_icon = "✅" if router_available else "❌"
        print(f"{router_icon} 路由模型 ({service.router_model}): {'可用' if router_available else '不可用'}")

        if not chat_available or not router_available:
            print("\n💡 如果模型不可用,请运行以下命令下载:")
            if not chat_available:
                print(f"   ollama pull {service.chat_model}")
            if not router_available:
                print(f"   ollama pull {service.router_model}")

    except Exception as e:
        # Fix: every sibling test reports errors locally; without this catch,
        # a transport failure here would propagate to main()'s handler and
        # abort the remaining tests.
        print(f"❌ 模型可用性检查异常: {e}")
async def test_error_handling():
    """Verify the service surfaces errors for bad models and empty input."""
    print("\n🧪 测试错误处理")
    print("-" * 30)

    service = OllamaService()

    try:
        # Case 1: chatting with a model that does not exist must raise.
        print("📝 测试不存在的模型...")
        original_model = service.chat_model
        service.chat_model = "nonexistent-model"

        try:
            await service.chat([{"role": "user", "content": "test"}])
            print("❌ 应该抛出异常但没有")
        except Exception as e:
            print(f"✅ 正确处理了模型不存在的错误: {type(e).__name__}")
        finally:
            # Always restore the real model so later tests are unaffected.
            service.chat_model = original_model

        # Case 2: an empty message list must raise.
        print("📝 测试空消息...")
        try:
            await service.chat([])
            print("❌ 应该抛出异常但没有")
        except Exception as e:
            print(f"✅ 正确处理了空消息错误: {type(e).__name__}")

        print("✅ 错误处理测试通过")

    except Exception as e:
        print(f"❌ 错误处理测试异常: {e}")
async def main():
    """Run every Ollama service test in order, exiting 1 on failure."""
    print("🚀 开始测试 Ollama 服务")
    print("=" * 50)

    try:
        # Ordered suite: basic checks, then features, then error handling.
        suite = (
            test_ollama_health,
            test_list_models,
            test_model_availability,
            test_simple_chat,
            test_streaming_chat,
            test_router_model,
            test_error_handling,
        )
        for test in suite:
            await test()

        print("\n🎊 所有 Ollama 测试通过!")

    except Exception as e:
        print(f"\n💥 测试过程中出现错误: {e}")
        sys.exit(1)
if __name__ == "__main__":
|
|
asyncio.run(main()) |