renzhiyuan 2025-08-20 13:40:52 +08:00
commit 7845b82718
10 changed files with 1541 additions and 0 deletions

8
.idea/.gitignore vendored Normal file

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

8
.idea/ChatWithMe.iml Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="311" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

338
app.py Normal file

@@ -0,0 +1,338 @@
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
from pydantic_settings import BaseSettings
from pydantic import BaseModel, Field
from funasr import AutoModel
import numpy as np
import argparse
import uvicorn
from urllib.parse import parse_qs
from loguru import logger
import sys
import re
import json
import traceback
import time
from modelscope import AutoModelForCausalLM, AutoTokenizer
import asyncio
import tempfile
import os
logger.remove()
log_format = "{time:YYYY-MM-DD HH:mm:ss} [{level}] {file}:{line} - {message}"
logger.add(sys.stdout, format=log_format, level="DEBUG", filter=lambda record: record["level"].no < 40)
logger.add(sys.stderr, format=log_format, level="ERROR", filter=lambda record: record["level"].no >= 40)
class Config(BaseSettings):
chunk_size_ms: int = Field(300, description="Chunk size in milliseconds")
sample_rate: int = Field(16000, description="Sample rate in Hz")
model_chat: str = Field("Qwen/Qwen3-0.6B", description="model")
    sys_set: str = Field("你是一个感情丰富的聊天机器人,你名字叫蓝哥,回答内容必须是中文,只回答中文,不要有颜文字,也不要有表情符号,每次的回答内容必须超过20个文字", description="system prompt")  # Optional prompt addition: when the user's input is incomplete or ambiguous, or you are unsure how to answer, reply "没听懂"
    tts_rate: str = Field("+20%", description="tts rate")
    tts_pitch: str = Field("+10Hz", description="tts pitch")
    voice: str = Field("zh-CN-XiaoxiaoNeural", description="voice")
config = Config()
messages = [
{"role": "system", "content": config.sys_set}
]
model_asr = AutoModel(
model="iic/SenseVoiceSmall",
trust_remote_code=True,
remote_code="./model.py",
device="cuda:0",
disable_update=True
)
model_vad = AutoModel(
model="fsmn-vad",
model_revision="v2.0.4",
disable_pbar=True,
max_end_silence_time=800,#500
# speech_noise_thres=0.6,
disable_update=True,
)
# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(config.model_chat)
model_qw = AutoModelForCausalLM.from_pretrained(
config.model_chat,
torch_dtype="auto",
device_map="auto"
)
def asr(audio, lang, cache, use_itn=False):
start_time = time.time()
result = model_asr.generate(
input=audio,
cache=cache,
language=lang.strip(),
use_itn=use_itn,
batch_size_s=60,
)
end_time = time.time()
elapsed_time = end_time - start_time
logger.debug(f"asr elapsed: {elapsed_time * 1000:.2f} milliseconds")
return result
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.exception_handler(Exception)
async def custom_exception_handler(request: Request, exc: Exception):
logger.error("Exception occurred", exc_info=True)
if isinstance(exc, HTTPException):
status_code = exc.status_code
message = exc.detail
data = ""
elif isinstance(exc, RequestValidationError):
status_code = HTTP_422_UNPROCESSABLE_ENTITY
message = "Validation error: " + str(exc.errors())
data = ""
else:
status_code = 500
message = "Internal server error: " + str(exc)
data = ""
return JSONResponse(
status_code=status_code,
content=TranscriptionResponse(
code=status_code,
            info=message,
data=data
).model_dump()
)
# Define the response model
class TranscriptionResponse(BaseModel):
code: int
info: str
data: str
async def text_to_speech_stream_with_wav(text: str, websocket: WebSocket):
    # Create a temporary MP3 file (edge-tts output)
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_mp3:
temp_mp3_path = temp_mp3.name
try:
        # Launch edge-tts, writing its output to the temporary MP3 file
edge_tts_process = await asyncio.create_subprocess_exec(
"edge-tts",
"--text", text,
"--voice", config.voice,
"--rate", config.tts_rate,
"--pitch", config.tts_pitch,
"--write-media", temp_mp3_path,
stderr=asyncio.subprocess.PIPE,
)
await edge_tts_process.wait()
if edge_tts_process.returncode != 0:
error_msg = await edge_tts_process.stderr.read()
            raise RuntimeError(f"TTS generation failed: {error_msg.decode()}")
ffmpeg_process = await asyncio.create_subprocess_exec(
"ffmpeg",
"-i", temp_mp3_path,
"-f", "s16le", # 输出原始 16-bit PCM
"-ar", "16000", # 采样率 16kHz
"-ac", "1", # 单声道
"-loglevel", "warning",
"pipe:1",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
        # Stream the PCM data to the frontend
while True:
chunk = await ffmpeg_process.stdout.read(4096)
if not chunk:
break
await websocket.send_bytes(chunk)
await ffmpeg_process.wait()
if ffmpeg_process.returncode != 0:
error_msg = await ffmpeg_process.stderr.read()
            raise RuntimeError(f"FFmpeg conversion failed: {error_msg.decode()}")
        # Send an EOF marker (with the WAV format parameters)
await websocket.send_json({
"type": "EOF",
"sampleRate": 16000,
"numChannels": 1,
"bitDepth": 16,
})
finally:
        # Clean up the temporary MP3 file
try:
if os.path.exists(temp_mp3_path):
os.unlink(temp_mp3_path)
except Exception as e:
print(f"临时文件清理失败: {e}")
async def text_to_speech_stream(text: str, websocket: WebSocket):
process = await asyncio.create_subprocess_exec(
"edge-tts",
"--text", text,
"--voice", config.voice,
"--rate", config.tts_rate,
"--pitch", config.tts_pitch,
        stdout=asyncio.subprocess.PIPE,  # read the audio stream directly from stdout
stderr=asyncio.subprocess.PIPE,
)
    # Read the audio stream from the process stdout and forward it
    while chunk := await process.stdout.read(1024):  # 1 KB chunks
await websocket.send_bytes(chunk)
    # Wait for the process to exit
await process.wait()
@app.websocket("/ws/transcribe")
async def websocket_endpoint(websocket: WebSocket):
try:
query_params = parse_qs(websocket.scope['query_string'].decode())
sv = query_params.get('sv', ['false'])[0].lower() in ['true', '1', 't', 'y', 'yes']
lang = query_params.get('lang', ['auto'])[0].lower()
await websocket.accept()
chunk_size = int(config.chunk_size_ms * config.sample_rate / 1000)
audio_buffer = np.array([], dtype=np.float32)
audio_vad = np.array([], dtype=np.float32)
cache = {}
cache_asr = {}
last_vad_beg = last_vad_end = -1
offset = 0
buffer = b""
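        # Protocol (as implemented below): the client streams raw 16-bit little-endian
        # PCM at 16 kHz. Incoming bytes are buffered, converted to float32 in [-1, 1],
        # cut into chunk_size_ms chunks for streaming VAD, and each completed speech
        # segment is passed to ASR, then to the chat model, then to TTS.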
while True:
data = await websocket.receive_bytes()
# logger.info(f"received {len(data)} bytes")
buffer += data
if len(buffer) < 2:
continue
audio_buffer = np.append(
audio_buffer,
np.frombuffer(buffer[:len(buffer) - (len(buffer) % 2)], dtype=np.int16).astype(np.float32) / 32767.0
)
buffer = buffer[len(buffer) - (len(buffer) % 2):]
while len(audio_buffer) >= chunk_size:
chunk = audio_buffer[:chunk_size]
audio_buffer = audio_buffer[chunk_size:]
audio_vad = np.append(audio_vad, chunk)
res = model_vad.generate(input=chunk, cache=cache, is_final=False, chunk_size=config.chunk_size_ms)
if len(res[0]["value"]):
vad_segments = res[0]["value"]
for segment in vad_segments:
if segment[0] > -1: # speech begin
last_vad_beg = segment[0]
if segment[1] > -1: # speech end
last_vad_end = segment[1]
if last_vad_beg > -1 and last_vad_end > -1:
last_vad_beg -= offset
last_vad_end -= offset
offset += last_vad_end
beg = int(last_vad_beg * config.sample_rate / 1000)
end = int(last_vad_end * config.sample_rate / 1000)
logger.info(f"[vad segment] audio_len: {end - beg}")
result = asr(audio_vad[beg:end], lang.strip(), cache_asr, True)
logger.info(f"asr response: {result}")
audio_vad = audio_vad[end:]
last_vad_beg = last_vad_end = -1
if result is not None:
global messages
                            say = re.sub(r"<\|.*?\|>", "", result[0]['text'])  # strip SenseVoice tags such as <|zh|>
messages.append({"role": "user", "content": say})
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True,
enable_thinking=False
)
model_inputs = tokenizer([text], return_tensors="pt").to(model_qw.device)
# conduct text completion
generated_ids = model_qw.generate(
**model_inputs,
max_new_tokens=32768
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
# parsing thinking content
try:
# rindex finding 151668 (</think>)
index = len(output_ids) - output_ids[::-1].index(151668)
except ValueError:
index = 0
content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")
                            # Reset messages: keep the system prompt, drop user/assistant turns
                            # (the first entry is assumed to be the system message)
                            if len(messages) > 0 and messages[0]["role"] == "system":
                                messages = [messages[0]]  # keep only the system message
                            else:
                                messages = []  # no system message, clear everything
response = TranscriptionResponse(
code=0,
info=json.dumps(result[0], ensure_ascii=False),
data=content
)
                            # Send the chat reply (text)
await websocket.send_json(response.model_dump())
                            # Send the TTS audio stream
await text_to_speech_stream_with_wav(content, websocket)
# logger.debug(f'last_vad_beg: {last_vad_beg}; last_vad_end: {last_vad_end} len(audio_vad): {len(audio_vad)}')
except WebSocketDisconnect:
logger.info("WebSocket disconnected")
except Exception as e:
logger.error(f"Unexpected error: {e}\nCall stack:\n{traceback.format_exc()}")
await websocket.close()
finally:
cache.clear()
logger.info("Cleaned up resources after WebSocket disconnect")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the FastAPI app with a specified port.")
parser.add_argument('--port', type=int, default=27004, help='Port number to run the FastAPI app on.')
# parser.add_argument('--certfile', type=str, default='path_to_your_SSL_certificate_file.crt', help='SSL certificate file')
# parser.add_argument('--keyfile', type=str, default='path_to_your_SSL_certificate_file.key', help='SSL key file')
args = parser.parse_args()
# uvicorn.run(app, host="0.0.0.0", port=args.port, ssl_certfile=args.certfile, ssl_keyfile=args.keyfile)
uvicorn.run(app, host="0.0.0.0", port=args.port)
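
For reference, a minimal client sketch for the /ws/transcribe protocol above (not part of this commit). It assumes the default port 27004, the websockets package from requirements.txt, and a 16 kHz mono 16-bit WAV such as ./wav/7.wav; the function name run_client, the chunk pacing, and the URL are illustrative only.

import asyncio
import json
import wave

import websockets  # listed in requirements.txt


async def run_client(path="./wav/7.wav", url="ws://localhost:27004/ws/transcribe?lang=auto"):
    with wave.open(path, "rb") as w:
        pcm = w.readframes(w.getnframes())  # expected format: 16 kHz, mono, 16-bit PCM
    async with websockets.connect(url) as ws:
        step = 16000 * 2 * 300 // 1000  # roughly 300 ms of audio per binary frame
        for i in range(0, len(pcm), step):
            await ws.send(pcm[i:i + step])  # binary frames feed receive_bytes() on the server
            await asyncio.sleep(0.3)  # pace the stream like a live microphone
        while True:
            msg = await asyncio.wait_for(ws.recv(), timeout=30)
            if isinstance(msg, bytes):
                print(f"TTS audio chunk: {len(msg)} bytes")  # raw s16le PCM at 16 kHz
            else:
                reply = json.loads(msg)
                print("server:", reply)
                if reply.get("type") == "EOF":  # end of the TTS stream
                    break


if __name__ == "__main__":
    asyncio.run(run_client())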

25
lsxd.txt Normal file

@@ -0,0 +1,25 @@
成都蓝色兄弟网络科技有限公司 (Chengdu Blue Brothers Network Technology Co., Ltd.) was founded on April 23, 2014. Its legal representative is Mao Yong, and its business scope includes: network technology development; agency services for other telecommunications businesses; information system integration services; organization of cultural exchange activities; internet information services; and online sales of daily goods, among others.
蓝色兄弟 ("Blue Brothers"), short for 成都蓝色兄弟网络科技有限公司, was founded in 2014, is headquartered in the Chengdu Hi-Tech Zone, has a registered capital of 50 million RMB, and is a national high-tech enterprise. The company focuses on the dual tracks of "digital benefits + physical products", with one-stop, digitally intelligent, scenario-based benefit services at its core, providing end-to-end technical enablement and precision operations support for the finance, public-services, consumer, and enterprise-services sectors, and has become an industry-leading benchmark for digital benefit services.
Technology-driven, security first: 蓝色兄弟 has a professional R&D team of over one hundred people, cumulative R&D investment in the tens of millions of RMB, and dozens of patents and software copyrights. Its self-developed platform uses AI and encryption algorithms to cover all business scenarios, giving partners secure and reliable IT support and safeguards.
Resource integration and ecosystem co-building: the company integrates upstream resources into a product library covering video entertainment, lifestyle services, supermarket vouchers, dining, physical products, and other scenarios, and has long-term partnerships with more than a thousand enterprises, including Postal Savings Bank of China, Industrial Bank, China Mobile, and FAW-Volkswagen, with an annual transaction volume exceeding 4.5 billion RMB.
Full-scenario services and enablement: centered on the dual model of "aggregated services + integrated services", 蓝色兄弟 offers one-stop services spanning digital goods trading, user operations, technical solutions, and physical-goods supply chains, helping enterprises acquire new customers, reactivate dormant users, and improve retention and conversion.
Virtual payments
Through market analysis and target-user insight, 蓝色兄弟 builds virtual benefit products and instant payment-discount marketing plans tailored to different business scenarios. [2]
Physical products
With an extensive physical-product supply chain network and service experience, it supplies and operates physical products for enterprise customers across all scenarios. [2]
Scenario zones
Offerings include: a recharge channel (phone credit, fuel cards, memberships), Meituan food delivery, Meituan in-store, online grocery, ride-hailing, one-tap refueling, one-tap EV charging, car-owner services (refueling / charging / car wash / maintenance), Qingju bike-sharing, reading, healthcare (online consultations / check-ups / priority access, etc.), physical goods, Honor of Kings gaming, travel, elder care, premium, pet, and home-service scenarios, among others. [3]
Points mall
Rapid system integration to meet the diverse points-scenario needs of bank-grade financial institutions. Drawing on years of large-project experience, it builds tailored enterprise points malls that address user activation, conversion, and retention end to end. [4]
Installment mall
By pooling the market reach of multiple partners, it offers users unified services and resources, addressing installment-payment pain points in the credit-card business. Users can find merchants that support installments on one platform, with clear installment options and a simple application flow. The installment mall strengthens banks' credit-card competitiveness and user experience while opening new channels and revenue for banks. [4]
Distribution tool
The distribution tool is a lightweight, systematic tool developed in-house by 蓝色兄弟 for bank head offices and branches to manage campaigns, distribute gifts and benefits, reach customers, and convert accumulated users. [5]
Marketing tools
Customizable to platform users' preferences, for example: nurturing games, red-packet rain, challenges, memory games, claw machines, group buying, lucky draws, check-ins, quizzes, card mailing, viral sharing, tasks, bargaining, mystery boxes, and flash sales. [6]
Managed operations
Takes on brands' operations work, including marketing planning, content creation, and social media management; through short video, livestream hosts, renewal discounts, and other tactics, it runs differentiated marketing together with the brand to steadily improve ROI.

924
model.py Normal file

@@ -0,0 +1,924 @@
from typing import Iterable, Optional
import types
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn
from torch.cuda.amp import autocast
from funasr.metrics.compute_acc import compute_accuracy, th_accuracy
from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
from funasr.train_utils.device_funcs import force_gatherable
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.utils.datadir_writer import DatadirWriter
from funasr.models.ctc.ctc import CTC
from funasr.register import tables
from funasr.models.paraformer.search import Hypothesis
class SinusoidalPositionEncoder(torch.nn.Module):
""" """
    def __init__(self, d_model=80, dropout_rate=0.1):
        super().__init__()
def encode(
self, positions: torch.Tensor = None, depth: int = None, dtype: torch.dtype = torch.float32
):
batch_size = positions.size(0)
positions = positions.type(dtype)
device = positions.device
log_timescale_increment = torch.log(torch.tensor([10000], dtype=dtype, device=device)) / (
depth / 2 - 1
)
inv_timescales = torch.exp(
torch.arange(depth / 2, device=device).type(dtype) * (-log_timescale_increment)
)
inv_timescales = torch.reshape(inv_timescales, [batch_size, -1])
scaled_time = torch.reshape(positions, [1, -1, 1]) * torch.reshape(
inv_timescales, [1, 1, -1]
)
encoding = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)
return encoding.type(dtype)
def forward(self, x):
batch_size, timesteps, input_dim = x.size()
positions = torch.arange(1, timesteps + 1, device=x.device)[None, :]
position_encoding = self.encode(positions, input_dim, x.dtype).to(x.device)
return x + position_encoding
class PositionwiseFeedForward(torch.nn.Module):
"""Positionwise feed forward layer.
Args:
        idim (int): Input dimension.
hidden_units (int): The number of hidden units.
dropout_rate (float): Dropout rate.
"""
def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):
"""Construct an PositionwiseFeedForward object."""
super(PositionwiseFeedForward, self).__init__()
self.w_1 = torch.nn.Linear(idim, hidden_units)
self.w_2 = torch.nn.Linear(hidden_units, idim)
self.dropout = torch.nn.Dropout(dropout_rate)
self.activation = activation
def forward(self, x):
"""Forward function."""
return self.w_2(self.dropout(self.activation(self.w_1(x))))
class MultiHeadedAttentionSANM(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(
self,
n_head,
in_feat,
n_feat,
dropout_rate,
kernel_size,
sanm_shfit=0,
lora_list=None,
lora_rank=8,
lora_alpha=16,
lora_dropout=0.1,
):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
# self.linear_q = nn.Linear(n_feat, n_feat)
# self.linear_k = nn.Linear(n_feat, n_feat)
# self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.linear_q_k_v = nn.Linear(in_feat, n_feat * 3)
self.attn = None
self.dropout = nn.Dropout(p=dropout_rate)
self.fsmn_block = nn.Conv1d(
n_feat, n_feat, kernel_size, stride=1, padding=0, groups=n_feat, bias=False
)
# padding
left_padding = (kernel_size - 1) // 2
if sanm_shfit > 0:
left_padding = left_padding + sanm_shfit
right_padding = kernel_size - 1 - left_padding
self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)
def forward_fsmn(self, inputs, mask, mask_shfit_chunk=None):
b, t, d = inputs.size()
if mask is not None:
mask = torch.reshape(mask, (b, -1, 1))
if mask_shfit_chunk is not None:
mask = mask * mask_shfit_chunk
inputs = inputs * mask
x = inputs.transpose(1, 2)
x = self.pad_fn(x)
x = self.fsmn_block(x)
x = x.transpose(1, 2)
x += inputs
x = self.dropout(x)
if mask is not None:
x = x * mask
return x
def forward_qkv(self, x):
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
"""
b, t, d = x.size()
q_k_v = self.linear_q_k_v(x)
q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
q_h = torch.reshape(q, (b, t, self.h, self.d_k)).transpose(
1, 2
) # (batch, head, time1, d_k)
k_h = torch.reshape(k, (b, t, self.h, self.d_k)).transpose(
1, 2
) # (batch, head, time2, d_k)
v_h = torch.reshape(v, (b, t, self.h, self.d_k)).transpose(
1, 2
) # (batch, head, time2, d_k)
return q_h, k_h, v_h, v
def forward_attention(self, value, scores, mask, mask_att_chunk_encoder=None):
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
if mask_att_chunk_encoder is not None:
mask = mask * mask_att_chunk_encoder
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
min_value = -float(
"inf"
) # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
scores = scores.masked_fill(mask, min_value)
attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q_h, k_h, v_h, v = self.forward_qkv(x)
fsmn_memory = self.forward_fsmn(v, mask, mask_shfit_chunk)
q_h = q_h * self.d_k ** (-0.5)
scores = torch.matmul(q_h, k_h.transpose(-2, -1))
att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
return att_outs + fsmn_memory
def forward_chunk(self, x, cache=None, chunk_size=None, look_back=0):
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q_h, k_h, v_h, v = self.forward_qkv(x)
if chunk_size is not None and look_back > 0 or look_back == -1:
if cache is not None:
k_h_stride = k_h[:, :, : -(chunk_size[2]), :]
v_h_stride = v_h[:, :, : -(chunk_size[2]), :]
k_h = torch.cat((cache["k"], k_h), dim=2)
v_h = torch.cat((cache["v"], v_h), dim=2)
cache["k"] = torch.cat((cache["k"], k_h_stride), dim=2)
cache["v"] = torch.cat((cache["v"], v_h_stride), dim=2)
if look_back != -1:
cache["k"] = cache["k"][:, :, -(look_back * chunk_size[1]) :, :]
cache["v"] = cache["v"][:, :, -(look_back * chunk_size[1]) :, :]
else:
cache_tmp = {
"k": k_h[:, :, : -(chunk_size[2]), :],
"v": v_h[:, :, : -(chunk_size[2]), :],
}
cache = cache_tmp
fsmn_memory = self.forward_fsmn(v, None)
q_h = q_h * self.d_k ** (-0.5)
scores = torch.matmul(q_h, k_h.transpose(-2, -1))
att_outs = self.forward_attention(v_h, scores, None)
return att_outs + fsmn_memory, cache
class LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
def sequence_mask(lengths, maxlen=None, dtype=torch.float32, device=None):
if maxlen is None:
maxlen = lengths.max()
row_vector = torch.arange(0, maxlen, 1).to(lengths.device)
matrix = torch.unsqueeze(lengths, dim=-1)
mask = row_vector < matrix
mask = mask.detach()
return mask.type(dtype).to(device) if device is not None else mask.type(dtype)
class EncoderLayerSANM(nn.Module):
def __init__(
self,
in_size,
size,
self_attn,
feed_forward,
dropout_rate,
normalize_before=True,
concat_after=False,
stochastic_depth_rate=0.0,
):
"""Construct an EncoderLayer object."""
super(EncoderLayerSANM, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.norm1 = LayerNorm(in_size)
self.norm2 = LayerNorm(size)
self.dropout = nn.Dropout(dropout_rate)
self.in_size = in_size
self.size = size
self.normalize_before = normalize_before
self.concat_after = concat_after
if self.concat_after:
self.concat_linear = nn.Linear(size + size, size)
self.stochastic_depth_rate = stochastic_depth_rate
self.dropout_rate = dropout_rate
def forward(self, x, mask, cache=None, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
"""Compute encoded features.
Args:
x_input (torch.Tensor): Input tensor (#batch, time, size).
mask (torch.Tensor): Mask tensor for the input (#batch, time).
cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
Returns:
torch.Tensor: Output tensor (#batch, time, size).
torch.Tensor: Mask tensor (#batch, time).
"""
skip_layer = False
# with stochastic depth, residual connection `x + f(x)` becomes
# `x <- x + 1 / (1 - p) * f(x)` at training time.
stoch_layer_coeff = 1.0
if self.training and self.stochastic_depth_rate > 0:
skip_layer = torch.rand(1).item() < self.stochastic_depth_rate
stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)
if skip_layer:
if cache is not None:
x = torch.cat([cache, x], dim=1)
return x, mask
residual = x
if self.normalize_before:
x = self.norm1(x)
if self.concat_after:
x_concat = torch.cat(
(
x,
self.self_attn(
x,
mask,
mask_shfit_chunk=mask_shfit_chunk,
mask_att_chunk_encoder=mask_att_chunk_encoder,
),
),
dim=-1,
)
if self.in_size == self.size:
x = residual + stoch_layer_coeff * self.concat_linear(x_concat)
else:
x = stoch_layer_coeff * self.concat_linear(x_concat)
else:
if self.in_size == self.size:
x = residual + stoch_layer_coeff * self.dropout(
self.self_attn(
x,
mask,
mask_shfit_chunk=mask_shfit_chunk,
mask_att_chunk_encoder=mask_att_chunk_encoder,
)
)
else:
x = stoch_layer_coeff * self.dropout(
self.self_attn(
x,
mask,
mask_shfit_chunk=mask_shfit_chunk,
mask_att_chunk_encoder=mask_att_chunk_encoder,
)
)
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x))
if not self.normalize_before:
x = self.norm2(x)
return x, mask, cache, mask_shfit_chunk, mask_att_chunk_encoder
def forward_chunk(self, x, cache=None, chunk_size=None, look_back=0):
"""Compute encoded features.
Args:
x_input (torch.Tensor): Input tensor (#batch, time, size).
mask (torch.Tensor): Mask tensor for the input (#batch, time).
cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
Returns:
torch.Tensor: Output tensor (#batch, time, size).
torch.Tensor: Mask tensor (#batch, time).
"""
residual = x
if self.normalize_before:
x = self.norm1(x)
if self.in_size == self.size:
attn, cache = self.self_attn.forward_chunk(x, cache, chunk_size, look_back)
x = residual + attn
else:
x, cache = self.self_attn.forward_chunk(x, cache, chunk_size, look_back)
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
x = residual + self.feed_forward(x)
if not self.normalize_before:
x = self.norm2(x)
return x, cache
@tables.register("encoder_classes", "SenseVoiceEncoderSmall")
class SenseVoiceEncoderSmall(nn.Module):
"""
Author: Speech Lab of DAMO Academy, Alibaba Group
SCAMA: Streaming chunk-aware multihead attention for online end-to-end speech recognition
https://arxiv.org/abs/2006.01713
"""
def __init__(
self,
input_size: int,
output_size: int = 256,
attention_heads: int = 4,
linear_units: int = 2048,
num_blocks: int = 6,
tp_blocks: int = 0,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.0,
stochastic_depth_rate: float = 0.0,
input_layer: Optional[str] = "conv2d",
pos_enc_class=SinusoidalPositionEncoder,
normalize_before: bool = True,
concat_after: bool = False,
positionwise_layer_type: str = "linear",
positionwise_conv_kernel_size: int = 1,
padding_idx: int = -1,
kernel_size: int = 11,
sanm_shfit: int = 0,
selfattention_layer_type: str = "sanm",
**kwargs,
):
super().__init__()
self._output_size = output_size
self.embed = SinusoidalPositionEncoder()
self.normalize_before = normalize_before
positionwise_layer = PositionwiseFeedForward
positionwise_layer_args = (
output_size,
linear_units,
dropout_rate,
)
encoder_selfattn_layer = MultiHeadedAttentionSANM
encoder_selfattn_layer_args0 = (
attention_heads,
input_size,
output_size,
attention_dropout_rate,
kernel_size,
sanm_shfit,
)
encoder_selfattn_layer_args = (
attention_heads,
output_size,
output_size,
attention_dropout_rate,
kernel_size,
sanm_shfit,
)
self.encoders0 = nn.ModuleList(
[
EncoderLayerSANM(
input_size,
output_size,
encoder_selfattn_layer(*encoder_selfattn_layer_args0),
positionwise_layer(*positionwise_layer_args),
dropout_rate,
)
for i in range(1)
]
)
self.encoders = nn.ModuleList(
[
EncoderLayerSANM(
output_size,
output_size,
encoder_selfattn_layer(*encoder_selfattn_layer_args),
positionwise_layer(*positionwise_layer_args),
dropout_rate,
)
for i in range(num_blocks - 1)
]
)
self.tp_encoders = nn.ModuleList(
[
EncoderLayerSANM(
output_size,
output_size,
encoder_selfattn_layer(*encoder_selfattn_layer_args),
positionwise_layer(*positionwise_layer_args),
dropout_rate,
)
for i in range(tp_blocks)
]
)
self.after_norm = LayerNorm(output_size)
self.tp_norm = LayerNorm(output_size)
def output_size(self) -> int:
return self._output_size
def forward(
self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
):
"""Embed positions in tensor."""
masks = sequence_mask(ilens, device=ilens.device)[:, None, :]
xs_pad *= self.output_size() ** 0.5
xs_pad = self.embed(xs_pad)
# forward encoder1
for layer_idx, encoder_layer in enumerate(self.encoders0):
encoder_outs = encoder_layer(xs_pad, masks)
xs_pad, masks = encoder_outs[0], encoder_outs[1]
for layer_idx, encoder_layer in enumerate(self.encoders):
encoder_outs = encoder_layer(xs_pad, masks)
xs_pad, masks = encoder_outs[0], encoder_outs[1]
xs_pad = self.after_norm(xs_pad)
# forward encoder2
olens = masks.squeeze(1).sum(1).int()
for layer_idx, encoder_layer in enumerate(self.tp_encoders):
encoder_outs = encoder_layer(xs_pad, masks)
xs_pad, masks = encoder_outs[0], encoder_outs[1]
xs_pad = self.tp_norm(xs_pad)
return xs_pad, olens
@tables.register("model_classes", "SenseVoiceSmall")
class SenseVoiceSmall(nn.Module):
"""CTC-attention hybrid Encoder-Decoder model"""
def __init__(
self,
specaug: str = None,
specaug_conf: dict = None,
normalize: str = None,
normalize_conf: dict = None,
encoder: str = None,
encoder_conf: dict = None,
ctc_conf: dict = None,
input_size: int = 80,
vocab_size: int = -1,
ignore_id: int = -1,
blank_id: int = 0,
sos: int = 1,
eos: int = 2,
length_normalized_loss: bool = False,
**kwargs,
):
super().__init__()
if specaug is not None:
specaug_class = tables.specaug_classes.get(specaug)
specaug = specaug_class(**specaug_conf)
if normalize is not None:
normalize_class = tables.normalize_classes.get(normalize)
normalize = normalize_class(**normalize_conf)
encoder_class = tables.encoder_classes.get(encoder)
encoder = encoder_class(input_size=input_size, **encoder_conf)
encoder_output_size = encoder.output_size()
if ctc_conf is None:
ctc_conf = {}
ctc = CTC(odim=vocab_size, encoder_output_size=encoder_output_size, **ctc_conf)
self.blank_id = blank_id
self.sos = sos if sos is not None else vocab_size - 1
self.eos = eos if eos is not None else vocab_size - 1
self.vocab_size = vocab_size
self.ignore_id = ignore_id
self.specaug = specaug
self.normalize = normalize
self.encoder = encoder
self.error_calculator = None
self.ctc = ctc
self.length_normalized_loss = length_normalized_loss
self.encoder_output_size = encoder_output_size
self.lid_dict = {"auto": 0, "zh": 3, "en": 4, "yue": 7, "ja": 11, "ko": 12, "nospeech": 13}
self.lid_int_dict = {24884: 3, 24885: 4, 24888: 7, 24892: 11, 24896: 12, 24992: 13}
self.textnorm_dict = {"withitn": 14, "woitn": 15}
self.textnorm_int_dict = {25016: 14, 25017: 15}
self.embed = torch.nn.Embedding(
7 + len(self.lid_dict) + len(self.textnorm_dict), input_size
)
self.emo_dict = {
"unk": 25009,
"happy": 25001,
"sad": 25002,
"angry": 25003,
"neutral": 25004,
}
self.criterion_att = LabelSmoothingLoss(
size=self.vocab_size,
padding_idx=self.ignore_id,
smoothing=kwargs.get("lsm_weight", 0.0),
normalize_length=self.length_normalized_loss,
)
@staticmethod
def from_pretrained(model: str = None, **kwargs):
from funasr import AutoModel
model, kwargs = AutoModel.build_model(model=model, trust_remote_code=True, **kwargs)
return model, kwargs
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
**kwargs,
):
"""Encoder + Decoder + Calc loss
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch, )
text: (Batch, Length)
text_lengths: (Batch,)
"""
# import pdb;
# pdb.set_trace()
if len(text_lengths.size()) > 1:
text_lengths = text_lengths[:, 0]
if len(speech_lengths.size()) > 1:
speech_lengths = speech_lengths[:, 0]
batch_size = speech.shape[0]
# 1. Encoder
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, text)
loss_ctc, cer_ctc = None, None
loss_rich, acc_rich = None, None
stats = dict()
loss_ctc, cer_ctc = self._calc_ctc_loss(
encoder_out[:, 4:, :], encoder_out_lens - 4, text[:, 4:], text_lengths - 4
)
loss_rich, acc_rich = self._calc_rich_ce_loss(encoder_out[:, :4, :], text[:, :4])
loss = loss_ctc + loss_rich
# Collect total loss stats
stats["loss_ctc"] = torch.clone(loss_ctc.detach()) if loss_ctc is not None else None
stats["loss_rich"] = torch.clone(loss_rich.detach()) if loss_rich is not None else None
stats["loss"] = torch.clone(loss.detach()) if loss is not None else None
stats["acc_rich"] = acc_rich
# force_gatherable: to-device and to-tensor if scalar for DataParallel
if self.length_normalized_loss:
batch_size = int((text_lengths + 1).sum())
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
def encode(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
**kwargs,
):
"""Frontend + Encoder. Note that this method is used by asr_inference.py
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch, )
ind: int
"""
# Data augmentation
if self.specaug is not None and self.training:
speech, speech_lengths = self.specaug(speech, speech_lengths)
# Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
if self.normalize is not None:
speech, speech_lengths = self.normalize(speech, speech_lengths)
lids = torch.LongTensor(
[
[
(
self.lid_int_dict[int(lid)]
if torch.rand(1) > 0.2 and int(lid) in self.lid_int_dict
else 0
)
]
for lid in text[:, 0]
]
).to(speech.device)
language_query = self.embed(lids)
styles = torch.LongTensor(
[[self.textnorm_int_dict[int(style)]] for style in text[:, 3]]
).to(speech.device)
style_query = self.embed(styles)
speech = torch.cat((style_query, speech), dim=1)
speech_lengths += 1
event_emo_query = self.embed(torch.LongTensor([[1, 2]]).to(speech.device)).repeat(
speech.size(0), 1, 1
)
input_query = torch.cat((language_query, event_emo_query), dim=1)
speech = torch.cat((input_query, speech), dim=1)
speech_lengths += 3
encoder_out, encoder_out_lens = self.encoder(speech, speech_lengths)
return encoder_out, encoder_out_lens
def _calc_ctc_loss(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
):
# Calc CTC loss
loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
# Calc CER using CTC
cer_ctc = None
if not self.training and self.error_calculator is not None:
ys_hat = self.ctc.argmax(encoder_out).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
return loss_ctc, cer_ctc
def _calc_rich_ce_loss(
self,
encoder_out: torch.Tensor,
ys_pad: torch.Tensor,
):
decoder_out = self.ctc.ctc_lo(encoder_out)
# 2. Compute attention loss
loss_rich = self.criterion_att(decoder_out, ys_pad.contiguous())
acc_rich = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_pad.contiguous(),
ignore_label=self.ignore_id,
)
return loss_rich, acc_rich
def inference(
self,
data_in,
data_lengths=None,
key: list = ["wav_file_tmp_name"],
tokenizer=None,
frontend=None,
**kwargs,
):
meta_data = {}
if (
isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
): # fbank
speech, speech_lengths = data_in, data_lengths
if len(speech.shape) < 3:
speech = speech[None, :, :]
if speech_lengths is None:
speech_lengths = speech.shape[1]
else:
# extract fbank feats
time1 = time.perf_counter()
audio_sample_list = load_audio_text_image_video(
data_in,
fs=frontend.fs,
audio_fs=kwargs.get("fs", 16000),
data_type=kwargs.get("data_type", "sound"),
tokenizer=tokenizer,
)
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(
audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
)
time3 = time.perf_counter()
meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
meta_data["batch_data_time"] = (
speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
)
speech = speech.to(device=kwargs["device"])
speech_lengths = speech_lengths.to(device=kwargs["device"])
language = kwargs.get("language", "auto")
language_query = self.embed(
torch.LongTensor([[self.lid_dict[language] if language in self.lid_dict else 0]]).to(
speech.device
)
).repeat(speech.size(0), 1, 1)
use_itn = kwargs.get("use_itn", False)
textnorm = kwargs.get("text_norm", None)
if textnorm is None:
textnorm = "withitn" if use_itn else "woitn"
textnorm_query = self.embed(
torch.LongTensor([[self.textnorm_dict[textnorm]]]).to(speech.device)
).repeat(speech.size(0), 1, 1)
speech = torch.cat((textnorm_query, speech), dim=1)
speech_lengths += 1
event_emo_query = self.embed(torch.LongTensor([[1, 2]]).to(speech.device)).repeat(
speech.size(0), 1, 1
)
input_query = torch.cat((language_query, event_emo_query), dim=1)
speech = torch.cat((input_query, speech), dim=1)
speech_lengths += 3
# Encoder
encoder_out, encoder_out_lens = self.encoder(speech, speech_lengths)
if isinstance(encoder_out, tuple):
encoder_out = encoder_out[0]
        # c. Apply CTC log-softmax to the encoder output (greedy decoding follows)
ctc_logits = self.ctc.log_softmax(encoder_out)
if kwargs.get("ban_emo_unk", False):
ctc_logits[:, :, self.emo_dict["unk"]] = -float("inf")
results = []
b, n, d = encoder_out.size()
if isinstance(key[0], (list, tuple)):
key = key[0]
if len(key) < b:
key = key * b
for i in range(b):
x = ctc_logits[i, : encoder_out_lens[i].item(), :]
yseq = x.argmax(dim=-1)
yseq = torch.unique_consecutive(yseq, dim=-1)
ibest_writer = None
if kwargs.get("output_dir") is not None:
if not hasattr(self, "writer"):
self.writer = DatadirWriter(kwargs.get("output_dir"))
ibest_writer = self.writer[f"1best_recog"]
mask = yseq != self.blank_id
token_int = yseq[mask].tolist()
# Change integer-ids to tokens
text = tokenizer.decode(token_int)
            # average of the per-frame maximum log-probabilities
avg_logprob = x.max(dim=-1)[0].mean().item()
result_i = {"key": key[i], "text": text, "avg_logprob": avg_logprob}
results.append(result_i)
if ibest_writer is not None:
ibest_writer["text"][key[i]] = text
return results, meta_data
def export(self, **kwargs):
from .export_meta import export_rebuild_model
if "max_seq_len" not in kwargs:
kwargs["max_seq_len"] = 512
models = export_rebuild_model(model=self, **kwargs)
return models

44
qwen.py Normal file

@@ -0,0 +1,44 @@
from modelscope import AutoModelForCausalLM, AutoTokenizer
model_name = "Qwen/Qwen3-0.6B"
# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype="auto",
device_map="auto"
)
# prepare the model input
prompt = "爱你爱你爱你"
messages = [
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True,
enable_thinking=False # Switches between thinking and non-thinking modes. Default is True.
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
# conduct text completion
generated_ids = model.generate(
**model_inputs,
max_new_tokens=32768
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
# parsing thinking content
try:
# rindex finding 151668 (</think>)
index = len(output_ids) - output_ids[::-1].index(151668)
except ValueError:
index = 0
thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")
print("thinking content:", thinking_content)
print("content:", content)

129
rag.py Normal file

@@ -0,0 +1,129 @@
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document, BaseRetriever
from langchain_community.vectorstores import Chroma, FAISS
from langchain.retrievers import BM25Retriever
from typing import List
from pydantic_settings import BaseSettings
from pydantic import BaseModel, Field
from modelscope import AutoModel, AutoTokenizer, AutoModelForCausalLM  # AutoModel is used for the embedding model
import torch
import numpy as np
# 1. Custom EnsembleRetriever (unchanged)
class EnsembleRetriever(BaseRetriever):
def __init__(self, retrievers: List[BaseRetriever], weights: List[float] = None):
self.retrievers = retrievers
self.weights = weights or [1.0 / len(retrievers)] * len(retrievers)
def get_relevant_documents(self, query: str) -> List[Document]:
all_results = []
for retriever, weight in zip(self.retrievers, self.weights):
docs = retriever.get_relevant_documents(query)
all_results.extend(docs)
        # Simple de-duplication by page content
seen = set()
unique_docs = []
for doc in all_results:
if doc.page_content not in seen:
seen.add(doc.page_content)
unique_docs.append(doc)
return unique_docs
# 2. Custom ModelScopeEmbeddings (switched to AutoModel)
class ModelScopeEmbeddings:
def __init__(self, model_name: str, device: str = None):
self.model_name = model_name
self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
self.model = AutoModel.from_pretrained(
model_name,
torch_dtype=torch.float16,
device_map=self.device,
trust_remote_code=True,
            use_safetensors=True  # force safetensors weights
)
def embed_query(self, text: str) -> np.ndarray:
inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(self.device)
with torch.no_grad():
outputs = self.model(**inputs)
embeddings = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
return embeddings.squeeze(0)
def embed_documents(self, texts: List[str]) -> np.ndarray:
return np.array([self.embed_query(text) for text in texts])
# 3. Document loading and chunking (unchanged)
loader = TextLoader("./lsxd.txt", encoding="utf-8")
pages = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=200)
docs = text_splitter.split_documents(pages)
# 4. Initialize the custom embedding model and vector stores
embeddings = ModelScopeEmbeddings(model_name="BAAI/bge-large-zh-v1.5")  # a dedicated embedding model
# Initialize Chroma and FAISS
vector_store = Chroma.from_documents(docs, embeddings)  # primary store
faiss_store = FAISS.from_documents(docs, embeddings)
faiss_retriever = faiss_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
# 5. Hybrid retrieval: BM25Retriever combined with the FAISS retriever
bm25_retriever = BM25Retriever.from_documents(docs)
ensemble_retriever = EnsembleRetriever(
retrievers=[bm25_retriever, faiss_retriever],
weights=[0.3, 0.7]
)
# 6. Model config (Qwen chat model from ModelScope)
class Config(BaseSettings):
    model_name: str = Field("Qwen/Qwen-7B-Chat", description="model name")
    device: str = Field("cuda" if torch.cuda.is_available() else "cpu", description="device")
config = Config()
tokenizer = AutoTokenizer.from_pretrained(config.model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
config.model_name,
torch_dtype=torch.float16,
device_map=config.device,
trust_remote_code=True
)
# 7. Query and generation (unchanged)
def generate_response(query: str) -> str:
results = ensemble_retriever.get_relevant_documents(query)
context = "\n".join([f"文档片段:{doc.page_content[:500]}..." for doc in results[:3]])
prompt = f"""你是一个智能助手,请根据以下上下文回答用户问题。若信息不足,请回答“我不知道”。
用户问题{query}
上下文信息
{context}
回答"""
inputs = tokenizer(prompt, return_tensors="pt").to(config.device)
outputs = model.generate(
inputs.input_ids,
max_new_tokens=512,
temperature=0.3,
repetition_penalty=1.1,
do_sample=True,
top_p=0.9,
eos_token_id=tokenizer.eos_token_id
)
response = tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True)
return response.strip()
# Example query
if __name__ == "__main__":
query = "蓝色兄弟是一家怎样的公司?"
answer = generate_response(query)
print("AI回答", answer)

32
requirements.txt Normal file

@@ -0,0 +1,32 @@
fastapi
funasr
modelscope
transformers
numpy<=1.26.4
pydantic
pydantic_settings
soundfile
starlette
torch<=2.3
torchaudio
uvicorn
addict
datasets==2.21.0
pillow
simplejson
sortedcontainers
websockets
loguru
whoami
tiktoken
accelerate
edge-tts
#rag
langchain-community
sentence-transformers
chromadb
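# note: rag.py also appears to need langchain, rank_bm25 (for BM25Retriever), and faiss-cpu or faiss-gpu (for FAISS); these are not listed above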
#pip install chromadb -i https://mirrors.aliyun.com/pypi/simple/
#modelscope download --model qwen/Qwen-1_8B --local_dir ./qwen

25
voice.py Normal file

@@ -0,0 +1,25 @@
import asyncio
async def generate_speech(text, output_path):
try:
print(f"正在生成语音: '{text}'...")
process = await asyncio.create_subprocess_exec(
"edge-tts",
"--text", text,
"--voice", "zh-CN-XiaoxiaoNeural",
"--rate", "+20%", # 语速加快 20%
"--pitch", "+10%", # 音调略高,增强可爱感
"--write-media", output_path,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
        await process.wait()  # wait for synthesis to finish
        print(f"Speech saved to: {output_path}")
    except Exception as e:
        print(f"Error generating speech: {e}")
async def main():
await generate_speech("你好呀,我是小晓!", "/var/pyp/output.mp3")
if __name__ == "__main__":
    asyncio.run(main())  # start the async entry point

8
wav.py Normal file

@@ -0,0 +1,8 @@
import wave
with wave.open("./wav/7.wav", "rb") as wav_file:
print(f"采样率: {wav_file.getframerate()} Hz")
print(f"声道数: {wav_file.getnchannels()}")
print(f"位深度: {wav_file.getsampwidth() * 8} bits") # 字节转比特
print(f"帧数: {wav_file.getnframes()}")
print(f"时长: {wav_file.getnframes() / wav_file.getframerate():.2f}")