import io
import json

import fitz
from dashscope import get_tokenizer  # requires dashscope >= 1.14.0
from docx import Document

from Log import logger
from app.config.agent_base_url import RG_CHAT_DIALOG, DF_CHAT_AGENT, DF_CHAT_PARAMETERS, RG_CHAT_SESSIONS, \
    DF_CHAT_WORKFLOW, DF_UPLOAD_FILE
from app.config.config import settings
from app.config.const import *
from app.models import DialogModel, ApiTokenModel, UserTokenModel
from app.models.v2.session_model import ChatSessionDao, ChatData
from app.service.v2.app_driver.chat_agent import ChatAgent
from app.service.v2.app_driver.chat_data import ChatBaseApply
from app.service.v2.app_driver.chat_dialog import ChatDialog
from app.service.v2.app_driver.chat_workflow import ChatWorkflow

async def update_session_log(db, session_id: str, message: dict, conversation_id: str):
    try:
        ...
    except Exception as e:
        logger.error(e)
        return None

async def get_app_token(db, app_id):
    app_token = db.query(UserTokenModel).filter_by(id=app_id).first()
    if app_token:
        return app_token.access_token
    return ""

async def get_chat_token(db, app_id):
    app_token = db.query(ApiTokenModel).filter_by(app_id=app_id).first()
    if app_token:
        return app_token.token
    return ""

async def add_chat_token(db, data):
    try:
        api_token = ApiTokenModel(**data)
        db.add(api_token)
        db.commit()
    except Exception as e:
        # Roll back the failed transaction so the session stays usable.
        db.rollback()
        logger.error(e)

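# Illustrative only: the dict passed to add_chat_token mirrors ApiTokenModel's
# columns. app_id and token are the fields this module reads back elsewhere;
# any other columns are assumptions about the model.
#
#   await add_chat_token(db, {"app_id": chat_id, "token": "app-xxxxxxxx"})
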
async def get_chat_info(db, chat_id: str):
    try:
        ...
    except Exception as e:
        logger.error(e)
        try:
            yield "data: " + json.dumps({"message": smart_message_error,
                                         "error": "\n**ERROR**: " + str(e), "status": http_500},
                                        ensure_ascii=False) + "\n\n"
        except:
            ...

    files = []
    node_list = []
    token = await get_chat_token(db, chat_id)

    chat, url = await get_chat_object(mode)
    if hasattr(chat_data, "query"):
        query = chat_data.query
    ...
            data = ans.get("data", {})
            event = smart_workflow_finished
            node_list.append(ans)
        elif ans.get("event") == message_end:
            event = smart_message_end
        else:
            ...
    except Exception as e:
        logger.error(e)
        try:
            yield "data: " + json.dumps({"message": smart_message_error,
                                         "error": "\n**ERROR**: " + str(e), "status": http_500},
                                        ensure_ascii=False) + "\n\n"
        except:
            ...
    ...
        "error": error}, conversation_id)

async def service_chat_basic(db, chat_id: str, chat_data: ChatData, session_id: str, user_id, mode: str):
    ...

# Hypothetical header: the original function name and signature are elided
# from this excerpt; only the body below is from the source.
async def service_chat_sessions(chat_id: str, name: str, token: str):
    url = settings.fwr_base_url + RG_CHAT_SESSIONS.format(chat_id)
    chat = ChatDialog()
    return await chat.chat_sessions(url, {"name": name}, await chat.get_headers(token))

async def service_chat_upload(db, chat_id, file, user_id):
    files = []
    token = await get_chat_token(db, chat_id)
    if not token:
        # Keep the return type consistent: an empty string, like the fall-through below.
        return ""
    url = settings.dify_base_url + DF_UPLOAD_FILE
    chat = ChatBaseApply()
    for f in file:
        try:
            file_content = await f.read()
            file_upload = await chat.chat_upload(url, {"file": (f.filename, file_content)}, {"user": str(user_id)},
                                                 {'Authorization': f'Bearer {token}'})
            try:
                tokens = await read_file(file_content, f.filename, f.content_type)
                file_upload["tokens"] = tokens
            except:
                ...
            files.append(file_upload)
        except Exception as e:
            logger.error(e)
    return json.dumps(files) if files else ""

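# Illustrative only: a minimal sketch of wiring service_chat_upload into a
# FastAPI route. The path, the get_db dependency, and the user_id source are
# assumptions, not part of this module.
#
#   from fastapi import APIRouter, Depends, UploadFile
#
#   router = APIRouter()
#
#   @router.post("/v2/chat/{chat_id}/upload")
#   async def upload_files(chat_id: str, file: list[UploadFile], db=Depends(get_db)):
#       return await service_chat_upload(db, chat_id, file, user_id=0)
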
async def get_str_token(input_str):
    # Get the tokenizer object; currently only Qwen-series models are supported.
    tokenizer = get_tokenizer('qwen-turbo')
    # Split the string into tokens and map them to token ids.
    tokens = tokenizer.encode(input_str)
    return len(tokens)

async def read_pdf(pdf_stream):
    # Extract the text of every page from an in-memory PDF with PyMuPDF.
    text = ""
    with fitz.open(stream=pdf_stream, filetype="pdf") as pdf_document:
        for page in pdf_document:
            text += page.get_text()
    return text

async def read_word(word_stream):
    # Open the Word file from an in-memory stream with python-docx.
    doc = Document(io.BytesIO(word_stream))
    # Concatenate the text of every paragraph.
    text = ""
    for para in doc.paragraphs:
        text += para.text
    return text

async def read_file(file, filename, content_type):
    text = ""
    if content_type == "application/pdf" or filename.endswith('.pdf'):
        # Extract the PDF text.
        text = await read_pdf(file)
    elif content_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" or filename.endswith(
            '.docx'):
        text = await read_word(file)
    return await get_str_token(text)
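

if __name__ == "__main__":
    # Illustrative only: a local sanity check, assuming a sample.pdf fixture
    # exists next to this module. It runs the read_file pipeline end to end
    # and prints the qwen-turbo token count of the extracted text.
    import asyncio

    async def _demo(path: str) -> None:
        with open(path, "rb") as fh:
            content = fh.read()
        tokens = await read_file(content, path, "application/pdf")
        print(f"{path}: {tokens} tokens")

    asyncio.run(_demo("sample.pdf"))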