From e26a7859a8900b152e10961d91fa6ad19a8deb9c Mon Sep 17 00:00:00 2001
From: zhaoqingang <zhaoqg0118@163.com>
Date: Thu, 06 Mar 2025 14:41:27 +0800
Subject: [PATCH] Add general-purpose chat to the home page

---
 app/service/v2/chat.py | 350 ++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 314 insertions(+), 36 deletions(-)

diff --git a/app/service/v2/chat.py b/app/service/v2/chat.py
index 246b3f1..05a37be 100644
--- a/app/service/v2/chat.py
+++ b/app/service/v2/chat.py
@@ -1,14 +1,17 @@
+import asyncio
 import io
 import json
+import uuid

 import fitz
+from fastapi import HTTPException

 from Log import logger
 from app.config.agent_base_url import RG_CHAT_DIALOG, DF_CHAT_AGENT, DF_CHAT_PARAMETERS, RG_CHAT_SESSIONS, \
-    DF_CHAT_WORKFLOW, DF_UPLOAD_FILE
+    DF_CHAT_WORKFLOW, DF_UPLOAD_FILE, RG_ORIGINAL_URL
 from app.config.config import settings
 from app.config.const import *
-from app.models import DialogModel, ApiTokenModel, UserTokenModel
+from app.models import DialogModel, ApiTokenModel, UserTokenModel, ComplexChatSessionDao, ChatDataRequest
 from app.models.v2.session_model import ChatSessionDao, ChatData
 from app.service.v2.app_driver.chat_agent import ChatAgent
 from app.service.v2.app_driver.chat_data import ChatBaseApply
@@ -28,13 +31,13 @@


 async def add_session_log(db, session_id: str, question: str, chat_id: str, user_id, event_type: str,
-                          conversation_id: str):
+                          conversation_id: str, agent_type):
     try:
         session = await ChatSessionDao(db).update_or_insert_by_id(
             session_id=session_id,
             name=question[:255],
             agent_id=chat_id,
-            agent_type=1,
+            agent_type=agent_type,
             tenant_id=user_id,
             message={"role": "user", "content": question},
             conversation_id=conversation_id,
@@ -45,12 +48,12 @@
         logger.error(e)
         return None

+
 async def get_app_token(db, app_id):
     app_token = db.query(UserTokenModel).filter_by(id=app_id).first()
     if app_token:
         return app_token.access_token
     return ""
-


 async def get_chat_token(db, app_id):
@@ -67,7 +70,6 @@
         db.commit()
     except Exception as e:
         logger.error(e)
-


 async def get_chat_info(db, chat_id: str):
@@ -88,7 +90,7 @@
     token = await get_chat_token(db, rg_api_token)
     url = settings.fwr_base_url + RG_CHAT_DIALOG.format(chat_id)
     chat = ChatDialog()
-    session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id)
+    session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id, 1)
     if session:
         conversation_id = session.conversation_id
     message = {"role": "assistant", "answer": "", "reference": {}}
@@ -129,26 +131,49 @@
         except:
             ...
         finally:
+            message["role"] = "assistant"
             await update_session_log(db, session_id, message, conversation_id)
+
+
+async def data_process(data):
+    if isinstance(data, str):
+        return data.replace("dify", "smart")
+    elif isinstance(data, dict):
+        for k in list(data.keys()):
+            if isinstance(k, str) and "dify" in k:
+                new_k = k.replace("dify", "smart")
+                data[new_k] = await data_process(data[k])
+                del data[k]
+            else:
+                data[k] = await data_process(data[k])
+        return data
+    elif isinstance(data, list):
+        for i in range(len(data)):
+            data[i] = await data_process(data[i])
+        return data
+    else:
+        return data


 async def service_chat_workflow(db, chat_id: str, chat_data: ChatData, session_id: str, user_id, mode: str):
     conversation_id = ""
     answer_event = ""
     answer_agent = ""
+    answer_workflow = ""
+    download_url = ""
     message_id = ""
     task_id = ""
     error = ""
     files = []
     node_list = []
     token = await get_chat_token(db, chat_id)
     chat, url = await get_chat_object(mode)
     if hasattr(chat_data, "query"):
         query = chat_data.query
     else:
-        query = "start new workflow"
-    session = await add_session_log(db, session_id, query, chat_id, user_id, mode, conversation_id)
+        query = "start new conversation"
+    session = await add_session_log(db, session_id, query if query else "start new conversation", chat_id, user_id,
+                                    mode, conversation_id, 3)
     if session:
         conversation_id = session.conversation_id
     try:
@@ -180,15 +205,24 @@
                     event = smart_message_file
                 elif ans.get("event") in [workflow_started, node_started, node_finished]:
                     data = ans.get("data", {})
-                    data["inputs"] = []
-                    data["outputs"] = []
+                    data["inputs"] = await data_process(data.get("inputs", {}))
+                    data["outputs"] = await data_process(data.get("outputs", {}))
+                    data["files"] = await data_process(data.get("files", []))
                     data["process_data"] = ""
+                    if data.get("status") == "failed":
+                        status = http_500
+                        error = data.get("error", "")
                     node_list.append(ans)
                     event = [smart_workflow_started, smart_node_started, smart_node_finished][
                         [workflow_started, node_started, node_finished].index(ans.get("event"))]
                 elif ans.get("event") == workflow_finished:
                     data = ans.get("data", {})
+                    answer_workflow = data.get("outputs", {}).get("output", data.get("outputs", {}).get("answer"))
+                    download_url = data.get("outputs", {}).get("download_url")
                     event = smart_workflow_finished
+                    if data.get("status") == "failed":
+                        status = http_500
+                        error = data.get("error", "")
                     node_list.append(ans)

                 elif ans.get("event") == message_end:
@@ -210,7 +244,9 @@
         except:
             ...
         finally:
-            await update_session_log(db, session_id, {"role": "assistant", "answer": answer_event or answer_agent,
+            await update_session_log(db, session_id, {"role": "assistant",
+                                                      "answer": answer_event or answer_agent or answer_workflow or error,
+                                                      "download_url": download_url,
                                                       "node_list": node_list, "task_id": task_id, "id": message_id,
                                                       "error": error}, conversation_id)
@@ -224,30 +260,32 @@
     if not chat_info:
         return {}
     return chat_info.parameters
-    # if chat_info.dialog_type == RG_TYPE:
-    #     return {"retriever_resource":
-    #         {
-    #             "enabled": True
-    #         }
-    #     }
-    # elif chat_info.dialog_type == BASIC_TYPE:
-    #     ...
-    # elif chat_info.dialog_type == DF_TYPE:
-    #     token = await get_chat_token(db, chat_id)
-    #     if not token:
-    #         return {}
-    #     url = settings.dify_base_url + DF_CHAT_PARAMETERS
-    #     chat = ChatBaseApply()
-    #     return await chat.chat_get(url, {"user": str(user_id)}, await chat.get_headers(token))


 async def service_chat_sessions(db, chat_id, name):
     token = await get_chat_token(db, rg_api_token)
+    # print(token)
     if not token:
         return {}
     url = settings.fwr_base_url + RG_CHAT_SESSIONS.format(chat_id)
     chat = ChatDialog()
     return await chat.chat_sessions(url, {"name": name}, await chat.get_headers(token))
+
+
+async def service_chat_sessions_list(db, chat_id, current, page_size, user_id, keyword):
+    total, session_list = await ChatSessionDao(db).get_session_list(
+        user_id=user_id,
+        agent_id=chat_id,
+        keyword=keyword,
+        page=current,
+        page_size=page_size
+    )
+    return json.dumps({"total": total, "rows": [session.to_dict() for session in session_list]})
+
+
+async def service_chat_session_log(db, session_id):
+    session_log = await ChatSessionDao(db).get_session_by_id(session_id)
+    return json.dumps(session_log.log_to_json() if session_log else {})


 async def service_chat_upload(db, chat_id, file, user_id):
@@ -278,15 +316,8 @@
     tokenizer = get_tokenizer('qwen-turbo')
     # Split the string into tokens and convert them to token ids
     tokens = tokenizer.encode(input_str)
-    # print(f"Token ids after splitting: {tokens}.")
-    # # Token ids after splitting: [31935, 64559, 99320, 56007, 100629, 104795, 99788, 1773]
-    # print(f"{len(tokens)} tokens in total after splitting")
-    # # 8 tokens in total after splitting
-    #
-    # # Convert each token id back to a string and print it
-    # for i in range(len(tokens)):
-    #     print(f"The string for token id {tokens[i]} is: {tokenizer.decode(tokens[i])}")
     return len(tokens)
+

 async def read_pdf(pdf_stream):
     text = ""
@@ -307,6 +338,7 @@
     return text

+
 async def read_file(file, filename, content_type):
     text = ""
     if content_type == "application/pdf" or filename.endswith('.pdf'):
@@ -318,3 +350,249 @@
         text = await read_word(file)

     return await get_str_token(text)
+
+
+async def service_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key):
+    # print(query)
+    try:
+        request_data = json.loads(query)
+        payload = {
+            "question": request_data.get("query", ""),
+            "dataset_ids": request_data.get("dataset_ids", []),
+            "page_size": top_k,
+            "similarity_threshold": similarity_threshold if similarity_threshold else 0.2
+        }
+    except json.JSONDecodeError:
+        fixed_json = query.replace("'", '"')
+        try:
+            request_data = json.loads(fixed_json)
+            payload = {
+                "question": request_data.get("query", ""),
+                "dataset_ids": request_data.get("dataset_ids", []),
+                "page_size": top_k,
+                "similarity_threshold": similarity_threshold if similarity_threshold else 0.2
+            }
+        except Exception:
+            payload = {
+                "question": query,
+                "dataset_ids": [knowledge_id],
+                "page_size": top_k,
+                "similarity_threshold": similarity_threshold if similarity_threshold else 0.2
+            }
+    # print(payload)
+    url = settings.fwr_base_url + RG_ORIGINAL_URL
+    chat = ChatBaseApply()
+    response = await chat.chat_post(url, payload, await chat.get_headers(api_key))
+    if not response:
+        raise HTTPException(status_code=500, detail="Service error!")
+    records = [
+        {
+            "content": chunk["content"],
+            "score": chunk["similarity"],
+            "title": chunk.get("document_keyword", "Unknown Document"),
+            "metadata": {"document_id": chunk["document_id"],
+                         "path": f"{settings.fwr_base_url}/document/{chunk['document_id']}?ext={chunk.get('document_keyword').split('.')[-1]}&prefix=document",
+                         "highlight": chunk.get("highlight"),
+                         "image_id": chunk.get("image_id"),
+                         "positions": chunk.get("positions")}
+        }
+        for chunk in response.get("data", {}).get("chunks", [])
+    ]
+    # print(len(records))
+    # print(records)
+    return records
+
+
+async def service_base_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key):
+    # request_data = json.loads(query)
+    payload = {
+        "question": query,
+        "dataset_ids": [knowledge_id],
+        "page_size": top_k,
+        "similarity_threshold": similarity_threshold
+    }
+    url = settings.fwr_base_url + RG_ORIGINAL_URL
+    # url = "http://192.168.20.116:11080/" + RG_ORIGINAL_URL
+    chat = ChatBaseApply()
+    response = await chat.chat_post(url, payload, await chat.get_headers(api_key))
+    if not response:
+        raise HTTPException(status_code=500, detail="Service error!")
+    records = [
+        {
+            "content": chunk["content"],
+            "score": chunk["similarity"],
+            "title": chunk.get("document_keyword", "Unknown Document"),
+            "metadata": {"document_id": chunk["document_id"]}
+        }
+        for chunk in response.get("data", {}).get("chunks", [])
+    ]
+    return records
+
+
+async def add_complex_log(db, message_id, chat_id, session_id, chat_mode, query, user_id, mode, agent_type,
+                          message_type, conversation_id="", node_data=None, query_data=None):
+    if not node_data:
+        node_data = []
+    if not query_data:
+        query_data = {}
+    try:
+        complex_log = ComplexChatSessionDao(db)
+        if not conversation_id:
+            session = await complex_log.get_session_by_session_id(session_id, chat_id)
+            if session:
+                conversation_id = session.conversation_id
+        await complex_log.create_session(message_id,
+                                         chat_id=chat_id,
+                                         session_id=session_id,
+                                         chat_mode=chat_mode,
+                                         message_type=message_type,
+                                         content=query,
+                                         event_type=mode,
+                                         tenant_id=user_id,
+                                         conversation_id=conversation_id,
+                                         node_data=json.dumps(node_data),
+                                         query=json.dumps(query_data),
+                                         agent_type=agent_type)
+        return conversation_id, True
+    except Exception as e:
+        logger.error(e)
+        return conversation_id, False
+
+
+async def service_complex_chat(db, chat_id, mode, user_id, chat_request: ChatDataRequest):
+    answer_event = ""
+    answer_agent = ""
+    answer_workflow = ""
+    download_url = ""
+    message_id = ""
+    task_id = ""
+    error = ""
+    files = []
+    node_list = []
+    token = await get_chat_token(db, chat_id)
+    chat, url = await get_chat_object(mode)
+    conversation_id, message = await add_complex_log(db, str(uuid.uuid4()), chat_id, chat_request.sessionId,
+                                                     chat_request.chatMode, chat_request.query, user_id, mode,
+                                                     DF_TYPE, 1, query_data=chat_request.to_dict())
+    if not message:
+        yield "data: " + json.dumps({"message": smart_message_error,
+                                     "error": "\n**ERROR**: Failed to create the session!", "status": http_500},
+                                    ensure_ascii=False) + "\n\n"
+        return
+    inputs = {"is_deep": chat_request.isDeep}
+    if chat_request.chatMode == complex_knowledge_chat:
+        inputs["query_json"] = json.dumps({"query": chat_request.query, "dataset_ids": chat_request.knowledgeId})
+
+    try:
+        async for ans in chat.chat_completions(url,
+                                               await chat.complex_request_data(chat_request.query, conversation_id,
+                                                                               str(user_id), files=chat_request.files,
+                                                                               inputs=inputs),
+                                               await chat.get_headers(token)):
+            # print(ans)
+            data = {}
+            status = http_200
+            conversation_id = ans.get("conversation_id")
+            task_id = ans.get("task_id")
+            if ans.get("event") == message_error:
+                error = ans.get("message", "Invalid parameters!")
+                status = http_400
+                event = smart_message_error
+            elif ans.get("event") == message_agent:
+                data = {"answer": ans.get("answer", ""), "id": ans.get("message_id", "")}
+                answer_agent += ans.get("answer", "")
+                message_id = ans.get("message_id", "")
+                event = smart_message_stream
+            elif ans.get("event") == message_event:
+                data = {"answer": ans.get("answer", ""), "id": ans.get("message_id", "")}
+                answer_event += ans.get("answer", "")
+                message_id = ans.get("message_id", "")
+                event = smart_message_stream
+            elif ans.get("event") == message_file:
+                data = {"url": ans.get("url", ""), "id": ans.get("id", ""),
+                        "type": ans.get("type", "")}
+                files.append(data)
+                event = smart_message_file
+            elif ans.get("event") in [workflow_started, node_started, node_finished]:
+                data = ans.get("data", {})
+                data["inputs"] = await data_process(data.get("inputs", {}))
+                data["outputs"] = await data_process(data.get("outputs", {}))
+                data["files"] = await data_process(data.get("files", []))
+                data["process_data"] = ""
+                if data.get("status") == "failed":
+                    status = http_500
+                    error = data.get("error", "")
+                node_list.append(ans)
+                event = [smart_workflow_started, smart_node_started, smart_node_finished][
+                    [workflow_started, node_started, node_finished].index(ans.get("event"))]
+            elif ans.get("event") == workflow_finished:
+                data = ans.get("data", {})
+                answer_workflow = data.get("outputs", {}).get("output", data.get("outputs", {}).get("answer"))
+                download_url = data.get("outputs", {}).get("download_url")
+                event = smart_workflow_finished
+                if data.get("status") == "failed":
+                    status = http_500
+                    error = data.get("error", "")
+                node_list.append(ans)

+            elif ans.get("event") == message_end:
+                event = smart_message_end
+            else:
+                continue
+
+            yield "data: " + json.dumps(
+                {"event": event, "data": data, "error": error, "status": status, "task_id": task_id,
+                 "message_id": message_id, "session_id": chat_request.sessionId},
+                ensure_ascii=False) + "\n\n"
+
+    except Exception as e:
+        logger.error(e)
+        try:
+            yield "data: " + json.dumps({"message": smart_message_error,
+                                         "error": "\n**ERROR**: " + str(e), "status": http_500},
+                                        ensure_ascii=False) + "\n\n"
+        except:
+            ...
+    finally:
+        # await update_session_log(db, session_id, {"role": "assistant",
+        #                                           "answer": answer_event or answer_agent or answer_workflow or error,
+        #                                           "download_url": download_url,
+        #                                           "node_list": node_list, "task_id": task_id, "id": message_id,
+        #                                           "error": error}, conversation_id)
+        if message_id:
+            await add_complex_log(db, message_id, chat_id, chat_request.sessionId, chat_request.chatMode,
+                                  answer_event or answer_agent or answer_workflow or error, user_id, mode, DF_TYPE, 2,
+                                  conversation_id, node_data=node_list, query_data=chat_request.to_dict())
+
+
+async def service_complex_upload(db, chat_id, file, user_id):
+    files = []
+    token = await get_chat_token(db, chat_id)
+    if not token:
+        return files
+    url = settings.dify_base_url + DF_UPLOAD_FILE
+    chat = ChatBaseApply()
+    for f in file:
+        try:
+            file_content = await f.read()
+            file_upload = await chat.chat_upload(url, {"file": (f.filename, file_content)}, {"user": str(user_id)},
+                                                 {'Authorization': f'Bearer {token}'})
+            # try:
+            #     tokens = await read_file(file_content, f.filename, f.content_type)
+            #     file_upload["tokens"] = tokens
+            # except:
+            #     ...
+            files.append(file_upload)
+        except Exception as e:
+            logger.error(e)
+    return json.dumps(files) if files else ""
+
+
+if __name__ == "__main__":
+    q = json.dumps({"query": "device", "dataset_ids": ["fc68db52f43111efb94a0242ac120004"]})
+    knowledge_id = "fc68db52f43111efb94a0242ac120004"  # fallback dataset id used when q is not valid JSON
+    top_k = 2
+    similarity_threshold = 0.5
+    api_key = "ragflow-Y4MGYwY2JlZjM2YjExZWY4ZWU5MDI0Mm"
+
+    # service_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key)
+    # is a coroutine, so it must be awaited:
+    async def a():
+        b = await service_chunk_retrieval(q, knowledge_id, top_k, similarity_threshold, api_key)
+        print(b)
+
+    asyncio.run(a())
-- 
Gitblit v1.8.0
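
The service_complex_chat generator above streams Server-Sent Events: one "data: {...}" frame per event, separated by blank lines. A minimal client sketch for consuming such a stream follows; the /v2/chat/complex route, the payload field names, the numeric 200 status, and the use of httpx are illustrative assumptions, since this patch only adds the service layer, not the HTTP route.

import asyncio
import json

import httpx  # assumed client; any HTTP client that can iterate response lines works


async def consume_chat_stream(base_url: str, payload: dict) -> None:
    # Hypothetical endpoint that proxies service_complex_chat (not defined in this patch).
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("POST", base_url + "/v2/chat/complex", json=payload) as resp:
            async for line in resp.aiter_lines():
                if not line.startswith("data: "):
                    continue  # skip the blank separator lines between frames
                frame = json.loads(line[len("data: "):])
                # Each frame mirrors the dict yielded by service_complex_chat:
                # event, data, error, status, task_id, message_id, session_id.
                if frame.get("status") not in (None, 200):  # assumes http_200 serializes as 200
                    print("stream error:", frame.get("error"))
                    break
                print(frame.get("event"), frame.get("data"))


if __name__ == "__main__":
    asyncio.run(consume_chat_stream(
        "http://localhost:8000",
        {"query": "hello", "sessionId": "demo-session", "chatMode": "agent", "isDeep": False},
    ))

Reading frame-by-frame rather than buffering the whole response is what lets the client render smart_message_stream deltas incrementally and surface smart_node_started / smart_node_finished progress while the workflow is still running.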