import asyncio
import io
import json
import uuid

import fitz  # PyMuPDF, used for PDF text extraction
from fastapi import HTTPException
from docx import Document
from dashscope import get_tokenizer  # requires dashscope >= 1.14.0

from Log import logger
from app.config.agent_base_url import RG_CHAT_DIALOG, DF_CHAT_AGENT, DF_CHAT_PARAMETERS, RG_CHAT_SESSIONS, \
    DF_CHAT_WORKFLOW, DF_UPLOAD_FILE, RG_ORIGINAL_URL
from app.config.config import settings
from app.config.const import *
from app.config.const import max_chunk_size
from app.models import DialogModel, ApiTokenModel, UserTokenModel, ComplexChatSessionDao, ChatDataRequest, \
    ComplexChatDao
from app.models.v2.session_model import ChatSessionDao, ChatData
from app.service.v2.app_driver.chat_agent import ChatAgent
from app.service.v2.app_driver.chat_data import ChatBaseApply
from app.service.v2.app_driver.chat_dialog import ChatDialog
from app.service.v2.app_driver.chat_workflow import ChatWorkflow


async def update_session_log(db, session_id: str, message: dict, conversation_id: str):
    # Persist the latest assistant message for an existing chat session.
    await ChatSessionDao(db).update_session_by_id(
        session_id=session_id,
        session=None,
        message=message,
        conversation_id=conversation_id
    )


async def add_session_log(db, session_id: str, question: str, chat_id: str, user_id, event_type: str,
                          conversation_id: str, agent_type):
    # Create or update the session record and append the user question to its log.
    try:
        session = await ChatSessionDao(db).update_or_insert_by_id(
            session_id=session_id,
            name=question[:255],
            agent_id=chat_id,
            agent_type=agent_type,
            tenant_id=user_id,
            message={"role": "user", "content": question},
            conversation_id=conversation_id,
            event_type=event_type
        )
        return session
    except Exception as e:
        logger.error(e)
        return None


async def get_app_token(db, app_id):
    app_token = db.query(UserTokenModel).filter_by(id=app_id).first()
    if app_token:
        return app_token.access_token
    return ""


async def get_chat_token(db, app_id):
    app_token = db.query(ApiTokenModel).filter_by(app_id=app_id).first()
    if app_token:
        return app_token.token
    return ""


async def add_chat_token(db, data):
    try:
        api_token = ApiTokenModel(**data)
        db.add(api_token)
        db.commit()
    except Exception as e:
        logger.error(e)


async def get_chat_info(db, chat_id: str):
    return db.query(DialogModel).filter_by(id=chat_id, status=Dialog_STATSU_ON).first()


async def get_chat_object(mode):
    if mode == workflow_chat:
        url = settings.dify_base_url + DF_CHAT_WORKFLOW
        return ChatWorkflow(), url
    else:
        url = settings.dify_base_url + DF_CHAT_AGENT
        return ChatAgent(), url
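
# e.g. `chat, url = await get_chat_object(workflow_chat)` returns the workflow
# client and the Dify workflow URL; any other mode gets the agent client.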


async def service_chat_dialog(db, chat_id: str, question: str, session_id: str, user_id, mode: str):
    # Stream a RAGFlow dialog answer as SSE chunks and persist the conversation log.
    conversation_id = ""
    token = await get_chat_token(db, rg_api_token)
    url = settings.fwr_base_url + RG_CHAT_DIALOG.format(chat_id)
    chat = ChatDialog()
    session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id, RG_TYPE)
    if session:
        conversation_id = session.conversation_id
    message = {"role": "assistant", "answer": "", "reference": {}}
    try:
        async for ans in chat.chat_completions(url, await chat.request_data(question, conversation_id),
                                               await chat.get_headers(token)):
            data = {}
            error = ""
            status = http_200
            if ans.get("code", None) == 102:
                error = ans.get("message", "请输入你的问题!")
                status = http_400
                event = smart_message_error
            else:
                if isinstance(ans.get("data"), bool) and ans.get("data") is True:
                    data = {}
                    event = smart_message_end
                else:
                    data = ans.get("data", {})
                    if "session_id" in data:
                        del data["session_id"]
                    message = data
                    event = smart_message_cover
            message_str = "data: " + json.dumps(
                {"event": event, "data": data, "error": error, "status": status, "session_id": session_id},
                ensure_ascii=False) + "\n\n"
            for i in range(0, len(message_str), max_chunk_size):
                chunk = message_str[i:i + max_chunk_size]
                yield chunk  # send the SSE payload in bounded chunks
            await ChatSessionDao(db).update_session_by_id(
                session_id=session_id,
                session=None,
                message=message
            )
    except Exception as e:
        logger.error(e)
        try:
            yield "data: " + json.dumps({"message": smart_message_error,
                                         "error": "\n**ERROR**: " + str(e), "status": http_500},
                                        ensure_ascii=False) + "\n\n"
        except:
            ...
    finally:
        message["role"] = "assistant"
        await update_session_log(db, session_id, message, conversation_id)
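
# Usage sketch, kept as comments so the module stays import-safe. The async
# generators in this module are designed to be wrapped in an SSE streaming
# response; `router`, `get_db`, `body.session_id`, and `user_id` below are
# hypothetical stand-ins for the project's real API wiring:
#
#   from fastapi import Depends
#   from fastapi.responses import StreamingResponse
#
#   @router.post("/chat/{chat_id}")
#   async def chat_endpoint(chat_id: str, body: ChatData, db=Depends(get_db)):
#       gen = service_chat_dialog(db, chat_id, body.query, body.session_id, user_id, "chat")
#       return StreamingResponse(gen, media_type="text/event-stream")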


async def data_process(data):
    # Recursively replace "dify" with "smart" in strings, dict keys and values,
    # and list items, so the upstream vendor name never leaks to clients.
    if isinstance(data, str):
        return data.replace("dify", "smart")
    elif isinstance(data, dict):
        for k in list(data.keys()):
            if isinstance(k, str) and "dify" in k:
                new_k = k.replace("dify", "smart")
                data[new_k] = await data_process(data[k])
                del data[k]
            else:
                data[k] = await data_process(data[k])
        return data
    elif isinstance(data, list):
        for i in range(len(data)):
            data[i] = await data_process(data[i])
        return data
    else:
        return data
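
# Illustrative example: await data_process({"dify_key": "made with dify"})
# returns {"smart_key": "made with smart"}.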


async def service_chat_workflow(db, chat_id: str, chat_data: ChatData, session_id: str, user_id, mode: str):
    # Stream a Dify agent/workflow run as SSE frames, mapping Dify events to the
    # smart_* event names and accumulating answers for the session log.
    conversation_id = ""
    answer_event = ""
    answer_agent = ""
    answer_workflow = ""
    download_url = ""
    message_id = ""
    task_id = ""
    error = ""
    files = []
    node_list = []
    token = await get_chat_token(db, chat_id)
    chat, url = await get_chat_object(mode)
    if hasattr(chat_data, "query"):
        query = chat_data.query
    else:
        query = "start new conversation"
    session = await add_session_log(db, session_id, query if query else "start new conversation", chat_id, user_id,
                                    mode, conversation_id, DF_TYPE)
    if session:
        conversation_id = session.conversation_id
    try:
        async for ans in chat.chat_completions(url,
                                               await chat.request_data(query, conversation_id, str(user_id), chat_data),
                                               await chat.get_headers(token)):
            data = {}
            status = http_200
            conversation_id = ans.get("conversation_id")
            task_id = ans.get("task_id")
            if ans.get("event") == message_error:
                error = ans.get("message", "参数异常!")
                status = http_400
                event = smart_message_error
            elif ans.get("event") == message_agent:
                data = {"answer": ans.get("answer", ""), "id": ans.get("message_id", "")}
                answer_agent += ans.get("answer", "")
                message_id = ans.get("message_id", "")
                event = smart_message_stream
            elif ans.get("event") == message_event:
                data = {"answer": ans.get("answer", ""), "id": ans.get("message_id", "")}
                answer_event += ans.get("answer", "")
                message_id = ans.get("message_id", "")
                event = smart_message_stream
            elif ans.get("event") == message_file:
                data = {"url": ans.get("url", ""), "id": ans.get("id", ""),
                        "type": ans.get("type", "")}
                files.append(data)
                event = smart_message_file
            elif ans.get("event") in [workflow_started, node_started, node_finished]:
                data = ans.get("data", {})
                data["inputs"] = await data_process(data.get("inputs", {}))
                data["outputs"] = await data_process(data.get("outputs", {}))
                data["files"] = await data_process(data.get("files", []))
                data["process_data"] = ""
                if data.get("status") == "failed":
                    status = http_500
                    error = data.get("error", "")
                node_list.append(ans)
                event = [smart_workflow_started, smart_node_started, smart_node_finished][
                    [workflow_started, node_started, node_finished].index(ans.get("event"))]
            elif ans.get("event") == workflow_finished:
                data = ans.get("data", {})
                answer_workflow = data.get("outputs", {}).get("output", data.get("outputs", {}).get("answer"))
                download_url = data.get("outputs", {}).get("download_url")
                event = smart_workflow_finished
                if data.get("status") == "failed":
                    status = http_500
                    error = data.get("error", "")
                node_list.append(ans)
            elif ans.get("event") == message_end:
                event = smart_message_end
            else:
                continue

            yield "data: " + json.dumps(
                {"event": event, "data": data, "error": error, "status": status, "task_id": task_id,
                 "session_id": session_id},
                ensure_ascii=False) + "\n\n"

    except Exception as e:
        logger.error(e)
        try:
            yield "data: " + json.dumps({"message": smart_message_error,
                                         "error": "\n**ERROR**: " + str(e), "status": http_500},
                                        ensure_ascii=False) + "\n\n"
        except:
            ...
    finally:
        await update_session_log(db, session_id, {"role": "assistant",
                                                  "answer": answer_event or answer_agent or answer_workflow or error,
                                                  "download_url": download_url,
                                                  "node_list": node_list, "task_id": task_id, "id": message_id,
                                                  "error": error}, conversation_id)

    yield "data: " + json.dumps({"message": "message_end",
                                 "data": {}},
                                ensure_ascii=False) + "\n\n"
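
# Each `yield` above emits one SSE "data:" line. An illustrative (not literal)
# frame for a finished node, assuming the smart_* constants are plain strings
# defined in app.config.const:
#
#   data: {"event": "node_finished", "data": {...}, "error": "", "status": 200,
#          "task_id": "<uuid>", "session_id": "<uuid>"}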


async def service_chat_basic(db, chat_id: str, chat_data: ChatData, session_id: str, user_id, mode: str):
    if chat_id == basic_report_talk:
        complex_chat = await ComplexChatDao(db).get_complex_chat_by_mode(chat_data.report_mode)
        if complex_chat:
            ...


async def service_chat_parameters(db, chat_id, user_id):
    chat_info = db.query(DialogModel).filter_by(id=chat_id).first()
    if not chat_info:
        return {}
    return chat_info.parameters


async def service_chat_sessions(db, chat_id, name):
    token = await get_chat_token(db, rg_api_token)
    if not token:
        return {}
    url = settings.fwr_base_url + RG_CHAT_SESSIONS.format(chat_id)
    chat = ChatDialog()
    return await chat.chat_sessions(url, {"name": name}, await chat.get_headers(token))


async def service_chat_sessions_list(db, chat_id, current, page_size, user_id, keyword):
    total, session_list = await ChatSessionDao(db).get_session_list(
        user_id=user_id,
        agent_id=chat_id,
        keyword=keyword,
        page=current,
        page_size=page_size
    )
    return json.dumps({"total": total, "rows": [session.to_dict() for session in session_list]})


async def service_chat_session_log(db, session_id):
    session_log = await ChatSessionDao(db).get_session_by_id(session_id)
    if not session_log:
        return {}
    log_info = session_log.log_to_json()
    if session_log.event_type == complex_chat:
        total, message_list = await ComplexChatSessionDao(db).get_session_list(session_id)
        log_info["message"] = [message.log_to_json() for message in message_list[::-1]]
    return json.dumps(log_info)


async def service_chat_upload(db, chat_id, file, user_id):
    files = []
    token = await get_chat_token(db, chat_id)
    if not token:
        return files
    url = settings.dify_base_url + DF_UPLOAD_FILE
    chat = ChatBaseApply()
    for f in file:
        try:
            file_content = await f.read()
            file_upload = await chat.chat_upload(url, {"file": (f.filename, file_content)}, {"user": str(user_id)},
                                                 {'Authorization': f'Bearer {token}'})
            try:
                tokens = await read_file(file_content, f.filename, f.content_type)
                file_upload["tokens"] = tokens
            except:
                # token counting is best-effort; the upload still succeeds if it fails
                ...
            files.append(file_upload)
        except Exception as e:
            logger.error(e)
    return json.dumps(files) if files else ""


async def get_str_token(input_str):
    # Get the tokenizer object; currently only the Qwen model series is supported.
    tokenizer = get_tokenizer('qwen-turbo')
    # Split the string into tokens and convert them to token ids.
    tokens = tokenizer.encode(input_str)
    return len(tokens)
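
# e.g. `await get_str_token("hello world")` returns the number of qwen-turbo
# tokens in the string (the length of the encoded id list).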


async def read_pdf(pdf_stream):
    # Extract plain text from every page of a PDF byte stream.
    text = ""
    with fitz.open(stream=pdf_stream, filetype="pdf") as pdf_document:
        for page in pdf_document:
            text += page.get_text()
    return text


async def read_word(word_stream):
    # Open the Word file stream with python-docx and collect each paragraph's text.
    doc = Document(io.BytesIO(word_stream))
    text = ""
    for para in doc.paragraphs:
        text += para.text
    return text


async def read_file(file, filename, content_type):
    # Route the raw bytes to the matching extractor, then return the token count.
    text = ""
    if content_type == "application/pdf" or filename.endswith('.pdf'):
        text = await read_pdf(file)
    elif content_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" or filename.endswith(
            '.docx'):
        text = await read_word(file)
    return await get_str_token(text)
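
# Usage sketch (hypothetical values):
#   tokens = await read_file(raw_bytes, "report.pdf", "application/pdf")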


async def service_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key):
    # `query` may be a JSON string ({"query": ..., "dataset_ids": [...]}), a
    # single-quoted variant of it, or a plain question; fall back step by step.
    def build_payload(question, dataset_ids):
        return {
            "question": question,
            "dataset_ids": dataset_ids,
            "page_size": top_k,
            "similarity_threshold": similarity_threshold if similarity_threshold else 0.2
        }

    try:
        request_data = json.loads(query)
        payload = build_payload(request_data.get("query", ""), request_data.get("dataset_ids", []))
    except json.JSONDecodeError:
        try:
            request_data = json.loads(query.replace("'", '"'))
            payload = build_payload(request_data.get("query", ""), request_data.get("dataset_ids", []))
        except Exception:
            payload = build_payload(query, [knowledge_id])
    url = settings.fwr_base_url + RG_ORIGINAL_URL
    chat = ChatBaseApply()
    response = await chat.chat_post(url, payload, await chat.get_headers(api_key))
    if not response:
        raise HTTPException(status_code=500, detail="服务异常!")
    records = [
        {
            "content": chunk["content"],
            "score": chunk["similarity"],
            "title": chunk.get("document_keyword", "Unknown Document"),
            "metadata": {"document_id": chunk["document_id"],
                         "path": f"{settings.fwr_base_url}/document/{chunk['document_id']}?ext={chunk.get('document_keyword').split('.')[-1]}&prefix=document",
                         "highlight": chunk.get("highlight"), "image_id": chunk.get("image_id"),
                         "positions": chunk.get("positions")}
        }
        for chunk in response.get("data", {}).get("chunks", [])
    ]
    return records


async def service_base_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key):
    payload = {
        "question": query,
        "dataset_ids": [knowledge_id],
        "page_size": top_k,
        "similarity_threshold": similarity_threshold
    }
    url = settings.fwr_base_url + RG_ORIGINAL_URL
    chat = ChatBaseApply()
    response = await chat.chat_post(url, payload, await chat.get_headers(api_key))
    if not response:
        raise HTTPException(status_code=500, detail="服务异常!")
    records = [
        {
            "content": chunk["content"],
            "score": chunk["similarity"],
            "title": chunk.get("document_keyword", "Unknown Document"),
            "metadata": {"document_id": chunk["document_id"]}
        }
        for chunk in response.get("data", {}).get("chunks", [])
    ]
    return records


async def add_complex_log(db, message_id, chat_id, session_id, chat_mode, query, user_id, mode, agent_type,
                          message_type, conversation_id="", node_data=None, query_data=None):
    # Write one message of a complex chat to the log; returns (conversation_id, ok).
    if not node_data:
        node_data = []
    if not query_data:
        query_data = {}
    try:
        complex_log = ComplexChatSessionDao(db)
        if not conversation_id:
            session = await complex_log.get_session_by_session_id(session_id, chat_id)
            if session:
                conversation_id = session.conversation_id
        await complex_log.create_session(message_id,
                                         chat_id=chat_id,
                                         session_id=session_id,
                                         chat_mode=chat_mode,
                                         message_type=message_type,
                                         content=query,
                                         event_type=mode,
                                         tenant_id=user_id,
                                         conversation_id=conversation_id,
                                         node_data=json.dumps(node_data),
                                         query=json.dumps(query_data),
                                         agent_type=agent_type)
        return conversation_id, True
    except Exception as e:
        logger.error(e)
        return conversation_id, False


async def add_query_files(db, message_id):
    query = {}
    complex_log = await ComplexChatSessionDao(db).get_session_by_id(message_id)
    if complex_log:
        query = json.loads(complex_log.query)
    return query.get("files", [])


async def service_complex_chat(db, chat_id, mode, user_id, chat_request: ChatDataRequest):
    answer_event = ""
    answer_agent = ""
    answer_workflow = ""
    download_url = ""
    message_id = ""
    task_id = ""
    error = ""
    node_list = []
    conversation_id = ""
    query_data = chat_request.to_dict()
    new_message_id = str(uuid.uuid4())
    inputs = {"is_deep": chat_request.isDeep}
    files = chat_request.files
    if chat_request.chatMode == complex_knowledge_chat:
        inputs["query_json"] = json.dumps({"query": chat_request.query, "dataset_ids": chat_request.knowledgeId})
    elif chat_request.chatMode == complex_content_optimization_chat:
        inputs["type"] = chat_request.optimizeType
    elif chat_request.chatMode == complex_dialog_chat:
        if not files and chat_request.parentId:
            files = await add_query_files(db, chat_request.parentId)
    if chat_request.chatMode != complex_content_optimization_chat:
        await add_session_log(db, chat_request.sessionId, chat_request.query if chat_request.query else "未命名会话",
                              chat_id, user_id, mode, "", DF_TYPE)
    conversation_id, message = await add_complex_log(db, new_message_id, chat_id, chat_request.sessionId,
                                                     chat_request.chatMode, chat_request.query, user_id, mode,
                                                     DF_TYPE, 1, query_data=query_data)
    if not message:
        yield "data: " + json.dumps({"message": smart_message_error,
                                     "error": "\n**ERROR**: 创建会话失败!", "status": http_500},
                                    ensure_ascii=False) + "\n\n"
        return
    query_data["parentId"] = new_message_id
    try:
        token = await get_chat_token(db, chat_id)
        chat, url = await get_chat_object(mode)
        async for ans in chat.chat_completions(url,
                                               await chat.complex_request_data(chat_request.query, conversation_id,
                                                                               str(user_id), files=files,
                                                                               inputs=inputs),
                                               await chat.get_headers(token)):
            data = {}
            status = http_200
            conversation_id = ans.get("conversation_id")
            task_id = ans.get("task_id")
            if ans.get("event") == message_error:
                error = ans.get("message", "参数异常!")
                status = http_400
                event = smart_message_error
            elif ans.get("event") == message_agent:
                data = {"answer": ans.get("answer", ""), "id": ans.get("message_id", "")}
                answer_agent += ans.get("answer", "")
                message_id = ans.get("message_id", "")
                event = smart_message_stream
            elif ans.get("event") == message_event:
                data = {"answer": ans.get("answer", ""), "id": ans.get("message_id", "")}
                answer_event += ans.get("answer", "")
                message_id = ans.get("message_id", "")
                event = smart_message_stream
            elif ans.get("event") == message_file:
                data = {"url": ans.get("url", ""), "id": ans.get("id", ""),
                        "type": ans.get("type", "")}
                files.append(data)
                event = smart_message_file
            elif ans.get("event") in [workflow_started, node_started, node_finished]:
                data = ans.get("data", {})
                data["inputs"] = await data_process(data.get("inputs", {}))
                data["outputs"] = await data_process(data.get("outputs", {}))
                data["files"] = await data_process(data.get("files", []))
                data["process_data"] = ""
                if data.get("status") == "failed":
                    status = http_500
                    error = data.get("error", "")
                node_list.append(ans)
                event = [smart_workflow_started, smart_node_started, smart_node_finished][
                    [workflow_started, node_started, node_finished].index(ans.get("event"))]
            elif ans.get("event") == workflow_finished:
                data = ans.get("data", {})
                answer_workflow = data.get("outputs", {}).get("output", data.get("outputs", {}).get("answer"))
                download_url = data.get("outputs", {}).get("download_url")
                event = smart_workflow_finished
                if data.get("status") == "failed":
                    status = http_500
                    error = data.get("error", "")
                node_list.append(ans)
            elif ans.get("event") == message_end:
                event = smart_message_end
            else:
                continue

            yield "data: " + json.dumps(
                {"event": event, "data": data, "error": error, "status": status, "task_id": task_id,
                 "message_id": message_id, "parent_id": new_message_id,
                 "session_id": chat_request.sessionId},
                ensure_ascii=False) + "\n\n"

    except Exception as e:
        logger.error(e)
        try:
            yield "data: " + json.dumps({"message": smart_message_error,
                                         "error": "\n**ERROR**: " + str(e), "status": http_500},
                                        ensure_ascii=False) + "\n\n"
        except:
            ...
    finally:
        if message_id:
            await add_complex_log(db, message_id, chat_id, chat_request.sessionId, chat_request.chatMode,
                                  answer_event or answer_agent or answer_workflow or error, user_id, mode, DF_TYPE,
                                  2, conversation_id, node_data=node_list, query_data=query_data)


async def service_complex_upload(db, chat_id, file, user_id):
    files = []
    token = await get_chat_token(db, chat_id)
    if not token:
        return files
    url = settings.dify_base_url + DF_UPLOAD_FILE
    chat = ChatBaseApply()
    for f in file:
        try:
            file_content = await f.read()
            file_upload = await chat.chat_upload(url, {"file": (f.filename, file_content)}, {"user": str(user_id)},
                                                 {'Authorization': f'Bearer {token}'})
            files.append(file_upload)
        except Exception as e:
            logger.error(e)
    return json.dumps(files) if files else ""


if __name__ == "__main__":
    q = json.dumps({"query": "设备", "dataset_ids": ["fc68db52f43111efb94a0242ac120004"]})
    top_k = 2
    similarity_threshold = 0.5
    api_key = "ragflow-Y4MGYwY2JlZjM2YjExZWY4ZWU5MDI0Mm"

    async def demo():
        # knowledge_id is only the fallback used when `q` fails to parse as
        # JSON, so an empty string is fine for this smoke test.
        records = await service_chunk_retrieval(q, "", top_k, similarity_threshold, api_key)
        print(records)

    asyncio.run(demo())