| | |
| | | for i in session.log_to_json().get("message", []): |
| | | if i.get("role") == "user": |
| | | tmp_data["question"] = i.get("content") |
| | | if "type" in i.get("content"): |
| | | tmp_data["type"] = i.get("content")["type"] |
| | | elif i.get("role") == "assistant": |
| | | if isinstance(i.get("content"), dict): |
| | | tmp_data["answer"] = i.get("content", {}).get("answer") |
| | | if "file_name" in i.get("content", {}): |
| | | tmp_data["files"] = [{"file_name": i.get("content", {}).get("file_name"), |
| | | "file_url": i.get("content", {}).get("file_url")}] |
| | | content = i.get("content", {}) |
| | | tmp_data["answer"] = content.get("answer") |
| | | if "file_name" in content: |
| | | tmp_data["files"] = [{"file_name": content.get("file_name"), |
| | | "file_url": content.get("file_url")}] |
| | | if "images" in i.get("content", {}): |
| | | tmp_data["images"] = i.get("content", {}).get("images") |
| | | tmp_data["images"] = content.get("images") |
| | | |
| | | if "download_url" in i.get("content", {}): |
| | | tmp_data["download_url"] = i.get("content", {}).get("download_url") |
| | | if "download_url" in content: |
| | | tmp_data["download_url"] = content.get("download_url") |
| | | |
| | | if "node_list" in content: |
| | | node_dict = { |
| | | "node_data": [], # {"title": "去除冗余", # 节点名称 "status": "succeeded", # 节点状态"created_at": 1735817337, # 开始时间"finished_at": 1735817337, # 结束时间"error": "" # 错误日志} |
| | | "total_tokens": 0, # 花费token数 |
| | | "created_at": 0, # 开始时间 |
| | | "finished_at": 0, # 结束时间 |
| | | "status": "succeeded", # 工作流状态 |
| | | "error": "", # 错误日志 |
| | | } |
| | | for node in content["node_list"]: |
| | | if node.get("event") == "node_finished": |
| | | node_dict["node_data"].append({ |
| | | "title": node.get("data", {}).get("title", ""), |
| | | "status": node.get("data", {}).get("status", ""), |
| | | "created_at":node.get("data", {}).get("created_at", 0), |
| | | "finished_at":node.get("data", {}).get("finished_at", 0), |
| | | "node_type":node.get("data", {}).get("node_type", 0), |
| | | "elapsed_time":node.get("data", {}).get("elapsed_time", 0), |
| | | "error":node.get("data", {}).get("error", ""), |
| | | }) |
| | | elif node.get("event") == "workflow_finished": |
| | | node_dict["total_tokens"] = node.get("data", {}).get("total_tokens", 0) |
| | | node_dict["created_at"] = node.get("data", {}).get("created_at", 0) |
| | | node_dict["finished_at"] = node.get("data", {}).get("finished_at", 0) |
| | | node_dict["status"] = node.get("data", {}).get("status", "") |
| | | node_dict["error"] = node.get("data", {}).get("error", "") |
| | | node_dict["elapsed_time"] = node.get("data", {}).get("elapsed_time", 0) |
| | | tmp_data["workflow"] = node_dict |
| | | |
| | | else: |
| | | tmp_data["answer"] = i.get("content") |
| | | data.append(tmp_data) |
| | |
| | | return Response(code=404, msg="Agent not found") |
| | | |
| | | return Response(code=200, msg="", data={"chat_id": uuid.uuid4().hex}) |
| | | |
| | | |
| | |
| | | title if title else title_query, |
| | | agent_id, |
| | | AgentType.DIFY, |
| | | current_user.id |
| | | current_user.id, |
| | | {"role": "user", "content": title if title else title_query, "type": workflow_type, "is_clean":is_clean}, |
| | | workflow_type |
| | | ) |
| | | conversation_id = session.conversation_id |
| | | except Exception as e: |
| | |
| | | "upload_file_id": "" |
| | | }) |
| | | inputs_list = [] |
| | | token_list = [] |
| | | if workflow_type == 1: |
| | | inputs["input_files"] = files |
| | | inputs_list.append(inputs) |
| | | token_list.append(token) |
| | | elif workflow_type == 2 and is_clean == 0: |
| | | inputs["Completion_of_main_indicators"] = title |
| | | inputs_list.append({"inputs": inputs, "token": token, "workflow_type": workflow_type}) |
| | | elif workflow_type == 2: |
| | | inputs["file_list"] = files |
| | | inputs["Completion_of_main_indicators"] = title |
| | | inputs["sub_titles"] = sub_titles |
| | |
| | | if not token: |
| | | await websocket.send_json( |
| | | {"message": "Invalid token document_to_report", "type": "error"}) |
| | | inputs_list.append(inputs) |
| | | token_list.append(token) |
| | | elif workflow_type == 3: |
| | | inputs_list.append({"inputs": inputs, "token": token, "workflow_type": workflow_type}) |
| | | elif workflow_type == 3 and is_clean == 0 and tokens < max_token: |
| | | inputs["file_list"] = files |
| | | inputs["number_of_title"] = title_number |
| | | inputs["title_style"] = title_style |
| | |
| | | if not token: |
| | | await websocket.send_json( |
| | | {"message": "Invalid token document_to_title", "type": "error"}) |
| | | # inputs_list.append(inputs) |
| | | # token_list.append(token) |
| | | elif workflow_type == 2 and is_clean == 1: |
| | | # inputs["input_files"] = files |
| | | inputs_list.append(inputs) |
| | | token_list.append(token) |
| | | inputs_list.append({"inputs": inputs, "token": token, "workflow_type": workflow_type}) |
| | | elif workflow_type == 3 and is_clean == 1 or tokens >= max_token: |
| | | inputs["input_files"] = files |
| | | inputs["Completion_of_main_indicators"] = title |
| | | inputs_list.append({"inputs": inputs, "token": token, "workflow_type": 1}) |
| | | inputs1 = {} |
| | | # inputs1["file_list"] = files |
| | | inputs1["Completion_of_main_indicators"] = title |
| | | inputs1["sub_titles"] = sub_titles |
| | | token = DfTokenDao(db).get_token_by_id(DOCUMENT_TO_REPORT_TITLE) |
| | | inputs1["file_list"] = files |
| | | inputs1["number_of_title"] = title_number |
| | | inputs1["title_style"] = title_style |
| | | token = DfTokenDao(db).get_token_by_id(DOCUMENT_TO_TITLE) |
| | | if not token: |
| | | await websocket.send_json( |
| | | {"message": "Invalid token document_to_report", "type": "error"}) |
| | | inputs_list.append(inputs1) |
| | | token_list.append(token) |
| | | inputs_list.append({"inputs": inputs, "token": token, "workflow_type": 3}) |
| | | |
| | | # print(inputs_list) |
| | | for input in inputs_list: |
| | | i = input["inputs"] |
| | | if "file_list" in i: |
| | | i["file_list"] = files |
| | | node_list = [] |
| | | complete_response = "" |
| | | if workflow_type == 1 or workflow_type == 2: |
| | | for inputs in inputs_list: |
| | | inputs["input_files"] = files |
| | | async for rag_response in dify_service.workflow(token, current_user.id, inputs): |
| | | workflow_list = [] |
| | | workflow_dict = {} |
| | | if input["workflow_type"] == 1 or input["workflow_type"] == 2: |
| | | async for rag_response in dify_service.workflow(input["token"], current_user.id, i): |
| | | # print(rag_response) |
| | | try: |
| | | if rag_response[:5] == "data:": |
| | |
| | | complete_response += rag_response |
| | | try: |
| | | data = json.loads(complete_response) |
| | | # print(data) |
| | | node_list.append(data) |
| | | complete_response = "" |
| | | if data.get("event") == "node_started" or data.get("event") == "node_finished": # "event": "message_end" |
| | | if data.get("event") == "node_started": # "event": "message_end" |
| | | |
| | | if "data" not in data or not data["data"]: # 信息过滤 |
| | | logger.error("非法数据--------------------") |
| | | logger.error(data) |
| | |
| | | message = answer.get("title", "") |
| | | |
| | | result = {"message": message, "type": "system"} |
| | | elif data.get("event") == "node_finished": |
| | | workflow_list.append({ |
| | | "title": data.get("data", {}).get("title", ""), |
| | | "status": data.get("data", {}).get("status", ""), |
| | | "created_at":data.get("data", {}).get("created_at", 0), |
| | | "finished_at":data.get("data", {}).get("finished_at", 0), |
| | | "node_type":data.get("data", {}).get("node_type", 0), |
| | | "elapsed_time":data.get("data", {}).get("elapsed_time", 0), |
| | | "error":data.get("data", {}).get("error", ""), |
| | | }) |
| | | elif data.get("event") == "workflow_finished": |
| | | answer = data.get("data", "") |
| | | if isinstance(answer, str): |
| | |
| | | "url": download_url, |
| | | "upload_file_id": "" |
| | | }] |
| | | workflow_dict = { |
| | | "node_data": workflow_list, |
| | | "total_tokens": answer.get("total_tokens", 0), |
| | | "created_at": answer.get("created_at", 0), |
| | | "finished_at": answer.get("finished_at", 0), |
| | | "status": answer.get("status", ""), |
| | | "error": answer.get("error", ""), |
| | | "elapsed_time": answer.get("elapsed_time", 0) |
| | | } |
| | | result = {"message": message, "type": "message", "download_url": download_url} |
| | | try: |
| | | SessionService(db).update_session(chat_id, |
| | | message={"role": "assistant", |
| | | "content": { |
| | | "answer": message, |
| | | "node_list": node_list, |
| | | "download_url": download_url}}, |
| | | conversation_id=data.get( |
| | | "conversation_id")) |
| | |
| | | except Exception as e: |
| | | logger.error(e) |
| | | logger.error("返回客户端消息异常!") |
| | | result = {"message": "", "type": "close", "download_url": ""} |
| | | |
| | | result = {"message": "", "type": "close", "workflow": workflow_dict} |
| | | |
| | | |
| | | else: |
| | |
| | | result = {"message": f"内部错误: {e2}", "type": "close"} |
| | | await websocket.send_json(result) |
| | | print(f"Error process message of ragflow: {e2}") |
| | | elif workflow_type == 3: |
| | | elif input["workflow_type"] == 3: |
| | | image_list = [] |
| | | # print(inputs) |
| | | complete_response = "" |
| | | async for rag_response in dify_service.chat(token, current_user.id, title_query, [], |
| | | conversation_id, inputs): |
| | | print(rag_response) |
| | | async for rag_response in dify_service.chat(input["token"], current_user.id, title_query, [], |
| | | conversation_id, i): |
| | | # print(rag_response) |
| | | try: |
| | | if rag_response[:5] == "data:": |
| | | # 如果是,则截取掉前5个字符,并去除首尾空白符 |
| | |
| | | complete_response += rag_response |
| | | try: |
| | | data = json.loads(complete_response) |
| | | node_list.append(data) |
| | | complete_response = "" |
| | | if data.get("event") == "node_started" or data.get( |
| | | "event") == "node_finished": # "event": "message_end" |
| | | if data.get("event") == "node_started": # "event": "message_end" |
| | | if "data" not in data or not data["data"]: # 信息过滤 |
| | | logger.error("非法数据--------------------") |
| | | logger.error(data) |
| | |
| | | message = answer.get("title", "") |
| | | |
| | | result = {"message": message, "type": "system"} |
| | | elif data.get("event") == "node_finished": |
| | | workflow_list.append({ |
| | | "title": data.get("data", {}).get("title", ""), |
| | | "status": data.get("data", {}).get("status", ""), |
| | | "created_at":data.get("data", {}).get("created_at", 0), |
| | | "finished_at":data.get("data", {}).get("finished_at", 0), |
| | | "node_type":data.get("data", {}).get("node_type", 0), |
| | | "elapsed_time":data.get("data", {}).get("elapsed_time", 0), |
| | | "error":data.get("data", {}).get("error", ""), |
| | | }) |
| | | elif data.get("event") == "message": |
| | | message = data.get("answer", "") |
| | | # try: |
| | |
| | | message={"role": "assistant", |
| | | "content": { |
| | | "answer": message, |
| | | "node_list": node_list, |
| | | "download_url": ""}}, |
| | | conversation_id=data.get( |
| | | "conversation_id")) |
| | |
| | | # except Exception as e: |
| | | # logger.error(e) |
| | | # logger.error("返回客户端消息异常!") |
| | | |
| | | elif data.get("event") == "workflow_finished": |
| | | workflow_dict = { |
| | | "node_data": workflow_list, |
| | | "total_tokens": answer.get("total_tokens", 0), |
| | | "created_at": answer.get("created_at", 0), |
| | | "finished_at": answer.get("finished_at", 0), |
| | | "status": answer.get("status", ""), |
| | | "error": answer.get("error", ""), |
| | | "elapsed_time": answer.get("elapsed_time", 0) |
| | | } |
| | | elif data.get("event") == "message_end": |
| | | result = {"message": "", "type": "close", "download_url": ""} |
| | | result = {"message": "", "type": "close", "workflow": workflow_dict} |
| | | else: |
| | | continue |
| | | try: |
| | |
| | | await websocket.send_json(result) |
| | | print(f"Error process message of ragflow: {e2}") |
| | | elif agent.type == "documentIa": |
| | | print(122112) |
| | | token = DfTokenDao(db).get_token_by_id(DOCUMENT_IA_QUESTIONS) |
| | | # print(token) |
| | | if not token: |
| | |
| | | dify_workflow_clean: str = '' |
| | | dify_workflow_report: str = '' |
| | | postgresql_database_url: str = '' |
| | | max_report_tokens: int = 100000 |
| | | def __init__(self, **kwargs): |
| | | # 替换配置中的IP地址 |
| | | host_ip = os.getenv('HOST_IP', '127.0.0.1') |
| | |
| | | tenant_id = Column(Integer) # 创建人 |
| | | message = Column(TEXT) # 说明 |
| | | conversation_id = Column(String(64)) |
| | | # workflow = Column(Integer, default=0) |
| | | |
| | | # to_dict 方法 |
| | | def to_dict(self): |
| | |
| | | 'name': self.name, |
| | | 'agent_type': self.agent_type, |
| | | 'agent_id': self.agent_id, |
| | | # 'workflow': self.workflow, |
| | | 'create_date': self.create_date.strftime("%Y-%m-%d %H:%M:%S"), |
| | | 'update_date': self.update_date.strftime("%Y-%m-%d %H:%M:%S"), |
| | | } |
| | |
def __init__(self, db: Session):
    """Initialize the service with a database session.

    Args:
        db: database session handle (annotated ``Session`` — presumably a
            SQLAlchemy ORM session; the import is outside this view, so
            confirm against the file header).
    """
    # All service methods issue their queries/commits through this
    # shared session (see create_session below, which calls
    # self.db.add/commit/refresh).
    self.db = db
| | | |
| | | def create_session(self, session_id: str, name: str, agent_id: str, agent_type: AgentType, user_id: int) -> Type[ |
| | | def create_session(self, session_id: str, name: str, agent_id: str, agent_type: AgentType, user_id: int, message:dict=None, workflow_type: int=0) -> Type[ |
| | | SessionModel] | SessionModel: |
| | | """ |
| | | 创建一个新的会话记录。 |
| | |
| | | 返回: |
| | | SessionModel: 新创建的会话模型实例,如果会话ID已存在则返回None。 |
| | | """ |
| | | if not message: |
| | | message = {"role": "user", "content": name} |
| | | existing_session = self.get_session_by_id(session_id) |
| | | if existing_session: |
| | | existing_session.add_message({"role": "user", "content": name}) |
| | | existing_session.add_message(message) |
| | | existing_session.update_date = current_time() |
| | | self.db.commit() |
| | | self.db.refresh(existing_session) |
| | |
| | | agent_id=agent_id, |
| | | agent_type=agent_type, |
| | | tenant_id=user_id, |
| | | message=json.dumps([{"role": "user", "content": name}]) |
| | | # workflow=workflow_type, |
| | | message=json.dumps([message]) |
| | | ) |
| | | self.db.add(new_session) |
| | | self.db.commit() |