| | |
| | | |
| | | |
# NOTE(review): this span is corrupted diff/merge residue. It interleaves two
# versions of add_session_log's signature (with and without `agent_type`) and
# then the tail of a *different* caller (the `token = ...` block and the
# except/finally below belong to that caller, not to add_session_log).
# Resolve against version control before relying on this code.
async def add_session_log(db, session_id: str, question: str, chat_id: str, user_id, event_type: str,
                          conversation_id: str):
                          conversation_id: str, agent_type):  # duplicated signature line — newer variant adds `agent_type`
    try:
        # Upsert the chat-session row keyed by session_id; the first 255 chars
        # of the question become the session's display name.
        session = await ChatSessionDao(db).update_or_insert_by_id(
            session_id=session_id,
            name=question[:255],
            agent_id=chat_id,
            agent_type=1,
            agent_type=agent_type,  # duplicated kwarg — older hard-coded 1 vs newer parameter
            tenant_id=user_id,
            message={"role": "user", "content": question},
            conversation_id=conversation_id,
            # NOTE(review): closing `)` of this call was lost in extraction.

        # NOTE(review): from here on the lines belong to a different function
        # (an RG chat-dialog handler), fused in by the broken extraction:
        token = await get_chat_token(db, rg_api_token)
        url = settings.fwr_base_url + RG_CHAT_DIALOG.format(chat_id)
        chat = ChatDialog()
        session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id)
        session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id, 1)  # duplicated call — newer variant passes agent_type=1
        if session:
            conversation_id = session.conversation_id
        message = {"role": "assistant", "answer": "", "reference": {}}

    except:  # NOTE(review): bare except silently swallows every error, including CancelledError
        ...
    finally:
        # Best-effort: always persist an assistant placeholder for the session.
        message["role"] = "assistant"
        await update_session_log(db, session_id, message, conversation_id)
| | | |
async def data_process(data):
    """Recursively rewrite the vendor name "dify" to "smart" throughout *data*.

    Strings get a plain ``str.replace``; dicts are mutated in place, renaming
    any string key containing "dify" and recursing into every value; lists are
    rewritten element-by-element in place. Any other type is returned as-is.

    Args:
        data: arbitrarily nested str/dict/list structure (or any scalar).

    Returns:
        The same object (dicts/lists mutated in place) with all "dify"
        occurrences in strings and dict keys replaced by "smart".
    """
    if isinstance(data, str):
        return data.replace("dify", "smart")
    if isinstance(data, dict):
        # Snapshot the keys: we rename/delete entries while walking the dict.
        for key in list(data.keys()):
            if isinstance(key, str) and "dify" in key:
                renamed = key.replace("dify", "smart")
                # NOTE(review): if `renamed` already exists it is silently
                # overwritten — confirm that collision is acceptable upstream.
                data[renamed] = await data_process(data[key])
                del data[key]
            else:
                data[key] = await data_process(data[key])
        return data
    if isinstance(data, list):
        # enumerate instead of range(len(...)) — same in-place rewrite.
        for idx, item in enumerate(data):
            data[idx] = await data_process(item)
        return data
    return data
| | | |
| | | |
async def service_chat_workflow(db, chat_id: str, chat_data: ChatData, session_id: str, user_id, mode: str):
    # NOTE(review): this span is truncated and contains diff-duplicated lines;
    # the streaming-loop head and several branches were lost in extraction.
    # Accumulators for the streamed response; persisted in `finally` below.
    conversation_id = ""
    answer_event = ""
    answer_agent = ""
    answer_workflow = ""
    download_url = ""
    message_id = ""
    task_id = ""
    error = ""

    # NOTE(review): the `if` matching the `else:` below (and the `node_list`
    # initialisation) are missing from the extraction.
        query = chat_data.query
    else:
        query = "start new workflow"
    session = await add_session_log(db, session_id, query, chat_id, user_id, mode, conversation_id)
    session = await add_session_log(db, session_id, query, chat_id, user_id, mode, conversation_id, 3)  # duplicated call — newer variant adds agent_type=3
    if session:
        conversation_id = session.conversation_id
    try:
        # NOTE(review): loop head and earlier `if` branches missing here.
            event = smart_message_file
        elif ans.get("event") in [workflow_started, node_started, node_finished]:
            data = ans.get("data", {})
            # NOTE(review): the two blanking lines below are immediately
            # overwritten — likely diff residue of an older version.
            data["inputs"] = []
            data["outputs"] = []
            data["inputs"] = await data_process(data.get("inputs", {}))
            data["outputs"] = await data_process(data.get("outputs", {}))
            data["files"] = await data_process(data.get("files", []))
            data["process_data"] = ""
            node_list.append(ans)
            # Map each dify event name onto its "smart" counterpart by index.
            event = [smart_workflow_started, smart_node_started, smart_node_finished][
                [workflow_started, node_started, node_finished].index(ans.get("event"))]
        elif ans.get("event") == workflow_finished:
            data = ans.get("data", {})
            answer_workflow = data.get("outputs", {}).get("output")
            download_url = data.get("outputs", {}).get("download_url")
            event = smart_workflow_finished
            node_list.append(ans)

    except:  # NOTE(review): bare except hides all streaming errors
        ...
    finally:
        # Persist whatever answer was accumulated, even on failure.
        await update_session_log(db, session_id, {"role": "assistant", "answer": answer_event or answer_agent,
        await update_session_log(db, session_id, {"role": "assistant", "answer": answer_event or answer_agent or answer_workflow,
                                                  "download_url":download_url,
                                                  "node_list": node_list, "task_id": task_id, "id": message_id,
                                                  "error": error}, conversation_id)
| | | |
| | |
    # NOTE(review): orphaned fragment — the enclosing function's signature
    # (presumably a "get chat parameters" service that looks up `chat_info`)
    # was lost in extraction. Restore from version control.
    if not chat_info:
        return {}
    return chat_info.parameters
    # NOTE(review): commented-out legacy per-dialog_type branching, kept as found:
    # if chat_info.dialog_type == RG_TYPE:
    #     return {"retriever_resource":
    #         {
    #             "enabled": True
    #         }
    #     }
    # elif chat_info.dialog_type == BASIC_TYPE:
    #     ...
    # elif chat_info.dialog_type == DF_TYPE:
    #     token = await get_chat_token(db, chat_id)
    #     if not token:
    #         return {}
    #     url = settings.dify_base_url + DF_CHAT_PARAMETERS
    #     chat = ChatBaseApply()
    #     return await chat.chat_get(url, {"user": str(user_id)}, await chat.get_headers(token))
| | | |
| | | |
async def service_chat_sessions(db, chat_id, name):
    """Open a new RG chat session titled *name* for agent *chat_id*.

    Resolves the shared RG API token, builds the session endpoint, and
    delegates the HTTP call to ChatDialog.
    """
    dialog = ChatDialog()
    api_token = await get_chat_token(db, rg_api_token)
    headers = await dialog.get_headers(api_token)
    endpoint = settings.fwr_base_url + RG_CHAT_SESSIONS.format(chat_id)
    return await dialog.chat_sessions(endpoint, {"name": name}, headers)
| | | |
| | | |
async def service_chat_sessions_list(db, chat_id, current, page_size, user_id, keyword):
    """Return one page of chat sessions for *user_id* on agent *chat_id*.

    Delegates filtering/pagination to ChatSessionDao and serialises the
    result as a JSON string of the form {"total": ..., "rows": [...]}.
    """
    dao = ChatSessionDao(db)
    total, sessions = await dao.get_session_list(
        user_id=user_id,
        agent_id=chat_id,
        keyword=keyword,
        page=current,
        page_size=page_size,
    )
    payload = {"total": total, "rows": [item.to_dict() for item in sessions]}
    return json.dumps(payload)
| | | |
| | | |
| | | |
async def service_chat_session_log(db, session_id):
    """Fetch the session identified by *session_id* and return its log as JSON.

    NOTE(review): presumably raises AttributeError if no session matches
    (i.e. the DAO returns None) — verify against callers.
    """
    dao = ChatSessionDao(db)
    record = await dao.get_session_by_id(session_id)
    return json.dumps(record.log_to_json())
| | | |
| | | |
| | | |
async def service_chat_upload(db, chat_id, file, user_id):
    # NOTE(review): body does not match this signature — it is a token-counting
    # routine that references an undefined `input_str`. The real upload body
    # (and probably a separate `def` such as `get_token_count(input_str)`)
    # was lost in extraction. Restore from version control.

    tokenizer = get_tokenizer('qwen-turbo')
    # Split the string into tokens and convert them to token ids.
    tokens = tokenizer.encode(input_str)
    # print(f"The token ids after splitting are: {tokens}.")
    # # e.g. the token ids after splitting are: [31935, 64559, 99320, 56007, 100629, 104795, 99788, 1773]
    # print(f"There are {len(tokens)} tokens after splitting")
    # # e.g. there are 8 tokens after splitting
    #
    # # Decode each token id back to its string and print it:
    # for i in range(len(tokens)):
    #     print(f"token id {tokens[i]} corresponds to the string: {tokenizer.decode(tokens[i])}")
    return len(tokens)
| | | |
| | | async def read_pdf(pdf_stream): |