zhaoqingang
2025-03-04 370120fd4154ce6c5f69d16a4a343a016cf2e816
app/service/v2/chat.py
@@ -1,11 +1,13 @@
import asyncio
import io
import json
import fitz
from fastapi import HTTPException
from Log import logger
from app.config.agent_base_url import RG_CHAT_DIALOG, DF_CHAT_AGENT, DF_CHAT_PARAMETERS, RG_CHAT_SESSIONS, \
-    DF_CHAT_WORKFLOW, DF_UPLOAD_FILE
+    DF_CHAT_WORKFLOW, DF_UPLOAD_FILE, RG_ORIGINAL_URL
from app.config.config import settings
from app.config.const import *
from app.models import DialogModel, ApiTokenModel, UserTokenModel
@@ -28,13 +30,13 @@
async def add_session_log(db, session_id: str, question: str, chat_id: str, user_id, event_type: str,
-                          conversation_id: str):
+                          conversation_id: str, agent_type):
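    """Create or update the session record and log the user's question.

    agent_type tags the session source; judging from the call sites below,
    1 marks a dialog chat and 3 marks a workflow chat.
    """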
    try:
        session = await ChatSessionDao(db).update_or_insert_by_id(
            session_id=session_id,
            name=question[:255],
            agent_id=chat_id,
-            agent_type=1,
+            agent_type=agent_type,
            tenant_id=user_id,
            message={"role": "user", "content": question},
            conversation_id=conversation_id,
@@ -88,7 +90,7 @@
    token = await get_chat_token(db, rg_api_token)
    url = settings.fwr_base_url + RG_CHAT_DIALOG.format(chat_id)
    chat = ChatDialog()
-    session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id)
+    session = await add_session_log(db, session_id, question, chat_id, user_id, mode, session_id, 1)
    if session:
        conversation_id = session.conversation_id
    message = {"role": "assistant", "answer": "", "reference": {}}
@@ -129,26 +131,47 @@
        except Exception:
            ...
    finally:
        message["role"] = "assistant"
        await update_session_log(db, session_id, message, conversation_id)
async def data_process(data):
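    """Recursively rebrand a payload: replace "dify" with "smart" in every
    string value and every dict key, descending into nested dicts and lists.
    """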
    if isinstance(data, str):
        return data.replace("dify", "smart")
    elif isinstance(data, dict):
        for k in list(data.keys()):
            if isinstance(k, str) and "dify" in k:
                new_k = k.replace("dify", "smart")
                data[new_k] = await data_process(data[k])
                del data[k]
            else:
                data[k] = await data_process(data[k])
        return data
    elif isinstance(data, list):
        for i in range(len(data)):
            data[i] = await data_process(data[i])
        return data
    else:
        return data
async def service_chat_workflow(db, chat_id: str, chat_data: ChatData, session_id: str, user_id, mode: str):
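    """Stream a workflow/agent chat: relay upstream events, rewrite them via
    data_process, and persist the final answer to the session log.
    """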
    conversation_id = ""
    answer_event = ""
    answer_agent = ""
    answer_workflow = ""
    download_url = ""
    message_id = ""
    task_id = ""
    error = ""
    files = []
    node_list = []
    token = await get_chat_token(db, chat_id)
    chat, url = await get_chat_object(mode)
    query = getattr(chat_data, "query", "start new workflow")
-    session = await add_session_log(db, session_id, query, chat_id, user_id, mode, conversation_id)
+    session = await add_session_log(db, session_id, query if query else "start new conversation", chat_id, user_id, mode, conversation_id, 3)
    if session:
        conversation_id = session.conversation_id
    try:
@@ -180,15 +203,24 @@
                event = smart_message_file
            elif ans.get("event") in [workflow_started, node_started, node_finished]:
                data = ans.get("data", {})
                data["inputs"] = []
                data["outputs"] = []
                data["inputs"] = await data_process(data.get("inputs", {}))
                data["outputs"] = await data_process(data.get("outputs", {}))
                data["files"] = await data_process(data.get("files", []))
                data["process_data"] = ""
                if data.get("status") == "failed":
                    status = http_500
                    error = data.get("error", "")
                node_list.append(ans)
                # Map upstream event names onto their smart-branded equivalents.
                event = {workflow_started: smart_workflow_started,
                         node_started: smart_node_started,
                         node_finished: smart_node_finished}[ans.get("event")]
            elif ans.get("event") == workflow_finished:
                data = ans.get("data", {})
                outputs = data.get("outputs") or {}  # outputs may be null on failed runs
                answer_workflow = outputs.get("output")
                download_url = outputs.get("download_url")
                event = smart_workflow_finished
                if data.get("status") == "failed":
                    status = http_500
                    error = data.get("error", "")
                node_list.append(ans)
            elif ans.get("event") == message_end:
@@ -210,7 +242,8 @@
        except Exception:
            ...
    finally:
-        await update_session_log(db, session_id, {"role": "assistant", "answer": answer_event or answer_agent,
+        await update_session_log(db, session_id, {"role": "assistant", "answer": answer_event or answer_agent or answer_workflow or error,
+                                                  "download_url": download_url,
                                                  "node_list": node_list, "task_id": task_id, "id": message_id,
                                                  "error": error}, conversation_id)
@@ -224,30 +257,33 @@
    if not chat_info:
        return {}
    return chat_info.parameters
async def service_chat_sessions(db, chat_id, name):
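    """Create a named conversation for this chat on the RG (RagFlow) backend."""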
    token = await get_chat_token(db, rg_api_token)
    if not token:
        return {}
    url = settings.fwr_base_url + RG_CHAT_SESSIONS.format(chat_id)
    chat = ChatDialog()
    return await chat.chat_sessions(url, {"name": name}, await chat.get_headers(token))
async def service_chat_sessions_list(db, chat_id, current, page_size, user_id, keyword):
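    """Page through this user's stored sessions for an agent, returned as a JSON string."""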
    total, session_list = await ChatSessionDao(db).get_session_list(
        user_id=user_id,
        agent_id=chat_id,
        keyword=keyword,
        page=current,
        page_size=page_size
    )
    return json.dumps({"total":total, "rows": [session.to_dict() for session in session_list]})
async def service_chat_session_log(db, session_id):
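    """Return the full message log of a single session as a JSON string."""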
    session_log = await ChatSessionDao(db).get_session_by_id(session_id)
    if not session_log:
        return json.dumps({})
    return json.dumps(session_log.log_to_json())
async def service_chat_upload(db, chat_id, file, user_id):
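    """Upload a file for user_id to the given chat, presumably via the DF_UPLOAD_FILE endpoint."""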
@@ -278,14 +314,6 @@
    tokenizer = get_tokenizer('qwen-turbo')
    # Split the string into tokens and convert them to token ids
    tokens = tokenizer.encode(input_str)
    # print(f"经过切分后的token id为:{tokens}。")
    # # 经过切分后的token id为: [31935, 64559, 99320, 56007, 100629, 104795, 99788, 1773]
    # print(f"经过切分后共有{len(tokens)}个token")
    # # 经过切分后共有8个token
    #
    # # 将token id转化为字符串并打印出来
    # for i in range(len(tokens)):
    #     print(f"token id为{tokens[i]}对应的字符串为:{tokenizer.decode(tokens[i])}")
    return len(tokens)
async def read_pdf(pdf_stream):
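    """Extract text from an in-memory PDF stream, presumably via fitz (PyMuPDF)."""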
@@ -318,3 +346,89 @@
        text = await read_word(file)
    return await get_str_token(text)
async def service_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key):
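    """Retrieve matching chunks from the RG retrieval endpoint.

    query should be a JSON string carrying "query" and "dataset_ids"; malformed
    input is retried after single-quote normalization, and finally treated as a
    plain question against knowledge_id.
    """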
    logger.info(query)
    try:
        request_data = json.loads(query)
        payload = {
            "question": request_data.get("query", ""),
            "dataset_ids": request_data.get("dataset_ids", []),
            "page_size": top_k,
            "similarity_threshold": similarity_threshold
        }
    except json.JSONDecodeError:
        # Some callers send pseudo-JSON with single quotes; retry after a naive normalization.
        fixed_json = query.replace("'", '"')
        try:
            request_data = json.loads(fixed_json)
            payload = {
                "question": request_data.get("query", ""),
                "dataset_ids": request_data.get("dataset_ids", []),
                "page_size": top_k,
                "similarity_threshold": similarity_threshold
            }
        except Exception:
            # Last resort: treat the raw string as the question against knowledge_id.
            payload = {
                "question": query,
                "dataset_ids": [knowledge_id],
                "page_size": top_k,
                "similarity_threshold": similarity_threshold
            }
    url = settings.fwr_base_url + RG_ORIGINAL_URL
    chat = ChatBaseApply()
    response = await chat.chat_post(url, payload, await chat.get_headers(api_key))
    if not response:
        raise HTTPException(status_code=500, detail="Service error!")
    records = [
        {
            "content": chunk["content"],
            "score": chunk["similarity"],
            "title": chunk.get("document_keyword", "Unknown Document"),
            "metadata": {"document_id": chunk["document_id"],
                         "path": f"{settings.fwr_base_url}/document/{chunk['document_id']}?ext={chunk.get('document_keyword').split('.')[-1]}&prefix=document",
                         'highlight': chunk.get("highlight") , "image_id":  chunk.get("image_id"), "positions": chunk.get("positions"),}
        }
        for chunk in response.get("data", {}).get("chunks", [])
    ]
    return records
async def service_base_chunk_retrieval(query, knowledge_id, top_k, similarity_threshold, api_key):
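    """Plain variant of service_chunk_retrieval: query is used verbatim, only
    knowledge_id is searched, and metadata is trimmed to document_id.
    """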
    payload = {
        "question": query,
        "dataset_ids": [knowledge_id],
        "page_size": top_k,
        "similarity_threshold": similarity_threshold
    }
    url = settings.fwr_base_url + RG_ORIGINAL_URL
    # url = "http://192.168.20.116:11080/" + RG_ORIGINAL_URL
    chat = ChatBaseApply()
    response = await chat.chat_post(url, payload, await chat.get_headers(api_key))
    if not response:
        raise HTTPException(status_code=500, detail="服务异常!")
    records = [
        {
            "content": chunk["content"],
            "score": chunk["similarity"],
            "title": chunk.get("document_keyword", "Unknown Document"),
            "metadata": {"document_id": chunk["document_id"]}
        }
        for chunk in response.get("data", {}).get("chunks", [])
    ]
    return records
if __name__ == "__main__":
    q = json.dumps({"query": "设备", "dataset_ids": ["fc68db52f43111efb94a0242ac120004"]})
    top_k = 2
    similarity_threshold = 0.5
    api_key = "ragflow-Y4MGYwY2JlZjM2YjExZWY4ZWU5MDI0Mm"
    # a = service_chunk_retrieval(q, top_k, similarity_threshold, api_key)
    # print(a)
    async def a():
        b = await service_chunk_retrieval(q, top_k, similarity_threshold, api_key)
        print(b)
    asyncio.run(a())