From fa78e5b5e03e40431e6c4404c6ecb00b8de81be3 Mon Sep 17 00:00:00 2001
From: shidong <shidong@jhsoft.cc>
Date: 星期一, 14 七月 2025 07:36:08 +0800
Subject: [PATCH] #2025/7/14 #优化获取最新数据代码 #优化预警提示语

---
 qwen_thread.py |   14 ++++++++------
 1 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/qwen_thread.py b/qwen_thread.py
index 3673dcd..aeb16d9 100644
--- a/qwen_thread.py
+++ b/qwen_thread.py
@@ -41,6 +41,7 @@
                 torch_dtype=torch.float16
 
             ).eval()
+            model = model.to(f"cuda:{config.get('cuda')}")
             self.model_pool.append(model)
 
         # 鍏变韩鐨勫鐞嗗櫒 (绾跨▼瀹夊叏)
@@ -93,12 +94,13 @@
                     risk_description = self.image_rule_chat_with_detail(filedata, res['waning_value'], ragurl,rag_mode,max_tokens)
                     # 生成处理建议
                     suggestion = self.image_rule_chat_suggestion(filedata, res['waning_value'], ragurl,rag_mode,max_tokens)
+                    self.logger.info(
+                        f"{res['video_point_id']}执行完毕：{res['id']}:是否预警{is_waning},安全隐患：{risk_description}\n处理建议：{suggestion}")
             else:
                 is_desc = 3
 
             # 数据组
             data = {
-                "id": res['id'],
                 "event_level_id": res['event_level_id'],  # event_level_id
                 "event_level_name": res['event_level_name'],  # event_level_id
                 "rule_id": res["rule_id"],
@@ -122,9 +124,9 @@
                 "suggestion": suggestion,
                 "knowledge_id": res['knowledge_id']
             }
-
+            self.collection.delete(f"id == {res['id']}")
             # 保存到milvus
-            image_id = self.collection.upsert(data).primary_keys
+            image_id = self.collection.insert(data).primary_keys
             data = {
                 "id": str(image_id[0]),
                 "video_point_id": res['video_point_id'],
@@ -171,8 +173,8 @@
                 return_tensors="pt",
             )
             inputs = inputs.to(model.device)
-            with torch.inference_mode():
-                outputs = model.generate(**inputs,max_new_tokens=100)
+            with torch.inference_mode(),torch.cuda.amp.autocast():
+                outputs = model.generate(**inputs,max_new_tokens=200)
             generated_ids = outputs[:, len(inputs.input_ids[0]):]
             image_text = self.processor.batch_decode(
                 generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
@@ -218,7 +220,7 @@
     def image_rule_chat(self, image_des,rule_text, ragurl, rag_mode,max_tokens):
         try:
             content = (
-                f"图片描述内容为：\n{image_des}\n规则内容：\n{rule_text}。\n请验证图片描述中是否有符合规则的内容，不进行推理和think。返回结果格式为[xxx符合的规则id]，如果没有返回[]")
+                f"图片描述内容为：\n{image_des}\n规则内容：\n{rule_text}。\n请验证图片描述中是否有不符合规则的内容，不进行推理和think。返回结果格式为[xxx符合的规则id]，如果没有返回[]")
             #self.logger.info(len(content))
             search_data = {
                 "prompt": "",

--
Gitblit v1.8.0