From fa78e5b5e03e40431e6c4404c6ecb00b8de81be3 Mon Sep 17 00:00:00 2001
From: shidong <shidong@jhsoft.cc>
Date: 星期一, 14 七月 2025 07:36:08 +0800
Subject: [PATCH] #2025/7/14 #优化获取最新数据代码 #优化预警提示语
---
qwen_thread.py | 51 +++++++++++++++++++--------------------------------
 1 file changed, 19 insertions(+), 32 deletions(-)
diff --git a/qwen_thread.py b/qwen_thread.py
index 0a16fd8..aeb16d9 100644
--- a/qwen_thread.py
+++ b/qwen_thread.py
@@ -17,11 +17,12 @@
class qwen_thread:
- def __init__(self, config):
+ def __init__(self, config,logger):
self.config = config
self.max_workers = int(config.get("threadnum"))
self.executor = ThreadPoolExecutor(max_workers=int(config.get("threadnum")))
self.semaphore = threading.Semaphore(int(config.get("threadnum")))
+ self.logger = logger
# 鍒濆鍖朚ilvus闆嗗悎
connections.connect("default", host=config.get("milvusurl"), port=config.get("milvusport"))
@@ -40,27 +41,12 @@
torch_dtype=torch.float16
).eval()
+ model = model.to(f"cuda:{config.get('cuda')}")
self.model_pool.append(model)
# 鍏变韩鐨勫鐞嗗櫒 (绾跨▼瀹夊叏)
self.processor = AutoProcessor.from_pretrained(config.get("qwenaddr"), use_fast=True)
- # 鍒涘缓瀹炰緥涓撳睘logger
- self.logger = logging.getLogger(f"{self.__class__}_{id(self)}")
- self.logger.setLevel(logging.INFO)
- # 閬垮厤閲嶅娣诲姞handler
- if not self.logger.handlers:
- handler = RotatingFileHandler(
- filename=os.path.join("logs", 'thread_log.log'),
- maxBytes=10 * 1024 * 1024,
- backupCount=3,
- encoding='utf-8'
- )
- formatter = logging.Formatter(
- '%(asctime)s - %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s: %(message)s'
- )
- handler.setFormatter(formatter)
- self.logger.addHandler(handler)
def submit(self,res_a):
# 灏濊瘯鑾峰彇淇″彿閲忥紙闈為樆濉烇級
@@ -101,25 +87,26 @@
# 璋冪敤瑙勫垯鍖归厤鏂规硶,鍒ゆ柇鏄惁棰勮
is_waning = self.image_rule_chat(desc, res['waning_value'], ragurl,rag_mode,max_tokens)
# 濡傛灉棰勮,鍒欑敓鎴愰殣鎮f弿杩板拰澶勭悊寤鸿
- #if is_waning == 1:
- # 鑾峰彇瑙勭珷鍒跺害鏁版嵁
- filedata = self.get_filedata(res['waning_value'],res['suggestion'], ragurl)
- # 鐢熸垚闅愭偅鎻忚堪
- risk_description = self.image_rule_chat_with_detail(filedata, res['waning_value'], ragurl,rag_mode,max_tokens)
- # 鐢熸垚澶勭悊寤鸿
- suggestion = self.image_rule_chat_suggestion(filedata, res['waning_value'], ragurl,rag_mode,max_tokens)
+ if is_waning == 1:
+ # 鑾峰彇瑙勭珷鍒跺害鏁版嵁
+ filedata = self.get_filedata(res['waning_value'],res['suggestion'], ragurl)
+ # 鐢熸垚闅愭偅鎻忚堪
+ risk_description = self.image_rule_chat_with_detail(filedata, res['waning_value'], ragurl,rag_mode,max_tokens)
+ # 鐢熸垚澶勭悊寤鸿
+ suggestion = self.image_rule_chat_suggestion(filedata, res['waning_value'], ragurl,rag_mode,max_tokens)
+ self.logger.info(
+ f"{res['video_point_id']}鎵ц瀹屾瘯锛歿res['id']}:鏄惁棰勮{is_waning},瀹夊叏闅愭偅锛歿risk_description}\n澶勭悊寤鸿锛歿suggestion}")
else:
is_desc = 3
# 鏁版嵁缁�
data = {
- "id": res['id'],
"event_level_id": res['event_level_id'], # event_level_id
"event_level_name": res['event_level_name'], # event_level_id
"rule_id": res["rule_id"],
"video_point_id": res['video_point_id'], # video_point_id
"video_point_name": res['video_point_name'],
- "is_waning": 1,
+ "is_waning": is_waning,
"is_desc": is_desc,
"zh_desc_class": desc, # text_vector
"bounding_box": res['bounding_box'], # bounding_box
@@ -137,9 +124,9 @@
"suggestion": suggestion,
"knowledge_id": res['knowledge_id']
}
-
+ self.collection.delete(f"id == {res['id']}")
# 淇濆瓨鍒癿ilvus
- image_id = self.collection.upsert(data).primary_keys
+ image_id = self.collection.insert(data).primary_keys
data = {
"id": str(image_id[0]),
"video_point_id": res['video_point_id'],
@@ -154,7 +141,7 @@
# 璋冪敤rag
asyncio.run(self.insert_json_data(ragurl, data))
rag_time = datetime.now() - current_time
- self.logger.info(f"{image_id}杩愯缁撴潫鎬讳綋鐢ㄦ椂:{datetime.now() - ks_time},鍥剧墖鎻忚堪鐢ㄦ椂{desc_time}锛孯AG鐢ㄦ椂{rag_time}")
+ self.logger.info(f"{res['video_point_id']}鎵ц瀹屾瘯锛歿image_id}杩愯缁撴潫鎬讳綋鐢ㄦ椂:{datetime.now() - ks_time},鍥剧墖鎻忚堪鐢ㄦ椂{desc_time}锛孯AG鐢ㄦ椂{rag_time}")
except Exception as e:
self.logger.info(f"绾跨▼锛氭墽琛屾ā鍨嬭В鏋愭椂鍑洪敊::{e}")
return 0
@@ -186,8 +173,8 @@
return_tensors="pt",
)
inputs = inputs.to(model.device)
- with torch.inference_mode():
- outputs = model.generate(**inputs,max_new_tokens=100)
+ with torch.inference_mode(),torch.cuda.amp.autocast():
+ outputs = model.generate(**inputs,max_new_tokens=200)
generated_ids = outputs[:, len(inputs.input_ids[0]):]
image_text = self.processor.batch_decode(
generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
@@ -233,7 +220,7 @@
def image_rule_chat(self, image_des,rule_text, ragurl, rag_mode,max_tokens):
try:
content = (
- f"鍥剧墖鎻忚堪鍐呭涓猴細\n{image_des}\n瑙勫垯鍐呭锛歕n{rule_text}銆俓n璇烽獙璇佸浘鐗囨弿杩颁腑鏄惁鏈夌鍚堣鍒欑殑鍐呭锛屼笉杩涜鎺ㄧ悊鍜宼hink銆傝繑鍥炵粨鏋滄牸寮忎负[xxx绗﹀悎鐨勮鍒檌d]锛屽鏋滄病鏈夎繑鍥瀃]")
+ f"鍥剧墖鎻忚堪鍐呭涓猴細\n{image_des}\n瑙勫垯鍐呭锛歕n{rule_text}銆俓n璇烽獙璇佸浘鐗囨弿杩颁腑鏄惁鏈変笉绗﹀悎瑙勫垯鐨勫唴瀹癸紝涓嶈繘琛屾帹鐞嗗拰think銆傝繑鍥炵粨鏋滄牸寮忎负[xxx绗﹀悎鐨勮鍒檌d]锛屽鏋滄病鏈夎繑鍥瀃]")
#self.logger.info(len(content))
search_data = {
"prompt": "",
--
Gitblit v1.8.0