qwen_thread.py
@@ -93,12 +93,13 @@
                    risk_description = self.image_rule_chat_with_detail(filedata, res['waning_value'], ragurl, rag_mode, max_tokens)
                    # Generate a handling suggestion
                    suggestion = self.image_rule_chat_suggestion(filedata, res['waning_value'], ragurl, rag_mode, max_tokens)
                    self.logger.info(
                        f"{res['video_point_id']} processed: {res['id']}: alert={is_waning}, safety hazard: {risk_description}\nHandling suggestion: {suggestion}")
            else:
                is_desc = 3
            # Assemble the data record
            data = {
                "id": res['id'],
                "event_level_id": res['event_level_id'],  # event_level_id
                "event_level_name": res['event_level_name'],  # event_level_id
                "rule_id": res["rule_id"],
@@ -122,9 +123,9 @@
                "suggestion": suggestion,
                "knowledge_id": res['knowledge_id']
            }
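            # Replace any existing entity with the same id: delete the old row, then write the new one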
            self.collection.delete(f"id == {res['id']}")
            # Save to Milvus
-            image_id = self.collection.upsert(data).primary_keys
+            image_id = self.collection.insert(data).primary_keys
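            # Build the follow-up record keyed by the primary key returned from Milvus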
            data = {
                "id": str(image_id[0]),
                "video_point_id": res['video_point_id'],
@@ -172,7 +173,7 @@
            )
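            # Move the tokenized inputs onto the same device as the model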
            inputs = inputs.to(model.device)
            with torch.inference_mode():
-                outputs = model.generate(**inputs, max_new_tokens=100)
+                outputs = model.generate(**inputs, max_new_tokens=200)
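            # Keep only the newly generated tokens (drop the prompt prefix) before decoding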
            generated_ids = outputs[:, len(inputs.input_ids[0]):]
            image_text = self.processor.batch_decode(
                generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True