#2025/7/14
# Changelog: optimized the latest-data retrieval code
# Changelog: improved the alert/warning prompt wording
| | |
| | | # 读取共享内存中的图片 |
| | | # image_id = get_mem.smem_read_frame_qianwen(camera_id) |
| | | if len(res_a) > 0: |
| | | sorted_results = sorted(res_a, key=itemgetter("id"), reverse=True) |
| | | res = sorted_results[0] |
| | | #sorted_results = sorted(res_a, key=itemgetter("id"), reverse=True) |
| | | #res = sorted_results[0] |
| | | res = max(res_a, key=itemgetter("id")) |
| | | self.collection.delete(f"id == {res['id']}") |
| | | # 数据组 |
| | | data = { |
| | |
| | | torch_dtype=torch.float16 |
| | | |
| | | ).eval() |
| | | model = model.to(f"cuda:{config.get('cuda')}") |
| | | self.model_pool.append(model) |
| | | |
| | | # 共享的处理器 (线程安全) |
| | |
| | | return_tensors="pt", |
| | | ) |
| | | inputs = inputs.to(model.device) |
| | | with torch.inference_mode(): |
| | | with torch.inference_mode(),torch.cuda.amp.autocast(): |
| | | outputs = model.generate(**inputs,max_new_tokens=200) |
| | | generated_ids = outputs[:, len(inputs.input_ids[0]):] |
| | | image_text = self.processor.batch_decode( |
| | |
| | | def image_rule_chat(self, image_des,rule_text, ragurl, rag_mode,max_tokens): |
| | | try: |
| | | content = ( |
| | | f"图片描述内容为:\n{image_des}\n规则内容:\n{rule_text}。\n请验证图片描述中是否有符合规则的内容,不进行推理和think。返回结果格式为[xxx符合的规则id],如果没有返回[]") |
| | | f"图片描述内容为:\n{image_des}\n规则内容:\n{rule_text}。\n请验证图片描述中是否有不符合规则的内容,不进行推理和think。返回结果格式为[xxx符合的规则id],如果没有返回[]") |
| | | #self.logger.info(len(content)) |
| | | search_data = { |
| | | "prompt": "", |