diff --git a/src/memos/mem_feedback/feedback.py b/src/memos/mem_feedback/feedback.py index e0fd6cc77..0b3fc3846 100644 --- a/src/memos/mem_feedback/feedback.py +++ b/src/memos/mem_feedback/feedback.py @@ -1,5 +1,4 @@ import concurrent.futures -import copy import difflib import json import re @@ -17,7 +16,12 @@ from memos.llms.factory import AzureLLM, LLMFactory, OllamaLLM, OpenAILLM from memos.log import get_logger from memos.mem_feedback.base import BaseMemFeedback -from memos.mem_feedback.utils import make_mem_item, should_keep_update, split_into_chunks +from memos.mem_feedback.utils import ( + general_split_into_chunks, + make_mem_item, + should_keep_update, + split_into_chunks, +) from memos.mem_reader.factory import MemReaderFactory from memos.mem_reader.read_multi_modal import detect_lang from memos.memories.textual.item import TextualMemoryItem @@ -37,6 +41,8 @@ FEEDBACK_JUDGEMENT_PROMPT_ZH, KEYWORDS_REPLACE, KEYWORDS_REPLACE_ZH, + OPERATION_UPDATE_JUDGEMENT, + OPERATION_UPDATE_JUDGEMENT_ZH, UPDATE_FORMER_MEMORIES, UPDATE_FORMER_MEMORIES_ZH, ) @@ -47,6 +53,7 @@ "if_kw_replace": {"en": KEYWORDS_REPLACE, "zh": KEYWORDS_REPLACE_ZH}, "judge": {"en": FEEDBACK_JUDGEMENT_PROMPT, "zh": FEEDBACK_JUDGEMENT_PROMPT_ZH}, "compare": {"en": UPDATE_FORMER_MEMORIES, "zh": UPDATE_FORMER_MEMORIES_ZH}, + "compare_judge": {"en": OPERATION_UPDATE_JUDGEMENT, "zh": OPERATION_UPDATE_JUDGEMENT_ZH}, "generation": {"en": FEEDBACK_ANSWER_PROMPT, "zh": FEEDBACK_ANSWER_PROMPT_ZH}, } @@ -108,7 +115,7 @@ def _retry_db_operation(self, operation): return operation() except Exception as e: logger.error( - f"[Feedback Core: _retry_db_operation] DB operation failed: {e}", exc_info=True + f"[1223 Feedback Core: _retry_db_operation] DB operation failed: {e}", exc_info=True ) raise @@ -122,7 +129,7 @@ def _batch_embed(self, texts: list[str], embed_bs: int = 5): results.extend(self._embed_once(batch)) except Exception as e: logger.error( - f"[Feedback Core: process_feedback_core] Embedding batch 
failed, Cover with all zeros: {len(batch)} entries: {e}" + f"[1223 Feedback Core: process_feedback_core] Embedding batch failed, Cover with all zeros: {len(batch)} entries: {e}" ) results.extend([[0.0] * dim for _ in range(len(batch))]) return results @@ -138,7 +145,7 @@ def _pure_add(self, user_name: str, feedback_content: str, feedback_time: str, i lambda: self.memory_manager.add(to_add_memories, user_name=user_name, use_batch=False) ) logger.info( - f"[Feedback Core: _pure_add] Pure added {len(added_ids)} memories for user {user_name}." + f"[1223 Feedback Core: _pure_add] Pure added {len(added_ids)} memories for user {user_name}." ) return { "record": { @@ -175,7 +182,7 @@ def _keyword_replace_judgement(self, feedback_content: str) -> dict | None: return judge_res else: logger.warning( - "[Feedback Core: _feedback_judgement] feedback judgement failed, return []" + "[1223 Feedback Core: _feedback_judgement] feedback judgement failed, return []" ) return {} @@ -200,7 +207,7 @@ def _feedback_judgement( return judge_res else: logger.warning( - "[Feedback Core: _feedback_judgement] feedback judgement failed, return []" + "[1223 Feedback Core: _feedback_judgement] feedback judgement failed, return []" ) return [] @@ -327,11 +334,11 @@ def _del_working_binding(self, user_name, mem_items: list[TextualMemoryItem]) -> self.graph_store.delete_node(mid, user_name=user_name) logger.info( - f"[Feedback Core:_del_working_binding] Delete raw/working mem_ids: {delete_ids} for user_name: {user_name}" + f"[1223 Feedback Core:_del_working_binding] Delete raw/working mem_ids: {delete_ids} for user_name: {user_name}" ) except Exception as e: logger.warning( - f"[Feedback Core:_del_working_binding] TreeTextMemory.delete_hard: failed to delete {mid}: {e}" + f"[1223 Feedback Core:_del_working_binding] TreeTextMemory.delete_hard: failed to delete {mid}: {e}" ) def semantics_feedback( @@ -400,24 +407,12 @@ def semantics_feedback( ): all_operations.extend(chunk_operations["operations"]) 
except Exception as e: - logger.error(f"[Feedback Core: semantics_feedback] Operation failed: {e}") + logger.error( + f"[1223 Feedback Core: semantics_feedback] Operation failed: {e}" + ) - operations = self.standard_operations(all_operations, current_memories) - - add_texts = [] - final_operations = [] - for item in operations: - if item["operation"].lower() == "add" and "text" in item and item["text"]: - if item["text"] in add_texts: - continue - final_operations.append(item) - add_texts.append(item["text"]) - elif item["operation"].lower() == "update": - final_operations.append(item) - logger.info( - f"[Feedback Core: deduplicate add] {len(operations)} -> {len(final_operations)} memories" - ) - operations = copy.deepcopy(final_operations) + standard_operations = self.standard_operations(all_operations, current_memories) + operations = self.filter_fault_update(standard_operations) logger.info(f"[Feedback Core Operations]: {operations!s}") @@ -463,7 +458,7 @@ def semantics_feedback( update_results.append(result) except Exception as e: logger.error( - f"[Feedback Core: semantics_feedback] Operation failed for {original_op}: {e}", + f"[1223 Feedback Core: semantics_feedback] Operation failed for {original_op}: {e}", exc_info=True, ) if update_results: @@ -491,7 +486,7 @@ def _feedback_memory( ] if filterd_ids: logger.warning( - f"[Feedback Core: _feedback_memory] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}." + f"[1223 Feedback Core: _feedback_memory] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}." 
) current_memories = [ @@ -523,7 +518,7 @@ def _feedback_memory( results[i] = node except Exception as e: logger.error( - f"[Feedback Core: _feedback_memory] Error processing memory index {i}: {e}", + f"[1223 Feedback Core: _feedback_memory] Error processing memory index {i}: {e}", exc_info=True, ) mem_res = [r for r in results if r] @@ -552,7 +547,7 @@ def _retrieve(self, query: str, info=None, top_k=100, user_name=None): retrieved_mems = self.searcher.search( query, info=info, user_name=user_name, top_k=top_k, full_recall=True ) - retrieved_mems = [item[0] for item in retrieved_mems] + retrieved_mems = [item[0] for item in retrieved_mems if float(item[1]) > 0.01] return retrieved_mems def _vec_query(self, new_memories_embedding: list[float], user_name=None): @@ -582,7 +577,7 @@ def _vec_query(self, new_memories_embedding: list[float], user_name=None): if not retrieved_ids: logger.info( - f"[Feedback Core: _vec_query] No similar memories found for embedding query for user {user_name}." + f"[1223 Feedback Core: _vec_query] No similar memories found for embedding query for user {user_name}." ) filterd_ids = [ @@ -590,7 +585,7 @@ def _vec_query(self, new_memories_embedding: list[float], user_name=None): ] if filterd_ids: logger.warning( - f"[Feedback Core: _vec_query] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}." + f"[1223 Feedback Core: _vec_query] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}." 
) return [ TextualMemoryItem(**item) @@ -615,6 +610,52 @@ def _get_llm_response(self, prompt: str, dsl: bool = True) -> dict: response_json = None return response_json + def filter_fault_update(self, operations: list[dict]): + """To address the randomness of large model outputs, it is necessary to conduct validity evaluation on the texts used for memory override operations.""" + updated_operations = [item for item in operations if item["operation"] == "UPDATE"] + if len(updated_operations) < 5: + return operations + + lang = detect_lang("".join(updated_operations[0]["text"])) + template = FEEDBACK_PROMPT_DICT["compare_judge"][lang] + + all_judge = [] + operations_chunks = general_split_into_chunks(updated_operations) + with ContextThreadPoolExecutor(max_workers=10) as executor: + future_to_chunk_idx = {} + for chunk in operations_chunks: + raw_operations_str = {"operations": chunk} + prompt = template.format(raw_operations=str(raw_operations_str)) + + future = executor.submit(self._get_llm_response, prompt) + future_to_chunk_idx[future] = chunk + for future in concurrent.futures.as_completed(future_to_chunk_idx): + try: + judge_res = future.result() + if ( + judge_res + and "operations_judgement" in judge_res + and isinstance(judge_res["operations_judgement"], list) + ): + all_judge.extend(judge_res["operations_judgement"]) + except Exception as e: + logger.error(f"[1223 Feedback Core: filter_fault_update] Judgement failed: {e}") + + logger.info(f"[1223 Feedback Core: filter_fault_update] LLM judgement: {all_judge}") + id2op = {item["id"]: item for item in updated_operations} + valid_updates = [] + for judge in all_judge: + valid_update = None + if judge["judgement"] == "UPDATE_APPROVED": + valid_update = id2op.get(judge["id"], None) + if valid_update: + valid_updates.append(valid_update) + + logger.info( + f"[1223 Feedback Core: filter_fault_update] {len(updated_operations)} -> {len(valid_updates)}" + ) + return valid_updates + [item for item in operations if 
item["operation"] != "UPDATE"] + def standard_operations(self, operations, current_memories): """ Regularize the operation design @@ -643,7 +684,7 @@ def correct_item(data): if not should_keep_update(data["text"], data["old_memory"]): logger.warning( - f"[Feedback Core: semantics_feedback] Due to the excessive proportion of changes, skip update: {data}" + f"[1223 Feedback Core: semantics_feedback] Due to the excessive proportion of changes, skip update: {data}" ) return None @@ -663,14 +704,14 @@ def correct_item(data): return data except Exception: logger.error( - f"[Feedback Core: standard_operations] Error processing operation item: {data}", + f"[1223 Feedback Core: standard_operations] Error processing operation item: {data}", exc_info=True, ) return None dehallu_res = [correct_item(item) for item in operations] dehalluded_operations = [item for item in dehallu_res if item] - logger.info(f"[Feedback Core: dehalluded_operations] {dehalluded_operations}") + logger.info(f"[1223 Feedback Core: dehalluded_operations] {dehalluded_operations}") # c add objects add_texts = [] @@ -684,7 +725,7 @@ def correct_item(data): elif item["operation"].lower() == "update": llm_operations.append(item) logger.info( - f"[Feedback Core: deduplicate add] {len(dehalluded_operations)} -> {len(llm_operations)} memories" + f"[1223 Feedback Core: deduplicate add] {len(dehalluded_operations)} -> {len(llm_operations)} memories" ) # Update takes precedence over add @@ -698,7 +739,7 @@ def correct_item(data): ] if filtered_items: logger.info( - f"[Feedback Core: semantics_feedback] Due to have update objects, skip add: {filtered_items}" + f"[1223 Feedback Core: semantics_feedback] Due to have update objects, skip add: {filtered_items}" ) return update_items else: @@ -746,7 +787,7 @@ def _doc_filter(self, doc_scope: str, memories: list[TextualMemoryItem]): memid for inscope_file in inscope_docs for memid in filename2_memid[inscope_file] ] logger.info( - f"[Feedback Core: 
process_keyword_replace] These docs are in scope : {inscope_docs}, relared memids: {inscope_ids}" + f"[1223 Feedback Core: process_keyword_replace] These docs are in scope : {inscope_docs}, related memids: {inscope_ids}" ) filter_memories = [mem for mem in memories if mem.id in inscope_ids] return filter_memories @@ -800,7 +841,7 @@ def process_keyword_replace( retrieved_memories = self._doc_filter(doc_scope, retrieved_memories) logger.info( - f"[Feedback Core: process_keyword_replace] Keywords recalled memory for user {user_name}: {len(retrieved_ids)} memories | After filtering: {len(retrieved_memories)} memories." + f"[1223 Feedback Core: process_keyword_replace] Keywords recalled memory for user {user_name}: {len(retrieved_ids)} memories | After filtering: {len(retrieved_memories)} memories." ) if not retrieved_memories: @@ -885,7 +926,7 @@ def check_validity(item): info.update({"user_id": user_id, "user_name": user_name, "session_id": session_id}) logger.info( - f"[Feedback Core: process_feedback_core] Starting memory feedback process for user {user_name}" + f"[1223 Feedback Core: process_feedback_core] Starting memory feedback process for user {user_name}" ) # feedback keywords update kwp_judge = self._keyword_replace_judgement(feedback_content) @@ -918,7 +959,7 @@ def check_validity(item): if not valid_feedback: logger.warning( - f"[Feedback Core: process_feedback_core] No valid judgements for user {user_name}: {raw_judge}." + f"[1223 Feedback Core: process_feedback_core] No valid judgements for user {user_name}: {raw_judge}." ) return {"record": {"add": [], "update": []}} @@ -966,12 +1007,14 @@ def check_validity(item): add_memories = mem_record["record"]["add"] update_memories = mem_record["record"]["update"] logger.info( - f"[Feedback Core: process_feedback_core] Processed {len(feedback_memories)} feedback | add {len(add_memories)} memories | update {len(update_memories)} memories for user {user_name}. 
+ f"[1223 Feedback Core: process_feedback_core] Processed {len(feedback_memories)} feedback | add {len(add_memories)} memories | update {len(update_memories)} memories for user {user_name}." ) return mem_record except Exception as e: - logger.error(f"[Feedback Core: process_feedback_core] Error for user {user_name}: {e}") + logger.error( + f"[1223 Feedback Core: process_feedback_core] Error for user {user_name}: {e}" + ) return {"record": {"add": [], "update": []}} def process_feedback( diff --git a/src/memos/mem_feedback/utils.py b/src/memos/mem_feedback/utils.py index 0033d85b4..c32c12328 100644 --- a/src/memos/mem_feedback/utils.py +++ b/src/memos/mem_feedback/utils.py @@ -54,6 +54,38 @@ def calculate_similarity(text1: str, text2: str) -> float: return change_ratio < 0.2 +def general_split_into_chunks(items: list[dict], max_tokens_per_chunk: int = 500): + chunks = [] + current_chunk = [] + current_tokens = 0 + + for item in items: + item_text = str(item) + item_tokens = estimate_tokens(item_text) + + if item_tokens > max_tokens_per_chunk: + if current_chunk: + chunks.append(current_chunk) + current_chunk = [] + + chunks.append([item]) + current_tokens = 0 + + elif current_tokens + item_tokens <= max_tokens_per_chunk: + current_chunk.append(item) + current_tokens += item_tokens + else: + if current_chunk: + chunks.append(current_chunk) + current_chunk = [item] + current_tokens = item_tokens + + if current_chunk: + chunks.append(current_chunk) + + return chunks + + def split_into_chunks(memories: list[TextualMemoryItem], max_tokens_per_chunk: int = 500): chunks = [] current_chunk = [] diff --git a/src/memos/templates/mem_feedback_prompts.py b/src/memos/templates/mem_feedback_prompts.py index bbdb187e2..dd30c4f92 100644 --- a/src/memos/templates/mem_feedback_prompts.py +++ b/src/memos/templates/mem_feedback_prompts.py @@ -334,10 +334,11 @@ }} *Requirements*: -1. 
If the new fact does not provide additional information to the existing memory item, the existing memory can override the new fact, and the operation is set to "NONE." -2. If the new fact is similar to existing memory but the information is more accurate, complete, or requires correction, set operation to "UPDATE" +1. If the new fact does not provide additional information to the existing memory item, or the existing memory can override the new fact, set the operation to "NONE." +2. If the new fact is similar to existing memory **about the same entity** but the information is more accurate, complete, or requires correction, set operation to "UPDATE" 3. If the new fact contradicts existing memory in key information (such as time, location, status, etc.), update the original memory based on the new fact and set operation to "UPDATE", only modifying the relevant error segments in the existing memory paragraphs while keeping other text completely unchanged. -4. If there is no existing memory that requires updating, the new fact is added as entirely new information, and the operation is set to "ADD." Therefore, in the same operation list, ADD and UPDATE will not coexist. +4. If there is no existing memory that requires updating **or if the new fact refers to a different entity**, the new fact is added as entirely new information, and the operation is set to "ADD." Therefore, in the same operation list, ADD and UPDATE will not coexist. +5. Facts about different entities that were acknowledged by the user within the same time period can coexist and are not considered contradictory. 
*ID Management Rules*: - Update operation: Keep the original ID unchanged @@ -408,16 +409,16 @@ Example2: Current Memories: -"123": "The user works as a software engineer in Company A, mainly responsible for front-end development" -"908": "The user likes to go fishing with friends on weekends" +"123": "On December 22, 2025, the user claims that John works at Company X" +"908": "On December 22, 2025, the user claims that Mary lives in New York" The background of the new fact being put forward: -user: Guess where I live? -assistant: Hehuan Community. -user feedback: Wrong, update my address: Mingyue Community, Chaoyang District, Beijing +user: Guess who I am? +assistant: You are a teacher at School ABC. +user feedback: No, I mean Peter is a teacher at School ABC. Newly facts: -"The user's residential address is Mingyue Community, Chaoyang District, Beijing" +"Peter is a teacher at School ABC." Operation recommendations: {{ [ {{ "id": "123", - "text": "The user works as a software engineer at Company A, primarily responsible for front-end development", + "text": "On December 22, 2025, the user claims that John works at Company X", "operation": "NONE" }}, {{ "id": "908", - "text": "The user enjoys fishing with friends on weekends", + "text": "On December 22, 2025, the user claims that Mary lives in New York", "operation": "NONE" }}, {{ - "id": "4567", - "text": "The user's residential address is Mingyue Community, Chaoyang District, Beijing", + "id": "001", + "text": "Peter is a teacher at School ABC.", "operation": "ADD" }} ] @@ -478,6 +479,7 @@ 2. 若新事实与现有记忆相似但信息更准确、完整或需修正,操作设为"UPDATE" 3. 若新事实在关键信息(如时间、地点、状态等)上与现有记忆矛盾,则根据新事实更新原记忆,操作设为"UPDATE",仅修改现有记忆段落中的相关错误片段,其余文本完全保持不变 4. 若无需要更新的现有记忆,则将新事实作为全新信息添加,操作设为"ADD"。因此在同一操作列表中,ADD与UPDATE不会同时存在 +5. 
同一时间段内用户所确认的不同实体的相关事实可以并存,且不会被视作相互矛盾。 ID管理规则: - 更新操作:保持原有ID不变 @@ -549,17 +551,16 @@ 示例2: 当前记忆: -"123": "用户在公司A担任软件工程师,主要负责前端开发" -"908": "用户周末喜欢和朋友一起钓鱼" - +"123": "2025年12月12日,用户声明约翰在 X 公司工作" +"908": "2025年12月12日,用户声明玛丽住在纽约" 提出新事实的背景: -user: 猜猜我住在哪里? +user: 猜猜刘青住在哪里? assistant: 合欢社区 -user feedback: 错了,请更新我的地址:北京市朝阳区明月社区 +user feedback: 错了,他住在明月小区 新获取的事实: -"用户的居住地址是北京市朝阳区明月小区" +"用户声明刘青住在明月小区" 操作建议: {{ @@ -577,7 +578,7 @@ }}, {{ "id": "4567", - "text": "用户的居住地址是北京市朝阳区明月小区", + "text": "用户声明刘青住在明月小区", "operation": "ADD" }} ] @@ -660,3 +661,162 @@ 回答: """ + + +OPERATION_UPDATE_JUDGEMENT = """ +# Batch UPDATE Safety Assessment Instruction + +**Background**: +This instruction serves as a supplementary safety verification layer for the memory update instruction. It evaluates each UPDATE operation in the `operations` list to ensure safety and effectiveness, preventing erroneous data overwrites. + +**Input**: The `operations` list containing multiple UPDATE proposals generated by the main instruction +**Output**: The final `operations_judgement` list after safety assessment and necessary corrections + +**Safety Assessment Process (for each UPDATE entry)**: +1. **Entity Consistency Check**: Verify that the old and new texts of this UPDATE entry describe exactly the same core entity (same person, organization, event, etc.). This is the most important check. +2. **Semantic Relevance Check**: Determine whether the new information directly corrects errors in or supplements missing information from the old information, rather than introducing completely unrelated new facts. +3. **Context Preservation Check**: Ensure that the updated text of this UPDATE only modifies the parts that need correction, while completely preserving all other valid information from the original text. + +**Batch Assessment Rules**: +- Independently assess each entry in the list and record the evaluation results + +**Key Decision Rules**: +1. 
If the core entities of old and new texts are different → Set `judgement` to "INVALID" (completely invalid) +2. If the core entities are the same but the information is completely unrelated → Set `judgement` to "NONE" (should not update) +3. If all three checks pass → Set `judgement` to "UPDATE_APPROVED" + +**Output Format**: +{{ + "operations_judgement": [ + {{ + "id": "...", + "text": "...", + "old_memory": "...", + "judgement": "INVALID" | "NONE" | "UPDATE_APPROVED" + }}, + ... + ] +}} + +**Example 1**: +Input operations list: +{{ + "operations": [ + {{ + "id": "275a", + "text": "On December 22, 2025 at 6:58 AM UTC, the user mentioned that Mission Terra is from Germany.", + "operation": "UPDATE", + "old_memory": "On December 13, 2025 at 4:02 PM UTC, the user mentioned that Mission Terra is a French national." + }}, + {{ + "id": "88a4", + "text": "On December 22, 2025 at 6:58 AM UTC, the user mentioned that Mission Terra is from Germany.", + "operation": "UPDATE", + "old_memory": "On December 22, 2025 at 6:52 AM UTC, the user confirmed that Gladys Liu is an Italian citizen." 
+ }} + ] +}} + +Safety assessment output: +{{ + "operations_judgement": [ + {{ + "id": "275a", + "text": "On December 22, 2025 at 6:58 AM UTC, the user mentioned that Mission Terra is from Germany.", + "old_memory": "On December 13, 2025 at 4:02 PM UTC, the user mentioned that Mission Terra is a French national.", + "judgement": "UPDATE_APPROVED" + }}, + {{ + "id": "88a4", + "text": "On December 22, 2025 at 6:58 AM UTC, the user mentioned that Mission Terra is from Germany.", + "old_memory": "On December 22, 2025 at 6:52 AM UTC, the user confirmed that Gladys Liu is an Italian citizen.", + "judgement": "INVALID" + }} + ] +}} + +**For actual execution**: +Input operations list: +{raw_operations} + +Safety assessment output:""" + + +OPERATION_UPDATE_JUDGEMENT_ZH = """## 批量UPDATE安全评估指令 + +**背景说明**: +本指令作为记忆更新指令的补充安全验证层。针对`operations`列表,评估每个UPDATE操作都安全有效,防止错误的数据覆盖。 + +**输入**:主指令生成的包含多个UPDATE提议的`operations`列表 +**输出**:经过安全评估和必要修正后的最终`operations_judgement`列表 + +**安全评估流程(针对每个UPDATE条目)**: +1. **实体一致性检查**:确认该UPDATE条目的新旧文本是否描述完全相同的核心实体(同一人物、组织、事件等)。这是最重要的检查。 +2. **语义相关性检查**:判断该UPDATE的新信息是否直接修正旧信息中的错误部分或补充缺失信息,而非引入完全不相关的新事实。 +3. **上下文保留检查**:确保该UPDATE更新后的文本只修改需要纠正的部分,完全保留原始文本中其他所有有效信息。 + +**批量评估规则**: +- 对列表中的每个条目独立评估,记录评估结果 + +**关键决策规则**: +1. 如果新旧文本核心实体不同 → `judgement`置为"INVALID"(完全无效) +2. 如果新旧文本核心实体相同但信息完全不相关 → `judgement`置为"NONE"(不应更新) +3. 如果通过全部三项检查 → `judgement`置为"UPDATE_APPROVED" + + +**输出格式**: +{{ + "operations_judgement": [ + // 评估后的完整operations列表 + {{ + "id": "...", + "text": "...", + "old_memory": "...", + "judgement": "INVALID" | "NONE" | "UPDATE_APPROVED" + }}, + ... 
+ ] +}} + + +示例1: +输入operations列表: +{{ + "operations": [ + {{ + "id": "275a", + "text": "2025年12月22日 UTC 时间6:58,用户提到Mission Terra 来自德国。", + "operation": "UPDATE", + "old_memory": "2025年12月13日 UTC 时间16:02,用户提及 Mission Terra 是法国国籍。" + }}, + {{ + "id": "88a4", + "text": "2025年12月22日 UTC 时间6:58,用户提到Mission Terra 来自德国。", + "operation": "UPDATE", + "old_memory": "2025年12月22日 UTC 时间6:52,用户确认 Gladys Liu 是意大利公民。" + }} + ] +}} +安全评估输出: +{{ + "operations_judgement": [ + {{ + "id": "275a", + "text": "2025年12月22日 UTC 时间6:58,用户提到Mission Terra 来自德国。", + "old_memory": "2025年12月13日 UTC 时间16:02,用户提及 Mission Terra 是法国国籍。", + "judgement": "UPDATE_APPROVED" + }}, + {{ + "id": "88a4", + "text": "2025年12月22日 UTC 时间6:58,用户提到Mission Terra 来自德国。", + "old_memory": "2025年12月22日 UTC 时间6:52,用户确认 Gladys Liu 是意大利公民。", + "judgement": "INVALID" + }} + ] +}} + +输入operations列表: +{raw_operations} + +安全评估输出: +"""