Merged
Commits
140 commits
65d5649
update reader and search strategy
Oct 28, 2025
6cad866
set strategy reader and search config
Oct 29, 2025
f040110
fix all reader conflicts
Oct 29, 2025
c389367
fix install problem
Oct 29, 2025
499502d
fix
Oct 29, 2025
e1bb223
fix test
Oct 29, 2025
72b7466
Merge branch 'dev' into dev_test
CaralHsi Oct 29, 2025
74585e8
Merge branch 'dev' into dev_test
fridayL Oct 30, 2025
790e99f
turn off graph recall
Oct 30, 2025
15b63a7
Merge branch 'dev' into dev_test
Oct 30, 2025
390ba29
turn off graph recall
Oct 30, 2025
9615282
turn off graph recall
Oct 30, 2025
2fb8ce0
Merge branch 'dev' into dev_test
fridayL Oct 30, 2025
6035522
Merge branch 'dev' into dev_test
Oct 30, 2025
04f412b
fix Searcher input bug
Oct 30, 2025
9716274
fix Searcher
Oct 30, 2025
c455a4e
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Oct 30, 2025
f8b9b4a
fix Search
Oct 30, 2025
c840ad4
Merge branch 'dev' into dev_test
Oct 30, 2025
b9dbecd
fix bug
Nov 4, 2025
1798f60
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 4, 2025
6db95e7
Merge branch 'dev' into dev_test
Nov 4, 2025
1173c07
adjust strategy reader
Nov 4, 2025
7ab465b
Merge branch 'dev' into dev_test
Nov 4, 2025
744d227
adjust strategy reader
Nov 4, 2025
a9a98fa
adjust search config input
Nov 4, 2025
900f5e6
reformat code
Nov 4, 2025
ac7aff5
Merge branch 'dev' into dev_test
CaralHsi Nov 4, 2025
144c446
re pr
Nov 5, 2025
a2b55c7
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 5, 2025
441c52b
Merge branch 'dev' into dev_test
Nov 5, 2025
6f272db
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Nov 5, 2025
f506d3e
format repair
Nov 5, 2025
db9041c
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 5, 2025
d921284
Merge branch 'dev' into dev_test
CaralHsi Nov 5, 2025
d036c53
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 11, 2025
5a3f0db
Merge branch 'dev' into dev_test
Nov 11, 2025
dc67413
fix time issue
Nov 11, 2025
7699b9a
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Nov 11, 2025
8bfbf94
develop feedback process
Nov 19, 2025
875c551
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 19, 2025
7f20f8b
Resolve merge conflicts
Nov 19, 2025
4d712eb
feedback handler configuration
Nov 20, 2025
36b93eb
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 25, 2025
adec73e
merged
Nov 25, 2025
aef3aad
upgrade feedback using
Nov 26, 2025
81ec520
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 26, 2025
55c9d89
fix
Nov 26, 2025
b4fbfde
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 27, 2025
ee64719
Merge branch 'dev' into dev_test
Nov 27, 2025
0fa9be7
add threshold
Nov 27, 2025
4a4746e
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Nov 27, 2025
16de8da
Merge branch 'dev' into dev_test
Nov 27, 2025
facb7b3
update prompt
Nov 27, 2025
eab5fe6
update prompt
Nov 27, 2025
7577aac
fix handler
Nov 27, 2025
cc4069d
add feedback scheduler
Nov 29, 2025
2529db2
add handler change node update
Dec 1, 2025
898ccac
add handler change node update
Dec 1, 2025
faec340
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 1, 2025
913c24d
add handler change node update
Dec 1, 2025
91d063d
add handler change node update
Dec 1, 2025
2a47880
add handler change node update
Dec 1, 2025
c5618c6
Merge branch 'dev' into dev_test
whipser030 Dec 2, 2025
b9737f1
Merge branch 'dev' into dev_test
CaralHsi Dec 2, 2025
ad9c2e7
fix interface input
Dec 2, 2025
c0c32b1
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Dec 2, 2025
d906f0d
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 2, 2025
696708e
fix interface input
Dec 2, 2025
6ad8dae
add chunk and ratio filter
Dec 3, 2025
6298c64
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 3, 2025
47acd7a
Merge branch 'dev' into dev_test
Dec 3, 2025
0727c25
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 3, 2025
0b0342d
Merge branch 'dev' into dev_test
Dec 3, 2025
294c1e6
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 3, 2025
d9158e4
Merge branch 'dev' into dev_test
Dec 3, 2025
699cdf7
update stopwords
Dec 3, 2025
8ca03c0
Merge branch 'dev' into dev_test
fridayL Dec 3, 2025
6076935
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 4, 2025
b2b0f6e
Merge branch 'dev' into dev_test
Dec 4, 2025
343eeb3
fix messages queue
Dec 4, 2025
1bb9396
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Dec 4, 2025
045196c
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 4, 2025
7131c35
Merge branch 'dev' into dev_test
Dec 4, 2025
d66e8ce
add seach_by_keywords_LIKE
Dec 7, 2025
d081aaa
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 7, 2025
405658f
Merge branch 'dev' into dev_test
Dec 7, 2025
ae60994
add doc filter
Dec 9, 2025
70efbf3
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 9, 2025
a613c7e
merge dev
Dec 9, 2025
7b0f2f4
add retrieve query
Dec 9, 2025
c6768b6
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 9, 2025
005a5bb
add retrieve queies
Dec 10, 2025
d69e7f4
patch info filter
Dec 10, 2025
d4f18e8
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 10, 2025
3c5199a
add strict info filter
Dec 11, 2025
365e0b6
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 12, 2025
9bc942d
Merge branch 'dev' into dev_test
Dec 12, 2025
eab3d80
add log and make embedding safety net
Dec 12, 2025
9519f5e
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 12, 2025
f21a885
Merge branch 'dev' into dev_test
Dec 12, 2025
7f146e1
add log and make embedding safety net
Dec 12, 2025
4cc4677
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 15, 2025
c01c900
Merge branch 'dev' into dev_test
Dec 15, 2025
4da6d31
deduplicate add objects
Dec 16, 2025
28934c8
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 16, 2025
d3f0a77
Merge branch 'dev' into dev_test
Dec 16, 2025
13e8d16
Merge branch 'dev' into dev_test
CaralHsi Dec 16, 2025
fd2816c
use _add_memories_parallel
Dec 17, 2025
dfe62dc
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 17, 2025
192d150
Merge branch 'dev' into dev_test
Dec 17, 2025
e6ce0ee
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Dec 17, 2025
02585f2
Merge branch 'dev' into dev_test
fridayL Dec 17, 2025
39b0b20
delete Special characters
Dec 17, 2025
81fa434
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 17, 2025
0a10a52
Merge branch 'dev' into dev_test
Dec 17, 2025
991092e
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Dec 17, 2025
b7b5003
delete Special characters
Dec 17, 2025
95bb061
delete Special characters
Dec 17, 2025
14d6732
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 17, 2025
c8ea8ae
Merge branch 'dev' into dev_test
Dec 17, 2025
a2fe6ed
delete Special characters
Dec 17, 2025
6274864
add source_doc_id
Dec 17, 2025
5f19846
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 17, 2025
a06e5f7
Merge branch 'dev' into dev_test
Dec 17, 2025
f2aec38
add source_doc_id
Dec 17, 2025
45f4957
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 18, 2025
bd612b9
Merge branch 'dev' into dev_test
Dec 18, 2025
d34812b
add reranker in init com..
Dec 18, 2025
5b681be
Merge branch 'dev' into dev_test
fridayL Dec 18, 2025
3919dcf
fix circle import
Dec 18, 2025
56df680
Merge branch 'dev_test' of github.com:whipser030/MemOS into dev_test
Dec 18, 2025
dd1aef9
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 18, 2025
777d6e0
Merge branch 'dev' into dev_test
Dec 18, 2025
353e417
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 22, 2025
bd8651c
Merge branch 'dev' into dev_test
Dec 22, 2025
73106ed
add feedback judgement
Dec 23, 2025
b7ffa5a
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 23, 2025
f37b15b
Merge branch 'dev' into dev_test
Dec 23, 2025
1b0e3af
add feedback judgement
Dec 23, 2025
129 changes: 86 additions & 43 deletions src/memos/mem_feedback/feedback.py
@@ -1,5 +1,4 @@
import concurrent.futures
import copy
import difflib
import json
import re
@@ -17,7 +16,12 @@
from memos.llms.factory import AzureLLM, LLMFactory, OllamaLLM, OpenAILLM
from memos.log import get_logger
from memos.mem_feedback.base import BaseMemFeedback
from memos.mem_feedback.utils import make_mem_item, should_keep_update, split_into_chunks
from memos.mem_feedback.utils import (
general_split_into_chunks,
make_mem_item,
should_keep_update,
split_into_chunks,
)
from memos.mem_reader.factory import MemReaderFactory
from memos.mem_reader.read_multi_modal import detect_lang
from memos.memories.textual.item import TextualMemoryItem
@@ -37,6 +41,8 @@
FEEDBACK_JUDGEMENT_PROMPT_ZH,
KEYWORDS_REPLACE,
KEYWORDS_REPLACE_ZH,
OPERATION_UPDATE_JUDGEMENT,
OPERATION_UPDATE_JUDGEMENT_ZH,
UPDATE_FORMER_MEMORIES,
UPDATE_FORMER_MEMORIES_ZH,
)
@@ -47,6 +53,7 @@
"if_kw_replace": {"en": KEYWORDS_REPLACE, "zh": KEYWORDS_REPLACE_ZH},
"judge": {"en": FEEDBACK_JUDGEMENT_PROMPT, "zh": FEEDBACK_JUDGEMENT_PROMPT_ZH},
"compare": {"en": UPDATE_FORMER_MEMORIES, "zh": UPDATE_FORMER_MEMORIES_ZH},
"compare_judge": {"en": OPERATION_UPDATE_JUDGEMENT, "zh": OPERATION_UPDATE_JUDGEMENT_ZH},
"generation": {"en": FEEDBACK_ANSWER_PROMPT, "zh": FEEDBACK_ANSWER_PROMPT_ZH},
}

@@ -108,7 +115,7 @@ def _retry_db_operation(self, operation):
return operation()
except Exception as e:
logger.error(
f"[Feedback Core: _retry_db_operation] DB operation failed: {e}", exc_info=True
f"[1223 Feedback Core: _retry_db_operation] DB operation failed: {e}", exc_info=True
)
raise

@@ -122,7 +129,7 @@ def _batch_embed(self, texts: list[str], embed_bs: int = 5):
results.extend(self._embed_once(batch))
except Exception as e:
logger.error(
f"[Feedback Core: process_feedback_core] Embedding batch failed, Cover with all zeros: {len(batch)} entries: {e}"
f"[1223 Feedback Core: process_feedback_core] Embedding batch failed, Cover with all zeros: {len(batch)} entries: {e}"
)
results.extend([[0.0] * dim for _ in range(len(batch))])
return results
@@ -138,7 +145,7 @@ def _pure_add(self, user_name: str, feedback_content: str, feedback_time: str, i
lambda: self.memory_manager.add(to_add_memories, user_name=user_name, use_batch=False)
)
logger.info(
f"[Feedback Core: _pure_add] Pure added {len(added_ids)} memories for user {user_name}."
f"[1223 Feedback Core: _pure_add] Pure added {len(added_ids)} memories for user {user_name}."
)
return {
"record": {
@@ -175,7 +182,7 @@ def _keyword_replace_judgement(self, feedback_content: str) -> dict | None:
return judge_res
else:
logger.warning(
"[Feedback Core: _feedback_judgement] feedback judgement failed, return []"
"[1223 Feedback Core: _feedback_judgement] feedback judgement failed, return []"
)
return {}

@@ -200,7 +207,7 @@ def _feedback_judgement(
return judge_res
else:
logger.warning(
"[Feedback Core: _feedback_judgement] feedback judgement failed, return []"
"[1223 Feedback Core: _feedback_judgement] feedback judgement failed, return []"
)
return []

@@ -327,11 +334,11 @@ def _del_working_binding(self, user_name, mem_items: list[TextualMemoryItem]) ->
self.graph_store.delete_node(mid, user_name=user_name)

logger.info(
f"[Feedback Core:_del_working_binding] Delete raw/working mem_ids: {delete_ids} for user_name: {user_name}"
f"[1223 Feedback Core:_del_working_binding] Delete raw/working mem_ids: {delete_ids} for user_name: {user_name}"
)
except Exception as e:
logger.warning(
f"[Feedback Core:_del_working_binding] TreeTextMemory.delete_hard: failed to delete {mid}: {e}"
f"[1223 Feedback Core:_del_working_binding] TreeTextMemory.delete_hard: failed to delete {mid}: {e}"
)

def semantics_feedback(
@@ -400,24 +407,12 @@ def semantics_feedback(
):
all_operations.extend(chunk_operations["operations"])
except Exception as e:
logger.error(f"[Feedback Core: semantics_feedback] Operation failed: {e}")
logger.error(
f"[1223 Feedback Core: semantics_feedback] Operation failed: {e}"
)

operations = self.standard_operations(all_operations, current_memories)

add_texts = []
final_operations = []
for item in operations:
if item["operation"].lower() == "add" and "text" in item and item["text"]:
if item["text"] in add_texts:
continue
final_operations.append(item)
add_texts.append(item["text"])
elif item["operation"].lower() == "update":
final_operations.append(item)
logger.info(
f"[Feedback Core: deduplicate add] {len(operations)} -> {len(final_operations)} memories"
)
operations = copy.deepcopy(final_operations)
standard_operations = self.standard_operations(all_operations, current_memories)
operations = self.filter_fault_update(standard_operations)

logger.info(f"[Feedback Core Operations]: {operations!s}")

@@ -463,7 +458,7 @@ def semantics_feedback(
update_results.append(result)
except Exception as e:
logger.error(
f"[Feedback Core: semantics_feedback] Operation failed for {original_op}: {e}",
f"[1223 Feedback Core: semantics_feedback] Operation failed for {original_op}: {e}",
exc_info=True,
)
if update_results:
@@ -491,7 +486,7 @@ def _feedback_memory(
]
if filterd_ids:
logger.warning(
f"[Feedback Core: _feedback_memory] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}."
f"[1223 Feedback Core: _feedback_memory] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}."
)

current_memories = [
@@ -523,7 +518,7 @@ def _feedback_memory(
results[i] = node
except Exception as e:
logger.error(
f"[Feedback Core: _feedback_memory] Error processing memory index {i}: {e}",
f"[1223 Feedback Core: _feedback_memory] Error processing memory index {i}: {e}",
exc_info=True,
)
mem_res = [r for r in results if r]
@@ -552,7 +547,7 @@ def _retrieve(self, query: str, info=None, top_k=100, user_name=None):
retrieved_mems = self.searcher.search(
query, info=info, user_name=user_name, top_k=top_k, full_recall=True
)
retrieved_mems = [item[0] for item in retrieved_mems]
retrieved_mems = [item[0] for item in retrieved_mems if float(item[1]) > 0.01]
return retrieved_mems

def _vec_query(self, new_memories_embedding: list[float], user_name=None):
@@ -582,15 +577,15 @@ def _vec_query(self, new_memories_embedding: list[float], user_name=None):

if not retrieved_ids:
logger.info(
f"[Feedback Core: _vec_query] No similar memories found for embedding query for user {user_name}."
f"[1223 Feedback Core: _vec_query] No similar memories found for embedding query for user {user_name}."
)

filterd_ids = [
item["id"] for item in current_memories if "mode:fast" in item["metadata"]["tags"]
]
if filterd_ids:
logger.warning(
f"[Feedback Core: _vec_query] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}."
f"[1223 Feedback Core: _vec_query] Since the tags mode is fast, no modifications are made to the following memory {filterd_ids}."
)
return [
TextualMemoryItem(**item)
@@ -615,6 +610,52 @@ def _get_llm_response(self, prompt: str, dsl: bool = True) -> dict:
response_json = None
return response_json

def filter_fault_update(self, operations: list[dict]):
"""To address the randomness of large model outputs, it is necessary to conduct validity evaluation on the texts used for memory override operations."""
updated_operations = [item for item in operations if item["operation"] == "UPDATE"]
if len(updated_operations) < 5:
return operations

lang = detect_lang("".join(updated_operations[0]["text"]))
template = FEEDBACK_PROMPT_DICT["compare_judge"][lang]

all_judge = []
operations_chunks = general_split_into_chunks(updated_operations)
with ContextThreadPoolExecutor(max_workers=10) as executor:
future_to_chunk_idx = {}
for chunk in operations_chunks:
raw_operations_str = {"operations": chunk}
prompt = template.format(raw_operations=str(raw_operations_str))

future = executor.submit(self._get_llm_response, prompt)
future_to_chunk_idx[future] = chunk
for future in concurrent.futures.as_completed(future_to_chunk_idx):
try:
judge_res = future.result()
if (
judge_res
and "operations_judgement" in judge_res
and isinstance(judge_res["operations_judgement"], list)
):
all_judge.extend(judge_res["operations_judgement"])
except Exception as e:
logger.error(f"[1223 Feedback Core: filter_fault_update] Judgement failed: {e}")

logger.info(f"[1223 Feedback Core: filter_fault_update] LLM judgement: {all_judge}")
id2op = {item["id"]: item for item in updated_operations}
valid_updates = []
for judge in all_judge:
valid_update = None
if judge["judgement"] == "UPDATE_APPROVED":
valid_update = id2op.get(judge["id"], None)
if valid_update:
valid_updates.append(valid_update)

logger.info(
f"[1223 Feedback Core: filter_fault_update] {len(updated_operations)} -> {len(valid_updates)}"
)
return valid_updates + [item for item in operations if item["operation"] != "UPDATE"]

def standard_operations(self, operations, current_memories):
"""
Regularize the operation design
@@ -643,7 +684,7 @@ def correct_item(data):

if not should_keep_update(data["text"], data["old_memory"]):
logger.warning(
f"[Feedback Core: semantics_feedback] Due to the excessive proportion of changes, skip update: {data}"
f"[1223 Feedback Core: semantics_feedback] Due to the excessive proportion of changes, skip update: {data}"
)
return None

@@ -663,14 +704,14 @@ def correct_item(data):
return data
except Exception:
logger.error(
f"[Feedback Core: standard_operations] Error processing operation item: {data}",
f"[1223 Feedback Core: standard_operations] Error processing operation item: {data}",
exc_info=True,
)
return None

dehallu_res = [correct_item(item) for item in operations]
dehalluded_operations = [item for item in dehallu_res if item]
logger.info(f"[Feedback Core: dehalluded_operations] {dehalluded_operations}")
logger.info(f"[1223 Feedback Core: dehalluded_operations] {dehalluded_operations}")

# c add objects
add_texts = []
@@ -684,7 +725,7 @@
elif item["operation"].lower() == "update":
llm_operations.append(item)
logger.info(
f"[Feedback Core: deduplicate add] {len(dehalluded_operations)} -> {len(llm_operations)} memories"
f"[1223 Feedback Core: deduplicate add] {len(dehalluded_operations)} -> {len(llm_operations)} memories"
)

# Update takes precedence over add
@@ -698,7 +739,7 @@
]
if filtered_items:
logger.info(
f"[Feedback Core: semantics_feedback] Due to have update objects, skip add: {filtered_items}"
f"[1223 Feedback Core: semantics_feedback] Due to have update objects, skip add: {filtered_items}"
)
return update_items
else:
@@ -746,7 +787,7 @@ def _doc_filter(self, doc_scope: str, memories: list[TextualMemoryItem]):
memid for inscope_file in inscope_docs for memid in filename2_memid[inscope_file]
]
logger.info(
f"[Feedback Core: process_keyword_replace] These docs are in scope : {inscope_docs}, relared memids: {inscope_ids}"
f"[1223 Feedback Core: process_keyword_replace] These docs are in scope : {inscope_docs}, relared memids: {inscope_ids}"
)
filter_memories = [mem for mem in memories if mem.id in inscope_ids]
return filter_memories
@@ -800,7 +841,7 @@ def process_keyword_replace(
retrieved_memories = self._doc_filter(doc_scope, retrieved_memories)

logger.info(
f"[Feedback Core: process_keyword_replace] Keywords recalled memory for user {user_name}: {len(retrieved_ids)} memories | After filtering: {len(retrieved_memories)} memories."
f"[1223 Feedback Core: process_keyword_replace] Keywords recalled memory for user {user_name}: {len(retrieved_ids)} memories | After filtering: {len(retrieved_memories)} memories."
)

if not retrieved_memories:
@@ -885,7 +926,7 @@ def check_validity(item):
info.update({"user_id": user_id, "user_name": user_name, "session_id": session_id})

logger.info(
f"[Feedback Core: process_feedback_core] Starting memory feedback process for user {user_name}"
f"[1223 Feedback Core: process_feedback_core] Starting memory feedback process for user {user_name}"
)
# feedback keywords update
kwp_judge = self._keyword_replace_judgement(feedback_content)
@@ -918,7 +959,7 @@ def check_validity(item):

if not valid_feedback:
logger.warning(
f"[Feedback Core: process_feedback_core] No valid judgements for user {user_name}: {raw_judge}."
f"[1223 Feedback Core: process_feedback_core] No valid judgements for user {user_name}: {raw_judge}."
)
return {"record": {"add": [], "update": []}}

@@ -966,12 +1007,14 @@ def check_validity(item):
add_memories = mem_record["record"]["add"]
update_memories = mem_record["record"]["update"]
logger.info(
f"[Feedback Core: process_feedback_core] Processed {len(feedback_memories)} feedback | add {len(add_memories)} memories | update {len(update_memories)} memories for user {user_name}."
f"[1223 Feedback Core: process_feedback_core] Processed {len(feedback_memories)} feedback | add {len(add_memories)} memories | update {len(update_memories)} memories for user {user_name}."
)
return mem_record

except Exception as e:
logger.error(f"[Feedback Core: process_feedback_core] Error for user {user_name}: {e}")
logger.error(
f"[1223 Feedback Core: process_feedback_core] Error for user {user_name}: {e}"
)
return {"record": {"add": [], "update": []}}

def process_feedback(
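A minimal, self-contained sketch of the gating behaviour that the new `filter_fault_update` introduces: the operation-dict shapes follow the diff above, but the LLM judgement call is stubbed with a fake judge, so this is illustrative only, not the module's actual code path.

```python
# Illustrative stand-in: fewer than 5 UPDATE operations bypass the LLM check
# entirely; larger batches are judged, and only UPDATE_APPROVED entries
# survive, while non-UPDATE operations always pass through untouched.
def sketch_filter_fault_update(operations, judge):
    updates = [op for op in operations if op["operation"] == "UPDATE"]
    if len(updates) < 5:  # small batches are trusted as-is
        return operations
    approved_ids = {
        j["id"] for j in judge(updates) if j["judgement"] == "UPDATE_APPROVED"
    }
    kept = [op for op in updates if op["id"] in approved_ids]
    return kept + [op for op in operations if op["operation"] != "UPDATE"]


# Example: a fake judge that rejects one of six updates.
ops = [{"id": str(i), "operation": "UPDATE", "text": f"mem {i}"} for i in range(6)]
ops.append({"id": "a1", "operation": "ADD", "text": "new memory"})
fake_judge = lambda ups: [
    {"id": op["id"], "judgement": "UPDATE_APPROVED" if op["id"] != "3" else "UPDATE_REJECTED"}
    for op in ups
]
print(len(sketch_filter_fault_update(ops, fake_judge)))  # 6: five approved updates + the add
```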
32 changes: 32 additions & 0 deletions src/memos/mem_feedback/utils.py
@@ -54,6 +54,38 @@ def calculate_similarity(text1: str, text2: str) -> float:
return change_ratio < 0.2


def general_split_into_chunks(items: list[dict], max_tokens_per_chunk: int = 500):
chunks = []
current_chunk = []
current_tokens = 0

for item in items:
item_text = str(item)
item_tokens = estimate_tokens(item_text)

if item_tokens > max_tokens_per_chunk:
if current_chunk:
chunks.append(current_chunk)
current_chunk = []

chunks.append([item])
current_tokens = 0

elif current_tokens + item_tokens <= max_tokens_per_chunk:
current_chunk.append(item)
current_tokens += item_tokens
else:
if current_chunk:
chunks.append(current_chunk)
current_chunk = [item]
current_tokens = item_tokens

if current_chunk:
chunks.append(current_chunk)

return chunks


def split_into_chunks(memories: list[TextualMemoryItem], max_tokens_per_chunk: int = 500):
chunks = []
current_chunk = []
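For the utils.py addition, a tiny standalone illustration of the greedy token-budget packing that `general_split_into_chunks` performs. `estimate_tokens` is stubbed here with a whitespace word count, which is an assumption; the real helper in memos may count tokens differently.

```python
# Greedy packing: accumulate items until the budget would be exceeded, then
# start a new chunk; an item that alone exceeds the budget becomes its own
# single-item chunk (mirroring the branch structure shown in the diff).
def estimate_tokens(text: str) -> int:  # stand-in for memos' estimate_tokens
    return len(text.split())

def pack(items, budget=500):
    chunks, current, used = [], [], 0
    for item in items:
        n = estimate_tokens(str(item))
        if n > budget:  # oversized item -> its own chunk
            if current:
                chunks.append(current)
            chunks.append([item])
            current, used = [], 0
        elif used + n <= budget:
            current.append(item)
            used += n
        else:
            chunks.append(current)
            current, used = [item], n
    if current:
        chunks.append(current)
    return chunks

docs = [{"text": "tok " * n} for n in (120, 300, 200, 900, 50)]
print([len(c) for c in pack(docs)])  # [2, 1, 1, 1]
```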