-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsalience.py
More file actions
276 lines (221 loc) · 8.98 KB
/
salience.py
File metadata and controls
276 lines (221 loc) · 8.98 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
"""
Salience - Value Assignment and Reinforcement
Salience is the mechanism by which symbols acquire and maintain
attention-worthiness. It answers: "Why should I pay attention to this?"
From the original insight:
"Every time an input stream produces a pattern, it is assigned
a set of features and, if deemed relevant, it is appended to
this sequence."
Salience can come from:
- Survival/utility value (hardcoded objectives)
- Novelty (deviation from expectations)
- Repetition (reinforcement through recurrence)
- Convergence (multiple sources agreeing)
- Association (connection to high-salience items)
This module provides pluggable salience functions that can integrate
with external systems (novelty scores, world model allocations, etc.)
"""
import math
from abc import ABC, abstractmethod
from collections import defaultdict, deque
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Protocol

from .sequence import Symbol
class SalienceFunction(Protocol):
    """Structural type for anything that scores a symbol's salience.

    Implementations are plain callables: given a Symbol (and an optional
    context dict) they return a float in the 0-1 range.
    """
    def __call__(self, symbol: Symbol, context: Optional[Dict] = None) -> float:
        """Return the salience of ``symbol`` as a float in 0-1."""
        ...
# -----------------------------------------------------------------------------
# Built-in Salience Functions
# -----------------------------------------------------------------------------
def constant_salience(value: float = 0.5) -> SalienceFunction:
    """Build a salience function that ignores its input entirely.

    Every symbol receives the same fixed score ``value``.
    """
    def score(symbol: Symbol, context: Optional[Dict] = None) -> float:
        return value
    return score
def recency_salience(half_life_seconds: float = 60.0) -> "SalienceFunction":
    """
    Build a salience function where salience decays exponentially with age.

    Recent symbols are more salient than old ones: a symbol exactly
    ``half_life_seconds`` old scores 0.5, and the score halves again for
    every further half-life.

    Args:
        half_life_seconds: Age (in seconds) at which salience drops to 0.5.
            Must be positive.

    Raises:
        ValueError: If ``half_life_seconds`` is not positive.
    """
    if half_life_seconds <= 0:
        raise ValueError("half_life_seconds must be positive")
    # Hoist the decay constant so each call does a single multiply.
    decay = math.log(2) / half_life_seconds

    def fn(symbol: "Symbol", context: Optional[Dict] = None) -> float:
        # Clamp negative ages (future timestamps / clock skew) to zero so
        # the result never exceeds 1.0, honoring the documented 0-1 range.
        # NOTE(review): assumes symbol.timestamp is naive local time, like
        # datetime.now() — confirm against Symbol's definition.
        age = max(0.0, (datetime.now() - symbol.timestamp).total_seconds())
        return math.exp(-age * decay)
    return fn
def length_salience(optimal_length: int = 50, falloff: float = 0.02) -> SalienceFunction:
    """
    Build a salience function driven by content length.

    The score peaks at 1.0 when the string form of the symbol's data is
    exactly ``optimal_length`` characters and falls off exponentially as
    the length deviates in either direction, so both very short and very
    long content score low.
    """
    def fn(symbol: Symbol, context: Optional[Dict] = None) -> float:
        distance = abs(len(str(symbol.data)) - optimal_length)
        return math.exp(-falloff * distance)
    return fn
def keyword_salience(keywords: Dict[str, float]) -> SalienceFunction:
    """
    Build a salience function that rewards keyword presence.

    Starts from a 0.3 baseline and adds the weight of every keyword found
    (case-insensitively) in the symbol's text, capping the result at 1.0.

    Args:
        keywords: Dict mapping keyword -> boost value
    """
    def fn(symbol: Symbol, context: Optional[Dict] = None) -> float:
        haystack = str(symbol.data).lower()
        boost = sum(
            weight
            for keyword, weight in keywords.items()
            if keyword.lower() in haystack
        )
        return min(1.0, 0.3 + boost)  # Base of 0.3
    return fn
# -----------------------------------------------------------------------------
# Composite Salience
# -----------------------------------------------------------------------------
class CompositeSalience:
    """
    Combines multiple salience functions into one score.

    Supported aggregation strategies:
    - max: Take the highest salience
    - mean: Average all saliences
    - product: Multiply (all must be high)
    - weighted: Weighted average using ``weights``
    """
    def __init__(
        self,
        functions: List["SalienceFunction"],
        weights: Optional[List[float]] = None,
        aggregation: str = "mean"
    ):
        """
        Args:
            functions: Component salience functions to combine.
            weights: Per-function weights (used by "weighted");
                defaults to equal weights.
            aggregation: One of "max", "mean", "product", "weighted".

        Raises:
            ValueError: If ``weights`` and ``functions`` differ in length,
                or aggregation is "weighted" and the weights sum to zero.
        """
        self.functions = functions
        self.weights = weights or [1.0] * len(functions)
        self.aggregation = aggregation
        # Validate with real exceptions: ``assert`` is silently stripped
        # under ``python -O`` and would let a mismatch through.
        if len(self.weights) != len(self.functions):
            raise ValueError(
                f"weights ({len(self.weights)}) and functions "
                f"({len(self.functions)}) must have the same length"
            )
        # Zero total weight would divide by zero in __call__.
        if self.aggregation == "weighted" and not sum(self.weights):
            raise ValueError("weights must not sum to zero")

    def __call__(self, symbol: "Symbol", context: Optional[Dict] = None) -> float:
        """Score ``symbol`` with every component and aggregate the results."""
        scores = [fn(symbol, context) for fn in self.functions]
        if self.aggregation == "max":
            return max(scores)
        elif self.aggregation == "mean":
            return sum(scores) / len(scores)
        elif self.aggregation == "product":
            result = 1.0
            for s in scores:
                result *= s
            return result
        elif self.aggregation == "weighted":
            total_weight = sum(self.weights)
            return sum(s * w for s, w in zip(scores, self.weights)) / total_weight
        else:
            raise ValueError(f"Unknown aggregation: {self.aggregation}")
# -----------------------------------------------------------------------------
# Pluggable External Salience
# -----------------------------------------------------------------------------
class NoveltyAdapter:
    """
    Expose an external novelty scorer as a salience function.

    Bridges to the c:\\code\\novelty system: the more novel a symbol's
    data, the higher its salience.
    """
    def __init__(self, novelty_fn: Callable[[Any], float], scale: float = 1.0):
        """
        Args:
            novelty_fn: Function that computes novelty score (0-1)
            scale: Multiplier for novelty contribution
        """
        self.novelty_fn = novelty_fn
        self.scale = scale

    def __call__(self, symbol: Symbol, context: Optional[Dict] = None) -> float:
        try:
            scaled = self.novelty_fn(symbol.data) * self.scale
            return min(1.0, scaled)
        except Exception:
            # Any scorer failure degrades gracefully to a neutral score.
            return 0.5
class AllocationAdapter:
    """
    Expose world model allocations as salience.

    Bridges to the c:\\code\\life system: a symbol whose data classifies
    into a tendency inherits that tendency's current allocation as its
    salience; unclassifiable symbols and lookup failures score 0.3.
    """
    def __init__(
        self,
        get_allocations: Callable[[], Dict[str, float]],
        classify_fn: Callable[[Any], Optional[str]],
    ):
        """
        Args:
            get_allocations: Returns current tendency allocations
            classify_fn: Classifies a symbol's data into a tendency (or None)
        """
        self.get_allocations = get_allocations
        self.classify_fn = classify_fn

    def __call__(self, symbol: Symbol, context: Optional[Dict] = None) -> float:
        fallback = 0.3  # floor for unclassified symbols and any failure
        try:
            tendency = self.classify_fn(symbol.data)
            if tendency is None:
                return fallback
            return self.get_allocations().get(tendency, fallback)
        except Exception:
            return fallback
# -----------------------------------------------------------------------------
# Salience Tracker
# -----------------------------------------------------------------------------
@dataclass
class SalienceRecord:
    """Record of a single salience computation for one symbol.

    Stored by SalienceTracker so past scores can be inspected later.
    """
    symbol_hash: str  # hash of the scored Symbol (its identity key)
    computed_salience: float  # final aggregated salience score
    component_scores: Dict[str, float]  # per-component scores, keyed by component name
    timestamp: datetime = field(default_factory=datetime.now)  # when the score was recorded
class SalienceTracker:
    """
    Tracks salience computations over time.

    Useful for:
    - Debugging why certain symbols got high/low salience
    - Computing salience trends
    - Identifying which components contribute most

    History is bounded at ``max_history`` records; once exceeded, the
    oldest record is evicted and its per-symbol index entry cleaned up.
    """
    def __init__(self, max_history: int = 1000):
        """
        Args:
            max_history: Maximum number of records retained before the
                oldest are evicted.
        """
        self.max_history = max_history
        # deque gives O(1) eviction from the left; a list's pop(0) is O(n).
        self._history: "deque[SalienceRecord]" = deque()
        self._by_hash: "Dict[str, List[SalienceRecord]]" = {}

    def record(
        self,
        symbol: "Symbol",
        salience: float,
        components: Optional[Dict[str, float]] = None
    ):
        """Record a salience computation.

        Args:
            symbol: The scored symbol (only its hash is stored).
            salience: The final aggregated salience.
            components: Optional per-component scores for debugging.
        """
        rec = SalienceRecord(
            symbol_hash=symbol.hash,
            computed_salience=salience,
            component_scores=components or {}
        )
        self._history.append(rec)
        if len(self._history) > self.max_history:
            old = self._history.popleft()
            bucket = self._by_hash.get(old.symbol_hash)
            if bucket is not None:
                bucket.remove(old)
                # Drop empty buckets so _by_hash doesn't grow without
                # bound as old symbols age out of the history.
                if not bucket:
                    del self._by_hash[old.symbol_hash]
        self._by_hash.setdefault(symbol.hash, []).append(rec)

    def get_history(self, symbol_hash: str) -> "List[SalienceRecord]":
        """Get salience history for a specific symbol hash ([] if unseen)."""
        return self._by_hash.get(symbol_hash, [])

    def average_salience(self) -> float:
        """Get average salience across all retained records (0.0 if empty)."""
        if not self._history:
            return 0.0
        return sum(r.computed_salience for r in self._history) / len(self._history)

    def top_components(self, n: int = 5) -> "List[tuple[str, float]]":
        """Get the ``n`` components that contribute most to salience.

        Averages each named component's scores across the retained
        history and returns (name, average) pairs, highest first.
        """
        totals: Dict[str, float] = defaultdict(float)
        counts: Dict[str, int] = defaultdict(int)
        for rec in self._history:
            for comp, score in rec.component_scores.items():
                totals[comp] += score
                counts[comp] += 1
        averages = [(comp, totals[comp] / counts[comp]) for comp in totals]
        averages.sort(key=lambda item: item[1], reverse=True)
        return averages[:n]