# core/attention/context_stream.py
  1  """
  2  Context Stream - Continuous attention-weighted context management.
  3  
  4  Instead of fill-compact-fill sawtooth, maintain a continuous stream
  5  where items flow in and out based on attention.
  6  
  7  Key insight: Context is a river, not a bucket.
  8  - Items enter when attention lands on them
  9  - Items exit when attention decays below threshold
 10  - High-attention items move slowly (high friction)
 11  - Low-attention items flow quickly (low friction)
 12  - The stream is always near capacity but breathing
 13  
 14  This eliminates the jarring "compaction" events and creates
 15  smooth context evolution that mirrors how human memory works.
 16  """
 17  
 18  from dataclasses import dataclass, field
 19  from datetime import datetime, timedelta
 20  from typing import Optional, List, Dict, Tuple, Any, Callable, Set
 21  from enum import Enum
 22  import heapq
 23  import math
 24  
 25  from .signal_words import detect_signals, SignalDetection
 26  
 27  
class StreamPriority(Enum):
    """
    Lifecycle priority of an item within the stream.

    tick() reassigns priority from the current attention score each pass
    (PINNED items are skipped entirely and never decay or exit).
    """
    PINNED = "pinned"        # Never exits (unresolved items); skipped by tick()
    ACTIVE = "active"        # Currently being attended to (attention >= 0.6)
    RECENT = "recent"        # Recently attended (0.3 <= attention < 0.6)
    DECAYING = "decaying"    # Attention fading (exit_threshold <= attention < 0.3)
    EXITING = "exiting"      # About to leave stream (attention < exit_threshold)
 35  
 36  
@dataclass
class StreamItem:
    """
    An item in the context stream.

    Each item has:
    - Content (the actual context)
    - Attention score (current relevance)
    - Velocity (how fast it's moving toward exit)
    - Entry time (when it entered the stream)
    """
    id: str            # Unique identifier; also the key in the stream's item map
    content: str       # The actual context payload
    token_count: int   # Size of the content in tokens (caller-supplied estimate)

    # Attention state
    attention_score: float = 0.5  # Current attention (0-1); decays toward 0 in tick()
    peak_attention: float = 0.5   # Highest attention ever; tick() decays FROM this value
    last_attended: datetime = field(default_factory=datetime.now)  # naive local time
    entry_time: datetime = field(default_factory=datetime.now)     # naive local time

    # Stream dynamics
    velocity: float = 0.0  # Positive = moving toward exit; reset to 0 on re-attention
    friction: float = 1.0  # Resistance to movement, derived from attention
    priority: StreamPriority = StreamPriority.RECENT

    # Metadata
    source: str = ""  # Where this came from
    item_type: str = ""  # 'message', 'file', 'search_result', etc.

    # Signal-based metadata (populated from detect_signals() on entry)
    signal_tags: Set[str] = field(default_factory=set)
    signal_weight: float = 1.0  # Modifier from signal words
    needs_validation: bool = False  # Flagged for validation

    def __lt__(self, other: "StreamItem") -> bool:
        """Order by attention: lower attention sorts first (exits sooner).

        NOTE(review): ordering is based on a mutable field, so any heap
        built on it can go stale once attention scores change.
        """
        return self.attention_score < other.attention_score
 75  
 76  
@dataclass
class StreamState:
    """Point-in-time snapshot of the context stream's health.

    NOTE(review): ContextStream.get_state() populates only total_tokens,
    max_tokens, item_count, pressure and avg_attention; the remaining
    fields stay at their defaults unless set by another caller.
    """
    total_tokens: int = 0
    max_tokens: int = 100000  # Target capacity
    item_count: int = 0

    # Stream health
    pressure: float = 0.0  # How close to capacity (0-1)
    flow_rate: float = 0.0  # Items exiting per minute
    avg_attention: float = 0.0  # Average attention across items

    # Recent activity counters
    items_entered: int = 0
    items_exited: int = 0
    tokens_entered: int = 0
    tokens_exited: int = 0
 93      tokens_exited: int = 0
 94  
 95  
 96  class ContextStream:
 97      """
 98      Continuous attention-weighted context stream.
 99  
100      Usage:
101          stream = ContextStream(max_tokens=100000)
102  
103          # Items enter when attended
104          stream.attend("item_1", content="...", tokens=500, intensity=0.8)
105  
106          # Stream flows continuously
107          stream.tick()  # Call periodically to update velocities
108  
109          # Get current context (items sorted by attention)
110          context = stream.get_context(max_tokens=50000)
111  
112          # Check what's about to exit
113          exiting = stream.get_exiting_items()
114      """
115  
116      # Decay rate: attention halves every N seconds without reinforcement
117      ATTENTION_HALF_LIFE_SECONDS = 300  # 5 minutes
118  
119      # Velocity parameters
120      BASE_VELOCITY = 0.01  # Base flow rate
121      PRESSURE_VELOCITY_BOOST = 0.05  # Extra velocity when near capacity
122  
123      # Friction from attention (high attention = high friction = slow exit)
124      ATTENTION_FRICTION_SCALE = 10.0
125  
126      def __init__(
127          self,
128          max_tokens: int = 100000,
129          exit_threshold: float = 0.1,  # Attention below this → exit
130          pressure_threshold: float = 0.9  # Start accelerating exits
131      ):
132          self.max_tokens = max_tokens
133          self.exit_threshold = exit_threshold
134          self.pressure_threshold = pressure_threshold
135  
136          # Item storage
137          self._items: Dict[str, StreamItem] = {}
138  
139          # Exit queue (min-heap by attention)
140          self._exit_queue: List[StreamItem] = []
141  
142          # State tracking
143          self._total_tokens = 0
144          self._last_tick = datetime.now()
145  
146          # Callbacks
147          self._on_exit: List[Callable[[StreamItem], None]] = []
148          self._on_enter: List[Callable[[StreamItem], None]] = []
149  
150      def on_exit(self, callback: Callable[[StreamItem], None]) -> None:
151          """Register callback for when items exit the stream."""
152          self._on_exit.append(callback)
153  
154      def on_enter(self, callback: Callable[[StreamItem], None]) -> None:
155          """Register callback for when items enter the stream."""
156          self._on_enter.append(callback)
157  
158      def attend(
159          self,
160          item_id: str,
161          content: str = None,
162          tokens: int = None,
163          intensity: float = 0.5,
164          source: str = "",
165          item_type: str = "",
166          pinned: bool = False
167      ) -> StreamItem:
168          """
169          Attend to an item - either boosting existing or adding new.
170  
171          Args:
172              item_id: Unique identifier for the item
173              content: Content (required for new items)
174              tokens: Token count (required for new items)
175              intensity: Attention intensity (0-1)
176              source: Where this came from
177              item_type: Type of item
178              pinned: If True, item never exits
179  
180          Returns:
181              The StreamItem (new or updated)
182          """
183          now = datetime.now()
184  
185          if item_id in self._items:
186              # Boost existing item
187              item = self._items[item_id]
188              item.attention_score = min(1.0, item.attention_score + intensity * 0.3)
189              item.peak_attention = max(item.peak_attention, item.attention_score)
190              item.last_attended = now
191              item.velocity = 0.0  # Reset velocity on attention
192              item.friction = self._compute_friction(item.attention_score)
193  
194              if pinned:
195                  item.priority = StreamPriority.PINNED
196  
197              return item
198  
199          # New item
200          if content is None or tokens is None:
201              raise ValueError("content and tokens required for new items")
202  
203          # Detect signal words to adjust weight and persistence
204          signal_detection = detect_signals(content)
205          adjusted_intensity = intensity * signal_detection.weight_modifier
206          adjusted_intensity = min(1.0, adjusted_intensity)  # Cap at 1.0
207  
208          # Determine if should be pinned based on signals
209          should_pin = pinned or signal_detection.should_pin
210  
211          item = StreamItem(
212              id=item_id,
213              content=content,
214              token_count=tokens,
215              attention_score=adjusted_intensity,
216              peak_attention=adjusted_intensity,
217              last_attended=now,
218              entry_time=now,
219              velocity=0.0,
220              friction=self._compute_friction(adjusted_intensity),
221              priority=StreamPriority.PINNED if should_pin else StreamPriority.ACTIVE,
222              source=source,
223              item_type=item_type,
224              signal_tags=signal_detection.suggested_tags,
225              signal_weight=signal_detection.weight_modifier,
226              needs_validation="needs_validation" in signal_detection.suggested_tags
227          )
228  
229          self._items[item_id] = item
230          self._total_tokens += tokens
231          heapq.heappush(self._exit_queue, item)
232  
233          for callback in self._on_enter:
234              callback(item)
235  
236          return item
237  
238      def pin(self, item_id: str) -> bool:
239          """Pin an item so it never exits."""
240          if item_id in self._items:
241              self._items[item_id].priority = StreamPriority.PINNED
242              return True
243          return False
244  
245      def unpin(self, item_id: str) -> bool:
246          """Unpin an item so it can exit normally."""
247          if item_id in self._items:
248              item = self._items[item_id]
249              if item.priority == StreamPriority.PINNED:
250                  item.priority = StreamPriority.ACTIVE
251                  return True
252          return False
253  
254      def tick(self) -> List[StreamItem]:
255          """
256          Advance the stream - decay attention, update velocities, exit items.
257  
258          Call this periodically (every second or so) to keep the stream flowing.
259  
260          Returns:
261              List of items that exited during this tick
262          """
263          now = datetime.now()
264          dt = (now - self._last_tick).total_seconds()
265          self._last_tick = now
266  
267          if dt <= 0:
268              return []
269  
270          exited = []
271          pressure = self._compute_pressure()
272  
273          for item in list(self._items.values()):
274              # Skip pinned items
275              if item.priority == StreamPriority.PINNED:
276                  continue
277  
278              # Decay attention
279              time_since_attention = (now - item.last_attended).total_seconds()
280              decay_factor = math.pow(0.5, time_since_attention / self.ATTENTION_HALF_LIFE_SECONDS)
281              item.attention_score = item.peak_attention * decay_factor
282  
283              # Update friction based on current attention
284              item.friction = self._compute_friction(item.attention_score)
285  
286              # Compute velocity
287              base_velocity = self.BASE_VELOCITY
288              if pressure > self.pressure_threshold:
289                  # Accelerate exits when under pressure
290                  pressure_boost = (pressure - self.pressure_threshold) / (1 - self.pressure_threshold)
291                  base_velocity += self.PRESSURE_VELOCITY_BOOST * pressure_boost
292  
293              # Apply friction
294              item.velocity = base_velocity / max(0.1, item.friction)
295  
296              # Update priority based on attention
297              if item.attention_score < self.exit_threshold:
298                  item.priority = StreamPriority.EXITING
299              elif item.attention_score < 0.3:
300                  item.priority = StreamPriority.DECAYING
301              elif item.attention_score < 0.6:
302                  item.priority = StreamPriority.RECENT
303              else:
304                  item.priority = StreamPriority.ACTIVE
305  
306              # Check for exit
307              if item.priority == StreamPriority.EXITING:
308                  exited.append(item)
309  
310          # Process exits
311          for item in exited:
312              self._exit_item(item)
313  
314          return exited
315  
316      def _exit_item(self, item: StreamItem) -> None:
317          """Remove an item from the stream."""
318          if item.id in self._items:
319              del self._items[item.id]
320              self._total_tokens -= item.token_count
321  
322              for callback in self._on_exit:
323                  callback(item)
324  
325      def _compute_friction(self, attention: float) -> float:
326          """Compute friction from attention score."""
327          # High attention = high friction = slow movement
328          return 1.0 + attention * self.ATTENTION_FRICTION_SCALE
329  
330      def _compute_pressure(self) -> float:
331          """Compute stream pressure (how full it is)."""
332          return min(1.0, self._total_tokens / self.max_tokens)
333  
334      def get_context(
335          self,
336          max_tokens: int = None,
337          include_exiting: bool = False
338      ) -> List[StreamItem]:
339          """
340          Get current context items, sorted by attention.
341  
342          Args:
343              max_tokens: Limit total tokens (default: all)
344              include_exiting: Include items about to exit
345  
346          Returns:
347              List of StreamItems sorted by attention (highest first)
348          """
349          items = [
350              item for item in self._items.values()
351              if include_exiting or item.priority != StreamPriority.EXITING
352          ]
353  
354          # Sort by attention (highest first)
355          items.sort(key=lambda x: x.attention_score, reverse=True)
356  
357          if max_tokens is None:
358              return items
359  
360          # Limit by tokens
361          result = []
362          total = 0
363          for item in items:
364              if total + item.token_count > max_tokens:
365                  break
366              result.append(item)
367              total += item.token_count
368  
369          return result
370  
371      def get_exiting_items(self) -> List[StreamItem]:
372          """Get items that are about to exit."""
373          return [
374              item for item in self._items.values()
375              if item.priority == StreamPriority.EXITING
376          ]
377  
378      def get_items_needing_validation(self) -> List[StreamItem]:
379          """Get items flagged as needing validation (e.g., 'what do you think?')."""
380          return [
381              item for item in self._items.values()
382              if item.needs_validation
383          ]
384  
385      def get_items_by_tag(self, tag: str) -> List[StreamItem]:
386          """Get items with a specific signal tag."""
387          return [
388              item for item in self._items.values()
389              if tag in item.signal_tags
390          ]
391  
392      def get_principles(self) -> List[StreamItem]:
393          """Get items tagged as principles."""
394          return self.get_items_by_tag("principle")
395  
396      def get_decisions(self) -> List[StreamItem]:
397          """Get items tagged as decisions."""
398          return self.get_items_by_tag("decision")
399  
400      def validate_item(self, item_id: str, validated: bool = True) -> bool:
401          """
402          Mark an item as validated (or not).
403  
404          If validated=True, the item gets boosted and the needs_validation flag cleared.
405          If validated=False, the item gets demoted.
406          """
407          if item_id not in self._items:
408              return False
409  
410          item = self._items[item_id]
411          item.needs_validation = False
412  
413          if validated:
414              # Boost attention and pin
415              item.attention_score = min(1.0, item.attention_score * 1.3)
416              item.peak_attention = max(item.peak_attention, item.attention_score)
417              item.priority = StreamPriority.PINNED
418              item.signal_tags.add("validated")
419          else:
420              # Demote
421              item.attention_score *= 0.5
422              item.signal_tags.add("rejected")
423  
424          return True
425  
426      def get_state(self) -> StreamState:
427          """Get current stream state."""
428          items = list(self._items.values())
429  
430          return StreamState(
431              total_tokens=self._total_tokens,
432              max_tokens=self.max_tokens,
433              item_count=len(items),
434              pressure=self._compute_pressure(),
435              avg_attention=sum(i.attention_score for i in items) / max(1, len(items))
436          )
437  
438      def force_exit(self, item_id: str) -> bool:
439          """Force an item to exit immediately."""
440          if item_id in self._items:
441              item = self._items[item_id]
442              self._exit_item(item)
443              return True
444          return False
445  
446      def get_item(self, item_id: str) -> Optional[StreamItem]:
447          """Get a specific item by ID."""
448          return self._items.get(item_id)
449  
450      def __len__(self) -> int:
451          return len(self._items)
452  
453      def __contains__(self, item_id: str) -> bool:
454          return item_id in self._items
455  
456  
class AttentionStreamBridge:
    """
    Bridges the attention system to the context stream.

    Attention events flow into the stream, maintaining continuous
    context that evolves with attention.
    """

    # How strongly each input modality counts toward initial attention.
    _MODALITY_WEIGHTS = {
        'highlight': 1.0,
        'read': 0.7,
        'gaze': 0.8,
        'listen': 0.6,
        'search': 0.5,
        'glance': 0.3,
    }
    # Fallback for modalities not listed above.
    _DEFAULT_MODALITY_WEIGHT = 0.5

    def __init__(
        self,
        stream: ContextStream,
        default_tokens: int = 100  # Default token count for items
    ):
        self.stream = stream
        self.default_tokens = default_tokens

    def on_attention_event(
        self,
        target_id: str,
        content: str,
        intensity: float,
        modality: str,
        source: str
    ) -> StreamItem:
        """
        Feed an attention event into the stream.

        Higher intensity → higher initial attention; the raw intensity is
        scaled by a per-modality weight. Token count is estimated from
        content length (~4 chars/token) with a floor of ``default_tokens``.
        """
        modality_weight = self._MODALITY_WEIGHTS.get(
            modality, self._DEFAULT_MODALITY_WEIGHT
        )
        scaled_intensity = intensity * modality_weight

        # Cheap token estimate; attend() records this on the item.
        estimated_tokens = max(self.default_tokens, len(content) // 4)

        return self.stream.attend(
            item_id=target_id,
            content=content,
            tokens=estimated_tokens,
            intensity=scaled_intensity,
            source=source,
            item_type=modality,
        )
511  
512  
def create_context_stream(
    max_tokens: int = 100000,
    exit_threshold: float = 0.1
) -> Tuple[ContextStream, AttentionStreamBridge]:
    """
    Build a context stream wired to an attention bridge.

    Args:
        max_tokens: Target capacity of the stream.
        exit_threshold: Attention level below which items exit.

    Returns:
        (ContextStream, AttentionStreamBridge)
    """
    stream = ContextStream(max_tokens=max_tokens, exit_threshold=exit_threshold)
    return stream, AttentionStreamBridge(stream)
529  
530  
531  # =============================================================================
532  # CONTEXT STREAM TUNER
533  # =============================================================================
534  
@dataclass
class EngagementEvent:
    """
    Records when operator engages with a context item.

    Used to learn which items they actually care about vs what
    the model predicted they'd care about.
    """
    item_id: str
    # One of: 'reference', 'expand', 'action', 'ignore', 'dismiss', 'revive'
    # (see ContextStreamTuner.record_engagement for what each means)
    engagement_type: str
    attention_at_engagement: float  # What model predicted (0.0 if item had exited)
    time_in_stream: float  # Seconds since entry; -1 means the item had already exited
    timestamp: datetime = field(default_factory=datetime.now)
    context: Optional[Dict[str, Any]] = None  # Free-form extra info from the caller
549  
550  
@dataclass
class StreamParameters:
    """
    Tunable parameters for the context stream.

    Mirrors the ContextStream knobs (half-life, velocities, thresholds)
    so a tuner can adjust them and push them back onto a stream instance.
    """
    attention_half_life: float = 300.0  # Seconds until attention halves
    base_velocity: float = 0.01
    pressure_velocity_boost: float = 0.05
    attention_friction_scale: float = 10.0
    exit_threshold: float = 0.1
    pressure_threshold: float = 0.9

    # Per-modality weights (how much each modality boosts attention)
    modality_weights: Dict[str, float] = field(default_factory=lambda: {
        'highlight': 1.0,
        'read': 0.7,
        'gaze': 0.8,
        'listen': 0.6,
        'search': 0.5,
        'glance': 0.3
    })

    # Per-source weights (how much we trust different sources).
    # NOTE(review): declared and persisted, but ContextStreamTuner has no
    # update rule for these yet — they stay at their defaults unless
    # imported from saved parameters.
    source_weights: Dict[str, float] = field(default_factory=lambda: {
        'conversation': 1.0,
        'file': 0.8,
        'search': 0.6,
        'external': 0.5
    })
580  
581  
class ContextStreamTuner:
    """
    Learns optimal context stream parameters from operator behavior.

    The key insight: what the operator ACTUALLY engages with tells us
    what should stay in context vs what should decay faster.

    Learns:
    - Half-life: If operator often references "old" items, increase half-life
    - Exit threshold: If operator revives exiting items, threshold is too high
    - Modality weights: Which input modalities predict actual engagement
    - Source weights: Which sources provide lasting-relevant context
      (declared in StreamParameters; no update rule is implemented here yet)

    This is the context window equivalent of the ResonanceWeightTuner -
    learning preferences implicitly from behavior.
    """

    DEFAULT_LEARNING_RATE = 0.05  # Slower than resonance tuner (parameters are sensitive)
    WINDOW_SIZE = 200  # Events to consider

    def __init__(
        self,
        stream: ContextStream,
        operator_id: str,
        learning_rate: float = DEFAULT_LEARNING_RATE
    ):
        self.stream = stream
        self.operator_id = operator_id
        self.learning_rate = learning_rate

        # Current (learned) parameters.
        self.params = StreamParameters()

        # Engagement history, bounded to ~2x WINDOW_SIZE.
        self.engagement_events: List[EngagementEvent] = []

        # Performance tracking
        self.prediction_errors: List[float] = []  # Model attention vs actual engagement
        self.revival_rate: float = 0.0  # Fraction of recent events that revive items

    def record_engagement(
        self,
        item_id: str,
        engagement_type: str,
        context: Optional[Dict[str, Any]] = None
    ):
        """
        Record when operator engages with a context item.

        engagement_type:
        - 'reference': Operator explicitly referenced this item
        - 'expand': Operator asked for more detail on this item
        - 'action': Operator took action based on this item
        - 'ignore': Item was presented but not engaged with
        - 'dismiss': Operator explicitly dismissed/cleared this item
        - 'revive': Operator brought back an exiting/exited item
        """
        item = self.stream.get_item(item_id)

        if item:
            attention_at_engagement = item.attention_score
            time_in_stream = (datetime.now() - item.entry_time).total_seconds()
        else:
            # Item might have exited - this is a revival signal
            attention_at_engagement = 0.0
            time_in_stream = -1  # Sentinel: item already exited

        event = EngagementEvent(
            item_id=item_id,
            engagement_type=engagement_type,
            attention_at_engagement=attention_at_engagement,
            time_in_stream=time_in_stream,
            timestamp=datetime.now(),
            context=context
        )

        self.engagement_events.append(event)

        # Keep window bounded
        if len(self.engagement_events) > self.WINDOW_SIZE * 2:
            self.engagement_events = self.engagement_events[-self.WINDOW_SIZE:]

        # Track how often the operator revives items the stream let go of
        # (fraction of 'revive' events in the recent window).
        recent = self.engagement_events[-self.WINDOW_SIZE:]
        self.revival_rate = sum(
            1 for e in recent if e.engagement_type == 'revive'
        ) / len(recent)

        # Update parameters based on this event
        self._update_parameters(event, item)

    def _update_parameters(self, event: EngagementEvent, item: Optional[StreamItem]):
        """Update stream parameters based on a single engagement event."""

        # Track prediction error
        if event.engagement_type in ('reference', 'expand', 'action'):
            # Positive engagement - did we predict this?
            if event.attention_at_engagement < 0.3:
                # Low attention but high engagement = prediction error
                error = 1.0 - event.attention_at_engagement
            else:
                error = 0.0
            self.prediction_errors.append(error)

        elif event.engagement_type == 'ignore':
            # Item was shown but ignored
            if event.attention_at_engagement > 0.7:
                # High attention but ignored = prediction error
                error = event.attention_at_engagement
                self.prediction_errors.append(error)

        # Learn from specific patterns

        # 1. Revival pattern: item exited but operator wanted it back
        if event.engagement_type == 'revive' or event.time_in_stream < 0:
            # Exit threshold is too aggressive - lower it (floored at 0.01)
            self.params.exit_threshold *= (1 - self.learning_rate)
            self.params.exit_threshold = max(0.01, self.params.exit_threshold)

            # Also increase half-life so items linger longer
            self.params.attention_half_life *= (1 + self.learning_rate * 0.5)

        # 2. Old engagement: operator engaged with long-in-stream item
        if event.engagement_type in ('reference', 'expand', 'action'):
            if event.time_in_stream > self.params.attention_half_life:
                # Item was old but still relevant - increase half-life
                factor = min(2.0, event.time_in_stream / self.params.attention_half_life)
                adjustment = self.learning_rate * (factor - 1.0) * 0.1
                self.params.attention_half_life *= (1 + adjustment)

        # 3. Quick dismiss: operator dismissed high-attention item
        if event.engagement_type == 'dismiss' and event.attention_at_engagement > 0.6:
            # Our attention model is wrong for this type of item
            # Increase velocity for faster exit of similar items
            self.params.base_velocity *= (1 + self.learning_rate * 0.3)

        # 4. Modality learning (item_type holds the modality at entry time)
        if item and item.item_type and event.engagement_type in ('reference', 'expand', 'action'):
            # Positive engagement - boost this modality (capped at 1.5)
            modality = item.item_type
            if modality in self.params.modality_weights:
                current = self.params.modality_weights[modality]
                self.params.modality_weights[modality] = min(1.5, current * (1 + self.learning_rate))

        if item and item.item_type and event.engagement_type == 'ignore':
            # Ignored - decrease modality weight (floored at 0.1)
            modality = item.item_type
            if modality in self.params.modality_weights:
                current = self.params.modality_weights[modality]
                self.params.modality_weights[modality] = max(0.1, current * (1 - self.learning_rate))

        # Keep bounded
        if len(self.prediction_errors) > self.WINDOW_SIZE:
            self.prediction_errors = self.prediction_errors[-self.WINDOW_SIZE:]

        # Apply learned parameters to stream
        self._apply_parameters()

    def _apply_parameters(self):
        """Push learned parameters onto the stream.

        Assigning to the upper-case names creates per-instance attributes
        that shadow ContextStream's class-level defaults, so tuning is
        scoped to this stream only.
        """
        self.stream.ATTENTION_HALF_LIFE_SECONDS = self.params.attention_half_life
        self.stream.BASE_VELOCITY = self.params.base_velocity
        self.stream.PRESSURE_VELOCITY_BOOST = self.params.pressure_velocity_boost
        self.stream.ATTENTION_FRICTION_SCALE = self.params.attention_friction_scale
        self.stream.exit_threshold = self.params.exit_threshold
        self.stream.pressure_threshold = self.params.pressure_threshold

    def get_tuning_report(self) -> Dict[str, Any]:
        """Get report on current tuning state (errors, counts, parameters)."""
        recent_errors = self.prediction_errors[-50:] if self.prediction_errors else []
        avg_error = sum(recent_errors) / max(1, len(recent_errors))

        # Count engagement types over the recent window
        type_counts = {}
        for event in self.engagement_events[-self.WINDOW_SIZE:]:
            type_counts[event.engagement_type] = type_counts.get(event.engagement_type, 0) + 1

        return {
            'operator_id': self.operator_id,
            'total_events': len(self.engagement_events),
            'avg_prediction_error': avg_error,
            'revival_rate': self.revival_rate,
            'engagement_counts': type_counts,
            'current_parameters': {
                'attention_half_life': self.params.attention_half_life,
                'base_velocity': self.params.base_velocity,
                'exit_threshold': self.params.exit_threshold,
                'pressure_threshold': self.params.pressure_threshold,
            },
            'modality_weights': dict(self.params.modality_weights),
            'source_weights': dict(self.params.source_weights),
        }

    def export_parameters(self) -> Dict[str, Any]:
        """Export parameters for persistence (inverse of import_parameters)."""
        return {
            'attention_half_life': self.params.attention_half_life,
            'base_velocity': self.params.base_velocity,
            'pressure_velocity_boost': self.params.pressure_velocity_boost,
            'attention_friction_scale': self.params.attention_friction_scale,
            'exit_threshold': self.params.exit_threshold,
            'pressure_threshold': self.params.pressure_threshold,
            'modality_weights': dict(self.params.modality_weights),
            'source_weights': dict(self.params.source_weights),
        }

    def import_parameters(self, params: Dict[str, Any]):
        """Import persisted parameters (missing keys keep current values) and apply them."""
        if 'attention_half_life' in params:
            self.params.attention_half_life = params['attention_half_life']
        if 'base_velocity' in params:
            self.params.base_velocity = params['base_velocity']
        if 'pressure_velocity_boost' in params:
            self.params.pressure_velocity_boost = params['pressure_velocity_boost']
        if 'attention_friction_scale' in params:
            self.params.attention_friction_scale = params['attention_friction_scale']
        if 'exit_threshold' in params:
            self.params.exit_threshold = params['exit_threshold']
        if 'pressure_threshold' in params:
            self.params.pressure_threshold = params['pressure_threshold']
        if 'modality_weights' in params:
            self.params.modality_weights = dict(params['modality_weights'])
        if 'source_weights' in params:
            self.params.source_weights = dict(params['source_weights'])

        self._apply_parameters()
801  
802  
def create_tuned_context_stream(
    operator_id: str,
    max_tokens: int = 100000,
    saved_params: Dict[str, Any] = None
) -> Tuple[ContextStream, AttentionStreamBridge, ContextStreamTuner]:
    """
    Create a context stream with tuning capability.

    Any previously persisted parameters are applied to the tuner (and,
    through it, to the stream) before returning.

    Returns:
        (ContextStream, AttentionStreamBridge, ContextStreamTuner)
    """
    stream = ContextStream(max_tokens=max_tokens)
    tuner = ContextStreamTuner(stream, operator_id)

    if saved_params:
        tuner.import_parameters(saved_params)

    return stream, AttentionStreamBridge(stream), tuner
822  
823  
if __name__ == "__main__":
    print("=== Context Stream Demo ===\n")
    print("Context is a river, not a bucket.\n")

    stream, bridge = create_context_stream(max_tokens=10000)

    # Simulate attention events
    print("1. Adding items with varying attention...")

    bridge.on_attention_event(
        target_id="item_1",
        content="High attention item - actively working on this",
        intensity=0.9,
        modality="highlight",
        source="conversation"
    )

    bridge.on_attention_event(
        target_id="item_2",
        content="Medium attention - mentioned briefly",
        intensity=0.5,
        modality="read",
        source="conversation"
    )

    bridge.on_attention_event(
        target_id="item_3",
        content="Low attention - barely glanced",
        intensity=0.2,
        modality="glance",
        source="conversation"
    )

    state = stream.get_state()
    print(f"   Items: {state.item_count}, Pressure: {state.pressure:.1%}")

    # Simulate time passing and decay
    print("\n2. Simulating attention decay...")

    # Manually decay for demo (normally tick() handles this).
    # Both attention_score AND peak_attention are lowered: tick()
    # recomputes attention_score from peak_attention, so setting only
    # the current score would be overwritten on the very next tick.
    demo_attention = {
        "item_1": 0.8,   # Still high
        "item_2": 0.3,   # Decayed
        "item_3": 0.05,  # Below exit threshold
    }
    for item in stream._items.values():
        if item.id in demo_attention:
            item.attention_score = demo_attention[item.id]
            item.peak_attention = demo_attention[item.id]

    exited = stream.tick()
    print(f"   Exited: {[e.id for e in exited]}")

    # Get current context
    print("\n3. Current context (sorted by attention):")
    for item in stream.get_context():
        print(f"   [{item.attention_score:.2f}] {item.id}: {item.content[:40]}...")

    print("\n4. Pinning an item (unresolved items never exit)...")
    stream.pin("item_2")
    item2 = stream.get_item("item_2")
    print(f"   item_2 priority: {item2.priority.value}")

    print("\n=== Stream breathing, not sawteeth ===")
885  
886      print("\n=== Stream breathing, not sawteeth ===")