From 78292e836f40362c2c687598f5c9b7330131f352 Mon Sep 17 00:00:00 2001
From: Lily Miller
Date: Mon, 6 Oct 2025 16:59:09 -0600
Subject: [PATCH] Improve tool output processing: raise pass-through threshold,
 use extraction for medium outputs

FIXES:
1. Truncation was too aggressive (2KB threshold, truncated to 2000 chars)
2. Important data was being lost in medium-sized outputs (2-10KB)

Changes:
- Raise the pass-through threshold from 2KB to 5KB
- Medium outputs (5-20KB) now use hierarchical extraction instead of truncation
- _extract_key_findings already handles chunking automatically
- Better preservation of important data such as service lists

Benefits:
- Full service lists will now be properly analyzed
- No more missing services due to truncation
- Macha can see the complete picture before responding
---
 agent.py | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/agent.py b/agent.py
index 5398e19..0246f25 100644
--- a/agent.py
+++ b/agent.py
@@ -683,26 +683,22 @@ Provide unified summary (max 800 chars) covering all key points."""
     def _process_tool_result_hierarchical(self, tool_name: str, result: Any) -> str:
         """
         Intelligently process tool results based on size:
-        - Small (< 2KB): Pass through directly
-        - Medium (2-10KB): Truncate with head+tail
-        - Large (> 10KB): Hierarchical extraction in separate context
+        - Small (< 5KB): Pass through directly
+        - Medium (5-20KB): Hierarchical extraction with single-pass summarization
+        - Large (> 20KB): Hierarchical extraction with chunked processing
         """
         result_str = json.dumps(result) if not isinstance(result, str) else result
         size = len(result_str)
 
         # Small outputs: pass through directly
-        if size < 2000:
+        if size < 5000:
             print(f" [Tool result: {size} chars, passing through]")
             return result_str
 
-        # Medium outputs: truncate with head+tail
-        elif size < 10000:
-            print(f" [Tool result: {size} chars, truncating to 2000]")
-            return self._simple_truncate(result_str, 2000)
-
-        # Large outputs: hierarchical extraction
+        # Medium and large outputs: hierarchical extraction with chunking
         else:
             print(f" [Tool result: {size} chars, extracting key findings...]")
+            # _extract_key_findings automatically chunks large outputs
             return self._extract_key_findings(tool_name, result_str)
 
     def _prune_messages(self, messages: List[Dict], max_context_tokens: int = 80000) -> List[Dict]:
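
Reviewer note: the diff routes everything at or above the 5KB pass-through threshold to _extract_key_findings, but that helper is outside this hunk. Below is a minimal sketch of the size-based routing and chunking this patch implies; the chunk size, the summarize() placeholder, and all function names other than _extract_key_findings are illustrative assumptions, not the code in agent.py.

    # Illustrative sketch only -- mirrors the thresholds introduced by this patch.
    # The chunking/summarization details are assumptions; the real
    # _extract_key_findings in agent.py may differ.

    def summarize(tool_name: str, text: str) -> str:
        """Placeholder summarizer; the real code would call the agent's LLM."""
        return f"[{tool_name}] {text[:800]}"

    def extract_key_findings_sketch(tool_name: str, result_str: str,
                                    chunk_size: int = 20000) -> str:
        """Summarize medium outputs in one pass, chunk large outputs first (hypothetical)."""
        if len(result_str) <= chunk_size:
            # Medium outputs (5-20KB): single-pass summarization
            return summarize(tool_name, result_str)
        # Large outputs (> 20KB): split into chunks, summarize each, then merge
        chunks = [result_str[i:i + chunk_size]
                  for i in range(0, len(result_str), chunk_size)]
        partial = [summarize(tool_name, chunk) for chunk in chunks]
        return summarize(tool_name, "\n".join(partial))

    def process_tool_result(tool_name: str, result_str: str) -> str:
        """Routing that matches the patched _process_tool_result_hierarchical."""
        size = len(result_str)
        if size < 5000:  # small: pass through unchanged
            return result_str
        return extract_key_findings_sketch(tool_name, result_str)

The key design point is that medium outputs now flow through the same extraction path as large ones instead of a lossy head+tail truncation, so a 7KB service list is summarized rather than cut off mid-list.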