#!/usr/bin/env python3
|
|
|
|
import json
|
|
import os
|
|
import sys
|
|
import glob
|
|
import subprocess
|
|
from collections import defaultdict
|
|
from datetime import datetime
|
|
|
|
def extract_text_content(content_list):
    """Extract text content from message content array, excluding thinking blocks.

    Concatenates the 'text' field of every entry whose type is 'text',
    separated by single spaces, with surrounding whitespace stripped.
    Entries of any other type (e.g. 'thinking') are ignored.
    """
    fragments = [
        entry['text']
        for entry in content_list
        if entry.get('type') == 'text'
    ]
    return ' '.join(fragments).strip()
|
|
|
|
def process_session_file(file_path):
    """Process a single JSONL session file and extract conversation turns.

    Each line is parsed as JSON; objects with type 'message' that carry a
    'message' payload are collected in file order, then paired into
    user -> assistant turns. Malformed JSON lines are skipped silently.

    Args:
        file_path: Path to a .jsonl session file.

    Returns:
        A list of dicts with 'user', 'assistant', and 'timestamp' keys,
        in the order the turns appear in the file; an empty list if the
        file cannot be read at all.
    """
    turns = []

    try:
        messages = []
        # Stream the file line by line instead of readlines() so large
        # session logs are not loaded into memory all at once.
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                try:
                    data = json.loads(line.strip())
                except json.JSONDecodeError:
                    # Tolerate truncated/corrupt lines (e.g. partial writes).
                    continue
                if data.get('type') == 'message' and 'message' in data:
                    messages.append(data)

        # Group messages into conversation turns (user -> assistant pairs).
        # A later user message overwrites an earlier unanswered one.
        current_user_msg = None
        for msg in messages:
            role = msg['message']['role']
            content = msg['message'].get('content', [])

            if role == 'user':
                current_user_msg = extract_text_content(content)
            elif role == 'assistant' and current_user_msg:
                # The elif already guarantees current_user_msg is non-empty,
                # so only the assistant side still needs checking here.
                assistant_text = extract_text_content(content)
                if assistant_text:
                    turns.append({
                        'user': current_user_msg,
                        'assistant': assistant_text,
                        'timestamp': msg.get('timestamp'),
                    })
                    # Reset only after a complete pair is recorded.
                    current_user_msg = None

        return turns

    except Exception as e:
        # Best-effort: one unreadable session file must not abort the run.
        print(f"Error processing {file_path}: {e}", file=sys.stderr)
        return []
|
|
|
|
def main():
    """Replay the 10 most recent conversation turns through the auto-memory indexer.

    Scans the sessions directory for .jsonl files, gathers conversation
    turns from the newest sessions until at least 10 are available, then
    runs each turn through the external auto-memory-hook script via
    subprocess. All progress/diagnostics go to stderr.
    """
    sessions_dir = "/home/wdjones/.openclaw/agents/main/sessions/"

    # Collect all non-deleted session files.
    session_files = [
        path
        for path in glob.glob(os.path.join(sessions_dir, "*.jsonl"))
        if not path.endswith('.deleted')
    ]

    # Sort by modification time (newest first) so we can stop early once
    # enough turns have been gathered.
    session_files.sort(key=os.path.getmtime, reverse=True)

    all_turns = []

    # BUGFIX: we iterate sessions newest-first, but within each file turns
    # are chronological. Prepending each (older) session's batch keeps
    # all_turns in global chronological order, so the slice below really
    # selects the most recent turns instead of the oldest collected ones.
    for session_file in session_files:
        all_turns = process_session_file(session_file) + all_turns

        if len(all_turns) >= 10:
            break

    # [-10:] already yields the whole list when fewer than 10 turns exist.
    recent_turns = all_turns[-10:]

    print(f"Processing {len(recent_turns)} conversation turns through auto-memory indexer...", file=sys.stderr)

    # Process each turn individually through the auto-memory script.
    success_count = 0
    for i, turn in enumerate(recent_turns, start=1):
        try:
            turn_data = {
                "user": turn['user'],
                "assistant": turn['assistant'],
                "agent_id": "case",
                "session": "main",
            }

            # List argv (shell=False) keeps turn content from being
            # interpreted by a shell; the turn itself travels via stdin.
            result = subprocess.run(
                ['python3', '/home/wdjones/.openclaw/workspace/tools/auto-memory-hook.py'],
                input=json.dumps(turn_data),
                text=True,
                capture_output=True,
            )

            if result.returncode == 0:
                success_count += 1
                print(f"Turn {i}/{len(recent_turns)}: Processed successfully", file=sys.stderr)
            else:
                print(f"Turn {i}/{len(recent_turns)}: Error - {result.stderr}", file=sys.stderr)

        except Exception as e:
            # Keep going: one failed turn should not stop the batch.
            print(f"Turn {i}/{len(recent_turns)}: Exception - {e}", file=sys.stderr)

    print(f"Auto-memory indexer completed: {success_count}/{len(recent_turns)} turns processed successfully", file=sys.stderr)
|
|
|
|
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()