Feed Hunter: deep scraper skill, pipeline, simulator, first investigation

- Built deep-scraper skill (CDP-based X feed extraction)
- Three-stage pipeline: scrape → triage → investigate
- Paper trading simulator with position tracking
- First live investigation: verified kch123 Polymarket profile ($9.3M P&L)
- Opened first paper position: Seahawks Super Bowl @ 68c
- Telegram alerts with inline action buttons
- Portal build in progress (night shift)
This commit is contained in:
2026-02-07 23:58:40 -06:00
parent b93228ddc2
commit 8638500190
31 changed files with 7752 additions and 40 deletions

View File

@ -0,0 +1,118 @@
#!/usr/bin/env python3
"""
Investigation report generator for Feed Hunter.
Reads triage.json and produces investigation tasks as structured prompts
for the agent to execute.
Usage:
python3 investigate.py <triage.json> [--output investigations/]
This doesn't do the investigation itself — it generates the task list
that the agent (Case) follows using browser/web tools.
"""
import argparse
import json
import os
from datetime import datetime, timezone
from pathlib import Path
def generate_investigation_prompt(post):
    """Build a markdown investigation prompt for one triaged post.

    Args:
        post: Triaged post dict. Requires "author" (a mapping, read via
            ``.get("handle")``) and "text"; "claims", "links", and "tasks"
            are optional lists of dicts (claims need ``type``/``match``,
            links need ``type``/``url``, tasks need ``action``/
            ``description``/``method`` and may carry ``url``).

    Returns:
        A markdown string quoting the post, listing detected claims,
        links, and numbered investigation tasks, ending with the standard
        deliverables checklist for the agent.
    """
    author = post["author"].get("handle", "unknown")
    # Cap the quoted post text so a single long post can't bloat the prompt.
    text = post["text"][:500]
    claims = post.get("claims", [])
    links = post.get("links", [])
    tasks = post.get("tasks", [])

    # Collect fragments and join once at the end — avoids the quadratic
    # cost of repeated `prompt += ...` inside loops.
    parts = [f"""## Investigation: {author}
**Post:** {text}
**Claims detected:**
"""]
    for claim in claims:
        parts.append(f"- [{claim['type']}] {claim['match']}\n")
    parts.append("\n**Links found:**\n")
    for link in links:
        parts.append(f"- [{link['type']}] {link['url']}\n")
    parts.append("\n**Investigation tasks:**\n")
    for i, task in enumerate(tasks, 1):
        parts.append(f"{i}. **{task['action']}**: {task['description']}\n")
        parts.append(f"   Method: {task['method']}\n")
        if task.get('url'):
            parts.append(f"   URL: {task['url']}\n")
    parts.append("""
**Deliver:**
1. Is the claim verifiable? What does the actual data show?
2. Is there recent activity? (Last 24-48h)
3. Is this still actionable or has the window closed?
4. Risk assessment (1-10, where 10 is highest risk)
5. Verdict: ACTIONABLE / EXPIRED / EXAGGERATED / SCAM / UNVERIFIABLE
6. If ACTIONABLE: suggested paper trade parameters (asset, entry, size, stop loss, take profit)
""")
    return "".join(parts)
def main():
    """CLI entry point: read a triage.json and emit an investigation batch.

    Loads the triage file named on the command line, builds one
    investigation record (id, author, priority, claims, tasks, prompt)
    per queued post, and writes the batch as JSON into the output
    directory (default: an ``investigations/`` dir next to the input).
    Prints a short summary of the batch to stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="Path to triage.json")
    parser.add_argument("--output", help="Output directory for investigation files")
    args = parser.parse_args()

    with open(args.input) as f:
        data = json.load(f)

    queue = data.get("investigation_queue", [])
    if not queue:
        print("No posts in investigation queue.")
        return

    # Default output directory lives alongside the input file.
    output_dir = args.output or os.path.join(os.path.dirname(args.input), "investigations")
    os.makedirs(output_dir, exist_ok=True)

    timestamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    investigations = []
    for i, post in enumerate(queue):
        inv = {
            "id": f"inv-{timestamp}-{i}",
            "post_author": post["author"].get("handle", "unknown"),
            "post_url": post.get("url", ""),
            # .get keeps one malformed post from crashing the whole batch,
            # consistent with the other optional fields here.
            "priority": post.get("priority", "unknown"),
            "claims": post.get("claims", []),
            "tasks": post.get("tasks", []),
            "prompt": generate_investigation_prompt(post),
            "status": "pending",   # agent flips this as it works the batch
            "result": None,
        }
        investigations.append(inv)

    # Save investigation batch
    batch_file = os.path.join(output_dir, f"batch-{timestamp}.json")
    with open(batch_file, "w") as f:
        json.dump({
            "batch_id": timestamp,
            "created_at": datetime.now(timezone.utc).isoformat(),
            "count": len(investigations),
            "investigations": investigations,
        }, f, indent=2)

    print(f"=== Investigation Batch: {timestamp} ===")
    print(f"Tasks: {len(investigations)}")
    for inv in investigations:
        print(f"\n  [{inv['priority']}] {inv['post_author']}")
        print(f"    Claims: {[c['type'] for c in inv['claims']]}")
        print(f"    Tasks: {len(inv['tasks'])}")
    print(f"\nSaved to {batch_file}")
    print("\nTo execute: agent reads batch file and runs each investigation prompt")


if __name__ == "__main__":
    main()