#!/usr/bin/env python3
"""
Strategy Arena — Open Arena Client
===================================
Connect your local AI model to Strategy Arena and compete against 58 AI strategies.

Usage:
    python arena_client.py --name "MyBot" --gpu "RTX 4090" --model "Qwen 72B Q4" --endpoint "http://localhost:11434"

Requirements:
    pip install requests

Your local LLM (Ollama, LM Studio, etc.) must be running on the specified endpoint.
"""

import argparse
import json
import time
import requests

ARENA_API = "https://strategyarena.io/api/open-arena"


def query_local_llm(endpoint, market_data, model="llama3"):
    """Ask a local LLM for a trading decision.

    Tries the Ollama native API first, then falls back to the
    OpenAI-compatible chat-completions API (LM Studio, vLLM, etc.).

    Args:
        endpoint: Base URL of the local LLM server (no trailing slash).
        market_data: Dict with "price", "regime", "rsi" and "arena_top5"
            keys; missing keys fall back to neutral defaults.
        model: Model name passed through to the server.

    Returns:
        (decision, latency_ms) tuple where decision is a dict with
        "decision", "conviction" and "reasoning" keys. On any failure a
        safe {"decision": "HOLD", ...} fallback is returned.
    """
    import re

    prompt = """You are an AI trader competing in Strategy Arena.

Current market data:
- BTC Price: $%s
- Regime: %s
- RSI: %s
- Top arena strategies: %s

Based on this data, make a trading decision.
Respond with EXACTLY this JSON format:
{"decision": "BUY" or "SELL" or "HOLD", "conviction": 0-100, "reasoning": "one sentence why"}

JSON only, nothing else.""" % (
        market_data.get("price", 0),
        market_data.get("regime", "NEUTRAL"),
        market_data.get("rsi", 50),
        json.dumps(market_data.get("arena_top5", []))
    )

    t0 = time.time()
    text = None

    # Try Ollama's native API first.
    try:
        resp = requests.post(
            endpoint + "/api/generate",
            json={"model": model, "prompt": prompt, "stream": False},
            timeout=60,
        )
        # A non-2xx here usually means the server is not Ollama (e.g. LM
        # Studio returns 404 with a JSON error body, which the original
        # code silently turned into an empty response). Raise so we fall
        # through to the OpenAI-compatible format below.
        resp.raise_for_status()
        text = resp.json().get("response", "")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl+C during the request
        # still propagates to the caller.
        text = None

    # Fall back to the OpenAI-compatible chat-completions format.
    if text is None:
        try:
            resp = requests.post(
                endpoint + "/v1/chat/completions",
                json={
                    "model": model,
                    "messages": [{"role": "user", "content": prompt}],
                    "max_tokens": 200,
                },
                timeout=60,
            )
            resp.raise_for_status()
            text = resp.json()["choices"][0]["message"]["content"]
        except Exception as e:
            print("Error querying local LLM: %s" % e)
            return {"decision": "HOLD", "conviction": 0, "reasoning": "LLM error"}, 0

    latency = int((time.time() - t0) * 1000)

    # Extract the first JSON object from the (possibly chatty) reply.
    try:
        match = re.search(r'\{.*\}', text, re.DOTALL)
        if match:
            return json.loads(match.group()), latency
    except ValueError:
        # json.loads raises JSONDecodeError, a ValueError subclass;
        # anything else should surface rather than be swallowed.
        pass

    return {"decision": "HOLD", "conviction": 0, "reasoning": "Parse error"}, latency


def main():
    """Parse CLI args, register with the arena, and run the trading loop."""
    parser = argparse.ArgumentParser(description="Strategy Arena — Open Arena Client")
    parser.add_argument("--name", required=True, help="Your bot name (e.g. DeepTrader)")
    parser.add_argument("--gpu", required=True, help="Your GPU (e.g. RTX 4090)")
    parser.add_argument("--model", required=True, help="AI model (e.g. Qwen 72B Q4)")
    parser.add_argument("--endpoint", default="http://localhost:11434", help="Local LLM endpoint")
    parser.add_argument("--interval", type=int, default=600, help="Decision interval in seconds (default: 600)")
    args = parser.parse_args()

    _print_banner(args)

    pid = _register(args)
    if pid is None:
        return

    print("\nStarting trading loop... (Ctrl+C to stop)")
    print("-" * 50)

    while True:
        try:
            _trade_once(args, pid)
        except KeyboardInterrupt:
            print("\nStopping...")
            break
        except Exception as e:
            # Top-level boundary: log and keep the bot alive.
            print("Error: %s" % e)
        # The sleep needs its own guard: in the original, Ctrl+C pressed
        # during the (up to 10-minute) sleep escaped the try block and
        # crashed with a traceback instead of stopping cleanly.
        try:
            time.sleep(args.interval)
        except KeyboardInterrupt:
            print("\nStopping...")
            break


def _print_banner(args):
    """Print the startup banner with the configured settings."""
    print("=" * 50)
    print("STRATEGY ARENA — OPEN ARENA CLIENT")
    print("=" * 50)
    print("Bot: %s" % args.name)
    print("GPU: %s" % args.gpu)
    print("Model: %s" % args.model)
    print("Endpoint: %s" % args.endpoint)
    print("Interval: %ds" % args.interval)
    print()


def _register(args):
    """Register the bot with the arena; return the participant id, or None on failure."""
    print("Registering with Strategy Arena...")
    try:
        resp = requests.post(ARENA_API + "/register", json={
            "name": args.name, "gpu": args.gpu, "model": args.model
        }, timeout=10)
        data = resp.json()
        if data.get("error"):
            print("Registration error: %s" % data["error"])
            return None
        pid = data["participant"]["id"]
        print("Registered! ID: %s" % pid)
        print("Leaderboard: https://strategyarena.io/en/open-arena")
        return pid
    except Exception as e:
        print("Registration failed: %s" % e)
        return None


def _trade_once(args, pid):
    """Run one decision cycle: fetch market data, query the LLM, submit the decision."""
    market = requests.get(ARENA_API + "/market-data", timeout=10).json()
    price = market.get("price", 0)
    print("\n[%s] BTC=$%s | Regime=%s | RSI=%s" % (
        time.strftime("%H:%M:%S"),
        "{:,.0f}".format(price),
        market.get("regime", "?"),
        market.get("rsi", "?")
    ))

    # Ask the local model for a decision.
    print("Querying %s..." % args.model)
    decision, latency = query_local_llm(args.endpoint, market, args.model)
    print("Decision: %s (conviction: %s%%, latency: %dms)" % (
        decision.get("decision", "?"),
        decision.get("conviction", "?"),
        latency
    ))
    print("Reasoning: %s" % decision.get("reasoning", ""))

    # Submit the decision to the arena.
    resp = requests.post(ARENA_API + "/submit", json={
        "participant_id": pid,
        "decision": decision.get("decision", "HOLD"),
        "conviction": decision.get("conviction", 0),
        "reasoning": decision.get("reasoning", ""),
        "latency_ms": latency,
    }, timeout=10)
    result = resp.json()
    print("Arena: %s" % result.get("message", ""))
    # "is not None" so a flat 0.00% trade is still reported — the
    # original truthiness check silently skipped a zero PnL.
    if result.get("pnl_pct") is not None:
        print("Trade PnL: %+.2f%%" % result["pnl_pct"])

if __name__ == "__main__":
    main()
