#!/usr/bin/env python3
import os
import sys
import time
import csv
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Tuple, Optional

from roboflow import Roboflow
from tqdm import tqdm

# ======= Required settings (replace with your own values) =======
WORKSPACE_ID = "mo-v0q7f"
PROJECT_ID   = "yanzhe-qingqi-j8sei"
IMAGE_DIR    = Path("/www/wwwroot/BOT/files/storage/test")

# ======= Tunable parameters =======
SPLIT             = "train"
BATCH_NAME        = "bulk_oct_06_2025"
ALLOW_EXT         = {".jpg", ".jpeg", ".png", ".bmp", ".webp"}
MAX_WORKERS       = 16          # number of concurrent upload threads (10/20/... as you like)
MAX_RETRIES       = 3           # retry attempts per image on failure
BACKOFF_BASE      = 0.6         # exponential backoff base (seconds): 0.6, 1.2, 2.4, ...
THROTTLE_QPS      = 10          # global rate limit (max calls per second)
PER_TASK_DELAY    = 0.0         # fixed delay after each upload (seconds); usually 0 is fine

# ======= Rate limiting (simple slot reservation) =======
# Lightweight throttle to avoid triggering 429s; if your network is fast and the
# server allows higher concurrency, raise THROTTLE_QPS.
# Each caller reserves the next free time slot while holding the lock, then
# sleeps OUTSIDE the lock — waiting threads are paced without serializing on
# each other's sleeps. time.monotonic() is used so wall-clock adjustments
# (NTP, DST) cannot break the pacing.
from threading import Lock
_next_slot = 0.0      # monotonic timestamp of the next free call slot
_tick_lock = Lock()   # guards _next_slot

def throttle() -> None:
    """Block until this call's global rate-limit slot arrives (~THROTTLE_QPS calls/sec).

    No-op when THROTTLE_QPS <= 0 (throttling disabled).
    """
    global _next_slot
    if THROTTLE_QPS <= 0:
        return
    gap = 1.0 / float(THROTTLE_QPS)
    with _tick_lock:
        now = time.monotonic()
        slot = max(now, _next_slot)  # earliest moment this call may proceed
        _next_slot = slot + gap      # reserve the following slot for the next caller
    wait = slot - now
    if wait > 0.0:
        time.sleep(wait)
def mk_project():
    """Build and return a fresh Roboflow project handle.

    Each worker thread calls this to get its own client instance, which
    sidesteps cross-thread sharing of mutable client state.
    Exits the process with status 1 if ROBOFLOW_API_KEY is not set.
    """
    key = os.getenv("ROBOFLOW_API_KEY")
    if not key:
        print("环境变量 ROBOFLOW_API_KEY 未设置。请先执行：\n  export ROBOFLOW_API_KEY=\"<你的API Key>\"")
        sys.exit(1)
    client = Roboflow(api_key=key)
    workspace = client.workspace(WORKSPACE_ID)
    return workspace.project(PROJECT_ID)

def upload_one(image_path: Path) -> Tuple[Path, bool, Optional[str]]:
    """Upload a single image with throttling and retries.

    Returns (path, success, error_message); error_message is None on success.
    Always returns a 3-tuple — the original version fell through to an
    implicit None if the retry loop never ran (MAX_RETRIES < 1), which would
    crash the caller's tuple unpacking.
    """
    # Per-thread client instance to reduce lock contention; the underlying
    # connection is briefly reused within the thread.
    project = mk_project()
    last_err: Optional[str] = None
    # max(..., 1) guarantees at least one attempt even if MAX_RETRIES is 0.
    for attempt in range(1, max(MAX_RETRIES, 1) + 1):
        try:
            throttle()
            project.upload(
                image_path=str(image_path),
                split=SPLIT,
                batch_name=BATCH_NAME,
                num_retry_uploads=1,   # stack one small retry inside the SDK as well
            )
            if PER_TASK_DELAY > 0:
                time.sleep(PER_TASK_DELAY)
            return image_path, True, None
        except Exception as e:
            last_err = str(e)
            # Exponential backoff before the next attempt.
            if attempt < MAX_RETRIES:
                time.sleep(BACKOFF_BASE * (2 ** (attempt - 1)))
    return image_path, False, last_err

def collect_files(root: Path, allow_ext: Optional[set] = None) -> list:
    """Recursively collect files under *root* whose extension is allowed.

    Args:
        root: directory to scan (recursive).
        allow_ext: set of lowercase extensions including the dot
            (e.g. {".jpg", ".png"}); defaults to the module-level ALLOW_EXT,
            so existing callers are unaffected.

    Returns:
        Matching file paths sorted by full path; extension matching is
        case-insensitive.
    """
    if allow_ext is None:
        allow_ext = ALLOW_EXT
    return [
        p
        for p in sorted(root.rglob("*"))
        if p.is_file() and p.suffix.lower() in allow_ext
    ]

def main():
    """Entry point: enumerate images, fan uploads out over a thread pool,
    report totals, and dump any failures to a CSV for later retry."""
    if not IMAGE_DIR.exists():
        print(f"目录不存在：{IMAGE_DIR}")
        sys.exit(1)

    files = collect_files(IMAGE_DIR)
    if not files:
        print(f"目录中未找到图片（支持后缀：{', '.join(sorted(ALLOW_EXT))}）：{IMAGE_DIR}")
        sys.exit(0)

    print(f"目标：workspace={WORKSPACE_ID}  project={PROJECT_ID}")
    print(f"上传：{len(files)} 张 | 并发={MAX_WORKERS} | QPS≈{THROTTLE_QPS} | 重试={MAX_RETRIES} | split='{SPLIT}' | batch='{BATCH_NAME}'")

    successes = 0
    failures = []
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
        # Map each future back to its source path so unexpected executor-level
        # errors can still be attributed to a file.
        pending = {pool.submit(upload_one, path): path for path in files}
        for future in tqdm(as_completed(pending), total=len(pending), desc="Uploading", unit="img"):
            src = pending[future]
            try:
                path, ok, err = future.result()
            except Exception as exc:
                failures.append((src, f"Unexpected error: {exc!r}"))
                continue
            if ok:
                successes += 1
            else:
                failures.append((path, err))

    print(f"\n完成：成功 {successes} 张，失败 {len(failures)} 张。")

    # Persist the failure list as CSV so failed files can be re-uploaded later.
    if failures:
        report = Path("upload_failures.csv")
        with report.open("w", newline="", encoding="utf-8") as fh:
            out = csv.writer(fh)
            out.writerow(["image_path", "error"])
            out.writerows((str(path), err) for path, err in failures)
        print(f"失败列表已保存：{report.resolve()}  （可用于重试）")

if __name__ == "__main__":
    main()
