vast.ai image startup
#!/usr/bin/env bash
set -euo pipefail
export DEBIAN_FRONTEND=noninteractive

echo "[setup] System deps"
apt-get update -y
apt-get install -y --no-install-recommends python3-pip python3-venv git ffmpeg jq curl
python3 -m pip install --upgrade pip

cd /workspace

# 1) Fetch InfiniteTalk
if [ ! -d "/workspace/InfiniteTalk" ]; then
  git clone https://github.com/MeiGen-AI/InfiniteTalk.git
fi
cd /workspace/InfiniteTalk

echo "[setup] Python deps (PyTorch cu121 + flash-attn, as required by the repo)"
python3 -m pip install --upgrade \
  "torch==2.4.1" "torchvision==0.19.1" "torchaudio==2.4.1" --index-url https://download.pytorch.org/whl/cu121
python3 -m pip install -U xformers==0.0.28 --index-url https://download.pytorch.org/whl/cu121
python3 -m pip install "misaki[en]" ninja psutil packaging wheel "flash_attn==2.7.4.post1"
python3 -m pip install -r requirements.txt
python3 -m pip install "huggingface_hub[cli]"
| echo "[setup] Poids modèles" | |
| mkdir -p /workspace/InfiniteTalk/weights | |
| if [ -n "${HF_TOKEN:-}" ]; then | |
| huggingface-cli login --token "$HF_TOKEN" || true | |
| fi | |
| # Dossiers poids | |
| mkdir -p weights/Wan2.1-I2V-14B-480P \ | |
| weights/chinese-wav2vec2-base \ | |
| weights/InfiniteTalk | |
| # Téléchargements selon le README du repo | |
| huggingface-cli download Wan-AI/Wan2.1-I2V-14B-480P --local-dir weights/Wan2.1-I2V-14B-480P || true | |
| huggingface-cli download TencentGameMate/chinese-wav2vec2-base --local-dir weights/chinese-wav2vec2-base || true | |
| # Certains workflows demandent un .safetensors révisé (PR refs/pr/1) — ignoré si indisponible. | |
| huggingface-cli download TencentGameMate/chinese-wav2vec2-base model.safetensors --revision refs/pr/1 \ | |
| --local-dir weights/chinese-wav2vec2-base || true | |
| huggingface-cli download MeiGen-AI/InfiniteTalk --local-dir weights/InfiniteTalk || true | |
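# Optional check (an added sketch, not in the original gist): report what actually landed in
# the weights folders, since the downloads above are allowed to fail silently (|| true).
du -sh weights/* 2>/dev/null || true
for d in weights/Wan2.1-I2V-14B-480P weights/chinese-wav2vec2-base weights/InfiniteTalk; do
  { [ -d "$d" ] && [ -n "$(ls -A "$d" 2>/dev/null)" ]; } || echo "[setup][WARN] $d looks empty"
done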
# 2) Test mini-API/UI (the repo's official Gradio app) on port 7860
cat >/workspace/run_infinitetalk.sh <<'SH'
#!/usr/bin/env bash
set -euo pipefail
cd /workspace/InfiniteTalk
python3 app.py \
  --ckpt_dir weights/Wan2.1-I2V-14B-480P \
  --wav2vec_dir 'weights/chinese-wav2vec2-base' \
  --infinitetalk_dir weights/InfiniteTalk/single/infinitetalk.safetensors \
  --num_persistent_param_in_dit 0 \
  --motion_frame 9
SH
chmod +x /workspace/run_infinitetalk.sh
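# Usage note (added, hedged): onstart.sh below starts the UI automatically; to run it by hand
# instead, something like the following should work, with the UI reachable through whatever
# external port Vast.ai maps to the container's 7860:
#   nohup /workspace/run_infinitetalk.sh >/workspace/infinitetalk.log 2>&1 &
#   tail -f /workspace/infinitetalk.log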
# 3) Auto-stop watchdog (stop the instance after X idle minutes)
cat >/root/watchdog.sh <<'SH'
#!/usr/bin/env bash
set -euo pipefail
: "${IDLE_UTIL_THRESH:=5}"
: "${IDLE_MEM_THRESH_MB:=1000}"
: "${CHECK_EVERY_SEC:=60}"
: "${MAX_IDLE_MIN:=15}"

# INSTANCE_ID from the environment, or Vast's special variable ($CONTAINER_ID is usually set)
INSTANCE_ID="${INSTANCE_ID:-${CONTAINER_ID:-}}"

# Install the CLI if it is missing
if ! command -v vastai >/dev/null 2>&1; then
  python3 -m pip install --user vastai >/dev/null
  export PATH="$HOME/.local/bin:$PATH"
fi

gpu_idle() {
  # If a CUDA process is running -> active
  if nvidia-smi --query-compute-apps=pid --format=csv,noheader 2>/dev/null | grep -q '[0-9]'; then
    return 1
  fi
  local line util mem
  line="$(nvidia-smi --query-gpu=utilization.gpu,memory.used --format=csv,noheader,nounits 2>/dev/null | head -n1 || true)"
  [ -z "$line" ] && return 1
  util="$(echo "$line" | cut -d',' -f1 | tr -d ' ')"
  mem="$(echo "$line" | cut -d',' -f2 | tr -d ' ')"
  (( util < IDLE_UTIL_THRESH && mem < IDLE_MEM_THRESH_MB ))
}
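# Worked example (added for clarity, not in the original gist): with the default thresholds,
# a reading of "3, 250" (3 % util, 250 MiB used) counts as idle, while "0, 4000" does not,
# so a model that stays loaded in VRAM keeps the instance alive.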
| echo "[watchdog] idle<${IDLE_UTIL_THRESH}% & mem<${IDLE_MEM_THRESH_MB}MiB ; stop après ${MAX_IDLE_MIN}min" | |
| idle=0 | |
| while true; do | |
| if gpu_idle; then | |
| idle=$((idle+1)) | |
| echo "[watchdog] GPU idle ${idle}/${MAX_IDLE_MIN} min" | |
| else | |
| [ "$idle" -gt 0 ] && echo "[watchdog] activité détectée, reset" | |
| idle=0 | |
| fi | |
| if [ "$idle" -ge "$MAX_IDLE_MIN" ]; then | |
| echo "[watchdog] seuil atteint → stop instance $INSTANCE_ID" | |
| vastai stop instance "$INSTANCE_ID" || { | |
| echo "[watchdog][ERREUR] Échec stop. Vérifie VAST API key/INSTANCE_ID." | |
| } | |
| exit 0 | |
| fi | |
| sleep "$CHECK_EVERY_SEC" | |
| done | |
| SH | |
| chmod +x /root/watchdog.sh | |
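# Note (added, hedged): `vastai stop instance` only works if the CLI is authenticated. If the
# image does not already carry an API key, set one once, e.g.:
#   vastai set api-key "$VAST_API_KEY"
# where VAST_API_KEY would have to be passed to the instance as an environment variable.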
# 4) OnStart: launch the UI, then the watchdog, in the background
cat >/root/onstart.sh <<'SH'
#!/usr/bin/env bash
set -euo pipefail
# Launch the Gradio UI
nohup /workspace/run_infinitetalk.sh >/workspace/infinitetalk.log 2>&1 &
# Launch the watchdog (auto-stop)
nohup /root/watchdog.sh >/workspace/watchdog.log 2>&1 &
SH
chmod +x /root/onstart.sh

echo "[setup] DONE. On the next start, onstart.sh will launch the UI + watchdog."