OVI Test
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyPbLDHGRk97285yUwCxLVIA",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/anr2me/bf8f651b8c4a9294ce72237bf1127684/ovi-test.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
| "cell_type": "markdown", | |
| "source": [ | |
| "\n", | |
| "Colab: Upload your input files from File Browser menu.\n", | |
| "\n", | |
| "Kaggle: Upload your input files from File -> Upload Input -> Upload Data" | |
| ], | |
| "metadata": { | |
| "id": "Sg6hZROjIqoP" | |
| } | |
| }, | |
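{
"cell_type": "code",
"source": [
"# (Added sketch, not part of the original gist) Programmatic alternative to the manual upload steps above.\n",
"# `google.colab.files.upload()` is Colab's standard upload helper; it opens a browser file picker and\n",
"# returns a dict of {filename: file bytes}. On Kaggle the import fails, so fall back to the menu path.\n",
"try:\n",
"    from google.colab import files  # only available inside Colab\n",
"    uploaded = files.upload()\n",
"    print(\"Uploaded:\", list(uploaded.keys()))\n",
"except ImportError:\n",
"    print(\"Not running on Colab; use File -> Upload Input -> Upload Data on Kaggle instead.\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},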
{
"cell_type": "code",
"source": [
"\n",
"\n",
"!uname -a\n",
"!free -h\n",
"!pwd\n",
"!pip install -U pip uv virtualenv GitPython loglevel\n",
"!git config --global core.fileMode\n",
"!git config --global pull.rebase true\n",
"\n",
"import os\n",
"from datetime import datetime, timedelta\n",
"start_time = datetime.now()\n",
"\n",
"!mkdir -p \"/kaggle/working\"\n",
"!mkdir -p \"/kaggle/input\"\n",
"!mkdir -p \"/kaggle/temp\"\n",
"!mkdir -p \"/tmp/xdg_runtime\"\n",
"!mkdir -p \"/content\"\n",
"current_dir = !pwd #[\"/content\", \"/kaggle/working\",\"/kaggle/input\",\"/kaggle/temp\"]\n",
"WORKSPACE = \"/content\" # current_dir[0]\n",
"os.environ['WORKSPACE']=WORKSPACE\n",
"print(f\"Workspace at {WORKSPACE}\")\n",
"%cd \"$WORKSPACE\"\n",
"\n",
"os.environ['XDG_RUNTIME_DIR']=\"/tmp/xdg_runtime\"\n",
"os.environ['MPLBACKEND']=\"Agg\"\n",
"#os.environ['PYTHONVERBOSE']=\"1\"\n",
"\n",
| "# Create a bash script at /usr/bin/\n", | |
| "shcontent = \"\"\"\n", | |
| "[ -f \"$WORKSPACE/venv/bin/activate\" ] && echo \"Using VENV at $WORKSPACE\" && source \"$WORKSPACE/venv/bin/activate\"; \"$@\"\n", | |
| "\"\"\"\n", | |
| "with open('/usr/bin/venv.sh', 'w') as file:\n", | |
| " file.write(shcontent)\n", | |
| "!chmod +x /usr/bin/venv.sh\n", | |
| "\n", | |
| "print(\"-= Creating/Restoring Virtual Environment =-\")\n", | |
| "![ ! -f \"venv/bin/activate\" ] && echo -= Initial Python Virtual Environment =- && virtualenv venv\n", | |
| "\n", | |
| "print(\"-= Installing Default Dependencies =-\")\n", | |
| "!venv.sh pip install -U pip uv setuptools wheel ninja jedi loglevel einops\n", | |
| "!venv.sh pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1\n", | |
| "#!venv.sh pip install -U torch torchao torchvision torchaudio xformers --index-url https://download.pytorch.org/whl/cu129\n", | |
| "!venv.sh pip install triton sageattention #flash-attn-triton\n", | |
| "!venv.sh pip install flash_attn --no-build-isolation\n", | |
| "\n", | |
| "print(\"-= Installing Ovi repository =-\")\n", | |
| "![ ! -d Ovi ] && echo -= Initial setup Ovi =- && git clone --depth=1500 --branch main --single-branch https://github.com/character-ai/Ovi.git\n", | |
| "%cd Ovi\n", | |
| "!git pull\n", | |
| "!venv.sh pip install -r requirements.txt # Using old numpy 1.26.4\n", | |
| "\n", | |
| "print(\"-= Updating site-packages with VENV =-\")\n", | |
| "import sys\n", | |
| "full_python_version = sys.version\n", | |
| "python_version_number = \".\".join(full_python_version.split(\" \")[0].split(\".\")[:2])\n", | |
| "print(f\"Python Ver = {python_version_number}\")\n", | |
| "python_ver_num = python_version_number.replace(\".\",\"\")\n", | |
| "\n", | |
| "import torch\n", | |
| "full_pytorch_version = torch.__version__\n", | |
| "pytorch_version_number = \".\".join(full_pytorch_version.split(\" \")[0].split(\".\")[:2])\n", | |
| "print(f\"PyTorch Ver = {pytorch_version_number}\")\n", | |
| "\n", | |
| "print(f\"Adding site-packages {WORKSPACE}/venv/lib/python{python_version_number}/site-packages\")\n", | |
| "import site\n", | |
| "site.addsitedir(f\"{WORKSPACE}/venv/lib/python{python_version_number}/site-packages\")\n", | |
| "sys.path.append(f\"{WORKSPACE}/venv/bin\")\n", | |
| "sys.path.append(f\"{WORKSPACE}/venv/lib/python{python_version_number}/site-packages\")\n", | |
| "\n", | |
| "print(\"-= Downloading Weights =-\")\n", | |
| "# Optional can specific --output-dir to download to a specific directory (default is ./ckpts)\n", | |
| "# but if a custom directory is used, the inference yaml has to be updated with the custom directory\n", | |
| "!venv.sh python3 download_weights.py --output-dir /kaggle/temp/ckpts\n", | |
| "\n", | |
| "print(\"-= Configure Ovi =-\")\n", | |
| "# Uploaded Input files at'/kaggle/input' directory\n", | |
| "# Downloadable Output files at '/kaggle/working' directory\n", | |
| "\n", | |
| "config_content = \"\"\"\n", | |
| "# Output and Model Configuration\n", | |
| "ckpt_dir: /kaggle/temp/ckpts #./ckpts\n", | |
| "output_dir: /kaggle/working\n", | |
| "\n", | |
| "# Generation Quality Settings\n", | |
| "num_steps: 50\n", | |
| "solver_name: unipc\n", | |
| "shift: 5.0\n", | |
| "seed: 103\n", | |
| "\n", | |
| "# Guidance Strength Control\n", | |
| "audio_guidance_scale: 3.0\n", | |
| "video_guidance_scale: 4.0\n", | |
| "slg_layer: 11\n", | |
| "\n", | |
| "# Multi-GPU and Performance\n", | |
| "sp_size: 1\n", | |
| "cpu_offload: True\n", | |
| "\n", | |
| "# Input Configuration\n", | |
| "each_example_n_times: 1\n", | |
| "mode: \"t2v\" # [\"t2v\", \"i2v\", \"t2i2v\"] all comes with audio\n", | |
| "video_frame_height_width: [512, 992] # only useful if mode = t2v or t2i2v, recommended values: [512, 992], [992, 512], [960, 512], [512, 960], [720, 720], [448, 1120]\n", | |
| "text_prompt: \"a man proposing on one knee, while offering a large diamond ring to a grim reaper, as he says, <S>Will you marry me?<E>. After a brief pause, the grim reaper says, <S>You are courting death.<E><AUDCAP>A low-pitched male voice.<ENDAUDCAP>\" #./example_prompts/gpt_examples_i2v.csv\n", | |
| "\n", | |
| "# Quality Control (Negative Prompts)\n", | |
| "video_negative_prompt: \"jitter, bad hands, blur, distortion\" # Artifacts to avoid in video\n", | |
| "audio_negative_prompt: \"robotic, muffled, echo, distorted\" # Artifacts to avoid in audio\n", | |
| "\"\"\"\n", | |
| "with open(\"./ovi/configs/inference/inference_fusion.yaml\", 'w') as file:\n", | |
| " file.write(config_content)\n", | |
| "\n", | |
| "print(\"-= Running Inference =-\")\n", | |
| "start_time2 = datetime.now()\n", | |
| "#!venv.sh python3 inference.py --config-file \"./ovi/configs/inference/inference_fusion.yaml\"\n", | |
| "#!venv.sh torchrun --nnodes 1 --nproc_per_node 4 inference.py --config-file \"./ovi/configs/inference/inference_fusion.yaml\"\n", | |
| "!venv.sh python3 gradio_app.py --cpu_offload\n", | |
| "\n", | |
| "time_delta2 = datetime.now() - start_time2\n", | |
| "print(f\"-= Inference Done! ({str(timedelta(seconds=time_delta2.total_seconds()))}) =-\")\n", | |
| "\n", | |
| "time_delta = datetime.now() - start_time\n", | |
| "print(f\"-= All Done! ({str(timedelta(seconds=time_delta.total_seconds()))}) =-\")" | |
| ], | |
| "metadata": { | |
| "id": "nZpXa3ygIo1d", | |
| "colab": { | |
| "base_uri": "https://localhost:8080/" | |
| }, | |
| "outputId": "ddd37c8e-fa44-4254-957a-0549037e0c87" | |
| }, | |
| "execution_count": null, | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "Linux 98afc3a498e5 6.6.97+ #1 SMP PREEMPT_DYNAMIC Sat Sep 6 09:54:41 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux\n", | |
| " total used free shared buff/cache available\n", | |
| "Mem: 12Gi 721Mi 8.8Gi 1.0Mi 3.1Gi 11Gi\n", | |
| "Swap: 0B 0B 0B\n", | |
| "/content\n", | |
| "Requirement already satisfied: pip in /usr/local/lib/python3.12/dist-packages (24.1.2)\n", | |
| "Collecting pip\n", | |
| " Downloading pip-25.2-py3-none-any.whl.metadata (4.7 kB)\n", | |
| "Collecting uv\n", | |
| " Downloading uv-0.8.23-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", | |
| "Collecting virtualenv\n", | |
| " Downloading virtualenv-20.34.0-py3-none-any.whl.metadata (4.6 kB)\n", | |
| "Requirement already satisfied: GitPython in /usr/local/lib/python3.12/dist-packages (3.1.45)\n", | |
| "Collecting distlib<1,>=0.3.7 (from virtualenv)\n", | |
| " Downloading distlib-0.4.0-py2.py3-none-any.whl.metadata (5.2 kB)\n", | |
| "Requirement already satisfied: filelock<4,>=3.12.2 in /usr/local/lib/python3.12/dist-packages (from virtualenv) (3.19.1)\n", | |
| "Requirement already satisfied: platformdirs<5,>=3.9.1 in /usr/local/lib/python3.12/dist-packages (from virtualenv) (4.4.0)\n", | |
| "Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.12/dist-packages (from GitPython) (4.0.12)\n", | |
| "Requirement already satisfied: smmap<6,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from gitdb<5,>=4.0.1->GitPython) (5.0.2)\n", | |
| "Downloading pip-25.2-py3-none-any.whl (1.8 MB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m22.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", | |
| "\u001b[?25hDownloading uv-0.8.23-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (21.2 MB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.2/21.2 MB\u001b[0m \u001b[31m124.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", | |
| "\u001b[?25hDownloading virtualenv-20.34.0-py3-none-any.whl (6.0 MB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.0/6.0 MB\u001b[0m \u001b[31m141.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", | |
| "\u001b[?25hDownloading distlib-0.4.0-py2.py3-none-any.whl (469 kB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m469.0/469.0 kB\u001b[0m \u001b[31m42.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", | |
| "\u001b[?25hInstalling collected packages: distlib, virtualenv, uv, pip\n", | |
| " Attempting uninstall: pip\n", | |
| " Found existing installation: pip 24.1.2\n", | |
| " Uninstalling pip-24.1.2:\n", | |
| " Successfully uninstalled pip-24.1.2\n", | |
| "Successfully installed distlib-0.4.0 pip-25.2 uv-0.8.23 virtualenv-20.34.0\n", | |
| "Workspace at /content\n", | |
| "-= Creating/Restoring Virtual Environment =-\n", | |
| "-= Initial Python Virtual Environment =-\n", | |
| "created virtual environment CPython3.12.11.final.0-64 in 318ms\n", | |
| " creator CPython3Posix(dest=/content/venv, clear=False, no_vcs_ignore=False, global=False)\n", | |
| " seeder FromAppData(download=False, pip=bundle, via=copy, app_data_dir=/root/.local/share/virtualenv)\n", | |
| " added seed packages: pip==25.2\n", | |
| " activators BashActivator,CShellActivator,FishActivator,NushellActivator,PowerShellActivator,PythonActivator\n", | |
| "-= Installing Default Dependencies =-\n", | |
| "Using VENV at /content\n", | |
| "Requirement already satisfied: pip in ./venv/lib/python3.12/site-packages (25.2)\n", | |
| "Collecting uv\n", | |
| " Using cached uv-0.8.23-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", | |
| "Collecting setuptools\n", | |
| " Downloading setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n", | |
| "Collecting wheel\n", | |
| " Downloading wheel-0.45.1-py3-none-any.whl.metadata (2.3 kB)\n", | |
| "Collecting ninja\n", | |
| " Downloading ninja-1.13.0-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (5.1 kB)\n", | |
| "Collecting jedi\n", | |
| " Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n", | |
| "Collecting parso<0.9.0,>=0.8.4 (from jedi)\n", | |
| " Downloading parso-0.8.5-py2.py3-none-any.whl.metadata (8.3 kB)\n", | |
| "Using cached uv-0.8.23-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (21.2 MB)\n", | |
| "Downloading setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m26.9 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n", | |
| "\u001b[?25hDownloading wheel-0.45.1-py3-none-any.whl (72 kB)\n", | |
| "Downloading ninja-1.13.0-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (180 kB)\n", | |
| "Downloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m56.9 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n", | |
| "\u001b[?25hDownloading parso-0.8.5-py2.py3-none-any.whl (106 kB)\n", | |
| "Installing collected packages: wheel, uv, setuptools, parso, ninja, jedi\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6/6\u001b[0m [jedi]\n", | |
| "\u001b[1A\u001b[2KSuccessfully installed jedi-0.19.2 ninja-1.13.0 parso-0.8.5 setuptools-80.9.0 uv-0.8.23 wheel-0.45.1\n", | |
| "Using VENV at /content\n", | |
| "Looking in indexes: https://download.pytorch.org/whl/cu129\n", | |
| "Collecting torch\n", | |
| " Downloading https://download.pytorch.org/whl/cu129/torch-2.8.0%2Bcu129-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (30 kB)\n", | |
| "Collecting torchao\n", | |
| " Downloading https://download.pytorch.org/whl/cu129/torchao-0.13.0%2Bcu129-cp39-abi3-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (19 kB)\n", | |
| "Collecting torchvision\n", | |
| " Downloading https://download.pytorch.org/whl/cu129/torchvision-0.23.0%2Bcu129-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (6.1 kB)\n", | |
| "Collecting torchaudio\n", | |
| " Downloading https://download.pytorch.org/whl/cu129/torchaudio-2.8.0%2Bcu129-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (7.2 kB)\n", | |
| "Collecting xformers\n", | |
| " Downloading https://download.pytorch.org/whl/cu129/xformers-0.0.32.post2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (1.1 kB)\n", | |
| "Collecting filelock (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/filelock-3.13.1-py3-none-any.whl.metadata (2.8 kB)\n", | |
| "Collecting typing-extensions>=4.10.0 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/typing_extensions-4.12.2-py3-none-any.whl.metadata (3.0 kB)\n", | |
| "Requirement already satisfied: setuptools in ./venv/lib/python3.12/site-packages (from torch) (80.9.0)\n", | |
| "Collecting sympy>=1.13.3 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/sympy-1.13.3-py3-none-any.whl.metadata (12 kB)\n", | |
| "Collecting networkx (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/networkx-3.3-py3-none-any.whl.metadata (5.1 kB)\n", | |
| "Collecting jinja2 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/Jinja2-3.1.4-py3-none-any.whl.metadata (2.6 kB)\n", | |
| "Collecting fsspec (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/fsspec-2024.6.1-py3-none-any.whl.metadata (11 kB)\n", | |
| "Collecting nvidia-cuda-nvrtc-cu12==12.9.86 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cuda_nvrtc_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting nvidia-cuda-runtime-cu12==12.9.79 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cuda_runtime_cu12-12.9.79-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting nvidia-cuda-cupti-cu12==12.9.79 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cuda_cupti_cu12-12.9.79-py3-none-manylinux_2_25_x86_64.whl.metadata (1.8 kB)\n", | |
| "Collecting nvidia-cudnn-cu12==9.10.2.21 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl.metadata (1.8 kB)\n", | |
| "Collecting nvidia-cublas-cu12==12.9.1.4 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting nvidia-cufft-cu12==11.4.1.4 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cufft_cu12-11.4.1.4-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.8 kB)\n", | |
| "Collecting nvidia-curand-cu12==10.3.10.19 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_curand_cu12-10.3.10.19-py3-none-manylinux_2_27_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting nvidia-cusolver-cu12==11.7.5.82 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cusolver_cu12-11.7.5.82-py3-none-manylinux_2_27_x86_64.whl.metadata (1.9 kB)\n", | |
| "Collecting nvidia-cusparse-cu12==12.5.10.65 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cusparse_cu12-12.5.10.65-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.8 kB)\n", | |
| "Collecting nvidia-cusparselt-cu12==0.7.1 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl.metadata (7.0 kB)\n", | |
| "Collecting nvidia-nccl-cu12==2.27.3 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (2.0 kB)\n", | |
| "Collecting nvidia-nvtx-cu12==12.9.79 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_nvtx_cu12-12.9.79-py3-none-manylinux1_x86_64.manylinux_2_5_x86_64.whl.metadata (1.8 kB)\n", | |
| "Collecting nvidia-nvjitlink-cu12==12.9.86 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting nvidia-cufile-cu12==1.14.1.1 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/nvidia_cufile_cu12-1.14.1.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting triton==3.4.0 (from torch)\n", | |
| " Downloading https://download.pytorch.org/whl/triton-3.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl.metadata (1.7 kB)\n", | |
| "Collecting numpy (from torchvision)\n", | |
| " Downloading https://download.pytorch.org/whl/numpy-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (60 kB)\n", | |
| "Collecting pillow!=8.3.*,>=5.3.0 (from torchvision)\n", | |
| " Downloading https://download.pytorch.org/whl/pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (9.1 kB)\n", | |
| "Collecting mpmath<1.4,>=1.1.0 (from sympy>=1.13.3->torch)\n", | |
| " Downloading https://download.pytorch.org/whl/mpmath-1.3.0-py3-none-any.whl (536 kB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m536.2/536.2 kB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n", | |
| "\u001b[?25hCollecting MarkupSafe>=2.0 (from jinja2->torch)\n", | |
| " Downloading https://download.pytorch.org/whl/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (28 kB)\n", | |
| "Downloading https://download.pytorch.org/whl/cu129/torch-2.8.0%2Bcu129-cp312-cp312-manylinux_2_28_x86_64.whl (1240.3 MB)\n", | |
| "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 GB\u001b[0m \u001b[31m16.7 MB/s\u001b[0m \u001b[33m0:00:36\u001b[0m\n", | |
| "\u001b[?25hDownloading https://download.pytorch.org/whl/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl (581.2 MB)\n", | |
| "\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━\u001b[0m \u001b[32m497.5/581.2 MB\u001b[0m \u001b[31m44.6 MB/s\u001b[0m eta \u001b[36m0:00:02\u001b[0m" | |
| ] | |
| } | |
| ] | |
| } | |
| ] | |
| } |