diff --git a/.devcontainer/.gitignore b/.devcontainer/.gitignore new file mode 100644 index 00000000000..1103038ad27 --- /dev/null +++ b/.devcontainer/.gitignore @@ -0,0 +1,2 @@ +/.uv_cache/ +/local diff --git a/.devcontainer/Containerfile b/.devcontainer/Containerfile new file mode 100644 index 00000000000..75063e45df4 --- /dev/null +++ b/.devcontainer/Containerfile @@ -0,0 +1,44 @@ +# syntax=docker/dockerfile:1.4 + +# For InvokeAI devcontainer. +# +# Design choices: +# - Build on standard devcontainer base. +# - Only do root-level installs, as base container does not include the non-root ubuntu user and +# sets it up using the common-utils feature after. + +FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04 + +RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ export DEBIAN_FRONTEND=noninteractive ;\ apt-get --quiet install --update --assume-yes --no-install-recommends --autoremove \ git git-lfs \ imagemagick \ libopencv-dev \ libstdc++-10-dev \ openssh-client &&\ # Link amdgpu.ids for ROCm builds # contributed by https://github.com/Rubonnek mkdir -p "/opt/amdgpu/share/libdrm" &&\ ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids" &&\ # for patchmatch build # (RUN uses /bin/sh, which has no brace expansion, so both .pc paths are spelled out) ln -sf --relative /usr/lib/$(uname -p)-linux-gnu/pkgconfig/opencv4.pc /usr/lib/$(uname -p)-linux-gnu/pkgconfig/opencv.pc + +# Install `uv` for Python package management +COPY --from=ghcr.io/astral-sh/uv:0.11.14 /uv /uvx /usr/local/bin/ + +ENV PNPM_HOME=/pnpm PATH=/pnpm:$PATH + +# PNPM_HOME needs to be writable by the user using it, but that user doesn't exist yet. +# Set up a pnpm group that users may be added to later. 
+RUN addgroup --system pnpm &&\ + umask 0002 &&\ + mkdir "${PNPM_HOME}" &&\ + chown :pnpm "${PNPM_HOME}" &&\ + chmod g+s "${PNPM_HOME}" &&\ + # Use the pnpm install script, but inhibit it from changing any rc-files + # because we set those values in the ENV above. + curl -fsSL https://get.pnpm.io/install.sh | PNPM_VERSION=10.33.4 SHELL=/bin/sh ENV=/dev/null newgrp pnpm &&\ + pnpm env use --global 22 diff --git a/.devcontainer/README.md b/.devcontainer/README.md new file mode 100644 index 00000000000..e1e89c5527b --- /dev/null +++ b/.devcontainer/README.md @@ -0,0 +1,59 @@ +# Development Containers + +[Development Containers](https://containers.dev/) can be a good way to get a development environment running. +Containers also provide some isolation to the host system. + +Dev containers may be run locally by your IDE +([VS Code](https://code.visualstudio.com/docs/devcontainers/containers), +[PyCharm Pro](https://www.jetbrains.com/help/pycharm/2026.1/connect-to-devcontainer.html)), +on a cloud IDE ([GitHub Codespaces](https://docs.github.com/en/codespaces/about-codespaces/what-are-codespaces)), +or by an editor-agnostic host ([DevPod](https://github.com/skevetter/devpod/)). + + +## Available Containers +- **CPU-only** has no GPU support. Smaller dependency size; no additional setup required. +- **CUDA** for NVIDIA GPUs. Installs torch with CUDA dependencies. Requires [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/cdi-support.html#generating-a-cdi-specification/) on the host. + - **CUDA on podman** for those using [podman](https://podman.io/) to run containers [[VS Code](https://code.visualstudio.com/remote/advancedcontainers/docker-options#_podman), [PyCharm](https://www.jetbrains.com/help/pycharm/2026.1/podman.html)]. + - *CUDA on Docker* 🚧 **TODO** 🚧 for those using [Docker](https://www.docker.com/) to run containers. + + + + +## Customizing a Container + +Customizing the container environment +(e.g. 
to use your existing `INVOKEAI_ROOT` data directory) +requires making a new configuration. + +Copy one of the provided `devcontainer.json` to a new subdirectory such as `local/devcontainer.json` and edit it there. Change its `"name"` and other properties as desired. + +See the comments in `base/partial.jsonc` for some suggestions. + + + +## Implementation Details + +Unfortunately [there is no extension mechanism for `devcontainer.json`](https://github.com/devcontainers/spec/issues/22), +so we've cobbled one together. + +`base/partial.jsonc` contains the common base of the devcontainer configuration. + +`merge-partial-configs.sh` merges it with other `partial.jsonc` files to form full `devcontainer.json` configs in subdirectories. + +> [!NOTE] +> Maintainers should run `merge-partial-configs.sh` and **commit** the results after changing any public `partial.jsonc`. +> This keeps the configs available to dev clients on their initial load of the project. + +Generated `devcontainer.json` files should not be edited, as any edits will be overwritten the next time the partials change. + + +### Differences from /docker/Dockerfile + +Production containers are typically optimized for deployment size and strip away anything non-essential for running their single service. + +Development containers are built for developer experience. They include all the tools to build, test, and debug the application. + +Production containers include the application code in their image. + +Development containers assume you're going to be changing the code, +so the image provides the environment but the application code is mounted separately at runtime. 
diff --git a/.devcontainer/base/partial.jsonc b/.devcontainer/base/partial.jsonc new file mode 100644 index 00000000000..7918ff25430 --- /dev/null +++ b/.devcontainer/base/partial.jsonc @@ -0,0 +1,66 @@ +{ + "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json", + "name": "[Common Base Template]", + "build": { + "dockerfile": "../Containerfile" + }, + + "mounts": [ + // If you have an existing invokeai directory you want mounted into the container: + // {"type": "bind", "source": "/EXAMPLE/InvokeAI", "target": "/invokeai"}, + // If instead you want to persist data to a docker volume: + {"type": "volume", "source": "InvokeAIData", "target": "/invokeai"} + // ⚠️ NOTICE: If you do not mount something to /invokeai, all your data goes away when the container does. + // If you have a huggingface models directory you want to use: + // {"type": "bind", "source": "/EXAMPLE/huggingface", "target": "/invokeai/.cache/huggingface"} + ], + + "forwardPorts": [ + // Ports get auto-forwarded regardless, but highlight the main server port. + 9090 + ], + "portsAttributes": { + "9090": { + "label": "InvokeAI server" + }, + "5173": { + "label": "vite" + }, + "6006": { + "label": "Storybook" + } + }, + "containerEnv": { + "GPU_DRIVER": "cpu", + "HF_HOME": "/invokeai/.cache/huggingface", + "INVOKEAI_ROOT": "/invokeai", + // For additional dependencies of custom nodes + "PYTHONPATH": "/invokeai/node-deps/", + // Keep the uv cache in the workspace so it's on the same filesystem as the default venv location. + "UV_CACHE_DIR": "${containerWorkspaceFolder}/.devcontainer/.uv_cache", + "UV_COMPILE_BYTECODE": "1", + "UV_PYTHON": "3.12" + }, + + "onCreateCommand": { + "pnpm": "sudo adduser $( id -un ) pnpm", + // uv sync will ensure there's a venv in postCreate, but maybe if we do it earlier vscode will stop complaining about being unable to find python? 
+ "uv": "sh .devcontainer/install-python.sh" + }, + // Parallel execution is nice in theory, but has a UX problem in vscode: + // It doesn't show output until it is done. Downloading torch takes long enough that we would like to be able to see what's going on, + // so stick with a single command. + "postCreateCommand": "sh .devcontainer/post-create.sh $GPU_DRIVER", + + "customizations": { + "vscode": { + "extensions": [ + "charliermarsh.ruff", + "dbaeumer.vscode-eslint", + "esbenp.prettier-vscode", + "ms-python.python", + "vitest.explorer" + ] + } + } +} diff --git a/.devcontainer/cpu/devcontainer.json b/.devcontainer/cpu/devcontainer.json new file mode 100644 index 00000000000..f73e9d92d38 --- /dev/null +++ b/.devcontainer/cpu/devcontainer.json @@ -0,0 +1,53 @@ +{ + "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json", + "name": "InvokeAI (CPU-only)", + "build": { + "dockerfile": "../Containerfile" + }, + "mounts": [ + { + "type": "volume", + "source": "InvokeAIData", + "target": "/invokeai" + } + ], + "forwardPorts": [ + 9090 + ], + "portsAttributes": { + "5173": { + "label": "vite" + }, + "6006": { + "label": "Storybook" + }, + "9090": { + "label": "InvokeAI server" + } + }, + "containerEnv": { + "GPU_DRIVER": "cpu", + "HF_HOME": "/invokeai/.cache/huggingface", + "INVOKEAI_ROOT": "/invokeai", + "PYTHONPATH": "/invokeai/node-deps/", + "UV_CACHE_DIR": "${containerWorkspaceFolder}/.devcontainer/.uv_cache", + "UV_COMPILE_BYTECODE": "1", + "UV_PYTHON": "3.12" + }, + "onCreateCommand": { + "pnpm": "sudo adduser $( id -un ) pnpm", + "uv": "sh .devcontainer/install-python.sh" + }, + "postCreateCommand": "sh .devcontainer/post-create.sh $GPU_DRIVER", + "customizations": { + "vscode": { + "extensions": [ + "charliermarsh.ruff", + "dbaeumer.vscode-eslint", + "esbenp.prettier-vscode", + "ms-python.python", + "vitest.explorer" + ] + } + } +} \ No newline at end of file diff --git a/.devcontainer/cpu/partial.jsonc 
b/.devcontainer/cpu/partial.jsonc new file mode 100644 index 00000000000..89415650d17 --- /dev/null +++ b/.devcontainer/cpu/partial.jsonc @@ -0,0 +1,6 @@ +{ + "name": "InvokeAI (CPU-only)", + "containerEnv": { + "GPU_DRIVER": "cpu" + } +} diff --git a/.devcontainer/install-python.sh b/.devcontainer/install-python.sh new file mode 100755 index 00000000000..2c2c4231c59 --- /dev/null +++ b/.devcontainer/install-python.sh @@ -0,0 +1,16 @@ +#!/bin/sh +set -e +export UV_NO_PROGRESS=1 + +set -x +if [ ! -e .venv/bin/python ] ; then + if [ ! -d .venv ] ; then + uv venv < /dev/null + else + # There is a venv present; maybe it's just missing the uv-managed python. + uv python install < /dev/null + # If that didn't work, clear the venv and rebuild it. + .venv/bin/python --version || uv venv --clear < /dev/null + fi +fi + diff --git a/.devcontainer/merge-partial-configs.sh b/.devcontainer/merge-partial-configs.sh new file mode 100755 index 00000000000..96b3fd8fc71 --- /dev/null +++ b/.devcontainer/merge-partial-configs.sh @@ -0,0 +1,6 @@ +#!/bin/sh +for d in cpu podman-cuda; do + pnpx @gradientedge/merge-jsonc@1.1.0 \ + --indent 4 --array-merge concat \ + --out "${d}"/devcontainer.json base/partial.jsonc "${d}"/partial.jsonc +done diff --git a/.devcontainer/podman-cuda/devcontainer.json b/.devcontainer/podman-cuda/devcontainer.json new file mode 100644 index 00000000000..24cfa8f5bb1 --- /dev/null +++ b/.devcontainer/podman-cuda/devcontainer.json @@ -0,0 +1,56 @@ +{ + "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json", + "name": "InvokeAI (CUDA on podman)", + "build": { + "dockerfile": "../Containerfile" + }, + "mounts": [ + { + "type": "volume", + "source": "InvokeAIData", + "target": "/invokeai" + } + ], + "forwardPorts": [ + 9090 + ], + "portsAttributes": { + "5173": { + "label": "vite" + }, + "6006": { + "label": "Storybook" + }, + "9090": { + "label": "InvokeAI server" + } + }, + "containerEnv": { + "GPU_DRIVER": "cuda", 
+ "HF_HOME": "/invokeai/.cache/huggingface", + "INVOKEAI_ROOT": "/invokeai", + "PYTHONPATH": "/invokeai/node-deps/", + "UV_CACHE_DIR": "${containerWorkspaceFolder}/.devcontainer/.uv_cache", + "UV_COMPILE_BYTECODE": "1", + "UV_PYTHON": "3.12" + }, + "onCreateCommand": { + "pnpm": "sudo adduser $( id -un ) pnpm", + "uv": "sh .devcontainer/install-python.sh" + }, + "postCreateCommand": "sh .devcontainer/post-create.sh $GPU_DRIVER", + "customizations": { + "vscode": { + "extensions": [ + "charliermarsh.ruff", + "dbaeumer.vscode-eslint", + "esbenp.prettier-vscode", + "ms-python.python", + "vitest.explorer" + ] + } + }, + "runArgs": [ + "--device=nvidia.com/gpu=all" + ] +} \ No newline at end of file diff --git a/.devcontainer/podman-cuda/partial.jsonc b/.devcontainer/podman-cuda/partial.jsonc new file mode 100644 index 00000000000..28971ec5ba2 --- /dev/null +++ b/.devcontainer/podman-cuda/partial.jsonc @@ -0,0 +1,10 @@ +{ + "name": "InvokeAI (CUDA on podman)", + "runArgs": [ + // enable nvidia gpu access on podman; https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/cdi-support.html#generating-a-cdi-specification + "--device=nvidia.com/gpu=all" + ], + "containerEnv": { + "GPU_DRIVER": "cuda" + } +} diff --git a/.devcontainer/post-create.sh b/.devcontainer/post-create.sh new file mode 100755 index 00000000000..3ffa4611e63 --- /dev/null +++ b/.devcontainer/post-create.sh @@ -0,0 +1,11 @@ +#!/bin/sh +if [ -z "$1" ] ; then + echo "Usage: $0 GPU" + echo " where GPU is an extra defined in pyproject.toml [cpu, cuda, rocm]" + exit 1 +fi +set -ex +sh .devcontainer/run-pnpm-i.sh invokeai/frontend/web docs +uv sync --frozen --no-progress --extra="$1" --extra dev --extra test --extra docs --extra dist +# collect_env shows whether torch is set up correctly. 
+uv run --no-sync python -m torch.utils.collect_env diff --git a/.devcontainer/run-pnpm-i.sh b/.devcontainer/run-pnpm-i.sh new file mode 100755 index 00000000000..e7d8ff01ce6 --- /dev/null +++ b/.devcontainer/run-pnpm-i.sh @@ -0,0 +1,8 @@ +#!/bin/sh +set -ex +for d in "$@" ; do + # Redirect input from /dev/null to prevent it from hanging on prompts. + # It should stop node from thinking stdin.isTTY and prompting in the first place, but it doesn't? + # The exit code is 0 even when this nulls out a prompt. :( + pnpm -C "${d}" install --frozen-lockfile --config.confirmModulesPurge=false < /dev/null +done