9 changes: 9 additions & 0 deletions .env
@@ -0,0 +1,9 @@
# HOME(s)
HF_HOME=${HF_HOME:-~/.cache/huggingface}
TORCH_HOME=${TORCH_HOME:-~/.cache/torch}

# Ports
DEMUCS_PORT=8202

# GPU
GPU=true
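
As a quick sanity check (a sketch, assuming Docker Compose v2), the values above can be previewed and overridden from the shell before building; the shell environment takes precedence over `.env`:

```bash
# Preview the compose file with the .env values substituted (nothing is started)
docker compose config

# Override a value for a single invocation, e.g. force a CPU-only build
GPU=false docker compose build
```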
20 changes: 16 additions & 4 deletions Dockerfile
@@ -1,8 +1,8 @@
# Base image supports Nvidia CUDA but does not require it and can also run demucs on the CPU
FROM nvidia/cuda:12.6.2-base-ubuntu22.04
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

USER root
ENV TORCH_HOME=/data/models
# ENV TORCH_HOME=/data/models
ENV OMP_NUM_THREADS=1

# Install required tools
@@ -16,6 +16,9 @@ RUN apt update && apt install -y --no-install-recommends \
python3 \
python3-dev \
python3-pip \
nvidia-cuda-toolkit \
&& apt-get autoremove -y \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# Clone Demucs (now maintained in the original author's github space)
@@ -24,8 +27,17 @@ WORKDIR /lib/demucs
# Checkout known stable commit on main
RUN git checkout b9ab48cad45976ba42b2ff17b229c071f0df9390

# Install dependencies with overrides for known working versions on this base image
RUN python3 -m pip install -e . "torch<2" "torchaudio<2" "numpy<2" --no-cache-dir
# Install specific versions of torch, torchvision and torchaudio
# If the 'gpu' build argument is true, install CUDA-enabled packages from PyTorch's cu118 index URL
ARG gpu
RUN pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 \
$( [ "$gpu" = "true" ] && echo "--index-url https://download.pytorch.org/whl/cu118" )
# Install requirements
RUN pip install -r requirements.txt

# Downgrade numpy (the torch 2.0 wheels are not compatible with numpy >= 2)
RUN pip install -U "numpy<2"

# Run once to ensure demucs works and trigger the default model download
RUN python3 -m demucs -d cpu test.mp3
# Cleanup output - we just used this to download the model
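
For reference, a minimal sketch of how the `gpu` build argument reaches the pip install step when the image is built directly with `docker build` rather than through Compose (the tag is the one used by the Makefile and compose file):

```bash
# CPU-only build: the gpu ARG is unset, so the default (CPU) PyTorch wheels are installed
docker build -t xserrat/facebook-demucs:latest .

# GPU build: the gpu ARG is "true", so pip adds --index-url https://download.pytorch.org/whl/cu118
docker build --build-arg gpu=true -t xserrat/facebook-demucs:latest .
```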
24 changes: 12 additions & 12 deletions Makefile
@@ -2,7 +2,7 @@ SHELL = /bin/sh
current-dir := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

# Default options
gpu = false
gpu = true
mp3output = false
model = htdemucs
shifts = 1
@@ -14,24 +14,19 @@ splittrack =

.PHONY:
init:
ifeq ($(gpu), true)
docker-gpu-option = --gpus all
endif
export GPU=$(gpu)
ifeq ($(mp3output), true)
demucs-mp3-option = --mp3
endif
ifneq ($(splittrack),)
demucs-twostems-option = --two-stems $(splittrack)
endif

# Build commands
docker-build-command = docker compose build

# Construct commands
docker-run-command = docker run --rm -i \
--name=demucs \
$(docker-gpu-option) \
-v $(current-dir)input:/data/input \
-v $(current-dir)output:/data/output \
-v $(current-dir)models:/data/models \
xserrat/facebook-demucs:latest
docker-run-command = docker compose run --rm demucs

demucs-command = "python3 -m demucs -n $(model) \
--out /data/output \
@@ -61,4 +56,9 @@ run-interactive: init build ## Run the docker container interactively to experiment
.PHONY:
.SILENT:
build: ## Build the docker image which supports running demucs with CPU only or with Nvidia CUDA on a supported GPU
docker build -t xserrat/facebook-demucs:latest .
$(docker-build-command)

.PHONY:
.SILENT:
run-no-build: ## Run demucs without build
$(docker-run-command) $(demucs-command)
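
A short usage sketch for the updated targets (GPU is now the default; the track name is a placeholder):

```bash
# Build the image through docker compose (gpu=true is the new default)
make build

# Build on CPU only by overriding the default
make build gpu=false

# Reuse an existing image and skip the build step entirely
make run-no-build track=mysong.mp3
```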
12 changes: 10 additions & 2 deletions README.md
@@ -5,7 +5,7 @@ to split music tracks into different tracks (bass, drums, voice, others).
## Usage
### Clone this repository
```bash
git clone https://github.com/xserrat/docker-facebook-demucs.git demucs
git clone https://github.com/gu-ma/docker-facebook-demucs.git demucs
```
### Split a music track
1. Copy the track you want to split into the `input` folder (e.g., `input/mysong.mp3`).
@@ -52,11 +52,19 @@ Example:
make run-interactive gpu=true
```

### Run without build

To run `demucs` without rebuilding the Docker image:

```bash
make run-no-build track=mysong.mp3
```

## Building the Image

The Docker image can be built locally via the `build` job:
```bash
make build
make build gpu=true
```

## License
26 changes: 26 additions & 0 deletions docker-compose.yml
@@ -0,0 +1,26 @@
services:
demucs:
container_name: demucs
image: xserrat/facebook-demucs:latest
build:
context: .
args:
- gpu=${GPU}
volumes:
- ./input:/data/input
- ./output:/data/output
- ./models:/data/models
- ${HF_HOME}:/root/.cache/huggingface
- ${TORCH_HOME}:/root/.cache/torch
ports:
- ${DEMUCS_PORT}:6969
working_dir: /lib/demucs
command: ['/bin/bash', '--login', '-c']
tty: true
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
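
For comparison, a rough `docker run` sketch of what the service above requests (cache mounts, port mapping, and the login-shell command omitted); the `deploy.resources` block takes over the role of the `--gpus all` flag that the old Makefile passed directly:

```bash
# Roughly equivalent to the compose service's volumes, working dir, and GPU reservation
docker run --rm -it --gpus 1 \
  -v "$(pwd)/input:/data/input" \
  -v "$(pwd)/output:/data/output" \
  -v "$(pwd)/models:/data/models" \
  -w /lib/demucs \
  xserrat/facebook-demucs:latest /bin/bash --login
```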