diff --git a/.github/workflows/GithubActionTests.yml b/.github/workflows/GithubActionTests.yml index 9e6709333eb..0b490000adc 100644 --- a/.github/workflows/GithubActionTests.yml +++ b/.github/workflows/GithubActionTests.yml @@ -40,7 +40,7 @@ jobs: eval "$(conda shell.bash hook)" conda activate bioconda if git diff --name-only origin/master...HEAD | grep -vE ^docs; then - source images/versions.sh - source images/env_var_inventory.sh + source images/image_config.sh + env_var_inventory py.test --durations=0 test/ -v --log-level=DEBUG --tb=native -m '${{ matrix.py_test_marker }}' else diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 815aabe8d75..32a5952368e 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -1,5 +1,3 @@ -# Build all container images. -# name: Build images concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -11,236 +9,126 @@ on: - '.circleci/**' - 'docs/**' - 'test/**' + env: - # Used to override BIOCONDA_UTILS_VERSION in images/versions.sh BIOCONDA_UTILS_VERSION: ${{ github.event.release && github.event.release.tag_name || github.head_ref || github.ref_name }} jobs: - # JOBS FOR BUILDING IMAGES - # ---------------------------------------------------------------------- - # These jobs will build images for archs, put them into a manifest, and push - # that to GitHub Container Registry. Later, the testing jobs will test and - # push to quay.io. - - build-base-debian: - name: Build base-debian + # Inspect quay.io to see which, if any, of the images we're trying to build + # are already on quay.io. If *any* are missing, then build them *all*. + detect-existing: + name: detect-existing runs-on: ubuntu-24.04 - container: - # travier/podman-action contains newer podman/buildah versions. - image: quay.io/travier/podman-action - options: --privileged outputs: - # A note on these TAG_EXISTS_* outputs: these allow subsequent jobs to - # change behavior (e.g., skip building or skip pushing to ghcr) depending - # on whether an image has already been created. - TAG_EXISTS_base-debian: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian }} + DO_BUILD: ${{ steps.detect-existing.outputs.DO_BUILD }} steps: - - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up QEMU - run: | - podman run --rm --privileged \ - docker.io/tonistiigi/binfmt --install arm64 - - - name: Install Tools - run: | - set -eu - # jq is not installed in travier/podman-action - dnf install -qy \ - jq - rpm -q \ - buildah podman \ - coreutils findutils sed \ - curl jq \ - | ( - while read -r line ; do - printf %s\\n "${line}" - case "${line}" in (*' not installed'*) - err=1 ;; - esac - done - exit "${err-0}" - ) - - - name: base-debian - id: base-debian - run: | - source images/versions.sh - if [ $(tag_exists $BASE_DEBIAN_IMAGE_NAME $BASE_TAG) ]; then - echo "TAG_EXISTS_base-debian=true" >> $GITHUB_OUTPUT - else - cd images && bash build.sh base-glibc-debian-bash - fi - - - name: push to ghcr - if: '${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - source images/versions.sh - push_to_ghcr $BASE_DEBIAN_IMAGE_NAME $BASE_TAG - - build-base-busybox: - name: Build base-busybox + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Detect existing images on quay.io + id: detect-existing + run: | + + # Source env vars and functions to be used throughout building.
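+ # image_config.sh defines the image name and tag variables used below, plus + # helper functions such as tag_exists, env_var_inventory, and + # build_and_push_manifest.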
+ source images/image_config.sh + + # Display the current environment variables used throughout building + # -- useful for debugging + env_var_inventory + + if tag_exists $BASE_BUSYBOX_IMAGE_NAME $BASE_TAG \ + && tag_exists $BASE_DEBIAN_IMAGE_NAME $BASE_TAG \ + && tag_exists $BUILD_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG \ + && tag_exists $BOT_IMAGE_NAME $BIOCONDA_IMAGE_TAG \ + && tag_exists $CREATE_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG; + then + echo "DO_BUILD=false" >> $GITHUB_OUTPUT + else + echo "DO_BUILD=true" >> $GITHUB_OUTPUT + fi + + build-images: + name: Build all images with podman runs-on: ubuntu-24.04 + needs: [ detect-existing ] + if: ${{ needs.detect-existing.outputs.DO_BUILD == 'true' }} container: # travier/podman-action contains newer podman/buildah versions. image: quay.io/travier/podman-action options: --privileged - outputs: - TAG_EXISTS_base-busybox: ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 + steps: - - name: Set up QEMU + - name: Initial setup of podman-action container run: | - podman run --rm --privileged \ - docker.io/tonistiigi/binfmt --install arm64 + # support cross-arch container building + podman run --rm --privileged docker.io/tonistiigi/binfmt --install arm64 - - name: Install Tools - run: | - set -eu - # jq is not installed in travier/podman-action - dnf install -qy \ - jq + # Install jq and git, and then check that we have other required tools + dnf install -qy jq git rpm -q \ buildah podman \ coreutils findutils sed \ - curl jq \ - | ( - while read -r line ; do + curl jq git | + ( + while read -r line; do printf %s\\n "${line}" - case "${line}" in (*' not installed'*) - err=1 ;; + case "${line}" in *' not installed'*) + err=1 + ;; esac - done - exit "${err-0}" + done + exit "${err-0}" ) - - name: base-busybox - id: base-busybox - run: | - source images/versions.sh - if [ $(tag_exists $BASE_BUSYBOX_IMAGE_NAME $BASE_TAG) ]; then - echo "TAG_EXISTS_base-busybox=true" >> $GITHUB_OUTPUT - else - cd images && bash build.sh base-glibc-busybox-bash - fi - - - name: push to ghcr - if: '${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - source images/versions.sh - push_to_ghcr $BASE_BUSYBOX_IMAGE_NAME $BASE_TAG - - build-build-env: - name: Build build-env - outputs: - TAG_EXISTS_build-env: ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} - runs-on: ubuntu-22.04 - steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - - name: Install qemu dependency + - name: build all images run: | - sudo apt-get update - sudo apt-get install -y qemu-user-static + # GHA gives a warning suggesting the following, even though the + # checkout action should have already done this. + git config --global --add safe.directory /__w/bioconda-utils/bioconda-utils - - name: build-env - id: build-env - run: | - source images/versions.sh - if [ $(tag_exists $BUILD_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG) ]; then - echo "TAG_EXISTS_build-env=true" >> $GITHUB_OUTPUT - else - cd images && bash build.sh build-env - fi - - name: push to ghcr - if: '${{ !
steps.build-env.outputs.TAG_EXISTS_build-env }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - source images/versions.sh - push_to_ghcr $BUILD_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG + # Source env vars and functions to be used throughout building. + source images/image_config.sh - build-create-env: - name: Build create-env - needs: [build-build-env, build-base-busybox] - outputs: - TAG_EXISTS_create-env: ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} - runs-on: ubuntu-24.04 - container: - # travier/podman-action contains newer podman/buildah versions. - image: quay.io/travier/podman-action - options: --privileged - steps: - - uses: actions/checkout@v4 + # Build each image, do a quick test, and then save the image as + # a tarball in the image-artifacts directory. This happens once per + # arch. + # + # There are dependencies, so they should be built in this order. + cd images + time bash build.sh base-glibc-busybox-bash + time bash build.sh base-glibc-debian-bash + time bash build.sh build-env + time bash build.sh create-env + time bash build.sh bot + + # Upload the tarballs just created so they can be used in the next job. + - name: Upload artifacts + uses: actions/upload-artifact@v4 with: - fetch-depth: 0 - - - name: Set up QEMU - run: | - podman run --rm --privileged \ - docker.io/tonistiigi/binfmt --install arm64 - - - name: Install Tools - run: | - set -eu - # jq is not installed in travier/podman-action - dnf install -qy \ - jq - rpm -q \ - buildah podman \ - coreutils findutils sed \ - curl jq \ - | ( - while read -r line ; do - printf %s\\n "${line}" - case "${line}" in (*' not installed'*) - err=1 ;; - esac - done - exit "${err-0}" - ) - - - name: Build create-env - id: create-env - run: | - source images/versions.sh - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - if [ $(tag_exists $CREATE_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG) ]; then - echo "TAG_EXISTS_create-env=true" >> $GITHUB_OUTPUT - else - cd images && bash build.sh create-env - fi - - - name: push to ghcr - if: '${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - source images/versions.sh - push_to_ghcr $CREATE_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG - - - # END OF BUILDING IMAGES - # ---------------------------------------------------------------------- - # START TESTING - # These testing jobs will run the respective Dockerfile.test in each image - # directory. + name: image-artifacts + path: | + image-artifacts/ test: name: test bioconda-utils with images runs-on: ubuntu-24.04 - needs: [build-base-debian, build-base-busybox, build-build-env, build-create-env] + + # Start a local docker registry. Podman/buildah will push manifests and + # images here for docker to use within bioconda-utils + services: + registry: + image: registry:2 + ports: + - 5000:5000 + needs: [ build-images ] steps: - uses: actions/checkout@v4 @@ -250,12 +138,17 @@ jobs: - name: Install bioconda-utils run: | export BIOCONDA_DISABLE_BUILD_PREP=1 + + # TODO: change to master when done with PR BRANCH=simplify-unify-containers + wget https://raw.githubusercontent.com/bioconda/bioconda-common/${BRANCH}/{common,install-and-set-up-conda,configure-conda}.sh - source images/versions.sh + + # Source env vars and functions to be used throughout building. 
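+ # Sourcing image_config.sh also sets BIOCONDA_UTILS_VERSION from `git + # describe`, matching the version that versioneer computes for this checkout.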
+ source images/image_config.sh # Ensure install-and-set-up-conda uses same version as in the container - # (which uses images/versions.sh + # (which uses images/image_config.sh) export BIOCONDA_UTILS_TAG=$BIOCONDA_UTILS_VERSION bash install-and-set-up-conda.sh eval "$(conda shell.bash hook)" @@ -263,90 +156,122 @@ jobs: conda activate bioconda python setup.py install - - name: test + # Download tarballs created in the previous job + - name: Download images as artifacts + uses: actions/download-artifact@v4 + with: + name: image-artifacts + path: image-artifacts + + # Load those tarballs as images into podman. + - name: Load image artifacts into podman run: | - eval "$(conda shell.bash hook)" - conda activate bioconda + for image in image-artifacts/*.tar; do + podman load -i $image + done - source images/versions.sh + - name: Build & push manifests to local docker registry + run: | - # Figure out which registry to use for each image. If we built an image - # during this workflow run, it was pushed to ghcr.io as a staging area. - # If it was not built during this run, then we use the exissting one on - # quay.io. - [ ${{ needs.build-build-env.outputs.TAG_EXISTS_build-env }} ] && BUILD_ENV_REGISTRY='quay.io/bioconda' || BUILD_ENV_REGISTRY="ghcr.io/bioconda" - [ ${{ needs.build-create-env.outputs.TAG_EXISTS_create-env }} ] && CREATE_ENV_REGISTRY='quay.io/bioconda' || CREATE_ENV_REGISTRY="ghcr.io/bioconda" - [ ${{ needs.build-base-busybox.outputs.TAG_EXISTS_base_busybox }} ] && DEST_BASE_REGISTRY='quay.io/bioconda' || DEST_BASE_REGISTRY="ghcr.io/bioconda" - [ ${{ needs.build-base-debian.outputs.TAG_EXISTS_base_debian }} ] && DEST_EXTENDED_BASE_REGISTRY='quay.io/bioconda' || DEST_EXTENDED_BASE_REGISTRY="ghcr.io/bioconda" + # Source env vars and functions to be used throughout building. + source images/image_config.sh - # Tell mulled-build which image to use - # - # DEST_BASE_IMAGE, DEFAULT_BASE_IMAGE, and DEFAULT_EXTENDED_BASE_IMAGE - # are hard-coded by mulled-build, e.g. - # https://github.com/galaxyproject/galaxy/blob/957f6f5475f8f96c6af110be10791b5acab3a0df/lib/galaxy/tool_util/deps/mulled/mulled_build.py#L62-L71 - # We keep DEST_BASE_IMAGE unset so it defaults to DEFAULT_BASE_IMAGE or - # DEFAULT_EXTENDED_BASE_IMAGE. - export DEFAULT_BASE_IMAGE="${DEST_BASE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" - export DEFAULT_EXTENDED_BASE_IMAGE="${DEST_EXTENDED_BASE_REGISTRY}/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" + # Display the current environment variables used throughout building + # -- useful for debugging + env_var_inventory - export BUILD_ENV_IMAGE="${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" - export CREATE_ENV_IMAGE="${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + # Check that we loaded images + podman images - source images/env_var_inventory.sh + set -x - [ command -v podman > /dev/null ] && podman images - [ command -v docker > /dev/null ] && docker images + # Compose a multi-arch manifest (json file); push it and its images to the local docker registry. + # We provide additional arguments of --tls-verify=false for the local + # registry to avoid the need to set up TLS certs for it. 
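+ # For reference, build_and_push_manifest (defined in images/image_config.sh) + # takes: <source image:tag> <destination> [extra args passed through to + # `podman manifest push`].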
+ build_and_push_manifest ${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} docker://localhost:5000/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} "--tls-verify=false" + build_and_push_manifest ${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} docker://localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} "--tls-verify=false" + build_and_push_manifest ${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} docker://localhost:5000/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "--tls-verify=false" + build_and_push_manifest ${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} docker://localhost:5000/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "--tls-verify=false" + ONLY_AMD64=true build_and_push_manifest ${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} docker://localhost:5000/${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "--tls-verify=false" + # Make sure we can get them back into the docker runtime in preparation + # for running bioconda-utils tests in the next job. + docker pull localhost:5000/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} + docker pull localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} + docker pull localhost:5000/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} + docker pull localhost:5000/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} + docker pull localhost:5000/${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} + + docker images + + - name: test + run: | + eval "$(conda shell.bash hook)" + conda activate bioconda - + # The following env vars are searched for by mulled-build: + # - DEST_BASE_IMAGE + # - DEFAULT_BASE_IMAGE + # - DEFAULT_EXTENDED_BASE_IMAGE + # + # We keep DEST_BASE_IMAGE unset so it defaults to DEFAULT_BASE_IMAGE or + # DEFAULT_EXTENDED_BASE_IMAGE. + # + # See + # https://github.com/galaxyproject/galaxy/blob/957f6f5/lib/galaxy/tool_util/deps/mulled/mulled_build.py#L62-L71 + # for more details. + # + # We use the manifests that were just pushed to the local docker + # registry running on localhost (which simulates eventually using + # manifests from quay.io). + source images/image_config.sh + export DEFAULT_BASE_IMAGE="localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + export DEFAULT_EXTENDED_BASE_IMAGE="localhost:5000/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" + export BUILD_ENV_IMAGE="localhost:5000/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + export CREATE_ENV_IMAGE="localhost:5000/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + + # Now that everything is set up, run the actual tests -- but only those + # related to docker. The other tests are run in a different GitHub + # Actions workflow. + py.test --durations=0 test/ -v --log-level=DEBUG -k "docker" --tb=native - # END TESTING - # ------------------------------------------------------------------------ - # START PUSHING IMAGES - # For these push steps, a repository must first exist on quay.io/bioconda - # AND that repository must also be configured to allow write access for the - # appropriate service account. This must be done by a user with admin - # access to quay.io/bioconda. - # - # This uses the TAG_EXISTS_* outputs from previous jobs to determine if - # a push to quay.io should happen. + # Push images to quay.io. + # + # NOTE: a repository must first exist on quay.io/bioconda AND that + # repository must also be configured to allow write access for the + # appropriate service account. This must be done by a user with admin + # access to quay.io/bioconda.
push: name: push images - if: github.ref == 'refs/heads/master' + if: (github.ref == 'refs/heads/master') && (needs.detect-existing.outputs.DO_BUILD == 'true') runs-on: ubuntu-24.04 - needs: [build-base-debian, build-base-busybox, build-build-env, build-create-env, test] + needs: [ detect-existing, test ] steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - - name: push base-debian - if: ${{ ! needs.base-debian.outputs.TAG_EXISTS_base-debian }} - run: | - echo '${{ secrets.QUAY_BIOCONDA_TOKEN }}' | podman login quay.io -u '${{ secrets.QUAY_BIOCONDA_USERNAME }}' --password-stdin - source images/versions.sh - move_from_ghcr_to_quay ${BASE_DEBIAN_IMAGE_NAME} ${BASE_TAG} - - - name: push base-busybox - if: ${{ ! needs.base-busybox.outputs.TAG_EXISTS_base-busybox }} - run: | - echo '${{ secrets.QUAY_BIOCONDA_TOKEN }}' | podman login quay.io -u '${{ secrets.QUAY_BIOCONDA_USERNAME }}' --password-stdin - source images/versions.sh - move_from_ghcr_to_quay ${BASE_BUSYBOX_IMAGE_NAME} ${BASE_TAG} - - name: push create-env - if: ${{ ! needs.create-env.outputs.TAG_EXISTS_create-env }} + - name: push manifests to quay.io run: | + # quay.io login echo '${{ secrets.QUAY_BIOCONDA_TOKEN }}' | podman login quay.io -u '${{ secrets.QUAY_BIOCONDA_USERNAME }}' --password-stdin - source images/versions.sh - move_from_ghcr_to_quay ${CREATE_ENV_IMAGE_NAME} ${BIOCONDA_IMAGE_TAG} - name: push build-env - if: ${{ ! needs.build-env.outputs.TAG_EXISTS_build-env }} - run: | - echo '${{ secrets.QUAY_BIOCONDA_TOKEN }}' | podman login quay.io -u '${{ secrets.QUAY_BIOCONDA_USERNAME }}' --password-stdin - source images/versions.sh - move_from_ghcr_to_quay ${BUILD_ENV_IMAGE_NAME} ${BIOCONDA_IMAGE_TAG} + # Source env vars and functions to be used throughout building. + source images/image_config.sh + + # Compose a multi-arch manifest (json file); push it and its images to quay.io + build_and_push_manifest ${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} "quay.io/bioconda/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + build_and_push_manifest ${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} "quay.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" + build_and_push_manifest ${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "quay.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + build_and_push_manifest ${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "quay.io/bioconda/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + ONLY_AMD64=true build_and_push_manifest ${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "quay.io/bioconda/${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + + # Same thing, but push with the "latest" tag. + build_and_push_manifest ${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} "quay.io/bioconda/${BASE_BUSYBOX_IMAGE_NAME}:latest" + build_and_push_manifest ${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} "quay.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:latest" + build_and_push_manifest ${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "quay.io/bioconda/${BUILD_ENV_IMAGE_NAME}:latest" + build_and_push_manifest ${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "quay.io/bioconda/${CREATE_ENV_IMAGE_NAME}:latest" + ONLY_AMD64=true build_and_push_manifest ${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "quay.io/bioconda/${BOT_IMAGE_NAME}:latest" diff --git a/docs/workflow-dag.txt b/docs/workflow-dag.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/images/README.md b/images/README.md index af39908dda6..19a51a6d3ca 100644 --- a/images/README.md +++ b/images/README.md @@ -2,60 +2,85 @@ This is where the various images used by bioconda-utils are created.
-Individual images for *each package* are not created here. But those depend on -a base image, and the base image is created here. +Here, we build the base containers (busybox and debian), the build container +(which has bioconda-utils and conda), the create container (conda only), and +the bot container (which runs autobump and responds to PRs and issues). See https://bioconda.github.io/developer/dockerfile-inventory.html for context on the containers themselves and how they relate to each other. -`.github/workflows/build-images.yml` is what orchestrates these image builds on -CI. +The primary components are: -`versions.sh` sets env vars that are used to control versions across all -images. It also has some helper functions. It should be sourced before running -`build.sh`. +- `image_config.sh` configures versions and defines some useful functions. It + should be sourced before doing anything else. +- Each image has its own directory containing at least: + - `prepare.sh`, which sources the `image_config.sh` described above. + `prepare.sh` is responsible for setting image-specific env vars needed for + the build. + - `Dockerfile` creates the image + - `Dockerfile.test` tests the image. +- `build.sh` takes one of these image directories as its argument and sources + the respective `prepare.sh` to set env vars for that image. -Then run `build.sh`, providing it an image directory. +## GitHub Actions -When building locally for testing, you need podman installed. Then do the -following: +See `.github/workflows/build-images.yml` for how this is configured to run on +GitHub Actions, which largely follows the method described below for building +locally. -```bash -source versions.sh - -# When running on GitHub Actions, this would be ghcr.io or quay.io -export BUILD_ENV_REGISTRY="localhost" - -# Similarly, when running on GitHub Actions, this would normally pull the -# manifest (which does not have the -amd64 suffix) from ghcr.io or quay.io. There -# does not seem to be a way to get podman-created manifests over to docker, or -# even to make local docker manifests. So we need to reference the image -# directly including the arch suffix. -export BUILD_ENV_IMAGE="localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}-amd64" - -# Each takes 3-10 min (build-env takes the longest) -bash build.sh base-glibc-busybox-bash -bash build.sh base-glibc-debian-bash -bash build.sh build-env -bash build.sh create-env -``` +## Building locally + +Building locally has the following requirements: -# Details +- podman installed +- docker installed +- docker registry running on localhost:5000 + - e.g. with `docker run -p 5000:5000 --rm --name registry registry`, + optionally with `-d` to run in detached mode +- bioconda-utils installed, along with test dependencies + - e.g. with `conda create -p ./env --file bioconda_utils/bioconda_utils-requirements.txt --file test-requirements.txt -y` + - followed by `conda activate ./env && pip install -e .` -Image directories must at least contain the following: +Use the following commands to build and test locally: -- `prepare.sh` script, where the first line should be `source ../versions.sh` -- `Dockerfile` for building -- `Dockerfile.test` for testing.
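+Note: in the commands below, `--tls-verify=false` is passed when pushing to the +local registry because it does not have TLS certificates set up; the GitHub +Actions workflow passes the same flag. +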
+```bash + +cd images +source image_config.sh + +time bash build.sh base-glibc-busybox-bash +time bash build.sh base-glibc-debian-bash +time bash build.sh build-env +time bash build.sh create-env +time bash build.sh bot + +build_and_push_manifest ${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} docker://localhost:5000/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} "--tls-verify=false" +build_and_push_manifest ${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} docker://localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} "--tls-verify=false" +build_and_push_manifest ${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} docker://localhost:5000/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "--tls-verify=false" +build_and_push_manifest ${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} docker://localhost:5000/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "--tls-verify=false" +ONLY_AMD64=true build_and_push_manifest ${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} docker://localhost:5000/${BOT_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} "--tls-verify=false" + +# Run bioconda-utils tests +export DEFAULT_BASE_IMAGE="localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" +export DEFAULT_EXTENDED_BASE_IMAGE="localhost:5000/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" +export BUILD_ENV_IMAGE="localhost:5000/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" +export CREATE_ENV_IMAGE="localhost:5000/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}" + +docker pull $DEFAULT_BASE_IMAGE +docker pull $DEFAULT_EXTENDED_BASE_IMAGE +docker pull $BUILD_ENV_IMAGE +docker pull $CREATE_ENV_IMAGE + +cd ../ +py.test --durations=0 test/ -v --log-level=DEBUG -k "docker" --tb=native +``` -`build.sh` sources `/prepare.sh`, which sources `versions.sh` to -populate the env vars needed for that particular image. -`/prepare.sh` should also do any other needed work in preparation -for building. -# How locale is handled +## How locale is handled Previously, we were preparing the locale each time in an image and copying that out to subsequent image. However, we expect the C.utf8 locale to change infrequently. So now we store it separately in the repo and copy it in. It was -initially prepared with `locale/generate_locale.sh`. +initially prepared with `locale/generate_locale.sh` and stored in +`locale/C.utf8`; if the locale ever needs updating, update both that script +and its stored output. diff --git a/images/base-glibc-busybox-bash/prepare.sh b/images/base-glibc-busybox-bash/prepare.sh index c97209955da..a709b3b124e 100644 --- a/images/base-glibc-busybox-bash/prepare.sh +++ b/images/base-glibc-busybox-bash/prepare.sh @@ -1,24 +1,25 @@ -source ../versions.sh +source ../image_config.sh IMAGE_NAME="${BASE_BUSYBOX_IMAGE_NAME}" TAG="$BASE_TAG" -# Build busybox binaries for each arch. -# -# The respective busybox base containers for each arch will later extract the -# relevant binary from this image. +# Before building the actual base images (which will happen in build.sh), we +# first build busybox binaries for each arch. Later, the base image Dockerfile +# will extract the arch-appropriate binary.
BUILD_ARGS=() BUILD_ARGS+=("--build-arg=debian_version=${DEBIAN_VERSION}") BUILD_ARGS+=("--build-arg=busybox_version=${BUSYBOX_VERSION}") -iidfile="$( mktemp )" +iidfile="$(mktemp)" buildah bud \ --iidfile="${iidfile}" \ --file=Dockerfile.busybox \ ${BUILD_ARGS[@]} -busybox_image="$( cat "${iidfile}" )" +busybox_image="$(cat "${iidfile}")" rm "${iidfile}" # Override build args for what's needed in main Dockerfile BUILD_ARGS=() BUILD_ARGS+=("--build-arg=debian_version=${DEBIAN_VERSION}") BUILD_ARGS+=("--build-arg=busybox_image=${busybox_image}") + +TEST_ARGS=() diff --git a/images/base-glibc-debian-bash/prepare.sh b/images/base-glibc-debian-bash/prepare.sh index d3459adb0fc..e69357d646f 100644 --- a/images/base-glibc-debian-bash/prepare.sh +++ b/images/base-glibc-debian-bash/prepare.sh @@ -1,5 +1,7 @@ -source ../versions.sh +source ../image_config.sh IMAGE_NAME="${BASE_DEBIAN_IMAGE_NAME}" TAG="$BASE_TAG" BUILD_ARGS=() BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") + +TEST_ARGS=() diff --git a/images/bioconda-recipes-issue-responder/Dockerfile b/images/bioconda-recipes-issue-responder/Dockerfile deleted file mode 100644 index 9b94896414c..00000000000 --- a/images/bioconda-recipes-issue-responder/Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -ARG base=quay.io/bioconda/base-glibc-busybox-bash:2.0.0 - -FROM quay.io/bioconda/create-env:2.0.0 as build -RUN /opt/create-env/env-execute \ - create-env \ - --conda=mamba \ - --strip-files=\* \ - --remove-paths=\*.a \ - --remove-paths=\*.pyc \ - /usr/local \ - aiohttp \ - anaconda-client \ - ca-certificates \ - git \ - openssh \ - python=3.8 \ - pyyaml \ - skopeo \ - && \ - # Workaround for https://github.com/conda/conda/issues/10490 - export CONDA_REPODATA_THREADS=1 && \ - # We don't need Perl (used by Git for some functionalities). - # => Remove perl package to reduce image size. - /opt/create-env/env-execute \ - conda remove --yes \ - --prefix=/usr/local \ - --force-remove \ - perl - -FROM "${base}" -COPY --from=build /usr/local /usr/local -COPY ./issue-responder /usr/local/bin/ - -# Used environment variables: -# - JOB_CONTEXT -# - BOT_TOKEN -# - GITTER_TOKEN -# - ANACONDA_TOKEN -# - QUAY_OAUTH_TOKEN -# - QUAY_LOGIN diff --git a/images/bioconda-recipes-issue-responder/Dockerfile.test b/images/bioconda-recipes-issue-responder/Dockerfile.test deleted file mode 100644 index 665dc72ed0a..00000000000 --- a/images/bioconda-recipes-issue-responder/Dockerfile.test +++ /dev/null @@ -1,7 +0,0 @@ -ARG base - - -FROM "${base}" -RUN JOB_CONTEXT='{"event": {"issue": {}}}' \ - /usr/local/env-execute \ - issue-responder diff --git a/images/bioconda-recipes-issue-responder/issue-responder b/images/bioconda-recipes-issue-responder/issue-responder deleted file mode 100755 index 9d915f2f528..00000000000 --- a/images/bioconda-recipes-issue-responder/issue-responder +++ /dev/null @@ -1,615 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import os -import re -import sys -from asyncio import gather, run, sleep -from asyncio.subprocess import create_subprocess_exec -from pathlib import Path -from shutil import which -from subprocess import check_call -from typing import Any, Dict, List, Optional, Set, Tuple -from zipfile import ZipFile - -from aiohttp import ClientSession -from yaml import safe_load - -logger = logging.getLogger(__name__) -log = logger.info - - -async def async_exec( - command: str, *arguments: str, env: Optional[Dict[str, str]] = None -) -> None: - process = await create_subprocess_exec(command, *arguments, env=env) - return_code = await process.wait() - if return_code != 0: - raise RuntimeError( - f"Failed to execute {command} {arguments} (return code: {return_code})" - ) - - -# Post a comment on a given issue/PR with text in message -async def send_comment(session: ClientSession, issue_number: int, message: str) -> None: - token = os.environ["BOT_TOKEN"] - url = ( - f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{issue_number}/comments" - ) - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - payload = {"body": message} - log("Sending comment: url=%s", url) - log("Sending comment: payload=%s", payload) - async with session.post(url, headers=headers, json=payload) as response: - status_code = response.status - log("the response code was %d", status_code) - if status_code < 200 or status_code > 202: - sys.exit(1) - - -def list_zip_contents(fname: str) -> [str]: - f = ZipFile(fname) - return [e.filename for e in f.infolist() if e.filename.endswith('.tar.gz') or e.filename.endswith('.tar.bz2')] - - -# Download a zip file from url to zipName.zip and return that path -# Timeout is 30 minutes to compensate for any network issues -async def download_file(session: ClientSession, zipName: str, url: str) -> str: - async with session.get(url, timeout=60*30) as response: - if response.status == 200: - ofile = f"{zipName}.zip" - with open(ofile, 'wb') as fd: - while True: - chunk = await response.content.read(1024*1024*1024) - if not chunk: - break - fd.write(chunk) - return ofile - return None - - -# Find artifact zip files, download them and return their URLs and contents -async def fetch_azure_zip_files(session: ClientSession, buildId: str) -> [(str, str)]: - artifacts = [] - - url = f"https://dev.azure.com/bioconda/bioconda-recipes/_apis/build/builds/{buildId}/artifacts?api-version=4.1" - log("contacting azure %s", url) - async with session.get(url) as response: - # Sometimes we get a 301 error, so there are no longer artifacts available - if response.status == 301: - return artifacts - res = await response.text() - - res_object = safe_load(res) - if res_object['count'] == 0: - return artifacts - - for artifact in res_object['value']: - zipName = artifact['name'] # LinuxArtifacts or OSXArtifacts - zipUrl = artifact['resource']['downloadUrl'] - log(f"zip name is {zipName} url {zipUrl}") - fname = await download_file(session, zipName, zipUrl) - if not fname: - continue - pkgsImages = list_zip_contents(fname) - for pkg in pkgsImages: - artifacts.append((zipUrl, pkg)) - - return artifacts - - -def parse_azure_build_id(url: str) -> str: - return re.search("buildId=(\d+)", url).group(1) - - -# Given a PR and commit sha, fetch a list of the artifact zip files URLs and their contents -async def fetch_pr_sha_artifacts(session: ClientSession, pr: int, sha: str) -> List[Tuple[str, str]]: - url = 
f"https://api.github.com/repos/bioconda/bioconda-recipes/commits/{sha}/check-runs" - - headers = { - "User-Agent": "BiocondaCommentResponder", - "Accept": "application/vnd.github.antiope-preview+json", - } - async with session.get(url, headers=headers) as response: - response.raise_for_status() - res = await response.text() - check_runs = safe_load(res) - log(f"DEBUG url was {url} returned {check_runs}") - - for check_run in check_runs["check_runs"]: - # The names are "bioconda.bioconda-recipes (test_osx test_osx)" or similar - if check_run["name"].startswith("bioconda.bioconda-recipes (test_"): - # The azure build ID is in the details_url as buildId=\d+ - buildID = parse_azure_build_id(check_run["details_url"]) - log(f"DEBUG buildID is {buildID}") - zipFiles = await fetch_azure_zip_files(session, buildID) - log(f"DEBUG zipFiles are {zipFiles}") - return zipFiles # We've already fetched all possible artifacts - - return [] - - -# Given a PR and commit sha, post a comment with any artifacts -async def make_artifact_comment(session: ClientSession, pr: int, sha: str) -> None: - artifacts = await fetch_pr_sha_artifacts(session, pr, sha) - nPackages = len(artifacts) - log(f"DEBUG the artifacts are {artifacts}") - - if nPackages > 0: - comment = "Package(s) built on Azure are ready for inspection:\n\n" - comment += "Arch | Package | Zip File\n-----|---------|---------\n" - install_noarch = "" - install_linux = "" - install_osx = "" - - # Table of packages and repodata.json - for URL, artifact in artifacts: - if not (package_match := re.match(r"^((.+)\/(.+)\/(.+)\/(.+\.tar\.bz2))$", artifact)): - continue - url, archdir, basedir, subdir, packageName = package_match.groups() - urlBase = URL[:-3] # trim off zip from format= - urlBase += "file&subPath=%2F{}".format("%2F".join([basedir, subdir])) - conda_install_url = urlBase - # N.B., the zip file URL is nearly identical to the URL for the individual member files. It's unclear if there's an API for getting the correct URL to the files themselves - #pkgUrl = "%2F".join([urlBase, packageName]) - #repoUrl = "%2F".join([urlBase, "current_repodata.json"]) - #resp = await session.get(repoUrl) - - if subdir == "noarch": - comment += "noarch |" - elif subdir == "linux-64": - comment += "linux-64 |" - else: - comment += "osx-64 |" - comment += f" {packageName} | [{archdir}]({URL})\n" - - # Conda install examples - comment += "***\n\nYou may also use `conda` to install these after downloading and extracting the appropriate zip file. From the LinuxArtifacts or OSXArtifacts directories:\n\n" - comment += "```conda install -c ./packages \n```\n" - - # Table of containers - comment += "***\n\nDocker image(s) built (images are in the LinuxArtifacts zip file above):\n\n" - comment += "Package | Tag | Install with `docker`\n" - comment += "--------|-----|----------------------\n" - - for URL, artifact in artifacts: - if artifact.endswith(".tar.gz"): - image_name = artifact.split("/").pop()[: -len(".tar.gz")] - if ':' in image_name: - package_name, tag = image_name.split(':', 1) - #image_url = URL[:-3] # trim off zip from format= - #image_url += "file&subPath=%2F{}.tar.gz".format("%2F".join(["images", '%3A'.join([package_name, tag])])) - comment += f"[{package_name}] | {tag} | " - comment += f'
show`gzip -dc LinuxArtifacts/images/{image_name}.tar.gz \\| docker load`\n' - comment += "\n\n" - else: - comment = ( - "No artifacts found on the most recent Azure build. " - "Either the build failed, the artifacts have were removed due to age, or the recipe was blacklisted/skipped." - ) - await send_comment(session, pr, comment) - - -# Post a comment on a given PR with its CircleCI artifacts -async def artifact_checker(session: ClientSession, issue_number: int) -> None: - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}" - headers = { - "User-Agent": "BiocondaCommentResponder", - } - async with session.get(url, headers=headers) as response: - response.raise_for_status() - res = await response.text() - pr_info = safe_load(res) - - await make_artifact_comment(session, issue_number, pr_info["head"]["sha"]) - - -# Return true if a user is a member of bioconda -async def is_bioconda_member(session: ClientSession, user: str) -> bool: - token = os.environ["BOT_TOKEN"] - url = f"https://api.github.com/orgs/bioconda/members/{user}" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - rc = 404 - async with session.get(url, headers=headers) as response: - try: - response.raise_for_status() - rc = response.status - except: - # Do nothing, this just prevents things from crashing on 404 - pass - - return rc == 204 - - -# Reposts a quoted message in a given issue/PR if the user isn't a bioconda member -async def comment_reposter(session: ClientSession, user: str, pr: int, message: str) -> None: - if await is_bioconda_member(session, user): - log("Not reposting for %s", user) - return - log("Reposting for %s", user) - await send_comment( - session, - pr, - f"Reposting for @{user} to enable pings (courtesy of the BiocondaBot):\n\n> {message}", - ) - - -# Fetch and return the JSON of a PR -# This can be run to trigger a test merge -async def get_pr_info(session: ClientSession, pr: int) -> Any: - token = os.environ["BOT_TOKEN"] - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{pr}" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - async with session.get(url, headers=headers) as response: - response.raise_for_status() - res = await response.text() - pr_info = safe_load(res) - return pr_info - - -# Update a branch from upstream master, this should be run in a try/catch -async def update_from_master_runner(session: ClientSession, pr: int) -> None: - async def git(*args: str) -> None: - return await async_exec("git", *args) - - # Setup git, otherwise we can't push - await git("config", "--global", "user.email", "biocondabot@gmail.com") - await git("config", "--global", "user.name", "BiocondaBot") - - pr_info = await get_pr_info(session, pr) - remote_branch = pr_info["head"]["ref"] - remote_repo = pr_info["head"]["repo"]["full_name"] - - max_depth = 2000 - # Clone - await git( - "clone", - f"--depth={max_depth}", - f"--branch={remote_branch}", - f"git@github.com:{remote_repo}.git", - "bioconda-recipes", - ) - - async def git_c(*args: str) -> None: - return await git("-C", "bioconda-recipes", *args) - - # Add/pull upstream - await git_c("remote", "add", "upstream", "https://github.com/bioconda/bioconda-recipes") - await git_c("fetch", f"--depth={max_depth}", "upstream", "master") - - # Merge - await git_c("merge", "upstream/master") - - await git_c("push") - - -# Merge the upstream master branch into a PR branch, leave a message on error -async def 
update_from_master(session: ClientSession, pr: int) -> None: - try: - await update_from_master_runner(session, pr) - except Exception as e: - await send_comment( - session, - pr, - "I encountered an error updating your PR branch. You can report this to bioconda/core if you'd like.\n-The Bot", - ) - sys.exit(1) - - -# Ensure there's at least one approval by a member -async def approval_review(session: ClientSession, issue_number: int) -> bool: - token = os.environ["BOT_TOKEN"] - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}/reviews" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - async with session.get(url, headers=headers) as response: - response.raise_for_status() - res = await response.text() - reviews = safe_load(res) - - approved_reviews = [review for review in reviews if review["state"] == "APPROVED"] - if not approved_reviews: - return False - - # Ensure the review author is a member - return any( - gather( - *( - is_bioconda_member(session, review["user"]["login"]) - for review in approved_reviews - ) - ) - ) - - -# Check the mergeable state of a PR -async def check_is_mergeable( - session: ClientSession, issue_number: int, second_try: bool = False -) -> bool: - token = os.environ["BOT_TOKEN"] - # Sleep a couple of seconds to allow the background process to finish - if second_try: - await sleep(3) - - # PR info - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - async with session.get(url, headers=headers) as response: - response.raise_for_status() - res = await response.text() - pr_info = safe_load(res) - - # We need mergeable == true and mergeable_state == clean, an approval by a member and - if pr_info.get("mergeable") is None and not second_try: - return await check_is_mergeable(session, issue_number, True) - elif ( - pr_info.get("mergeable") is None - or not pr_info["mergeable"] - or pr_info["mergeable_state"] != "clean" - ): - return False - - return await approval_review(session, issue_number) - - -# Ensure uploaded containers are in repos that have public visibility -async def toggle_visibility(session: ClientSession, container_repo: str) -> None: - url = f"https://quay.io/api/v1/repository/biocontainers/{container_repo}/changevisibility" - QUAY_OAUTH_TOKEN = os.environ["QUAY_OAUTH_TOKEN"] - headers = { - "Authorization": f"Bearer {QUAY_OAUTH_TOKEN}", - "Content-Type": "application/json", - } - body = {"visibility": "public"} - rc = 0 - try: - async with session.post(url, headers=headers, json=body) as response: - rc = response.status - except: - # Do nothing - pass - log("Trying to toggle visibility (%s) returned %d", url, rc) - - -# Download an artifact from CircleCI, rename and upload it -async def download_and_upload(session: ClientSession, x: str) -> None: - basename = x.split("/").pop() - # the tarball needs a regular name without :, the container needs pkg:tag - image_name = basename.replace("%3A", ":").replace("\n", "").replace(".tar.gz", "") - file_name = basename.replace("%3A", "_").replace("\n", "") - - async with session.get(x) as response: - with open(file_name, "wb") as file: - logged = 0 - loaded = 0 - while chunk := await response.content.read(256 * 1024): - file.write(chunk) - loaded += len(chunk) - if loaded - logged >= 50 * 1024 ** 2: - log("Downloaded %.0f MiB: %s", max(1, loaded / 1024 ** 2), x) - logged = loaded - log("Downloaded 
%.0f MiB: %s", max(1, loaded / 1024 ** 2), x) - - if x.endswith(".gz"): - # Container - log("uploading with skopeo: %s", file_name) - # This can fail, retry with 5 second delays - count = 0 - maxTries = 5 - success = False - QUAY_LOGIN = os.environ["QUAY_LOGIN"] - env = os.environ.copy() - # TODO: Fix skopeo package to find certificates on its own. - skopeo_path = which("skopeo") - if not skopeo_path: - raise RuntimeError("skopeo not found") - env["SSL_CERT_DIR"] = str(Path(skopeo_path).parents[1].joinpath("ssl")) - while count < maxTries: - try: - await async_exec( - "skopeo", - "--command-timeout", - "600s", - "copy", - f"docker-archive:{file_name}", - f"docker://quay.io/biocontainers/{image_name}", - "--dest-creds", - QUAY_LOGIN, - env=env, - ) - success = True - break - except: - count += 1 - if count == maxTries: - raise - await sleep(5) - if success: - await toggle_visibility(session, basename.split("%3A")[0]) - elif x.endswith(".bz2"): - # Package - log("uploading package") - ANACONDA_TOKEN = os.environ["ANACONDA_TOKEN"] - await async_exec("anaconda", "-t", ANACONDA_TOKEN, "upload", file_name, "--force") - - log("cleaning up") - os.remove(file_name) - - -# Upload artifacts to quay.io and anaconda, return the commit sha -# Only call this for mergeable PRs! -async def upload_artifacts(session: ClientSession, pr: int) -> str: - # Get last sha - pr_info = await get_pr_info(session, pr) - sha: str = pr_info["head"]["sha"] - - # Fetch the artifacts - artifacts = await fetch_pr_sha_artifacts(session, pr, sha) - artifacts = [artifact for artifact in artifacts if artifact.endswith((".gz", ".bz2"))] - assert artifacts - - # Download/upload Artifacts - for artifact in artifacts: - await download_and_upload(session, artifact) - - return sha - - -# Assume we have no more than 250 commits in a PR, which is probably reasonable in most cases -async def get_pr_commit_message(session: ClientSession, issue_number: int) -> str: - token = os.environ["BOT_TOKEN"] - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}/commits" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - async with session.get(url, headers=headers) as response: - response.raise_for_status() - res = await response.text() - commits = safe_load(res) - message = "".join(f" * {commit['commit']['message']}\n" for commit in reversed(commits)) - return message - - -# Merge a PR -async def merge_pr(session: ClientSession, pr: int) -> None: - token = os.environ["BOT_TOKEN"] - await send_comment( - session, - pr, - "I will attempt to upload artifacts and merge this PR. 
This may take some time, please have patience.", - ) - - try: - mergeable = await check_is_mergeable(session, pr) - log("mergeable state of %s is %s", pr, mergeable) - if not mergeable: - await send_comment(session, pr, "Sorry, this PR cannot be merged at this time.") - else: - log("uploading artifacts") - sha = await upload_artifacts(session, pr) - log("artifacts uploaded") - - # Carry over last 250 commit messages - msg = await get_pr_commit_message(session, pr) - - # Hit merge - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{pr}/merge" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - payload = { - "sha": sha, - "commit_title": f"[ci skip] Merge PR {pr}", - "commit_message": f"Merge PR #{pr}, commits were: \n{msg}", - "merge_method": "squash", - } - log("Putting merge commit") - async with session.put(url, headers=headers, json=payload) as response: - rc = response.status - log("body %s", payload) - log("merge_pr the response code was %s", rc) - except: - await send_comment( - session, - pr, - "I received an error uploading the build artifacts or merging the PR!", - ) - logger.exception("Upload failed", exc_info=True) - - -# Add the "Please review and merge" label to a PR -async def add_pr_label(session: ClientSession, pr: int) -> None: - token = os.environ["BOT_TOKEN"] - url = f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{pr}/labels" - headers = { - "Authorization": f"token {token}", - "User-Agent": "BiocondaCommentResponder", - } - payload = {"labels": ["please review & merge"]} - async with session.post(url, headers=headers, json=payload) as response: - response.raise_for_status() - - -async def gitter_message(session: ClientSession, msg: str) -> None: - token = os.environ["GITTER_TOKEN"] - room_id = "57f3b80cd73408ce4f2bba26" - url = f"https://api.gitter.im/v1/rooms/{room_id}/chatMessages" - headers = { - "Authorization": f"Bearer {token}", - "Content-Type": "application/json", - "Accept": "application/json", - "User-Agent": "BiocondaCommentResponder", - } - payload = {"text": msg} - log("Sending request to %s", url) - async with session.post(url, headers=headers, json=payload) as response: - response.raise_for_status() - - -async def notify_ready(session: ClientSession, pr: int) -> None: - try: - await gitter_message( - session, - f"PR ready for review: https://github.com/bioconda/bioconda-recipes/pull/{pr}", - ) - except Exception: - logger.exception("Posting to Gitter failed", exc_info=True) - # Do not die if we can't post to gitter! 
- - -# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)` -async def main() -> None: - job_context = safe_load(os.environ["JOB_CONTEXT"]) - log("%s", job_context) - if job_context["event"]["issue"].get("pull_request") is None: - return - issue_number = job_context["event"]["issue"]["number"] - - original_comment = job_context["event"]["comment"]["body"] - log("the comment is: %s", original_comment) - - comment = original_comment.lower() - async with ClientSession() as session: - if comment.startswith(("@bioconda-bot", "@biocondabot")): - if "please update" in comment: - await update_from_master(session, issue_number) - elif " hello" in comment: - await send_comment(session, issue_number, "Yes?") - elif " please fetch artifacts" in comment or " please fetch artefacts" in comment: - await artifact_checker(session, issue_number) - elif " please merge" in comment: - await send_comment(session, issue_number, "Sorry, I'm currently disabled") - #await merge_pr(session, issue_number) - elif " please add label" in comment: - await add_pr_label(session, issue_number) - await notify_ready(session, issue_number) - # else: - # # Methods in development can go below, flanked by checking who is running them - # if job_context["actor"] != "dpryan79": - # console.log("skipping") - # sys.exit(0) - elif "@bioconda/" in comment: - await comment_reposter( - session, job_context["actor"], issue_number, original_comment - ) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - run(main()) diff --git a/images/bot/Dockerfile b/images/bot/Dockerfile index 372e52f16dd..8179e24d1a1 100644 --- a/images/bot/Dockerfile +++ b/images/bot/Dockerfile @@ -1,10 +1,11 @@ -ARG base=quay.io/bioconda/base-glibc-busybox-bash:2.1.0 +ARG base= +ARG create_env= +FROM "${create_env}" as build -FROM quay.io/bioconda/create-env as build ## If gettext pulls in libxml2, use one that doesn't bloat the image with ICU. #RUN . /opt/create-env/env-activate.sh \ # && \ -# mamba install --yes curl conda-build patch \ +# conda install --yes curl conda-build patch \ # && \ # curl -L \ # https://github.com/conda-forge/libxml2-feedstock/archive/master.tar.gz \ @@ -24,7 +25,6 @@ RUN . /opt/create-env/env-activate.sh && \ export CONDA_ADD_PIP_AS_PYTHON_DEPENDENCY=0 \ && \ create-env \ - --conda=mamba \ --strip-files=\* \ --remove-paths=\*.a \ --remove-paths=\*.c \ diff --git a/images/bot/prepare.sh b/images/bot/prepare.sh new file mode 100644 index 00000000000..6f5a64c1e48 --- /dev/null +++ b/images/bot/prepare.sh @@ -0,0 +1,14 @@ +source ../image_config.sh +IMAGE_NAME="${BOT_IMAGE_NAME}" + +# Depends on create-env, which in turn depends on bioconda-utils +TAG=$BIOCONDA_IMAGE_TAG + +# Signal to build.sh that we only need an amd64 version +ONLY_AMD64=true + +BUILD_ARGS=() +BUILD_ARGS+=("--build-arg=create_env=${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}-${CURRENT_ARCH}") +BUILD_ARGS+=("--build-arg=base=${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}-${CURRENT_ARCH}") + +TEST_ARGS=() diff --git a/images/build-env/prepare.sh b/images/build-env/prepare.sh index 00022da36d7..520c4569175 100644 --- a/images/build-env/prepare.sh +++ b/images/build-env/prepare.sh @@ -1,6 +1,6 @@ set -e -source ../versions.sh +source ../image_config.sh IMAGE_NAME="${BUILD_ENV_IMAGE_NAME}" TAG="${BIOCONDA_UTILS_VERSION}_base$BASE_TAG" @@ -8,7 +8,7 @@ TAG="${BIOCONDA_UTILS_VERSION}_base$BASE_TAG" cp -r ../locale/C.utf8 . 
# We are aiming to get the entire repo into the container so that versioneer -# can correctly determine the version. But the direc +# can correctly determine the version. if [ -e "./bioconda-utils" ]; then rm -rf "./bioconda-utils" fi @@ -23,3 +23,5 @@ BUILD_ARGS=() # Where to find the copied-over bioconda-utils BUILD_ARGS+=("--build-arg=BIOCONDA_UTILS_FOLDER=bioconda-utils") BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") + +TEST_ARGS=() diff --git a/images/build.sh b/images/build.sh index c2dbd683af1..0c3a809f1aa 100755 --- a/images/build.sh +++ b/images/build.sh @@ -19,16 +19,23 @@ # ARCHS: space-separated string of archs to build # IMAGE_NAME: name of image; created manifest will be IMAGE_NAME:tag # BUILD_ARGS: array of arguments like ("--build-arg=argument1=the-value", "--build-arg=arg2=a") +# +# Output is: +# - an image for each arch, tagged IMAGE_NAME:TAG-arch +# - tar files of those images in ../../image-artifacts, ready to upload as +# artifacts or to load into other runtimes (the multi-arch manifests are +# composed later by build_and_push_manifest in image_config.sh) set -xeu IMAGE_DIR=$1 +echo === BUILDING $IMAGE_DIR === + cd $IMAGE_DIR [ -e prepare.sh ] && source prepare.sh - # Clean up any manifests before we start. # IMAGE_NAME and TAG should be created by prepare.sh buildah manifest rm "${IMAGE_NAME}:${TAG}" || true @@ -37,6 +44,8 @@ buildah manifest create "${IMAGE_NAME}:${TAG}" # ARCHS should be created by prepare.sh for arch in $ARCHS; do + [ "${ONLY_AMD64:-false}" == "true" -a "${arch}" != "amd64" ] && continue + # This logic is specific to the build-env. We need an arch-specific base # image, but the nomenclature is inconsistent. So we directly map arch names # to conda-forge base images. @@ -50,40 +59,52 @@ for arch in $ARCHS; do fi fi - source ../env_var_inventory.sh + # Make arch available as an environment variable and source prepare.sh again + export CURRENT_ARCH="$arch" + [ -e prepare.sh ] && source prepare.sh + + # defined in image_config.sh, which is expected to have been sourced prior to + # running this script. + env_var_inventory # Actual building happens here. We will keep track of the built image in # $image_id. - iidfile="$( mktemp )" + iidfile="$(mktemp)" buildah bud \ --arch="${arch}" \ --iidfile="${iidfile}" \ --file=Dockerfile \ ${BUILD_ARGS[@]} \ $BASE_IMAGE_BUILD_ARG - image_id="$( cat "${iidfile}" )" + image_id="$(cat "${iidfile}")" rm "${iidfile}" # In order for GitHub Actions to inherit container permissions from the repo # permissions, we need to add a special label. However `buildah config # --label` operates on a container, not an image. So we add the label to # a temporary container and then save the resulting image. - container="$( buildah from "${image_id}" )" + container="$(buildah from "${image_id}")" buildah config \ --label="org.opencontainers.image.source=https://github.com/bioconda/bioconda-utils" \ "${container}" - image_id="$( buildah commit "${container}" )" + image_id="$(buildah commit "${container}")" buildah rm "${container}" # Add -$arch suffix to image's tag buildah tag "${image_id}" "${IMAGE_NAME}:${TAG}-${arch}" - # Add it to the manifest, which has no -$arch suffix - buildah manifest add "${IMAGE_NAME}:${TAG}" "${image_id}" + # Run basic test in Dockerfile.test, passing the exact image ID that was just + # created.
+ TEST_ARGS=${TEST_ARGS:-""} + buildah bud \ + --arch="${arch}" \ + --build-arg=base=${image_id} \ + ${TEST_ARGS[@]} \ + --file=Dockerfile.test + + # Save image for storing as artifact or to load into docker + mkdir -p ../../image-artifacts + podman save "${IMAGE_NAME}:${TAG}-${arch}" >"../../image-artifacts/${IMAGE_NAME}-${arch}.tar" + ls ../../image-artifacts - # copy image over to local docker registry, when running locally. Otherwise, - # the CI will use ghcr.io as an intermediate registry. - if [ "${CI:-false}" == "false" ]; then - podman save "${IMAGE_NAME}:${TAG}-${arch}" | docker load - fi done diff --git a/images/create-env/prepare.sh b/images/create-env/prepare.sh index c2096c0cd14..fa08f367336 100644 --- a/images/create-env/prepare.sh +++ b/images/create-env/prepare.sh @@ -1,37 +1,21 @@ -source ../versions.sh +source ../image_config.sh IMAGE_NAME="${CREATE_ENV_IMAGE_NAME}" TAG=$BIOCONDA_IMAGE_TAG BUILD_ARGS=() - - # Get the exact versions of mamba and conda that were installed in build-env. # -# If this tag exists on quay.io (that is, this create-env is being built in -# a subsequent run), then use that. Otherwise, we assume this tag has already -# been built and pushed to GitHub Container Registry (and the GitHub Actions job -# dependency should reflect this) -BUILD_ENV_REGISTRY=${BUILD_ENV_REGISTRY:="ghcr.io/bioconda"} -if [ $(tag_exists $BUILD_ENV_IMAGE_NAME $BIOCONDA_IMAGE_TAG) ]; then - BUILD_ENV_REGISTRY=quay.io/bioconda -fi - +# TODO: here we hard-code amd64, on the (reasonable) assumption that the +# arm64 image has the same conda version. CONDA_VERSION=$( - podman run -t $BUILD_ENV_REGISTRY/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG} \ - bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" + podman run -t ${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_IMAGE_TAG}-amd64 \ + bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" ) # Remove trailing \r with parameter expansion export CONDA_VERSION=${CONDA_VERSION%$'\r'} BUILD_ARGS+=("--build-arg=CONDA_VERSION=$CONDA_VERSION") +BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}-${CURRENT_ARCH}") -# Needs busybox image to copy some items over -BASE_BUSYBOX_REGISTRY=${BASE_BUSYBOX_REGISTRY:="ghcr.io/bioconda"} -if [ $(tag_exists $BASE_BUSYBOX_IMAGE_NAME $BASE_TAG) ]; then - BASE_BUSYBOX_REGISTRY=quay.io/bioconda -fi - -BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=${BASE_BUSYBOX_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}") - -# TEST_BUILD_ARGS=() -# TEST_BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") +TEST_ARGS=() +TEST_ARGS+=("--build-arg=BUSYBOX_IMAGE=${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}-${CURRENT_ARCH}") diff --git a/images/env_var_inventory.sh b/images/env_var_inventory.sh deleted file mode 100644 index 59c92922435..00000000000 --- a/images/env_var_inventory.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# There are a lot of environment variables used here. This script aims to -# document them as well as provide a mechanism (e.g., in CI workflows) to show -# their current values.
-# -# Typical usage: -# -# source versions.sh -# source env_var_inventory.sh -# -echo "--------BEGIN ENVIRONMENT VARIABLE INVENTORY ---------------------------------" -while read -r name description; do - description="${description//\"/}" - value="${!name:-}" - echo -e "${name}\t${value}\t$description" -done << 'EOF' | column -t -s $'\t' -ARCHS "architectures to build images for" -DEBIAN_VERSION "version of debian for extended base image" -BUSYBOX_VERSION "version of busybox for base image" -BIOCONDA_UTILS_VERSION "version of bioconda-utils to use" -BASE_DEBIAN_IMAGE_NAME "name for debian image" -BASE_BUSYBOX_IMAGE_NAME "name for busybox image" -BUILD_ENV_IMAGE_NAME "name for build image" -CREATE_ENV_IMAGE_NAME "name for create image" -BASE_TAG "the base version tag to add to image tags" -BIOCONDA_IMAGE_TAG "full bioconda + image version" -BUILD_ENV_REGISTRY "where the build image should come from (used in CI)" -CREATE_ENV_REGISTRY "where the create image should come from (used in CI)" -DEFAULT_BASE_IMAGE "where the busybox image should come from (used in CI and mulled-build)" -DEST_BASE_REGISTRY "registry where the busybox image should go to (used in CI)" -DEST_BASE_IMAGE "fully qualified busybox image destination (used in CI)" -DEFAULT_EXTENDED_BASE_IMAGE "where the debian image should come from (used in CI and mulled-build)" -DEST_EXTENDED_BASE_REGISTRY "where the debian image should go to (used in CI)" -BUILD_ENV_IMAGE "fully qualified image for building (used in CI)" -CREATE_ENV_IMAGE "fully qualified image for creating (used in CI)" -BUILD_ARGS "full build arguments passed to podman, typically created by /prepare.sh" -busybox_image "initial busybox image, created by images/base-glibc-busybox-bash/prepare.sh" -IMAGE_NAME "image name as determined by /prepare.sh" -TAG "tag as determined by prepare.sh" -BASE_IMAGE_CONDAFORGE_AMD64 "x86_64 base image for building" -BASE_IMAGE_CONDAFORGE_ARM64 "ARM64 base image for building" -BASE_IMAGE_BUILD_ARG "unique to build image, this determines the upstream conda-forge image to use" -arch "current architecture" -EOF - -echo "--------END ENVIRONMENT VARIABLE INVENTORY ---------------------------------" diff --git a/images/image_config.sh b/images/image_config.sh new file mode 100644 index 00000000000..6c4be72cfd4 --- /dev/null +++ b/images/image_config.sh @@ -0,0 +1,146 @@ +#!/bin/bash + +#----------------------------VERSIONS------------------------------------------- +# Configures various versions to be used throughout infrastructure. +ARCHS="amd64 arm64" +DEBIAN_VERSION="12.5" +BUSYBOX_VERSION="1.36.1" +BASE_DEBIAN_IMAGE_NAME="tmp-base-debian" +BASE_BUSYBOX_IMAGE_NAME="tmp-base-busybox" +BUILD_ENV_IMAGE_NAME="tmp-build-env" +CREATE_ENV_IMAGE_NAME="tmp-create-env" +BOT_IMAGE_NAME="tmp-bot" +BASE_TAG="0.2" +BASE_IMAGE_CONDAFORGE_AMD64="quay.io/condaforge/linux-anvil-x86_64:cos7" +BASE_IMAGE_CONDAFORGE_ARM64="quay.io/condaforge/linux-anvil-aarch64:cos7" +CURRENT_ARCH=${CURRENT_ARCH:-""} + +# Inspect this repo to get the currently-checked-out version, which matches +# what versioneer.py does -- but if BIOCONDA_UTILS_VERSION_OVERRIDE was set +# outside this script, use that instead. +BIOCONDA_UTILS_VERSION=${BIOCONDA_UTILS_VERSION_OVERRIDE:-$(git describe --tags --dirty --always)} + +# This will be used as the tag for create-env and build-env images, which +# depend on bioconda-utils. The base images do not depend on bioconda-utils and +# will only have the base tag. 
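+# (As an example, a hypothetical checkout at tag v3.1.0 with BASE_TAG=0.2
+# yields BIOCONDA_IMAGE_TAG=v3.1.0_base0.2; the exact version string is
+# whatever git-describe reports.)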
+BIOCONDA_IMAGE_TAG=${BIOCONDA_UTILS_VERSION}_base${BASE_TAG}
+#-------------------------------------------------------------------------------
+
+
+
+#-------------------------------FUNCTIONS---------------------------------------
+
+function tag_exists() {
+  # Succeeds (returns 0) if every given tag exists for the image on quay.io,
+  # otherwise returns 1. Also echoes "exists" for each tag found, so the
+  # function works both in `if tag_exists ...` and in command substitutions.
+  # Skips "latest" tags (likely they will always be present)
+  # $1: image name
+  # $2: space-separated tags
+  local IMAGE_NAME="$1"
+  local TAGS="$2"
+
+  response="$(curl -sL "https://quay.io/api/v1/repository/bioconda/${IMAGE_NAME}/tag/")"
+
+  # Images can be set to expire; the jq query selects only non-expired images.
+  existing_tags="$(
+    printf %s "${response}" |
+      jq -r '.tags[]|select(.end_ts == null or .end_ts >= now)|.name'
+  )" ||
+    {
+      printf %s\\n \
+        'Could not get list of image tags.' \
+        'Does the repository exist on Quay.io?' \
+        'Quay.io REST API response was:' \
+        "${response}" >&2
+      return 1
+    }
+  local missing=0
+  for tag in $TAGS; do
+    case "${tag}" in
+      "latest") ;;
+      *)
+        if printf %s "${existing_tags}" | grep -qxF "${tag}"; then
+          printf 'Tag %s already exists for %s on quay.io!\n' "${tag}" "${IMAGE_NAME}" >&2
+          echo "exists"
+        else
+          missing=1
+        fi
+        ;;
+    esac
+  done
+  return "${missing}"
+}
+
+function build_and_push_manifest() {
+  # Creates a local manifest, adds containers for multiple archs, and pushes to
+  # a registry.
+  #
+  # Typical usage:
+  #
+  #   build_and_push_manifest ${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} docker://localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} "--tls-verify=false"
+  #
+  # or
+  #
+  #   build_and_push_manifest ${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} quay.io/bioconda/${BASE_BUSYBOX_IMAGE_NAME}:latest
+  #
+  local source=$1
+  local dest=$2
+  local additional_args=${3:-""}
+
+  local manifest_name="local_${source}"
+
+  buildah manifest rm "${manifest_name}" || true
+
+  # Locally-named manifest to which we'll add the different archs.
+  buildah manifest create "${manifest_name}"
+
+  # Expects images for archs to be built already by buildah/podman. Here we add
+  # them to local manifest.
+  for arch in $ARCHS; do
+
+    # skip non-amd64 if configured
+    [ "${ONLY_AMD64:-false}" == "true" -a "${arch}" != "amd64" ] && continue
+
+    imgid=$(buildah pull --arch=$arch "${source}-${arch}")
+
+    buildah manifest add "${manifest_name}" "${imgid}"
+  done
+
+  # Note that --all is required to push the images themselves, not just the
+  # manifest list.
+  podman manifest push --all $additional_args "${manifest_name}" "${dest}"
+}
+
+
+function env_var_inventory () {
+  # There are a lot of environment variables used here; this function describes
+  # them and reports their values at call time.
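+  #
+  # Typical usage:
+  #
+  #   source images/image_config.sh
+  #   env_var_inventory
+  #
+  # Variables that are unset at the time of the call are shown with an empty
+  # value.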
+
+  echo "--------BEGIN ENVIRONMENT VARIABLE INVENTORY ---------------------------------"
+  while read -r name description; do
+    description="${description//\"/}"
+    value="${!name:-}"
+    echo -e "${name}\t${value}\t$description"
+  done <<'EOF' | column -t -s $'\t'
+  ARCHS "architectures to build images for"
+  DEBIAN_VERSION "version of debian for extended base image"
+  BUSYBOX_VERSION "version of busybox for base image"
+  BIOCONDA_UTILS_VERSION "version of bioconda-utils to use"
+  BASE_DEBIAN_IMAGE_NAME "name for debian image"
+  BASE_BUSYBOX_IMAGE_NAME "name for busybox image"
+  BUILD_ENV_IMAGE_NAME "name for build image"
+  CREATE_ENV_IMAGE_NAME "name for create image"
+  BOT_IMAGE_NAME "name for bot image"
+  CURRENT_ARCH "arch for the current iteration of the build loop"
+  BASE_TAG "the base version tag to add to image tags"
+  BIOCONDA_IMAGE_TAG "full bioconda + image version"
+  DEFAULT_BASE_IMAGE "where the busybox image should come from (used in CI and mulled-build)"
+  DEST_BASE_IMAGE "fully qualified busybox image destination (used in CI)"
+  DEFAULT_EXTENDED_BASE_IMAGE "where the debian image should come from (used in CI and mulled-build)"
+  BUILD_ENV_IMAGE "fully qualified image for building (used in CI)"
+  CREATE_ENV_IMAGE "fully qualified image for creating (used in CI)"
+  BUILD_ARGS "full build arguments passed to podman, typically created by /prepare.sh"
+  busybox_image "initial busybox image, created by images/base-glibc-busybox-bash/prepare.sh"
+  IMAGE_NAME "image name as determined by /prepare.sh"
+  TAG "tag as determined by prepare.sh"
+  BASE_IMAGE_CONDAFORGE_AMD64 "x86_64 base image for building"
+  BASE_IMAGE_CONDAFORGE_ARM64 "ARM64 base image for building"
+  BASE_IMAGE_BUILD_ARG "unique to build image, this determines the upstream conda-forge image to use"
+  arch "current architecture"
+EOF
+
+  echo "--------END ENVIRONMENT VARIABLE INVENTORY ---------------------------------"
+}
diff --git a/images/versions.sh b/images/versions.sh
deleted file mode 100644
index fecacc1f1cf..00000000000
--- a/images/versions.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-# Configures various versions to be used throughout infrastructure
-ARCHS="amd64 arm64"
-DEBIAN_VERSION=12.5
-BUSYBOX_VERSION=1.36.1
-BASE_DEBIAN_IMAGE_NAME="tmp-base-debian"
-BASE_BUSYBOX_IMAGE_NAME="tmp-base-busybox"
-BUILD_ENV_IMAGE_NAME="tmp-build-env"
-CREATE_ENV_IMAGE_NAME="tmp-create-env"
-BASE_TAG="0.2"
-BASE_IMAGE_CONDAFORGE_AMD64="quay.io/condaforge/linux-anvil-x86_64:cos7"
-BASE_IMAGE_CONDAFORGE_ARM64="quay.io/condaforge/linux-anvil-aarch64:cos7"
-
-
-# Inspect this repo to get the currently-checked-out version, but if
-# BIOCONDA_UTILS_VERSION was set outside this script, use that instead.
-BIOCONDA_UTILS_VERSION=${BIOCONDA_UTILS_VERSION:-$(git describe --tags --dirty --always)}
-
-# This will be used as the tag for create-env and build-env images, which
-# depend on bioconda-utils
-BIOCONDA_IMAGE_TAG=${BIOCONDA_UTILS_VERSION}_base${BASE_TAG}
-
-# FUNCTIONS --------------------------------------------------------------------
-
-function tag_exists () {
-  # Returns 0 if the tag for the image exists on quay.io, otherwise returns 1.
-  # Skips "latest" tags (likely they will always be present)
-  # $1: image name
-  # $2: tags
-  local IMAGE_NAME="$1"
-  local TAGS="$2"
-
-  response="$(curl -sL "https://quay.io/api/v1/repository/bioconda/${IMAGE_NAME}/tag/")"
-
-  # Images can be set to expire; the jq query selects only non-expired images.
- existing_tags="$( - printf %s "${response}" \ - | jq -r '.tags[]|select(.end_ts == null or .end_ts >= now)|.name' - )" \ - || { - printf %s\\n \ - 'Could not get list of image tags.' \ - 'Does the repository exist on Quay.io?' \ - 'Quay.io REST API response was:' \ - "${response}" >&2 - return 1 - } - for tag in $TAGS ; do - case "${tag}" in - "latest" ) ;; - * ) - if printf %s "${existing_tags}" | grep -qxF "${tag}" ; then - printf 'Tag %s already exists for %s on quay.io!\n' "${tag}" "${IMAGE_NAME}" >&2 - echo "exists" - fi - esac - done -} - -# Helper function to push a just-built image to GitHub Container -# Respository, which is used as a temporary storage mechanism. -function push_to_ghcr () { - podman manifest push --all localhost/${1}:${2} ghcr.io/bioconda/${1}:${2} -} - -# Helper function to move an image from gchr to quay.io for public use. -function move_from_ghcr_to_quay () { - local image_name=$1 - local tag=$2 - - # Locally-named manifest to which we'll add the different archs. - buildah manifest create "local_${image_name}:${tag}" - - # Expects images for archs to be built already; add them to local manifest. - for arch in $ARCHS; do - imgid=$(buildah pull --arch=$arch "ghcr.io/bioconda/${image_name}:${tag}") - buildah manifest add "local_${image_name}:${tag}" "${imgid}" - done - - # Publish - podman manifest push "local_${image_name}:${tag}" "quay.io/bioconda/${image_name}:${tag}" -}
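# How the pieces above fit together, as a rough local sketch (assumes podman,
# buildah, curl, and jq are installed; the CI workflow drives the same steps):
#
#   source images/image_config.sh        # versions, image names, helper functions
#   env_var_inventory                    # show the configuration in use
#   (cd images && bash build.sh base-glibc-busybox-bash)   # per-arch images + tar artifacts
#   if ! tag_exists "$BASE_BUSYBOX_IMAGE_NAME" "$BASE_TAG"; then
#     build_and_push_manifest \
#       "${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \
#       "docker://localhost:5000/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \
#       "--tls-verify=false"
#   fi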