Open
Description
I have been testing for a long time, but the container still will not start, and there is no clear error message or other indication of what is wrong.
If I'm using version 1.94, the message is: "s3fs exited with code 1"
If I am using version 1.89, the message is: "read: '/run/secrets/aws_secret_access_key': bad variable name".
When using a custom Dockerfile (below) to enable shell tracing in the entrypoint, the logs show the following:
FROM efrecon/s3fs:1.94
RUN sed -i 's/set -eu/set -eux/' /usr/local/bin/docker-entrypoint.sh
s3fs | + S3FS_DEBUG=0
s3fs | + AWS_S3_ENVFILE=
s3fs | + '[' -n ]
s3fs | + AWS_S3_URL=https://example.r2.cloudflarestorage.com
s3fs | + AWS_S3_ROOTDIR=/opt/s3fs
s3fs | + AWS_S3_MOUNT=/opt/s3fs/bucket
s3fs | + AWS_S3_ACCESS_KEY_ID=
s3fs | + AWS_S3_ACCESS_KEY_ID_FILE=/run/secrets/aws_access_key_id
s3fs | + AWS_S3_SECRET_ACCESS_KEY=
s3fs | + AWS_S3_SECRET_ACCESS_KEY_FILE=/run/secrets/aws_secret_access_key
s3fs | + AWS_S3_AUTHFILE=
s3fs | + '[' -z ]
s3fs | + '[' -z /run/secrets/aws_access_key_id ]
s3fs | + '[' -z docker-bucket ]
s3fs | + '[' -n /run/secrets/aws_access_key_id ]
s3fs | + read -r AWS_S3_ACCESS_KEY_ID
s3fs exited with code 1
All tests were run with Docker Compose, using the following configuration:
services:
  s3fs:
    image: efrecon/s3fs:1.94
    container_name: s3fs
    devices:
      - /dev/fuse
    cap_add:
      - SYS_ADMIN
    security_opt:
      - apparmor=unconfined
    environment:
      - AWS_S3_BUCKET=docker-bucket
      - AWS_S3_ACCESS_KEY_ID_FILE=/run/secrets/aws_access_key_id
      - AWS_S3_SECRET_ACCESS_KEY_FILE=/run/secrets/aws_secret_access_key
      - AWS_S3_URL=https://example.r2.cloudflarestorage.com
      - UID=1000
      - GID=1000
    volumes:
      - /mnt/tmp:/opt/s3fs/bucket:rshared
    secrets:
      - aws_access_key_id
      - aws_secret_access_key

secrets:
  aws_access_key_id:
    file: ./secrets/aws_access_key_id.secret
  aws_secret_access_key:
    file: ./secrets/aws_secret_access_key.secret