Simon 2023-06-26 14:49:03 +02:00
parent d576a7f752
commit d65c9d32d0
6 changed files with 36 additions and 65 deletions

.gitignore (vendored, 1 deletion)

@@ -1 +0,0 @@
-.vagrant

Dockerfile

@@ -1,11 +1,11 @@
-FROM alpine:3.10
+FROM peakcom/s5cmd:v2.1.0
-label maintainer="Elementar Sistemas <contato@elementarsistemas.com.br>"
+label maintainer="RésiLien <admin+docker@resilien.fr>"
-RUN apk --no-cache add bash py3-pip && pip3 install --no-cache-dir awscli
+RUN apk --no-cache add inotify-tools bash
 ADD watch /watch
 VOLUME /data
-ENV S3_SYNC_FLAGS "--delete"
+ENV S3_SYNC_FLAGS ""
-ENTRYPOINT [ "./watch" ]
+ENTRYPOINT [ "/watch" ]
-CMD ["/data"]
+CMD ["help"]
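The image is now built on peakcom/s5cmd, which provides the s5cmd binary (the watch script expects it at /s5cmd), with only inotify-tools and bash added on top. As a quick smoke test, a sketch only (the simonc/s3-sync tag is taken from the Makefile change below), one could check that both tools the watch script relies on are present:

    docker build -t simonc/s3-sync .
    docker run --rm --entrypoint /s5cmd simonc/s3-sync version
    docker run --rm --entrypoint sh simonc/s3-sync -c 'command -v inotifywait'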

Makefile

@@ -1,9 +1,8 @@
-NAME = elementar/s3-volume
+NAME = simonc/s3-sync
 .PHONY: build release
 build:
 	docker build -t $(NAME):latest .
 release:
 	docker push $(NAME):latest

README.md

@@ -1,10 +1,14 @@
-# docker-s3-volume
+# docker-s3-volume-watch
-[![Docker Build Status](https://img.shields.io/docker/build/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Layers Count](https://img.shields.io/microbadger/layers/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Version](https://img.shields.io/docker/v/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Pull Count](https://img.shields.io/docker/pulls/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Stars](https://img.shields.io/docker/stars/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
+The principle is to keep a directory synchronized with an S3 bucket.
+
+Here is how it works:
+- the command takes the source directory `/data/` and an S3 bucket `s3://bucket` as parameters
+- it syncs the bucket with the directory, then starts `inotify` to resync the bucket as soon as an event occurs in the directory (modification, creation, deletion)
+
+Inspiration: [docker-s3-volume](https://github.com/elementar/docker-s3-volume)
+
+## Original README
 Creates a Docker container that is restored and backed up to a directory on s3.
 You could use this to run short lived processes that work with and persist data to and from S3.
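A hypothetical invocation of the image described above (bucket, host path and credentials are placeholders; s5cmd reads the standard AWS credential environment variables):

    # placeholder values throughout
    docker run -d \
      -e AWS_ACCESS_KEY_ID=... \
      -e AWS_SECRET_ACCESS_KEY=... \
      -e AWS_REGION=eu-west-3 \
      -v /srv/app-data:/data \
      simonc/s3-sync /data s3://my-bucket/app-data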

s3cfg (38 deletions)

@@ -1,38 +0,0 @@
-[default]
-access_key =
-bucket_location = US
-cloudfront_host = cloudfront.amazonaws.com
-cloudfront_resource = /2010-07-15/distribution
-default_mime_type = binary/octet-stream
-delete_removed = False
-dry_run = False
-encoding = UTF-8
-encrypt = False
-follow_symlinks = False
-force = False
-get_continue = False
-gpg_command = /usr/local/bin/gpg
-gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
-gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
-gpg_passphrase =
-guess_mime_type = True
-host_base = s3.amazonaws.com
-host_bucket = %(bucket)s.s3.amazonaws.com
-human_readable_sizes = False
-list_md5 = False
-log_target_prefix =
-preserve_attrs = True
-progress_meter = True
-proxy_host =
-proxy_port = 0
-recursive = False
-recv_chunk = 4096
-reduced_redundancy = False
-secret_key =
-send_chunk = 4096
-simpledb_host = sdb.amazonaws.com
-skip_existing = False
-socket_timeout = 300
-urlencoding_mode = normal
-use_https = False
-verbosity = WARNING

watch (31 lines changed)

@@ -2,6 +2,8 @@
 [[ "$TRACE" ]] && set -x
+DEBUG=${DEBUG:-"Unknown Error"}
 function usage {
   cat <<-EOF
 	Usage: $PROGNAME [OPTIONS] <local-path> <remote-path>
@@ -37,14 +39,10 @@ while true; do
 done
 PROGNAME=$0
+S3_PROG=/s5cmd
 LOCAL=$1
 REMOTE=$2
+WATCH_EVENTS=modify
-if [ "$ENDPOINT_URL" ]; then
-  AWS="aws --endpoint-url $ENDPOINT_URL"
-else
-  AWS=aws
-fi
 function restore {
   if [ "$(ls -A $LOCAL)" ]; then
@@ -54,14 +52,14 @@ function restore {
   fi
   echo "restoring $REMOTE => $LOCAL"
-  if ! $AWS s3 sync "$REMOTE" "$LOCAL"; then
+  if ! $S3_PROG sync "$REMOTE/*" "$LOCAL"; then
     error_exit "restore failed"
   fi
 }
 function backup {
   echo "backup $LOCAL => $REMOTE"
-  if ! $AWS s3 sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; then
+  if ! $S3_PROG sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; then
     echo "backup failed" 1>&2
     return 1
   fi
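restore now pulls with a wildcard source ("$REMOTE/*"), the form s5cmd sync uses for "everything under this prefix", while backup pushes the directory and appends S3_SYNC_FLAGS, whose default was changed to empty in the Dockerfile. Run by hand, the two directions would look roughly like this (bucket and paths are placeholders):

    # restore: copy everything under the prefix into the local directory
    /s5cmd sync 's3://my-bucket/backup/*' /data/
    # backup: mirror the directory back; pass --delete via S3_SYNC_FLAGS
    # if pruning of removed files on the bucket is wanted (the old default)
    /s5cmd sync --delete /data/ 's3://my-bucket/backup/'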
@@ -69,7 +67,7 @@ function backup {
 function final_backup {
   echo "backup $LOCAL => $REMOTE"
-  while ! $AWS s3 sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; do
+  while ! $S3_PROG sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; do
     echo "backup failed, will retry" 1>&2
     sleep 1
   done
@@ -79,9 +77,18 @@ function final_backup {
 function idle {
   echo "ready"
   while true; do
-    sleep ${BACKUP_INTERVAL:-42} &
-    wait $!
-    [ -n "$BACKUP_INTERVAL" ] && backup
+    restore
+    if [[ -v RESTORE_INTERVAL ]]; then
+      backup
+      timeout ${RESTORE_INTERVAL} inotifywait -m -e modify -e move -e create -e delete --timefmt '%Y-%m-%d %H:%M:%S' --format '%T %f' "$LOCAL" | while read date time file; do echo "$date $time: change detected on '$file' in '$LOCAL'"; backup; done
+    else
+      inotifywait -m -e modify -e move -e create -e delete --timefmt '%Y-%m-%d %H:%M:%S' --format '%T %f' "$LOCAL" |
+        while read date time file
+        do
+          echo "$date $time: change detected on '$file' in '$LOCAL'"
+          backup
+        done
+    fi
   done
 }
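The idle loop now restores on every iteration and then blocks on inotifywait; when RESTORE_INTERVAL is set, timeout restarts the watch periodically so that changes made directly on the bucket are pulled again. A minimal standalone sketch of the same idea, with placeholder paths and assuming s5cmd and inotify-tools are installed on the host:

    #!/usr/bin/env bash
    # sketch only: not the container entrypoint from this commit
    LOCAL=/data                      # placeholder local directory
    REMOTE=s3://my-bucket/backup     # placeholder bucket/prefix

    s5cmd sync "$REMOTE/*" "$LOCAL/"              # initial restore
    while true; do
      # block until one filesystem event, then mirror the directory back to S3
      inotifywait -e modify -e move -e create -e delete "$LOCAL" &&
        s5cmd sync "$LOCAL/" "$REMOTE/"
    done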