commit d65c9d32d0 (parent d576a7f752)
Simon, 2023-06-26 14:49:03 +02:00
6 changed files with 36 additions and 65 deletions

.gitignore
@@ -1 +0,0 @@
-.vagrant

Dockerfile
@@ -1,11 +1,11 @@
-FROM alpine:3.10
-label maintainer="Elementar Sistemas <contato@elementarsistemas.com.br>"
+FROM peakcom/s5cmd:v2.1.0
+label maintainer="RésiLien <admin+docker@resilien.fr>"
-RUN apk --no-cache add bash py3-pip && pip3 install --no-cache-dir awscli
+RUN apk --no-cache add inotify-tools bash
 ADD watch /watch
 VOLUME /data
-ENV S3_SYNC_FLAGS "--delete"
-ENTRYPOINT [ "./watch" ]
-CMD ["/data"]
+ENV S3_SYNC_FLAGS ""
+ENTRYPOINT [ "/watch" ]
+CMD ["help"]

Makefile
@@ -1,9 +1,8 @@
-NAME = elementar/s3-volume
+NAME = simonc/s3-sync
 .PHONY: build release
 build:
 	docker build -t $(NAME):latest .
 release:
 	docker push $(NAME):latest
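Usage is unchanged after the rename:

```sh
make build     # builds simonc/s3-sync:latest
make release   # pushes simonc/s3-sync:latest to the registry
```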

README.md
@@ -1,10 +1,14 @@
-# docker-s3-volume
+# docker-s3-volume-watch
-[![Docker Build Status](https://img.shields.io/docker/build/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Layers Count](https://img.shields.io/microbadger/layers/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Version](https://img.shields.io/docker/v/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Pull Count](https://img.shields.io/docker/pulls/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
-[![Docker Stars](https://img.shields.io/docker/stars/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
+The principle is to keep a directory synchronized with an S3 bucket.
+Here is how it works:
+- the command takes the source directory `/data/` and an S3 bucket `s3://bucket` as parameters
+- it syncs the bucket with the directory, then starts `inotify` to re-sync the bucket as soon as an event occurs in the directory (modification, creation, deletion)
+Inspiration: [docker-s3-volume](https://github.com/elementar/docker-s3-volume)
+## Original README
 Creates a Docker container that is restored and backed up to a directory on s3.
 You could use this to run short-lived processes that work with and persist data to and from S3.
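A hypothetical invocation matching the description above (image name taken from the Makefile; the bucket, credentials, and host path are placeholders):

```sh
docker run -d \
  -e AWS_ACCESS_KEY_ID=... \
  -e AWS_SECRET_ACCESS_KEY=... \
  -v /srv/app-data:/data \
  simonc/s3-sync /data s3://my-bucket/app-data
```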

s3cfg
@@ -1,38 +0,0 @@
-[default]
-access_key =
-bucket_location = US
-cloudfront_host = cloudfront.amazonaws.com
-cloudfront_resource = /2010-07-15/distribution
-default_mime_type = binary/octet-stream
-delete_removed = False
-dry_run = False
-encoding = UTF-8
-encrypt = False
-follow_symlinks = False
-force = False
-get_continue = False
-gpg_command = /usr/local/bin/gpg
-gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
-gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
-gpg_passphrase =
-guess_mime_type = True
-host_base = s3.amazonaws.com
-host_bucket = %(bucket)s.s3.amazonaws.com
-human_readable_sizes = False
-list_md5 = False
-log_target_prefix =
-preserve_attrs = True
-progress_meter = True
-proxy_host =
-proxy_port = 0
-recursive = False
-recv_chunk = 4096
-reduced_redundancy = False
-secret_key =
-send_chunk = 4096
-simpledb_host = sdb.amazonaws.com
-skip_existing = False
-socket_timeout = 300
-urlencoding_mode = normal
-use_https = False
-verbosity = WARNING
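Dropping this s3cmd-era config file means credentials now come only from the environment; s5cmd reads the standard AWS variables via the AWS SDK. A sketch with placeholder values (the `S3_ENDPOINT_URL` variable for non-AWS endpoints is an s5cmd convention; verify it against the s5cmd version in use):

```sh
export AWS_ACCESS_KEY_ID=...
export AWS_SECRET_ACCESS_KEY=...
export S3_ENDPOINT_URL=https://s3.example.com   # only for non-AWS object stores
```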

watch

@@ -2,6 +2,8 @@
 [[ "$TRACE" ]] && set -x
+DEBUG=${DEBUG:-"Unknown Error"}
 function usage {
   cat <<-EOF
 	Usage: $PROGNAME [OPTIONS] <local-path> <remote-path>
@@ -37,14 +39,10 @@ while true; do
 done
 PROGNAME=$0
+S3_PROG=/s5cmd
 LOCAL=$1
 REMOTE=$2
-if [ "$ENDPOINT_URL" ]; then
-  AWS="aws --endpoint-url $ENDPOINT_URL"
-else
-  AWS=aws
-fi
 WATCH_EVENTS=modify
 function restore {
   if [ "$(ls -A $LOCAL)" ]; then
@@ -54,14 +52,14 @@ function restore {
   fi
   echo "restoring $REMOTE => $LOCAL"
-  if ! $AWS s3 sync "$REMOTE" "$LOCAL"; then
+  if ! $S3_PROG sync "$REMOTE/*" "$LOCAL"; then
     error_exit "restore failed"
   fi
 }
 function backup {
   echo "backup $LOCAL => $REMOTE"
-  if ! $AWS s3 sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; then
+  if ! $S3_PROG sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; then
     echo "backup failed" 1>&2
     return 1
   fi
@@ -69,7 +67,7 @@ function backup {
 function final_backup {
   echo "backup $LOCAL => $REMOTE"
-  while ! $AWS s3 sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; do
+  while ! $S3_PROG sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; do
     echo "backup failed, will retry" 1>&2
     sleep 1
   done
@@ -79,9 +77,18 @@ function final_backup {
 function idle {
   echo "ready"
   while true; do
-    sleep ${BACKUP_INTERVAL:-42} &
-    wait $!
-    [ -n "$BACKUP_INTERVAL" ] && backup
+    restore
+    if [[ -v RESTORE_INTERVAL ]]; then
+      backup
+      timeout "$RESTORE_INTERVAL" inotifywait -m -e modify -e move -e create -e delete --timefmt '%Y-%m-%d %H:%M:%S' --format '%T %f' "$LOCAL" |
+        while read -r date time file; do
+          echo "$date $time: '$file' changed in '$LOCAL'"
+          backup
+        done
+    else
+      inotifywait -m -e modify -e move -e create -e delete --timefmt '%Y-%m-%d %H:%M:%S' --format '%T %f' "$LOCAL" |
+        while read -r date time file; do
+          echo "$date $time: '$file' changed in '$LOCAL'"
+          backup
+        done
+    fi
   done
 }
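The reworked idle loop gives two modes, selected by whether RESTORE_INTERVAL is set: event-driven backups only, or an additional periodic restore (the `timeout` ends each inotifywait session so the outer loop re-runs `restore`). A hedged usage sketch, with placeholder paths and bucket:

```sh
# Event-driven only: restore once, then back up on every filesystem event.
docker run -v /srv/app:/data simonc/s3-sync /data s3://my-bucket/app

# Periodic restore: also re-pull the bucket every 300 seconds.
docker run -e RESTORE_INTERVAL=300 -v /srv/app:/data simonc/s3-sync /data s3://my-bucket/app
```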