Compare commits

...

18 Commits

| SHA1 | Message | CI (continuous-integration/drone/push) | Date |
|------------|---------|---------|------|
| 1e3600e140 | feat: Add timezone | passing | 2023-07-17 09:35:27 +02:00 |
| 67440a7f1f | feat: Add datetime on log | passing | 2023-07-16 10:50:30 +02:00 |
| 1f29978152 | feat: Add multi arch | passing | 2023-06-26 15:23:39 +02:00 |
| b607230f64 | feat: Add registry configuration | passing | 2023-06-26 15:11:49 +02:00 |
| a6071706c6 | feat: Update repository name | failing | 2023-06-26 14:57:45 +02:00 |
| f501a379ff | feat: Add drone configuration | — | 2023-06-26 14:56:05 +02:00 |
| d65c9d32d0 | wip | — | 2023-06-26 14:49:37 +02:00 |
| d576a7f752 | Adds links to the badges | — | 2020-12-02 18:32:38 -03:00 |
| a6d878638b | Updates badges | — | 2020-12-02 17:40:09 -03:00 |
| 7bdb1a17b4 | Fixes S3_SYNC_FLAGS documentation | — | 2020-12-02 17:32:26 -03:00 |
| 082e5188aa | Merge pull request #14 from jmahowald/master: allow us to not delete files | — | 2020-12-02 16:41:37 -03:00 |
| 014b843a2c | Apply suggestions from code review (Co-authored-by: Mirsal \<mirsal@mirsal.fr\>) | — | 2020-12-02 16:40:07 -03:00 |
| 05dfe25557 | allow us to not delete files | — | 2020-02-13 19:04:19 -06:00 |
| 957697dc1a | Merge pull request #13 from tlex/master: Updates to latest alpine | — | 2019-08-27 11:43:43 -03:00 |
| f3d579a0e9 | Updates to latest alpine (alpine:3.6 → alpine:3.10; python2 → python3; no pip cache directory) | — | 2019-08-27 10:09:54 +02:00 |
| f19b3fa1ee | Added badges to the README | — | 2019-08-07 10:46:45 -03:00 |
| 4d18ac3fc8 | Merge pull request #12 from Arizona-Tecnologia/configurable-url: Allow configuration of the endpoint url | — | 2019-08-07 10:40:57 -03:00 |
| 9cb9e30ad7 | Allow configuration of the endpoint url | — | 2019-08-06 16:31:40 -03:00 |
7 changed files with 105 additions and 62 deletions

.drone.yml (new file, +37)

@@ -0,0 +1,37 @@
```yaml
---
# drone encrypt ResiLien/docker-s3-volume-watch $REGISTRY_USERNAME
kind: secret
name: REGISTRY_USERNAME
data: nWD+Q9IZSvWw6aDlxBgjvaQqsyU9Muub4A9wJMIfZ8A18g==

---
# drone encrypt ResiLien/docker-s3-volume-watch $REGISTRY_PASSWORD
kind: secret
name: REGISTRY_PASSWORD
data: fshphgiLW3gyuwhKHgSe7m6Y/LXtse2VEYABvjWYCov8w85+CMa5hjkHSiO+dgRCMOC4+OE5ZlZjD/5hAURi6TsBTZw0h5fWsnEjMR6L

---
kind: pipeline
type: docker
name: amd64

platform:
  os: linux
  arch: amd64

steps:
  - name: docker
    image: thegeeklab/drone-docker-buildx
    privileged: true
    settings:
      registry: registry.weko.io
      username:
        from_secret: REGISTRY_USERNAME
      password:
        from_secret: REGISTRY_PASSWORD
      repo: registry.weko.io/simonc/docker-s3-volume-watch
      tags:
        - latest
      platforms:
        - linux/arm64
        - linux/amd64
```
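
The `data:` values above are Drone encrypted secrets. Per the comments kept in the file, they were generated with the `drone` CLI's `encrypt` command; a sketch of regenerating them, assuming the CLI is installed and pointed at the CI server (`DRONE_SERVER` and `DRONE_TOKEN` exported):

```bash
# Hypothetical plaintext values; the real credentials are not part of this diff.
export REGISTRY_USERNAME='simonc'
export REGISTRY_PASSWORD='...'

# Each command prints the encrypted string to paste into the matching `data:` field.
drone encrypt ResiLien/docker-s3-volume-watch "$REGISTRY_USERNAME"
drone encrypt ResiLien/docker-s3-volume-watch "$REGISTRY_PASSWORD"
```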

.gitignore (−1)

```diff
@@ -1 +0,0 @@
-.vagrant
```

Dockerfile

```diff
@@ -1,10 +1,11 @@
-FROM alpine:3.6
-MAINTAINER Elementar Sistemas <contato@elementarsistemas.com.br>
+FROM peakcom/s5cmd:v2.1.0
+label maintainer="RésiLien <admin+docker@resilien.fr>"
-RUN apk --no-cache add bash py-pip && pip install awscli
+RUN apk --no-cache add inotify-tools bash tzdata
 ADD watch /watch
 VOLUME /data
-ENTRYPOINT [ "./watch" ]
-CMD ["/data"]
-ENV S3_SYNC_FLAGS ""
+ENTRYPOINT [ "/watch" ]
+CMD ["help"]
```

Makefile

```diff
@@ -1,9 +1,8 @@
-NAME = elementar/s3-volume
+NAME = simonc/s3-sync
 
 .PHONY: build release
 
 build:
 	docker build -t $(NAME):latest .
 
 release:
 	docker push $(NAME):latest
```
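
The targets themselves are unchanged; only the image name moves from `elementar/s3-volume` to `simonc/s3-sync`. Assuming you are logged in to a registry that can host that name:

```bash
make build    # runs: docker build -t simonc/s3-sync:latest .
make release  # runs: docker push simonc/s3-sync:latest
```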

README.md

````diff
@@ -1,4 +1,14 @@
-# docker-s3-volume
+# docker-s3-volume-watch
+
+The idea is to keep a directory synchronized with an S3 bucket. Here is how it works:
+
+- the command takes the source directory `/data/` and an S3 bucket `s3://bucket` as parameters
+- it syncs the bucket with the directory, then starts `inotify` to re-sync the bucket whenever an event occurs in the directory (modification, creation, deletion)
+
+Inspired by: [docker-s3-volume](https://github.com/elementar/docker-s3-volume)
+
+## Original README
 
 Creates a Docker container that is restored and backed up to a directory on s3.
 You could use this to run short lived processes that work with and persist data to and from S3.
@@ -49,6 +59,15 @@ Any environment variable available to the `aws-cli` command can be used. see
 http://docs.aws.amazon.com/cli/latest/userguide/cli-environment.html for more
 information.
 
+### Configuring an endpoint URL
+
+If you are using an S3-compatible service (such as Oracle OCI Object Storage), you may want to set the service's endpoint URL:
+
+```bash
+docker run -d --name my-data-container -e ENDPOINT_URL=... \
+  elementar/s3-volume /data s3://mybucket/someprefix
+```
+
 ### Forcing a sync
 
 A final sync will always be performed on container shutdown. A sync can be
@@ -69,6 +88,14 @@ docker run -d --name my-data-container \
   elementar/s3-volume --force-restore /data s3://mybucket/someprefix
 ```
 
+### Deletion and sync
+
+By default, files that are deleted in your local file system are also deleted remotely on the next sync. If you wish to turn this off, set the environment variable `S3_SYNC_FLAGS` to an empty string:
+
+```bash
+docker run -d -e S3_SYNC_FLAGS="" elementar/s3-volume /data s3://mybucket/someprefix
+```
+
 ### Using Compose and named volumes
 
 Most of the time, you will use this image to sync data for another container.
````
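
Taken together, the new introduction describes the same flow the `watch` script implements: restore once, then watch and back up. A minimal run sketch (host path, container name, credentials, and bucket are illustrative; the image path comes from the `.drone.yml` above):

```bash
# Mirror a host directory to S3; an initial restore runs before watching begins.
docker run -d --name s3-watch \
  -e AWS_ACCESS_KEY_ID=... -e AWS_SECRET_ACCESS_KEY=... \
  -v /srv/app-data:/data \
  registry.weko.io/simonc/docker-s3-volume-watch /data s3://mybucket/someprefix
```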

s3cfg (deleted, −38)

@@ -1,38 +0,0 @@
```ini
[default]
access_key =
bucket_location = US
cloudfront_host = cloudfront.amazonaws.com
cloudfront_resource = /2010-07-15/distribution
default_mime_type = binary/octet-stream
delete_removed = False
dry_run = False
encoding = UTF-8
encrypt = False
follow_symlinks = False
force = False
get_continue = False
gpg_command = /usr/local/bin/gpg
gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_passphrase =
guess_mime_type = True
host_base = s3.amazonaws.com
host_bucket = %(bucket)s.s3.amazonaws.com
human_readable_sizes = False
list_md5 = False
log_target_prefix =
preserve_attrs = True
progress_meter = True
proxy_host =
proxy_port = 0
recursive = False
recv_chunk = 4096
reduced_redundancy = False
secret_key =
send_chunk = 4096
simpledb_host = sdb.amazonaws.com
skip_existing = False
socket_timeout = 300
urlencoding_mode = normal
use_https = False
verbosity = WARNING
```

watch (48 changed lines)

```diff
@@ -2,6 +2,13 @@
 [[ "$TRACE" ]] && set -x
 
+DEBUG=${DEBUG:-"Unknown Error"}
+
+function log {
+  now=`date +"%Y-%m-%d %T"`
+  echo "[${now}] $1"
+}
+
 function usage {
   cat <<-EOF
 	Usage: $PROGNAME [OPTIONS] <local-path> <remote-path>
@@ -14,7 +21,7 @@ EOF
 }
 
 function error_exit {
-  echo "${1:-"Unknown Error"}" 1>&2
+  log "${1:-"Unknown Error"}" 1>&2
   exit 1
 }
@@ -37,8 +44,10 @@ while true; do
 done
 
 PROGNAME=$0
+S3_PROG=/s5cmd
 LOCAL=$1
 REMOTE=$2
+WATCH_EVENTS=modify
 
 function restore {
   if [ "$(ls -A $LOCAL)" ]; then
@@ -47,40 +56,49 @@ function restore {
     fi
   fi
 
-  echo "restoring $REMOTE => $LOCAL"
-  if ! aws s3 sync "$REMOTE" "$LOCAL"; then
+  log "restoring $REMOTE => $LOCAL"
+  if ! $S3_PROG sync "$REMOTE/*" "$LOCAL"; then
     error_exit "restore failed"
   fi
 }
 
 function backup {
-  echo "backup $LOCAL => $REMOTE"
-  if ! aws s3 sync "$LOCAL" "$REMOTE" --delete; then
-    echo "backup failed" 1>&2
+  log "backup $LOCAL => $REMOTE"
+  if ! $S3_PROG sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; then
+    log "backup failed" 1>&2
     return 1
   fi
 }
 
 function final_backup {
-  echo "backup $LOCAL => $REMOTE"
-  while ! aws s3 sync "$LOCAL" "$REMOTE" --delete; do
-    echo "backup failed, will retry" 1>&2
+  log "backup $LOCAL => $REMOTE"
+  while ! $S3_PROG sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; do
+    log "backup failed, will retry" 1>&2
     sleep 1
   done
   exit 0
 }
 
 function idle {
-  echo "ready"
+  log "ready"
+  log "RESTORE_INTERVAL: ${RESTORE_INTERVAL}"
   while true; do
-    sleep ${BACKUP_INTERVAL:-42} &
-    wait $!
-    [ -n "$BACKUP_INTERVAL" ] && backup
+    restore
+    if [[ -v RESTORE_INTERVAL ]]; then
+      backup
+      timeout ${RESTORE_INTERVAL} inotifywait -m -e modify -e move -e create -e delete --timefmt '%Y-%m-%d %H:%M:%S' --format '%T %f' $LOCAL | while read date time file; do log "The file '$file' appeared in directory '$path' via '$action'"; backup; done
+    else
+      log "Without restore interval"
+      inotifywait -m -e modify -e move -e create -e delete --timefmt '%Y-%m-%d %H:%M:%S' --format '%T %f' $LOCAL |
+      while read date time file
+      do
+        log "The file '$file' appeared in directory '$path' via '$action'"
+        backup
+      done
+    fi
   done
 }
 
 restore
 
 trap final_backup SIGHUP SIGINT SIGTERM
 trap "backup; idle" USR1
```