Compare commits

..

No commits in common. "d576a7f752a938fd2ad0de0f4ac506649c27199c" and "1288239de3982d3e97ed97d3be399decb31d33cb" have entirely different histories.

3 changed files with 6 additions and 36 deletions

View File

@@ -1,11 +1,10 @@
FROM alpine:3.10
label maintainer="Elementar Sistemas <contato@elementarsistemas.com.br>"
FROM alpine:3.6
MAINTAINER Elementar Sistemas <contato@elementarsistemas.com.br>
RUN apk --no-cache add bash py3-pip && pip3 install --no-cache-dir awscli
RUN apk --no-cache add bash py-pip && pip install awscli
ADD watch /watch
VOLUME /data
ENV S3_SYNC_FLAGS "--delete"
ENTRYPOINT [ "./watch" ]
CMD ["/data"]

View File

@@ -1,11 +1,5 @@
# docker-s3-volume
[![Docker Build Status](https://img.shields.io/docker/build/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
[![Docker Layers Count](https://img.shields.io/microbadger/layers/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
[![Docker Version](https://img.shields.io/docker/v/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
[![Docker Pull Count](https://img.shields.io/docker/pulls/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
[![Docker Stars](https://img.shields.io/docker/stars/elementar/s3-volume?style=plastic)](https://hub.docker.com/r/elementar/s3-volume)
Creates a Docker container that is restored and backed up to a directory on s3.
You could use this to run short lived processes that work with and persist data to and from S3.
@@ -55,15 +49,6 @@ Any environment variable available to the `aws-cli` command can be used. see
http://docs.aws.amazon.com/cli/latest/userguide/cli-environment.html for more
information.
### Configuring an endpoint URL
If you are using an S3-compatible service (such as Oracle OCI Object Storage), you may want to set the service's endpoint URL:
```bash
docker run -d --name my-data-container -e ENDPOINT_URL=... \
elementar/s3-volume /data s3://mybucket/someprefix
```
### Forcing a sync
A final sync will always be performed on container shutdown. A sync can be
@@ -84,14 +69,6 @@ docker run -d --name my-data-container \
elementar/s3-volume --force-restore /data s3://mybucket/someprefix
```
### Deletion and sync
By default if there are files that are deleted in your local file system, those will be deleted remotely. If you wish to turn this off, set the environment variable `S3_SYNC_FLAGS` to an empty string:
```bash
docker run -d -e S3_SYNC_FLAGS="" elementar/s3-volume /data s3://mybucket/someprefix
```
### Using Compose and named volumes
Most of the time, you will use this image to sync data for another container.

12
watch
View File

@@ -40,12 +40,6 @@ PROGNAME=$0
LOCAL=$1
REMOTE=$2
if [ "$ENDPOINT_URL" ]; then
AWS="aws --endpoint-url $ENDPOINT_URL"
else
AWS=aws
fi
function restore {
if [ "$(ls -A $LOCAL)" ]; then
if [[ ${FORCE_RESTORE:false} == 'true' ]]; then
@@ -54,14 +48,14 @@ function restore {
fi
echo "restoring $REMOTE => $LOCAL"
if ! $AWS s3 sync "$REMOTE" "$LOCAL"; then
if ! aws s3 sync "$REMOTE" "$LOCAL"; then
error_exit "restore failed"
fi
}
function backup {
echo "backup $LOCAL => $REMOTE"
if ! $AWS s3 sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; then
if ! aws s3 sync "$LOCAL" "$REMOTE" --delete; then
echo "backup failed" 1>&2
return 1
fi
@@ -69,7 +63,7 @@ function backup {
function final_backup {
echo "backup $LOCAL => $REMOTE"
while ! $AWS s3 sync "$LOCAL" "$REMOTE" $S3_SYNC_FLAGS; do
while ! aws s3 sync "$LOCAL" "$REMOTE" --delete; do
echo "backup failed, will retry" 1>&2
sleep 1
done