#!/usr/bin/env bash

SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="alpine"
IMAGE_VERSION="3.4"
SERVICE_NAME=aws-cli

source "${SCRIPT_PATH}/../common.shinc"

#
# Project specific variables
#
AWS_DEFAULT_REGION=eu-central-1
CUSTOM_ARGS=""
CURRENT_DATE="$(date +%Y-%m-%d)"

# BSD/macOS and GNU date use different flags for date arithmetic
platform=$(uname)
if [[ ${platform} = "FreeBSD" ]] || [[ ${platform} = "Darwin" ]]; then
    EXPIRES=$(date -v+365d '+%a, %d %b %Y 00:00:00 GMT')
else
    EXPIRES=$(date '+%a, %d %b %Y 00:00:00 GMT' -d "${CURRENT_DATE} + 365 day")
fi
CACHE_MAX_AGE="31536000"
ACL="public-read"

source "${SCRIPT_PATH}/env.shinc" 2> /dev/null

REQUIRED_VARIABLES=(AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY)
for _var in "${REQUIRED_VARIABLES[@]}"; do
    if [ -z "${!_var+x}" ]; then
        echo "Please provide credentials in the env.shinc file, missing definition of variable: ${_var}"
        exit 2
    fi
done

# Build the docker image (via __build from common.shinc)
init() {
    __build || return 1
}

# Run an arbitrary aws command inside the container, with a host directory mounted at /mnt/host
cmd() {
    local localDir
    __msg "Specify a mount point to /mnt/host inside the docker container"
    read localDir
    if [[ ${localDir} = "" ]]; then
        localDir="$(pwd -P)"
        __warn "You have not provided a directory, using current path: ${localDir}"
        __msg "Continue? [(y)/n]"
        read CONTINUE
        if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
            return 0
        fi
    fi
    __warn "Mounting ${localDir} to /mnt/host inside the docker container"
    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v "${localDir}":/mnt/host \
        ${CUSTOM_ARGS} \
        ${SERVICE_NAME} \
        aws "$@"
}

# Upload a local file or directory to s3 with the configured ACL and cache headers
upload() {
    local source="$1"
    shift
    local target="$1"
    shift
    local args="$@"

    [[ ${source} = "" ]] && __err "You must provide a source (filesystem path) as the first parameter" && return 137
    [[ ${target} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    [[ ! -e ${source} ]] && __err "The source does not exist: ${source}" && return 6

    __warn "Uploading ${source} to s3://${target}"
    __msg "Cache expires on ${EXPIRES}"

    if [[ -f ${source} ]]; then
        # File: mount the parent directory and copy the single file
        local dirName=$(dirname "${source}")
        local filename=$(basename "${source}")
        docker run --rm \
            -u $(id -u):$(id -g) \
            --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
            --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
            --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
            -v "${dirName}":/mnt/host \
            ${SERVICE_NAME} \
            aws s3 cp "/mnt/host/${filename}" "s3://${target}" ${args} --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
    else
        # Directory: mount it and copy recursively
        docker run --rm \
            -u $(id -u):$(id -g) \
            --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
            --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
            --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
            -v "${source}":/mnt/host \
            ${SERVICE_NAME} \
            aws s3 cp /mnt/host "s3://${target}" ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
    fi
}

# Download a file or directory from s3 to the local filesystem
download() {
    local source="$1"
    shift
    local target="$1"
    shift
    local args="$@"

    [[ ${source} = "" ]] && __err "You must provide a source (s3) as the first parameter" && return 137
    [[ ${target} = "" ]] && __err "You must provide a target (filesystem path) as the second parameter" && return 137

    __warn "Downloading s3://${source} to ${target}"

    if [[ ! -e ${target} ]]; then
        __warn "The target ${target} does not exist. Is this a file (f) or a directory (d)? ..."
        read type
        if [[ ${type} = "f" ]]; then
            mkdir -p "$(dirname "${target}")"
            touch "${target}"
        elif [[ ${type} = "d" ]]; then
            mkdir -p "${target}"
        else
            __err "The input is invalid, please use either 'f' or 'd'"
            return 6
        fi
    fi

    if [[ -f ${target} ]]; then
        # File: mount the parent directory and copy into it
        local dirName=$(dirname "${target}")
        local filename=$(basename "${target}")
        docker run --rm \
            -u $(id -u):$(id -g) \
            --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
            --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
            --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
            -v "${dirName}":/mnt/host \
            ${SERVICE_NAME} \
            aws s3 cp "s3://${source}" "/mnt/host/${filename}" ${args}
    else
        # Directory: mount it and copy recursively
        docker run --rm \
            -u $(id -u):$(id -g) \
            --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
            --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
            --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
            -v "${target}":/mnt/host \
            ${SERVICE_NAME} \
            aws s3 cp "s3://${source}" /mnt/host ${args} --recursive
    fi
}

# Copy objects from one s3 location to another, refreshing ACL and cache headers
migrate() {
    local source_dir="$1"
    shift
    local target_dir="$1"
    shift
    local args="$@"

    [[ ${source_dir} = "" ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
    [[ ${target_dir} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137

    __warn "Migrating s3://${source_dir} to s3://${target_dir}"
    __msg "Cache expires on ${EXPIRES}"

    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 cp "s3://${source_dir}" "s3://${target_dir}" ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# Move objects from one s3 location to another, refreshing ACL and cache headers
move() {
    local source_dir="$1"
    shift
    local target_dir="$1"
    shift
    local args="$@"

    [[ ${source_dir} = "" ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
    [[ ${target_dir} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137

    __warn "Moving s3://${source_dir} to s3://${target_dir}"
    __msg "Cache expires on ${EXPIRES}"

    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 mv "s3://${source_dir}" "s3://${target_dir}" ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# Re-apply ACL and cache headers to an existing s3 path by copying it onto itself
setPermissions() {
    local target_dir="$1"
    if [[ ${target_dir} = "" ]]; then
        __warn "Please provide an s3 path: "
        read target_dir
    fi
    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 cp "s3://${target_dir}" "s3://${target_dir}" --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# Open an interactive shell inside the container, with a host directory mounted at /mnt/host
exec() {
    local source_dir="$1"
    if [[ ${source_dir} = "" ]]; then
        source_dir=$(pwd -P)
        __warn "You have not provided a directory, using current path: ${source_dir}"
        __msg "Continue? [(y)/n]"
        read CONTINUE
        if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
            return 0
        fi
    fi
    docker run --rm -it \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v "${source_dir}":/mnt/host \
        ${SERVICE_NAME} \
        sh
}

# Dispatch: call the function named by the first command-line argument
"$@"
exit $?
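
# ------------------------------------------------------------------
# Usage sketch (illustrative only): the dispatcher above calls the
# function named by the first argument. The script filename below is
# hypothetical; __build/__msg/__warn/__err are assumed to come from
# common.shinc, and the AWS credentials from env.shinc.
#
#   ./aws-cli.sh init                                  # build the docker image
#   ./aws-cli.sh cmd s3 ls                             # run an arbitrary aws command
#   ./aws-cli.sh upload ./dist my-bucket/site          # local -> s3
#   ./aws-cli.sh download my-bucket/site ./dist        # s3 -> local
#   ./aws-cli.sh migrate my-bucket/old my-bucket/new   # s3 -> s3 copy
#   ./aws-cli.sh move my-bucket/old my-bucket/new      # s3 -> s3 move
#   ./aws-cli.sh setPermissions my-bucket/site         # re-apply ACL/cache headers
#   ./aws-cli.sh exec ./dist                           # interactive shell in the container
# ------------------------------------------------------------------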