Compare commits

..

No commits in common. "3ef347323606ed143221d44a380ac4967f537d56" and "71c9b169dff69369fa08891e009cb43e9858b113" have entirely different histories.

19 changed files with 313 additions and 66 deletions

View File

@@ -1,11 +1,10 @@
DOCKER_REGISTRY := dr.ys-dev.cz DOCKER_REGISTRY := dr.ys-dev.cz
DOCKER_PUBLIC_REGISTRY := yoursystemcz DOCKER_PUBLIC_REGISTRY := yoursystemcz
DOCKER_MY_REGISTRY := dr.cechis.cz
APP_NAME := adminer APP_NAME := adminer
IMAGE_NAME := adminer IMAGE_NAME := adminer
VERSION := 4.7.1 VERSION := 4.7.0
all: build release all: build release
.PHONY: all .PHONY: all
@@ -17,10 +16,8 @@ version/%:
release/%: release/%:
docker tag $(DOCKER_REGISTRY)/$(APP_NAME):$* $(DOCKER_PUBLIC_REGISTRY)/$(APP_NAME):$* docker tag $(DOCKER_REGISTRY)/$(APP_NAME):$* $(DOCKER_PUBLIC_REGISTRY)/$(APP_NAME):$*
docker tag $(DOCKER_REGISTRY)/$(APP_NAME):$* $(DOCKER_MY_REGISTRY)/$(APP_NAME):$*
docker push $(DOCKER_REGISTRY)/$(APP_NAME):$* docker push $(DOCKER_REGISTRY)/$(APP_NAME):$*
docker push $(DOCKER_PUBLIC_REGISTRY)/$(APP_NAME):$* docker push $(DOCKER_PUBLIC_REGISTRY)/$(APP_NAME):$*
docker push $(DOCKER_MY_REGISTRY)/$(APP_NAME):$*
build: version/$(VERSION) build: version/$(VERSION)

View File

@ -3,7 +3,7 @@
SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P ) SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="yoursystemcz/adminer" IMAGE_NAME="yoursystemcz/adminer"
IMAGE_VERSION="4.7.1" IMAGE_VERSION="4.7.0"
SERVICE_NAME=adminer SERVICE_NAME=adminer

304
aws-cli/run.sh Executable file
View File

@@ -0,0 +1,304 @@
#!/usr/bin/env bash
# Wrapper around an aws-cli docker container: builds the "aws-cli" image and
# exposes helper commands (cmd/upload/download/migrate/move/sync/...) that are
# dispatched from the command line at the bottom of this script.
SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="alpine"
IMAGE_VERSION="3.6"
SERVICE_NAME=aws-cli
# Shared helpers (__msg, __warn, __err, __build, ...).
# Quoted so a SCRIPT_PATH containing spaces does not word-split.
source "${SCRIPT_PATH}/../common.shinc"
#
# Project specific variables
#
AWS_DEFAULT_REGION=eu-central-1
CUSTOM_ARGS=""
# This directory will be mounted if no other directory is specified on the command line
DEFAULT_DIRECTORY=${SCRIPT_PATH}
# To enable the default directory mount point, override this setting to 1 in env.shinc
USE_DEFAULT_DIRECTORY=0
CURRENT_DATE="$(date +%Y-%m-%d)"
platform=$(uname)
# BSD-derived date(1) (FreeBSD/macOS) uses -v for date arithmetic; GNU date uses -d.
if [[ ${platform} = "FreeBSD" ]] || [[ ${platform} = "Darwin" ]]; then
EXPIRES=$(date -v+365d '+%a, %d %b %Y 00:00:00 GMT')
else
EXPIRES=$(date '+%a, %d %b %Y 00:00:00 GMT' -d "${CURRENT_DATE} + 365 day")
fi
CACHE_MAX_AGE="31536000"
ACL="public-read"
# Optional per-user overrides (credentials, USE_DEFAULT_DIRECTORY, ...);
# missing file is tolerated, hence the silenced stderr.
source "${SCRIPT_PATH}/env.shinc" 2> /dev/null
# Fail fast when AWS credentials are not configured.
# Quoted array expansion and [[ ]] avoid word-splitting of the values.
REQUIRED_VARIABLES=(AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY)
for _var in "${REQUIRED_VARIABLES[@]}"; do
# ${!_var+x} expands to "x" iff the variable named by _var is set.
if [[ -z ${!_var+x} ]]; then
echo "Please provide credential in env.shinc file, missing definition of variable: ${_var}"
exit 2
fi
done
init() {
# Build the docker image; propagate a build failure to the caller.
# BUGFIX: the original `__build` followed by `[[ $? -ne 0 ]] && return 1`
# returned status 1 even when the build SUCCEEDED, because the failed
# test was the function's last command. `|| return 1` fixes that.
__build || return 1
}
# Run an arbitrary `aws` CLI command inside the container, with a host
# directory mounted at /mnt/host. All function arguments are passed to `aws`.
cmd() {
local localDir
# Resolve the directory to mount: the configured default, or ask the user
# interactively (empty answer falls back to the current working directory).
if [[ ${USE_DEFAULT_DIRECTORY} -eq 1 ]]; then
localDir=${DEFAULT_DIRECTORY}
else
__msg "Specify a mount point to /mnt/host inside the docker container"
read localDir
if [[ ${localDir} = "" ]]; then
localDir="$(pwd -P)"
__warn "You have not provided a directory, using default directory path: ${localDir}"
__msg "Continue? [(y)/n]"
read CONTINUE
# Empty input (just Enter) counts as "yes"; anything but y/Y aborts.
if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
return 0
fi
fi
fi
__warn "Mounting ${localDir} to /mnt/host inside the docker container"
# Run as the invoking user so files created under /mnt/host keep host ownership.
# NOTE(review): CUSTOM_ARGS and the --env values are unquoted; they would
# word-split if they ever contained spaces — confirm that is acceptable.
docker run --rm \
-u $(id -u):$(id -g) \
--env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
--env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
--env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
-v "${localDir}":/mnt/host \
${CUSTOM_ARGS} \
${SERVICE_NAME} \
aws "$@"
}
upload() {
    # Upload a local file or directory to s3://<target> with the public ACL
    # and long-lived cache headers configured at the top of this script.
    #   $1      source path on the local filesystem (file or directory)
    #   $2      target s3 path (bucket[/prefix])
    #   $3...   extra arguments passed through to `aws s3 cp`
    local source="$1"
    shift
    local target="$1"
    shift
    local args="$@"
    [[ ${source} = "" ]] && __err "You must provide a source directory (filesystem) as the first parameter" && return 137
    [[ ${target} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    [[ ! -e ${source} ]] && __err "The source does not exist: ${source}" && return 6
    __warn "Uploading ${source} to s3://${target}"
    __msg "Cache expires on ${EXPIRES}"
    if [[ -f ${source} ]]; then
        # Single file: mount its parent directory and copy just that file.
        local dirName=$(dirname ${source})
        local filename=$(basename ${source})
        # BUGFIX: the copy path was "/mnt/host/$(unknown)", which tried to run
        # a nonexistent command `unknown`; use the computed basename instead.
        docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v "${dirName}":/mnt/host \
        ${SERVICE_NAME} \
        aws s3 cp "/mnt/host/${filename}" "s3://${target}" ${args} --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
    else
        # Directory: mount it whole and copy recursively.
        docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v "${source}":/mnt/host \
        ${SERVICE_NAME} \
        aws s3 cp /mnt/host "s3://${target}" ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
    fi
}
download() {
    # Download a file or directory tree from s3 to the local filesystem.
    #   $1      source s3 path (bucket[/key or prefix])
    #   $2      target path on the local filesystem
    #   $3...   extra arguments passed through to `aws s3 cp`
    local source="$1"
    shift
    local target="$1"
    shift
    local args="$@"
    # BUGFIX: the "(filesystem)"/"(s3)" labels were swapped in these messages
    # (here the SOURCE is the s3 side and the TARGET is the local filesystem).
    [[ ${source} = "" ]] && __err "You must provide a source (s3) as the first parameter" && return 137
    [[ ${target} = "" ]] && __err "You must provide a target (filesystem) as the second parameter" && return 137
    __warn "Downloading s3://${source} to ${target}"
    # Create the target if it is missing; ask whether it should be a file or a
    # directory, since that decides which docker mount strategy is used below.
    if [[ ! -e ${target} ]]; then
        __warn "The target ${target} does not exist. Is this a file (f) or a directory (d)? ..."
        read type
        if [[ ${type} = "f" ]]; then
            mkdir -p $(dirname ${target})
            touch ${target}
        elif [[ ${type} = "d" ]]; then
            mkdir -p ${target}
        else
            __err "The input is invalid, please use either 'f' or 'd'"
            # BUGFIX: previously fell through and ran docker against a
            # nonexistent mount target; abort instead.
            return 22
        fi
    fi
    if [[ -f ${target} ]]; then
        # File target: mount its parent directory and write just that file.
        local dirName=$(dirname ${target})
        local filename=$(basename ${target})
        # BUGFIX: the destination was "/mnt/host/$(unknown)", which tried to
        # run a nonexistent command `unknown`; use the computed basename.
        docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v "${dirName}":/mnt/host \
        ${SERVICE_NAME} \
        aws s3 cp "s3://${source}" "/mnt/host/${filename}" ${args}
    else
        # Directory target: mount it and download recursively.
        docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v "${target}":/mnt/host \
        ${SERVICE_NAME} \
        aws s3 cp "s3://${source}" /mnt/host ${args} --recursive
    fi
}
migrate() {
    # Recursively copy s3://<src> to s3://<dst>, re-applying the public ACL
    # and cache headers on every object (--metadata-directive REPLACE).
    #   $1      source s3 path, $2 target s3 path, $3... passed to `aws s3 cp`
    local src="$1"
    shift
    local dst="$1"
    shift
    local passthru="$@"
    [[ ${src} = "" ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
    [[ ${dst} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    __warn "Migrating s3://${src} to s3://${dst}"
    echo "Cache expires on ${EXPIRES}"
    # No host mount needed: the copy happens entirely s3-to-s3.
    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 cp "s3://${src}" "s3://${dst}" ${passthru} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
move() {
    # Recursively MOVE s3://<source_dir> to s3://<target_dir> (objects are
    # deleted from the source), re-applying the public ACL and cache headers.
    #   $1      source s3 path, $2 target s3 path, $3... passed to `aws s3 mv`
    local source_dir="$1"
    shift
    local target_dir="$1"
    shift
    local args="$@"
    [[ ${source_dir} = "" ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
    [[ ${target_dir} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    # BUGFIX: the message said "Migrating" (copy-pasted from migrate()); this
    # function performs `aws s3 mv`.
    __warn "Moving s3://${source_dir} to s3://${target_dir}"
    echo "Cache expires on ${EXPIRES}"
    docker run --rm \
    -u $(id -u):$(id -g) \
    --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
    --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
    --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
    ${SERVICE_NAME} \
    aws s3 mv "s3://${source_dir}" "s3://${target_dir}" ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
sync() {
    # Mirror s3://<src> into s3://<dst>: changed objects are copied and
    # objects absent from the source are removed (--delete); object metadata
    # is carried over unchanged (--metadata-directive COPY).
    #   $1      source s3 path, $2 target s3 path, $3... passed to `aws s3 sync`
    local src="$1"
    shift
    local dst="$1"
    shift
    local passthru="$@"
    [[ ${src} = "" ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
    [[ ${dst} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    __warn "Syncing s3://${src} to s3://${dst}"
    # NOTE(review): printed even though sync does not set any cache/expiry
    # headers — presumably copy-pasted from migrate(); confirm it is wanted.
    echo "Cache expires on ${EXPIRES}"
    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 sync "s3://${src}" "s3://${dst}" ${passthru} --delete --metadata-directive COPY
}
setPermissions() {
    # Re-apply the public ACL and cache headers to every object under an s3
    # path by copying the tree onto itself with --metadata-directive REPLACE.
    #   $1      s3 path; prompted for interactively when omitted
    local s3path="$1"
    if [[ -z ${s3path} ]]; then
        __warn "Please provide an s3 path: "
        read s3path
    fi
    docker run --rm \
        -u $(id -u):$(id -g) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 cp "s3://${s3path}" "s3://${s3path}" --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
# Open an interactive shell (sh) inside the aws-cli container with a host
# directory mounted at /mnt/host.
# NOTE(review): this function name shadows the shell builtin `exec`; within
# this script any bare `exec` now calls this function — confirm intended.
exec() {
local source_dir="$1"
# Resolve the directory to mount: explicit argument > configured default >
# current working directory (after interactive confirmation).
if [[ ${source_dir} = "" ]]; then
if [[ ${USE_DEFAULT_DIRECTORY} -eq 1 ]]; then
source_dir=${DEFAULT_DIRECTORY}
else
source_dir=$(pwd -P)
__warn "You have not provided a directory, using current path: ${source_dir}"
__msg "Continue? [(y)/n]"
read CONTINUE
# Empty input (just Enter) counts as "yes"; anything but y/Y aborts.
if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
return 0
fi
fi
fi
# -it allocates an interactive TTY; run as the invoking user so files
# created under /mnt/host keep host ownership.
docker run --rm -it \
-u $(id -u):$(id -g) \
--env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
--env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
--env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
-v "${source_dir}":/mnt/host \
${SERVICE_NAME} \
sh
}
# Dispatch: the first command-line argument names one of the functions above
# (init, cmd, upload, download, migrate, move, sync, setPermissions, exec);
# remaining arguments are passed to it. Propagate its exit status.
# NOTE(review): this executes argv[1] verbatim — assumes trusted callers.
"$@"
exit $?

View File

@ -34,19 +34,6 @@ stop() {
start() { start() {
__msg "Starting container... " 0 no __msg "Starting container... " 0 no
docker inspect "${SERVICE_NAME}" > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
init
CURRENT_STATUS=$(docker inspect --format "{{.State.Running}}" "${SERVICE_NAME}")
if [[ "${CURRENT_STATUS}" == "true" ]]; then
return 0
fi
fi
STATUS=$(docker start ${SERVICE_NAME} 2>&1) STATUS=$(docker start ${SERVICE_NAME} 2>&1)
if [[ $? -ne 0 ]]; then if [[ $? -ne 0 ]]; then
__err "${STATUS}" __err "${STATUS}"

View File

@ -18,10 +18,6 @@ source ${SCRIPT_PATH}/../common.shinc
source ${SCRIPT_PATH}/env.shinc 2> /dev/null source ${SCRIPT_PATH}/env.shinc 2> /dev/null
init() { init() {
makePrivate
}
makePrivate() {
docker pull ${IMAGE_NAME}:${IMAGE_VERSION} || return $? docker pull ${IMAGE_NAME}:${IMAGE_VERSION} || return $?
NETWORKS=(${PROXY_NETWORK}) NETWORKS=(${PROXY_NETWORK})
@ -42,26 +38,6 @@ makePrivate() {
__ask_to_start __ask_to_start
} }
makePublic() {
docker pull ${IMAGE_NAME}:${IMAGE_VERSION} || return $?
NETWORKS=(${PROXY_NETWORK})
__createNetworks
docker create \
--name ${SERVICE_NAME} \
-v /var/run/docker.sock:/tmp/docker.sock:ro \
-v dev-proxy:/root/.caddy \
--restart=unless-stopped \
-p 80:80 \
-p 443:443 \
--net ${PROXY_NETWORK} \
${IMAGE_NAME}:${IMAGE_VERSION}
[[ $? -ne 0 ]] && return 1
__ask_to_start
}
reload() { reload() {
docker exec ${SERVICE_NAME} pkill -USR1 caddy docker exec ${SERVICE_NAME} pkill -USR1 caddy

View File

@ -10,10 +10,3 @@ xpack:
roles: roles:
- role1 - role1
- role2 - role2
cluster:
routing:
allocation:
disk:
watermark:
low: 0.90
high: 0.95

View File

@ -5,7 +5,7 @@ SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="elasticsearch" IMAGE_NAME="elasticsearch"
IMAGE_VERSION="5.6.16" IMAGE_VERSION="5.6.16"
SERVICE_NAME=ys-elastic-search-5 SERVICE_NAME=ys-elastic-search
source ${SCRIPT_PATH}/../common.shinc source ${SCRIPT_PATH}/../common.shinc

View File

@ -2,6 +2,8 @@ ARG IMAGE_NAME
ARG IMAGE_VERSION ARG IMAGE_VERSION
FROM ${IMAGE_NAME}:${IMAGE_VERSION} FROM ${IMAGE_NAME}:${IMAGE_VERSION}
RUN bin/elasticsearch-plugin install x-pack
ADD elasticsearch.yml /usr/share/elasticsearch/config/ ADD elasticsearch.yml /usr/share/elasticsearch/config/
USER root USER root

View File

@ -10,10 +10,3 @@ xpack:
roles: roles:
- role1 - role1
- role2 - role2
cluster:
routing:
allocation:
disk:
watermark:
low: 0.90
high: 0.95

View File

@ -3,7 +3,7 @@
SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P ) SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="elasticsearch" IMAGE_NAME="elasticsearch"
IMAGE_VERSION="6.5.4" IMAGE_VERSION="6.4.3"
SERVICE_NAME=ys-elastic-search-6 SERVICE_NAME=ys-elastic-search-6
@ -17,7 +17,7 @@ source ${SCRIPT_PATH}/../common.shinc
DOMAIN_NAME="elastic6.loc" DOMAIN_NAME="elastic6.loc"
RELEASE_NAME="yoursystemcz/elasticsearch" RELEASE_NAME="yoursystemcz/elasticsearch"
RELEASE_VERSION="6.5.4" RELEASE_VERSION="6.4.3"
PERSISTENT_VOLUME="ys-elastic-6" PERSISTENT_VOLUME="ys-elastic-6"

View File

@ -2,6 +2,8 @@ ARG IMAGE_NAME
ARG IMAGE_VERSION ARG IMAGE_VERSION
FROM ${IMAGE_NAME}:${IMAGE_VERSION} FROM ${IMAGE_NAME}:${IMAGE_VERSION}
RUN bin/elasticsearch-plugin install x-pack
ADD elasticsearch.yml /usr/share/elasticsearch/config/ ADD elasticsearch.yml /usr/share/elasticsearch/config/
USER root USER root

View File

@ -10,10 +10,3 @@ xpack:
roles: roles:
- role1 - role1
- role2 - role2
cluster:
routing:
allocation:
disk:
watermark:
low: 0.90
high: 0.95