mirror of
https://github.com/willfarrell/docker-crontab.git
synced 2025-04-08 16:05:14 +02:00
Merge 7b249750e641286972f4ec7da9f95449e794b6a5 into be97512ec2241f362d7d254eb213053214850655
This commit is contained in:
commit
a19fdc132a
15
.github/FUNDING.yml
vendored
15
.github/FUNDING.yml
vendored
@ -1,12 +1,3 @@
|
|||||||
# These are supported funding model platforms
|
github: [SimplicityGuy]
|
||||||
|
ko_fi: robertwlodarczyk
|
||||||
github: [willfarrell]# Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
custom: [paypal.me/RWlodarczyk]
|
||||||
patreon: # Replace with a single Patreon username
|
|
||||||
open_collective: # Replace with a single Open Collective username
|
|
||||||
ko_fi: # Replace with a single Ko-fi username
|
|
||||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
|
||||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
|
||||||
liberapay: # Replace with a single Liberapay username
|
|
||||||
issuehunt: # Replace with a single IssueHunt username
|
|
||||||
otechie: # Replace with a single Otechie username
|
|
||||||
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
|
||||||
|
92
.github/workflows/build.yml
vendored
92
.github/workflows/build.yml
vendored
@ -1,49 +1,75 @@
|
|||||||
name: build
|
---
|
||||||
|
name: crontab
|
||||||
|
|
||||||
on:
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
tags:
|
pull_request:
|
||||||
- '*'
|
branches:
|
||||||
|
- main
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '0 0 * * *'
|
- cron: '0 1 * * 6'
|
||||||
|
|
||||||
|
env:
|
||||||
|
REGISTRY: ghcr.io
|
||||||
|
IMAGE_NAME: ${{ github.actor }}/crontab
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
multi:
|
build-crontab:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
-
|
- name: Checkout repository.
|
||||||
name: Checkout
|
uses: actions/checkout@v3
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
id: buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
-
|
|
||||||
name: Login to DockerHub
|
|
||||||
uses: docker/login-action@v1
|
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
submodules: true
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
|
||||||
|
|
||||||
- if: github.ref == 'refs/heads/main'
|
- name: Log in to the GitHub Container Registry.
|
||||||
name: Conditional(Set tag as `latest`)
|
if: github.event_name != 'pull_request'
|
||||||
run: echo "tag=willfarrell/crontab:latest" >> $GITHUB_ENV
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
registry: ${{ env.REGISTRY }}
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GHCR_TOKEN }}
|
||||||
|
|
||||||
- if: startsWith(github.ref, 'refs/tags/')
|
- name: Extract metadata (tags, labels) for Docker.
|
||||||
name: Conditional(Set tag as `{version}`)
|
id: meta
|
||||||
run: echo "tag=willfarrell/crontab:${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
|
uses: docker/metadata-action@v4
|
||||||
|
with:
|
||||||
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
|
tags: |
|
||||||
|
type=raw,value=latest,enable={{is_default_branch}}
|
||||||
|
type=ref,event=branch
|
||||||
|
type=ref,event=pr
|
||||||
|
type=schedule,pattern={{date 'YYYYMMDD'}}
|
||||||
|
|
||||||
-
|
- name: Set up QEMU.
|
||||||
name: Build and push
|
uses: docker/setup-qemu-action@v2
|
||||||
uses: docker/build-push-action@v2
|
|
||||||
|
- name: Set up Docker Buildx.
|
||||||
|
uses: docker/setup-buildx-action@v2
|
||||||
|
with:
|
||||||
|
platforms: linux/amd64
|
||||||
|
|
||||||
|
- name: Build and push Docker image to GitHub Container Registry.
|
||||||
|
uses: docker/build-push-action@v4
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: ./Dockerfile
|
platforms: linux/amd64
|
||||||
push: true
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
tags: |
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
${{ env.tag }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
|
provenance: true
|
||||||
|
sbom: true
|
||||||
|
|
||||||
|
- name: Send notification to Discord.
|
||||||
|
uses: sarisia/actions-status-discord@v1.12.0
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
39
.github/workflows/cleanup.yml
vendored
Normal file
39
.github/workflows/cleanup.yml
vendored
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
---
|
||||||
|
name: cleanup
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 0 15 * *'
|
||||||
|
|
||||||
|
env:
|
||||||
|
IMAGE_NAME: ${{ github.actor }}/docker-crontab
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
cleanup-docker-crontab:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Delete Docker images older than a month.
|
||||||
|
id: cleanup-images
|
||||||
|
uses: snok/container-retention-policy@v2
|
||||||
|
with:
|
||||||
|
account-type: personal
|
||||||
|
cut-off: One month ago UTC
|
||||||
|
keep-at-least: 4
|
||||||
|
skip-tags: latest
|
||||||
|
image-names: ${{ env.IMAGE_NAME }}
|
||||||
|
token: ${{ secrets.GHCR_TOKEN }}
|
||||||
|
|
||||||
|
- name: Send notification to Discord.
|
||||||
|
uses: sarisia/actions-status-discord@v1.12.0
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
title: ${{ env.IMAGE_NAME }}
|
||||||
|
description: |
|
||||||
|
succeded cleanup : ${{ steps.cleanup-images.outputs.deleted }}
|
||||||
|
failed cleanup : ${{ steps.cleanup-images.outputs.failed }}
|
||||||
|
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
8
.gitignore
vendored
8
.gitignore
vendored
@ -1,6 +1,10 @@
|
|||||||
.idea
|
.idea
|
||||||
*.iml
|
*.iml
|
||||||
|
|
||||||
config.json
|
|
||||||
.vscode
|
.vscode
|
||||||
.DS_Store
|
.DS_Store
|
||||||
|
|
||||||
|
config.json
|
||||||
|
config.working.json
|
||||||
|
|
||||||
|
jobs/
|
||||||
|
projects/
|
||||||
|
32
.pre-commit-config.yaml
Normal file
32
.pre-commit-config.yaml
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
repos:
|
||||||
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
|
rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b # frozen: v5.0.0
|
||||||
|
hooks:
|
||||||
|
- id: check-added-large-files
|
||||||
|
- id: check-executables-have-shebangs
|
||||||
|
- id: check-merge-conflict
|
||||||
|
- id: check-shebang-scripts-are-executable
|
||||||
|
- id: check-yaml
|
||||||
|
- id: detect-aws-credentials
|
||||||
|
- id: detect-private-key
|
||||||
|
- id: end-of-file-fixer
|
||||||
|
- id: mixed-line-ending
|
||||||
|
- id: trailing-whitespace
|
||||||
|
|
||||||
|
- repo: https://github.com/python-jsonschema/check-jsonschema
|
||||||
|
rev: 62833a79b57fcd1bc372b136911a0edca60c3dcb # frozen: 0.31.0
|
||||||
|
hooks:
|
||||||
|
- id: check-github-workflows
|
||||||
|
|
||||||
|
- repo: https://github.com/executablebooks/mdformat
|
||||||
|
rev: e20b1ac5acb8aba0b49d3a9109c6e6b58684ee83 # frozen: 0.7.21
|
||||||
|
hooks:
|
||||||
|
- id: mdformat
|
||||||
|
additional_dependencies:
|
||||||
|
- mdformat-gfm
|
||||||
|
|
||||||
|
- repo: https://github.com/hadolint/hadolint
|
||||||
|
rev: c3dc18df7a501f02a560a2cc7ba3c69a85ca01d3 # frozen: v2.13.1-beta
|
||||||
|
hooks:
|
||||||
|
- id: hadolint
|
69
Dockerfile
69
Dockerfile
@ -1,26 +1,67 @@
|
|||||||
FROM alpine:3.12 as rq-build
|
#hadolint ignore=DL3007
|
||||||
|
FROM alpine:latest AS builder
|
||||||
|
|
||||||
|
LABEL org.opencontainers.image.title="crontab builder" \
|
||||||
|
org.opencontainers.image.description="crontab builder" \
|
||||||
|
org.opencontainers.image.authors="robert@simplicityguy.com" \
|
||||||
|
org.opencontainers.image.source="https://github.com/SimplicityGuy/alertmanager-discord/blob/main/Dockerfile" \
|
||||||
|
org.opencontainers.image.licenses="MIT" \
|
||||||
|
org.opencontainers.image.created="$(date +'%Y-%m-%d')" \
|
||||||
|
org.opencontainers.image.base.name="docker.io/library/alpine"
|
||||||
|
|
||||||
ENV RQ_VERSION=1.0.2
|
ENV RQ_VERSION=1.0.2
|
||||||
WORKDIR /root/
|
WORKDIR /usr/bin/rq/
|
||||||
|
|
||||||
RUN apk --update add upx \
|
#hadolint ignore=DL3018
|
||||||
&& wget https://github.com/dflemstr/rq/releases/download/v${RQ_VERSION}/rq-v${RQ_VERSION}-x86_64-unknown-linux-musl.tar.gz \
|
RUN apk update --quiet && \
|
||||||
&& tar -xvf rq-v1.0.2-x86_64-unknown-linux-musl.tar.gz \
|
apk upgrade --quiet && \
|
||||||
&& upx --brute rq
|
apk add --quiet --no-cache \
|
||||||
|
upx && \
|
||||||
|
rm /var/cache/apk/* && \
|
||||||
|
wget --quiet https://github.com/dflemstr/rq/releases/download/v${RQ_VERSION}/rq-v${RQ_VERSION}-x86_64-unknown-linux-musl.tar.gz && \
|
||||||
|
tar -xvf rq-v${RQ_VERSION}-x86_64-unknown-linux-musl.tar.gz && \
|
||||||
|
upx --brute rq
|
||||||
|
|
||||||
FROM library/docker:stable
|
#hadolint ignore=DL3007
|
||||||
|
FROM docker:latest AS release
|
||||||
|
|
||||||
COPY --from=rq-build /root/rq /usr/local/bin
|
LABEL org.opencontainers.image.title="crontab" \
|
||||||
|
org.opencontainers.image.description="A docker job scheduler (aka crontab for docker)." \
|
||||||
|
org.opencontainers.image.authors="robert@simplicityguy.com" \
|
||||||
|
org.opencontainers.image.source="https://github.com/SimplicityGuy/docker-crontab/blob/main/Dockerfile" \
|
||||||
|
org.opencontainers.image.licenses="MIT" \
|
||||||
|
org.opencontainers.image.created="$(date +'%Y-%m-%d')" \
|
||||||
|
org.opencontainers.image.base.name="docker.io/library/docker"
|
||||||
|
|
||||||
ENV HOME_DIR=/opt/crontab
|
ENV HOME_DIR=/opt/crontab
|
||||||
RUN apk add --no-cache --virtual .run-deps gettext jq bash tini \
|
|
||||||
&& mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/projects \
|
|
||||||
&& adduser -S docker -D
|
|
||||||
|
|
||||||
COPY docker-entrypoint /
|
#hadolint ignore=DL3018
|
||||||
ENTRYPOINT ["/sbin/tini", "--", "/docker-entrypoint"]
|
RUN apk update --quiet && \
|
||||||
|
apk upgrade --quiet && \
|
||||||
|
apk add --quiet --no-cache \
|
||||||
|
bash \
|
||||||
|
coreutils \
|
||||||
|
curl \
|
||||||
|
gettext \
|
||||||
|
jq \
|
||||||
|
tini \
|
||||||
|
wget && \
|
||||||
|
apk add --quiet --no-cache --repository=https://dl-cdn.alpinelinux.org/alpine/edge/testing \
|
||||||
|
gosu && \
|
||||||
|
rm /var/cache/apk/* && \
|
||||||
|
rm -rf /etc/periodic /etc/crontabs/root && \
|
||||||
|
adduser -S docker -D && \
|
||||||
|
mkdir -p ${HOME_DIR}/jobs && \
|
||||||
|
chown -R docker:root ${HOME_DIR}
|
||||||
|
|
||||||
|
USER docker
|
||||||
|
|
||||||
|
COPY --from=builder /usr/bin/rq/rq /usr/local/bin
|
||||||
|
COPY entrypoint.sh /opt
|
||||||
|
|
||||||
|
ENTRYPOINT ["/usr/bin/gosu", "docker", "/sbin/tini", "--", "/opt/entrypoint.sh"]
|
||||||
|
|
||||||
HEALTHCHECK --interval=5s --timeout=3s \
|
HEALTHCHECK --interval=5s --timeout=3s \
|
||||||
CMD ps aux | grep '[c]rond' || exit 1
|
CMD ps aux | grep '[c]rond' || exit 1
|
||||||
|
|
||||||
CMD ["crond", "-f", "-d", "6", "-c", "/etc/crontabs"]
|
CMD ["crond", "-f", "-d", "7", "-c", "/etc/crontabs"]
|
||||||
|
97
README.md
97
README.md
@ -1,61 +1,59 @@
|
|||||||
# docker-crontab
|
# crontab
|
||||||
|
|
||||||
|
  [](https://github.com/pre-commit/pre-commit)
|
||||||
|
|
||||||
A simple wrapper over `docker` to all complex cron job to be run in other containers.
|
A simple wrapper over `docker` to all complex cron job to be run in other containers.
|
||||||
|
|
||||||
## Supported tags and Dockerfile links
|
|
||||||
|
|
||||||
- [`latest` (*Dockerfile*)](https://github.com/willfarrell/docker-crontab/blob/master/Dockerfile)
|
|
||||||
- [`1.0.0` (*Dockerfile*)](https://github.com/willfarrell/docker-crontab/blob/1.0.0/Dockerfile)
|
|
||||||
- [`0.6.0` (*Dockerfile*)](https://github.com/willfarrell/docker-crontab/blob/0.6.0/Dockerfile)
|
|
||||||
|
|
||||||
 [](http://microbadger.com/images/willfarrell/crontab "Get your own image badge on microbadger.com")
|
|
||||||
|
|
||||||
## Why?
|
## Why?
|
||||||
Yes, I'm aware of [mcuadros/ofelia](https://github.com/mcuadros/ofelia) (>250MB when this was created), it was the main inspiration for this project.
|
|
||||||
|
Yes, I'm aware of [mcuadros/ofelia](https://github.com/mcuadros/ofelia) (>250MB when this was created), it was the main inspiration for this project.
|
||||||
A great project, don't get me wrong. It was just missing certain key enterprise features I felt were required to support where docker is heading.
|
A great project, don't get me wrong. It was just missing certain key enterprise features I felt were required to support where docker is heading.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- Easy to read schedule syntax allowed.
|
- Easy to read schedule syntax allowed.
|
||||||
- Allows for comments, cause we all need friendly reminders of what `update_script.sh` actually does.
|
- Allows for comments, cause we all need friendly reminders of what `update_script.sh` actually does.
|
||||||
- Start an image using `image`.
|
- Start an image using `image`.
|
||||||
- Run command in a container using `container`.
|
- Run command in a container using `container`.
|
||||||
- Run command on a instances of a scaled container using `project`.
|
|
||||||
- Ability to trigger scripts in other containers on completion cron job using `trigger`.
|
- Ability to trigger scripts in other containers on completion cron job using `trigger`.
|
||||||
|
- Ability to share settings between cron jobs using `~~shared-settings` as a key.
|
||||||
|
|
||||||
## Config file
|
## Config file
|
||||||
|
|
||||||
The config file can be specifed in any of `json`, `toml`, or `yaml`, and can be defined as either an array or mapping (top-level keys will be ignored; can be useful for organizing commands)
|
The config file can be specified in any of `json`, `toml`, or `yaml`, and can be defined as either an array or mapping (top-level keys will be ignored; can be useful for organizing commands)
|
||||||
|
|
||||||
- `name`: Human readable name that will be used as the job filename. Will be converted into a slug. Optional.
|
- `name`: Human readable name that will be used as the job filename. Will be converted into a slug. Optional.
|
||||||
- `comment`: Comments to be included with crontab entry. Optional.
|
- `comment`: Comments to be included with crontab entry. Optional.
|
||||||
- `schedule`: Crontab schedule syntax as described in https://en.wikipedia.org/wiki/Cron. Ex `@hourly`, `@every 1h30m`, `* * * * *`. Required.
|
- `schedule`: Crontab schedule syntax as described in https://en.wikipedia.org/wiki/Cron. Examples: `@hourly`, `@every 1h30m`, `* * * * *`. Required.
|
||||||
- `command`: Command to be run on in crontab container or docker container/image. Required.
|
- `command`: Command to be run on in crontab container or docker container/image. Required.
|
||||||
- `image`: Docker images name (ex `library/alpine:3.5`). Optional.
|
- `image`: Docker images name (ex `library/alpine:3.5`). Optional.
|
||||||
- `project`: Docker Compose/Swarm project name. Optional, only applies when `contain` is included.
|
- `container`: Full container name. Ignored if `image` is included. Optional.
|
||||||
- `container`: Full container name or container alias if `project` is set. Ignored if `image` is included. Optional.
|
|
||||||
- `dockerargs`: Command line docker `run`/`exec` arguments for full control. Defaults to ` `.
|
- `dockerargs`: Command line docker `run`/`exec` arguments for full control. Defaults to ` `.
|
||||||
- `trigger`: Array of docker-crontab subset objects. Subset includes: `image`,`project`,`container`,`command`,`dockerargs`
|
- `trigger`: Array of docker-crontab subset objects. Sub-set includes: `image`, `container`, `command`, `dockerargs`
|
||||||
- `onstart`: Run the command on `crontab` container start, set to `true`. Optional, defaults to falsey.
|
- `onstart`: Run the command on `crontab` container start, set to `true`. Optional, defaults to false.
|
||||||
|
|
||||||
See [`config-samples`](config-samples) for examples.
|
See [`config-samples`](config-samples) for examples.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[{
|
{
|
||||||
"schedule":"@every 5m",
|
"logrotate": {
|
||||||
"command":"/usr/sbin/logrotate /etc/logrotate.conf"
|
"schedule":"@every 5m",
|
||||||
},{
|
"command":"/usr/sbin/logrotate /etc/logrotate.conf"
|
||||||
"comment":"Regenerate Certificate then reload nginx",
|
},
|
||||||
"schedule":"43 6,18 * * *",
|
"cert-regen": {
|
||||||
"command":"sh -c 'dehydrated --cron --out /etc/ssl --domain ${LE_DOMAIN} --challenge dns-01 --hook dehydrated-dns'",
|
"comment":"Regenerate Certificate then reload nginx",
|
||||||
"dockerargs":"--env-file /opt/crontab/env/letsencrypt.env -v webapp_nginx_tls_cert:/etc/ssl -v webapp_nginx_acme_challenge:/var/www/.well-known/acme-challenge",
|
"schedule":"43 6,18 * * *",
|
||||||
"image":"willfarrell/letsencrypt",
|
"command":"sh -c 'dehydrated --cron --out /etc/ssl --domain ${LE_DOMAIN} --challenge dns-01 --hook dehydrated-dns'",
|
||||||
"trigger":[{
|
"dockerargs":"--it --env-file /opt/crontab/env/letsencrypt.env",
|
||||||
"command":"sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'",
|
"volumes":["webapp_nginx_tls_cert:/etc/ssl", "webapp_nginx_acme_challenge:/var/www/.well-known/acme-challenge"],
|
||||||
"project":"conduit",
|
"image":"willfarrell/letsencrypt",
|
||||||
"container":"nginx"
|
"trigger":[{
|
||||||
}],
|
"command":"sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'",
|
||||||
"onstart":true
|
"container":"nginx"
|
||||||
}]
|
}],
|
||||||
|
"onstart":true
|
||||||
|
}
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## How to use
|
## How to use
|
||||||
@ -75,27 +73,26 @@ docker run -d \
|
|||||||
### Use with docker-compose
|
### Use with docker-compose
|
||||||
|
|
||||||
1. Figure out which network name used for your docker-compose containers
|
1. Figure out which network name used for your docker-compose containers
|
||||||
* use `docker network ls` to see existing networks
|
- use `docker network ls` to see existing networks
|
||||||
* if your `docker-compose.yml` is in `my_dir` directory, you probably has network `my_dir_default`
|
- if your `docker-compose.yml` is in `my_dir` directory, you probably has network `my_dir_default`
|
||||||
* otherwise [read the docker-compose docs](https://docs.docker.com/compose/networking/)
|
- otherwise [read the docker-compose docs](https://docs.docker.com/compose/networking/)
|
||||||
2. Add `dockerargs` to your docker-crontab `config.json`
|
1. Add `dockerargs` to your docker-crontab `config.json`
|
||||||
* use `--network NETWORK_NAME` to connect new container into docker-compose network
|
- use `--network NETWORK_NAME` to connect new container into docker-compose network
|
||||||
* use `--rm --name NAME` to use named container
|
- use `--name NAME` to use named container
|
||||||
* e.g. `"dockerargs": "--network my_dir_default --rm --name my-best-cron-job"`
|
- e.g. `"dockerargs": "--it"`
|
||||||
|
|
||||||
### Dockerfile
|
### Dockerfile
|
||||||
|
|
||||||
```Dockerfile
|
```Dockerfile
|
||||||
FROM willfarrell/crontab
|
FROM registry.gitlab.com/simplicityguy/docker/crontab
|
||||||
|
|
||||||
COPY config.json ${HOME_DIR}/
|
COPY config.json ${HOME_DIR}/
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Logrotate Dockerfile
|
### Logrotate Dockerfile
|
||||||
|
|
||||||
```Dockerfile
|
```Dockerfile
|
||||||
FROM willfarrell/crontab
|
FROM registry.gitlab.com/simplicityguy/docker/crontab
|
||||||
|
|
||||||
RUN apk add --no-cache logrotate
|
RUN apk add --no-cache logrotate
|
||||||
RUN echo "*/5 * * * * /usr/sbin/logrotate /etc/logrotate.conf" >> /etc/crontabs/logrotate
|
RUN echo "*/5 * * * * /usr/sbin/logrotate /etc/logrotate.conf" >> /etc/crontabs/logrotate
|
||||||
@ -103,17 +100,3 @@ COPY logrotate.conf /etc/logrotate.conf
|
|||||||
|
|
||||||
CMD ["crond", "-f"]
|
CMD ["crond", "-f"]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Logging - In Dev
|
|
||||||
|
|
||||||
All `stdout` is captured, formatted, and saved to `/var/log/crontab/jobs.log`. Set `LOG_FILE` to `/dev/null` to disable logging.
|
|
||||||
|
|
||||||
example: `e6ced859-1563-493b-b1b1-5a190b29e938 2017-06-18T01:27:10+0000 [info] Start Cronjob **map-a-vol** map a volume`
|
|
||||||
|
|
||||||
grok: `CRONTABLOG %{DATA:request_id} %{TIMESTAMP_ISO8601:timestamp} \[%{LOGLEVEL:severity}\] %{GREEDYDATA:message}`
|
|
||||||
|
|
||||||
## TODO
|
|
||||||
- [ ] Have ability to auto regenerate crontab on file change (signal HUP?)
|
|
||||||
- [ ] Run commands on host machine (w/ --privileged?)
|
|
||||||
- [ ] Write tests
|
|
||||||
- [ ] Setup TravisCI
|
|
||||||
|
@ -47,4 +47,3 @@ onstart = true
|
|||||||
command = "sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'"
|
command = "sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'"
|
||||||
project = "conduit"
|
project = "conduit"
|
||||||
container = "nginx"
|
container = "nginx"
|
||||||
|
|
||||||
|
@ -1,284 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then
|
|
||||||
export DOCKER_HOST='tcp://docker:2375'
|
|
||||||
fi
|
|
||||||
|
|
||||||
# for local testing only
|
|
||||||
#HOME_DIR=.
|
|
||||||
|
|
||||||
if [ "${LOG_FILE}" == "" ]; then
|
|
||||||
LOG_DIR=/var/log/crontab
|
|
||||||
LOG_FILE=${LOG_DIR}/jobs.log
|
|
||||||
mkdir -p ${LOG_DIR}
|
|
||||||
touch ${LOG_FILE}
|
|
||||||
fi
|
|
||||||
|
|
||||||
get_config() {
|
|
||||||
if [ -f "${HOME_DIR}/config.json" ]; then
|
|
||||||
jq 'map(.)' ${HOME_DIR}/config.json > ${HOME_DIR}/config.working.json
|
|
||||||
elif [ -f "${HOME_DIR}/config.toml" ]; then
|
|
||||||
rq -t <<< $(cat ${HOME_DIR}/config.toml) | jq 'map(.)' > ${HOME_DIR}/config.json
|
|
||||||
elif [ -f "${HOME_DIR}/config.yml" ]; then
|
|
||||||
rq -y <<< $(cat ${HOME_DIR}/config.yml) | jq 'map(.)' > ${HOME_DIR}/config.json
|
|
||||||
elif [ -f "${HOME_DIR}/config.yaml" ]; then
|
|
||||||
rq -y <<< $(cat ${HOME_DIR}/config.yaml) | jq 'map(.)' > ${HOME_DIR}/config.json
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
DOCKER_SOCK=/var/run/docker.sock
|
|
||||||
CRONTAB_FILE=/etc/crontabs/docker
|
|
||||||
|
|
||||||
# Ensure dir exist - in case of volume mapping
|
|
||||||
mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/projects
|
|
||||||
|
|
||||||
ensure_docker_socket_accessible() {
|
|
||||||
if ! grep -q "^docker:" /etc/group; then
|
|
||||||
# Ensure 'docker' user has permissions for docker socket (without changing permissions)
|
|
||||||
DOCKER_GID=$(stat -c '%g' ${DOCKER_SOCK})
|
|
||||||
if [ "${DOCKER_GID}" != "0" ]; then
|
|
||||||
if ! grep -qE "^[^:]+:[^:]+:${DOCKER_GID}:" /etc/group; then
|
|
||||||
# No group with such gid exists - create group docker
|
|
||||||
addgroup -g ${DOCKER_GID} docker
|
|
||||||
adduser docker docker
|
|
||||||
else
|
|
||||||
# Group with such gid exists - add user "docker" to this group
|
|
||||||
DOCKER_GROUP_NAME=`getent group "${DOCKER_GID}" | awk -F':' '{{ print $1 }}'`
|
|
||||||
adduser docker $DOCKER_GROUP_NAME
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
# Docker socket belongs to "root" group - add user "docker" to this group
|
|
||||||
adduser docker root
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
slugify() {
|
|
||||||
echo "$@" | iconv -t ascii | sed -r s/[~\^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr A-Z a-z
|
|
||||||
}
|
|
||||||
|
|
||||||
make_image_cmd() {
|
|
||||||
DOCKERARGS=$(echo ${1} | jq -r .dockerargs)
|
|
||||||
VOLUMES=$(echo ${1} | jq -r '.volumes | map(" -v " + .) | join("")')
|
|
||||||
PORTS=$(echo ${1} | jq -r '.ports | map(" -p " + .) | join("")')
|
|
||||||
EXPOSE=$(echo ${1} | jq -r '.expose | map(" --expose " + .) | join("")')
|
|
||||||
# We'll add name in, if it exists
|
|
||||||
NAME=$(echo ${1} | jq -r 'select(.name != null) | .name')
|
|
||||||
NETWORK=$(echo ${1} | jq -r 'select(.network != null) | .network')
|
|
||||||
ENVIRONMENT=$(echo ${1} | jq -r '.environment | map(" -e " + .) | join("")')
|
|
||||||
# echo ${1} | jq -r '.environment | join("\n")' > ${PWD}/${NAME}.env
|
|
||||||
# ENVIRONMENT=" --env-file ${PWD}/${NAME}.env"
|
|
||||||
if [ "${DOCKERARGS}" == "null" ]; then DOCKERARGS=; fi
|
|
||||||
if [ ! -z "${NAME}" ]; then DOCKERARGS="${DOCKERARGS} --rm --name ${NAME} "; fi
|
|
||||||
if [ ! -z "${NETWORK}" ]; then DOCKERARGS="${DOCKERARGS} --network ${NETWORK} "; fi
|
|
||||||
if [ ! -z "${VOLUMES}" ]; then DOCKERARGS="${DOCKERARGS}${VOLUMES}"; fi
|
|
||||||
if [ ! -z "${ENVIRONMENT}" ]; then DOCKERARGS="${DOCKERARGS}${ENVIRONMENT}"; fi
|
|
||||||
if [ ! -z "${PORTS}" ]; then DOCKERARGS="${DOCKERARGS}${PORTS}"; fi
|
|
||||||
if [ ! -z "${EXPOSE}" ]; then DOCKERARGS="${DOCKERARGS}${EXPOSE}"; fi
|
|
||||||
IMAGE=$(echo ${1} | jq -r .image | envsubst)
|
|
||||||
TMP_COMMAND=$(echo ${1} | jq -r .command)
|
|
||||||
echo "docker run ${DOCKERARGS} ${IMAGE} ${TMP_COMMAND}"
|
|
||||||
}
|
|
||||||
|
|
||||||
make_container_cmd() {
|
|
||||||
DOCKERARGS=$(echo ${1} | jq -r .dockerargs)
|
|
||||||
if [ "${DOCKERARGS}" == "null" ]; then DOCKERARGS=; fi
|
|
||||||
SCRIPT_NAME=$(echo ${1} | jq -r .name)
|
|
||||||
SCRIPT_NAME=$(slugify $SCRIPT_NAME)
|
|
||||||
PROJECT=$(echo ${1} | jq -r .project)
|
|
||||||
CONTAINER=$(echo ${1} | jq -r .container | envsubst)
|
|
||||||
TMP_COMMAND=$(echo ${1} | jq -r .command)
|
|
||||||
|
|
||||||
if [ "${PROJECT}" != "null" ]; then
|
|
||||||
|
|
||||||
# create bash script to detect all running containers
|
|
||||||
if [ "${SCRIPT_NAME}" == "null" ]; then
|
|
||||||
SCRIPT_NAME=$(cat /proc/sys/kernel/random/uuid)
|
|
||||||
fi
|
|
||||||
cat << EOF > ${HOME_DIR}/projects/${SCRIPT_NAME}.sh
|
|
||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
CONTAINERS=\$(docker ps --format '{{.Names}}' | grep -E "^${PROJECT}_${CONTAINER}.[0-9]+")
|
|
||||||
for CONTAINER_NAME in \$CONTAINERS; do
|
|
||||||
docker exec ${DOCKERARGS} \${CONTAINER_NAME} ${TMP_COMMAND}
|
|
||||||
done
|
|
||||||
EOF
|
|
||||||
echo "/bin/bash ${HOME_DIR}/projects/${SCRIPT_NAME}.sh"
|
|
||||||
# cat "/bin/bash ${HOME_DIR}/projects/${SCRIPT_NAME}.sh"
|
|
||||||
else
|
|
||||||
echo "docker exec ${DOCKERARGS} ${CONTAINER} ${TMP_COMMAND}"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
#make_host_cmd() {
|
|
||||||
# HOST_BINARY=$(echo ${1} | jq -r .host)
|
|
||||||
# TMP_COMMAND=$(echo ${1} | jq -r .command)
|
|
||||||
# echo "${HOST_BINARY} ${TMP_COMMAND}"
|
|
||||||
#}
|
|
||||||
|
|
||||||
make_cmd() {
|
|
||||||
if [ "$(echo ${1} | jq -r .image)" != "null" ]; then
|
|
||||||
make_image_cmd "$1"
|
|
||||||
elif [ "$(echo ${1} | jq -r .container)" != "null" ]; then
|
|
||||||
make_container_cmd "$1"
|
|
||||||
#elif [ "$(echo ${1} | jq -r .host)" != "null" ]; then
|
|
||||||
# make_host_cmd "$1"
|
|
||||||
else
|
|
||||||
echo ${1} | jq -r .command
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
parse_schedule() {
|
|
||||||
case $1 in
|
|
||||||
"@yearly")
|
|
||||||
echo "0 0 1 1 *"
|
|
||||||
;;
|
|
||||||
"@annually")
|
|
||||||
echo "0 0 1 1 *"
|
|
||||||
;;
|
|
||||||
"@monthly")
|
|
||||||
echo "0 0 1 * *"
|
|
||||||
;;
|
|
||||||
"@weekly")
|
|
||||||
echo "0 0 * * 0"
|
|
||||||
;;
|
|
||||||
"@daily")
|
|
||||||
echo "0 0 * * *"
|
|
||||||
;;
|
|
||||||
"@midnight")
|
|
||||||
echo "0 0 * * *"
|
|
||||||
;;
|
|
||||||
"@hourly")
|
|
||||||
echo "0 * * * *"
|
|
||||||
;;
|
|
||||||
"@every")
|
|
||||||
TIME=$2
|
|
||||||
TOTAL=0
|
|
||||||
|
|
||||||
M=$(echo $TIME | grep -o '[0-9]\+m')
|
|
||||||
H=$(echo $TIME | grep -o '[0-9]\+h')
|
|
||||||
D=$(echo $TIME | grep -o '[0-9]\+d')
|
|
||||||
|
|
||||||
if [ -n "${M}" ]; then
|
|
||||||
TOTAL=$(($TOTAL + ${M::-1}))
|
|
||||||
fi
|
|
||||||
if [ -n "${H}" ]; then
|
|
||||||
TOTAL=$(($TOTAL + ${H::-1} * 60))
|
|
||||||
fi
|
|
||||||
if [ -n "${D}" ]; then
|
|
||||||
TOTAL=$(($TOTAL + ${D::-1} * 60 * 24))
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "*/${TOTAL} * * * *"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "${@}"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
function build_crontab() {
|
|
||||||
|
|
||||||
rm -rf ${CRONTAB_FILE}
|
|
||||||
|
|
||||||
ONSTART=()
|
|
||||||
while read i ; do
|
|
||||||
|
|
||||||
SCHEDULE=$(jq -r .[$i].schedule ${CONFIG} | sed 's/\*/\\*/g')
|
|
||||||
if [ "${SCHEDULE}" == "null" ]; then
|
|
||||||
echo "Schedule Missing: $(jq -r .[$i].schedule ${CONFIG})"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
SCHEDULE=$(parse_schedule ${SCHEDULE} | sed 's/\\//g')
|
|
||||||
|
|
||||||
if [ "$(jq -r .[$i].command ${CONFIG})" == "null" ]; then
|
|
||||||
echo "Command Missing: $(jq -r .[$i].command ${CONFIG})"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
COMMENT=$(jq -r .[$i].comment ${CONFIG})
|
|
||||||
if [ "${COMMENT}" != "null" ]; then
|
|
||||||
echo "# ${COMMENT}" >> ${CRONTAB_FILE}
|
|
||||||
fi
|
|
||||||
|
|
||||||
SCRIPT_NAME=$(jq -r .[$i].name ${CONFIG})
|
|
||||||
SCRIPT_NAME=$(slugify $SCRIPT_NAME)
|
|
||||||
if [ "${SCRIPT_NAME}" == "null" ]; then
|
|
||||||
SCRIPT_NAME=$(cat /proc/sys/kernel/random/uuid)
|
|
||||||
fi
|
|
||||||
|
|
||||||
COMMAND="/bin/bash ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh"
|
|
||||||
cat << EOF > ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh
|
|
||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# TODO find workaround
|
|
||||||
# [error] write /dev/stdout: broken pipe <- when using docker commands
|
|
||||||
#UUID=\$(cat /proc/sys/kernel/random/uuid)
|
|
||||||
#exec > >(read message; echo "\${UUID} \$(date -Iseconds) [info] \$message" | tee -a ${LOG_FILE} )
|
|
||||||
#exec 2> >(read message; echo "\${UUID} \$(date -Iseconds) [error] \$message" | tee -a ${LOG_FILE} >&2)
|
|
||||||
|
|
||||||
echo "Start Cronjob **${SCRIPT_NAME}** ${COMMENT}"
|
|
||||||
|
|
||||||
$(make_cmd "$(jq -c .[$i] ${CONFIG})")
|
|
||||||
EOF
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if [ "$(jq -r .[$i].trigger ${CONFIG})" != "null" ]; then
|
|
||||||
while read j ; do
|
|
||||||
if [ "$(jq .[$i].trigger[$j].command ${CONFIG})" == "null" ]; then
|
|
||||||
echo "Command Missing: $(jq -r .[$i].trigger[$j].command ${CONFIG})"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
#TRIGGER_COMMAND=$(make_cmd "$(jq -c .[$i].trigger[$j] ${CONFIG})")
|
|
||||||
echo "$(make_cmd "$(jq -c .[$i].trigger[$j] ${CONFIG})")" >> ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh
|
|
||||||
#COMMAND="${COMMAND} && ${TRIGGER_COMMAND}"
|
|
||||||
done < <(jq -r '.['$i'].trigger|keys[]' ${CONFIG})
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "echo \"End Cronjob **${SCRIPT_NAME}** ${COMMENT}\"" >> ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh
|
|
||||||
|
|
||||||
echo "${SCHEDULE} ${COMMAND}" >> ${CRONTAB_FILE}
|
|
||||||
|
|
||||||
if [ "$(jq -r .[$i].onstart ${CONFIG})" == "true" ]; then
|
|
||||||
ONSTART+=("${COMMAND}")
|
|
||||||
fi
|
|
||||||
done < <(jq -r '.|keys[]' ${CONFIG})
|
|
||||||
|
|
||||||
echo "##### crontab generation complete #####"
|
|
||||||
cat ${CRONTAB_FILE}
|
|
||||||
|
|
||||||
echo "##### run commands with onstart #####"
|
|
||||||
for COMMAND in "${ONSTART[@]}"; do
|
|
||||||
echo "${COMMAND}"
|
|
||||||
${COMMAND} &
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
ensure_docker_socket_accessible
|
|
||||||
|
|
||||||
start_app() {
    # Resolve which config file to use (preferring the pre-processed
    # config.working.json), build the crontab when launched as crond, then
    # exec the requested command so it replaces this shell as PID 1.
    get_config
    if [ -f "${HOME_DIR}/config.working.json" ]; then
        export CONFIG=${HOME_DIR}/config.working.json
    elif [ -f "${HOME_DIR}/config.json" ]; then
        export CONFIG=${HOME_DIR}/config.json
    else
        echo "NO CONFIG FILE FOUND"
    fi
    if [ "$1" = "crond" ]; then
        # Quoting ${CONFIG} matters here: unquoted, an unset CONFIG collapses
        # the expression to `[ -f ]`, which is TRUE (a one-argument string
        # test on "-f"), and build_crontab would run with no config at all.
        if [ -f "${CONFIG}" ]; then
            build_crontab
        else
            echo "Unable to find ${CONFIG}"
        fi
    fi
    echo "$@"
    exec "$@"
}
|
|
||||||
# Hand the container's command line (typically "crond ...") to start_app.
start_app "$@"
|
238
entrypoint.sh
Executable file
238
entrypoint.sh
Executable file
@ -0,0 +1,238 @@
|
|||||||
|
#!/bin/bash
set -e

# Default crontab target; replaced below when running in test mode.
CRONTAB_FILE=/etc/crontabs/docker

# HOME_DIR must be provided by the image/environment; TEST_MODE substitutes
# a throwaway directory so the script can be exercised standalone.
if [ -z "${HOME_DIR}" ] && [ -n "${TEST_MODE}" ]; then
    HOME_DIR=/tmp/crontab-docker-testing
    CRONTAB_FILE=${HOME_DIR}/test
elif [ -z "${HOME_DIR}" ]; then
    echo "HOME_DIR not set."
    exit 1
fi

# Ensure dir exist - in case of volume mapping.
mkdir -p "${HOME_DIR}"/jobs

# Derive DOCKER_HOST from a legacy docker-link env var when unset.
# FIX: the original used `[ -a "${DOCKER_PORT_2375_TCP}" ]`, which tests for
# the existence of a *file* named after the URL and is therefore always
# false; `-n` (non-empty string) is the intended check.
if [ -z "${DOCKER_HOST}" ] && [ -n "${DOCKER_PORT_2375_TCP}" ]; then
    export DOCKER_HOST="tcp://docker:2375"
fi
|
||||||
|
|
||||||
|
normalize_config() {
    # Convert the user-supplied config (JSON, TOML, or YAML — first match
    # wins) to JSON, then write a normalized copy to config.working.json:
    # the "~~shared-settings" entry is deleted and merged into every job,
    # and each job gains a "name" field taken from its key.
    JSON_CONFIG={}
    if [ -f "${HOME_DIR}/config.json" ]; then
        JSON_CONFIG="$(cat "${HOME_DIR}"/config.json)"
    elif [ -f "${HOME_DIR}/config.toml" ]; then
        # NOTE(review): assumes `rq -t` reads TOML on stdin and emits JSON —
        # confirm against the installed rq version.
        JSON_CONFIG="$(rq -t <<< "$(cat "${HOME_DIR}"/config.toml)")"
    elif [ -f "${HOME_DIR}/config.yml" ]; then
        JSON_CONFIG="$(rq -y <<< "$(cat "${HOME_DIR}"/config.yml)")"
    elif [ -f "${HOME_DIR}/config.yaml" ]; then
        JSON_CONFIG="$(rq -y <<< "$(cat "${HOME_DIR}"/config.yaml)")"
    fi

    # -S sorts keys for stable, diffable output. When no config file was
    # found, the input is "{}" and an empty job map is written.
    jq -S -r '."~~shared-settings" as $shared | del(."~~shared-settings") | to_entries | map_values(.value + { name: .key } + $shared)' <<< "${JSON_CONFIG}" > "${HOME_DIR}"/config.working.json
}
|
||||||
|
|
||||||
|
slugify() {
    # Turn an arbitrary job name into a safe lowercase filename slug:
    # strip ~ and ^, collapse every other non-alphanumeric run to "-",
    # trim leading/trailing hyphens, then lowercase.
    # FIX: the sed scripts are now quoted — unquoted, patterns such as
    # [^a-zA-Z0-9]+ are glob patterns and subject to pathname expansion
    # if a matching file happens to exist in the working directory.
    echo "${@}" | iconv -t ascii | sed -r 's/[~^]+//g' | sed -r 's/[^a-zA-Z0-9]+/-/g' | sed -r 's/^-+|-+$//g' | tr '[:upper:]' '[:lower:]'
}
|
||||||
|
|
||||||
|
make_image_cmd() {
    # Build a `docker run` invocation from a job definition (JSON in $1).
    # Produces no output when the definition lacks an "image" field.
    local spec="${1}"
    local run_args env_flags expose_flags container_name network_flags
    local publish_flags volume_flags image cmd

    run_args=$(jq -r .dockerargs <<< "${spec}")
    env_flags=$(jq -r 'select(.environment != null) | .environment | map("--env " + .) | join(" ")' <<< "${spec}")
    expose_flags=$(jq -r 'select(.expose != null) | .expose | map("--expose " + .) | join(" ")' <<< "${spec}")
    container_name=$(jq -r 'select(.name != null) | .name' <<< "${spec}")
    network_flags=$(jq -r 'select(.networks != null) | .networks | map("--network " + .) | join(" ")' <<< "${spec}")
    publish_flags=$(jq -r 'select(.ports != null) | .ports | map("--publish " + .) | join(" ")' <<< "${spec}")
    volume_flags=$(jq -r 'select(.volumes != null) | .volumes | map("--volume " + .) | join(" ")' <<< "${spec}")

    # Assemble the flag string; each present group contributes its flags
    # followed by a single trailing space.
    if [ "${run_args}" == "null" ]; then run_args=; fi
    run_args+=" "
    if [ -n "${env_flags}" ]; then run_args+="${env_flags} "; fi
    if [ -n "${expose_flags}" ]; then run_args+="${expose_flags} "; fi
    if [ -n "${container_name}" ]; then run_args+="--name ${container_name} "; fi
    if [ -n "${network_flags}" ]; then run_args+="${network_flags} "; fi
    if [ -n "${publish_flags}" ]; then run_args+="${publish_flags} "; fi
    if [ -n "${volume_flags}" ]; then run_args+="${volume_flags} "; fi

    # The image name may reference environment variables ($VAR) — expand them.
    image=$(jq -r .image <<< "${spec}" | envsubst)
    if [ "${image}" == "null" ]; then return; fi

    cmd=$(jq -r .command <<< "${spec}")

    echo "docker run ${run_args} ${image} ${cmd}"
}
|
||||||
|
|
||||||
|
make_container_cmd() {
    # Build a `docker exec` invocation from a job definition (JSON in $1).
    # Emits nothing unless both "container" and "command" are present.
    local spec="${1}"
    local exec_args target cmd

    exec_args=$(jq -r .dockerargs <<< "${spec}")
    if [ "${exec_args}" == "null" ]; then exec_args=; fi

    # The container name may reference environment variables — expand them.
    target=$(jq -r .container <<< "${spec}" | envsubst)
    if [ "${target}" == "null" ]; then return; fi

    cmd=$(jq -r .command <<< "${spec}")
    if [ "${cmd}" == "null" ]; then return; fi

    echo "docker exec ${exec_args} ${target} ${cmd}"
}
|
||||||
|
|
||||||
|
make_cmd() {
    # Dispatch a job definition (JSON in $1) to the appropriate builder:
    # "image" -> docker run, "container" -> docker exec; otherwise the raw
    # "command" string is emitted as-is.
    local spec="${1}"
    if [ "$(jq -r .image <<< "${spec}")" != "null" ]; then
        make_image_cmd "${spec}"
    elif [ "$(jq -r .container <<< "${spec}")" != "null" ]; then
        make_container_cmd "${spec}"
    else
        jq -r .command <<< "${spec}"
    fi
}
|
||||||
|
|
||||||
|
parse_schedule() {
    # Translate cron "nickname" schedules (@daily, @hourly, ...) into
    # standard five-field cron expressions. "@random" picks random values
    # for the fields requested via @m/@h/@d. Anything else passes through
    # unchanged.
    #
    # FIX: IFS is declared `local` — the original bare `IFS=" "` assignment
    # leaked a modified IFS to the caller for the rest of the process.
    local IFS=" "
    local params
    read -a params <<< "$@"

    case ${params[0]} in
        "@yearly" | "@annually")
            echo "0 0 1 1 *"
            ;;
        "@monthly")
            echo "0 0 1 * *"
            ;;
        "@weekly")
            echo "0 0 * * 0"
            ;;
        "@daily" | "@midnight")
            echo "0 0 * * *"
            ;;
        "@hourly")
            echo "0 * * * *"
            ;;
        "@random")
            # Wildcards by default; only the requested fields get randomized.
            local M="*" H="*" D="*"
            for when in "${params[@]:1}"
            do
                case $when in
                    "@m")
                        M=$(shuf -i 0-59 -n 1)   # minute 0-59
                        ;;
                    "@h")
                        H=$(shuf -i 0-23 -n 1)   # hour 0-23
                        ;;
                    "@d")
                        D=$(shuf -i 0-6 -n 1)    # day-of-week 0-6
                        ;;
                esac
            done

            echo "${M} ${H} * * ${D}"
            ;;
        *)
            echo "${params[@]}"
            ;;
    esac
}
|
||||||
|
|
||||||
|
build_crontab() {
    # Generate ${CRONTAB_FILE} plus one executable wrapper script per job
    # under ${HOME_DIR}/jobs, from the normalized JSON config in ${CONFIG}.
    # Jobs flagged "onstart": true are launched immediately (backgrounded)
    # after the crontab is written.
    rm -rf "${CRONTAB_FILE}"

    ONSTART=()
    while read -r i ; do
        KEY=$(jq -r .["$i"] "${CONFIG}")

        # Escape '*' so parse_schedule's word-splitting cannot glob-expand
        # it; the backslashes are stripped again after parsing.
        SCHEDULE=$(echo "${KEY}" | jq -r '.schedule' | sed 's/\*/\\*/g')
        if [ "${SCHEDULE}" == "null" ]; then
            # FIX: message previously lacked its closing quote.
            echo "'schedule' missing: '${KEY}'"
            continue
        fi
        SCHEDULE=$(parse_schedule "${SCHEDULE}" | sed 's/\\//g')

        COMMAND=$(echo "${KEY}" | jq -r '.command')
        if [ "${COMMAND}" == "null" ]; then
            echo "'command' missing: '${KEY}'"
            continue
        fi

        COMMENT=$(echo "${KEY}" | jq -r '.comment')

        # Script filename: slugified job name, or a random UUID when the
        # job is unnamed.
        SCRIPT_NAME=$(echo "${KEY}" | jq -r '.name')
        SCRIPT_NAME=$(slugify "${SCRIPT_NAME}")
        if [ "${SCRIPT_NAME}" == "null" ]; then
            SCRIPT_NAME=$(cat /proc/sys/kernel/random/uuid)
        fi

        CRON_COMMAND=$(make_cmd "${KEY}")

        SCRIPT_PATH="${HOME_DIR}/jobs/${SCRIPT_NAME}.sh"
        touch "${SCRIPT_PATH}"
        chmod +x "${SCRIPT_PATH}"

        {
            # FIX: was `echo "#\!/usr/bin/env bash"`, which wrote a literal
            # backslash into the shebang and broke every generated script
            # ('!' needs no escaping in non-interactive shells).
            echo "#!/usr/bin/env bash"
            echo "set -e"
            echo ""
            echo "echo \"start cron job __${SCRIPT_NAME}__\""
            echo "${CRON_COMMAND}"
        } > "${SCRIPT_PATH}"

        # Append any trigger commands to run after the main command.
        TRIGGER=$(echo "${KEY}" | jq -r '.trigger')
        if [ "${TRIGGER}" != "null" ]; then
            while read -r j ; do
                TRIGGER_KEY=$(echo "${KEY}" | jq -r .trigger["$j"])

                TRIGGER_COMMAND=$(echo "${TRIGGER_KEY}" | jq -r '.command')
                if [ "${TRIGGER_COMMAND}" == "null" ]; then
                    continue
                fi

                make_cmd "${TRIGGER_KEY}" >> "${SCRIPT_PATH}"
            done < <(echo "${KEY}" | jq -r '.trigger | keys[]')
        fi

        echo "echo \"end cron job __${SCRIPT_NAME}__\"" >> "${SCRIPT_PATH}"

        # Crontab entry: optional comment line, then schedule + script path.
        if [ "${COMMENT}" != "null" ]; then
            echo "# ${COMMENT}" >> "${CRONTAB_FILE}"
        fi
        echo "${SCHEDULE} ${SCRIPT_PATH}" >> "${CRONTAB_FILE}"

        ONSTART_COMMAND=$(echo "${KEY}" | jq -r '.onstart')
        if [ "${ONSTART_COMMAND}" == "true" ]; then
            ONSTART+=("${SCRIPT_PATH}")
        fi
    done < <(jq -r '. | keys[]' "${CONFIG}")

    printf "##### crontab generated #####\n"
    cat "${CRONTAB_FILE}"

    printf "##### run commands with onstart #####\n"
    for ONSTART_COMMAND in "${ONSTART[@]}"; do
        printf "%s\n" "${ONSTART_COMMAND}"
        ${ONSTART_COMMAND} &
    done

    printf "##### cron running #####\n"
}
|
||||||
|
|
||||||
|
start_app() {
    # Normalize the user config into config.working.json, build the crontab
    # when launched as crond, then exec the requested command so it replaces
    # this shell as PID 1.
    normalize_config
    export CONFIG="${HOME_DIR}/config.working.json"

    if [ ! -f "${CONFIG}" ]; then
        printf "missing generated %s. exiting.\n" "${CONFIG}"
        exit 1
    fi

    case "${1}" in
        crond) build_crontab ;;
    esac

    printf "%s\n" "${@}"
    exec "${@}"
}
|
||||||
|
|
||||||
|
# Entry point: announce startup, then delegate to start_app with the
# container's command line (typically "crond ...").
printf "✨ starting crontab container ✨\n"
start_app "${@}"
|
20
test_logging
20
test_logging
@ -1,20 +0,0 @@
|
|||||||
#!/usr/bin/env bash
set -e

# This file is for testing the logging of docker output #8

# Log target; output lines are mirrored here via tee.
LOG_FILE=./jobs.log
touch ${LOG_FILE}
# Fixed placeholder UUID so test output is recognizable in the log.
UUID="xxxxxxxxxxxxxxxxx"

# Route stdout/stderr through process substitutions that prefix the stream
# with the UUID, a timestamp, and a level tag, tee-ing into the log.
# NOTE(review): `read message` consumes only the FIRST line; later lines
# from the long-running docker command below have no reader, which is
# presumably the source of the broken-pipe errors noted underneath.
exec > >(read message; echo "${UUID} $(date) [info] $message" | tee -a ${LOG_FILE} )
exec 2> >(read message; echo "${UUID} $(date) [error] $message" | tee -a ${LOG_FILE} >&2)

echo "Start"

docker run alpine sh -c 'while :; do echo "ping"; sleep 1; done'
# [error] write /dev/stdout: broken pipe
# --log-driver syslog <- errors
# --log-driver none <- errors

echo "End"
|
|
Loading…
x
Reference in New Issue
Block a user