4 Commits

Author SHA1 Message Date
Axodouble b029c0a25d Added example compose for a tailscale deployment
Container image / image (push) Successful in 3m36s
Release / release (push) Successful in 4m7s
2026-05-15 02:01:01 +00:00
Axodouble 3453bf5ec7 Updated action to use a pat due to failure otherwise, fixed cache issue
Container image / image (push) Successful in 3m17s
2026-05-15 01:44:39 +00:00
Axodouble acd55d145c Fixed incorrect shell causing a broken substitution
Container image / image (push) Failing after 9m41s
2026-05-15 01:19:21 +00:00
Axodouble ebbbd8c218 Updated when workflows run and fixed issue with the duplicate mount
Container image / image (push) Failing after 10m21s
2026-05-15 01:11:27 +00:00
3 changed files with 97 additions and 22 deletions
+60 -22
View File
@@ -1,14 +1,18 @@
name: Container image name: Container image
# Builds the multi-arch container image. On tag push (v*) it logs in # Three modes, all driven by the same job:
# to the Gitea registry on this host and publishes the image as # - Tag push (v*) → full release: :v1.2.3, :1.2, :latest, :sha-<short>
# git.cer.sh/<owner>/<repo>:<version> plus :latest. On pull requests # - Branch push → canary: :<branch>, :sha-<short>
# it builds without pushing — purely a smoke test that the Dockerfile # - Pull request → smoke test: build only, nothing pushed
# still works. #
# metadata-action emits the right subset of tags for each event based
# on the `tags:` rules below — no manual branching needed.
on: on:
push: push:
branches:
- "**"
tags: tags:
- 'v*' - "v*"
pull_request: pull_request:
permissions: permissions:
@@ -19,42 +23,74 @@ jobs:
image: image:
runs-on: ubuntu-latest runs-on: ubuntu-latest
# The default `ubuntu-latest` label on aether-runner maps to # The default `ubuntu-latest` label on aether-runner maps to
# `node:16-bullseye`, which has no docker CLI — so the docker/* # `node:16-bullseye`, which has no docker CLI. Override to an
# actions fail. Override the job container to catthehacker's # act-compatible image that ships docker + buildx. The runner
# act-compatible image (ships docker CLI + buildx) and mount the # already bind-mounts /var/run/docker.sock into every job
# host's docker socket through. The runner already has the socket # container, so we do NOT add a `volumes:` entry — doing so
# bind-mounted from the host (see docker.yml gitea-runner volume), # produces a duplicate-mount error from the daemon.
# so this exposes that same daemon to the nested job container.
container: container:
image: catthehacker/ubuntu:act-latest image: catthehacker/ubuntu:act-latest
volumes: # aether-runner defaults `run:` blocks to POSIX `sh`, which
- /var/run/docker.sock:/var/run/docker.sock # chokes on bash-isms like ${var,,} (lowercase) and ${var:0:7}
# (substring). Pin bash for the whole job.
defaults:
run:
shell: bash
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v3
with:
# Skip the GHA-cache lookup for the binfmt image. The Gitea
# runner has no GHA cache server, so the action would
# otherwise sit in a ~5-minute TCP timeout before falling
# back to a direct docker pull. Going straight to pull
# cuts QEMU setup from ~5 min to ~15 s.
cache-image: false
- name: Set up Buildx - name: Set up Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v3
# github.repository is owner/name with the repo's original casing; # Registries want lowercase namespaces, and Gitea's container
# registries require lowercase, so normalise once here and reuse # registry is case-sensitive on the login username too. Lowercase
# the result in metadata-action below. # both repo path and actor once here and reuse below.
- name: Resolve image name - name: Resolve image name
id: img id: img
run: | run: |
repo='${{ github.repository }}' repo='${{ github.repository }}'
actor='${{ github.actor }}'
echo "ref=git.cer.sh/${repo,,}" >> "$GITHUB_OUTPUT" echo "ref=git.cer.sh/${repo,,}" >> "$GITHUB_OUTPUT"
echo "user=${actor,,}" >> "$GITHUB_OUTPUT"
# Version stamp baked into the binary via -ldflags. Tag pushes
# use the tag name directly; everything else gets a short SHA
# suffix so `qu version` on a canary build is debuggable.
- name: Compute version
id: ver
run: |
if [[ "$GITHUB_REF" == refs/tags/* ]]; then
v="${GITHUB_REF_NAME}"
else
v="${GITHUB_REF_NAME}-${GITHUB_SHA:0:7}"
fi
echo "version=$v" >> "$GITHUB_OUTPUT"
# Prefers a user-provided PAT (repo secret REGISTRY_TOKEN with
# `write:package` scope) and falls back to the auto-injected
# runner token. The auto-token works on Gitea >= 1.21 when the
# workflow declares `packages: write` in permissions, but if
# the registry still rejects it (older instance, container
# registry gated by config, etc.), REGISTRY_TOKEN takes over
# without any workflow edits.
- name: Login to Gitea registry - name: Login to Gitea registry
if: github.event_name == 'push' if: github.event_name == 'push'
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: git.cer.sh registry: git.cer.sh
username: ${{ github.actor }} username: ${{ steps.img.outputs.user }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.REGISTRY_TOKEN || secrets.GITHUB_TOKEN }}
- name: Docker metadata - name: Docker metadata
id: meta id: meta
@@ -65,18 +101,20 @@ jobs:
type=semver,pattern={{version}} type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}}.{{minor}}
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }} type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
type=ref,event=branch
type=sha,prefix=sha-,format=short
- name: Build (and push on tag) - name: Build (and push on push events)
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6
with: with:
context: . context: .
file: ./Dockerfile file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: ${{ github.event_name == 'push' }} push: ${{ github.event_name == 'push' }}
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
build-args: | build-args: |
VERSION=${{ github.ref_name }} VERSION=${{ steps.ver.outputs.version }}
# Inline cache embeds layer metadata into the pushed image # Inline cache embeds layer metadata into the pushed image
# itself — no external cache server needed, which keeps the # itself — no external cache server needed, which keeps the
# workflow self-contained on the Gitea runner. # workflow self-contained on the Gitea runner.
View File
+37
View File
@@ -0,0 +1,37 @@
# Example Docker Compose stack running QUptime behind Tailscale.
# Intended for hosts that cannot reach each other directly or that
# have no public IP address.
services:
  tailscale:
    image: tailscale/tailscale:latest
    container_name: tailscale
    cap_add:
      - NET_ADMIN
    environment:
      - TS_AUTHKEY=${TAILSCALE_AUTHKEY} # Set this in your .env file with a Tailscale auth key
      # Persist the node identity in the mounted volume — without
      # TS_STATE_DIR the state volume below is never written to and
      # the node re-registers on every container restart.
      - TS_STATE_DIR=/var/lib/tailscale
      - TS_HOSTNAME=quptime-tailscale
    volumes:
      - /dev/net/tun:/dev/net/tun
      - tailscale:/var/lib/tailscale
    # QUptime's port is published here: the quptime service shares
    # this container's network namespace, and Compose rejects a
    # service that declares both `ports:` and `network_mode: "service:..."`.
    ports:
      - "9901:9901"
    restart: unless-stopped
  quptime:
    image: git.cer.sh/axodouble/quptime:master
    container_name: quptime
    volumes:
      - quptime:/etc/quptime
    depends_on:
      - tailscale
    # No restart directive, user needs to init quptime first
    # Run `docker compose -f docker-compose-tailscale.yml run --rm quptime init` to initialize
    # the data volume before starting the service
    # If this is not the master node, use
    # `docker compose -f docker-compose-tailscale.yml run --rm quptime --advertise <TAILSCALE_IP>:9901 --secret <SECRET>`
    # And add the individual nodes to the cluster with `docker compose -f docker-compose-tailscale.yml run --rm quptime node add <OTHER_NODE_IP>:9901`
    network_mode: "service:tailscale" # Use the Tailscale network stack
volumes:
  tailscale:
  quptime: