16 Commits

Author SHA1 Message Date
Axodouble b029c0a25d Added example compose for a tailscale deployment
Container image / image (push) Successful in 3m36s
Release / release (push) Successful in 4m7s
2026-05-15 02:01:01 +00:00
Axodouble 3453bf5ec7 Updated action to use a pat due to failure otherwise, fixed cache issue
Container image / image (push) Successful in 3m17s
2026-05-15 01:44:39 +00:00
Axodouble acd55d145c Fixed incorrect shell causing a broken substitution
Container image / image (push) Failing after 9m41s
2026-05-15 01:19:21 +00:00
Axodouble ebbbd8c218 Updated when workflows run and fixed issue with the duplicate mount
Container image / image (push) Failing after 10m21s
2026-05-15 01:11:27 +00:00
Axodouble 55d966ba8f Fixed failed QEMU set up in container workflow
Container image / image (push) Failing after 1m49s
Release / release (push) Successful in 1m46s
2026-05-15 01:01:58 +00:00
Axodouble 74cb42ea28 Added workflow for docker containers
Container image / image (push) Has been cancelled
Release / release (push) Has been cancelled
2026-05-15 00:51:33 +00:00
Axodouble 2382aebc10 Added some examples of custom messages with Go's templating 2026-05-15 00:45:31 +00:00
Axodouble 9105cba380 Updated TUI field sizing 2026-05-15 00:40:01 +00:00
Axodouble a8f69cd7cc Added VerbLower to have lowercase verbs 2026-05-15 00:34:53 +00:00
Axodouble 1f1dd32741 Fixed issue with bash completions potentially crashing 2026-05-14 07:59:31 +00:00
Axodouble 231176ce41 I have spent more time on the installation script than I would've wanted to 2026-05-14 07:52:33 +00:00
Axodouble 5f7185e5b1 Updated shell assumptions 2026-05-14 07:48:12 +00:00
Axodouble a6283d9d43 Updated the installer to setup the service as qu, added some improvements to the installation 2026-05-14 07:41:49 +00:00
Axodouble 7a1ea39f78 Updated release workflow to drop cache and updated actual go version used for build for optimization
Release / release (push) Successful in 2m55s
2026-05-14 07:02:19 +00:00
Axodouble e8656b09a7 Fixed state being truncated in cell
Release / release (push) Has been cancelled
2026-05-14 06:56:34 +00:00
Axodouble 5c54a1cd91 Updated install script to add shell completions 2026-05-14 06:53:21 +00:00
13 changed files with 418 additions and 55 deletions
+7
View File
@@ -0,0 +1,7 @@
.git
.gitea
.claude
.github
dist
*.md
install.sh
+122
View File
@@ -0,0 +1,122 @@
name: Container image
# Three modes, all driven by the same job:
# - Tag push (v*) → full release: :1.2.3, :1.2, :latest, :sha-<short>
# - Branch push → canary: :<branch>, :sha-<short>
# - Pull request → smoke test: build only, nothing pushed
#
# metadata-action emits the right subset of tags for each event based
# on the `tags:` rules below — no manual branching needed.
on:
push:
branches:
- "**"
tags:
- "v*"
pull_request:
permissions:
contents: read
packages: write
jobs:
image:
runs-on: ubuntu-latest
# The default `ubuntu-latest` label on aether-runner maps to
# `node:16-bullseye`, which has no docker CLI. Override to an
# act-compatible image that ships docker + buildx. The runner
# already bind-mounts /var/run/docker.sock into every job
# container, so we do NOT add a `volumes:` entry — doing so
# produces a duplicate-mount error from the daemon.
container:
image: catthehacker/ubuntu:act-latest
# aether-runner defaults `run:` blocks to POSIX `sh`, which
# chokes on bash-isms like ${var,,} (lowercase) and ${var:0:7}
# (substring). Pin bash for the whole job.
defaults:
run:
shell: bash
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
# Skip the GHA-cache lookup for the binfmt image. The Gitea
# runner has no GHA cache server, so the action would
# otherwise sit in a ~5-minute TCP timeout before falling
# back to a direct docker pull. Going straight to pull
# cuts QEMU setup from ~5 min to ~15 s.
cache-image: false
- name: Set up Buildx
uses: docker/setup-buildx-action@v3
# Registries want lowercase namespaces, and Gitea's container
# registry is case-sensitive on the login username too. Lowercase
# both repo path and actor once here and reuse below.
- name: Resolve image name
id: img
run: |
repo='${{ github.repository }}'
actor='${{ github.actor }}'
echo "ref=git.cer.sh/${repo,,}" >> "$GITHUB_OUTPUT"
echo "user=${actor,,}" >> "$GITHUB_OUTPUT"
# Version stamp baked into the binary via -ldflags. Tag pushes
# use the tag name directly; everything else gets a short SHA
# suffix so `qu version` on a canary build is debuggable.
- name: Compute version
id: ver
run: |
if [[ "$GITHUB_REF" == refs/tags/* ]]; then
v="${GITHUB_REF_NAME}"
else
v="${GITHUB_REF_NAME}-${GITHUB_SHA:0:7}"
fi
echo "version=$v" >> "$GITHUB_OUTPUT"
# Prefers a user-provided PAT (repo secret REGISTRY_TOKEN with
# `write:package` scope) and falls back to the auto-injected
# runner token. The auto-token works on Gitea >= 1.21 when the
# workflow declares `packages: write` in permissions, but if
# the registry still rejects it (older instance, container
# registry gated by config, etc.), REGISTRY_TOKEN takes over
# without any workflow edits.
- name: Login to Gitea registry
if: github.event_name == 'push'
uses: docker/login-action@v3
with:
registry: git.cer.sh
username: ${{ steps.img.outputs.user }}
password: ${{ secrets.REGISTRY_TOKEN || secrets.GITHUB_TOKEN }}
- name: Docker metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ steps.img.outputs.ref }}
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
type=ref,event=branch
type=sha,prefix=sha-,format=short
- name: Build (and push on push events)
uses: docker/build-push-action@v6
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name == 'push' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
VERSION=${{ steps.ver.outputs.version }}
# Inline cache embeds layer metadata into the pushed image
# itself — no external cache server needed, which keeps the
# workflow self-contained on the Gitea runner.
cache-from: type=inline
cache-to: type=inline
+3 -3
View File
@@ -22,9 +22,9 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.23'
check-latest: true
cache: true
go-version: '1.24'
check-latest: false
cache: false
- name: Test
run: go test -race ./...
+76
View File
@@ -266,6 +266,7 @@ Available template variables:
| `{{.From}}` | previous state (`up` / `down` / `unknown`) |
| `{{.To}}` | new state |
| `{{.Verb}}` | `UP` / `DOWN` / `RECOVERED` |
| `{{.VerbLower}}` | lowercase form (`up` / `down` / `recovered`) |
| `{{.Snapshot.Reports}}` | total per-node reports counted |
| `{{.Snapshot.OKCount}}` | how many reported OK |
| `{{.Snapshot.NotOK}}` | how many reported failure |
@@ -284,6 +285,81 @@ or Body template field in the add/edit alert forms.
production traffic depends on it. A template parse or execution error
falls back to the built-in format and is logged.
### Conditionals, pipelines, and worked examples
Templates use Go's `text/template` syntax, so you have `if`/`else if`/
`else`/`end`, comparison helpers (`eq`, `ne`, `lt`, `gt`), `printf`
pipelines, and `with` blocks. The default rendering — the one used
when no custom template is set — lives in `internal/alerts/message.go`
inside the `Render` function; tweak it there if you want to change
what every alert without an override produces.
A few progressively richer examples:
**1. State-specific Discord copy** — different tone for `DOWN`,
`RECOVERED`, and first-time `UP`:
```yaml
body_template: |
{{if eq .Verb "DOWN"}}:rotating_light: **{{.Check.Name}}** is DOWN
We're investigating. Last detail: `{{.Snapshot.Detail}}`
{{else if eq .Verb "RECOVERED"}}:white_check_mark: **{{.Check.Name}}** is back UP after a {{.From}} blip.
{{else}}:information_source: **{{.Check.Name}}** is online ({{.VerbLower}}).{{end}}
```
**2. SMTP subject with severity prefix and run-length detail**
pipes `Verb` through `printf` for padding and only mentions the
report count when it actually matters:
```yaml
subject_template: '[{{printf "%-9s" .Verb}}] {{.Check.Name}} — {{.Check.Target}}'
body_template: |
Check: {{.Check.Name}} ({{.Check.Type}})
Target: {{.Check.Target}}
Status: {{.Verb}} (was {{.From}})
Reporter: {{.NodeID}}
At: {{.When}}
{{if gt .Snapshot.Reports 1}}
Quorum: {{.Snapshot.OKCount}} ok / {{.Snapshot.NotOK}} failing across {{.Snapshot.Reports}} reports.
{{end}}{{with .Snapshot.Detail}}
Detail: {{.}}
{{end}}
```
**3. PagerDuty-style severity routing** — nest `if`/`else if` so a
single template can produce three different first lines without
duplicating the rest of the body:
```yaml
subject_template: >-
{{if eq .Verb "DOWN"}}P1: {{.Check.Name}} hard down
{{else if eq .Verb "RECOVERED"}}P3: {{.Check.Name}} recovered
{{else}}P4: {{.Check.Name}} {{.VerbLower}}{{end}}
body_template: |
{{/* Header line — uses .VerbLower so the prose reads naturally */}}
{{.Check.Name}} ({{.Check.Target}}) is now {{.VerbLower}}.
{{if eq .Verb "DOWN" -}}
This is a real outage. Quorum: {{.Snapshot.NotOK}}/{{.Snapshot.Reports}} reporters see it failing.
Detail from the first failing probe: {{.Snapshot.Detail}}
Acknowledge in the runbook before paging on-call.
{{- else if eq .Verb "RECOVERED" -}}
Recovered after a {{.From}} period. No action needed; this is informational.
{{- else -}}
First successful probe after {{.From}}. Marking healthy.
{{- end}}
— {{.NodeID}} at {{.When}}
```
The `{{-` / `-}}` markers trim adjacent whitespace, which keeps the
rendered output tidy even when the template itself is indented for readability.
If a template fails to parse or panics at execute time, the
dispatcher falls back to the default `Render` output for that field
and logs the error — your alert still ships, you just lose the
custom formatting until you fix the template.
## Edit cluster.yaml directly
Anything you can do through the CLI you can also do by editing
+41
View File
@@ -0,0 +1,41 @@
# syntax=docker/dockerfile:1.7
# Build stage. Runs on the runner's native arch (BUILDPLATFORM) and
# cross-compiles the Go binary for whichever target the manifest list
# is being assembled for (TARGETOS/TARGETARCH). Keeps multi-arch
# builds fast — only the final link is per-arch, the Go toolchain is
# always native.
FROM --platform=$BUILDPLATFORM golang:1.24-alpine AS builder
ARG TARGETOS
ARG TARGETARCH
ARG VERSION=dev
WORKDIR /src
# Module cache layer — re-uses unless go.mod/go.sum change.
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
go build \
-trimpath \
-ldflags "-s -w -X main.version=${VERSION}" \
-o /out/qu \
./cmd/qu
# Runtime stage. distroless/static has CA roots for HTTPS probes and
# nothing else — no shell, no package manager. Runs as root so the
# daemon can open ICMP sockets and write under /etc/quptime; operators
# can override at deploy time with `docker run --user`.
FROM gcr.io/distroless/static-debian12:latest
COPY --from=builder /out/qu /usr/local/bin/qu
ENV QUPTIME_DIR=/etc/quptime
VOLUME ["/etc/quptime"]
EXPOSE 9901
ENTRYPOINT ["/usr/local/bin/qu"]
CMD ["serve"]
+37
View File
@@ -0,0 +1,37 @@
# An example of a docker compose with Tailscale & QUptime.
# This setup is specifically intended for hosts that may not be able to reach each other directly or have a public IP address.
services:
tailscale:
image: tailscale/tailscale:latest
container_name: tailscale
cap_add:
- NET_ADMIN
environment:
- TS_AUTHKEY=${TAILSCALE_AUTHKEY} # Set this in your .env file with a Tailscale auth key
- TS_HOSTNAME=quptime-tailscale
volumes:
- /dev/net/tun:/dev/net/tun
- tailscale:/var/lib/tailscale
restart: unless-stopped
quptime:
image: git.cer.sh/axodouble/quptime:master
container_name: quptime
volumes:
- quptime:/etc/quptime
ports:
- "9901:9901"
depends_on:
- tailscale
# No restart directive, user needs to init quptime first
# Run `docker compose -f docker-compose-tailscale.yml run --rm quptime init` to initialize
# the data volume before starting the service
# If this is not the master node, use
# `docker compose -f docker-compose-tailscale.yml run --rm quptime --advertise <TAILSCALE_IP>:9901 --secret <SECRET>`
# And add the individual nodes to the cluster with `docker compose -f docker-compose-tailscale.yml run --rm quptime node add <OTHER_NODE_IP>:9901`
network_mode: "service:tailscale" # Use the Tailscale network stack
volumes:
tailscale:
quptime:
+56 -34
View File
@@ -1,60 +1,82 @@
#!/bin/bash
set -euo pipefail
INSTALL_BIN="/usr/local/bin/qu"
SERVICE_FILE="/etc/systemd/system/qu-serve.service"
SERVICE_USER="${SUDO_USER:-$(whoami)}"
SERVICE_GROUP="$(id -gn "$SERVICE_USER" 2>/dev/null || echo root)"
fail() {
echo "Error: $*" >&2
exit 1
}
# Helper function which echoes each command in gray, prefixed with >, before executing it
echo_cmd() {
echo -e "\033[90m> $1\033[0m"
eval "$1"
}
# Check if jq and curl are installed, if not, error out and ask the user to install them
if ! command -v jq > /dev/null; then
echo "Error: jq is not installed. Please install jq and try again."
exit 1
require_command() {
command -v "$1" > /dev/null 2>&1 || fail "$1 is not installed. Please install $1 and try again."
}
write_completion() {
local shell=$1 path=$2
[ -d "$(dirname "$path")" ] || return 1
if "$INSTALL_BIN" completion "$shell" > "$path" 2>/dev/null; then
echo "> installed $shell completion -> $path"
return 0
fi
if ! command -v curl > /dev/null; then
echo "Error: curl is not installed. Please install curl and try again."
exit 1
rm -f "$path"
return 1
}
require_command jq
require_command curl
if [ ! -w "$(dirname "$INSTALL_BIN")" ]; then
fail "You are not allowed to write to $(dirname "$INSTALL_BIN"). Run this script with sudo or install qu manually."
fi
# Check if the user is allowed to write to /usr/local/bin, if so, install qu there, else error out and ask the user to install qu manually
if [ -w "/usr/local/bin" ]; then
# Get release tag by $(curl -s https://git.cer.sh/api/v1/repos/axodouble/quptime/releases/latest | jq -r '.tag_name')
RELEASE=$(curl -s https://git.cer.sh/api/v1/repos/axodouble/quptime/releases/latest | jq -r '.tag_name')
# Download the latest release binary from the Git repository and save it to /usr/local/bin/qu
if command -v curl > /dev/null; then
echo_cmd "curl -L -o \"/usr/local/bin/qu\" \"https://git.cer.sh/axodouble/quptime/releases/download/${RELEASE}/qu-${RELEASE}-linux-amd64\""
echo_cmd "chmod +x \"/usr/local/bin/qu\""
echo "> qu has been installed to /usr/local/bin/qu"
echo_cmd "curl -L -o '$INSTALL_BIN' 'https://git.cer.sh/axodouble/quptime/releases/download/${RELEASE}/qu-${RELEASE}-linux-amd64'"
echo_cmd "chmod +x '$INSTALL_BIN'"
echo "> qu has been installed to $INSTALL_BIN"
if "$INSTALL_BIN" --help 2>/dev/null | grep -q "completion"; then
write_completion bash /usr/share/bash-completion/completions/qu \
|| write_completion bash /etc/bash_completion.d/qu || true
write_completion zsh /usr/share/zsh/site-functions/_qu || true
write_completion fish /usr/share/fish/vendor_completions.d/qu.fish || true
else
echo "Error: curl is not installed. Please install curl and try again."
exit 1
fi
else
echo "Error: You are not allowed to write to /usr/local/bin. Please install qu manually, or run this script with sudo."
exit 1
echo "> qu does not expose completion support; skipping shell completion installation."
fi
if ! command -v systemctl > /dev/null 2>&1; then
echo "> Warning: systemd is not available on this system. qu serve will not be automatically started on boot."
echo "Installation complete, before starting qu serve, make sure to run qu init and read the documentation."
exit 0
fi
# Check if the user has systemd, if so create a systemd service file for qu serve
if command -v systemctl > /dev/null; then
echo "> Creating systemd service file for qu serve..."
cat <<EOL > /etc/systemd/system/qu-serve.service
cat > "$SERVICE_FILE" <<EOL
[Unit]
Description=QUptime Serve
After=network.target
[Service]
ExecStart=/usr/local/bin/qu serve
ExecStart=$INSTALL_BIN serve
Restart=always
User=$(whoami)
User=$SERVICE_USER
Group=$SERVICE_GROUP
[Install]
WantedBy=multi-user.target
EOL
echo_cmd "systemctl daemon-reload"
echo_cmd "systemctl enable qu-serve.service"
echo "> qu serve service has been created and enabled. You can start it with 'systemctl start qu-serve.service'"
else
echo "> Warning: systemd is not available on this system. qu serve will not be automatically started on boot. You can start it manually with '/usr/local/bin/qu serve'"
fi
echo "Installation complete, before starting `qu serve`, make sure to run `qu init` and read the documentation."
echo_cmd "systemctl daemon-reload"
echo_cmd "systemctl enable $(basename "$SERVICE_FILE")"
echo "> qu serve service has been created and enabled. You can start it with 'systemctl start $(basename "$SERVICE_FILE")'"
echo "Installation complete, before starting qu serve, make sure to run qu init and read the documentation."
+2 -1
View File
@@ -9,7 +9,7 @@ package alerts
func TemplateVarsHint() string {
return "Go text/template — leave empty to use the built-in format.\n" +
" Vars: {{.Check.Name}}, {{.Check.Target}}, {{.Check.Type}}, {{.Check.ID}},\n" +
" {{.Verb}} (UP|DOWN|RECOVERED), {{.From}}, {{.To}}, {{.NodeID}}, {{.When}},\n" +
" {{.Verb}} (UP|DOWN|RECOVERED), {{.VerbLower}}, {{.From}}, {{.To}}, {{.NodeID}}, {{.When}},\n" +
" {{.Snapshot.Detail}}, {{.Snapshot.Reports}}, {{.Snapshot.OKCount}}, {{.Snapshot.NotOK}}"
}
@@ -29,6 +29,7 @@ Available variables:
{{.Check.Type}} http | tcp | icmp
{{.Check.ID}} stable check UUID
{{.Verb}} UP | DOWN | RECOVERED
{{.VerbLower}} lowercase form of Verb (up | down | recovered)
{{.From}} previous state name
{{.To}} new state name
{{.NodeID}} master node that rendered the message
+4 -1
View File
@@ -22,6 +22,7 @@ type TemplateContext struct {
From string // previous state name
To string // new state name
Verb string // "UP" | "DOWN" | "RECOVERED"
VerbLower string // lowercase form of Verb ("up" | "down" | "recovered")
Snapshot checks.Snapshot // aggregate counts and detail
NodeID string // master that rendered the message
When string // RFC3339 timestamp
@@ -88,11 +89,13 @@ func RenderFor(alert *config.Alert, nodeID string, check *config.Check, from, to
}
func newContext(nodeID string, check *config.Check, from, to checks.State, snap checks.Snapshot) TemplateContext {
verb := transitionVerb(from, to)
return TemplateContext{
Check: check,
From: string(from),
To: string(to),
Verb: transitionVerb(from, to),
Verb: verb,
VerbLower: strings.ToLower(verb),
Snapshot: snap,
NodeID: nodeID,
When: time.Now().UTC().Format(time.RFC3339),
+1 -1
View File
@@ -91,7 +91,7 @@ type Alert struct {
// format. Discord ignores SubjectTemplate (it has no subject line);
// SMTP uses both. Available variables: {{.Check.Name}},
// {{.Check.Type}}, {{.Check.Target}}, {{.Check.ID}}, {{.From}},
// {{.To}}, {{.Verb}}, {{.Snapshot.Reports}}, {{.Snapshot.OKCount}},
// {{.To}}, {{.Verb}}, {{.VerbLower}}, {{.Snapshot.Reports}}, {{.Snapshot.OKCount}},
// {{.Snapshot.NotOK}}, {{.Snapshot.Detail}}, {{.NodeID}}, {{.When}}.
SubjectTemplate string `yaml:"subject_template,omitempty"`
BodyTemplate string `yaml:"body_template,omitempty"`
+27 -2
View File
@@ -63,10 +63,27 @@ type form struct {
cursor int
busy bool
err string
width int // current terminal width; inputs resize to fill it
submit func(values []string) tea.Cmd
}
// defaultFieldWidth is the fallback input width used before the first
// WindowSizeMsg has arrived. Once we know the terminal size, inputs
// grow to fill the available horizontal space.
const defaultFieldWidth = 40
// fieldWidthFor derives the per-input visible width from the terminal
// width. It subtracts the modal's border+padding (6) and the form's
// label indent (2), then a couple of chars of safety margin.
func fieldWidthFor(termWidth int) int {
w := termWidth - 12
if w < defaultFieldWidth {
return defaultFieldWidth
}
return w
}
func newForm(title string, fields []formField, submit func([]string) tea.Cmd) *form {
for i := range fields {
fields[i].input.Prompt = ""
@@ -89,7 +106,7 @@ func textField(label, hint string, required bool) formField {
// contents and can tweak instead of retyping everything.
func textFieldWithValue(label, hint, value string, required bool) formField {
ti := textinput.New()
ti.Width = 40
ti.Width = defaultFieldWidth
ti.Placeholder = hint
if value != "" {
ti.SetValue(value)
@@ -106,7 +123,7 @@ func passwordField(label, hint string) formField {
// the actual value leaking on-screen.
func passwordFieldWithValue(label, hint, value string) formField {
ti := textinput.New()
ti.Width = 40
ti.Width = defaultFieldWidth
ti.Placeholder = hint
ti.EchoMode = textinput.EchoPassword
ti.EchoCharacter = '•'
@@ -148,6 +165,14 @@ func (f *form) View() string {
func (f *form) Update(msg tea.Msg) (modal, tea.Cmd) {
switch msg := msg.(type) {
case tea.WindowSizeMsg:
f.width = msg.Width
w := fieldWidthFor(msg.Width)
for i := range f.fields {
f.fields[i].input.Width = w
}
return f, nil
case formSubmitErr:
f.busy = false
f.err = string(msg)
+7 -3
View File
@@ -58,14 +58,18 @@ var (
stateUnknownStyle = lipgloss.NewStyle().Foreground(colorMuted)
)
// renderState returns a plain-text state label for use inside the
// bubbles table. The table truncates cells with runewidth.Truncate
// which counts the printable bytes of ANSI escape sequences toward
// column width, so a styled value gets chopped down to just "…".
func renderState(s string) string {
switch s {
case "up":
return stateUpStyle.Render("● up")
return "● up"
case "down":
return stateDownStyle.Render("● down")
return "● down"
default:
return stateUnknownStyle.Render("○ unknown")
return "○ unknown"
}
}
+25
View File
@@ -132,6 +132,9 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
case tea.WindowSizeMsg:
m.width, m.height = msg.Width, msg.Height
m.resizeTabs()
if m.modal != nil {
m.modal, _ = m.modal.Update(msg)
}
return m, nil
case tickMsg:
@@ -175,8 +178,15 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
// Modal grabs all input while open.
if m.modal != nil {
prev := m.modal
newModal, cmd := m.modal.Update(msg)
m.modal = newModal
// If the modal handed off to a different modal (e.g. picker →
// form), seed the new one with the current terminal size so its
// text inputs can size themselves on first paint.
if newModal != nil && newModal != prev {
m.seedModalSize()
}
return m, cmd
}
@@ -223,6 +233,7 @@ func (m model) handleKey(km tea.KeyMsg) (tea.Model, tea.Cmd) {
return m, tea.Batch(loadStatusCmd(), loadConfigCmd())
case "a":
m.modal = m.openAddPicker()
m.seedModalSize()
return m, nil
case "d":
return m.openRemoveConfirm()
@@ -501,9 +512,20 @@ func (m model) openRemoveConfirm() (tea.Model, tea.Cmd) {
return m, nil
}
m.modal = newConfirm(prompt, run)
m.seedModalSize()
return m, nil
}
// seedModalSize forwards the current terminal dimensions to the modal
// so its inputs can size themselves on first paint. Called whenever a
// new modal is installed.
func (m *model) seedModalSize() {
if m.modal == nil || m.width == 0 {
return
}
m.modal, _ = m.modal.Update(tea.WindowSizeMsg{Width: m.width, Height: m.height})
}
// openEditForm dispatches to the right pre-filled edit form based on the
// active tab and the row under the cursor. Looks up the full record in
// m.peersFull / m.checksFull / m.alerts (populated by loadConfigCmd) so
@@ -519,6 +541,7 @@ func (m model) openEditForm() (tea.Model, tea.Cmd) {
for i := range m.peersFull {
if m.peersFull[i].NodeID == id {
m.modal = newEditNodeForm(m.peersFull[i])
m.seedModalSize()
return m, nil
}
}
@@ -534,6 +557,7 @@ func (m model) openEditForm() (tea.Model, tea.Cmd) {
for i := range m.checksFull {
if m.checksFull[i].ID == id {
m.modal = newEditCheckForm(m.checksFull[i])
m.seedModalSize()
return m, nil
}
}
@@ -559,6 +583,7 @@ func (m model) openEditForm() (tea.Model, tea.Cmd) {
m.setFlash("unsupported alert type", flashError)
return m, nil
}
m.seedModalSize()
return m, nil
}
m.setFlash("alert not found in local cluster.yaml", flashError)