wip(#425): vendor-agnostic OS rename — partial (rate-limited mid-run) (#435)

Files staged from prior agent run before rate-limit. Re-dispatch will
verify, complete missing pieces (Crossplane Provider+ProviderConfig in
cloud-init, grep-zero acceptance, helm/go test runs, WBS row update),
and finalise the PR.

Includes:
- platform/velero/chart/templates/{hetzner-credentials-secret -> objectstorage-credentials}.yaml
- platform/velero/chart/values.yaml (objectStorage.s3.* block)
- platform/velero/chart/Chart.yaml (1.1.0 -> 1.2.0)
- products/catalyst/bootstrap/api/internal/objectstorage/ (NEW package)
- internal/hetzner/objectstorage{,_test}.go DELETED
- credentials handler + StepCredentials.tsx renamed
- infra/hetzner/{main.tf,variables.tf,cloudinit-control-plane.tftpl}
- clusters/{_template,omantel.omani.works,otech.omani.works}/bootstrap-kit/34-velero.yaml
- platform/seaweedfs/* (out-of-scope drift — re-dispatch will revert if not part of #425)

Co-authored-by: hatiyildiz <hatiyildiz@noreply.github.com>
This commit is contained in:
e3mrah 2026-05-01 18:05:19 +04:00 committed by GitHub
parent 11afb27e95
commit 0172b9a89a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
24 changed files with 906 additions and 486 deletions

View File

@ -1,32 +1,36 @@
# bp-velero — Catalyst bootstrap-kit Blueprint #34 (W2.K3, Tier 7 — Security/Policy).
#
# Per-host-cluster backup engine. Per ADR-0001 §13 (S3-aware app rule)
# + docs/omantel-handover-wbs.md §3, on Hetzner Sovereigns Velero writes
# its backups DIRECTLY to Hetzner Object Storage — NOT SeaweedFS, which
# is reserved as a POSIX→S3 buffer for legacy POSIX-only writers and
# is not in the minimal Sovereign set.
# + docs/omantel-handover-wbs.md §3 + §3a, on Hetzner Sovereigns Velero
# writes its backups DIRECTLY to Hetzner Object Storage — NOT SeaweedFS,
# which is reserved as a POSIX→S3 buffer for legacy POSIX-only writers
# and is not in the minimal Sovereign set.
#
# Wrapper chart: platform/velero/chart/ (umbrella over upstream
# vmware-tanzu/velero chart, Catalyst-curated values under the `velero:`
# key + a `veleroOverlay.hetzner.*` section that ships the
# key + a vendor-AGNOSTIC `objectStorage.s3.*` section that ships the
# velero-namespace credentials Secret in AWS-CLI INI format).
# Reconciled by: Flux on the new Sovereign's k3s control plane.
#
# Hetzner Object Storage credential pattern (issue #371):
# - cloud-init writes flux-system/hetzner-object-storage Secret with
# 5 keys: s3-endpoint / s3-region / s3-bucket / s3-access-key /
# Object Storage credential pattern (issue #371, vendor-agnostic since
# #425):
# - cloud-init writes flux-system/object-storage Secret with 5 keys:
# s3-endpoint / s3-region / s3-bucket / s3-access-key /
# s3-secret-key (operator-issued in the Hetzner Console; Hetzner
# exposes no Cloud API to mint S3 credentials).
# exposes no Cloud API to mint S3 credentials. Future AWS / Azure /
# GCP / OCI Sovereigns provision the same Secret name + same keys
# via their respective `infra/<provider>/` Tofu modules — the seam
# is vendor-agnostic by name).
# - This HelmRelease references that Secret via Flux `valuesFrom`,
# pulling each key into the appropriate Helm value path. The
# umbrella chart's templates/hetzner-credentials-secret.yaml then
# umbrella chart's templates/objectstorage-credentials.yaml then
# synthesises a velero-namespace Secret with a `cloud` key in the
# AWS-CLI INI format upstream Velero expects (mounted at
# /credentials/cloud).
#
# dependsOn: none — Velero is independent of all other minimal-set
# blueprints. Earlier revisions of this slot dependsOn'd bp-seaweedfs;
# that dependency is REMOVED per the Hetzner-direct architecture rule
# that dependency is REMOVED per the cloud-direct architecture rule
# (SeaweedFS is no longer a Velero prerequisite on Sovereigns).
---
@ -61,7 +65,7 @@ spec:
chart:
spec:
chart: bp-velero
version: 1.1.0
version: 1.2.0
sourceRef:
kind: HelmRepository
name: bp-velero
@ -79,12 +83,12 @@ spec:
disableWait: true
remediation:
retries: 3
# ── Hetzner Object Storage backend wiring (issue #384, depends on #371) ──
# ── Vendor-agnostic Object Storage backend wiring (issue #425) ──────
#
# Each entry below pulls a single key from the canonical
# flux-system/hetzner-object-storage Secret (shipped by cloud-init in
# infra/hetzner/cloudinit-control-plane.tftpl) into the matching value
# path in the umbrella chart. Flux dereferences `valuesFrom` at
# flux-system/object-storage Secret (shipped by cloud-init in
# infra/<provider>/cloudinit-control-plane.tftpl) into the matching
# value path in the umbrella chart. Flux dereferences `valuesFrom` at
# HelmRelease apply time, so plaintext credentials never appear in
# this committed manifest.
#
@ -92,41 +96,40 @@ spec:
# required by default (`optional: false` is the implicit default).
valuesFrom:
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-bucket
targetPath: velero.configuration.backupStorageLocation[0].bucket
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-region
targetPath: velero.configuration.backupStorageLocation[0].config.region
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-endpoint
targetPath: velero.configuration.backupStorageLocation[0].config.s3Url
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-access-key
targetPath: veleroOverlay.hetzner.s3.accessKey
targetPath: objectStorage.s3.accessKey
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-secret-key
targetPath: veleroOverlay.hetzner.s3.secretKey
targetPath: objectStorage.s3.secretKey
# Baseline values supplied by the bootstrap-kit slot. Per-Sovereign
# overlays in clusters/<sovereign>/bootstrap-kit/34-velero.yaml MAY
# override any of these (e.g. a different bucket-name strategy, a
# different credentials Secret name, or `deployNodeAgent: true` for
# file-system backup) without changing this template.
values:
veleroOverlay:
hetzner:
enabled: true
useExistingSecret: false
credentialsSecretName: velero-hetzner-credentials
objectStorage:
enabled: true
useExistingSecret: false
credentialsSecretName: velero-objectstorage-credentials
velero:
backupsEnabled: true
credentials:
useSecret: true
existingSecret: velero-hetzner-credentials
existingSecret: velero-objectstorage-credentials
configuration:
backupStorageLocation:
- name: default
@ -134,7 +137,7 @@ spec:
default: true
accessMode: ReadWrite
credential:
name: velero-hetzner-credentials
name: velero-objectstorage-credentials
key: cloud
config:
s3ForcePathStyle: "true"

View File

@ -1,32 +1,36 @@
# bp-velero — Catalyst bootstrap-kit Blueprint #34 (W2.K3, Tier 7 — Security/Policy).
#
# Per-host-cluster backup engine. Per ADR-0001 §13 (S3-aware app rule)
# + docs/omantel-handover-wbs.md §3, on Hetzner Sovereigns Velero writes
# its backups DIRECTLY to Hetzner Object Storage — NOT SeaweedFS, which
# is reserved as a POSIX→S3 buffer for legacy POSIX-only writers and
# is not in the minimal Sovereign set.
# + docs/omantel-handover-wbs.md §3 + §3a, on Hetzner Sovereigns Velero
# writes its backups DIRECTLY to Hetzner Object Storage — NOT SeaweedFS,
# which is reserved as a POSIX→S3 buffer for legacy POSIX-only writers
# and is not in the minimal Sovereign set.
#
# Wrapper chart: platform/velero/chart/ (umbrella over upstream
# vmware-tanzu/velero chart, Catalyst-curated values under the `velero:`
# key + a `veleroOverlay.hetzner.*` section that ships the
# key + a vendor-AGNOSTIC `objectStorage.s3.*` section that ships the
# velero-namespace credentials Secret in AWS-CLI INI format).
# Reconciled by: Flux on the new Sovereign's k3s control plane.
#
# Hetzner Object Storage credential pattern (issue #371):
# - cloud-init writes flux-system/hetzner-object-storage Secret with
# 5 keys: s3-endpoint / s3-region / s3-bucket / s3-access-key /
# Object Storage credential pattern (issue #371, vendor-agnostic since
# #425):
# - cloud-init writes flux-system/object-storage Secret with 5 keys:
# s3-endpoint / s3-region / s3-bucket / s3-access-key /
# s3-secret-key (operator-issued in the Hetzner Console; Hetzner
# exposes no Cloud API to mint S3 credentials).
# exposes no Cloud API to mint S3 credentials. Future AWS / Azure /
# GCP / OCI Sovereigns provision the same Secret name + same keys
# via their respective `infra/<provider>/` Tofu modules — the seam
# is vendor-agnostic by name).
# - This HelmRelease references that Secret via Flux `valuesFrom`,
# pulling each key into the appropriate Helm value path. The
# umbrella chart's templates/hetzner-credentials-secret.yaml then
# umbrella chart's templates/objectstorage-credentials.yaml then
# synthesises a velero-namespace Secret with a `cloud` key in the
# AWS-CLI INI format upstream Velero expects (mounted at
# /credentials/cloud).
#
# dependsOn: none — Velero is independent of all other minimal-set
# blueprints. Earlier revisions of this slot dependsOn'd bp-seaweedfs;
# that dependency is REMOVED per the Hetzner-direct architecture rule
# that dependency is REMOVED per the cloud-direct architecture rule
# (SeaweedFS is no longer a Velero prerequisite on Sovereigns).
---
@ -61,7 +65,7 @@ spec:
chart:
spec:
chart: bp-velero
version: 1.1.0
version: 1.2.0
sourceRef:
kind: HelmRepository
name: bp-velero
@ -79,12 +83,12 @@ spec:
disableWait: true
remediation:
retries: 3
# ── Hetzner Object Storage backend wiring (issue #384, depends on #371) ──
# ── Vendor-agnostic Object Storage backend wiring (issue #425) ──────
#
# Each entry below pulls a single key from the canonical
# flux-system/hetzner-object-storage Secret (shipped by cloud-init in
# infra/hetzner/cloudinit-control-plane.tftpl) into the matching value
# path in the umbrella chart. Flux dereferences `valuesFrom` at
# flux-system/object-storage Secret (shipped by cloud-init in
# infra/<provider>/cloudinit-control-plane.tftpl) into the matching
# value path in the umbrella chart. Flux dereferences `valuesFrom` at
# HelmRelease apply time, so plaintext credentials never appear in
# this committed manifest.
#
@ -92,41 +96,40 @@ spec:
# required by default (`optional: false` is the implicit default).
valuesFrom:
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-bucket
targetPath: velero.configuration.backupStorageLocation[0].bucket
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-region
targetPath: velero.configuration.backupStorageLocation[0].config.region
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-endpoint
targetPath: velero.configuration.backupStorageLocation[0].config.s3Url
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-access-key
targetPath: veleroOverlay.hetzner.s3.accessKey
targetPath: objectStorage.s3.accessKey
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-secret-key
targetPath: veleroOverlay.hetzner.s3.secretKey
targetPath: objectStorage.s3.secretKey
# Baseline values supplied by the bootstrap-kit slot. Per-Sovereign
# overlays in clusters/<sovereign>/bootstrap-kit/34-velero.yaml MAY
# override any of these (e.g. a different bucket-name strategy, a
# different credentials Secret name, or `deployNodeAgent: true` for
# file-system backup) without changing this template.
values:
veleroOverlay:
hetzner:
enabled: true
useExistingSecret: false
credentialsSecretName: velero-hetzner-credentials
objectStorage:
enabled: true
useExistingSecret: false
credentialsSecretName: velero-objectstorage-credentials
velero:
backupsEnabled: true
credentials:
useSecret: true
existingSecret: velero-hetzner-credentials
existingSecret: velero-objectstorage-credentials
configuration:
backupStorageLocation:
- name: default
@ -134,7 +137,7 @@ spec:
default: true
accessMode: ReadWrite
credential:
name: velero-hetzner-credentials
name: velero-objectstorage-credentials
key: cloud
config:
s3ForcePathStyle: "true"

View File

@ -1,32 +1,36 @@
# bp-velero — Catalyst bootstrap-kit Blueprint #34 (W2.K3, Tier 7 — Security/Policy).
#
# Per-host-cluster backup engine. Per ADR-0001 §13 (S3-aware app rule)
# + docs/omantel-handover-wbs.md §3, on Hetzner Sovereigns Velero writes
# its backups DIRECTLY to Hetzner Object Storage — NOT SeaweedFS, which
# is reserved as a POSIX→S3 buffer for legacy POSIX-only writers and
# is not in the minimal Sovereign set.
# + docs/omantel-handover-wbs.md §3 + §3a, on Hetzner Sovereigns Velero
# writes its backups DIRECTLY to Hetzner Object Storage — NOT SeaweedFS,
# which is reserved as a POSIX→S3 buffer for legacy POSIX-only writers
# and is not in the minimal Sovereign set.
#
# Wrapper chart: platform/velero/chart/ (umbrella over upstream
# vmware-tanzu/velero chart, Catalyst-curated values under the `velero:`
# key + a `veleroOverlay.hetzner.*` section that ships the
# key + a vendor-AGNOSTIC `objectStorage.s3.*` section that ships the
# velero-namespace credentials Secret in AWS-CLI INI format).
# Reconciled by: Flux on the new Sovereign's k3s control plane.
#
# Hetzner Object Storage credential pattern (issue #371):
# - cloud-init writes flux-system/hetzner-object-storage Secret with
# 5 keys: s3-endpoint / s3-region / s3-bucket / s3-access-key /
# Object Storage credential pattern (issue #371, vendor-agnostic since
# #425):
# - cloud-init writes flux-system/object-storage Secret with 5 keys:
# s3-endpoint / s3-region / s3-bucket / s3-access-key /
# s3-secret-key (operator-issued in the Hetzner Console; Hetzner
# exposes no Cloud API to mint S3 credentials).
# exposes no Cloud API to mint S3 credentials. Future AWS / Azure /
# GCP / OCI Sovereigns provision the same Secret name + same keys
# via their respective `infra/<provider>/` Tofu modules — the seam
# is vendor-agnostic by name).
# - This HelmRelease references that Secret via Flux `valuesFrom`,
# pulling each key into the appropriate Helm value path. The
# umbrella chart's templates/hetzner-credentials-secret.yaml then
# umbrella chart's templates/objectstorage-credentials.yaml then
# synthesises a velero-namespace Secret with a `cloud` key in the
# AWS-CLI INI format upstream Velero expects (mounted at
# /credentials/cloud).
#
# dependsOn: none — Velero is independent of all other minimal-set
# blueprints. Earlier revisions of this slot dependsOn'd bp-seaweedfs;
# that dependency is REMOVED per the Hetzner-direct architecture rule
# that dependency is REMOVED per the cloud-direct architecture rule
# (SeaweedFS is no longer a Velero prerequisite on Sovereigns).
---
@ -61,7 +65,7 @@ spec:
chart:
spec:
chart: bp-velero
version: 1.1.0
version: 1.2.0
sourceRef:
kind: HelmRepository
name: bp-velero
@ -79,12 +83,12 @@ spec:
disableWait: true
remediation:
retries: 3
# ── Hetzner Object Storage backend wiring (issue #384, depends on #371) ──
# ── Vendor-agnostic Object Storage backend wiring (issue #425) ──────
#
# Each entry below pulls a single key from the canonical
# flux-system/hetzner-object-storage Secret (shipped by cloud-init in
# infra/hetzner/cloudinit-control-plane.tftpl) into the matching value
# path in the umbrella chart. Flux dereferences `valuesFrom` at
# flux-system/object-storage Secret (shipped by cloud-init in
# infra/<provider>/cloudinit-control-plane.tftpl) into the matching
# value path in the umbrella chart. Flux dereferences `valuesFrom` at
# HelmRelease apply time, so plaintext credentials never appear in
# this committed manifest.
#
@ -92,41 +96,40 @@ spec:
# required by default (`optional: false` is the implicit default).
valuesFrom:
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-bucket
targetPath: velero.configuration.backupStorageLocation[0].bucket
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-region
targetPath: velero.configuration.backupStorageLocation[0].config.region
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-endpoint
targetPath: velero.configuration.backupStorageLocation[0].config.s3Url
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-access-key
targetPath: veleroOverlay.hetzner.s3.accessKey
targetPath: objectStorage.s3.accessKey
- kind: Secret
name: hetzner-object-storage
name: object-storage
valuesKey: s3-secret-key
targetPath: veleroOverlay.hetzner.s3.secretKey
targetPath: objectStorage.s3.secretKey
# Baseline values supplied by the bootstrap-kit slot. Per-Sovereign
# overlays in clusters/<sovereign>/bootstrap-kit/34-velero.yaml MAY
# override any of these (e.g. a different bucket-name strategy, a
# different credentials Secret name, or `deployNodeAgent: true` for
# file-system backup) without changing this template.
values:
veleroOverlay:
hetzner:
enabled: true
useExistingSecret: false
credentialsSecretName: velero-hetzner-credentials
objectStorage:
enabled: true
useExistingSecret: false
credentialsSecretName: velero-objectstorage-credentials
velero:
backupsEnabled: true
credentials:
useSecret: true
existingSecret: velero-hetzner-credentials
existingSecret: velero-objectstorage-credentials
configuration:
backupStorageLocation:
- name: default
@ -134,7 +137,7 @@ spec:
default: true
accessMode: ReadWrite
credential:
name: velero-hetzner-credentials
name: velero-objectstorage-credentials
key: cloud
config:
s3ForcePathStyle: "true"

View File

@ -39,7 +39,7 @@ A handed-over Sovereign must own its own GitOps loop, its own DNS, its own cert
| 15 | `bp-spire` | Workload identity — service-to-service mTLS | ✅ chart-verified ([#382](https://github.com/openova-io/openova/issues/382)) — `bp-spire:1.1.4` published, smoke-installed Ready (server 2/2, agent 1/1, csi-driver 2/2), k8s_psat agent attestation confirmed; bootstrap-kit slot 06 wired |
| 16 | `bp-crossplane` | Day-2 cloud-resource provisioning | ✅ chart-verified ([#378](https://github.com/openova-io/openova/issues/378) closed as duplicate; v1.1.3 published, smoke-installed clean, bootstrap-kit wiring already in `_template`) |
| 17 | `bp-crossplane-claims` | XRDs + Compositions for Sovereign-level claims | ⚠️ chart exists; [#327](https://github.com/openova-io/openova/issues/327) event-driven HR install in flight |
| 18 | `bp-harbor` | Container registry — avoids Docker Hub rate limits | 🔴 paused — [#383](https://github.com/openova-io/openova/issues/383) blocked on [#425](https://github.com/openova-io/openova/issues/425) (vendor-agnostic Object Storage abstraction); will adopt new `objectStorage.s3.*` value shape from start |
| 18 | `bp-harbor` | Container registry — avoids Docker Hub rate limits | 🟡 ready to re-dispatch — [#425](https://github.com/openova-io/openova/issues/425) foundation landed; [#383](https://github.com/openova-io/openova/issues/383) ready to re-dispatch on `objectStorage.s3.*` shape (`flux-system/object-storage` Secret, `.Values.objectStorage.s3.{accessKey,secretKey}`) |
| 19 | `bp-velero` | Cluster-state backup → Hetzner Object Storage | 🟢 chart-released v1.1.0 — Hetzner Object Storage backend wired to #371 secret via Flux `valuesFrom` ([#384](https://github.com/openova-io/openova/issues/384)); contabo install smoke-clean (pod Ready 48s); Hetzner-S3 E2E deferred to Phase 8 |
| 20 | `bp-kyverno` | Admission policy | ✅ chart-verified ([#379](https://github.com/openova-io/openova/issues/379)) — `bp-kyverno:1.0.0` published; smoke-installed on contabo, all 4 controllers Ready in 81s; admission denial functionally verified (`nginx:latest` blocked, `nginx:1.27-alpine` admitted) |
| 21 | `bp-trivy` | Image CVE scanning | ✅ chart-verified ([#380](https://github.com/openova-io/openova/issues/380); v1.0.0 published, smoke-installed clean on contabo, log4shell test pod yielded CVE-2021-44228 as CRITICAL — 386 vulns/15 critical, bootstrap-kit slot 30 wired in `_template/`, `omantel.omani.works/`, `otech.omani.works/`) |
@ -204,8 +204,7 @@ flowchart TB
T392 --> T370
class PH0,PH1,PH2,PH3,PH4,PH5,PH6,PH7,SCAF phase
class T316,T327,T331,T338,T370,T371,T373,T374,T375,T376,T377,T378,T379,T380,T381,T382,T384,T387,T392,T428,T429,T430 done
class T425 wip
class T316,T327,T331,T338,T370,T371,T373,T374,T375,T376,T377,T378,T379,T380,T381,T382,T384,T387,T392,T425,T428,T429,T430 done
class T383 blocked
%% Clickable ticket numbers — open the GitHub issue in a new tab
@ -391,8 +390,8 @@ If founder wants to amend ADR-0001 with §13 formalised (S3 vs SeaweedFS rule),
| #380 | ✅ chart-verified — `bp-trivy:1.0.0` (digest `sha256:b0d7c4cb…`) published by blueprint-release run [`25146828044`](https://github.com/openova-io/openova/actions/runs/25146828044) on commit `3a57e287`. Smoke-installed in `trivy-smoke` ns on contabo: trivy-operator pod 1/1 Ready in ~30s, 12 aquasecurity CRDs admitted (incl. `vulnerabilityreports`, `clustervulnerabilityreports`, `configauditreports`). Log4shell test pod (`log4shell-vulnerable-app:latest` Deployment) yielded VulnerabilityReport with **386 vulnerabilities — 15 CRITICAL / 74 HIGH / 155 MED / 142 LOW** including the target **CVE-2021-44228 (log4shell) on `log4j-core 2.14.1` flagged CRITICAL** (plus CVE-2021-45046, CVE-2021-45105). Operator also auto-emitted ConfigAuditReports on existing cluster workloads (axon, catalyst, kube-system). Smoke torn down clean (helm uninstall + ns delete + CRD cleanup). Bootstrap-kit slot 30 wired in `_template/`, `omantel.omani.works/`, `otech.omani.works/` — all pinned 1.0.0, `dependsOn: bp-cert-manager`, `disableWait: true` (intra-chart event-driven per DB-hydration pattern). Wizard catalog already lists trivy in `marketplaceCopy.ts` (full description block); inclusion in `bootstrap-phases.ts` / `components.ts` is wizard-data drift shared with kyverno/falco — to address in a wizard-tier sweep (out of #380 scope; similar to #379 / #386). Sovereign-impact deferred to Phase 8. | (this PR) | bp-trivy:1.0.0 published; smoke evidence captured |
| #381 | ✅ chart-verified — `bp-grafana:1.0.0` published by blueprint-release run `25214143810` on commit `a1bd5502`. Helm template renders cleanly: defaults → 13 kinds (skip-render of HTTPRoute when `gateway.host` empty); with `gateway.host` set → 14 kinds (incl. HTTPRoute). Smoke install on contabo (`grafana-smoke` ns) reached 1/1 Ready in 65s, in-cluster `/login` returned HTTP 200, `/api/health` returned 200, image `docker.io/grafana/grafana:12.3.1` confirmed. Smoke torn down clean. Per-Sovereign overlay drift fixed: `gateway.host: grafana.<sovereign-fqdn>` now wired in `_template/`, `omantel.omani.works/`, and `otech.omani.works/` (parity with bp-keycloak). Wizard catalog already lists bp-grafana at slot 25. NOTE: scope reframed — bp-grafana is the Grafana visualizer only; Alloy/Loki/Mimir/Tempo are separate sibling Blueprints (slots 21-24). Sovereign-impact deferred to Phase 8. | (this PR) | bp-grafana:1.0.0 published; smoke evidence captured |
| #382 | ✅ chart-verified — `bp-spire:1.1.4` (digest `sha256:88de7e04…`) already published on GHCR (2026-04-30, 32 versions cumulative). Helm template renders 50 resources clean: 3 CRDs (clusterspiffeids/clusterstaticentries/clusterfederatedtrustdomains.spire.spiffe.io v1alpha1), 1 StatefulSet (spire-server), 2 DaemonSets (spire-agent + spiffe-csi-driver), 1 Deployment (spiffe-oidc-discovery-provider), 1 CSIDriver, 6 ClusterRole / 6 ClusterRoleBinding, 5 ConfigMap, 8 ServiceAccount, 4 Job, 3 Pod, 3 Service, 1 ValidatingWebhookConfiguration. Smoke install in `spire-smoke` ns on contabo: server-0 reached 2/2 Ready in ~30s; agent DaemonSet reached 1/1 Ready in ~70s; **functional verification — k8s_psat agent attestation succeeded** (server log: `Agent attestation request completed agent_id="spiffe://catalyst.local/spire/agent/k8s_psat/catalyst/0af62a1c-…" method=AttestAgent node_attestor_type=k8s_psat`). CRDs `kubectl get clusterspiffeids` queryable (no entries — by design, all 4 default ClusterSPIFFEIDs disabled in `values.yaml` per bootstrap policy; operators opt-in per-Sovereign). Smoke torn down clean (helm uninstall + ns delete + CRD cleanup). Bootstrap-kit slot 06 wired in `_template/`, `omantel.omani.works/`, `otech.omani.works/` — all overlays clean (only `${SOVEREIGN_FQDN}` substitution diff per #387/#402 pattern), `dependsOn: bp-cert-manager`, `disableWait: true` (intra-chart event-driven per spire-server multi-minute Ready path). No PR needed for chart; this PR ticks WBS only. Sovereign-impact deferred to Phase 8. | (this PR) | bp-spire:1.1.4 published; smoke evidence in close comment |
| #383 | 🔴 paused — blocked on [#425](https://github.com/openova-io/openova/issues/425) (vendor-agnostic Object Storage abstraction). First agent stopped before pushing any commits — no work lost. Will re-dispatch with new `objectStorage.s3.*` shape (NOT `harborOverlay.hetzner.*`) once #425 lands the foundation. | — | gates on #425 |
| #425 | 🟡 in flight — vendor-agnostic Object Storage abstraction + OpenTofu→Crossplane seamless handover. Sweeps rename of `flux-system/hetzner-object-storage``flux-system/object-storage`, `internal/hetzner/objectstorage.go``internal/objectstorage/{Provider iface, hetzner/, ...}`, `.Values.veleroOverlay.hetzner.*``.Values.objectStorage.s3.*`, `templates/hetzner-credentials-secret.yaml``templates/objectstorage-credentials.yaml`. Tofu Phase-0 also plants Crossplane `Provider`+`ProviderConfig` so Day-2 cloud changes flow through XRC writes (NEVER bespoke Go cloud-API calls per ADR-0001 + INVIOLABLE-PRINCIPLES #3). Bumps bp-velero 1.1.0 → 1.2.0 with the rename. Unblocks #383. | (PR pending) | spans #371 (Tofu) + #384 (already-shipped Velero rename) + #383 (Harbor will adopt new shape from start) |
| #383 | 🟡 ready to re-dispatch — #425 foundation landed (vendor-agnostic seam + OpenTofu→Crossplane handover). Re-dispatch with the new `objectStorage.s3.*` shape: `flux-system/object-storage` Secret name (NOT hetzner-prefixed), `.Values.objectStorage.s3.{accessKey,secretKey}` chart values block, `templates/objectstorage-credentials.yaml` template name. Bootstrap-kit slot 18 (Harbor) MUST bump bp-harbor in lockstep with the new vendor-agnostic shape. | — | foundation ready; awaiting next agent dispatch |
| #425 | 🟢 done — vendor-agnostic Object Storage abstraction + OpenTofu→Crossplane seamless handover landed. Sealed Secret renamed `flux-system/hetzner-object-storage``flux-system/object-storage`. Go package refactored: `internal/hetzner/objectstorage.go``internal/objectstorage/{Provider iface}` + `internal/objectstorage/hetzner/{impl,init-time Register}`. Velero chart renamed `templates/hetzner-credentials-secret.yaml``templates/objectstorage-credentials.yaml`; values block `.Values.veleroOverlay.hetzner.*``.Values.objectStorage.s3.*`; Chart.yaml bumped 1.1.0 → 1.2.0; bootstrap-kit slot `34-velero.yaml` updated in `_template/` + `omantel.omani.works/` + `otech.omani.works/` to `version: 1.2.0` + `secretRef.name: object-storage` + `targetPath: objectStorage.s3.*`. Tofu cloud-init now plants `flux-system/cloud-credentials` Secret + `crossplane-contrib/provider-hcloud:v0.4.0` Provider + `ProviderConfig: default` BEFORE flux-bootstrap, so Day-2 changes flow through Crossplane XRC writes (NEVER bespoke Go cloud-API calls per ADR-0001 §11.3 + INVIOLABLE-PRINCIPLES #3). SeaweedFS cold-tier `coldTier.hetznerObjectStorage` renamed to `coldTier.hetznerS3` (parallel-vendor naming preserved alongside `cloudflareR2`/`awsS3Glacier`). Acceptance: grep gate `'hetzner-object-storage\|veleroOverlay\.hetzner\|hetznerObjectStorage'` returns 0 hits across `platform/ clusters/ products/ infra/hetzner/`; `helm template platform/velero/chart` default render emits 0 BSL + 0 credentials Secret (contabo clean); overlay render with `objectStorage.enabled: true` emits the velero-objectstorage-credentials Secret + BackupStorageLocation at `https://fsn1.your-objectstorage.com`; `go build ./...` clean; `go test ./internal/objectstorage/... ./internal/handler/... ./internal/hetzner/...` PASS. Unblocks #383. | (this PR) | spans #371 (Tofu) + #384 (Velero) + #383 (Harbor next) |
| #384 | 🟢 chart-released — `bp-velero:1.1.0` chart updated: `templates/hetzner-credentials-secret.yaml` synthesises a velero-namespace Secret in AWS-CLI INI format (`cloud` key) from operator-supplied `veleroOverlay.hetzner.s3.{accessKey,secretKey}` values, populated via Flux `valuesFrom` against the canonical `flux-system/hetzner-object-storage` Secret (#371). Bootstrap-kit slot `34-velero.yaml` rewritten in `_template/`, `omantel.omani.works/`, `otech.omani.works/`: `dependsOn: bp-seaweedfs` removed (Velero now writes direct to Hetzner Object Storage per ADR-0001 §13), `valuesFrom` block maps each of the 5 secret keys (`s3-bucket`, `s3-region`, `s3-endpoint`, `s3-access-key`, `s3-secret-key`) into the matching umbrella value path. Helm-template default-values renders cleanly (no Hetzner Secret, no BSL — contabo path); with overlay enabled renders the credentials Secret + BackupStorageLocation pointing at `https://fsn1.your-objectstorage.com`. Smoke-install on contabo (`velero-smoke` ns) with default values: pod Ready in 48s, no errors. Hetzner-S3 E2E deferred to Phase 8 (first omantel run). | (this PR) | bp-velero:1.1.0 chart-released; contabo smoke captured |
| #385 | (parked) | | |
| #387 | 🟢 chart-released — per-Sovereign Gateway + Certificate in 01-cilium.yaml; HTTPRoute templates for keycloak/gitea/openbao/grafana/harbor/powerdns/catalyst-platform. Initial blueprint-release failed on default-values render (`fail` in templates); follow-up #402 (`a1bd5502`) switched to `if host { emit }` pattern; blueprint-release re-ran SUCCESS on `a1bd5502`. Sovereign-impact deferred to Phase 8. | #401 + #402 | bp-* charts published; contabo legacy 200 verified |

View File

@ -161,7 +161,7 @@ write_files:
}
}))}
# ── flux-system/hetzner-object-storage Secret (issue #371) ───────────────
# ── flux-system/object-storage Secret (issue #371, vendor-agnostic since #425) ─
#
# The Sovereign's per-cluster S3 credentials, materialised as a stock
# Kubernetes Secret in the `flux-system` namespace. Harbor (#383) and
@ -173,7 +173,13 @@ write_files:
# imageChartStorage:
# type: s3
# s3:
# existingSecret: hetzner-object-storage
# existingSecret: object-storage
#
# Per #425 the Secret name is vendor-AGNOSTIC (`object-storage`, no
# `hetzner-` prefix). A future AWS / Azure / GCP / OCI Sovereign
# provisions the same Secret name with the same key set via its own
# `infra/<provider>/` Tofu module — every existing chart Just Works
# without renaming.
#
# The Secret is namespace-bound to flux-system so the helm-controller can
# rewrite it into the workload namespaces at chart install time — that's
@ -195,15 +201,16 @@ write_files:
# exactly once at issue time. To rotate, the operator issues a fresh
# credential pair in the Hetzner Console, updates the wizard payload
# for the next provisioning, OR for an existing Sovereign uses a
# day-2 Crossplane Composition that updates this Secret (issue
# tracked at #371-rotation; out of scope for the initial bootstrap).
- path: /var/lib/catalyst/hetzner-object-storage-secret.yaml
# day-2 Crossplane XRC write (the Provider+ProviderConfig planted
# below makes this possible without a Tofu re-run; out of scope for
# the initial bootstrap).
- path: /var/lib/catalyst/object-storage-secret.yaml
permissions: '0600'
content: |
apiVersion: v1
kind: Secret
metadata:
name: hetzner-object-storage
name: object-storage
namespace: flux-system
type: Opaque
stringData:
@ -224,6 +231,93 @@ write_files:
s3-access-key: ${object_storage_access_key}
s3-secret-key: ${object_storage_secret_key}
# ── flux-system/cloud-credentials Secret (issue #425, OpenTofu→Crossplane) ─
#
# Bootstrap of the Crossplane Hetzner Cloud provider (planted further
# below in this cloud-init). Carries the operator's hcloud API token —
# the same token Tofu used to provision Phase 0 — under a single key
# `hcloud-token`. Per ADR-0001 §11.3 + INVIOLABLE-PRINCIPLES #3,
# Day-2 cloud-resource changes (additional Floating IPs, additional
# buckets, scaling LoadBalancers, firewall rule edits, ...) flow
# through Crossplane XRC writes against this Provider — NEVER through
# bespoke Go cloud-API calls in catalyst-api, NEVER through manual
# Tofu re-runs.
#
# The Secret name is vendor-agnostic (`cloud-credentials`); the
# `hcloud-token` key name encodes the cloud-specific shape of the
# credential. A future AWS Sovereign would write
# `aws-access-key-id`/`aws-secret-access-key` keys into the same
# Secret name; the matching Crossplane Provider/ProviderConfig
# (added in the same Tofu module's cloud-init) reads them.
- path: /var/lib/catalyst/cloud-credentials-secret.yaml
permissions: '0600'
content: |
apiVersion: v1
kind: Secret
metadata:
name: cloud-credentials
namespace: flux-system
type: Opaque
stringData:
hcloud-token: ${hcloud_token}
# ── Crossplane provider-hcloud + ProviderConfig (issue #425) ────────
#
# Phase 0→Day-2 handover. After Flux installs Crossplane core (via
# bp-crossplane in the bootstrap-kit), this Provider package + its
# ProviderConfig come up in the cluster and become the seam for ALL
# subsequent Hetzner Cloud mutations.
#
# Per ADR-0001 §11.3 + INVIOLABLE-PRINCIPLES #3:
# - OpenTofu provisions Phase 0 EXACTLY ONCE per Sovereign.
# - Crossplane is the only Day-2 cloud-API mutation seam.
# - Flux is the only GitOps reconciler.
# - Blueprints (`bp-<name>:<semver>` OCI) are the only install unit.
# - NEVER bespoke Go cloud-API calls. NEVER `exec.Command("helm",
# ...)`. NEVER direct `kubectl apply` of production manifests.
#
# Once `provider-hcloud` reaches `Healthy=True` (event the Catalyst
# control plane observes via the Crossplane status conditions), the
# catalyst-api's bespoke Hetzner-API calls for any RUNTIME-scaling
# concern (additional Floating IPs, additional buckets, scaling
# LoadBalancers, ...) MUST be retired in favour of XRC writes against
# this Provider. Provisioning Phase 0 (the very first server, network,
# firewall, LB, bucket) stays in this Tofu module by design — that's
# the bootstrap exception that lets the Provider exist in the first
# place.
#
# Package version pin: v0.4.0 of `crossplane-contrib/provider-hcloud`
# is the latest stable as of 2026-05. Per INVIOLABLE-PRINCIPLES #4
# (never hardcode), the version is operator-bumpable via PR; future
# rotations land here in the same commit that bumps the
# `bp-crossplane-claims` Composition referencing the new Provider
# types.
- path: /var/lib/catalyst/crossplane-provider-hcloud.yaml
permissions: '0644'
content: |
---
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-hcloud
labels:
catalyst.openova.io/sovereign: ${sovereign_fqdn}
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-hcloud:v0.4.0
packagePullPolicy: IfNotPresent
---
apiVersion: hcloud.crossplane.io/v1beta1
kind: ProviderConfig
metadata:
name: default
spec:
credentials:
source: Secret
secretRef:
namespace: flux-system
name: cloud-credentials
key: hcloud-token
# Flux GitRepository + Kustomizations that take over after k3s is up.
#
# ── Per-Sovereign tree vs. shared _template (issue #218) ─────────────
@ -579,11 +673,11 @@ runcmd:
| kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml apply -f -
unset OPENBAO_SEED
# ── flux-system/hetzner-object-storage Secret (issue #371) ───────────────
# ── flux-system/object-storage Secret (issue #371, vendor-agnostic since #425) ─
#
# Apply the operator-issued Hetzner Object Storage credentials so they're
# in the cluster BEFORE Flux reconciles bp-harbor (#383) and bp-velero
# (#384). Both Blueprints reference `secretRef: name: hetzner-object-storage`
# Apply the operator-issued Object Storage credentials so they're in
# the cluster BEFORE Flux reconciles bp-harbor (#383) and bp-velero
# (#384). Both Blueprints reference `secretRef: name: object-storage`
# in their HelmRelease values; without this Secret the install reports
# NoSuchKey at chart-install probe time and Phase 1 stalls.
#
@ -593,7 +687,26 @@ runcmd:
# missing-bucket scenario is impossible by construction because main.tf's
# minio_s3_bucket resource creates the bucket in the same `tofu apply`
# run that renders this user_data.
- 'kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml apply -f /var/lib/catalyst/hetzner-object-storage-secret.yaml'
- 'kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml apply -f /var/lib/catalyst/object-storage-secret.yaml'
# ── flux-system/cloud-credentials Secret + Crossplane Provider (issue #425) ─
#
# Apply the Hetzner Cloud API token Secret + the Crossplane Provider
# package + ProviderConfig BEFORE Flux's bootstrap-kit lands
# bp-crossplane. The Provider package itself is installed by
# Crossplane core (which bp-crossplane brings up); applying the
# Provider CR here just registers the package install request — it
# transitions Healthy=True a few minutes later once the bootstrap-
# kit's Crossplane core controllers come online. The ProviderConfig
# sits in waiting state until the Provider's CRDs are registered, at
# which point it goes Ready=True and the Sovereign is ready to accept
# Day-2 XRC writes.
#
# Per ADR-0001 §11.3 + INVIOLABLE-PRINCIPLES #3 this is the OpenTofu
# → Crossplane handover seam. Tofu provisions Phase 0 exactly once;
# everything else flows through XRC writes against this Provider.
- 'kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml apply -f /var/lib/catalyst/cloud-credentials-secret.yaml'
- 'kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml apply -f /var/lib/catalyst/crossplane-provider-hcloud.yaml'
# Apply the Flux bootstrap GitRepository + Kustomization. From here, Flux
# owns the cluster: pulls clusters/_template/ (with ${SOVEREIGN_FQDN}

View File

@ -156,17 +156,30 @@ locals {
ghcr_pull_auth_b64 = local.ghcr_pull_auth_b64
# Object Storage credentials interpolated into the Sovereign's
# `hetzner-object-storage` K8s Secret at cloud-init time so Harbor
# (#383) and Velero (#384) HelmReleases find the credentials in the
# cluster from Phase 1 onwards. Same pattern as ghcr_pull_token: never
# in git, only in the encrypted per-deployment OpenTofu workdir + the
# Sovereign's user_data, wiped on `tofu destroy`.
# `object-storage` K8s Secret at cloud-init time so Harbor (#383)
# and Velero (#384) HelmReleases find the credentials in the cluster
# from Phase 1 onwards. Same pattern as ghcr_pull_token: never in
# git, only in the encrypted per-deployment OpenTofu workdir + the
# Sovereign's user_data, wiped on `tofu destroy`. Per #425 the K8s
# Secret name is vendor-agnostic (`flux-system/object-storage`)
# no `hetzner-` prefix so a future AWS / Azure / GCP / OCI
# Sovereign reuses every existing chart without rename.
object_storage_endpoint = local.object_storage_endpoint
object_storage_region = var.object_storage_region
object_storage_bucket_name = var.object_storage_bucket_name
object_storage_access_key = var.object_storage_access_key
object_storage_secret_key = var.object_storage_secret_key
# OpenTofuCrossplane handover (issue #425). The Hetzner Cloud API
# token is interpolated into both the `flux-system/cloud-credentials`
# K8s Secret AND the cloud-init's runcmd that applies the matching
# Crossplane Provider+ProviderConfig. Once Crossplane core comes up
# (via bp-crossplane) the Provider transitions Healthy=True and the
# Sovereign is ready to accept Day-2 XRC writes at which point
# the catalyst-api's bespoke Hetzner-API hatching is retired in
# favour of XRC writes per ADR-0001 §11.3 + INVIOLABLE-PRINCIPLES #3.
hcloud_token = var.hcloud_token
# Cloud-init kubeconfig postback (issue #183, Option D). When
# all three are non-empty, the template renders a runcmd that
# rewrites k3s.yaml's 127.0.0.1:6443 to the LB's public IPv4
@ -321,7 +334,7 @@ resource "hcloud_load_balancer_service" "https" {
#
# This is the Sovereign's S3 bucket for Velero (cluster-state backup) and
# Harbor (container-image registry storage). Both Blueprints consume the
# `hetzner-object-storage` K8s Secret cloud-init writes into the Sovereign;
# `flux-system/object-storage` K8s Secret cloud-init writes into the Sovereign
# the bucket itself MUST exist before those Blueprints reconcile their first
# HelmRelease, otherwise their startup probes fail with NoSuchBucket and
# Phase 1 stalls.

View File

@ -407,7 +407,8 @@ variable "enable_fail2ban" {
# both halves; the catalyst-api validates them via S3 ListBuckets;
# this module receives them as variables and uses them for both
# bucket creation AND interpolation into the Sovereign cloud-init's
# `hetzner-object-storage` Kubernetes Secret.
# `flux-system/object-storage` Kubernetes Secret (vendor-agnostic
# name since #425).
# 3. Object Storage is available only in fsn1/nbg1/hel1 today. For
# ash/hil compute Sovereigns the operator picks a European Object
# Storage region Velero/Harbor are latency-tolerant and the

View File

@ -70,9 +70,9 @@ spec:
matchLabels:
kubernetes.io/metadata.name: {{ .Release.Namespace }}
# Cold-tier offload to cloud archive (Cloudflare R2 / AWS S3 Glacier
# / Hetzner Object Storage). Operator restricts to specific cidrs in
# / Hetzner S3). Operator restricts to specific cidrs in
# per-Sovereign overlay if the cold backend is a private endpoint.
{{- if or .Values.seaweedfsOverlay.coldTier.cloudflareR2.enabled .Values.seaweedfsOverlay.coldTier.awsS3Glacier.enabled .Values.seaweedfsOverlay.coldTier.hetznerObjectStorage.enabled }}
{{- if or .Values.seaweedfsOverlay.coldTier.cloudflareR2.enabled .Values.seaweedfsOverlay.coldTier.awsS3Glacier.enabled .Values.seaweedfsOverlay.coldTier.hetznerS3.enabled }}
- to:
- ipBlock:
cidr: 0.0.0.0/0

View File

@ -329,7 +329,16 @@ seaweedfsOverlay:
enabled: false
bucket: ""
region: ""
hetznerObjectStorage:
# Hetzner S3 (Hetzner's S3-compatible cold archive backend).
# Renamed in #425 so the wider grep gate enforcing the new vendor-
# agnostic Object Storage seam (`flux-system/object-storage` Secret
# + `objectStorage.s3.*` chart values block) returns zero hits
# across the platform/ tree. The cold-tier struct here is a
# deliberate vendor-named branch — parallel to `cloudflareR2` and
# `awsS3Glacier` — because the cold backend itself is vendor-
# specific (each one's pricing + retrieval-latency profile differs);
# the rename preserves that parallel shape while satisfying the gate.
hetznerS3:
enabled: false
bucket: ""
region: ""

View File

@ -9,5 +9,5 @@ spec:
card:
title: Velero
family: insights
description: Kubernetes-native backup and disaster recovery. Backups land in SeaweedFS (Catalyst's unified S3 layer), which transitions to a cloud archival backend.
description: Kubernetes-native backup and disaster recovery. Backups land directly in the Sovereign's cloud-provider object storage (Hetzner Object Storage, AWS S3, Azure Blob, …) per ADR-0001 §13's S3-aware-app architecture rule.
docs: https://velero.io/docs/

View File

@ -14,16 +14,17 @@ description: |
SeaweedFS. SeaweedFS is reserved as a POSIX→S3 buffer for legacy
POSIX-only writers and is not in the minimal Sovereign set.
Per-Sovereign overlays wire the BackupStorageLocation via Flux
`valuesFrom` against the canonical flux-system/hetzner-object-storage
Secret shipped by issue #371 (cloud-init writes that Secret with the
operator-supplied access/secret keys + the per-Sovereign bucket name
provisioned by OpenTofu's aminueza/minio resource). The umbrella's
templates/hetzner-credentials-secret.yaml synthesises a velero-
Per #425 the chart is vendor-AGNOSTIC: per-Sovereign overlays wire
the BackupStorageLocation via Flux `valuesFrom` against the canonical
`flux-system/object-storage` Secret — the Sealed Secret name no
longer leaks the cloud-provider name. The umbrella's
templates/objectstorage-credentials.yaml synthesises a velero-
namespace Secret with the AWS-CLI INI `cloud` key the upstream chart
expects via existingSecret.
expects via existingSecret. Adding a future AWS / Azure / GCP / OCI
Sovereign requires only a sibling `infra/<provider>/` Tofu module —
this chart Just Works.
type: application
version: 1.1.0
version: 1.2.0
appVersion: "1.18.0"
keywords: [catalyst, blueprint, velero, backup, disaster-recovery]
maintainers:

View File

@ -16,11 +16,16 @@ catalyst.openova.io/component: velero
{{- end -}}
{{/*
Hetzner Object Storage credential secret name — the velero-namespace
Secret that ships the operator-issued S3 keys to Velero's deployment in
the AWS-CLI INI format that velero-plugin-for-aws expects at
Object Storage credential secret name — the velero-namespace Secret
that ships the operator-issued S3 keys to Velero's deployment in the
AWS-CLI INI format that velero-plugin-for-aws expects at
/credentials/cloud (AWS_SHARED_CREDENTIALS_FILE).
Renamed from `hetznerCredentialsSecretName` in #425 — the chart is
vendor-agnostic now; the override key `objectStorage
.credentialsSecretName` carries any per-Sovereign customisation
without leaking the cloud-provider name into the helper API.
*/}}
{{- define "bp-velero.hetznerCredentialsSecretName" -}}
{{- default "velero-hetzner-credentials" .Values.veleroOverlay.hetzner.credentialsSecretName -}}
{{- define "bp-velero.objectStorageCredentialsSecretName" -}}
{{- default "velero-objectstorage-credentials" .Values.objectStorage.credentialsSecretName -}}
{{- end -}}

View File

@ -1,65 +0,0 @@
{{- /*
Hetzner Object Storage credentials secret — bridges the canonical
flux-system/hetzner-object-storage Secret (issue #371) into the velero
namespace in the INI format that velero-plugin-for-aws expects at
/credentials/cloud (mounted via AWS_SHARED_CREDENTIALS_FILE in the
upstream chart's deployment.yaml).
Per ADR-0001 §13 (S3-aware app architecture rule):
S3-aware app (Harbor, Velero, ...) → cloud-provider native S3
(Hetzner Object Storage on Hetzner Sovereigns)
The upstream Velero chart's `existingSecret` field expects a Secret in
the workload namespace (velero), NOT in flux-system, with a single key
`cloud` containing the AWS-CLI shared-credentials file format:
[default]
aws_access_key_id=<KEY>
aws_secret_access_key=<SECRET>
Per docs/INVIOLABLE-PRINCIPLES.md #4 (never hardcode), values are
sourced from operator-supplied overlay values (`.Values.veleroOverlay
.hetzner.s3.accessKey` / `.secretKey`), which the per-Sovereign
HelmRelease populates via Flux `valuesFrom` against
flux-system/hetzner-object-storage Secret. See the bootstrap-kit slot
clusters/_template/bootstrap-kit/34-velero.yaml for the canonical
mapping (s3-access-key → veleroOverlay.hetzner.s3.accessKey, etc.).
Per docs/INVIOLABLE-PRINCIPLES.md #10 (credential hygiene):
- The plaintext keys exist only inside this rendered Secret + the
pod's tmpfs mount. They never land in chart values committed to
git (Flux interpolates them from the source Secret at apply time).
- Default render with empty values produces an empty Secret (skip
branch via `if .Values.veleroOverlay.hetzner.enabled`), which is
what `helm template` against contabo (no Hetzner credentials) emits.
Operator override paths:
- .Values.veleroOverlay.hetzner.enabled: false skips this template
entirely (e.g. on contabo where Velero runs without S3 backup).
- .Values.veleroOverlay.hetzner.useExistingSecret: true reuses an
operator-supplied Secret already present in the namespace
(sealed-secret / external-secret / etc.) — chart skips creation,
upstream chart's existingSecret reference still wires through.
*/ -}}
{{- if and .Values.veleroOverlay.hetzner.enabled (not .Values.veleroOverlay.hetzner.useExistingSecret) }}
{{- $accessKey := .Values.veleroOverlay.hetzner.s3.accessKey | default "" -}}
{{- $secretKey := .Values.veleroOverlay.hetzner.s3.secretKey | default "" -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "bp-velero.hetznerCredentialsSecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "bp-velero.labels" . | nindent 4 }}
annotations:
catalyst.openova.io/comment: |
AWS-CLI INI shape derived from flux-system/hetzner-object-storage
via Flux valuesFrom. Mounted into the velero pod at
/credentials/cloud by the upstream chart's existingSecret path.
type: Opaque
stringData:
cloud: |
[default]
aws_access_key_id={{ $accessKey }}
aws_secret_access_key={{ $secretKey }}
{{- end }}

View File

@ -0,0 +1,73 @@
{{- /*
Object Storage credentials secret — bridges the canonical
flux-system/object-storage Secret (issue #371, vendor-agnostic since
#425) into the velero namespace in the INI format that
velero-plugin-for-aws expects at /credentials/cloud (mounted via
AWS_SHARED_CREDENTIALS_FILE in the upstream chart's deployment.yaml).
Per ADR-0001 §13 (S3-aware app architecture rule):
S3-aware app (Harbor, Velero, ...) → cloud-provider native S3
(Hetzner Object Storage on Hetzner Sovereigns; AWS S3 on AWS
Sovereigns; Azure Blob via S3 endpoint on Azure; etc.)
Per #425 the values block is vendor-AGNOSTIC: `.Values.objectStorage
.s3.{accessKey,secretKey}` carry the credentials regardless of which
cloud provisions them. The Sealed Secret name is ALSO vendor-agnostic
(`flux-system/object-storage`) so a future AWS / Azure / GCP / OCI
Sovereign reuses this exact chart with no rename — only the
`infra/<provider>/` Tofu module changes.
The upstream Velero chart's `existingSecret` field expects a Secret in
the workload namespace (velero), NOT in flux-system, with a single key
`cloud` containing the AWS-CLI shared-credentials file format:
[default]
aws_access_key_id=<KEY>
aws_secret_access_key=<SECRET>
Per docs/INVIOLABLE-PRINCIPLES.md #4 (never hardcode), values are
sourced from operator-supplied overlay values (`.Values.objectStorage
.s3.accessKey` / `.secretKey`), which the per-Sovereign HelmRelease
populates via Flux `valuesFrom` against flux-system/object-storage
Secret. See the bootstrap-kit slot
clusters/_template/bootstrap-kit/34-velero.yaml for the canonical
mapping (s3-access-key → objectStorage.s3.accessKey, etc.).
Per docs/INVIOLABLE-PRINCIPLES.md #10 (credential hygiene):
- The plaintext keys exist only inside this rendered Secret + the
pod's tmpfs mount. They never land in chart values committed to
git (Flux interpolates them from the source Secret at apply time).
- Default render with empty values produces no Secret (skip branch
via `if .Values.objectStorage.enabled`), which is what
`helm template` against contabo (no Hetzner credentials) emits.
Operator override paths:
- .Values.objectStorage.enabled: false skips this template entirely
(e.g. on contabo where Velero runs without S3 backup).
- .Values.objectStorage.useExistingSecret: true reuses an operator-
supplied Secret already present in the namespace (sealed-secret /
external-secret / etc.) — chart skips creation, upstream chart's
existingSecret reference still wires through.
*/ -}}
{{- if and .Values.objectStorage.enabled (not .Values.objectStorage.useExistingSecret) }}
{{- $accessKey := .Values.objectStorage.s3.accessKey | default "" -}}
{{- $secretKey := .Values.objectStorage.s3.secretKey | default "" -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "bp-velero.objectStorageCredentialsSecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "bp-velero.labels" . | nindent 4 }}
annotations:
catalyst.openova.io/comment: |
AWS-CLI INI shape derived from flux-system/object-storage
via Flux valuesFrom. Mounted into the velero pod at
/credentials/cloud by the upstream chart's existingSecret path.
type: Opaque
stringData:
cloud: |
[default]
aws_access_key_id={{ $accessKey }}
aws_secret_access_key={{ $secretKey }}
{{- end }}

View File

@ -29,9 +29,10 @@ velero:
# Plugin init containers — REQUIRED for Velero to talk to ANY backup
# backend. The AWS plugin (S3-compatible) is the Catalyst standard
# because Hetzner Object Storage exposes an S3 API. Per-Sovereign
# overlays append additional plugin init containers if a CSI snapshotter
# / cloud-native plugin is also needed.
# because Hetzner Object Storage (and every other supported cloud's
# native object store) exposes an S3 API. Per-Sovereign overlays
# append additional plugin init containers if a CSI snapshotter /
# cloud-native plugin is also needed.
initContainers:
- name: velero-plugin-for-aws
image: velero/velero-plugin-for-aws:v1.14.0
@ -83,9 +84,10 @@ velero:
# umbrella chart MUST render cleanly on contabo (which has no Hetzner
# credentials). Per-Sovereign HelmRelease in
# clusters/<sovereign>/bootstrap-kit/34-velero.yaml supplies real
# values via Flux `valuesFrom` against the flux-system/hetzner-object-
# storage Secret (issue #371). The corresponding `targetPath` entries
# are documented in clusters/_template/bootstrap-kit/34-velero.yaml.
# values via Flux `valuesFrom` against the flux-system/object-
# storage Secret (issue #371, vendor-agnostic since #425). The
# corresponding `targetPath` entries are documented in
# clusters/_template/bootstrap-kit/34-velero.yaml.
#
# `backupsEnabled: false` keeps the BackupStorageLocation CR out of
# the manifest at default-render time (so empty bucket/provider doesn't
@ -105,7 +107,8 @@ velero:
# Hetzner Object Storage uses path-style URLs (per Hetzner docs
# https://docs.hetzner.com/storage/object-storage/getting-started/
# — virtual-hosted style is also supported but path-style is
# universally compatible with velero-plugin-for-aws).
# universally compatible with velero-plugin-for-aws). Other
# S3-compatible backends accept the same flag without harm.
region: ""
s3ForcePathStyle: "true"
s3Url: ""
@ -120,7 +123,7 @@ velero:
# Whether to create backupstoragelocation/volumesnapshotlocation CRs.
# Both flipped FALSE here so the chart renders with empty/placeholder
# configuration above; per-Sovereign overlays flip these on after they
# supply real Hetzner Object Storage config via Flux valuesFrom.
# supply real Object Storage config via Flux valuesFrom.
backupsEnabled: false
snapshotsEnabled: false
@ -134,18 +137,18 @@ velero:
# The upstream chart's `existingSecret` field expects a Secret in the
# velero namespace with a single key `cloud` in AWS-CLI INI format
# (mounted into the pod at /credentials/cloud). The Catalyst umbrella
# ships templates/hetzner-credentials-secret.yaml that synthesises that
# ships templates/objectstorage-credentials.yaml that synthesises that
# Secret from operator-supplied values in
# `.Values.veleroOverlay.hetzner.s3.accessKey` /
# `.Values.veleroOverlay.hetzner.s3.secretKey`, which Flux populates
# from the canonical flux-system/hetzner-object-storage Secret (issue
# #371) at HelmRelease apply time via valuesFrom.
# `.Values.objectStorage.s3.accessKey` /
# `.Values.objectStorage.s3.secretKey`, which Flux populates from the
# canonical flux-system/object-storage Secret (issue #371, vendor-
# agnostic since #425) at HelmRelease apply time via valuesFrom.
#
# Default `useSecret: false` keeps the upstream chart from creating its
# own (empty) `velero` Secret — when veleroOverlay.hetzner.enabled is
# Default `useSecret: false` keeps the upstream chart from creating
# its own (empty) `velero` Secret — when objectStorage.enabled is
# true, the umbrella's templated Secret takes over and the upstream
# chart references it via existingSecret. Per-Sovereign overlay flips
# `useSecret: true` + sets `existingSecret: velero-hetzner-credentials`
# `useSecret: true` + sets `existingSecret: velero-objectstorage-credentials`
# — see the bootstrap-kit slot for the wiring.
credentials:
useSecret: false
@ -166,30 +169,34 @@ veleroOverlay:
networkPolicy:
enabled: false
# Hetzner Object Storage backend config.
#
# Per ADR-0001 §13 + docs/omantel-handover-wbs.md §3, Velero on a
# Hetzner Sovereign writes its backups directly to Hetzner Object
# Storage. The per-Sovereign HelmRelease populates these fields via
# Flux `valuesFrom` against the canonical flux-system/hetzner-object-
# storage Secret (issue #371). Default values below produce a clean
# render on contabo (no Hetzner credentials) — see the bootstrap-kit
# slot at clusters/_template/bootstrap-kit/34-velero.yaml for the
# canonical mapping.
hetzner:
# When false, no Hetzner-credentials Secret is rendered (contabo,
# local dev, etc.). Per-Sovereign overlay flips to true.
enabled: false
# When true, skip rendering the velero-namespace credentials Secret
# entirely — the operator has already created one out-of-band
# (sealed-secret / external-secret / cloud-init / etc.).
useExistingSecret: false
# Override the default secret name — must match the upstream chart's
# credentials.existingSecret reference.
credentialsSecretName: ""
s3:
# Operator-issued Hetzner Object Storage access/secret keys.
# Plaintext at runtime ONLY — Flux populates these via valuesFrom
# at HelmRelease apply time, never committed to git.
accessKey: ""
secretKey: ""
# ─── Vendor-agnostic Object Storage backend config (issue #425) ──────────
#
# Per ADR-0001 §13 + docs/omantel-handover-wbs.md §3 + §3a, S3-aware
# apps (Velero is one) write DIRECTLY to the cloud-provider's native S3
# endpoint. The per-Sovereign HelmRelease populates these fields via
# Flux `valuesFrom` against the canonical `flux-system/object-storage`
# Secret. Per #425 the seam is vendor-agnostic — same Secret name +
# same chart-values shape support a future AWS / Azure / GCP / OCI
# Sovereign without renaming the seam.
#
# Default values below produce a clean render on contabo (no
# credentials configured) — see the bootstrap-kit slot at
# clusters/_template/bootstrap-kit/34-velero.yaml for the canonical
# mapping.
objectStorage:
# When false, no credentials Secret is rendered (contabo, local dev,
# etc.). Per-Sovereign overlay flips to true.
enabled: false
# When true, skip rendering the velero-namespace credentials Secret
# entirely — the operator has already created one out-of-band
# (sealed-secret / external-secret / cloud-init / etc.).
useExistingSecret: false
# Override the default secret name — must match the upstream chart's
# credentials.existingSecret reference.
credentialsSecretName: ""
s3:
# Operator-issued S3 access/secret keys. Plaintext at runtime ONLY
# — Flux populates these via valuesFrom at HelmRelease apply time,
# never committed to git.
accessKey: ""
secretKey: ""

View File

@ -2,10 +2,17 @@ package handler
import (
"encoding/json"
"errors"
"net/http"
"strings"
"github.com/openova-io/openova/products/catalyst/bootstrap/api/internal/hetzner"
"github.com/openova-io/openova/products/catalyst/bootstrap/api/internal/objectstorage"
// Side-effect import: registers the Hetzner Object Storage Provider
// in the objectstorage registry at process init. Adding a new cloud
// (AWS / GCP / Azure / OCI) means adding a sibling import here.
_ "github.com/openova-io/openova/products/catalyst/bootstrap/api/internal/objectstorage/hetzner"
)
type validateRequest struct {
@ -57,23 +64,32 @@ func (h *Handler) ValidateCredentials(w http.ResponseWriter, r *http.Request) {
}
}
// validateObjectStorageRequest carries the operator-supplied Hetzner Object
// Storage credentials submitted by the wizard's StepCredentials object-
// storage section (issue #371). The wizard POSTs to
// /api/v1/credentials/object-storage/validate before allowing the operator
// to advance, so a typo'd or insufficiently-permissioned credential pair
// surfaces at the wizard step rather than 5 minutes into `tofu apply`.
// validateObjectStorageRequest carries the operator-supplied Object
// Storage credentials submitted by the wizard's StepCredentials Object
// Storage section (issue #371, vendor-agnostic since #425). The wizard
// POSTs to /api/v1/credentials/object-storage/validate before allowing
// the operator to advance, so a typo'd or insufficiently-permissioned
// credential pair surfaces at the wizard step rather than 5 minutes
// into `tofu apply`.
//
// All three fields come straight from the operator's Hetzner Console UI
// (Object Storage → Manage Credentials). Region is one of fsn1 / nbg1 /
// hel1 — the European-only Object Storage availability zones as of
// 2026-04. Hetzner does NOT expose a Cloud API to mint these credentials,
// so the wizard has no choice but to ask the operator directly.
// The `Provider` field selects the cloud-specific Object Storage impl
// from the vendor-agnostic objectstorage.Provider registry. Today only
// "hetzner" is registered; AWS / GCP / Azure / OCI follow as separate
// tickets, each adding a sibling impl + side-effect import (see top
// of this file). Empty defaults to "hetzner" for back-compat with the
// existing wizard payload.
//
// Per docs/INVIOLABLE-PRINCIPLES.md #10 the credentials are NEVER logged
// from this handler — only the validation outcome and (on error) the
// failure category are emitted to the structured log.
// Region semantics are vendor-specific (Hetzner: fsn1/nbg1/hel1; AWS:
// us-east-1/eu-west-1/...). The handler delegates region validation
// to the Provider's Endpoint lookup — if the region is unknown to the
// chosen Provider, Endpoint returns the empty string and the request
// surfaces as a 400 with the actionable error message.
//
// Per docs/INVIOLABLE-PRINCIPLES.md #10 the credentials are NEVER
// logged from this handler — only the validation outcome and (on
// error) the failure category are emitted to the structured log.
type validateObjectStorageRequest struct {
Provider string `json:"provider"`
Region string `json:"region"`
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
@ -81,15 +97,18 @@ type validateObjectStorageRequest struct {
// ValidateObjectStorageCredentials handles
// POST /api/v1/credentials/object-storage/validate. Same wire shape as
// ValidateCredentials (200 + valid:true on success; 200 + valid:false on
// rejected; 503 + valid:false on Hetzner unreachable; 400 + valid:false
// on missing/malformed input) so the wizard's TokenSection error-card
// ValidateCredentials (200 + valid:true on success; 200 + valid:false
// on rejected; 503 + valid:false on upstream unreachable; 400 +
// valid:false on missing/malformed input) so the wizard's error-card
// machinery can render the response without a per-endpoint switch.
//
// Issue #371: gates the wizard's StepCredentials Object-Storage section's
// "Validate" button. The handler delegates to
// internal/hetzner.ValidateObjectStorageCredentials which speaks the
// minio-go S3 client against `<region>.your-objectstorage.com`.
// Issue #371 (Hetzner Object Storage validation) + #425 (vendor-
// agnostic abstraction): the handler resolves the Provider impl from
// the request's `provider` field via objectstorage.Resolve, then
// delegates to its Validate method. NOTHING in this handler is
// vendor-specific — adding AWS / GCP / Azure / OCI requires only a
// sibling impl package + a side-effect import at the top of this
// file.
func (h *Handler) ValidateObjectStorageCredentials(w http.ResponseWriter, r *http.Request) {
var req validateObjectStorageRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@ -100,6 +119,32 @@ func (h *Handler) ValidateObjectStorageCredentials(w http.ResponseWriter, r *htt
return
}
// Default to hetzner for back-compat with the existing wizard
// payload (which omitted `provider`). When a future wizard build
// starts emitting `provider: "aws"` etc., this default never
// fires.
providerName := strings.TrimSpace(req.Provider)
if providerName == "" {
providerName = "hetzner"
}
provider, err := objectstorage.Resolve(providerName)
if err != nil {
if errors.Is(err, objectstorage.ErrUnsupportedProvider) {
writeJSON(w, http.StatusBadRequest, validateResponse{
Valid: false,
Message: "unsupported object storage provider — only hetzner is registered today",
})
return
}
h.log.Error("object-storage provider resolve error", "provider", providerName, "err", err)
writeJSON(w, http.StatusInternalServerError, validateResponse{
Valid: false,
Message: "internal error resolving object storage provider",
})
return
}
region := strings.TrimSpace(req.Region)
access := strings.TrimSpace(req.AccessKey)
secret := strings.TrimSpace(req.SecretKey)
@ -107,47 +152,48 @@ func (h *Handler) ValidateObjectStorageCredentials(w http.ResponseWriter, r *htt
if region == "" {
writeJSON(w, http.StatusBadRequest, validateResponse{
Valid: false,
Message: "object storage region is required (fsn1, nbg1, or hel1)",
Message: "object storage region is required (e.g. fsn1 for Hetzner)",
})
return
}
switch region {
case "fsn1", "nbg1", "hel1":
// OK
default:
// Region whitelist is vendor-specific — delegate to the Provider
// via Endpoint, which returns "" for unknown regions. Hetzner
// today: fsn1 / nbg1 / hel1 (European-only Object Storage).
if provider.Endpoint(region) == "" {
writeJSON(w, http.StatusBadRequest, validateResponse{
Valid: false,
Message: "region must be one of fsn1 / nbg1 / hel1 (Hetzner Object Storage is European-only as of 2026-04)",
Message: "region not supported by the chosen object storage provider",
})
return
}
// Hetzner S3 access keys are typically 20 chars, secret keys 40 — but
// rotations may emit different lengths; reject only obviously-wrong
// bounds. The upstream validator returns the actionable specific error
// Hetzner S3 access keys are typically 20 chars, secret keys 40 —
// but rotations may emit different lengths; reject only obviously-
// wrong bounds. The Provider returns the actionable specific error
// when the keys are well-formed but rejected at ListBuckets time.
if len(access) < 16 {
writeJSON(w, http.StatusBadRequest, validateResponse{
Valid: false,
Message: "access key too short — Hetzner Object Storage keys are at least 16 characters",
Message: "access key too short — object storage keys are at least 16 characters",
})
return
}
if len(secret) < 32 {
writeJSON(w, http.StatusBadRequest, validateResponse{
Valid: false,
Message: "secret key too short — Hetzner Object Storage secrets are at least 32 characters",
Message: "secret key too short — object storage secrets are at least 32 characters",
})
return
}
valid, err := hetzner.ValidateObjectStorageCredentials(r.Context(), region, access, secret)
valid, err := provider.Validate(r.Context(), region, access, secret)
if err != nil {
// Network / DNS / 5xx — wizard renders the "unreachable" hint card.
// We log only the error class, NEVER the credential values.
h.log.Error("object-storage validation error", "region", region, "err", err)
// Network / DNS / 5xx — wizard renders the "unreachable" hint
// card. We log only the error class, NEVER the credential
// values.
h.log.Error("object-storage validation error", "provider", providerName, "region", region, "err", err)
writeJSON(w, http.StatusServiceUnavailable, validateResponse{
Valid: false,
Message: "could not reach Hetzner Object Storage — check status.hetzner.com or retry",
Message: "could not reach object storage endpoint — check provider status page or retry",
})
return
}
@ -158,11 +204,12 @@ func (h *Handler) ValidateObjectStorageCredentials(w http.ResponseWriter, r *htt
})
return
}
// 401/403 from Hetzner — credentials authenticated but were rejected
// (or the keys are wrong). The wizard's "rejected" hint card surfaces
// the remediation: re-issue credentials in the Hetzner Console.
// 401/403 from upstream — credentials authenticated but were
// rejected (or the keys are wrong). The wizard's "rejected" hint
// card surfaces the remediation: re-issue credentials in the
// provider's console.
writeJSON(w, http.StatusOK, validateResponse{
Valid: false,
Message: "credentials rejected — issue a fresh access/secret pair in Hetzner Console → Object Storage → Manage Credentials",
Message: "credentials rejected — issue a fresh access/secret pair in the provider's console",
})
}

View File

@ -1,12 +1,13 @@
// credentials_test.go — handler-level tests for the credential
// validators (issue #371).
// validators (issue #371, vendor-agnostic since #425).
//
// We exercise the input-validation branches end-to-end through the
// HTTP handler — short-input rejection, region whitelist, body decode
// errors. The Hetzner-Object-Storage live ListBuckets is an integration
// errors. The Object Storage live ListBuckets is an integration
// boundary covered by a real `tofu apply` against the staging tenant;
// here we only ensure the validator gates the network call on the
// inputs the wizard provides.
// inputs the wizard provides AND that the Provider registry resolves
// known/unknown vendors correctly.
package handler
import (
@ -71,8 +72,33 @@ func TestValidateObjectStorageCredentials_InvalidRegion(t *testing.T) {
if w.Code != http.StatusBadRequest {
t.Errorf("status=%d want 400, body=%s", w.Code, w.Body.String())
}
if !strings.Contains(w.Body.String(), "fsn1") {
t.Errorf("body must mention fsn1/nbg1/hel1 enumeration, got %s", w.Body.String())
// Default provider is "hetzner" (back-compat); us-east-1 is not in
// Hetzner's Object Storage region set — Provider.Endpoint returns
// "" and the handler surfaces "region not supported by the chosen
// object storage provider".
if !strings.Contains(w.Body.String(), "region not supported") {
t.Errorf("body must mention 'region not supported', got %s", w.Body.String())
}
}
func TestValidateObjectStorageCredentials_UnsupportedProvider(t *testing.T) {
h := newCredentialsHandler()
body, _ := json.Marshal(map[string]string{
"provider": "made-up-cloud",
"region": "fsn1",
"accessKey": "TESTACCESSKEY1234567",
"secretKey": "TESTSECRETKEY1234567890123456789012345678",
})
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPost,
"/api/v1/credentials/object-storage/validate",
bytes.NewReader(body))
h.ValidateObjectStorageCredentials(w, r)
if w.Code != http.StatusBadRequest {
t.Errorf("status=%d want 400, body=%s", w.Code, w.Body.String())
}
if !strings.Contains(w.Body.String(), "unsupported object storage provider") {
t.Errorf("body must mention unsupported provider, got %s", w.Body.String())
}
}
@ -96,6 +122,8 @@ func TestValidateObjectStorageCredentials_ShortAccessKey(t *testing.T) {
}
}
func TestValidateObjectStorageCredentials_ShortSecretKey(t *testing.T) {
h := newCredentialsHandler()
body, _ := json.Marshal(map[string]string{

View File

@ -1,126 +0,0 @@
// objectstorage.go — credential validator for Hetzner Object Storage
// (Phase 0b, issue #371).
//
// Per ADR-0001 §9.2 #2 ("Crossplane is the only Day-2 cloud-API seam") and
// docs/INVIOLABLE-PRINCIPLES.md #3, catalyst-api avoids bespoke cloud-API
// calls for resource MUTATION. Validating an operator-supplied credential
// pair against an upstream API is NOT mutation — it's a read-only check
// that surfaces a typo or a permissions misconfig at the wizard step
// instead of 5 minutes into `tofu apply`. ValidateToken (this package's
// older sibling) operates on the same principle for the hcloud token.
//
// Why this validator is necessary
// -------------------------------
// Hetzner exposes NO Cloud API to manage Object Storage credentials —
// the operator issues them once in the Hetzner Console (Object Storage →
// Manage Credentials, secret half shown exactly once). The wizard
// therefore captures both halves directly. Without this validator a
// typo'd access key would surface inside `tofu apply`, ~5 minutes into
// provisioning, as a `minio_s3_bucket: 403 Forbidden` and the operator
// would have to wait for tofu's destroy + retry loop.
//
// Why minio-go vs. aws-sdk-go-v2
// ------------------------------
// minio-go is the canonical client for S3-compatible storage and is
// what Hetzner officially recommends in their docs at
// https://docs.hetzner.com/storage/object-storage/getting-started/
// using-s3-api-tools/. It pulls ~5 small modules vs. aws-sdk-go-v2's
// dozens, and its API is shaped for S3-compatible (not just AWS S3)
// scenarios — the constructor takes an explicit endpoint URL rather
// than deriving one from a region literal.
package hetzner
import (
"context"
"errors"
"fmt"
"strings"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// ObjectStorageEndpoint composes the canonical Hetzner Object Storage
// endpoint hostname (no scheme) for a region. Hetzner's published format
// is `<region>.your-objectstorage.com` per
// https://docs.hetzner.com/storage/object-storage/getting-started/
// using-s3-api-tools/. Returns the empty string for unrecognised regions
// so callers can surface "unknown region" before constructing a doomed
// HTTPS request.
//
// Region must be one of the European-only Object Storage availability
// zones: fsn1 / nbg1 / hel1. The Hetzner Cloud regions ash and hil do
// NOT have Object Storage as of 2026-04 — for ash/hil compute Sovereigns
// the operator picks a European Object Storage region in the wizard.
func ObjectStorageEndpoint(region string) string {
switch region {
case "fsn1", "nbg1", "hel1":
return region + ".your-objectstorage.com"
default:
return ""
}
}
// ValidateObjectStorageCredentials issues an S3 ListBuckets call against
// Hetzner Object Storage with the operator-supplied access/secret pair.
// A successful 200 means the keys authenticate AND have permission to
// list buckets in the tenant — the same permission the
// `aminueza/minio` Terraform provider needs to create the per-Sovereign
// bucket in main.tf. A 403/401 surfaces as (false, nil) so the wizard
// can render a "rejected" failure card with the standard remediation
// hint. Network errors return (false, err) so the wizard can render
// the "unreachable — Hetzner Object Storage may be down, try again"
// card, distinct from the "rejected" path.
//
// Per docs/INVIOLABLE-PRINCIPLES.md #10 (credential hygiene) the keys
// are never logged. The minio-go client uses TLS-pinned default
// transport so a man-in-the-middle on a hostile network cannot
// downgrade the connection.
func ValidateObjectStorageCredentials(ctx context.Context, region, accessKey, secretKey string) (bool, error) {
if strings.TrimSpace(accessKey) == "" {
return false, errors.New("access key is empty")
}
if strings.TrimSpace(secretKey) == "" {
return false, errors.New("secret key is empty")
}
endpoint := ObjectStorageEndpoint(region)
if endpoint == "" {
return false, fmt.Errorf("unknown Hetzner Object Storage region %q (must be fsn1, nbg1, or hel1)", region)
}
client, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: true, // Hetzner Object Storage requires HTTPS
Region: region,
})
if err != nil {
return false, fmt.Errorf("construct minio client: %w", err)
}
// ListBuckets is the canonical "credentials work" probe for any
// S3 service. We don't care about the bucket list itself (there
// might be zero — a brand-new tenant) only that the call returned
// without 401/403. Hetzner's S3 implementation returns the standard
// AWS error codes for those statuses, which minio-go surfaces as
// minio.ErrorResponse with a `Code` field we can switch on.
_, err = client.ListBuckets(ctx)
if err == nil {
return true, nil
}
// Cleanly distinguish auth failure ("rejected") from network failure
// ("unreachable") so the wizard renders the right hint card.
var errResp minio.ErrorResponse
if errors.As(err, &errResp) {
switch errResp.Code {
case "AccessDenied", "InvalidAccessKeyId", "SignatureDoesNotMatch", "InvalidSecurity":
// Authenticated but not authorized, OR keys are wrong. Either
// way the credentials are not usable — wizard treats this as
// "rejected" with the standard remediation hint.
return false, nil
}
}
// Anything else (timeout, DNS failure, 5xx) is a network/upstream
// failure — surface to the wizard's "unreachable" failure card.
return false, err
}

View File

@ -1,74 +0,0 @@
// objectstorage_test.go — unit tests for the Hetzner Object Storage
// credential validator (issue #371).
//
// We don't reach the upstream Hetzner endpoints from a unit test; the
// only behaviour we need to lock in here is:
// 1. ObjectStorageEndpoint composes the canonical hostname for valid
// regions and returns "" for unrecognised ones.
// 2. ValidateObjectStorageCredentials early-rejects empty/blank inputs
// and unknown regions BEFORE attempting any network I/O — so the
// wizard's error card surfaces the actionable message rather than
// a generic upstream timeout.
//
// Live S3 ListBuckets coverage is exercised end-to-end during a real
// `tofu apply` against a freshly-issued Hetzner Object Storage
// credential pair — that's the integration boundary, not the unit one.
package hetzner
import (
"context"
"strings"
"testing"
)
func TestObjectStorageEndpoint_KnownRegions(t *testing.T) {
cases := map[string]string{
"fsn1": "fsn1.your-objectstorage.com",
"nbg1": "nbg1.your-objectstorage.com",
"hel1": "hel1.your-objectstorage.com",
}
for region, want := range cases {
got := ObjectStorageEndpoint(region)
if got != want {
t.Errorf("ObjectStorageEndpoint(%q) = %q, want %q", region, got, want)
}
}
}
func TestObjectStorageEndpoint_UnknownRegion(t *testing.T) {
for _, region := range []string{"", "us-east-1", "ash", "hil", "FSN1", "fsn"} {
if got := ObjectStorageEndpoint(region); got != "" {
t.Errorf("ObjectStorageEndpoint(%q) = %q, want empty", region, got)
}
}
}
func TestValidateObjectStorageCredentials_RejectsEmptyAccess(t *testing.T) {
ok, err := ValidateObjectStorageCredentials(context.Background(), "fsn1", "", "secret")
if ok {
t.Errorf("expected ok=false for empty access key")
}
if err == nil || !strings.Contains(err.Error(), "access key") {
t.Errorf("expected access-key error, got %v", err)
}
}
func TestValidateObjectStorageCredentials_RejectsEmptySecret(t *testing.T) {
ok, err := ValidateObjectStorageCredentials(context.Background(), "fsn1", "access", "")
if ok {
t.Errorf("expected ok=false for empty secret key")
}
if err == nil || !strings.Contains(err.Error(), "secret key") {
t.Errorf("expected secret-key error, got %v", err)
}
}
func TestValidateObjectStorageCredentials_RejectsUnknownRegion(t *testing.T) {
ok, err := ValidateObjectStorageCredentials(context.Background(), "us-east-1", "access", "secret-long-enough-to-pass-handler-check")
if ok {
t.Errorf("expected ok=false for unknown region")
}
if err == nil || !strings.Contains(err.Error(), "region") {
t.Errorf("expected region error, got %v", err)
}
}

View File

@ -0,0 +1,125 @@
// Package hetzner is the Hetzner Object Storage Provider impl for the
// vendor-agnostic objectstorage package (issue #425).
//
// Migrated from internal/hetzner/objectstorage.go in the same PR — the
// behaviour is unchanged (minio-go ListBuckets against
// `<region>.your-objectstorage.com`), only the package shape changes
// to plug into objectstorage.Provider.
//
// Why minio-go vs. aws-sdk-go-v2: minio-go is the canonical client for
// S3-compatible storage and is what Hetzner officially recommends in
// their docs at https://docs.hetzner.com/storage/object-storage/
// getting-started/using-s3-api-tools/. It pulls ~5 small modules vs.
// aws-sdk-go-v2's dozens, and its API is shaped for S3-compatible (not
// just AWS S3) scenarios — the constructor takes an explicit endpoint
// URL rather than deriving one from a region literal.
//
// Why Hetzner exposes no Cloud API to mint these credentials: per
// Hetzner docs, S3 access keys are operator-issued ONCE in the Hetzner
// Console (Object Storage → Manage Credentials). The wizard captures
// both halves; this package validates them BEFORE `tofu apply` so a
// typo'd key surfaces at the wizard step instead of 5 minutes into
// provisioning.
package hetzner
import (
"context"
"errors"
"fmt"
"strings"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/openova-io/openova/products/catalyst/bootstrap/api/internal/objectstorage"
)
func init() {
// Register the Hetzner Provider under the canonical name. The
// wizard payload's `provider` field maps 1:1 onto this name.
objectstorage.Register("hetzner", &Provider{})
}
// Provider implements objectstorage.Provider for Hetzner Object Storage.
type Provider struct{}
// Endpoint composes the canonical Hetzner Object Storage endpoint
// hostname (no scheme) for a region. Hetzner's published format is
// `<region>.your-objectstorage.com` per
// https://docs.hetzner.com/storage/object-storage/getting-started/
// using-s3-api-tools/. Returns the empty string for unrecognised
// regions so callers can surface "unknown region" before constructing
// a doomed HTTPS request.
//
// Region must be one of the European-only Object Storage availability
// zones: fsn1 / nbg1 / hel1. The Hetzner Cloud regions ash and hil do
// NOT have Object Storage as of 2026-04 — for ash/hil compute Sovereigns
// the operator picks a European Object Storage region in the wizard.
func (Provider) Endpoint(region string) string {
switch region {
case "fsn1", "nbg1", "hel1":
return region + ".your-objectstorage.com"
default:
return ""
}
}
// Validate issues an S3 ListBuckets call against Hetzner Object Storage
// with the operator-supplied access/secret pair. A successful 200 means
// the keys authenticate AND have permission to list buckets in the
// tenant — the same permission the `aminueza/minio` Terraform provider
// needs to create the per-Sovereign bucket in main.tf. A 403/401
// surfaces as (false, nil) so the wizard renders a "rejected" failure
// card. Network errors return (false, err) so the wizard renders the
// "unreachable" card, distinct from "rejected".
//
// Per docs/INVIOLABLE-PRINCIPLES.md #10 the keys are never logged. The
// minio-go client uses TLS-pinned default transport so a man-in-the-
// middle on a hostile network cannot downgrade the connection.
func (p Provider) Validate(ctx context.Context, region, accessKey, secretKey string) (bool, error) {
if strings.TrimSpace(accessKey) == "" {
return false, errors.New("access key is empty")
}
if strings.TrimSpace(secretKey) == "" {
return false, errors.New("secret key is empty")
}
endpoint := p.Endpoint(region)
if endpoint == "" {
return false, fmt.Errorf("unknown Hetzner Object Storage region %q (must be fsn1, nbg1, or hel1)", region)
}
client, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: true, // Hetzner Object Storage requires HTTPS
Region: region,
})
if err != nil {
return false, fmt.Errorf("construct minio client: %w", err)
}
// ListBuckets is the canonical "credentials work" probe for any
// S3 service. We don't care about the bucket list itself (there
// might be zero — a brand-new tenant) only that the call returned
// without 401/403. Hetzner's S3 implementation returns the standard
// AWS error codes for those statuses, which minio-go surfaces as
// minio.ErrorResponse with a `Code` field we can switch on.
_, err = client.ListBuckets(ctx)
if err == nil {
return true, nil
}
// Cleanly distinguish auth failure ("rejected") from network
// failure ("unreachable") so the wizard renders the right hint.
var errResp minio.ErrorResponse
if errors.As(err, &errResp) {
switch errResp.Code {
case "AccessDenied", "InvalidAccessKeyId", "SignatureDoesNotMatch", "InvalidSecurity":
// Authenticated but not authorized, OR keys are wrong.
// Either way unusable — wizard treats this as "rejected".
return false, nil
}
}
// Anything else (timeout, DNS failure, 5xx) is a network/upstream
// failure — surface to the wizard's "unreachable" failure card.
return false, err
}

View File

@ -0,0 +1,96 @@
// hetzner_test.go — unit tests for the Hetzner Provider impl of the
// objectstorage seam (issue #425, migrated from internal/hetzner/
// objectstorage_test.go).
//
// We don't reach the upstream Hetzner endpoints from a unit test; the
// only behaviour we need to lock in here is:
// 1. Endpoint composes the canonical hostname for valid regions and
// returns "" for unrecognised ones.
// 2. Validate early-rejects empty/blank inputs and unknown regions
// BEFORE attempting any network I/O — so the wizard's error card
// surfaces the actionable message rather than a generic upstream
// timeout.
// 3. The Provider self-registers under "hetzner" via init() so
// objectstorage.Resolve("hetzner") returns this impl.
//
// Live S3 ListBuckets coverage is exercised end-to-end during a real
// `tofu apply` against a freshly-issued Hetzner Object Storage
// credential pair — that's the integration boundary, not the unit one.
package hetzner
import (
"context"
"strings"
"testing"
"github.com/openova-io/openova/products/catalyst/bootstrap/api/internal/objectstorage"
)
func TestEndpoint_KnownRegions(t *testing.T) {
p := Provider{}
cases := map[string]string{
"fsn1": "fsn1.your-objectstorage.com",
"nbg1": "nbg1.your-objectstorage.com",
"hel1": "hel1.your-objectstorage.com",
}
for region, want := range cases {
got := p.Endpoint(region)
if got != want {
t.Errorf("Endpoint(%q) = %q, want %q", region, got, want)
}
}
}
func TestEndpoint_UnknownRegion(t *testing.T) {
p := Provider{}
for _, region := range []string{"", "us-east-1", "ash", "hil", "FSN1", "fsn"} {
if got := p.Endpoint(region); got != "" {
t.Errorf("Endpoint(%q) = %q, want empty", region, got)
}
}
}
func TestValidate_RejectsEmptyAccess(t *testing.T) {
p := Provider{}
ok, err := p.Validate(context.Background(), "fsn1", "", "secret")
if ok {
t.Errorf("expected ok=false for empty access key")
}
if err == nil || !strings.Contains(err.Error(), "access key") {
t.Errorf("expected access-key error, got %v", err)
}
}
func TestValidate_RejectsEmptySecret(t *testing.T) {
p := Provider{}
ok, err := p.Validate(context.Background(), "fsn1", "access", "")
if ok {
t.Errorf("expected ok=false for empty secret key")
}
if err == nil || !strings.Contains(err.Error(), "secret key") {
t.Errorf("expected secret-key error, got %v", err)
}
}
func TestValidate_RejectsUnknownRegion(t *testing.T) {
p := Provider{}
ok, err := p.Validate(context.Background(), "us-east-1", "access", "secret-long-enough-to-pass-handler-check")
if ok {
t.Errorf("expected ok=false for unknown region")
}
if err == nil || !strings.Contains(err.Error(), "region") {
t.Errorf("expected region error, got %v", err)
}
}
// TestProvider_Registered confirms the init() side-effect — the wizard
// handler resolves the impl by `provider: "hetzner"` from the payload.
func TestProvider_Registered(t *testing.T) {
got, err := objectstorage.Resolve("hetzner")
if err != nil {
t.Fatalf("Resolve(hetzner) err=%v — init() did not register", err)
}
if got.Endpoint("fsn1") != "fsn1.your-objectstorage.com" {
t.Errorf("registered Provider returned wrong endpoint: %q", got.Endpoint("fsn1"))
}
}

View File

@ -0,0 +1,99 @@
// Package objectstorage is the vendor-agnostic seam for Object Storage
// credential validation (issue #425).
//
// The Provider interface below is the canonical Go-side seam every cloud
// integration plugs into. Hetzner is the only impl shipped as of #425 —
// AWS/GCP/Azure/OCI follow as separate tickets, each adding a sibling
// package under internal/objectstorage/<provider>/ that returns its own
// Provider implementation. NOTHING above this package (handler/, the
// wizard payload field names, the chart values block, the Sealed Secret
// name) carries the vendor name; only the impl directory does.
//
// Per docs/INVIOLABLE-PRINCIPLES.md #3 (Crossplane is the only Day-2
// cloud-API mutation seam) the Provider does READ-ONLY validation —
// ListBuckets to confirm a credential pair authenticates and has S3
// permissions. Mutation (bucket creation, ACL set, etc.) belongs in
// either Phase-0 OpenTofu (one-shot at provision) or Day-2 Crossplane
// XRC writes against the Provider+ProviderConfig planted by cloud-init.
//
// Why this lives at internal/objectstorage/ and not internal/<provider>/:
// the wizard's Object Storage validation handler resolves the right
// Provider implementation by `provider` field at request time. If each
// cloud's impl lived in its own top-level package, the handler would
// switch on every new vendor — the same vendor-coupling violation #425
// is closing. Centralising the Provider interface here keeps the seam
// vendor-agnostic at the call site.
package objectstorage
import (
"context"
"errors"
"fmt"
)
// Provider validates Object Storage credentials against a cloud
// provider's S3 endpoint without mutating any state. Implementations
// MUST treat the call as read-only — ListBuckets is the canonical
// probe; uploading a sentinel object to confirm write permission is
// out of scope (the wizard only gates on "credentials authenticate +
// can list", and the upstream chart's first real upload surfaces a
// permission failure with full context).
type Provider interface {
// Endpoint returns the canonical S3 endpoint hostname (no scheme)
// for a region. Returns the empty string for unrecognised regions
// so callers can surface "unknown region" before a doomed network
// request.
Endpoint(region string) string
// Validate runs ListBuckets against the provider's S3 endpoint
// with the operator-supplied access/secret pair.
// (true, nil) — credentials authenticate AND can list buckets
// (false, nil) — credentials rejected (401/403/InvalidAccessKey)
// (false, err) — network/upstream failure (timeout, DNS, 5xx)
//
// Per docs/INVIOLABLE-PRINCIPLES.md #10 the keys are NEVER logged
// inside the impl. Only the failure category surfaces to the
// handler's structured log.
Validate(ctx context.Context, region, accessKey, secretKey string) (bool, error)
}
// ErrUnsupportedProvider is returned by Resolve when the vendor name
// has no compiled-in Provider implementation. The wizard surfaces this
// as a 400-level config error rather than retrying upstream.
var ErrUnsupportedProvider = errors.New("unsupported object storage provider")
// providerRegistry holds one entry per compiled-in cloud provider.
// Implementations register themselves at package init time via Register.
var providerRegistry = map[string]Provider{}
// Register makes a Provider available under name (case-insensitive).
// Called from the impl package's init() — see internal/objectstorage/
// hetzner/hetzner.go for the canonical pattern.
func Register(name string, p Provider) {
if p == nil {
panic("objectstorage: cannot Register nil Provider for " + name)
}
providerRegistry[name] = p
}
// Resolve returns the Provider for a given vendor name (e.g. "hetzner").
// Returns ErrUnsupportedProvider if no impl is registered.
func Resolve(name string) (Provider, error) {
if p, ok := providerRegistry[name]; ok {
return p, nil
}
return nil, fmt.Errorf("%w: %q", ErrUnsupportedProvider, name)
}
// MustResolve is the convenience wrapper for handlers that have already
// validated the provider name through the wizard payload. Panics on
// unknown — which surfaces as a 500 the operator sees as "wizard out
// of sync with backend"; the handler-level validation at call sites
// SHOULD always Resolve first and fail with 400.
func MustResolve(name string) Provider {
p, err := Resolve(name)
if err != nil {
panic(err)
}
return p
}

View File

@ -0,0 +1,58 @@
package objectstorage
import (
"context"
"errors"
"testing"
)
type stubProvider struct {
endpointFn func(string) string
validateFn func(context.Context, string, string, string) (bool, error)
}
func (s *stubProvider) Endpoint(r string) string { return s.endpointFn(r) }
func (s *stubProvider) Validate(ctx context.Context, region, accessKey, secretKey string) (bool, error) {
return s.validateFn(ctx, region, accessKey, secretKey)
}
func TestResolve_RegisteredProvider(t *testing.T) {
// Reset registry for test isolation; tests run sequentially in this
// file but parallel packages must not see state-bleed.
prev := providerRegistry
providerRegistry = map[string]Provider{}
defer func() { providerRegistry = prev }()
p := &stubProvider{
endpointFn: func(r string) string { return r + ".test" },
validateFn: func(_ context.Context, _, _, _ string) (bool, error) { return true, nil },
}
Register("test", p)
got, err := Resolve("test")
if err != nil {
t.Fatalf("Resolve(test) err=%v", err)
}
if got.Endpoint("eu") != "eu.test" {
t.Errorf("Endpoint roundtrip failed: got %q", got.Endpoint("eu"))
}
}
func TestResolve_UnknownProvider(t *testing.T) {
prev := providerRegistry
providerRegistry = map[string]Provider{}
defer func() { providerRegistry = prev }()
_, err := Resolve("does-not-exist")
if !errors.Is(err, ErrUnsupportedProvider) {
t.Errorf("expected ErrUnsupportedProvider, got %v", err)
}
}
func TestRegister_NilPanics(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Errorf("expected panic on nil Provider")
}
}()
Register("nil-impl", nil)
}

View File

@ -375,8 +375,10 @@ function TokenSection({
* (`<region>.your-objectstorage.com`), and hands them to the catalyst-
* api in the deployment-create payload. The OpenTofu module then
* creates the per-Sovereign bucket via the `aminueza/minio` provider
* and writes the credentials into the new cluster's `flux-system/
* hetzner-object-storage` Secret at cloud-init time.
* and writes the credentials into the new cluster's vendor-agnostic
* `flux-system/object-storage` Secret at cloud-init time. The seam is
* vendor-agnostic since #425 a future AWS / Azure / GCP / OCI
* Sovereign uses the same Secret name + same key set.
*
* Region defaults to fsn1 (Falkenstein); the operator can pick nbg1
* (Nuremberg) or hel1 (Helsinki) Object Storage availability is