diff --git a/README.md b/README.md index 628e585..2a51ddd 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,11 @@ # ceph-s3-backup -Repo for automated backup of ceph s3 buckets +Repo for automated backup of ceph s3 buckets. + +The backup script searches the cluster for ObjectBucketClaim resources created in any namespace. Using the bucket names it finds, it runs an rclone sync from the source rook-ceph object store to rustfs. Currently released: -- Alpine Image with kubectl and rclone - -Outstanding: - -- Examples of - - Cronjob and Deployment with right permissions +- docker/ Alpine Image with kubectl and rclone +- k8s/ +  - working Deployment (for troubleshooting / manual execution) and CronJob manifests, which also include the required RBAC permissions diff --git a/k8s/configmap/config.yaml b/k8s/configmap/config.yaml new file mode 100644 index 0000000..07990c5 --- /dev/null +++ b/k8s/configmap/config.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: rclone-config + namespace: rook-ceph-s3-backup +data: + rclone.conf: | + [ceph] + type = s3 + provider = Ceph + access_key_id = CHANGE_ME + secret_access_key = CHANGE_ME + endpoint = http://rook-ceph-rgw-s3.rook-ceph.svc:80 + acl = private + + [rustfs] + type = s3 + provider = Other + access_key_id = CHANGE_ME + secret_access_key = CHANGE_ME + acl = private + region = other-v2-signature + endpoint = https://rustfs.example.org diff --git a/k8s/configmap/script.yaml b/k8s/configmap/script.yaml new file mode 100644 index 0000000..1e25f14 --- /dev/null +++ b/k8s/configmap/script.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: rclone-sync-script + namespace: rook-ceph-s3-backup +data: + rclone-sync-script.sh: | + #!/usr/bin/env sh + set -eu + + SRC_REMOTE="ceph:" + DST_REMOTE="rustfs:ceph-s3-backup" # + + BUCKETS=$(kubectl get ObjectBucketClaim -A \ + -o jsonpath='{range .items[*]}{.spec.bucketName}{"\n"}{end}' \ + | sort -u) + + if [ -z "$BUCKETS" ]; then + echo "No buckets found" + exit 0 + fi + + echo "$BUCKETS" | 
while IFS= read -r bucket; do + [ -z "$bucket" ] && continue + + echo "Syncing: $bucket" + + rclone sync \ + "${SRC_REMOTE}${bucket}" \ + "${DST_REMOTE}/${bucket}" \ + --transfers 4 \ + --checkers 8 \ + --log-level INFO + + done + + echo "Done" diff --git a/k8s/cronjob.yaml b/k8s/cronjob.yaml new file mode 100644 index 0000000..4ff4ccb --- /dev/null +++ b/k8s/cronjob.yaml @@ -0,0 +1,51 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: rook-ceph-s3-backup-daily + namespace: rook-ceph-s3-backup +spec: + schedule: '15 8 * * *' + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + template: + metadata: + labels: + app: rclone-tools + spec: + restartPolicy: Never + serviceAccountName: rook-ceph-s3-backup + containers: + - name: rclone + image: gitea.sikorski.cloud/rogersik/ceph-s3-backup:latest + imagePullPolicy: IfNotPresent + env: + - name: HOME + value: /root + command: + - sh + - -c + - | + set -euo pipefail + + # Execute sync script + /usr/local/bin/rclone-sync-script.sh + volumeMounts: + - name: rclone-sync-script + mountPath: /usr/local/bin/rclone-sync-script.sh + subPath: rclone-sync-script.sh + readOnly: true + - name: rclone-config + mountPath: /root/.config/rclone/rclone.conf + subPath: rclone.conf + volumes: + - name: rclone-sync-script + configMap: + name: rclone-sync-script + defaultMode: 0755 + - name: rclone-config + configMap: + name: rclone-config diff --git a/k8s/deployment.yaml b/k8s/deployment.yaml new file mode 100644 index 0000000..a20e1c0 --- /dev/null +++ b/k8s/deployment.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rclone-tools + namespace: rook-ceph-s3-backup + annotations: + reloader.stakater.com/auto: 'true' +spec: + replicas: 1 + selector: + matchLabels: + app: rclone-tools + template: + metadata: + labels: + app: rclone-tools + spec: + serviceAccountName: rook-ceph-s3-backup + containers: + - name: rclone + image: 
gitea.sikorski.cloud/rogersik/ceph-s3-backup:latest + imagePullPolicy: IfNotPresent + env: + - name: HOME + value: /root + command: + - sh + - -c + - | + sleep infinity + volumeMounts: + - name: rclone-sync-script + mountPath: /usr/local/bin/rclone-sync-script.sh + subPath: rclone-sync-script.sh + readOnly: true + - name: rclone-config + mountPath: /root/.config/rclone/rclone.conf + subPath: rclone.conf + volumes: + - name: rclone-sync-script + configMap: + name: rclone-sync-script + defaultMode: 0755 + - name: rclone-config + configMap: + name: rclone-config diff --git a/k8s/rbac/ClusterRole.yaml b/k8s/rbac/ClusterRole.yaml new file mode 100644 index 0000000..28576a4 --- /dev/null +++ b/k8s/rbac/ClusterRole.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rook-ceph-s3-backup +rules: + - apiGroups: ['objectbucket.io'] + resources: ['objectbucketclaims'] + verbs: ['get', 'list', 'watch'] diff --git a/k8s/rbac/ClusterRoleBinding.yaml b/k8s/rbac/ClusterRoleBinding.yaml new file mode 100644 index 0000000..95517de --- /dev/null +++ b/k8s/rbac/ClusterRoleBinding.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-ceph-s3-backup +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-s3-backup +subjects: + - kind: ServiceAccount + name: rook-ceph-s3-backup + namespace: rook-ceph-s3-backup diff --git a/k8s/rbac/ServiceAccount.yaml b/k8s/rbac/ServiceAccount.yaml new file mode 100644 index 0000000..316803e --- /dev/null +++ b/k8s/rbac/ServiceAccount.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-s3-backup + namespace: rook-ceph-s3-backup