Compare commits

..

No commits in common. "main" and "ey" have entirely different histories.
main ... ey

38 changed files with 19 additions and 1353 deletions

View file

@ -1,10 +1,3 @@
# home-server
Kubernetes manifests that define my home server
THIS SPIRALED
Originally a kustomize but I wanted to be able to let others use it and tweak
hostname values, so it became my first ever Helm chart.
I know I don't love Helm, but I needed to use it sadly.

View file

@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - monitoring/provider.yaml
  - monitoring/grafana.yaml
  - monitoring/loki.yaml
  - monitoring/prometheus.yaml
  - operators/mariadb.yaml
  - operators/replicator.yaml
  - dns/namespace.yaml
  - dns/bind.yaml
  - dns/externaldns.yaml
  - ssl/certmanager.yaml
  - auth/authentik.yaml
  - files/nextcloud.yaml
  - files/syncthing.yaml

View file

@ -1,60 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: mariadb-system
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: mariadb-operator
  namespace: flux-system
spec:
  url: https://helm.mariadb.com/mariadb-operator
  interval: 1h
---
# CRDs install as a dedicated release so the operator release below can
# declare a dependsOn and only roll out once the CRDs exist.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: mariadb-operator-crds
  namespace: mariadb-system
spec:
  interval: 30m
  chart:
    spec:
      chart: mariadb-operator-crds
      version: "0.38.1"
      sourceRef:
        kind: HelmRepository
        name: mariadb-operator
        namespace: flux-system
  install:
    createNamespace: true
  upgrade:
    disableWait: true
  # NOTE(review): the flat source listed "timeout: 5m" after upgrade;
  # placed at spec level here — confirm whether upgrade.timeout was intended.
  timeout: 5m
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: mariadb-operator
  namespace: mariadb-system
spec:
  interval: 30m
  chart:
    spec:
      chart: mariadb-operator
      version: "0.38.1"
      sourceRef:
        kind: HelmRepository
        name: mariadb-operator
        namespace: flux-system
  install:
    createNamespace: true
  dependsOn:
    - name: mariadb-operator-crds
      namespace: mariadb-system
  values:
    metrics:
      enabled: true

View file

@ -1,98 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubernetes-replicator
  namespace: kube-system
---
# NOTE(review): these grants are far broader than secret/configmap
# replication requires — consider trimming toward least privilege.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kubernetes-replicator
rules:
  - apiGroups: ["", "apps", "extensions"]
    resources:
      - secrets
      - configmaps
      - roles
      - rolebindings
      - cronjobs
      - deployments
      - events
      - ingresses
      - jobs
      - pods
      - pods/attach
      - pods/exec
      - pods/log
      - pods/portforward
      - services
      - namespaces
      - serviceaccounts
    verbs: ["*"]
  - apiGroups: ["batch"]
    resources:
      - configmaps
      - cronjobs
      - deployments
      - events
      - ingresses
      - jobs
      - pods
      - pods/attach
      - pods/exec
      - pods/log
      - pods/portforward
      - services
    verbs: ["*"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources:
      - roles
      - rolebindings
      - clusterrolebindings
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-replicator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-replicator
subjects:
  - kind: ServiceAccount
    name: kubernetes-replicator
    namespace: kube-system
---
# FIX: upgraded from source.toolkit.fluxcd.io/v1beta2 to v1 for
# consistency with every other HelmRepository in this repo.
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: mittwald
  namespace: flux-system
spec:
  url: https://helm.mittwald.de
  interval: 1h
---
# FIX: upgraded from helm.toolkit.fluxcd.io/v2beta1 (deprecated) to v2,
# matching the other HelmRelease objects in this repo.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kubernetes-replicator
  namespace: kube-system
spec:
  interval: 5m
  chart:
    spec:
      chart: kubernetes-replicator
      sourceRef:
        kind: HelmRepository
        name: mittwald
        namespace: flux-system
  install:
    createNamespace: false
  upgrade:
    disableWait: false
  values:
    serviceAccount:
      create: false
      name: kubernetes-replicator

View file

@ -1,138 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: bind-master-config
  namespace: home-server
data:
  named.conf: |
    include "/etc/bind/externaldns-key.conf";
    options {
        directory "/var/cache/bind";
        recursion yes;
        allow-query { any; };
        listen-on port 53 { any; };
        listen-on-v6 port 53 { any; };
        forwarders {
            10.40.0.254;
        };
        dnssec-validation auto;
    };
    zone "." IN {
        type hint;
        file "/usr/share/dns/root.hints";
    };
    zone "hxme.net." IN {
        type master;
        file "/etc/bind/db.hxme.net";
        allow-update { key "externaldns-key"; };
    };
  # NOTE(review): this zone file is mounted read-only from the ConfigMap
  # while allow-update is enabled — named cannot write its journal there;
  # confirm dynamic updates actually persist across restarts.
  db.hxme.net: |
    $TTL 3600
    @   IN  SOA ns1.hxme.net. admin.hxme.net. (
            1        ; Serial
            7200     ; Refresh
            1800     ; Retry
            1209600  ; Expire
            86400 )  ; Negative Cache TTL
    ;
    @   IN  NS  ns1.hxme.net.
    ns1 IN  A   10.40.0.110
    @   IN  A   10.40.0.110
    www IN  A   10.40.0.110
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: bind-master
  namespace: home-server
spec:
  selector:
    matchLabels:
      app: bind-master
  template:
    metadata:
      labels:
        app: bind-master
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      securityContext:
        fsGroup: 999
      initContainers:
        # Fetches fresh root hints at pod start; requires egress to internic.net.
        - name: fetch-root-hints
          image: debian:12
          command:
            - sh
            - -c
            - |
              apt update && apt -y install curl
              curl -sfSL https://www.internic.net/domain/named.cache -o /usr/share/dns/root.hints
          volumeMounts:
            - mountPath: /usr/share/dns
              name: root-hints
      containers:
        - name: bind-master
          image: internetsystemsconsortium/bind9:9.18
          command: ["named", "-g", "-c", "/etc/bind/named.conf"]
          ports:
            - containerPort: 53
              protocol: UDP
            - containerPort: 53
              protocol: TCP
          volumeMounts:
            - name: config
              mountPath: /etc/bind/named.conf
              subPath: named.conf
            - name: config
              mountPath: /etc/bind/db.hxme.net
              subPath: db.hxme.net
            - name: dns-secrets
              mountPath: /etc/bind/externaldns-key.conf
              subPath: externaldns-key.conf
            - name: bind-cache
              mountPath: /var/cache/bind
            - name: bind-rundir
              mountPath: /var/run/named
            - name: root-hints
              mountPath: /usr/share/dns
      volumes:
        - name: dns-secrets
          secret:
            secretName: dns-secrets
        - name: config
          configMap:
            name: bind-master-config
        - name: bind-cache
          emptyDir: {}
        - name: bind-rundir
          emptyDir: {}
        - name: root-hints
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: bind-master
  namespace: home-server
spec:
  selector:
    app: bind-master
  ports:
    - name: dns-udp
      port: 53
      protocol: UDP
      targetPort: 53
    - name: dns-tcp
      port: 53
      protocol: TCP
      targetPort: 53

View file

@ -1,73 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: jetstack
  namespace: flux-system
spec:
  url: https://charts.jetstack.io
  interval: 1h
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: cert-manager
  namespace: home-server
spec:
  interval: 30m
  chart:
    spec:
      chart: cert-manager
      version: "v1.18.2"
      sourceRef:
        kind: HelmRepository
        name: jetstack
        namespace: flux-system
  install:
    crds: CreateReplace
    createNamespace: true
  values:
    # NOTE(review): redundant with install.crds above — both keep CRDs
    # managed; harmless but one mechanism would suffice.
    installCRDs: true
    extraArgs:
      - --dns01-recursive-nameservers-only
      - --dns01-recursive-nameservers=8.8.8.8:53,1.1.1.1:53
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-rfc2136
spec:
  acme:
    email: admin@hxme.net
    server: https://acme-v02.api.letsencrypt.org/directory
    # Staging endpoint for testing:
    # server: https://acme-staging-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-rfc2136
    solvers:
      - dns01:
          rfc2136:
            nameserver: hawke.hxst.com.au:53
            tsigKeyName: "hxme-update-key"
            tsigAlgorithm: HMACSHA512
            tsigSecretSecretRef:
              name: hxme-update-key
              key: hxme-update-key
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-hxme-net
  namespace: home-server
spec:
  secretName: wildcard-hxme-net
  secretTemplate:
    annotations:
      replicator.v1.mittwald.de/replication-allowed: "true"
      replicator.v1.mittwald.de/replicate-to: "home-media"
  issuerRef:
    name: letsencrypt-rfc2136
    kind: ClusterIssuer
  commonName: "hxme.net"
  dnsNames:
    - "hxme.net"
    - "*.hxme.net"

View file

@ -1,78 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: external-dns
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints", "pods"]
    verbs: ["get", "watch", "list"]
  - apiGroups: ["extensions", "networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list", "watch"]
  - apiGroups: ["discovery.k8s.io"]
    resources: ["endpointslices"]
    verbs: ["get", "watch", "list"]
  # Add DNS provider specific rules here if needed (e.g., for AWS IAM, GCP etc.)
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: external-dns-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-dns
subjects:
  - kind: ServiceAccount
    name: external-dns
    namespace: home-server
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns
  namespace: home-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
  namespace: home-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: external-dns
  template:
    metadata:
      labels:
        app: external-dns
    spec:
      serviceAccountName: external-dns
      containers:
        - name: external-dns
          # NOTE(review): unpinned :latest tag — pin a version for
          # reproducible rollouts.
          image: bitnami/external-dns:latest
          args:
            - --source=service
            - --source=ingress
            - --provider=rfc2136
            - --rfc2136-host=bind-master.home-server.svc.cluster.local
            - --rfc2136-port=53
            - --rfc2136-zone=hxme.net
            - --rfc2136-tsig-secret=$(RFC2136_TSIG_SECRET)
            - --rfc2136-tsig-secret-alg=hmac-sha256
            - --rfc2136-tsig-keyname=externaldns-key
            - --policy=sync
            - --registry=txt
            - --txt-owner-id=my-cluster
          env:
            - name: RFC2136_TSIG_SECRET
              valueFrom:
                secretKeyRef:
                  name: dns-secrets
                  key: externaldns-secret

View file

@ -1,8 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - bind.yaml
  - externaldns.yaml
  - certmanager.yaml

View file

@ -1,42 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: authentik
  namespace: flux-system
spec:
  url: https://charts.goauthentik.io/
  interval: 1h
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: authentik
  namespace: home-server
spec:
  interval: 30m
  chart:
    spec:
      chart: authentik
      version: "2024.4.2"
      sourceRef:
        kind: HelmRepository
        name: authentik
        namespace: flux-system
  install:
    createNamespace: true
  upgrade:
    disableWait: false
  # NOTE(review): the flat source listed "timeout: 10m" after upgrade;
  # placed at spec level here — confirm whether upgrade.timeout was intended.
  timeout: 10m
  # Environment-specific values (credentials etc.) come from this Secret.
  valuesFrom:
    - kind: Secret
      name: authentik-values
  values:
    server:
      ingress:
        annotations:
          external-dns.alpha.kubernetes.io/hostname: auth.hxme.net
        tls:
          - secretName: wildcard-hxme-net
            hosts:
              - auth.hxme.net

View file

@ -1,104 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: dovecot-config
  namespace: home-server
data:
  dovecot.conf: |
    log_path = /dev/stdout
    info_log_path = /dev/stdout
    debug_log_path = /dev/stdout
    syslog_facility = local0
    protocols = imap
    listen = *
    disable_plaintext_auth = yes
    mail_location = maildir:/data/%u
    base_dir = /var/run/dovecot/
    ssl = required
    ssl_cert = </etc/ssl/hxme/fullchain.pem
    ssl_key = </etc/ssl/hxme/privkey.pem
    auth_mechanisms = plain login
    passdb {
      driver = ldap
      args = /config/ldap.conf
    }
    userdb {
      driver = static
      args = uid=1000 gid=1000 home=/data/%u
    }
    service imap-login {
      inet_listener imap {
        port = 0
      }
      inet_listener imaps {
        port = 993
        ssl = yes
      }
    }
  # SECURITY(review): dnpass is a plaintext LDAP bind credential committed
  # in a ConfigMap — move it into a Secret and rotate the exposed password.
  ldap.conf: |
    hosts = ldap://auth.hxme.net
    auth_bind = yes
    base = dc=ldap,dc=goauthentik,dc=io
    dn = cn=binduser,ou=service-accounts,dc=ldap,dc=goauthentik,dc=io
    dnpass = FtaJpthRpKyhEEy69H5qxPymtSeSeuCT9SQCdXmWDeAe7cgTCnk6HXpSzTNS
    user_attrs = =home=/data/%u
    user_filter = (&(objectClass=person)(uid=%u))
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dovecot
  namespace: home-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dovecot
  template:
    metadata:
      labels:
        app: dovecot
    spec:
      containers:
        - name: dovecot
          image: registry.gitlab.com/dxcker/dovecot:latest
          ports:
            - containerPort: 993
              name: imaps
          # NOTE(review): /data (maildirs) is not backed by a volume —
          # mail is lost on pod restart; confirm this is intentional.
          volumeMounts:
            - name: config
              mountPath: /config/
            - name: certs
              mountPath: /etc/ssl/hxme
              readOnly: true
      volumes:
        - name: config
          configMap:
            name: dovecot-config
        # FIX: volume renamed from "tls" to "certs" so the "certs"
        # volumeMount above resolves (it previously referenced an
        # undefined volume, making the Deployment invalid).
        - name: certs
          secret:
            secretName: wildcard-hxme-net
        # FIX: removed the unused "ldap" volume — nothing mounted it and
        # it referenced a ConfigMap ("dovecot-ldap") not defined in this
        # repo, which would have blocked pod startup.
---
apiVersion: v1
kind: Service
metadata:
  name: dovecot
  namespace: home-server
spec:
  selector:
    app: dovecot
  ports:
    - name: imaps
      port: 993
      targetPort: 993
  type: ClusterIP

View file

@ -1,11 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - authentik.yaml
  - nextcloud.yaml
  - vaultwarden.yaml
  - linkwarden.yaml
  - samba.yaml
  - dovecot.yaml

View file

@ -1,95 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: linkwarden-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  hostPath:
    path: /dpool/services/linkwarden/app
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: linkwarden-pvc
  namespace: home-server
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
  volumeName: linkwarden-pv
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: linkwarden
  namespace: home-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: linkwarden
  template:
    metadata:
      labels:
        app: linkwarden
    spec:
      # Seeds the PVC from the image's bundled /data on first run only.
      initContainers:
        - name: copy-linkwarden
          image: ghcr.io/linkwarden/linkwarden:latest
          command: ["/bin/sh", "-c"]
          args:
            - |
              if [ -z "$(ls -A /new_data)" ]; then
                echo "/new_data is empty, initializing..."
                cp -r /data/. /new_data/
              else
                echo "/new_data already initialized, skipping copy."
              fi
          volumeMounts:
            - name: linkwarden-data
              mountPath: /new_data
      containers:
        - name: linkwarden
          image: ghcr.io/linkwarden/linkwarden:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: "production"
            - name: TZ
              value: "Australia/Brisbane"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: linkwarden-postgres-secret
                  key: DATABASE_URL
          volumeMounts:
            - name: linkwarden-data
              mountPath: /data
      volumes:
        - name: linkwarden-data
          persistentVolumeClaim:
            claimName: linkwarden-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: linkwarden
  namespace: home-server
spec:
  selector:
    app: linkwarden
  ports:
    - port: 3000
      targetPort: 3000
      protocol: TCP

View file

@ -1,132 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nextcloud-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  hostPath:
    path: /dpool/temp/Nextcloud
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nextcloud-pvc
  namespace: home-server
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 10Gi
  volumeName: nextcloud-pv
---
apiVersion: v1
kind: Service
metadata:
  name: nextcloud
  namespace: home-server
spec:
  ports:
    - port: 80
  selector:
    app: nextcloud
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nextcloud
  namespace: home-server
spec:
  selector:
    matchLabels:
      app: nextcloud
  template:
    metadata:
      labels:
        app: nextcloud
    spec:
      # FIX: securityContext moved here from Deployment.spec (not a valid
      # field at that level) and fsGroup removed from the container-level
      # securityContext below (fsGroup is pod-level only).
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
      containers:
        - name: nextcloud
          image: nextcloud:29
          env:
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nextcloud-secrets
                  key: MYSQL_PASSWORD
            - name: MYSQL_DATABASE
              value: nextcloud
            - name: MYSQL_USER
              value: nextcloud
            - name: MYSQL_HOST
              value: nextcloud-db
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nextcloud-data
              mountPath: /var/www/html
          securityContext:
            runAsUser: 1000
            runAsGroup: 1000
      volumes:
        - name: nextcloud-data
          persistentVolumeClaim:
            claimName: nextcloud-pvc
---
# Database provisioned via the mariadb-operator CR.
apiVersion: k8s.mariadb.com/v1alpha1
kind: MariaDB
metadata:
  name: nextcloud-db
  namespace: home-server
spec:
  rootPasswordSecretKeyRef:
    name: nextcloud-secrets
    key: MYSQL_ROOT_PASSWORD
  database: nextcloud
  username: nextcloud
  passwordSecretKeyRef:
    name: nextcloud-secrets
    key: MYSQL_PASSWORD
  image: mariadb:10.11
  storage:
    size: 5Gi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nextcloud
  namespace: home-server
  annotations:
    external-dns.alpha.kubernetes.io/hostname: nc.hxme.net
    nginx.ingress.kubernetes.io/server-snippet: |
      add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
spec:
  tls:
    - hosts:
        - nc.hxme.net
      secretName: wildcard-hxme-net
  rules:
    - host: nc.hxme.net
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: nextcloud
                port:
                  number: 80

View file

@ -1,94 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: smb-share-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  storageClassName: local-path
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /dpool/
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: smb-share-pvc
  namespace: home-server
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: local-path
  resources:
    requests:
      storage: 10Gi
  volumeName: smb-share-pv
---
apiVersion: v1
kind: Service
metadata:
  name: smb-server
  namespace: home-server
spec:
  selector:
    app: smb-server
  ports:
    - name: smb
      port: 445
      targetPort: 445
    - name: netbios
      port: 139
      targetPort: 139
  type: NodePort  # Use ClusterIP or LoadBalancer depending on access requirements
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: smb-server
  namespace: home-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: smb-server
  template:
    metadata:
      labels:
        app: smb-server
    spec:
      containers:
        - name: samba
          image: dperson/samba
          env:
            - name: SMB_USER
              valueFrom:
                secretKeyRef:
                  name: smb-credentials
                  key: username
            - name: SMB_PASS
              valueFrom:
                secretKeyRef:
                  name: smb-credentials
                  key: password
          # $(VAR) references are expanded by kubelet from the env above.
          args:
            - -u
            - "$(SMB_USER);$(SMB_PASS)"
            - -s
            - "share;/mount;yes;no;no;$(SMB_USER)"
          ports:
            - containerPort: 139
            - containerPort: 445
          securityContext:
            capabilities:
              add: ["NET_ADMIN"]
          volumeMounts:
            - name: share
              mountPath: /mount
      volumes:
        - name: share
          persistentVolumeClaim:
            claimName: smb-share-pvc

View file

@ -1,109 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: vaultwarden-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  hostPath:
    path: /dpool/services/vaultwarden/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vaultwarden-pvc
  namespace: home-server
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
  volumeName: vaultwarden-pv
---
apiVersion: v1
kind: Service
metadata:
  name: vaultwarden
  namespace: home-server
spec:
  selector:
    app: vaultwarden
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vaultwarden
  namespace: home-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: vaultwarden
  template:
    metadata:
      labels:
        app: vaultwarden
    spec:
      containers:
        - name: vaultwarden
          image: vaultwarden/server:latest
          imagePullPolicy: Always
          env:
            - name: TZ
              value: "Australia/Brisbane"
            - name: WEBSOCKET_ENABLED
              value: "true"
            - name: SIGNUPS_ALLOWED
              value: "false"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: vaultwarden-postgres-secret
                  key: DATABASE_URL
          ports:
            - containerPort: 80
          volumeMounts:
            - name: vaultwarden-data
              mountPath: /data
      volumes:
        - name: vaultwarden-data
          persistentVolumeClaim:
            claimName: vaultwarden-pvc
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: vaultwarden
  namespace: home-server
  annotations:
    external-dns.alpha.kubernetes.io/hostname: vault.hxme.net
    nginx.ingress.kubernetes.io/server-snippet: |
      add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
spec:
  tls:
    - hosts:
        - vault.hxme.net
      secretName: wildcard-hxme-net
  rules:
    - host: vault.hxme.net
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: vaultwarden
                port:
                  number: 80

View file

@ -1,6 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1 apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization kind: Kustomization
resources: resources:
- kusts/operators.yaml - monitoring/provider.yaml
- kusts/dns-ssl.yaml - monitoring/grafana.yaml
- kusts/home-server.yaml - monitoring/loki.yaml
- monitoring/prometheus.yaml
- operators/mariadb.yaml
- operators/replicator.yaml
- dns/namespace.yaml
- dns/bind.yaml
- dns/externaldns.yaml
- ssl/certmanager.yaml
- auth/authentik.yaml
- files/nextcloud.yaml
- files/syncthing.yaml

View file

@ -1,29 +0,0 @@
## I am so fucking mad with Flux right now I can't even begin explaining it.
# I have to do this because it doesn't respect order in kusts...
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: dns-ssl
namespace: flux-system
spec:
interval: 1m0s
ref:
branch: main
url: ssh://git@repobase.net/j/home-server.git
secretRef:
name: flux-ssh
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: dns-ssl
namespace: flux-system
spec:
interval: 1m0s
path: ./deployments/dns-ssl
prune: true
sourceRef:
kind: GitRepository
name: dns-ssl

View file

@ -1,28 +0,0 @@
## I am so fucking mad with Flux right now I can't even begin explaining it.
# I have to do this because it doesn't respect order in kusts...
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: home-server-apps
namespace: flux-system
spec:
interval: 1m0s
ref:
branch: main
url: ssh://git@repobase.net/j/home-server.git
secretRef:
name: flux-ssh
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: home-server-apps
namespace: flux-system
spec:
interval: 1m0s
path: ./deployments/home-server
prune: true
sourceRef:
kind: GitRepository
name: home-server-apps

View file

@ -1,29 +0,0 @@
## I am so fucking mad with Flux right now I can't even begin explaining it.
# I have to do this because it doesn't respect order in kusts...
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: operators
namespace: flux-system
spec:
interval: 1m0s
ref:
branch: main
url: ssh://git@repobase.net/j/home-server.git
secretRef:
name: flux-ssh
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: operators
namespace: flux-system
spec:
interval: 1m0s
path: ./deployments/operators
prune: true
sourceRef:
kind: GitRepository
name: operators

View file

@ -1,10 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - mariadb.yaml
  - redis.yaml
  - postgresql.yaml
  - replicator.yaml
  - namespace.yaml

View file

@ -54,4 +54,7 @@ spec:
dependsOn: dependsOn:
- name: mariadb-operator-crds - name: mariadb-operator-crds
namespace: mariadb-system namespace: mariadb-system
values:
metrics:
enabled: true

View file

@ -1,5 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: home-server

View file

@ -1,148 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: postgres
---
# FIX: dropped metadata.namespace — PersistentVolumes are cluster-scoped.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: postgres-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  hostPath:
    path: /dpool/services/postgres/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-pvc
  namespace: postgres
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
  volumeName: postgres-pv
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres
  namespace: postgres
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - name: postgres
          image: postgres:15
          ports:
            - containerPort: 5432
          envFrom:
            - secretRef:
                name: postgres-secret
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
      volumes:
        - name: postgres-data
          persistentVolumeClaim:
            claimName: postgres-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: postgres
  namespace: postgres
spec:
  selector:
    app: postgres
  ports:
    - port: 5432
      targetPort: 5432
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: postgres-backup-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  hostPath:
    path: /dpool/postgres/backup
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-backup-pvc
  namespace: postgres
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: postgres-backup
  namespace: postgres
spec:
  schedule: "0 2 * * *"  # Every day at 2 AM
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
            - name: pg-backup
              image: postgres:15
              envFrom:
                - secretRef:
                    name: postgres-secret
              # FIX: dump via the "postgres" Service — this job runs in its
              # own pod, so "-h localhost" had no server to connect to.
              command:
                - /bin/sh
                - -c
                - |
                  mkdir -p /backup
                  PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U $POSTGRES_USER -h postgres $POSTGRES_DB > /backup/backup-$(date +'%Y-%m-%d').sql
              volumeMounts:
                - name: backup-volume
                  mountPath: /backup
          volumes:
            - name: backup-volume
              persistentVolumeClaim:
                claimName: postgres-backup-pvc

View file

@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: home-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:7
          ports:
            - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: home-server
spec:
  selector:
    app: redis
  ports:
    - port: 6379