initial commit

main_dev1029_pgbkp_s3apierror_alert
Hoan To 3 years ago
commit 077a1da8f9

@@ -0,0 +1,29 @@
apiVersion: v2
name: prodwork01-cluster-bootstrap
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.0.0"
dependencies:
  - name: bootstrap
    version: 0.1.0
    repository: oci://prodnso-harbor-01.smardigo.digital/infrastructure

@@ -0,0 +1,88 @@
# NOTE: this release was tested against kubernetes v1.18.x
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: hcloud-cloud-controller-manager
  template:
    metadata:
      labels:
        app: hcloud-cloud-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: cloud-controller-manager
      dnsPolicy: Default
      tolerations:
        # this taint is set by all kubelets running `--cloud-provider=external`,
        # so we should tolerate it to schedule the cloud controller manager
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        # the cloud controller manager should be able to run on masters
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
          operator: Exists
        - key: "node.kubernetes.io/not-ready"
          effect: "NoSchedule"
      hostNetwork: true
      containers:
        - image: hetznercloud/hcloud-cloud-controller-manager:v1.12.1
          name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            - "--cloud-provider=hcloud"
            - "--leader-elect=false"
            - "--allow-untagged-cloud"
            - "--allocate-node-cidrs=true"
            - "--cluster-cidr=10.244.0.0/16"
          resources:
            requests:
              cpu: 100m
              memory: 50Mi
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud-ccm
                  key: token
            - name: HCLOUD_NETWORK
              valueFrom:
                secretKeyRef:
                  name: hcloud-ccm
                  key: network
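
The Deployment above pulls the Hetzner Cloud API token and the network reference from a Secret named hcloud-ccm, which is not part of this commit. A minimal sketch of that companion Secret, assuming the network key holds the Hetzner Cloud network name or ID (the placeholder values are illustrative):

apiVersion: v1
kind: Secret
metadata:
  name: hcloud-ccm          # matches the secretKeyRef entries in the Deployment above
  namespace: kube-system
type: Opaque
stringData:
  token: "<hetzner-cloud-api-token>"      # placeholder; never commit a real token
  network: "<hcloud-network-name-or-id>"  # consumed via HCLOUD_NETWORK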

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- hetzner-ccm-networks__v1.12.1.yaml

@@ -0,0 +1,348 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: csi.hetzner.cloud
spec:
  attachRequired: true
  podInfoOnMount: true
  volumeLifecycleModes:
    - Persistent
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  namespace: kube-system
  name: hcloud-volumes
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.hetzner.cloud
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: hcloud-csi
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: hcloud-csi
rules:
  # attacher
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  # provisioner
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims", "persistentvolumeclaims/status"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  # resizer
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  # node
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: hcloud-csi
subjects:
  - kind: ServiceAccount
    name: hcloud-csi
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: hcloud-csi
  apiGroup: rbac.authorization.k8s.io
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: hcloud-csi-controller
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: hcloud-csi-controller
  serviceName: hcloud-csi-controller
  replicas: 1
  template:
    metadata:
      labels:
        app: hcloud-csi-controller
    spec:
      serviceAccount: hcloud-csi
      containers:
        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
          volumeMounts:
            - name: socket-dir
              mountPath: /run/csi
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
          volumeMounts:
            - name: socket-dir
              mountPath: /run/csi
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
          args:
            - --feature-gates=Topology=true
            - --default-fstype=ext4
          volumeMounts:
            - name: socket-dir
              mountPath: /run/csi
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
        - name: hcloud-csi-driver
          image: hetznercloud/hcloud-csi-driver:1.6.0
          imagePullPolicy: Always
          env:
            - name: CSI_ENDPOINT
              value: unix:///run/csi/socket
            - name: METRICS_ENDPOINT
              value: 0.0.0.0:9189
            - name: ENABLE_METRICS
              value: "true"
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud-csi
                  key: token
          volumeMounts:
            - name: socket-dir
              mountPath: /run/csi
          ports:
            - containerPort: 9189
              name: metrics
            - name: healthz
              containerPort: 9808
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
        - name: liveness-probe
          imagePullPolicy: Always
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
          volumeMounts:
            - mountPath: /run/csi
              name: socket-dir
      volumes:
        - name: socket-dir
          emptyDir: {}
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: hcloud-csi-node
  namespace: kube-system
  labels:
    app: hcloud-csi
spec:
  selector:
    matchLabels:
      app: hcloud-csi
  template:
    metadata:
      labels:
        app: hcloud-csi
    spec:
      tolerations:
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: "instance.hetzner.cloud/is-root-server"
                    operator: NotIn
                    values:
                      - "true"
      serviceAccount: hcloud-csi
      containers:
        - name: csi-node-driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
          args:
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /run/csi
            - name: registration-dir
              mountPath: /registration
          securityContext:
            privileged: true
        - name: hcloud-csi-driver
          image: hetznercloud/hcloud-csi-driver:1.6.0
          imagePullPolicy: Always
          env:
            - name: CSI_ENDPOINT
              value: unix:///run/csi/socket
            - name: METRICS_ENDPOINT
              value: 0.0.0.0:9189
            - name: ENABLE_METRICS
              value: "true"
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud-csi
                  key: token
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: kubelet-dir
              mountPath: /var/lib/kubelet
              mountPropagation: "Bidirectional"
            - name: plugin-dir
              mountPath: /run/csi
            - name: device-dir
              mountPath: /dev
          securityContext:
            privileged: true
          ports:
            - containerPort: 9189
              name: metrics
            - name: healthz
              containerPort: 9808
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
        - name: liveness-probe
          imagePullPolicy: Always
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
          volumeMounts:
            - mountPath: /run/csi
              name: plugin-dir
      volumes:
        - name: kubelet-dir
          hostPath:
            path: /var/lib/kubelet
            type: Directory
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: device-dir
          hostPath:
            path: /dev
            type: Directory
---
apiVersion: v1
kind: Service
metadata:
  name: hcloud-csi-controller-metrics
  namespace: kube-system
  labels:
    app: hcloud-csi
spec:
  selector:
    app: hcloud-csi-controller
  ports:
    - port: 9189
      name: metrics
      targetPort: metrics
---
apiVersion: v1
kind: Service
metadata:
  name: hcloud-csi-node-metrics
  namespace: kube-system
  labels:
    app: hcloud-csi
spec:
  selector:
    app: hcloud-csi
  ports:
    - port: 9189
      name: metrics
      targetPort: metrics
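
Both CSI workloads read their API token from a Secret named hcloud-csi, which is likewise not part of this commit, and hcloud-volumes is marked as the default StorageClass with WaitForFirstConsumer binding, so a volume is only provisioned once a consuming pod is scheduled. A minimal sketch of the Secret plus a claim that would exercise the driver (names and the requested size are illustrative):

apiVersion: v1
kind: Secret
metadata:
  name: hcloud-csi          # referenced by both the controller StatefulSet and the node DaemonSet
  namespace: kube-system
type: Opaque
stringData:
  token: "<hetzner-cloud-api-token>"  # placeholder; never commit a real token
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data        # hypothetical claim, for illustration only
spec:
  accessModes:
    - ReadWriteOnce          # Hetzner volumes attach to a single node at a time
  resources:
    requests:
      storage: 10Gi
  storageClassName: hcloud-volumes  # could be omitted, since this is the default class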

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- hcloud-csi.v1.6.0.yaml

@@ -0,0 +1,9 @@
resources:
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-alertmanagerconfigs.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-alertmanagers.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-podmonitors.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-probes.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-prometheuses.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-prometheusrules.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-servicemonitors.yaml
- https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-39.2.1/charts/kube-prometheus-stack/crds/crd-thanosrulers.yaml
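
These CRDs only install the monitoring API types; scrape targets are declared separately against them. As an illustration of what crd-servicemonitors.yaml enables, a sketch of a ServiceMonitor that would scrape the hcloud-csi metrics Services defined earlier (the name is hypothetical, and Prometheus must be configured to watch the kube-system namespace):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: hcloud-csi          # illustrative; not part of this commit
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: hcloud-csi        # matches the labels on both metrics Services above
  endpoints:
    - port: metrics          # the named port exposing 9189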

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://download.elastic.co/downloads/eck/2.3.0/crds.yaml
- https://download.elastic.co/downloads/eck/2.3.0/operator.yaml
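
The two URLs install the ECK CRDs and the operator; an Elasticsearch cluster is then declared through the Elasticsearch custom resource that the operator reconciles. A minimal sketch of such a resource, with the name, version, and node count chosen purely for illustration:

apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: example             # hypothetical cluster name
spec:
  version: 8.3.1            # illustrative; any stack version supported by ECK 2.3.0
  nodeSets:
    - name: default
      count: 1               # single node, for illustration only
      config:
        node.store.allow_mmap: false  # common when vm.max_map_count is not raised on hosts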

@@ -0,0 +1,20 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
  namespace: cert-manager
spec:
  acme:
    email: nso.devops@netgo.de
    privateKeySecretRef:
      name: issuer-account-key
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
      - dns01:
          digitalocean:
            tokenSecretRef:
              key: access-token
              name: digitalocean-dns
        selector:
          dnsZones:
            - smardigo.digital
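
The DNS-01 solver references a Secret named digitalocean-dns carrying a DigitalOcean API token; a ClusterIssuer is cluster-scoped (the namespace field above is ignored), and by default cert-manager resolves solver secrets from its own namespace. A minimal sketch of that Secret, which is not part of this commit:

apiVersion: v1
kind: Secret
metadata:
  name: digitalocean-dns    # matches tokenSecretRef above
  namespace: cert-manager   # the namespace cert-manager reads ClusterIssuer solver secrets from
type: Opaque
stringData:
  access-token: "<digitalocean-api-token>"  # placeholder; never commit a real token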

@@ -0,0 +1,2 @@
resources:
- clusterissuer_prod.yaml

@@ -0,0 +1,20 @@
# Global
## Set this to create nginx for argocd and other services
argo_namespace: argo-cd
stage: prodwork01
domain: smardigo.digital
gitea_instance: prodnso-gitea-01
gitea_repo_path: "argocd/prodwork01-argocd"
ingress_ip_whitelist:
- "212.121.131.106/32" # netgo berlin
- "149.233.6.129/32" # netgo e-shelter
- "46.245.219.98/32" # netgo borken
- "87.150.33.14/32" # sven
# Application specific
cert-manager:
  enable: false
cloud_provider: hetzner
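
The ingress_ip_whitelist values are presumably consumed by the bootstrap chart's ingress templates; with ingress-nginx, a list like this typically ends up in the whitelist-source-range annotation. A hedged sketch of the kind of Ingress that could be rendered (the hostname, service name, and annotation wiring are assumptions, since the templates are not part of this commit):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server       # hypothetical ingress, for illustration
  namespace: argo-cd
  annotations:
    # standard ingress-nginx annotation; assumed to be where ingress_ip_whitelist lands
    nginx.ingress.kubernetes.io/whitelist-source-range: "212.121.131.106/32,149.233.6.129/32,46.245.219.98/32,87.150.33.14/32"
spec:
  ingressClassName: nginx
  rules:
    - host: argocd.smardigo.digital   # plausible combination of the stage values; assumed
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server   # assumed upstream service
                port:
                  number: 80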