feat: kubernetes bootstrap

- ccm
- ingress
- certmanager
- argo-cd
  - {{ stage }}-kube-argocd.{{ domain }}
- prometheus
  - {{ stage }}-kube-grafana.{{ domain }}

master · parent a6e603cf76 · commit a9d239f0e8
@@ -1,11 +1,96 @@
---

k8s_prometheus_helm__name: "prometheus"
k8s_prometheus_helm__release_namespace: "monitoring"

k8s_argocd_helm__name: "argo-cd"
k8s_argocd_helm__release_namespace: "argo-cd"

# https://github.com/grafana/helm-charts
# https://github.com/prometheus-community/helm-charts
k8s_prometheus_helm__release_values:
  grafana:
    adminUser: "{{ grafana_admin_username }}"
    adminPassword: "{{ grafana_admin_password }}"
    ingress:
      enabled: true
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-prod
        cert-manager.io/issue-temporary-certificate: "true"
        kubernetes.io/ingress.class: nginx
        nginx.ingress.kubernetes.io/ssl-redirect: "false"
        nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ip_whitelist | join(',') }}"
      hosts:
        - "{{ stage }}-kube-grafana.{{ domain }}"
      tls:
        - secretName: "{{ stage }}-kube-grafana-cert"
          hosts:
            - "{{ stage }}-kube-grafana.{{ domain }}"
    deploymentStrategy:
      type: Recreate
  kubeControllerManager:
    service:
      # kube-controller-manager serves metrics on its secure port
      port: 10257
      targetPort: 10257
    serviceMonitor:
      https: true
      insecureSkipVerify: true

# https://github.com/argoproj/argo-helm/tree/master/charts/argo-cd
k8s_argocd_helm__release_values:
  controller:
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        namespace: "{{ k8s_argocd_helm__release_namespace }}"
        # the release label lets the prometheus operator discover these monitors
        additionalLabels:
          release: "{{ k8s_prometheus_helm__name }}"
  repoServer:
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        namespace: "{{ k8s_argocd_helm__release_namespace }}"
        additionalLabels:
          release: "{{ k8s_prometheus_helm__name }}"
  server:
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        namespace: "{{ k8s_argocd_helm__release_namespace }}"
        additionalLabels:
          release: "{{ k8s_prometheus_helm__name }}"
    ingress:
      enabled: true
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-prod
        cert-manager.io/issue-temporary-certificate: "true"
        kubernetes.io/ingress.class: nginx
        nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ip_whitelist | join(',') }}"
        nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
        nginx.ingress.kubernetes.io/ssl-passthrough: "true"
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
      hosts:
        - "{{ stage }}-kube-argocd.{{ domain }}"
      tls:
        - secretName: "{{ stage }}-kube-argocd-cert"
          hosts:
            - "{{ stage }}-kube-argocd.{{ domain }}"
  dex:
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        namespace: "{{ k8s_argocd_helm__release_namespace }}"
        additionalLabels:
          release: "{{ k8s_prometheus_helm__name }}"
  redis:
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        namespace: "{{ k8s_argocd_helm__release_namespace }}"
        additionalLabels:
          release: "{{ k8s_prometheus_helm__name }}"
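
The roles that consume these *_helm__* variables are not part of this diff. As a minimal sketch, a wrapper task built on the kubernetes.core.helm module might look like this (the "argo" repository alias and the task layout are assumptions):

# hypothetical task — the real k8s_argocd_helm role is not shown in this commit
- name: Install the Argo CD release
  kubernetes.core.helm:
    name: "{{ k8s_argocd_helm__name }}"
    chart_ref: argo/argo-cd                 # assumes a helm repo alias "argo"
    release_namespace: "{{ k8s_argocd_helm__release_namespace }}"
    create_namespace: true
    values: "{{ k8s_argocd_helm__release_values }}"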
@@ -1 +1,6 @@
---

# using kubespray default value => kube_pods_subnet
k8s_ccm__cluster_cidr: 10.233.64.0/18

k8s_ccm__template: "hetzner-ccm-networks__v1.12.1.yaml.j2"
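
Both CCM templates below read the Hetzner API token and network from a Secret named hcloud in kube-system, which this commit does not create; a minimal sketch with placeholder values:

# hypothetical manifest — both values are placeholders, supplied per stage
apiVersion: v1
kind: Secret
metadata:
  name: hcloud
  namespace: kube-system
stringData:
  token: "<hetzner-cloud-api-token>"
  network: "<hetzner-network-name-or-id>"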
@@ -0,0 +1,86 @@
# NOTE: this release was tested against kubernetes v1.18.x
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: hcloud-cloud-controller-manager
  template:
    metadata:
      labels:
        app: hcloud-cloud-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: cloud-controller-manager
      dnsPolicy: Default
      tolerations:
        # this taint is set by all kubelets running `--cloud-provider=external`,
        # so we must tolerate it to schedule the cloud controller manager
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        # the cloud controller manager should be able to run on masters
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
        - key: "node.kubernetes.io/not-ready"
          effect: "NoSchedule"
      hostNetwork: true
      containers:
        - image: hetznercloud/hcloud-cloud-controller-manager:v1.12.0
          name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            - "--cloud-provider=hcloud"
            - "--leader-elect=false"
            - "--allow-untagged-cloud"
            - "--allocate-node-cidrs=true"
            - "--cluster-cidr={{ k8s_ccm__cluster_cidr | default('10.244.0.0/16') }}"
          resources:
            requests:
              cpu: 100m
              memory: 50Mi
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: token
            - name: HCLOUD_NETWORK
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: network
@@ -0,0 +1,88 @@
# NOTE: this release was tested against kubernetes v1.18.x
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: hcloud-cloud-controller-manager
  template:
    metadata:
      labels:
        app: hcloud-cloud-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: cloud-controller-manager
      dnsPolicy: Default
      tolerations:
        # this taint is set by all kubelets running `--cloud-provider=external`,
        # so we must tolerate it to schedule the cloud controller manager
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        # the cloud controller manager should be able to run on masters
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
          operator: Exists
        - key: "node.kubernetes.io/not-ready"
          effect: "NoSchedule"
      hostNetwork: true
      containers:
        - image: hetznercloud/hcloud-cloud-controller-manager:v1.12.1
          name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            - "--cloud-provider=hcloud"
            - "--leader-elect=false"
            - "--allow-untagged-cloud"
            - "--allocate-node-cidrs=true"
            - "--cluster-cidr={{ k8s_ccm__cluster_cidr | default('10.244.0.0/16') }}"
          resources:
            requests:
              cpu: 100m
              memory: 50Mi
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: token
            - name: HCLOUD_NETWORK
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: network
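
How the selected template is rendered and applied is likewise outside this diff; one plausible sketch, assuming a role that feeds the Jinja2 template to kubernetes.core.k8s:

# hypothetical task — the real k8s_ccm role is not shown in this commit
- name: Deploy the hcloud cloud controller manager
  kubernetes.core.k8s:
    state: present
    template: "{{ k8s_ccm__template }}"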
@@ -1,32 +1,31 @@
---

k8s_ingress_helm__release_values:
-  controller:
-    replicaCount: 2
-    config:
-      use-forwarded-headers: "true"
-      compute-full-forwarded-for: "true"
-      use-proxy-protocol: "true"
-      ssl-ciphers: "EECDH+AESGCM:EDH+AESGCM"
-      ssl-protocols: "TLSv1.3"
-    service:
-      externalTrafficPolicy: Local
-      healthCheckNodePort: &healthchecknodeport 31066
-      nodePorts:
-        http: &httpnodeport 30473
-        https: 30474
-      annotations:
-        load-balancer.hetzner.cloud/location: nbg1
-        load-balancer.hetzner.cloud/name: "{{ stage }}-ingress"
-        load-balancer.hetzner.cloud/type: "lb11"
-        load-balancer.hetzner.cloud/disable-public-network: "true"
-        load-balancer.hetzner.cloud/network-zone: "eu-central"
-        load-balancer.hetzner.cloud/use-private-ip: "true"
-        load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
-        load-balancer.hetzner.cloud/health-check-interval: "3s"
-        load-balancer.hetzner.cloud/health-check-timeout: "1s"
-        load-balancer.hetzner.cloud/health-check-retries: 3
-        load-balancer.hetzner.cloud/health-check-protocol: "tcp"
-        load-balancer.hetzner.cloud/health-check-port: *httpnodeport
-  defaultBackend:
-    enabled: true
+  controller:
+    replicaCount: 3
+    config:
+      use-forwarded-headers: "true"
+      compute-full-forwarded-for: "true"
+      use-proxy-protocol: "true"
+      ssl-ciphers: "EECDH+AESGCM:EDH+AESGCM"
+      ssl-protocols: "TLSv1.3"
+    service:
+      externalTrafficPolicy: Local
+      healthCheckNodePort: &healthchecknodeport 31066
+      nodePorts:
+        http: &httpnodeport 30473
+        https: 30474
+      annotations:
+        load-balancer.hetzner.cloud/type: "lb11"
+        load-balancer.hetzner.cloud/location: nbg1
+        load-balancer.hetzner.cloud/name: "{{ stage }}-ingress"
+        # annotation values must be strings, so booleans and numbers are quoted
+        load-balancer.hetzner.cloud/disable-public-network: "true"
+        load-balancer.hetzner.cloud/disable-private-ingress: "true"
+        load-balancer.hetzner.cloud/use-private-ip: "true"
+        load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
+        load-balancer.hetzner.cloud/health-check-interval: "3s"
+        load-balancer.hetzner.cloud/health-check-timeout: "1s"
+        load-balancer.hetzner.cloud/health-check-retries: "3"
+        load-balancer.hetzner.cloud/health-check-protocol: "tcp"
+        # the LB health check targets the HTTP NodePort anchored above
+        load-balancer.hetzner.cloud/health-check-port: *httpnodeport
+  defaultBackend:
+    enabled: true
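
The grafana and argo-cd ingress values earlier in this commit reference a letsencrypt-prod ClusterIssuer that is not defined here; a minimal cert-manager sketch, assuming ACME HTTP-01 through the nginx ingress class (the email is a placeholder):

# hypothetical manifest — the real issuer is expected to exist already
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: "admin@example.com"                # placeholder
    privateKeySecretRef:
      name: letsencrypt-prod-account-key
    solvers:
      - http01:
          ingress:
            class: nginx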