diff --git a/roles/kubernetes/apps/defaults/main.yml b/roles/kubernetes/apps/defaults/main.yml
index 1934225..a657880 100644
--- a/roles/kubernetes/apps/defaults/main.yml
+++ b/roles/kubernetes/apps/defaults/main.yml
@@ -1,11 +1,96 @@
 ---
+
+k8s_prometheus_helm__name: "prometheus"
+k8s_prometheus_helm__release_namespace: "monitoring"
+
+k8s_argocd_helm__name: "argo-cd"
+k8s_argocd_helm__release_namespace: "argo-cd"
+
+# https://github.com/grafana/helm-charts
+# https://github.com/prometheus-community/helm-charts
 k8s_prometheus_helm__release_values:
   grafana:
-    adminPassword: "8gsf8073g"
+    adminUser: "{{ grafana_admin_username }}"
+    adminPassword: "{{ grafana_admin_password }}"
+    ingress:
+      enabled: true
+      annotations:
+        cert-manager.io/cluster-issuer: letsencrypt-prod
+        cert-manager.io/issue-temporary-certificate: "true"
+        kubernetes.io/ingress.class: nginx
+        nginx.ingress.kubernetes.io/ssl-redirect: "false"
+        nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ip_whitelist | join(',') }}"
+      hosts:
+        - "{{ stage }}-kube-grafana.{{ domain }}"
+      tls:
+        - secretName: "{{ stage }}-kube-grafana-cert"
+          hosts:
+            - "{{ stage }}-kube-grafana.{{ domain }}"
+    deploymentStrategy:
+      type: Recreate
   kubeControllerManager:
     service:
       port: 10257
       targetPort: 10257
     serviceMonitor:
       https: true
-      insecureSkipVerify: true
\ No newline at end of file
+      insecureSkipVerify: true
+
+# https://github.com/argoproj/argo-helm/tree/master/charts/argo-cd
+k8s_argocd_helm__release_values:
+  controller:
+    metrics:
+      enabled: true
+      serviceMonitor:
+        enabled: true
+        namespace: "{{ k8s_argocd_helm__release_namespace }}"
+        additionalLabels:
+          release: "{{ k8s_prometheus_helm__name }}"
+  repoServer:
+    metrics:
+      enabled: true
+      serviceMonitor:
+        enabled: true
+        namespace: "{{ k8s_argocd_helm__release_namespace }}"
+        additionalLabels:
+          release: "{{ k8s_prometheus_helm__name }}"
+  server:
+    metrics:
+      enabled: true
+      serviceMonitor:
+        enabled: true
+        namespace: "{{ k8s_argocd_helm__release_namespace }}"
+        additionalLabels:
+          release: "{{ k8s_prometheus_helm__name }}"
+    ingress:
+      enabled: true
+      annotations:
+        cert-manager.io/cluster-issuer: letsencrypt-prod
+        cert-manager.io/issue-temporary-certificate: "true"
+        kubernetes.io/ingress.class: nginx
+        nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ip_whitelist | join(',') }}"
+        nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+        nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+      hosts:
+        - "{{ stage }}-kube-argocd.{{ domain }}"
+      tls:
+        - secretName: "{{ stage }}-kube-argocd-cert"
+          hosts:
+            - "{{ stage }}-kube-argocd.{{ domain }}"
+  dex:
+    metrics:
+      enabled: true
+      serviceMonitor:
+        enabled: true
+        namespace: "{{ k8s_argocd_helm__release_namespace }}"
+        additionalLabels:
+          release: "{{ k8s_prometheus_helm__name }}"
+  redis:
+    metrics:
+      enabled: true
+      serviceMonitor:
+        enabled: true
+        namespace: "{{ k8s_argocd_helm__release_namespace }}"
+        additionalLabels:
+          release: "{{ k8s_prometheus_helm__name }}"
diff --git a/roles/kubernetes/apps/tasks/main.yml b/roles/kubernetes/apps/tasks/main.yml
index 7ed1506..7323e39 100644
--- a/roles/kubernetes/apps/tasks/main.yml
+++ b/roles/kubernetes/apps/tasks/main.yml
@@ -6,10 +6,10 @@
 
 - name: Deploy kube-prometheus-stack inside monitoring namespace
   kubernetes.core.helm:
-    name: prometheus
+    name: "{{ k8s_prometheus_helm__name }}"
     chart_repo_url: "{{ k8s_prometheus_helm__chart_repo_url | default('https://prometheus-community.github.io/helm-charts') }}"
-    chart_ref: "{{ k8s_ingress_helm__chart_ref | default('kube-prometheus-stack') }}"
-    release_namespace: "{{ k8s_prometheus_helm__release_namespace | default('monitoring') }}"
+    chart_ref: "{{ k8s_prometheus_helm__chart_ref | default('kube-prometheus-stack') }}"
+    release_namespace: "{{ k8s_prometheus_helm__release_namespace }}"
    create_namespace: yes
     release_values: "{{ k8s_prometheus_helm__release_values }}"
   when:
@@ -17,22 +17,15 @@
   tags:
     - prometheus
 
-- name: Add argo-cd chart repo
-  kubernetes.core.helm_repository:
-    name: argo-cd
-    repo_url: "https://argoproj.github.io/argo-helm"
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-  tags:
-    - argo-cd
-
-- name: Deploy Argo-CD inside argo-cd namespace
+- name: Deploy argo-cd inside argo-cd namespace
   kubernetes.core.helm:
-    name: argo-cd
-    chart_ref: argo-cd/argo-cd
-    release_namespace: argo-cd
-    create_namespace: true
+    name: "{{ k8s_argocd_helm__name }}"
+    chart_repo_url: "{{ k8s_argocd_helm__chart_repo_url | default('https://argoproj.github.io/argo-helm') }}"
+    chart_ref: "{{ k8s_argocd_helm__chart_ref | default('argo-cd') }}"
+    release_namespace: "{{ k8s_argocd_helm__release_namespace }}"
+    create_namespace: yes
+    release_values: "{{ k8s_argocd_helm__release_values }}"
   when:
     - inventory_hostname == groups['kube-master'][0]
   tags:
-    - argo-cd
\ No newline at end of file
+    - argo-cd
diff --git a/roles/kubernetes/cloud-controller-manager/defaults/main.yml b/roles/kubernetes/cloud-controller-manager/defaults/main.yml
index ed97d53..09a94b9 100644
--- a/roles/kubernetes/cloud-controller-manager/defaults/main.yml
+++ b/roles/kubernetes/cloud-controller-manager/defaults/main.yml
@@ -1 +1,6 @@
 ---
+
+# using kubespray default value => kube_pods_subnet
+k8s_ccm__cluster_cidr: "10.233.64.0/18"
+
+k8s_ccm__template: "hetzner-ccm-networks__v1.12.1.yaml.j2"
diff --git a/roles/kubernetes/cloud-controller-manager/tasks/main.yml b/roles/kubernetes/cloud-controller-manager/tasks/main.yml
index 1eea2df..2ab115e 100644
--- a/roles/kubernetes/cloud-controller-manager/tasks/main.yml
+++ b/roles/kubernetes/cloud-controller-manager/tasks/main.yml
@@ -3,16 +3,6 @@
 ### tags:
 ### ccm
 
-- name: Download Hetzner CCM
-  ansible.builtin.get_url:
-    url: https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/v1.12.0/ccm-networks.yaml
-    dest: /tmp/ccm.yaml
-    mode: '0664'
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-  tags:
-    - ccm
-
 - name: Create secret for Hetzner CCM
   kubernetes.core.k8s:
     definition:
@@ -28,16 +18,16 @@
       data:
         network: "{{ stage | string | b64encode }}"
        token: "{{ hetzner_authentication_token | string | b64encode }}"
-    when:
-      - inventory_hostname == groups['kube-master'][0]
+  when:
+    - inventory_hostname == groups['kube-master'][0]
   tags:
     - ccm
 
-- name: Apply Hetzner CCM manifest to the cluster.
+- name: Applying CCM deployment
   kubernetes.core.k8s:
     state: present
-    src: /tmp/ccm.yaml
+    definition: "{{ lookup('template', k8s_ccm__template) }}"
   when:
-      - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube-master'][0]
   tags:
     - ccm
diff --git a/roles/kubernetes/cloud-controller-manager/templates/hetzner-ccm-networks__v1.12.0.yaml.j2 b/roles/kubernetes/cloud-controller-manager/templates/hetzner-ccm-networks__v1.12.0.yaml.j2
new file mode 100644
index 0000000..e70ebe7
--- /dev/null
+++ b/roles/kubernetes/cloud-controller-manager/templates/hetzner-ccm-networks__v1.12.0.yaml.j2
@@ -0,0 +1,86 @@
+# NOTE: this release was tested against kubernetes v1.18.x
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cloud-controller-manager
+  namespace: kube-system
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: system:cloud-controller-manager
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: cloud-controller-manager
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hcloud-cloud-controller-manager
+  namespace: kube-system
+spec:
+  replicas: 1
+  revisionHistoryLimit: 2
+  selector:
+    matchLabels:
+      app: hcloud-cloud-controller-manager
+  template:
+    metadata:
+      labels:
+        app: hcloud-cloud-controller-manager
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      serviceAccountName: cloud-controller-manager
+      dnsPolicy: Default
+      tolerations:
+        # this taint is set by all kubelets running `--cloud-provider=external`
+        # so we should tolerate it to schedule the cloud controller manager
+        - key: "node.cloudprovider.kubernetes.io/uninitialized"
+          value: "true"
+          effect: "NoSchedule"
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+        # cloud controller managers should be able to run on masters
+        - key: "node-role.kubernetes.io/master"
+          effect: NoSchedule
+        - key: "node-role.kubernetes.io/control-plane"
+          effect: NoSchedule
+        - key: "node.kubernetes.io/not-ready"
+          effect: "NoSchedule"
+      hostNetwork: true
+      containers:
+        - image: hetznercloud/hcloud-cloud-controller-manager:v1.12.0
+          name: hcloud-cloud-controller-manager
+          command:
+            - "/bin/hcloud-cloud-controller-manager"
+            - "--cloud-provider=hcloud"
+            - "--leader-elect=false"
+            - "--allow-untagged-cloud"
+            - "--allocate-node-cidrs=true"
+            - "--cluster-cidr={{ k8s_ccm__cluster_cidr | default('10.244.0.0/16') }}"
+          resources:
+            requests:
+              cpu: 100m
+              memory: 50Mi
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: HCLOUD_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud
+                  key: token
+            - name: HCLOUD_NETWORK
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud
+                  key: network
diff --git a/roles/kubernetes/cloud-controller-manager/templates/hetzner-ccm-networks__v1.12.1.yaml.j2 b/roles/kubernetes/cloud-controller-manager/templates/hetzner-ccm-networks__v1.12.1.yaml.j2
new file mode 100644
index 0000000..b15fc9f
--- /dev/null
+++ b/roles/kubernetes/cloud-controller-manager/templates/hetzner-ccm-networks__v1.12.1.yaml.j2
@@ -0,0 +1,88 @@
+# NOTE: this release was tested against kubernetes v1.18.x
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cloud-controller-manager
+  namespace: kube-system
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: system:cloud-controller-manager
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: cloud-controller-manager
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hcloud-cloud-controller-manager
+  namespace: kube-system
+spec:
+  replicas: 1
+  revisionHistoryLimit: 2
+  selector:
+    matchLabels:
+      app: hcloud-cloud-controller-manager
+  template:
+    metadata:
+      labels:
+        app: hcloud-cloud-controller-manager
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      serviceAccountName: cloud-controller-manager
+      dnsPolicy: Default
+      tolerations:
+        # this taint is set by all kubelets running `--cloud-provider=external`
+        # so we should tolerate it to schedule the cloud controller manager
+        - key: "node.cloudprovider.kubernetes.io/uninitialized"
+          value: "true"
+          effect: "NoSchedule"
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+        # cloud controller managers should be able to run on masters
+        - key: "node-role.kubernetes.io/master"
+          effect: NoSchedule
+          operator: Exists
+        - key: "node-role.kubernetes.io/control-plane"
+          effect: NoSchedule
+          operator: Exists
+        - key: "node.kubernetes.io/not-ready"
+          effect: "NoSchedule"
+      hostNetwork: true
+      containers:
+        - image: hetznercloud/hcloud-cloud-controller-manager:v1.12.1
+          name: hcloud-cloud-controller-manager
+          command:
+            - "/bin/hcloud-cloud-controller-manager"
+            - "--cloud-provider=hcloud"
+            - "--leader-elect=false"
+            - "--allow-untagged-cloud"
+            - "--allocate-node-cidrs=true"
+            - "--cluster-cidr={{ k8s_ccm__cluster_cidr | default('10.244.0.0/16') }}"
+          resources:
+            requests:
+              cpu: 100m
+              memory: 50Mi
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: HCLOUD_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud
+                  key: token
+            - name: HCLOUD_NETWORK
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud
+                  key: network
diff --git a/roles/kubernetes/ingress-controller/defaults/main.yml b/roles/kubernetes/ingress-controller/defaults/main.yml
index ec62d82..463d65d 100644
--- a/roles/kubernetes/ingress-controller/defaults/main.yml
+++ b/roles/kubernetes/ingress-controller/defaults/main.yml
@@ -1,32 +1,31 @@
 ---
-
 k8s_ingress_helm__release_values:
-  controller:
-    replicaCount: 2
-    config:
-      use-forwarded-headers: "true"
-      compute-full-forwarded-for: "true"
-      use-proxy-protocol: "true"
-      ssl-ciphers: "EECDH+AESGCM:EDH+AESGCM"
-      ssl-protocols: "TLSv1.3"
-    service:
-      externalTrafficPolicy: Local
-      healthCheckNodePort: &healthchecknodeport 31066
-      nodePorts:
-        http: &httpnodeport 30473
-        https: 30474
-      annotations:
-        load-balancer.hetzner.cloud/location: nbg1
-        load-balancer.hetzner.cloud/name: "{{ stage }}-ingress"
-        load-balancer.hetzner.cloud/type: "lb11"
-        load-balancer.hetzner.cloud/disable-public-network: "true"
-        load-balancer.hetzner.cloud/network-zone: "eu-central"
-        load-balancer.hetzner.cloud/use-private-ip: "true"
-        load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
-        load-balancer.hetzner.cloud/health-check-interval: "3s"
-        load-balancer.hetzner.cloud/health-check-timeout: "1s"
-        load-balancer.hetzner.cloud/health-check-retries: 3
-        load-balancer.hetzner.cloud/health-check-protocol: "tcp"
-        load-balancer.hetzner.cloud/health-check-port: *httpnodeport
-  defaultBackend:
-    enabled: true
+  controller:
+    replicaCount: 3
+    config:
+      use-forwarded-headers: "true"
+      compute-full-forwarded-for: "true"
+      use-proxy-protocol: "true"
+      ssl-ciphers: "EECDH+AESGCM:EDH+AESGCM"
+      ssl-protocols: "TLSv1.3"
+    service:
+      externalTrafficPolicy: Local
+      healthCheckNodePort: &healthchecknodeport 31066
+      nodePorts:
+        http: &httpnodeport 30473
+        https: 30474
+      annotations:
+        load-balancer.hetzner.cloud/type: "lb11"
+        load-balancer.hetzner.cloud/location: nbg1
+        load-balancer.hetzner.cloud/name: "{{ stage }}-ingress"
+        load-balancer.hetzner.cloud/disable-public-network: "true"
+        load-balancer.hetzner.cloud/disable-private-ingress: "true"
+        load-balancer.hetzner.cloud/use-private-ip: "true"
+        load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
+        load-balancer.hetzner.cloud/health-check-interval: "3s"
+        load-balancer.hetzner.cloud/health-check-timeout: "1s"
+        load-balancer.hetzner.cloud/health-check-retries: 3
+        load-balancer.hetzner.cloud/health-check-protocol: "tcp"
+        load-balancer.hetzner.cloud/health-check-port: *httpnodeport
+  defaultBackend:
+    enabled: true
diff --git a/roles/kubernetes/ingress-controller/files/hello-node__fullobjects.yaml b/roles/kubernetes/ingress-controller/files/hello-node__fullobjects.yaml
index f50153e..8e7c771 100644
--- a/roles/kubernetes/ingress-controller/files/hello-node__fullobjects.yaml
+++ b/roles/kubernetes/ingress-controller/files/hello-node__fullobjects.yaml
@@ -29,7 +29,7 @@ metadata:
   namespace: default
 spec:
   ports:
-    - port: 8080
+    - port: 80
       protocol: TCP
       targetPort: 8080
   selector:
@@ -56,7 +56,7 @@ spec:
           service:
             name: hello-node
             port:
-              number: 8080
+              number: 80
           path: /
           pathType: Prefix
   tls: