From 01049bf0318a5e886986a75fe8716a668298cfb5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=B6rz=2C=20Friedrich?=
Date: Fri, 23 Sep 2022 12:00:09 +0000
Subject: [PATCH] DEV-548: testcluster

---
 group_vars/all/firewall.yml                        |  16 +-
 kubernetes.yml                                     |   8 +-
 roles/hcloud/tasks/configure-firewall2.yml         |   2 +-
 roles/kubernetes/argocd/defaults/main.yml          |   2 +-
 roles/kubernetes/argocd/tasks/main.yml             | 389 +++++++++---------
 .../kubernetes/cert_manager/defaults/main.yml      |   2 +
 roles/kubernetes/cert_manager/tasks/main.yml       |  80 ++--
 .../defaults/main.yml                              |   2 +-
 .../hetzner-ccm-networks__v1.13.0.yaml.j2          | 106 +++++
 9 files changed, 363 insertions(+), 244 deletions(-)
 create mode 100644 roles/kubernetes/cloud_controller_manager/templates/hetzner-ccm-networks__v1.13.0.yaml.j2

diff --git a/group_vars/all/firewall.yml b/group_vars/all/firewall.yml
index b3cd629..1c17803 100644
--- a/group_vars/all/firewall.yml
+++ b/group_vars/all/firewall.yml
@@ -97,11 +97,11 @@ hcloud_firewall_objects:
   -
     type: label_selector
     label_selector:
-      selector: 'service=connect'
+      selector: 'stage={{ stage }},service=connect'
   -
     type: label_selector
     label_selector:
-      selector: 'service=keycloak'
+      selector: 'stage={{ stage }},service=keycloak'
 
 hcloud_firewall_objects_awx:
   -
@@ -119,7 +119,7 @@ hcloud_firewall_objects_awx:
   -
     type: label_selector
     label_selector:
-      selector: 'stage={{ stage }}'
+      selector: 'stage={{ stage }},service=awx'
 
 hcloud_firewall_objects_backup:
   -
@@ -138,7 +138,7 @@ hcloud_firewall_objects_backup:
   -
     type: label_selector
     label_selector:
-      selector: 'service=backup'
+      selector: 'stage={{ stage }},service=backup'
 
 hcloud_firewall_objects_gitea:
   -
@@ -170,7 +170,7 @@ hcloud_firewall_objects_gitea:
   -
     type: label_selector
     label_selector:
-      selector: 'service=gitea'
+      selector: 'stage={{ stage }},service=gitea'
 
 hcloud_firewall_objects_keycloak:
   -
@@ -202,7 +202,7 @@ hcloud_firewall_objects_keycloak:
   -
     type: label_selector
     label_selector:
-      selector: 'service=keycloak'
+      selector: 'stage={{ stage }},service=keycloak'
 
 hcloud_firewall_objects_kibana:
   -
@@ -234,7 +234,7 @@ hcloud_firewall_objects_kibana:
   -
     type: label_selector
     label_selector:
-      selector: 'service=kibana'
+      selector: 'stage={{ stage }},service=kibana'
 
 hcloud_firewall_objects_management:
   -
@@ -266,4 +266,4 @@ hcloud_firewall_objects_management:
   -
     type: label_selector
     label_selector:
-      selector: 'service=connect,tenant=management'
\ No newline at end of file
+      selector: 'stage={{ stage }},service=connect,tenant=management'
diff --git a/kubernetes.yml b/kubernetes.yml
index 1661425..e2d8170 100644
--- a/kubernetes.yml
+++ b/kubernetes.yml
@@ -30,11 +30,15 @@
     - role: kubernetes/prometheus
       tags:
         - prometheus
-    - { role: kubernetes/cert_manager }
+      when: kubernetes_with_prometheus | default(True)
+    - role: kubernetes/cert_manager
+      when: kubernetes_with_certmanager | default(True)
     - role: kubernetes/external_dns
+      when: kubernetes_with_extdns | default(True)
       tags:
         - external-dns
-    - { role: kubernetes/ingress_controller }
+    - role: kubernetes/ingress_controller
+      when: kubernetes_with_ingress | default(True)
     - role: kubernetes/argocd
       when: kubernetes_with_argocd | default(True)
       tags:
diff --git a/roles/hcloud/tasks/configure-firewall2.yml b/roles/hcloud/tasks/configure-firewall2.yml
index d70182a..d32bd32 100644
--- a/roles/hcloud/tasks/configure-firewall2.yml
+++ b/roles/hcloud/tasks/configure-firewall2.yml
@@ -2,7 +2,7 @@
 - name: "Get all existing firewalls"
   uri:
     method: GET
-    url: "https://api.hetzner.cloud/v1/firewalls"
+    url: "https://api.hetzner.cloud/v1/firewalls?per_page=1000"
"https://api.hetzner.cloud/v1/firewalls?per_page=1000" body_format: json headers: accept: application/json diff --git a/roles/kubernetes/argocd/defaults/main.yml b/roles/kubernetes/argocd/defaults/main.yml index 03c4bd4..883441b 100644 --- a/roles/kubernetes/argocd/defaults/main.yml +++ b/roles/kubernetes/argocd/defaults/main.yml @@ -28,7 +28,7 @@ argo_realm_users: [ "requiredActions": [] } ] -argocd_server_admin_password: "{{ argocd_server_admin_password_vault }}" +argocd_server_admin_password: "{{ argocd_server_admin_password_vault | default( lookup('community.general.random_string', length=20) ) }}" # https://github.com/argoproj/argo-helm/tree/master/charts/argo-cd k8s_argocd_helm__release_values: diff --git a/roles/kubernetes/argocd/tasks/main.yml b/roles/kubernetes/argocd/tasks/main.yml index 24f1655..1aad8ba 100644 --- a/roles/kubernetes/argocd/tasks/main.yml +++ b/roles/kubernetes/argocd/tasks/main.yml @@ -1,215 +1,208 @@ --- -- name: "Login with keycloak-admin" - include_role: - name: keycloak - tasks_from: _authenticate - args: - apply: - tags: - - argo-cd - when: - - k8s_argocd_with_keycloak - tags: - - argo-cd -- name: "Setup keycloak-realm for argocd" - include_role: - name: keycloak - tasks_from: _configure_realm - vars: - current_realm_name: '{{ argo_realm_name }}' - current_realm_display_name: '{{ argo_realm_display_name }}' - create_client: False - current_realm_password_policy: '' - when: - - k8s_argocd_with_keycloak - - inventory_hostname == groups['kube_control_plane'][0] - args: - apply: - tags: - - argo-cd - tags: - - argo-cd +- name: "Do some stuff with keycloak as OIDC provider" + block: + - name: "Login with keycloak-admin" + include_role: + name: keycloak + tasks_from: _authenticate + args: + apply: + tags: + - argo-cd + when: + tags: + - argo-cd -- name: "Create a Keycloak group, authentication with credentials" - include_role: - name: keycloak - tasks_from: _create_realm_groups - vars: - current_realm_name: '{{ argo_realm_name }}' - current_realm_display_name: '{{ argo_realm_display_name }}' - current_realm_groups: - - name: "{{ argo_realm_group }}" - when: - - k8s_argocd_with_keycloak - - inventory_hostname == groups['kube_control_plane'][0] - args: - apply: - tags: - - argo-cd - tags: - - argo-cd + - name: "Setup keycloak-realm for argocd" + include_role: + name: keycloak + tasks_from: _configure_realm + vars: + current_realm_name: '{{ argo_realm_name }}' + current_realm_display_name: '{{ argo_realm_display_name }}' + create_client: False + current_realm_password_policy: '' + when: + - inventory_hostname == groups['kube_control_plane'][0] + args: + apply: + tags: + - argo-cd + tags: + - argo-cd -- name: "Create keycloak user(s)" - include_role: - name: keycloak - tasks_from: _create_realm_users - vars: - current_realm_name: '{{ argo_realm_name }}' - current_realm_users: '{{ argo_realm_users }}' - when: - - k8s_argocd_with_keycloak - - inventory_hostname == groups['kube_control_plane'][0] - args: - apply: - tags: - - argo-cd - tags: - - argo-cd + - name: "Create a Keycloak group, authentication with credentials" + include_role: + name: keycloak + tasks_from: _create_realm_groups + vars: + current_realm_name: '{{ argo_realm_name }}' + current_realm_display_name: '{{ argo_realm_display_name }}' + current_realm_groups: + - name: "{{ argo_realm_group }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + args: + apply: + tags: + - argo-cd + tags: + - argo-cd -- name: "ADD user group mapping" - include_role: - name: keycloak - tasks_from: 
-  vars:
-    username: '{{ argocd_admin_username }}'
-    destination_group: '{{ argo_realm_group }}'
-    realm_name: '{{ argo_realm_name }}'
-    bearer_token: '{{ access_token }}'
-  when:
-    - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  args:
-    apply:
-      tags:
-        - argo-cd
-  tags:
-    - argo-cd
+    - name: "Create keycloak user(s)"
+      include_role:
+        name: keycloak
+        tasks_from: _create_realm_users
+      vars:
+        current_realm_name: '{{ argo_realm_name }}'
+        current_realm_users: '{{ argo_realm_users }}'
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      args:
+        apply:
+          tags:
+            - argo-cd
+      tags:
+        - argo-cd
 
-- name: "Create keycloak clientscope"
-  delegate_to: localhost
-  become: False
-  community.general.keycloak_clientscope:
-    auth_client_id: admin-cli
-    auth_keycloak_url: "{{ keycloak_server_url }}/auth"
-    auth_realm: 'master'
-    auth_username: "{{ keycloak_admin_username }}"
-    auth_password: "{{ keycloak_admin_password }}"
-    name: '{{ argo_keycloak_clientscope_name }}'
-    realm: '{{ argo_realm_name }}'
-    protocol: '{{ argo_keycloak_clientscope_protocol }}'
-    protocol_mappers:
-      - config:
-          access.token.claim: True
-          claim.name: '{{ argo_keycloak_clientscope_name }}'
-          full.path: False # set it to true and you will be DAMNED => groupname for argo k8s configmap argocd-rbac-cm will be "/{{ group_name }}" !!!! instead of "{{ group_name }}"
-          id.token.claim: True
-          userinfo.token.claim: True
-        name: '{{ argo_keycloak_clientscope_name }}'
-        protocol: openid-connect
-        protocolMapper: oidc-group-membership-mapper
-  when:
-    - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  tags:
-    - argo-cd
+    - name: "ADD user group mapping"
+      include_role:
+        name: keycloak
+        tasks_from: _configure_user_groupmembership_crud
+      vars:
+        username: '{{ argocd_admin_username }}'
+        destination_group: '{{ argo_realm_group }}'
+        realm_name: '{{ argo_realm_name }}'
+        bearer_token: '{{ access_token }}'
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      args:
+        apply:
+          tags:
+            - argo-cd
+      tags:
+        - argo-cd
 
-# using template from exported keycloak client object
-# due to needed params but missing in community.general.keycloak_client
-# e.g. defaultClientScopes
-- name: "Create json object as VAR from template"
-  set_fact:
-    keycloak_realm_create_client: "{{ lookup('template','keycloak-realm-create-client-argocd.json.j2') }}"
-  vars:
-    client_redirect_uri: '{{ argo_client_redirect_uris }}'
-    client_web_origins: '{{ argo_client_web_origins }}'
-    client_id: '{{ argo_client_id }}'
-    realm_name: '{{ argo_realm_name }}'
-    client_root_url: '{{ argo_client_root_url }}'
-    client_admin_url: '{{ argo_client_admin_url }}'
-    client_base_url: '{{ argo_client_base_url }}'
-    keycloak_clientscope_name: '{{ argo_keycloak_clientscope_name }}'
-    keycloak_clientscope_protocol: '{{ argo_keycloak_clientscope_protocol }}'
-    keycloak_client_secret: '{{ argo_keycloak_client_secret }}'
-  when:
-    - k8s_argocd_with_keycloak
-  tags:
-    - argo-cd
+    - name: "Create keycloak clientscope"
+      delegate_to: localhost
+      become: False
+      community.general.keycloak_clientscope:
+        auth_client_id: admin-cli
+        auth_keycloak_url: "{{ keycloak_server_url }}/auth"
+        auth_realm: 'master'
+        auth_username: "{{ keycloak_admin_username }}"
+        auth_password: "{{ keycloak_admin_password }}"
+        name: '{{ argo_keycloak_clientscope_name }}'
+        realm: '{{ argo_realm_name }}'
+        protocol: '{{ argo_keycloak_clientscope_protocol }}'
+        protocol_mappers:
+          - config:
+              access.token.claim: True
+              claim.name: '{{ argo_keycloak_clientscope_name }}'
+              full.path: False # set it to true and you will be DAMNED => groupname for argo k8s configmap argocd-rbac-cm will be "/{{ group_name }}" !!!! instead of "{{ group_name }}"
+              id.token.claim: True
+              userinfo.token.claim: True
+            name: '{{ argo_keycloak_clientscope_name }}'
+            protocol: openid-connect
+            protocolMapper: oidc-group-membership-mapper
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      tags:
+        - argo-cd
 
-# throw needed VARs against keycloak API
-# to CRUD
-- name: "Create client"
-  include_role:
-    name: keycloak
-    tasks_from: _configure_client_crud
-  vars:
-    client_id: '{{ argo_client_id }}'
-    realm_name: '{{ argo_realm_name }}'
-    keycloak_client_object: '{{ keycloak_realm_create_client }}'
-    bearer_token: '{{ access_token }}'
-  when:
-    - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  args:
-    apply:
-      tags:
-        - argo-cd
-  tags:
-    - argo-cd
+    # using template from exported keycloak client object
+    # due to needed params but missing in community.general.keycloak_client
+    # e.g. defaultClientScopes
+    - name: "Create json object as VAR from template"
+      set_fact:
+        keycloak_realm_create_client: "{{ lookup('template','keycloak-realm-create-client-argocd.json.j2') }}"
+      vars:
+        client_redirect_uri: '{{ argo_client_redirect_uris }}'
+        client_web_origins: '{{ argo_client_web_origins }}'
+        client_id: '{{ argo_client_id }}'
+        realm_name: '{{ argo_realm_name }}'
+        client_root_url: '{{ argo_client_root_url }}'
+        client_admin_url: '{{ argo_client_admin_url }}'
+        client_base_url: '{{ argo_client_base_url }}'
+        keycloak_clientscope_name: '{{ argo_keycloak_clientscope_name }}'
+        keycloak_clientscope_protocol: '{{ argo_keycloak_clientscope_protocol }}'
+        keycloak_client_secret: '{{ argo_keycloak_client_secret }}'
+      tags:
+        - argo-cd
 
-- name: "GET available clients from <<{{ argo_realm_name }}>>-realm"
-  delegate_to: localhost
-  become: False
-  uri:
-    url: "{{ keycloak_server_url }}/auth/admin/realms/{{ argo_realm_name }}/clients"
-    method: GET
-    headers:
-      Content-Type: "application/json"
-      Authorization: "Bearer {{ access_token }}"
-    status_code: [200]
-  register: argo_realm_clients
-  when:
-    - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  tags:
-    - argo-cd
+    # throw needed VARs against keycloak API
+    # to CRUD
+    - name: "Create client"
+      include_role:
+        name: keycloak
+        tasks_from: _configure_client_crud
+      vars:
+        client_id: '{{ argo_client_id }}'
+        realm_name: '{{ argo_realm_name }}'
+        keycloak_client_object: '{{ keycloak_realm_create_client }}'
+        bearer_token: '{{ access_token }}'
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      args:
+        apply:
+          tags:
+            - argo-cd
+      tags:
+        - argo-cd
 
-# available clients: get needed ID
-- name: "Get ID of client by paring argo_realm_clients object"
-  set_fact:
-    id_of_client: '{{ ( argo_realm_clients.json | selectattr("clientId","equalto",argo_client_id ) | first ).id }}'
-  when:
-    - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  tags:
-    - argo-cd
+    - name: "GET available clients from <<{{ argo_realm_name }}>>-realm"
+      delegate_to: localhost
+      become: False
+      uri:
+        url: "{{ keycloak_server_url }}/auth/admin/realms/{{ argo_realm_name }}/clients"
+        method: GET
+        headers:
+          Content-Type: "application/json"
+          Authorization: "Bearer {{ access_token }}"
+        status_code: [200]
+      register: argo_realm_clients
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      tags:
+        - argo-cd
 
-- name: "GET client-secret for client <<{{ argo_client_id }}>> in realm <<{{ argo_realm_name }}>>"
-  delegate_to: localhost
-  become: False
-  uri:
-    url: "{{ keycloak_server_url }}/auth/admin/realms/{{ argo_realm_name }}/clients/{{ id_of_client }}/client-secret"
-    method: GET
-    headers:
-      Content-Type: "application/json"
-      Authorization: "Bearer {{ access_token }}"
-    status_code: [200]
-  register: client_secret
-  when:
-    - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  tags:
-    - argo-cd
+    # available clients: get needed ID
+    - name: "Get ID of client by parsing argo_realm_clients object"
+      set_fact:
+        id_of_client: '{{ ( argo_realm_clients.json | selectattr("clientId","equalto",argo_client_id ) | first ).id }}'
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      tags:
+        - argo-cd
 
-- name: "DEBUG"
-  debug:
-    msg: "DEBUGGING: {{ client_secret.json.value }}"
+    - name: "GET client-secret for client <<{{ argo_client_id }}>> in realm <<{{ argo_realm_name }}>>"
+      delegate_to: localhost
+      become: False
+      uri:
+        url: "{{ keycloak_server_url }}/auth/admin/realms/{{ argo_realm_name }}/clients/{{ id_of_client }}/client-secret"
+        method: GET
+        headers:
+          Content-Type: "application/json"
+          Authorization: "Bearer {{ access_token }}"
+        status_code: [200]
+      register: client_secret
+      when:
+        - inventory_hostname == groups['kube_control_plane'][0]
+      tags:
+        - argo-cd
+
+    - name: "DEBUG"
+      debug:
+        msg: "DEBUGGING: {{ client_secret.json.value }}"
+      when:
+        - debug
+        - inventory_hostname == groups['kube_control_plane'][0]
+      tags:
+        - argo-cd
   when:
-    - debug
     - k8s_argocd_with_keycloak
-    - inventory_hostname == groups['kube_control_plane'][0]
-  tags:
-    - argo-cd
+  # end of block statement
 
 - name: "Create namespace <{{ k8s_argocd_helm__release_namespace }}>"
   become: yes
diff --git a/roles/kubernetes/cert_manager/defaults/main.yml b/roles/kubernetes/cert_manager/defaults/main.yml
index c5478ef..67273ca 100644
--- a/roles/kubernetes/cert_manager/defaults/main.yml
+++ b/roles/kubernetes/cert_manager/defaults/main.yml
@@ -2,6 +2,8 @@
 
 k8s_prometheus_helm__name: "prometheus"
 
+cert_manager_dplmt: True
+
 k8s_certmanager_helm__chart_ref: cert-manager
 k8s_certmanager_helm__chart_repo_url: https://charts.jetstack.io
 k8s_certmanager_helm__release_namespace: cert-manager
diff --git a/roles/kubernetes/cert_manager/tasks/main.yml b/roles/kubernetes/cert_manager/tasks/main.yml
index 4f2ec16..36be783 100644
--- a/roles/kubernetes/cert_manager/tasks/main.yml
+++ b/roles/kubernetes/cert_manager/tasks/main.yml
@@ -3,19 +3,17 @@
 ### tags:
 ###   cert-manager
 
-- name: Install cert-manager via helm
+- name: "Create namespace <{{ k8s_certmanager_helm__release_namespace }}>"
   become: yes
-  kubernetes.core.helm:
-    name: cert-manager
-    chart_ref: "{{ k8s_certmanager_helm__chart_ref }}"
-    chart_repo_url: "{{ k8s_certmanager_helm__chart_repo_url }}"
-    release_namespace: "{{ k8s_certmanager_helm__release_namespace }}"
-    create_namespace: yes
-    release_values: "{{ k8s_certmanager_helm__release_values }}"
+  kubernetes.core.k8s:
+    name: "{{ k8s_certmanager_helm__release_namespace }}"
+    api_version: v1
+    kind: Namespace
+    state: present
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
   tags:
-    - cert-manager
+    - namespace
 
 - name: Create secret for digitalocean-dns
   become: yes
@@ -34,31 +32,47 @@
   tags:
     - cert-manager
 
-- name: Create ClusterIssuer for letsencrypt (prod/staging)
-  become: yes
-  kubernetes.core.k8s:
-    definition:
-      api_version: cert-manager.io/v1
-      kind: ClusterIssuer
-      metadata:
-        name: "letsencrypt-{{ item.key }}"
-      spec:
-        acme:
-          email: "{{ item.value.email }}"
-          server: "{{ item.value.server }}"
-          privateKeySecretRef:
-            name: issuer-account-key
-          solvers:
-            - dns01:
-                digitalocean:
-                  tokenSecretRef:
-                    name: digitalocean-dns
-                    key: access-token
-              selector:
-                dnsZones:
-                  - 'smardigo.digital'
-  loop: "{{ k8s_certmanager_helm__cluster_issuers | dict2items }}"
+- name: "Install cert-manager"
+  block:
+
+    - name: Install cert-manager via helm
+      become: yes
+      kubernetes.core.helm:
+        name: cert-manager
+        chart_ref: "{{ k8s_certmanager_helm__chart_ref }}"
+        chart_repo_url: "{{ k8s_certmanager_helm__chart_repo_url }}"
+        release_namespace: "{{ k8s_certmanager_helm__release_namespace }}"
+        create_namespace: yes
+        release_values: "{{ k8s_certmanager_helm__release_values }}"
+
+    - name: Create ClusterIssuer for letsencrypt (prod/staging)
+      become: yes
+      kubernetes.core.k8s:
+        definition:
+          api_version: cert-manager.io/v1
+          kind: ClusterIssuer
+          metadata:
+            name: "letsencrypt-{{ item.key }}"
+          spec:
+            acme:
+              email: "{{ item.value.email }}"
+              server: "{{ item.value.server }}"
+              privateKeySecretRef:
+                name: issuer-account-key
+              solvers:
+                - dns01:
+                    digitalocean:
+                      tokenSecretRef:
+                        name: digitalocean-dns
+                        key: access-token
+                  selector:
+                    dnsZones:
+                      - 'smardigo.digital'
+      loop: "{{ k8s_certmanager_helm__cluster_issuers | dict2items }}"
+
+  # end of block statement
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
+    - cert_manager_dplmt
   tags:
     - cert-manager
diff --git a/roles/kubernetes/cloud_controller_manager/defaults/main.yml b/roles/kubernetes/cloud_controller_manager/defaults/main.yml
index 09a94b9..c8a258e 100644
--- a/roles/kubernetes/cloud_controller_manager/defaults/main.yml
+++ b/roles/kubernetes/cloud_controller_manager/defaults/main.yml
@@ -3,4 +3,4 @@
 # using kubespray default value => kube_pods_subnet
 k8s_ccm__cluster_cidr: 10.233.64.0/18
 
-k8s_ccm__template: "hetzner-ccm-networks__v1.12.1.yaml.j2"
+k8s_ccm__template: "hetzner-ccm-networks__v1.13.0.yaml.j2"
diff --git a/roles/kubernetes/cloud_controller_manager/templates/hetzner-ccm-networks__v1.13.0.yaml.j2 b/roles/kubernetes/cloud_controller_manager/templates/hetzner-ccm-networks__v1.13.0.yaml.j2
new file mode 100644
index 0000000..28135d7
--- /dev/null
+++ b/roles/kubernetes/cloud_controller_manager/templates/hetzner-ccm-networks__v1.13.0.yaml.j2
@@ -0,0 +1,106 @@
+# NOTE: this release was tested against kubernetes v1.18.x
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cloud-controller-manager
+  namespace: kube-system
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: system:cloud-controller-manager
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: cloud-controller-manager
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hcloud-cloud-controller-manager
+  namespace: kube-system
+spec:
+  replicas: 1
+  revisionHistoryLimit: 2
+  selector:
+    matchLabels:
+      app: hcloud-cloud-controller-manager
+  template:
+    metadata:
+      labels:
+        app: hcloud-cloud-controller-manager
+    spec:
+      serviceAccountName: cloud-controller-manager
+      dnsPolicy: Default
+      tolerations:
+        # this taint is set by all kubelets running `--cloud-provider=external`
+        # so we should tolerate it to schedule the cloud controller manager
+        - key: "node.cloudprovider.kubernetes.io/uninitialized"
+          value: "true"
+          effect: "NoSchedule"
+        - key: "CriticalAddonsOnly"
+          operator: "Exists"
+        # the cloud controller manager should be able to run on masters
+        - key: "node-role.kubernetes.io/master"
+          effect: NoSchedule
+          operator: Exists
+        - key: "node-role.kubernetes.io/control-plane"
+          effect: NoSchedule
+          operator: Exists
+        - key: "node.kubernetes.io/not-ready"
+          effect: "NoSchedule"
+      hostNetwork: true
+      containers:
+        - image: hetznercloud/hcloud-cloud-controller-manager:v1.13.0
+          name: hcloud-cloud-controller-manager
+          command:
+            - "/bin/hcloud-cloud-controller-manager"
+            - "--cloud-provider=hcloud"
+            - "--leader-elect=false"
+            - "--allow-untagged-cloud"
+            - "--allocate-node-cidrs=true"
+            - "--cluster-cidr={{ k8s_ccm__cluster_cidr | default('10.244.0.0/16') }}"
+          resources:
+            requests:
+              cpu: 100m
+              memory: 50Mi
+          env:
+            - name: HCLOUD_METRICS_ENABLED
+              value: "true"
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: HCLOUD_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud-ccm
+                  key: token
+            - name: HCLOUD_NETWORK
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud-ccm
+                  key: network
+      priorityClassName: system-cluster-critical
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: 'hcloud-cloud-controller-manager-metrics'
+  namespace: kube-system
+  labels:
+    metrics: service-metrics
+spec:
+  selector:
+    app: 'hcloud-cloud-controller-manager'
+  type: ClusterIP
+  ports:
+    - port: 8233
+      targetPort: 8233
+      protocol: TCP
+      name: http-metrics
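
Usage note: the role switches introduced in kubernetes.yml above can be combined to
deploy a stripped-down test cluster. A minimal sketch, assuming only variable names
that appear in this patch; the vars file name below is hypothetical:

    # testcluster-vars.yml (hypothetical file name)
    kubernetes_with_prometheus: False
    kubernetes_with_extdns: False
    kubernetes_with_ingress: False
    kubernetes_with_argocd: False
    # keep the cert_manager role itself enabled, but skip the helm release and
    # the ClusterIssuers; only the namespace gets created:
    cert_manager_dplmt: False

Run it with, for example, ansible-playbook kubernetes.yml --extra-vars @testcluster-vars.yml;
any toggle left unset keeps its default(True) and the corresponding role is deployed as before.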