From 3d00fdc7a09388888bd3b90e35511428e3179173 Mon Sep 17 00:00:00 2001
From: "Ketelsen, Sven"
Date: Thu, 4 Nov 2021 13:17:59 +0000
Subject: [PATCH] DEV-222 kubernetes setup with ansible

---
 README.md                                    |  4 +-
 ansible.cfg                                  |  2 +-
 galaxy-requirements.yml                      |  2 +-
 group_vars/all/plain.yml                     |  2 -
 group_vars/k8s-cluster/plain.yml             | 10 ++-
 group_vars/kube-master/plain.yml             |  7 --
 group_vars/kube-node/plain.yml               |  7 --
 info.yml                                     | 14 ++--
 inventory_plugins/netgo-hcloud.py            | 63 +++++++++++-----
 kubernetes.yml                               | 28 +++++--
 remove-server.yml                            | 22 +++++-
 roles/kubernetes-base/tasks/main.yml         | 17 -----
 roles/kubernetes-ccm/tasks/main.yml          | 33 --------
 roles/kubernetes/apps/defaults/main.yml      |  1 +
 roles/kubernetes/apps/tasks/main.yml         | 48 ++++++++++++
 roles/kubernetes/base/defaults/main.yml      |  1 +
 roles/kubernetes/base/tasks/main.yml         | 29 +++++++
 .../cert-manager}/defaults/main.yml          |  5 +-
 .../cert-manager}/tasks/main.yml             | 16 ++--
 .../defaults/main.yml                        |  1 +
 .../cloud-controller-manager/tasks/main.yml  | 75 +++++++++++++++++++
 .../ingress-controller}/defaults/main.yml    |  1 +
 .../files/hello-node__fullobjects.yaml       |  0
 .../ingress-controller}/tasks/main.yml       |  6 +-
 stage-dev                                    | 18 ++---
 stage-qa                                     |  6 +-
 26 files changed, 293 insertions(+), 125 deletions(-)
 delete mode 100644 roles/kubernetes-base/tasks/main.yml
 delete mode 100644 roles/kubernetes-ccm/tasks/main.yml
 create mode 100644 roles/kubernetes/apps/defaults/main.yml
 create mode 100644 roles/kubernetes/apps/tasks/main.yml
 create mode 100644 roles/kubernetes/base/defaults/main.yml
 create mode 100644 roles/kubernetes/base/tasks/main.yml
 rename roles/{kubernetes-certmanager => kubernetes/cert-manager}/defaults/main.yml (78%)
 rename roles/{kubernetes-certmanager => kubernetes/cert-manager}/tasks/main.yml (78%)
 create mode 100644 roles/kubernetes/cloud-controller-manager/defaults/main.yml
 create mode 100644 roles/kubernetes/cloud-controller-manager/tasks/main.yml
 rename roles/{kubernetes-ingress => kubernetes/ingress-controller}/defaults/main.yml (99%)
 rename roles/{kubernetes-ingress => kubernetes/ingress-controller}/files/hello-node__fullobjects.yaml (100%)
 rename roles/{kubernetes-ingress => kubernetes/ingress-controller}/tasks/main.yml (89%)

diff --git a/README.md b/README.md
index 3630dff..7836507 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
 
     ansible-galaxy collection install hetzner.hcloud
     ansible-galaxy collection install community.general
-    ansible-galaxy collection install community.kubernetes
+    ansible-galaxy collection install kubernetes.core
     ansible-galaxy collection install community.mysql
 
 OR
@@ -34,7 +34,7 @@
 
     poetry run ansible-galaxy collection install hetzner.hcloud
     poetry run ansible-galaxy collection install community.general
-    poetry run ansible-galaxy collection install community.kubernetes
+    poetry run ansible-galaxy collection install kubernetes.core
    poetry run ansible-galaxy collection install community.mysql
 
 # Setup
diff --git a/ansible.cfg b/ansible.cfg
index 29763a2..5b25851 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -1,3 +1,3 @@
 [defaults]
 inventory_plugins = ./inventory_plugins
-callback_whitelist = profile_tasks
+callbacks_enabled = timer
\ No newline at end of file
diff --git a/galaxy-requirements.yml b/galaxy-requirements.yml
index 692eb62..ab0b206 100644
--- a/galaxy-requirements.yml
+++ b/galaxy-requirements.yml
@@ -3,5 +3,5 @@ collections:
 - name: hetzner.hcloud
   version: 1.6.0
 - name: community.general
-- name: community.kubernetes
+- name: kubernetes.core
 - name: community.mysql
diff --git a/group_vars/all/plain.yml b/group_vars/all/plain.yml
index 0a3a6b2..5a2d4f0 100644
--- a/group_vars/all/plain.yml
+++ b/group_vars/all/plain.yml
@@ -16,8 +16,6 @@ stage_server_url: "{{ http_s }}://{{ stage_server_domain }}"
 
 alertmanager_channel_smardigo: "#monitoring-{{ stage }}"
 
-ansible_ssh_host: "{{ stage_server_domain }}"
-
 hetzner_server_type: cx11
 hetzner_server_image: ubuntu-20.04
diff --git a/group_vars/k8s-cluster/plain.yml b/group_vars/k8s-cluster/plain.yml
index 95fc64c..aa85172 100644
--- a/group_vars/k8s-cluster/plain.yml
+++ b/group_vars/k8s-cluster/plain.yml
@@ -1,7 +1,15 @@
 ---
+ip: "{{ stage_private_server_ip | default('-') }}"
+
+docker_enabled: false
+traefik_enabled: false
+filebeat_enabled: false
+
+### parameters used by kubespray ###
+
+helm_enabled: true
 cloud_provider: external
 kube_network_plugin: calico
 kubelet_preferred_address_types: InternalIP,ExternalIP,Hostname
 docker_log_opts: "--log-opt max-size=100m --log-opt max-file=5 --log-opt compress=true"
-helm_enabled: true
diff --git a/group_vars/kube-master/plain.yml b/group_vars/kube-master/plain.yml
index 20e209c..5191714 100644
--- a/group_vars/kube-master/plain.yml
+++ b/group_vars/kube-master/plain.yml
@@ -2,10 +2,3 @@
 
 hetzner_server_type: cpx21
 hetzner_server_labels: "stage={{ stage }} service=kube-master"
-
-ansible_ssh_host: "{{ stage_server_ip | default('-') }}"
-ip: "{{ stage_private_server_ip | default('-') }}"
-
-docker_enabled: false
-traefik_enabled: false
-filebeat_enabled: false
diff --git a/group_vars/kube-node/plain.yml b/group_vars/kube-node/plain.yml
index 9d63f49..4bf17ce 100644
--- a/group_vars/kube-node/plain.yml
+++ b/group_vars/kube-node/plain.yml
@@ -2,10 +2,3 @@
 
 hetzner_server_type: cpx41
 hetzner_server_labels: "stage={{ stage }} service=kube-node"
-
-ansible_ssh_host: "{{ stage_server_ip | default('-') }}"
-ip: "{{ stage_private_server_ip | default('-') }}"
-
-docker_enabled: false
-traefik_enabled: false
-filebeat_enabled: false
diff --git a/info.yml b/info.yml
index b2745d6..aa0307d 100644
--- a/info.yml
+++ b/info.yml
@@ -1,8 +1,9 @@
 ---
+
 - name: 'apply setup to {{ host | default("all") }}'
   hosts: '{{ host | default("all") }}'
   serial: "{{ serial_number|default(25) }}"
-  become: yes
+  become: false
 
   pre_tasks:
     - name: "Check if ansible version is at least 2.10.x"
@@ -12,14 +13,15 @@
           - ansible_version.minor >= 10
         msg: "The ansible version has to be at least ({{ ansible_version.full }})"
-      become: false
-
-    - name: "Import autodiscover pre-tasks"
-      include_tasks: tasks/autodiscover_pre_tasks.yml
-
     - name: "Variable "
       debug:
         msg: "{{ ansible_distribution }}"
+      delegate_to: 127.0.0.1
 
     - name: "Variable "
       debug:
         msg: "{{ group_names }}"
+      delegate_to: 127.0.0.1
+    - name: "Printing ip addresses for {{ inventory_hostname }}"
+      debug:
+        msg: "{{ stage_server_ip }} / {{ stage_private_server_ip }}"
+      delegate_to: 127.0.0.1
\ No newline at end of file
diff --git a/inventory_plugins/netgo-hcloud.py b/inventory_plugins/netgo-hcloud.py
index e0f9038..7d540cf 100644
--- a/inventory_plugins/netgo-hcloud.py
+++ b/inventory_plugins/netgo-hcloud.py
@@ -51,38 +51,40 @@ class MyHcloudAPI:
         self.token = token
         self.label_selector = label_selector
 
-    def get_servers(self):
+    def get_values(self, api_path, response_values_field_name):
         display = Display()
         try:
-            servers = []
+            response_values = []
 
             # pagination with page_size per window, repeat until last page is reached
             page = 1
             page_size = 20
             while page > 0:
-                api_url = "%s/v1/servers?label_selector=" % self.BASE + self.label_selector + "&per_page=" + str(page_size) + "&page=" + str(page)
-                #display.display(api_url)
+                api_url = "{}/{}?label_selector={}&per_page={}&page={}".format(self.BASE, api_path, self.label_selector, str(page_size), str(page))
+                display.display(api_url)
 
                 response = open_url(
                     api_url,
                     headers={"Authorization": "Bearer " + self.token},
                 )
-                result = json.loads(response.read())
-
-                servers += result["servers"]
-                #for server in result["servers"]:
-                #    display.display(server["name"])
+                json_response = json.loads(response.read())
+                response_values += json_response[response_values_field_name]
 
-                if result["meta"]["pagination"]["page"] == result["meta"]["pagination"]["last_page"]:
+                if json_response["meta"]["pagination"]["page"] == json_response["meta"]["pagination"]["last_page"]:
                     break
                 page += 1
 
-            return servers
+            return response_values
         except ValueError:
             raise AnsibleError("Incorrect JSON payload")
         except Exception as e:
             raise AnsibleError("Error while fetching %s: %s" % (api_url, to_native(e)))
 
+    def get_servers(self):
+        return self.get_values("v1/servers", "servers")
+
+    def get_networks(self):
+        return self.get_values("v1/networks", "networks")
 
 class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
@@ -99,6 +101,12 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
         ).get_servers()
         return servers
 
+    def _read_networks_from_API(self):
+        networks = MyHcloudAPI(
+            self.get_option("api_token"), self.get_option("label_selector")
+        ).get_networks()
+        return networks
+
     def parse(self, inventory, loader, path, cache=True):
         super(InventoryModule, self).parse(inventory, loader, path, cache)
         config = self._read_config_data(path)
@@ -133,21 +141,36 @@
         if cache_needs_update:
             self._cache[cache_key] = servers
 
-        self.populate(servers)
+        networks = self._read_networks_from_API()
+
+        self.populate(servers, networks)
 
-    def populate(self, servers):
+    def populate(self, servers, networks):
         display = Display()
 
         # Add a default top group 'hcloud'
         self.inventory.add_group(group="hcloud")
+        self.inventory.add_group(group="etcd")
+        self.inventory.add_group(group="k8s-cluster")
 
         for server in servers:
+            serverId = server["id"]
             serverName = server["name"]
             serverLabels = server["labels"]
             serverStage = serverLabels["stage"]
             serverService = serverLabels["service"]
+            serverPublicIp = server["public_net"]["ipv4"]["ip"]
+            serverPrivateIp = '-'
 
-            display.display("server:<" + serverName + ">, stage=<" + serverStage + ">, service=<" + serverService + ">")
+            for network in networks:
+                networkId = network["id"]
+                networkName = network["name"]
+                if serverId in network["servers"]:
+                    for privateNet in server["private_net"]:
+                        if networkId == privateNet["network"]:
+                            serverPrivateIp = privateNet["ip"]
+
+            display.display("server:<" + serverName + ">, stage=<" + serverStage + ">, service=<" + serverService + ">, publicIp=<" + serverPublicIp + ">, privateIp=<" + serverPrivateIp + ">")
 
             self.inventory.add_group(group=serverService)
             self.inventory.add_group(group="stage_" + serverStage)
@@ -156,6 +179,12 @@
             self.inventory.add_host(serverName, group=serverService)
             self.inventory.add_host(serverName, group="stage_" + serverStage)
 
-# TODO get the private server ip for the given stage
-# self.inventory.set_variable(serverName, 'stage_server_ip', server["public_net"]["ipv4"]["ip"])
-# self.inventory.set_variable(serverName, 'stage_private_server_ip', ...)
+            # should be configurable and not hard coded
+            if serverService == "kube-master":
+                self.inventory.add_host(serverName, group="etcd")
+            if serverService == "kube-master" or serverService == "kube-node":
+                self.inventory.add_host(serverName, group="k8s-cluster")
+
+            self.inventory.set_variable(serverName, 'stage_server_ip', serverPublicIp)
+            self.inventory.set_variable(serverName, 'ansible_ssh_host', serverPublicIp)
+            self.inventory.set_variable(serverName, 'stage_private_server_ip', serverPrivateIp)
diff --git a/kubernetes.yml b/kubernetes.yml
index f2603ed..c22d080 100644
--- a/kubernetes.yml
+++ b/kubernetes.yml
@@ -1,7 +1,25 @@
 ---
-- hosts: k8s-cluster
+
+- name: 'apply kubernetes setup to {{ host | default("all") }}'
+  hosts: '{{ host | default("k8s-cluster") }}'
+  serial: "{{ serial_number | default(5) }}"
+  become: yes
+
+  pre_tasks:
+    - name: "Check if ansible version is at least 2.10.x"
+      assert:
+        that:
+          - ansible_version.major >= 2
+          - ansible_version.minor >= 10
+        msg: "The ansible version has to be at least ({{ ansible_version.full }})"
+      delegate_to: 127.0.0.1
+      become: false
+
   roles:
-    - kubernetes-base
-#    - kubernetes-ccm # DEV-243 is waiting for hetzner support << Ticket#2021110303010972 RE: Anderes Problem (Server: #15275628) >>
-    - kubernetes-certmanager
-    - kubernetes-ingress
+    - { role: kubernetes/base }
+# DEV-243 is waiting for hetzner support << Ticket#2021110303010972 RE: Anderes Problem (Server: #15275628) >>
+#    - { role: kubernetes/cloud-controller-manager }
+    - { role: kubernetes/cert-manager }
+    - { role: kubernetes/ingress-controller }
+    - { role: kubernetes/apps, tags: prometheus }
+    - { role: kubernetes/apps, tags: argo-cd }
diff --git a/remove-server.yml b/remove-server.yml
index a4fa38e..ac82b74 100644
--- a/remove-server.yml
+++ b/remove-server.yml
@@ -38,10 +38,23 @@
       add_host:
         name: "{{ stage }}-{{ tenant_id }}-{{ cluster_name }}-{{ '%02d' | format(item|int) }}"
         groups:
-        - "stage_{{ stage }}"
-        - "{{ cluster_service }}"
+          - "stage_{{ stage }}"
+          - "{{ cluster_service }}"
       with_sequence: start=1 end={{ cluster_size | default(1) }}
       changed_when: False
+      when:
+        - tenant_id is defined
+        - cluster_name is defined
+        - cluster_service is defined
+
+    - name: Add hosts
+      add_host:
+        name: "{{ stage }}-{{ server }}"
+        groups:
+          - "stage_{{ stage }}"
+      changed_when: False
+      when:
+        - server is defined
 
 #############################################################
 # Delete and DNS servers for created inventory
@@ -90,3 +103,8 @@
       retries: 5
       delay: 5
       delegate_to: 127.0.0.1
+      when:
+        - scope_id is defined
+        - process_instance_id is defined
+        - smardigo_management_token is defined
+        - smardigo_management_action is defined
diff --git a/roles/kubernetes-base/tasks/main.yml b/roles/kubernetes-base/tasks/main.yml
deleted file mode 100644
index a13f9e4..0000000
--- a/roles/kubernetes-base/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: k8s-base | install needed pip dependencies
-  ansible.builtin.package:
-    name: "{{ item }}"
-    state: latest
-  loop:
-    - python3-pip
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-
-- name: k8s-base | install needed pip dependencies
-  pip:
-    name: "{{ item }}"
-  loop:
-    - kubernetes
-  when:
-    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-ccm/tasks/main.yml b/roles/kubernetes-ccm/tasks/main.yml
deleted file mode 100644
index fa6ccc6..0000000
--- a/roles/kubernetes-ccm/tasks/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: k8s-CCM | download Hetzner CCM
-  ansible.builtin.get_url:
-    url: https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/v1.12.0/ccm-networks.yaml
-    dest: /tmp/ccm.yaml
-    mode: '0664'
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-
-- name: k8s-CCM | create secret for Hetzner CCM
-  community.kubernetes.k8s:
-    definition:
-      api_version: v1
-      kind: Secret
-      metadata:
-        namespace: kube-system
-        name: hcloud
-        label:
-          app: ccm
-          provider: hcloud
-      type: Opaque
-      data:
-        network: "{{ stage | string | b64encode }}"
-        token: "{{ hetzner_authentication_token | string | b64encode }}"
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-
-- name: k8s-CCM | Apply Hetzner CCM manifest to the cluster.
-  community.kubernetes.k8s:
-    state: present
-    src: /tmp/ccm.yaml
-  when:
-    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes/apps/defaults/main.yml b/roles/kubernetes/apps/defaults/main.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/kubernetes/apps/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/kubernetes/apps/tasks/main.yml b/roles/kubernetes/apps/tasks/main.yml
new file mode 100644
index 0000000..ef3af20
--- /dev/null
+++ b/roles/kubernetes/apps/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+
+### tags:
+### prometheus
+### argo-cd
+
+- name: Add prometheus-community chart repo
+  kubernetes.core.helm_repository:
+    name: prometheus-community
+    repo_url: "https://prometheus-community.github.io/helm-charts"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - prometheus
+
+# TODO
+# https://stackoverflow.com/questions/65806507/how-to-change-kube-proxy-config
+# https://stackoverflow.com/questions/65901186/kube-prometheus-stack-issue-scraping-metrics
+- name: Deploy kube-prometheus-stack inside monitoring namespace
+  kubernetes.core.helm:
+    name: prometheus
+    chart_ref: prometheus-community/kube-prometheus-stack
+    release_namespace: monitoring
+    create_namespace: true
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - prometheus
+
+- name: Add argo-cd chart repo
+  kubernetes.core.helm_repository:
+    name: argo-cd
+    repo_url: "https://argoproj.github.io/argo-helm"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - argo-cd
+
+- name: Deploy Argo-CD inside argo-cd namespace
+  kubernetes.core.helm:
+    name: argo-cd
+    chart_ref: argo-cd/argo-cd
+    release_namespace: argo-cd
+    create_namespace: true
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - argo-cd
\ No newline at end of file
diff --git a/roles/kubernetes/base/defaults/main.yml b/roles/kubernetes/base/defaults/main.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/kubernetes/base/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
new file mode 100644
index 0000000..a104ae7
--- /dev/null
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+
+### tags:
+
+- name: Install dependencies
+  ansible.builtin.package:
+    name: "{{ item }}"
+    state: latest
+  loop:
+    - python3-pip
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Install pip dependencies
+  ansible.builtin.pip:
+    name: "{{ item }}"
+  loop:
+    - kubernetes
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Install Helm plugins
+  kubernetes.core.helm_plugin:
+    plugin_path: "{{ item }}"
+    state: present
+  loop:
+    - https://github.com/databus23/helm-diff
+  when:
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-certmanager/defaults/main.yml b/roles/kubernetes/cert-manager/defaults/main.yml
similarity index 78%
rename from roles/kubernetes-certmanager/defaults/main.yml
rename to roles/kubernetes/cert-manager/defaults/main.yml
index 581ea3b..ea1c8e4 100644
--- a/roles/kubernetes-certmanager/defaults/main.yml
+++ b/roles/kubernetes/cert-manager/defaults/main.yml
@@ -1,12 +1,13 @@
 ---
+
 k8s_certmanager_helm__release_values:
   installCRDs: true
   webhook.timeoutSeconds: 4
 
 k8s_certmanager_helm__cluster_issuers:
   prod:
-    email: friedrich.goerz@netgo.de
+    email: "{{ lets_encrypt_email }}"
     server: https://acme-v02.api.letsencrypt.org/directory
   staging:
-    email: friedrich.goerz@netgo.de
+    email: "{{ lets_encrypt_email }}"
     server: https://acme-staging-v02.api.letsencrypt.org/directory
diff --git a/roles/kubernetes-certmanager/tasks/main.yml b/roles/kubernetes/cert-manager/tasks/main.yml
similarity index 78%
rename from roles/kubernetes-certmanager/tasks/main.yml
rename to roles/kubernetes/cert-manager/tasks/main.yml
index 58c123b..f600eca 100644
--- a/roles/kubernetes-certmanager/tasks/main.yml
+++ b/roles/kubernetes/cert-manager/tasks/main.yml
@@ -1,8 +1,10 @@
 ---
-- name: k8s-certmanager | install cert-manager via helm
-  community.kubernetes.helm:
+
+- name: Install cert-manager via helm
+  kubernetes.core.helm:
     name: cert-manager
-    chart_ref: "{{ k8s_certmanager_helm__chart_ref | default('jetstack/cert-manager') }}"
+    chart_repo_url: "{{ k8s_certmanager_helm__chart_repo_url | default('https://charts.jetstack.io') }}"
+    chart_ref: "{{ k8s_certmanager_helm__chart_ref | default('cert-manager') }}"
     chart_version: "{{ k8s_certmanager_helm__chart_version | default('v1.5.4') }}"
     release_namespace: "{{ k8s_certmanager_helm__release_namespace | default('cert-manager') }}"
     create_namespace: yes
@@ -10,8 +12,8 @@
   when:
     - inventory_hostname == groups['kube-master'][0]
 
-- name: k8s-certmanager | create secret for digitalocean-dns
-  community.kubernetes.k8s:
+- name: Create secret for digitalocean-dns
+  kubernetes.core.k8s:
     definition:
       api_version: v1
       kind: Secret
@@ -24,8 +26,8 @@
   when:
     - inventory_hostname == groups['kube-master'][0]
 
-- name: k8s-certmanager | create ClusterIssuer_letsencrypt_prod
-  community.kubernetes.k8s:
+- name: Create ClusterIssuer_letsencrypt_prod
+  kubernetes.core.k8s:
     definition:
       api_version: cert-manager.io/v1
       kind: ClusterIssuer
diff --git a/roles/kubernetes/cloud-controller-manager/defaults/main.yml b/roles/kubernetes/cloud-controller-manager/defaults/main.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/kubernetes/cloud-controller-manager/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/kubernetes/cloud-controller-manager/tasks/main.yml b/roles/kubernetes/cloud-controller-manager/tasks/main.yml
new file mode 100644
index 0000000..b2d22c3
--- /dev/null
+++ b/roles/kubernetes/cloud-controller-manager/tasks/main.yml
@@ -0,0 +1,75 @@
+---
+
+### tags:
+### ccm
+
+- name: Download Hetzner CCM
+  ansible.builtin.get_url:
+    url: https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/v1.12.0/ccm-networks.yaml
+    dest: /tmp/ccm.yaml
+    mode: '0664'
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - ccm
+
+- name: Create secret for Hetzner CCM
+  kubernetes.core.k8s:
+    definition:
+      api_version: v1
+      kind: Secret
+      metadata:
+        namespace: kube-system
+        name: hcloud
+        label:
+          app: ccm
+          provider: hcloud
+      type: Opaque
+      data:
+        network: "{{ stage | string | b64encode }}"
+        token: "{{ hetzner_authentication_token | string | b64encode }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - ccm
+
+- name: Apply Hetzner CCM manifest to the cluster.
+  kubernetes.core.k8s:
+    state: present
+    src: /tmp/ccm.yaml
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+  tags:
+    - ccm
+
+- name: Add prometheus-community chart repo
+  kubernetes.core.helm_repository:
+    name: prometheus-community
+    repo_url: "https://prometheus-community.github.io/helm-charts"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Add argo-cd chart repo
+  kubernetes.core.helm_repository:
+    name: argo-cd
+    repo_url: "https://argoproj.github.io/argo-helm"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Deploy Prometheus inside monitoring namespace
+  kubernetes.core.helm:
+    name: prometheus
+    chart_ref: prometheus-community/kube-prometheus-stack
+    release_namespace: monitoring
+    create_namespace: true
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Deploy Argo-CD inside argo-cd namespace
+  kubernetes.core.helm:
+    name: argo-cd
+    chart_ref: argo-cd/argo-cd
+    release_namespace: argo-cd
+    create_namespace: true
+  when:
+    - inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-ingress/defaults/main.yml b/roles/kubernetes/ingress-controller/defaults/main.yml
similarity index 99%
rename from roles/kubernetes-ingress/defaults/main.yml
rename to roles/kubernetes/ingress-controller/defaults/main.yml
index 2d482f5..d16c953 100644
--- a/roles/kubernetes-ingress/defaults/main.yml
+++ b/roles/kubernetes/ingress-controller/defaults/main.yml
@@ -1,4 +1,5 @@
 ---
+
 k8s_ingress_helm__release_values:
   controller:
     replicaCount: 2
diff --git a/roles/kubernetes-ingress/files/hello-node__fullobjects.yaml b/roles/kubernetes/ingress-controller/files/hello-node__fullobjects.yaml
similarity index 100%
rename from roles/kubernetes-ingress/files/hello-node__fullobjects.yaml
rename to roles/kubernetes/ingress-controller/files/hello-node__fullobjects.yaml
diff --git a/roles/kubernetes-ingress/tasks/main.yml b/roles/kubernetes/ingress-controller/tasks/main.yml
similarity index 89%
rename from roles/kubernetes-ingress/tasks/main.yml
rename to roles/kubernetes/ingress-controller/tasks/main.yml
index 616582c..7ef22bc 100644
--- a/roles/kubernetes-ingress/tasks/main.yml
+++ b/roles/kubernetes/ingress-controller/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: k8s-ingress | install ingress via helm
-  community.kubernetes.helm:
+  kubernetes.core.helm:
     name: ingress
     chart_repo_url: "{{ k8s_ingress_helm__chart_repo_url | default('https://kubernetes.github.io/ingress-nginx') }}"
     chart_ref: "{{ k8s_ingress_helm__chart_ref | default('ingress-nginx') }}"
@@ -16,8 +16,8 @@
   when:
     - inventory_hostname == groups['kube-master'][0]
 
-- name: k8s-ingress | adding hello-node test app
-  community.kubernetes.k8s:
+- name: k8s-ingress | adding hello-node test app
+  kubernetes.core.k8s:
     state: "{{ k8s_ingress_helm__enable_demoapp | default('absent') }}"
     definition: "{{ ingress_demo_app }}"
   when:
diff --git a/stage-dev b/stage-dev
index 3b893f6..090310d 100644
--- a/stage-dev
+++ b/stage-dev
@@ -50,19 +50,19 @@ dev-prometheus-01
 dev-webdav-01
 
 [kube-master]
-dev-k8s-master-11 ansible_ssh_host=168.119.121.8 ip=10.0.0.14
-dev-k8s-master-12 ansible_ssh_host=49.12.227.243 ip=10.0.0.10
-dev-k8s-master-13 ansible_ssh_host=49.12.239.190 ip=10.0.0.15
+dev-k8s-master-11
+dev-k8s-master-12
+dev-k8s-master-13
 
 [etcd]
-dev-k8s-master-11 ansible_ssh_host=168.119.121.8 ip=10.0.0.14
-dev-k8s-master-12 ansible_ssh_host=49.12.227.243 ip=10.0.0.10
-dev-k8s-master-13 ansible_ssh_host=49.12.239.190 ip=10.0.0.15
+dev-k8s-master-11
+dev-k8s-master-12
+dev-k8s-master-13
 
 [kube-node]
-dev-k8s-worker-11 ansible_ssh_host=49.12.239.187 ip=10.0.0.9
-dev-k8s-worker-12 ansible_ssh_host=168.119.120.31 ip=10.0.0.21
-dev-k8s-worker-13 ansible_ssh_host=168.119.120.44 ip=10.0.0.27
+dev-k8s-worker-11
+dev-k8s-worker-12
+dev-k8s-worker-13
 
 [k8s-cluster:children]
 kube-node
diff --git a/stage-qa b/stage-qa
index 77c267b..4d9dd59 100644
--- a/stage-qa
+++ b/stage-qa
@@ -47,13 +47,13 @@ qa-prometheus-01
 qa-webdav-01
 
 [kube-master]
-qa-k8s-master-11 ansible_ssh_host=159.69.33.228 ip=10.1.0.24
+qa-k8s-master-01
 
 [etcd]
-qa-k8s-master-11 ansible_ssh_host=159.69.33.228 ip=10.1.0.24
+qa-k8s-master-01
 
 [kube-node]
-qa-k8s-worker-11 ansible_ssh_host=159.69.214.131 ip=10.1.0.25
+qa-k8s-worker-01
 
 [k8s-cluster:children]
 kube-node