bugfix: rollout new kubernetes cluster

master
Sven Ketelsen 4 years ago
parent 4c777356fd
commit a5ff2d53c9

@@ -130,6 +130,12 @@ shared_service_kube_node_03: "{{ stage_server_infos
| list
| first
| default('-') }}"
shared_service_management_ip: "{{ stage_server_infos
| selectattr('name', 'match', stage + '-management-01' )
| map(attribute='private_ip')
| list
| first
| default('-') }}"
shared_service_kube_ip: "{{ stage_private_ingress_loadbalancer_ip }}"
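For context, the selectattr/map chain above looks up the private IP of the stage's management host and falls back to '-' when no entry matches. A minimal illustration with made-up data (hostnames and addresses are assumptions, not values from this repository):

# Hypothetical input for stage=dev:
stage_server_infos:
  - { name: "dev-management-01", private_ip: "10.0.0.5" }
  - { name: "dev-kube-node-01",  private_ip: "10.0.0.11" }
# With that input, shared_service_management_ip renders to "10.0.0.5".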
@@ -152,6 +158,8 @@ shared_service_iam_hostname: "{{ stage }}-iam-01.{{ domain }}"
shared_service_mail_hostname: "{{ stage }}-mail-01.{{ domain }}"
shared_service_gitea_hostname: "{{ stage }}-gitea-01.{{ domain }}"
shared_service_redis_hostname: "{{ stage }}-redis-01.{{ domain }}"
shared_service_kube_argocd_hostname: "{{ stage }}-kube-argocd.{{ domain }}"
shared_service_kube_awx_hostname: "{{ stage }}-kube-awx.{{ domain }}"
shared_service_kube_prometheus_hostname: "{{ stage }}-kube-prometheus.{{ domain }}"
shared_service_pdns_hostname: "{{ stage }}-pdns-01.{{ domain }}"
shared_service_webdav_hostname: "{{ stage }}-webdav-01.{{ domain }}"
@@ -247,9 +255,21 @@ shared_service_hosts: [
ip: "{{ shared_service_pdns_ip }}",
name: "{{ shared_service_pdns_hostname }}"
},
{
ip: "{{ shared_service_kube_ip }}",
name: "{{ shared_service_kube_argocd_hostname }}"
},
{
ip: "{{ shared_service_kube_ip }}",
name: "{{ shared_service_kube_awx_hostname }}"
},
{
ip: "{{ shared_service_kube_ip }}",
name: "{{ shared_service_kube_prometheus_hostname }}"
},
{
ip: "{{ shared_service_management_ip }}",
name: "{{ management_service_connect_hostname }}"
}
]
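The three kube entries resolve the new ArgoCD, AWX and Prometheus hostnames to the private ingress load balancer IP, and the management entry maps the Connect hostname to the management node's private IP. A minimal sketch of how a list like shared_service_hosts is typically consumed, e.g. to populate /etc/hosts (this task is an illustration only, not part of the commit):

# Hypothetical consumer task: one /etc/hosts line per entry in the list.
- name: "Register shared service hostnames in /etc/hosts"
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ item.ip }} {{ item.name }}"
    regexp: "\\s{{ item.name | regex_escape }}$"
  loop: "{{ shared_service_hosts }}"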

@@ -1,12 +1,18 @@
$ANSIBLE_VAULT;1.1;AES256
64326238383766653335313732626562616331346131366635383838313236326237303935326139
6561363534383736643263623365663836363365373231350a353435376562393538373438313066
39366236313634366566623835353265623663356434353534626239353432656438626332393734
3663333131643335380a363662616436616538383533336363303837653139623139316664353634
61646238383335656361373837643536653865343833636564633731386138333165343866393737
35366265363061636664663138366561333938353134323835303765366537306137343839313866
65396539343234656264333234373934313432393630313336666238643932626261383234383838
64313538666437623664326433633932353266386163396139643938613532363937623462633532
30613964643039373361653732333865656132373263646335656431303466636233353635323763
36333863666330326332663765393837623165646265623064363830313237306430646663343465
363831623364653236323637386139313732
34376237343736386538353235346231326462313534643130616532633535613331643236353764
3737383533313861373030313237366131356438393333350a323230316663346634636634353239
61326262653334646539626464646663383164666166306162646166333462383833333832353461
3437663431653566650a383632653134343238393762333131613633313036636536343831333630
34633361373264376263303364353531636434356263663965626639616666633861636463383637
34333838663834666532366564396566313739386262633335313335386661646166363636323766
35363535353664346463336566663163303333663065613532623265303262396531303831653636
65353565353233626331356666343932333539356331303161303062316433633761623132333033
65376632376266336361363832613064323861393366313763316434316264663562616134353766
62643165633030363237636632386166396538666337616430323534313062333965336233333836
36306637323764333233666239336331373763633737623666393466376163313738393036336232
34613536336336663837353031323665323733313634313731326537333938396361373435366435
32643338346635633962346537393338653464383431396432343932373439386230613537356134
64386165363233636237656364396333336261613037323136363630613533353639646439303337
31626663393335343962663033646135333366623738346436393764353438383264666666653635
64643462656332653361313766656633616134373166333163346131616334343161616235633666
3366

@@ -130,6 +130,12 @@ shared_service_kube_node_03: "{{ stage_server_infos
| list
| first
| default('-') }}"
shared_service_management_ip: "{{ stage_server_infos
| selectattr('name', 'match', stage + '-management-01' )
| map(attribute='private_ip')
| list
| first
| default('-') }}"
shared_service_kube_ip: "{{ stage_private_ingress_loadbalancer_ip }}"
@@ -152,6 +158,8 @@ shared_service_iam_hostname: "{{ stage }}-iam-01.{{ domain }}"
shared_service_mail_hostname: "{{ stage }}-mail-01.{{ domain }}"
shared_service_gitea_hostname: "{{ stage }}-gitea-01.{{ domain }}"
shared_service_redis_hostname: "{{ stage }}-redis-01.{{ domain }}"
shared_service_kube_argocd_hostname: "{{ stage }}-kube-argocd.{{ domain }}"
shared_service_kube_awx_hostname: "{{ stage }}-kube-awx.{{ domain }}"
shared_service_kube_prometheus_hostname: "{{ stage }}-kube-prometheus.{{ domain }}"
shared_service_pdns_hostname: "{{ stage }}-pdns-01.{{ domain }}"
shared_service_webdav_hostname: "{{ stage }}-webdav-01.{{ domain }}"
@@ -247,9 +255,21 @@ shared_service_hosts: [
ip: "{{ shared_service_pdns_ip }}",
name: "{{ shared_service_pdns_hostname }}"
},
{
ip: "{{ shared_service_kube_ip }}",
name: "{{ shared_service_kube_argocd_hostname }}"
},
{
ip: "{{ shared_service_kube_ip }}",
name: "{{ shared_service_kube_awx_hostname }}"
},
{
ip: "{{ shared_service_kube_ip }}",
name: "{{ shared_service_kube_prometheus_hostname }}"
},
{
ip: "{{ shared_service_management_ip }}",
name: "{{ management_service_connect_hostname }}"
}
]

@@ -2,7 +2,7 @@
- name: 'apply kubernetes setup to {{ host | default("all") }}'
hosts: '{{ host | default("k8s_cluster") }}'
serial: "{{ serial_number | default(5) }}"
serial: "{{ serial_number | default(10) }}"
pre_tasks:
- name: "Check if ansible version is at least 2.10.x"
@@ -24,8 +24,8 @@
- { role: kubernetes/namespace }
- { role: kubernetes/cloud-controller-manager }
- { role: kubernetes/container-storage-interface }
- { role: kubernetes/apps, tags: prometheus }
- { role: kubernetes/prometheus }
- { role: kubernetes/cert-manager }
- { role: kubernetes/external-dns }
- { role: kubernetes/ingress-controller }
- { role: kubernetes/apps, tags: !prometheus }
- { role: kubernetes/apps }
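Side note on the removed entries: tags: !prometheus is not valid tag exclusion in an Ansible role list (YAML reads !prometheus as a custom tag, and Ansible only supports exclusion via --skip-tags at invocation time), so splitting Prometheus into its own role with a tagged task is the cleaner structure. A sketch of the resulting roles block, assuming the surrounding entries are otherwise unchanged:

roles:
  - { role: kubernetes/namespace }
  - { role: kubernetes/cloud-controller-manager }
  - { role: kubernetes/container-storage-interface }
  - { role: kubernetes/prometheus }        # Helm deploy task inside is tagged 'prometheus'
  - { role: kubernetes/cert-manager }
  - { role: kubernetes/external-dns }
  - { role: kubernetes/ingress-controller }
  - { role: kubernetes/apps }              # argo-cd / awx tasks, tagged individually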

@@ -30,76 +30,6 @@ argo_realm_users: [
}
]
# https://github.com/grafana/helm-charts
# https://github.com/prometheus-community/helm-charts
k8s_prometheus_helm__release_values:
prometheus:
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/issue-temporary-certificate: "true"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ( ip_whitelist + ip_whitelist_admins ) | join(',') }}"
hosts:
- "{{ stage }}-kube-prometheus.{{ domain }}"
tls:
- secretName: "{{ stage }}-kube-prometheus-cert"
hosts:
- "{{ stage }}-kube-prometheus.{{ domain }}"
prometheusSpec:
# TODO Using PersistentVolumeClaim
storageSpec: {}
deploymentStrategy:
type: Recreate
alertmanager:
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/issue-temporary-certificate: "true"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ( ip_whitelist + ip_whitelist_admins ) | join(',') }}"
hosts:
- "{{ stage }}-kube-alertmanager.{{ domain }}"
tls:
- secretName: "{{ stage }}-kube-alertmanager-cert"
hosts:
- "{{ stage }}-kube-alertmanager.{{ domain }}"
deploymentStrategy:
type: Recreate
grafana:
adminUser: "{{ grafana_admin_username }}"
adminPassword: "{{ grafana_admin_password }}"
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/issue-temporary-certificate: "true"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ( ip_whitelist + ip_whitelist_admins ) | join(',') }}"
hosts:
- "{{ stage }}-kube-grafana.{{ domain }}"
tls:
- secretName: "{{ stage }}-kube-grafana-cert"
hosts:
- "{{ stage }}-kube-grafana.{{ domain }}"
persistence:
enabled: true
size: 10Gi
deploymentStrategy:
type: Recreate
kubeControllerManager:
service:
port: 10257
targetPort: 10257
serviceMonitor:
https: true
insecureSkipVerify: true
# https://github.com/argoproj/argo-helm/tree/master/charts/argo-cd
k8s_argocd_helm__release_values:
global:
@@ -245,7 +175,7 @@ k8s_argocd_helm__release_values:
server: https://kubernetes.default.svc
project: infrastructure
source:
path: apps/awx
path: apps/{{ stage }}/awx
repoURL: https://{{ shared_service_gitea_hostname }}/gitea-admin/argocd.git
targetRevision: HEAD
syncPolicy:
@@ -262,7 +192,7 @@ k8s_argocd_helm__release_values:
server: https://kubernetes.default.svc
project: infrastructure
source:
path: apps/guestbook
path: apps/{{ stage }}/guestbook
repoURL: https://{{ shared_service_gitea_hostname }}/gitea-admin/argocd.git
targetRevision: HEAD
syncPolicy:
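Both Application definitions now read their manifests from a stage-scoped directory, so the argocd repository must provide one apps/<stage>/<app> tree per environment. A rendered example for a hypothetical stage "dev" and domain "example.com" (both values are assumptions for illustration):

# Rendered source block of the awx Application, assuming stage=dev, domain=example.com
source:
  path: apps/dev/awx
  repoURL: https://dev-gitea-01.example.com/gitea-admin/argocd.git
  targetRevision: HEAD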

@@ -1,7 +1,6 @@
---
- name: "Fetch all {{ awx_rest_api_type }} ids"
delegate_to: localhost
no_log: true
uri:
url: "{{ awx_base_url }}/api/v2/{{ awx_rest_api_type }}/"
@@ -20,7 +19,6 @@
- awx_config
- name: "Remove all {{ awx_rest_api_type }}" # noqa ignore-errors
delegate_to: localhost
no_log: true
uri:
url: "{{ awx_base_url }}{{ item.url }}"

@@ -6,7 +6,6 @@
changed_when: False
- name: "Search {{ awx_rest_api_type }} informations for {{ awx_search_name }}"
delegate_to: localhost
uri:
url: "{{ awx_base_url }}/api/v2/{{ awx_rest_api_type }}/?search={{ awx_search_name | urlencode }}"
method: GET

@@ -4,7 +4,6 @@
found_credential_id: ""
- name: "Get {{ job.name }} job_template credential id's from awx server"
delegate_to: localhost
uri:
url: "{{ awx_base_url }}/api/v2/job_templates/{{ awx_job_template_id }}/credentials"
method: GET
@@ -46,7 +45,6 @@
- awx_type_id is defined
- name: "Add credential id {{ awx_credential_id }} to {{ job.name }} job_template"
delegate_to: localhost
uri:
url: "{{ awx_base_url }}/api/v2/job_templates/{{ awx_job_template_id }}/credentials/"
method: POST

@@ -19,7 +19,6 @@
- (awx_type_id | default(None)) != "None"
- name: "Add job template {{ job.name }}"
delegate_to: localhost
vars:
name: "{{ job.name }}"
description: "{{ job.description | default(job.name) }}"

@@ -27,7 +27,6 @@
- awx_type_id != "None"
- name: "Add user <{{ awx_ansible_username }}>"
delegate_to: localhost
vars:
username: "{{ awx_ansible_username }}"
password: "{{ awx_ansible_password }}"
@@ -93,8 +92,7 @@
when:
- awx_type_id != "None"
- name: "Add <Machine> credentials <hetzner-ansible-ssh>"
delegate_to: localhost
- name: "Add <Machine> credentials <hetzner-ansible-ssh> with user: {{ ansible_awx_user_id }}"
vars:
name: "hetzner-ansible-ssh"
user_id: "{{ ansible_awx_user_id }}"
@@ -161,7 +159,6 @@
- awx_type_id != "None"
- name: "Add <Source Control> credentials <{{ stage }}-gitea>"
delegate_to: localhost
uri:
url: "{{ awx_base_url }}/api/v2/credentials/"
method: POST
@@ -229,7 +226,6 @@
- awx_type_id != "None"
- name: "Add <Vault> credentials <hetzner-ansible-vault>"
delegate_to: localhost
vars:
name: "hetzner-ansible-vault"
user_id: "{{ ansible_awx_user_id }}"
@@ -294,7 +290,6 @@
- awx_type_id != "None"
- name: "Add <Container Registry> credentials <{{ shared_service_harbor_hostname }}>"
delegate_to: localhost
vars:
name: "{{ shared_service_harbor_hostname }}"
description: "{{ shared_service_harbor_hostname }}"
@@ -354,7 +349,6 @@
- awx_type_id != "None"
- name: "Add execution environment <hetzner-ansible>"
delegate_to: localhost
vars:
name: "hetzner-ansible"
description: "hetzner-ansible"
@@ -411,7 +405,6 @@
- awx_type_id != "None"
- name: "Add inventory <localhost>"
delegate_to: localhost
vars:
name: "localhost"
description: "localhost"
@@ -465,7 +458,6 @@
- awx_type_id != "None"
- name: "Add project <hetzner-ansible>"
delegate_to: localhost
vars:
name: "hetzner-ansible"
description: "hetzner-ansible"

@@ -6,7 +6,6 @@
- always
- name: "Checkin if awx in k8s cluster is avail"
delegate_to: localhost
uri:
url: "{{ awx_base_url }}/api/login"
method: GET
@@ -43,7 +42,6 @@
- always
- name: "Authenticating with awx server"
delegate_to: localhost
uri:
url: "{{ awx_base_url }}/api/login"
method: GET

@@ -1,40 +1,26 @@
---
### tags:
### prometheus
### argo-cd
- name: Deploy kube-prometheus-stack inside monitoring namespace
kubernetes.core.helm:
name: "{{ k8s_prometheus_helm__name }}"
chart_repo_url: "{{ k8s_prometheus_helm__chart_repo_url | default('https://prometheus-community.github.io/helm-charts') }}"
chart_ref: "{{ k8s_prometheus_helm__chart_ref | default('kube-prometheus-stack') }}"
release_namespace: "{{ k8s_prometheus_helm__release_namespace }}"
create_namespace: yes
release_values: "{{ k8s_prometheus_helm__release_values }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- prometheus
- name: "Deploy argo-cd"
include_tasks: argocd.yml
tags:
- argo-cd
args:
apply:
tags:
- argo-cd
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- argo-cd
- name: "Configure AWX"
include_tasks: awx.yml
tags:
- awx
args:
apply:
tags:
- awx
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- awx

@@ -0,0 +1,74 @@
---
k8s_prometheus_helm__name: "prometheus"
k8s_prometheus_helm__release_namespace: "monitoring"
# https://github.com/grafana/helm-charts
# https://github.com/prometheus-community/helm-charts
k8s_prometheus_helm__release_values:
prometheus:
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/issue-temporary-certificate: "true"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ( ip_whitelist + ip_whitelist_admins ) | join(',') }}"
hosts:
- "{{ stage }}-kube-prometheus.{{ domain }}"
tls:
- secretName: "{{ stage }}-kube-prometheus-cert"
hosts:
- "{{ stage }}-kube-prometheus.{{ domain }}"
prometheusSpec:
# TODO Using PersistentVolumeClaim
storageSpec: {}
deploymentStrategy:
type: Recreate
alertmanager:
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/issue-temporary-certificate: "true"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ( ip_whitelist + ip_whitelist_admins ) | join(',') }}"
hosts:
- "{{ stage }}-kube-alertmanager.{{ domain }}"
tls:
- secretName: "{{ stage }}-kube-alertmanager-cert"
hosts:
- "{{ stage }}-kube-alertmanager.{{ domain }}"
deploymentStrategy:
type: Recreate
grafana:
adminUser: "{{ grafana_admin_username }}"
adminPassword: "{{ grafana_admin_password }}"
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
cert-manager.io/issue-temporary-certificate: "true"
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/whitelist-source-range: "{{ ( ip_whitelist + ip_whitelist_admins ) | join(',') }}"
hosts:
- "{{ stage }}-kube-grafana.{{ domain }}"
tls:
- secretName: "{{ stage }}-kube-grafana-cert"
hosts:
- "{{ stage }}-kube-grafana.{{ domain }}"
persistence:
enabled: true
size: 10Gi
deploymentStrategy:
type: Recreate
kubeControllerManager:
service:
port: 10257
targetPort: 10257
serviceMonitor:
https: true
insecureSkipVerify: true
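The storageSpec: {} placeholder leaves Prometheus on ephemeral storage, which the TODO above flags. A hedged sketch of the PersistentVolumeClaim variant using the kube-prometheus-stack values schema; the storage class and size are assumptions (e.g. the Hetzner CSI class), not settings from this repository:

# Hypothetical replacement for the empty storageSpec:
prometheus:
  prometheusSpec:
    storageSpec:
      volumeClaimTemplate:
        spec:
          storageClassName: hcloud-volumes   # assumption: Hetzner CSI storage class
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 20Gi                  # assumption: size to match retention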

@@ -0,0 +1,17 @@
---
### tags:
### prometheus
- name: Deploy kube-prometheus-stack inside monitoring namespace
kubernetes.core.helm:
name: "{{ k8s_prometheus_helm__name }}"
chart_repo_url: "{{ k8s_prometheus_helm__chart_repo_url | default('https://prometheus-community.github.io/helm-charts') }}"
chart_ref: "{{ k8s_prometheus_helm__chart_ref | default('kube-prometheus-stack') }}"
release_namespace: "{{ k8s_prometheus_helm__release_namespace }}"
create_namespace: yes
release_values: "{{ k8s_prometheus_helm__release_values }}"
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- prometheus