Compare commits


No commits in common. 'feature/DEV-380' and 'master' have entirely different histories.

@@ -11,17 +11,16 @@ services:
     alias: docker
 stages:
-  - lint
+  - ansible-lint
   - ansible-builder
-  - run-setup
-  - run-kubernetes
-  - run-management-update
-  - run-patchday
+  - ansible-run-setup
+  - ansible-run-kubernetes
+  - ansible-patchday
-lint-job:
-  stage: lint
+ansible-lint-job:
+  stage: ansible-lint
   script:
-    - echo "Running lint to check for linting violations"
+    - echo "Running ansible-lint to check for linting violations"
     - ansible-lint -c ansible-lint.cfg
   only:
     - branches
@@ -30,19 +29,19 @@ lint-job:
   tags:
     - dind
-builder-job:
+ansible-builder-job:
   # A resource group ensures a job is mutually exclusive across different pipelines for the same project.
-  resource_group: dev
+  resource_group: deployment
   stage: ansible-builder
   before_script:
     - cd ansible-builder
   script:
-    - echo "Running ansible-builder to build awx execution environment"
+    - echo "Running ansible-build to build awx execution environment"
     - ansible-builder build -v 3 --tag $AWX_EE_DOCKER_IMAGE_EXTERN:latest
     - docker push $AWX_EE_DOCKER_IMAGE_EXTERN:latest
   only:
     refs:
-      - main
+      - master
     changes:
       - pip-requirements
       - galaxy-requirements.yml
-##################################################################################
-.run-ansible:
-  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
-  tags:
-    - dind
-    - harbor # 05.02.22 TODO some runners run into timeouts
 ########
-### http://patorjk.com/software/taag/#p=display&f=Doom&t=setup.yml
+### https://patorjk.com/software/taag/#p=display&f=Doom&t=ansible%20-%20run
 ###
-### [ASCII-art banner: "setup.yml"]
+### [ASCII-art banner: "ansible - run - setup.yml"]
-.run-setup:
-  extends: .run-ansible
-  stage: run-setup
-  script:
+ansible-run-setup-1-dev:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-run-setup
+  before_script:
     - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
     - eval $(ssh-agent -s)
     - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
@@ -84,109 +75,81 @@ builder-job:
     - chmod 0700 ~/.ssh
     - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
     - ssh-add -L
-    - export HETZNER_LABEL_SELECTOR="stage=${STAGE}"
-    - ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml setup.yml --vault-password-file /tmp/vault-pass -t common -u gitlabci
+  script:
+    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
+    - STAGE=dev && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml setup.yml --tags common --vault-password-file /tmp/vault-pass -u gitlabci
   after_script:
     - rm /tmp/vault-pass
-run-setup-dev:
-  extends: .run-setup
-  resource_group: dev
-  before_script:
-    - export STAGE=dev
-    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
-  only:
-    - main
-run-setup-qa:
-  extends: .run-setup
-  resource_group: qa
-  before_script:
-    - export STAGE=qa
-    - echo "${ANSIBLE_VAULT_PASS_QA}" > /tmp/vault-pass
   only:
-    - qa
+    - master
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: dev
-run-setup-prodnso:
-  extends: .run-setup
-  resource_group: prodnso
-  before_script:
-    - export STAGE=prodnso
-    - echo "${ANSIBLE_VAULT_PASS_PRODNSO}" > /tmp/vault-pass
-  only:
-    - prodnso
-########
-### This Page: http://patorjk.com/software/taag/#p=display&f=Doom&t=kubernetes.yml
-###
-### [ASCII-art banner: "kubernetes.yml"]
-.run-kubernetes:
-  extends: .run-ansible
-  stage: run-kubernetes
-  script:
+ansible-run-setup-2-qa:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-run-setup
+  before_script:
     - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
     - eval $(ssh-agent -s)
     - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
     - mkdir -p ~/.ssh
     - chmod 0700 ~/.ssh
     - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
-    - ssh-add -L
-    - export HETZNER_LABEL_SELECTOR="stage=${STAGE}"
-    - ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml kubernetes.yml --vault-password-file /tmp/vault-pass -u gitlabci
+  script:
+    - echo "${ANSIBLE_VAULT_PASS_QA}" > /tmp/vault-pass
+    - STAGE=qa && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml setup.yml --tags common --vault-password-file /tmp/vault-pass -u gitlabci
   after_script:
     - rm /tmp/vault-pass
-run-kubernetes-dev:
-  extends: .run-kubernetes
-  resource_group: dev
-  before_script:
-    - export STAGE=dev
-    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
-  only:
-    - main
-run-kubernetes-qa:
-  extends: .run-kubernetes
-  resource_group: qa
-  before_script:
-    - export STAGE=qa
-    - echo "${ANSIBLE_VAULT_PASS_QA}" > /tmp/vault-pass
   only:
     - qa
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: qa
-run-kubernetes-prodnso:
-  extends: .run-kubernetes
-  resource_group: prodnso
+ansible-run-setup-3-prodnso:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-run-setup
   before_script:
-    - export STAGE=prodnso
+    - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
+    - eval $(ssh-agent -s)
+    - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
+    - mkdir -p ~/.ssh
+    - chmod 0700 ~/.ssh
+    - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
+  script:
     - echo "${ANSIBLE_VAULT_PASS_PRODNSO}" > /tmp/vault-pass
+    - STAGE=prodnso && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml setup.yml --tags common --vault-password-file /tmp/vault-pass -u gitlabci
+  after_script:
+    - rm /tmp/vault-pass
   only:
     - prodnso
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: prodnso
 ########
-### http://patorjk.com/software/taag/#p=display&f=Doom&t=smardigo.yml
+### https://patorjk.com/software/taag/#p=display&f=Doom&t=ansible%20-%20run
 ###
-### [ASCII-art banner: "smardigo.yml"]
+### [ASCII-art banner: "ansible - run - kubernetes.yml"]
-.run-management-update:
-  extends: .run-ansible
-  stage: run-management-update
-  script:
+ansible-run-kubernetes-1-dev:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-run-kubernetes
+  before_script:
     - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
     - eval $(ssh-agent -s)
     - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
@@ -194,94 +157,144 @@ run-kubernetes-prodnso:
     - chmod 0700 ~/.ssh
     - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
     - ssh-add -L
-    - export HETZNER_LABEL_SELECTOR="stage=${STAGE}"
-    - ansible-playbook -i stage-$STAGE smardigo.yml --vault-password-file=/tmp/vault-pass -l management -t update_configurations -u gitlabci
+  script:
+    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
+    - STAGE=dev && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml kubernetes.yml --vault-password-file /tmp/vault-pass -u gitlabci
   after_script:
     - rm /tmp/vault-pass
   only:
-    changes:
-      - smardigo/**/*
-run-management-update-dev:
-  extends: .run-management-update
+    - master
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
   resource_group: dev
-  before_script:
-    - export STAGE=dev
-    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
-  only:
-    - main
-run-management-update-qa:
-  extends: .run-management-update
-  resource_group: qa
+ansible-run-kubernetes-2-qa:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-run-kubernetes
   before_script:
-    - export STAGE=qa
+    - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
+    - eval $(ssh-agent -s)
+    - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
+    - mkdir -p ~/.ssh
+    - chmod 0700 ~/.ssh
+    - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
+  script:
     - echo "${ANSIBLE_VAULT_PASS_QA}" > /tmp/vault-pass
+    - STAGE=qa && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml kubernetes.yml --vault-password-file /tmp/vault-pass -u gitlabci
+  after_script:
+    - rm /tmp/vault-pass
   only:
     - qa
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: qa
-run-management-update-prodnso:
-  extends: .run-management-update
-  resource_group: prodnso
+ansible-run-kubernetes-3-prodnso:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-run-kubernetes
   before_script:
-    - export STAGE=prodnso
+    - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
+    - eval $(ssh-agent -s)
+    - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
+    - mkdir -p ~/.ssh
+    - chmod 0700 ~/.ssh
+    - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
+  script:
     - echo "${ANSIBLE_VAULT_PASS_PRODNSO}" > /tmp/vault-pass
+    - STAGE=prodnso && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml kubernetes.yml --vault-password-file /tmp/vault-pass -u gitlabci
+  after_script:
+    - rm /tmp/vault-pass
   only:
     - prodnso
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: prodnso
 ########
-### http://patorjk.com/software/taag/#p=display&f=Doom&t=patchday.yml
+### https://patorjk.com/software/taag/#p=display&f=Doom&t=patchday
 ###
-### [ASCII-art banner: "patchday.yml"]
+### [ASCII-art banner: "patchday"]
-.run-patchday:
-  extends: .run-ansible
-  stage: run-patchday
-  script:
+ansible-patchday-1-dev:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-patchday
+  before_script:
     - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
     - eval $(ssh-agent -s)
     - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
     - mkdir -p ~/.ssh
     - chmod 0700 ~/.ssh
     - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
-    - ssh-add -L
-    - export HETZNER_LABEL_SELECTOR="stage=${STAGE}"
-    - ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml patchday.yml patchday.yml --vault-password-file=/tmp/vault-pass -u gitlabci
+  script:
+    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
+    - STAGE=dev && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml patchday.yml --vault-password-file=/tmp/vault-pass -u gitlabci
   after_script:
     - rm /tmp/vault-pass
-  timeout: 2h
   when: manual
-run-patchday-dev:
-  extends: .run-patchday
-  resource_group: dev
-  before_script:
-    - export STAGE=dev
-    - echo "${ANSIBLE_VAULT_PASS_DEV}" > /tmp/vault-pass
   only:
-    - main
+    - master
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: dev
-run-patchday-qa:
-  extends: .run-patchday
-  resource_group: qa
+ansible-patchday-2-qa:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-patchday
   before_script:
-    - export STAGE=qa
+    - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
+    - eval $(ssh-agent -s)
+    - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
+    - mkdir -p ~/.ssh
+    - chmod 0700 ~/.ssh
+    - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
+  script:
     - echo "${ANSIBLE_VAULT_PASS_QA}" > /tmp/vault-pass
+    - STAGE=qa && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml patchday.yml --vault-password-file=/tmp/vault-pass -u gitlabci
+  after_script:
+    - rm /tmp/vault-pass
+  when: manual
   only:
     - qa
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: qa
-run-patchday-prodnso:
-  extends: .run-patchday
-  resource_group: prodnso
+ansible-patchday-3-prodnso:
+  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
+  stage: ansible-patchday
   before_script:
-    - export STAGE=prodnso
+    - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
+    - eval $(ssh-agent -s)
+    - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
+    - mkdir -p ~/.ssh
+    - chmod 0700 ~/.ssh
+    - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config'
+  script:
     - echo "${ANSIBLE_VAULT_PASS_PRODNSO}" > /tmp/vault-pass
+    - STAGE=prodnso && HETZNER_LABEL_SELECTOR="stage=${STAGE}" && ansible-playbook -i stage-${STAGE}-netgo-hcloud.yml patchday.yml --vault-password-file=/tmp/vault-pass -u gitlabci
+  after_script:
+    - rm /tmp/vault-pass
+  when: manual
   only:
     - prodnso
+    - schedules
+  tags:
+    - dind
+    - harbor # 05.02.22 TODO some runners run into timeouts
+  resource_group: prodnso
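Aside: the old side of this diff shared the ssh-agent bootstrap through the hidden `.run-ansible`/`.run-*` templates and `extends`, while the new jobs inline that block in every job. A minimal sketch of how the new jobs could keep their explicit names and still share the setup (the `.ssh-setup` template name is illustrative, not part of the diff):

```yaml
# Sketch only - a hidden job (leading dot) never runs directly; jobs that
# `extends` it inherit its keys, so the ssh-agent bootstrap lives in one place.
.ssh-setup:
  before_script:
    - 'command -v ssh-agent >/dev/null || ( apt-get update -y && apt-get install openssh-client -y )'
    - eval $(ssh-agent -s)
    - 'echo "$GITLAB_SSH_KEY" | tr -d "\r" | ssh-add -'
    - mkdir -p ~/.ssh
    - chmod 0700 ~/.ssh

ansible-run-setup-1-dev:
  extends: .ssh-setup
  image: $AWX_EE_DOCKER_IMAGE_EXTERN:latest
  stage: ansible-run-setup
```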

@@ -2,7 +2,7 @@
 pipelining = True
 host_key_checking = False
 inventory_plugins = ./inventory_plugins
-callbacks_enabled = profile_tasks
+callbacks_enabled = timer
 interpreter_python = auto_silent
 log_path=last_ansible_run
 forks = 30

@@ -61,7 +61,7 @@
   vars:
     ansible_connection: local
     ansible_ssh_host: "{{ stage_server_domain }}"
-    kibana_api_endpoint: '{{ shared_service_elastic_stack_kibana_01_hostname }}-kibana.{{ domain }}'
+    api_endpoint: '{{ stage }}-elastic-stack-kibana-01-kibana.{{ domain }}'
     elastic_state: present
     elastic_users:
       -

@@ -11,7 +11,6 @@
 # Parameters:
 #   playbook inventory
 #     stage := the name of the stage (e.g. dev, int, qa, prod)
-#     database_engine := the database engine to generate a complete backup for (e.g. postgres, maria)
 #   smardigo message callback
 #     scope_id := (scope id of the management process)
 #     process_instance_id := (process instance id of the management process)
@@ -51,7 +50,7 @@
     changed_when: False
   - name: "Add 'storage' servers to hosts if necessary"
     add_host:
-      name: "{{ stage }}-backup-01"
+      name: "{{ stage }}-fgrz-01"
       groups:
         - "stage_{{ stage }}"
         - storage
@@ -63,11 +62,6 @@
 - hosts: "postgres:maria"
   serial: "{{ serial_number | default(1) }}"
-  gather_facts: false
-  vars:
-    ansible_ssh_host: "{{ stage_server_domain }}"
-    current_date_time: '{{ get_current_date_time }}'
   tasks:
   - name: "Trigger backup mechanism"
     include_role:
@@ -80,11 +74,8 @@
 - hosts: "postgres:maria:storage"
   serial: "{{ serial_number | default(5) }}"
-  gather_facts: false
   vars:
-    ansible_ssh_host: "{{ stage_server_domain }}"
     storageserver_system_user: 'backuphamster'
   tasks:
   # I could not get it up and running with <synchronize> module
   # to sync data from remote server A to remote server B
@@ -92,8 +83,7 @@
     become: yes
     become_user: '{{ storageserver_system_user }}'
     vars:
-      # should work with non-fqdn due to existing entry in /etc/hosts
-      database_server_ip: "{{ stage }}-{{ database_engine }}-01"
+      database_server_ip: "{{ stage }}-{{ database_engine }}-01.{{ domain }}"
     shell: '/home/{{ storageserver_system_user }}/pull_remote_backups.sh {{ database_server_ip }} {{ stage }} {{ database_engine }}'
     when:
     - inventory_hostname in groups['storage']
@@ -101,7 +91,7 @@
   - name: "Cleanup remote backup dirs: {{ database_engine }}"
     become: yes
     file:
-      path: '{{ backup_directory }}/{{ database_engine }}/{{ get_current_date }}'
+      path: '{{ backup_directory }}/{{ database_engine }}/{{ ansible_date_time.date }}'
       state: absent
     when:
     - not inventory_hostname in groups['storage']
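Note on the `get_current_date` → `ansible_date_time.date` switch above: `ansible_date_time` is only populated once facts have been gathered, which is consistent with the same file dropping `gather_facts: false` from its plays. A minimal sketch (host group and variable names are illustrative):

```yaml
# Sketch only: fact gathering must stay enabled for ansible_date_time to resolve.
- hosts: storage
  gather_facts: true
  tasks:
    - name: "Cleanup today's backup dir"
      become: yes
      file:
        path: "{{ backup_directory }}/{{ database_engine }}/{{ ansible_date_time.date }}"
        state: absent
```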

@@ -52,7 +52,6 @@
 - hosts: "stage_{{ stage }}:!{{ stage }}-virtual-host-to-read-groups-vars"
   serial: "{{ serial_number | default(5) }}"
-  remote_user: root
   gather_facts: false
   pre_tasks:

@@ -1,9 +1,3 @@
 #!/bin/bash
-if [ "x$1" == "x" ];then
-  echo "Stage as param \$1 is missing. exit"
-  exit 1
-fi
 docker run -v `pwd`/templates/elastic-certs:/certs -v `pwd`/templates/elastic-certs/$1-instances.yaml:/usr/share/elasticsearch/config/certificates/$1-instances.yml docker.elastic.co/elasticsearch/elasticsearch:7.12.0 /bin/sh "/certs/certutil.sh" $1

@@ -1,99 +0,0 @@
---
# Parameters:
# playbook inventory
# stage := the name of the stage (e.g. dev, int, qa, prod)
# tenant_id := (unique key for the tenant, e.g. customer)
# cluster_name := (business name for the cluster, e.g. product, department )
# cluster_size := (WIP node count for the cluster)
# cluster_service := (service to setup, e.g. 'connect', ...)
# cluster_features := (optional features to use, e.g. ['wordpress', 'resubmission', ...])
# database_backup_file := the dump file to export, has to be on the database server under /tmp (e.g. wordpress_portal.sql)
# target_database := (optional) the database to export into ( see {{ connect_wordpress_maria_database }})
# smardigo message callback
# scope_id := (scope id of the management process)
# process_instance_id := (process instance id of the management process)
# smardigo_management_action := (smardigo management action anme of the management process)
#############################################################
# Creating inventory dynamically for given parameters
#############################################################
- hosts: localhost
connection: local
gather_facts: false
pre_tasks:
- name: "Check if ansible version is at least 2.10.x"
assert:
that:
- ansible_version.major >= 2
- ansible_version.minor >= 10
msg: "The ansible version has to be at least ({{ ansible_version.full }})"
# add virtual server to load stage specific variables as context
- name: "Add <{{ stage }}-virtual-host-to-read-groups-vars> to hosts"
add_host:
name: "{{ stage }}-virtual-host-to-read-groups-vars"
groups:
- "stage_{{ stage }}"
changed_when: False
tasks:
- name: Add maria servers to hosts if necessary
add_host:
name: "{{ stage }}-maria-01"
groups:
- "stage_{{ stage }}"
- "{{ item }}"
changed_when: False
with_items: "{{ cluster_features }}"
when: item in ['connect_wordpress']
#############################################################
# exporting database backups for created inventory
#############################################################
- hosts: "stage_{{ stage }}:!{{ stage }}-virtual-host-to-read-groups-vars"
serial: "{{ serial_number | default(1) }}"
remote_user: root
vars:
ansible_ssh_host: "{{ stage_server_domain }}"
pre_tasks:
- name: "export autodiscover pre-tasks"
import_tasks: tasks/autodiscover_pre_tasks.yml
become: false
tags:
- always
roles:
- role: export_maria_database
vars:
database_backup_file: "{{ stage }}-{{ tenant_id }}-{{ cluster_name }}-wordpress.sql.gz"
when:
- "'connect_wordpress' in group_names"
- "target_database is defined"
- role: export_maria_database
vars:
target_database: "{{ connect_wordpress_maria_database }}"
database_backup_file: "{{ stage }}-{{ tenant_id }}-{{ cluster_name }}-wordpress.sql.gz"
when:
- "'connect_wordpress' in group_names"
#############################################################
# Sending smardigo management message to process
#############################################################
- hosts: "{{ stage }}-virtual-host-to-read-groups-vars"
serial: "{{ serial_number | default(1) }}"
gather_facts: false
connection: local
run_once: true
vars:
connect_jwt_username: "{{ management_admin_username }}"
tasks:
- name: "Sending smardigo management message to <{{ smardigo_management_url }}>"
include_tasks: tasks/smardigo_management_message.yml

@@ -20,8 +20,6 @@ roles:
     version: v3.6.1
     src: https://github.com/Oefenweb/ansible-postfix.git
     scm: git
-  - name: geerlingguy.mysql
-    version: 3.3.2
 collections:
   - name: hetzner.hcloud

@@ -1,60 +0,0 @@
---
# Parameters:
# playbook inventory
# stage := the name of the stage (e.g. dev, int, qa, prod)
# environment variable
# GITLAB_API_TOKEN := Access token from gitlab
#############################################################
# Creating inventory dynamically for given parameters
#############################################################
- hosts: localhost
gather_facts: false
connection: local
tasks:
- name: Add hosts
add_host:
name: "{{ stage }}-gitlab"
groups: "{{ ['stage_' + stage ] }}"
#############################################################
# Creating gitlab mirrors for current stage
#############################################################
- hosts: "stage_{{ stage }}"
serial: "{{ serial_number | default(1) }}"
gather_facts: false
connection: local
vars:
projects:
- id: 1210
name: argocd
- id: 1216
name: operator-awx
- id: 1212
name: operator-jaeger
- id: 1231
name: operator-knative
- id: 1233
name: smardigo-awx
- id: 1232
name: smardigo-jaeger
pre_tasks:
- name: "Add repository remote mirror to project"
delegate_to: 127.0.0.1
become: false
uri:
url: "https://git.dev-at.de/api/v4/projects/{{ item.id }}/remote_mirrors"
method: POST
body_format: json
body:
enabled: true
only_protected_branches: true
url: "https://{{ gitea_admin_username }}:{{ gitea_admin_password }}@{{ shared_service_gitea_hostname }}/argocd/{{ item.name }}.git"
headers:
PRIVATE-TOKEN: "{{ lookup('env', 'GITLAB_API_TOKEN') }}"
status_code: [201]
loop: "{{ projects }}"

@@ -1,269 +0,0 @@
---
hcloud_firewall_objects:
-
name: "{{ stage }}-default"
state: present
rules:
-
direction: in
protocol: icmp
port: ''
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: ICMP allowed
-
direction: in
protocol: tcp
port: '22'
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: SSH allowed
-
direction: in
protocol: tcp
port: '80'
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: HTTP allowed
-
direction: in
protocol: tcp
port: '443'
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: HTTPS allowed
-
direction: in
protocol: tcp
port: 'any'
source_ips: '{{ ip_whitelist_admins }}'
destination_ips: []
description: TCP - allow work from home without VPN
-
direction: in
protocol: udp
port: 'any'
source_ips: '{{ ip_whitelist_admins }}'
destination_ips: []
description: UDP - allow work from home without VPN
apply_to:
-
type: label_selector
label_selector:
selector: 'stage={{ stage }}'
-
name: "{{ stage }}-monitoring"
state: present
rules:
-
direction: in
protocol: tcp
port: '9080-9085'
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: 'Server/Service Monitoring'
-
direction: in
protocol: tcp
port: '9001'
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: 'PgAdmin'
-
direction: in
protocol: tcp
port: '9187'
source_ips: '{{ ip_whitelist }}'
destination_ips: []
description: 'Postgres-Exporter'
apply_to:
-
type: label_selector
label_selector:
selector: 'stage={{ stage }}'
-
name: "{{ stage }}-monitoring-extern-https"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips:
- "{{ lookup('community.general.dig', 'dev-blackbox-01.smardigo.digital' ) }}/32"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=connect'
-
type: label_selector
label_selector:
selector: 'service=keycloak'
hcloud_firewall_objects_awx:
-
name: "{{ stage }}-awx-ssh-access-for-k8s-nodes"
state: present
rules:
-
direction: in
protocol: tcp
port: '22'
source_ips: "{{ awx_source_ips }}"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'stage={{ stage }}'
hcloud_firewall_objects_backup:
-
name: "{{ stage }}-backup-ssh-access"
state: present
rules:
-
direction: in
protocol: tcp
port: '22'
source_ips:
- "{{ offsite_storage_server_ip }}"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=backup'
hcloud_firewall_objects_gitea:
-
name: "{{ stage }}-access-to-gitea"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ ip_whitelist }}"
destination_ips: []
description: "Allow access for whitelisted ips"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + awx_source_ips }}"
destination_ips: []
description: "Allow access for kubernetes worker nodes"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + (gitea_https_whitelisted_ips | default([])) }}"
destination_ips: []
description: "Allow access for custom whitelisted ips"
apply_to:
-
type: label_selector
label_selector:
selector: 'service=gitea'
hcloud_firewall_objects_keycloak:
-
name: "{{ stage }}-access-to-keycloak"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ ip_whitelist }}"
destination_ips: []
description: "Allow access for whitelisted ips"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + awx_source_ips }}"
destination_ips: []
description: "Allow access for kubernetes worker nodes"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + (keycloak_https_whitelisted_ips | default([])) }}"
destination_ips: []
description: "Allow access for custom whitelisted ips"
apply_to:
-
type: label_selector
label_selector:
selector: 'service=keycloak'
hcloud_firewall_objects_kibana:
-
name: "{{ stage }}-access-to-kibana"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ ip_whitelist }}"
destination_ips: []
description: "Allow access for whitelisted ips"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + awx_source_ips }}"
destination_ips: []
description: "Allow access for kubernetes worker nodes"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + (kibana_https_whitelisted_ips | default([])) }}"
destination_ips: []
description: "Allow access for custom whitelisted ips"
apply_to:
-
type: label_selector
label_selector:
selector: 'service=kibana'
hcloud_firewall_objects_management:
-
name: "{{ stage }}-access-to-management"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ ip_whitelist }}"
destination_ips: []
description: "Allow access for whitelisted ips"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + awx_source_ips }}"
destination_ips: []
description: "Allow access for kubernetes worker nodes"
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ [shared_service_network] + (management_https_whitelisted_ips | default([])) }}"
destination_ips: []
description: "Allow access for custom whitelisted ips"
apply_to:
-
type: label_selector
label_selector:
selector: 'service=connect,tenant=management'

@@ -39,11 +39,9 @@ common_apt_dependencies:
   - zip
   - curl
   - htop
-  - iotop
   - net-tools
   - bash-completion
   - python3-pip
-  - iotop
 common_pip_dependencies:
   - docker-compose
@@ -66,17 +64,14 @@ awx_credential_machine_hetzner_name: hetzner-ansible-ssh
 gitlab_ansible_user_name: "gitlabci"
-backupuser_user_name: backupuser
 # used for root-access by hetzner on server creation (@see cloud console/security/ssh-keys)
 hetzner_ssh_keys:
   - "claus.paetow@netgo.de"
   - "friedrich.goerz@netgo.de"
   - "peter.heise@netgo.de"
   - "sven.ketelsen@netgo.de"
-  - "michael.haehnel@netgo.de"
   - "{{ awx_ansible_user_name }}@netgo.de"
-  - "{{ gitlab_ansible_user_name }}@git.dev-at.de"
+  - "{{ gitlab_ansible_user_name }}@netgo.de"
 hetzner_server_labels: "stage={{ stage }}"
@@ -104,26 +99,25 @@ sudo_group: "{{ sudo_groups
   | replace('.','-') }}"
 # whitelist for outdated user detection - they wont't be deleted at all
-default_users:
+default_plattform_users:
   - 'nobody'
   - 'elastic'
   - 'postgres'
   - 'administrator'
   - '{{ admin_user }}'
+  - '{{ backupuser_username }}'
-default_plattform_users:
+smardigo_plattform_users:
   - 'claus.paetow'
   - 'friedrich.goerz'
   - 'peter.heise'
   - 'sven.ketelsen'
-  - 'michael.haehnel'
   - '{{ awx_ansible_user_name }}'
   - '{{ gitlab_ansible_user_name }}'
-smardigo_plattform_users: "{{ default_plattform_users + custom_plattform_users | default([]) }}"
 ip_whitelist_admins:
-  - "87.150.33.14/32" # sven
+  - "79.215.10.239/32" # sven
+  - "212.86.56.112/32" # peter
 ip_whitelist:
   - "212.121.131.106/32" # netgo berlin
@@ -131,7 +125,9 @@ ip_whitelist:
   - "46.245.219.98/32" # netgo borken
   - "{{ shared_service_network }}"
-offsite_storage_server_ip: 142.132.155.83/32 # for test purpose DEV-361
+# currently (2022.03.18) set to IP of hetzner VM
+gitlab_storage_server: 167.235.18.147/32
 docker_owner: "{{ admin_user }}"
 docker_group: "{{ admin_user }}"
@@ -141,13 +137,12 @@ docker_compose_path: "/usr/bin/docker-compose"
 service_base_path: '/etc/smardigo'
-devops_email_address: "nso.devops@netgo.de"
-gitea_admin_email: '{{ devops_email_address }}'
-lets_encrypt_email: '{{ devops_email_address }}'
-connect_admin_email: '{{ devops_email_address }}'
-keycloak_admin_email: '{{ devops_email_address }}'
-pgadmin4_admin_email: '{{ devops_email_address }}'
-harbor_oidc_admin_email: '{{ devops_email_address }}'
+gitea_admin_email: "nso.devops@netgo.de"
+lets_encrypt_email: "nso.devops@netgo.de"
+connect_admin_email: "nso.devops@netgo.de"
+keycloak_admin_email: "nso.devops@netgo.de"
+pgadmin4_admin_email: "nso.devops@netgo.de"
+harbor_oidc_admin_email: "nso.devops@netgo.de"
 http_port: "80"
 https_port: "443"
@@ -202,8 +197,121 @@ blackbox_http_2xx_additional_targets: []
 prometheus_federation_enabled: true
 kubernetes_prometheus_endpoint: "{{ stage }}-kube-prometheus.{{ domain }}"
-get_current_date: "{{ lookup('pipe','date +%Y-%m-%d') }}"
-get_current_date_time: "{{ lookup('pipe','date +%Y-%m-%d_%H:%M') }}"
+backupuser_username: backupuser
+backupuser_ssh_pubkey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDAFRYAy3PqimYUWcO4Q9pdTvDQTsq7hKjWYoQEsJICnRRv+W+5d2lJvC3gqMpmWy9XxtrYePkVHCgIvfJSas9Jv7n7eeYoeWLWJq0nRSKg6EKFCH9y3v8tGPJQQf7wogOhHwr6m79c+lpNVUsVR+QOf76+47ZuwnuEBzK6xbDkmwyt7SPrJ59IFxOlmtz2HgVlTLczLalMygM4qlXqIt+lwuuFz4CsGcr4TwMKp9Uk6SCP3OV12oLnUUUOA3r72qmE4+JeUN6VNbXoBXEANfXm5kbM8w+dFhulCi1fQZCssB8PStA7Cs0gVqL6DYNUKRZaFL8e77hljGkPlOQDxOsBexPuceSDmmr6s5qT1wA6bnEFoeWbLlxixGlFA+1Q/LqWsYzoOZiTHDoaXvsc4VizlPp4Fn0OgJefPjuzBsWOyf0ob5oucfnmCAvEh/k+ioq0bIQDcliAM1UezitblHQgGHhqnKPMi664i0ULLiExARe4IV3KJiaG++RJyzUL5HNz3Qru+K5/pdj2jffluYTC4w+6ZYfjWEZS/DAumExv9T97kFOsapHCQJwTBa368Ch6uKkPCZO8p/ra3xTIUh/PibHaVCadgX2NR9q6jdiQtmc0SOyNJlMlPZD/Q1NrjXJ18ASny7gCBFItMyMtinVx9xQxQ+PFLB8oNYERw1ejIw== storage-server-smardigo'
+current_date_time: "{{ lookup('pipe','date +%Y-%m-%d_%H:%M') }}"
hcloud_firewall_objects:
-
name: "{{ stage }}-default"
state: present
rules:
-
direction: in
protocol: icmp
port: ''
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: ICMP allowed
-
direction: in
protocol: tcp
port: '22'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: SSH allowed
-
direction: in
protocol: tcp
port: '80'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: HTTP allowed
-
direction: in
protocol: tcp
port: '443'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: HTTPS allowed
-
direction: in
protocol: tcp
port: 'any'
source_ips: '{{ ip_whitelist_admins }}'
destination_ips: []
description: TCP - allow work from home without VPN
-
direction: in
protocol: udp
port: 'any'
source_ips: '{{ ip_whitelist_admins }}'
destination_ips: []
description: UDP - allow work from home without VPN
apply_to:
-
type: label_selector
label_selector:
selector: 'stage={{ stage }}'
-
name: "{{ stage }}-monitoring"
state: present
rules:
-
direction: in
protocol: tcp
port: '9080-9085'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: 'Server/Service Monitoring'
-
direction: in
protocol: tcp
port: '9001'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: 'PgAdmin'
-
direction: in
protocol: tcp
port: '9187'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: 'Postgres-Exporter'
-
direction: in
protocol: tcp
port: '80'
source_ips: '{{ ip_whitelist + ip_whitelist_admins }}'
destination_ips: []
description: 'AWX'
apply_to:
-
type: label_selector
label_selector:
selector: 'stage={{ stage }}'
-
name: "{{ stage }}-monitoring-extern-https"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips:
- "{{ lookup('community.general.dig', 'dev-blackbox-01.smardigo.digital' ) }}/32"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=connect'
-
type: label_selector
label_selector:
selector: 'service=keycloak'
hetzner_authentication_ansible: "{{ hetzner_authentication_ansible_vault }}"
hetzner_authentication_ccm: "{{ hetzner_authentication_ccm_vault }}"
@@ -213,9 +321,83 @@ k8s_basic_services:
   - kubelet
   - containerd
-selfsigned_ca_private_key_passphrase: '{{ selfsigned_ca_private_key_passphrase_vault }}'
-prometheus_alert_diskspaceusage_warning: 85
-prometheus_alert_pg_replication_lag: 120
-wordpress_haproxy_admin_password: "{{ wordpress_haproxy_admin_password_vault | default('haproxy-admin') }}"
+hcloud_firewall_objects_awx:
+  -
+    name: "{{ stage }}-awx-ssh-access-for-k8s-nodes"
+    state: present
+    rules:
+      -
direction: in
protocol: tcp
port: '22'
source_ips: "{{ src_ips }}"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'stage={{ stage }}'
-
name: "{{ stage }}-awx-access-SMA-mgmt-instance"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ src_ips }}"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=connect,tenant=management'
-
name: "{{ stage }}-awx-access-443-SMA-peripheral-instances"
state: present
rules:
-
direction: in
protocol: tcp
port: '443'
source_ips: "{{ src_ips }}"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=gitea'
-
type: label_selector
label_selector:
selector: 'service=keycloak'
-
type: label_selector
label_selector:
selector: 'service=kibana'
hcloud_firewall_objects_backup:
-
name: "{{ stage }}-database-backup-ssh-access"
state: present
rules:
-
direction: in
protocol: tcp
port: '22'
source_ips:
- "{{ gitlab_storage_server }}"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=postgres'
-
type: label_selector
label_selector:
selector: 'service=maria'
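The `hcloud_firewall_objects*` lists added above are plain data; a consuming task presumably loops over them. A hedged sketch of such a task (the exact module arguments used by this repo's roles are not visible in the diff; `hetzner.hcloud.hcloud_firewall` with `name`/`state`/`rules` is the collection's documented surface, everything else here is illustrative):

```yaml
# Sketch only - ensure each firewall object exists; label-selector/apply_to
# handling may be done elsewhere in the role.
- name: Ensure Hetzner Cloud firewalls
  hetzner.hcloud.hcloud_firewall:
    name: "{{ item.name }}"
    state: "{{ item.state }}"
    rules: "{{ item.rules }}"
  loop: "{{ hcloud_firewall_objects + hcloud_firewall_objects_awx + hcloud_firewall_objects_backup }}"
```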

@@ -1,11 +1,9 @@
 ---
 #TODO needs to be removed after story DEV-361 is finished
 hetzner_server_type: "{{ hetzner_server_type_bastelserver | default('cx21') }}"
-hetzner_server_labels: "stage={{ stage }} service=backup"
+hetzner_server_labels: "stage={{ stage }} service=bastelserver"
 docker_enabled: false
 traefik_enabled: false
 filebeat_enabled: false
-node_exporter_enabled: false
-custom_plattform_users:
-  - backuphamster

@@ -1,6 +1,6 @@
 ---
-connect_wordpress_maria_host: "mariaproxy"
+connect_wordpress_maria_host: "{{ shared_service_maria_hostname }}"
 connect_wordpress_maria_database: "{{ stage }}_{{ tenant_id }}_{{ cluster_name }}_connect_wordpress"
 connect_wordpress_maria_username: "{{ connect_wordpress_maria_database }}"
 connect_wordpress_maria_password: "connect-wordpress-maria-admin"
@@ -11,4 +11,4 @@ connect_wordpress_oidc_client_id: "{{ cluster_name }}"
 connect_wordpress_oidc_client_secret: "{{ cluster_name }}"
 connect_wordpress_buergerportal_username: "buergerportal"
-connect_wordpress_buergerportal_password: "Buerger?P0rtal."
+connect_wordpress_buergerportal_password: "buergerportal"

@@ -1,3 +0,0 @@
---
connect_workflow_heatmap_enabled: "true"

@@ -6,3 +6,4 @@ hetzner_server_labels: "stage={{ stage }} service=kube_control_plane"
 docker_enabled: false
 traefik_enabled: false
 filebeat_enabled: false
+node_exporter_enabled: false

@@ -6,3 +6,4 @@ hetzner_server_labels: "stage={{ stage }} service=kube_node"
 docker_enabled: false
 traefik_enabled: false
 filebeat_enabled: false
+node_exporter_enabled: false

@@ -2,15 +2,13 @@
 hetzner_server_type: cx21
-connect_image_version: "9.0"
+connect_image_version: "latest"
 connect_admin_username: "{{ management_admin_username }}"
 connect_admin_password: "{{ management_admin_password }}"
 connect_workflow_env: "stage:{{ stage }};smardigoUserToken:{{ smardigo_auth_token_value }}"
 connect_process_search_module: "external"
 connect_oidc_client_secret: "{{ management_oidc_client_secret }}"
-connect_external_task_script_worker_enabled: "true"
 spring_profiles_include: "prod,postgres,elastic,swagger"
 tenant_id: "{{ management_oidc_realm }}"

@@ -10,6 +10,3 @@ mysql_users: []
 docker_enabled: false
 traefik_enabled: false
 filebeat_enabled: false
-custom_plattform_users:
-  - '{{ backupuser_user_name }}'

@@ -8,6 +8,3 @@ postgres_acls: []
 docker_enabled: false
 traefik_enabled: false
 filebeat_enabled: false
-custom_plattform_users:
-  - '{{ backupuser_user_name }}'

@@ -1,15 +0,0 @@
---
hetzner_server_type: "{{ hetzner_server_type_restore_database | default('cpx21') }}"
hetzner_server_labels: "stage={{ stage }} service=restore database_engine={{ database_engine | default('') }} manual=''"
docker_enabled: false
traefik_enabled: false
filebeat_enabled: false
custom_plattform_users:
- '{{ backupuser_user_name }}'
# postgresql related
# defining type of server (naster|slave|restore)
server_type: restore

@@ -1,3 +1,3 @@
 ---
-awx_hetzner_ansible_revision: "main"
+awx_hetzner_ansible_revision: "master"

@@ -58,18 +58,12 @@ shared_service_pg_slave_ip: "{{ stage_server_infos
   | list
   | first
   | default('-') }}"
-shared_service_maria_1st_ip: "{{ stage_server_infos
+shared_service_maria_ip: "{{ stage_server_infos
   | selectattr('name', 'match', stage + '-maria-01' )
   | map(attribute='private_ip')
   | list
   | first
   | default('-') }}"
-shared_service_maria_2nd_ip: "{{ stage_server_infos
-  | selectattr('name', 'match', stage + '-maria-02' )
-  | map(attribute='private_ip')
-  | list
-  | first
-  | default('-') }}"
 shared_service_keycloak_ip: "{{ stage_server_infos
   | selectattr('name', 'match', stage + '-keycloak-01' )
   | map(attribute='private_ip')
@@ -145,14 +139,12 @@ shared_service_management_ip: "{{ stage_server_infos
 shared_service_kube_ip: "{{ stage_private_ingress_loadbalancer_ip | default('-') }}"
-shared_service_maria_1st_hostname: "{{ stage }}-maria-01"
-shared_service_maria_2nd_hostname: "{{ stage }}-maria-02"
+shared_service_maria_hostname: "{{ stage }}-maria-01"
 shared_service_postgres_01_hostname: "{{ stage }}-postgres-01"
 shared_service_elastic_stack_01_hostname: "{{ stage }}-elastic-stack-elastic-01"
 shared_service_elastic_stack_02_hostname: "{{ stage }}-elastic-stack-elastic-02"
 shared_service_elastic_stack_03_hostname: "{{ stage }}-elastic-stack-elastic-03"
 shared_service_elastic_stack_logstash_01_hostname: "{{ stage }}-elastic-stack-logstash-01"
-shared_service_elastic_stack_kibana_01_hostname: "{{ stage }}-elastic-stack-kibana-01"
 kube_master_01_hostname: "{{ stage }}-kube-master-01.{{ domain }}"
 kube_master_02_hostname: "{{ stage }}-kube-master-02.{{ domain }}"
@@ -200,12 +192,8 @@ shared_service_hosts: [
     name: "{{ shared_service_elastic_stack_logstash_01_hostname }}"
   },
   {
-    ip: "{{ shared_service_maria_1st_ip }}",
-    name: "{{ shared_service_maria_1st_hostname }}"
-  },
-  {
-    ip: "{{ shared_service_maria_2nd_ip }}",
-    name: "{{ shared_service_maria_2nd_hostname }}"
+    ip: "{{ shared_service_maria_ip }}",
+    name: "{{ shared_service_maria_hostname }}"
   },
   {
     ip: "{{ shared_service_pg_master_ip }}",
@@ -302,8 +290,9 @@ harbor_oidc_client_secret: "{{ docker_registry_oidc_client_secret_vault }}"
 harbor_oidc_admin_username: "harbor-admin"
 harbor_oidc_admin_password: "harbor-admin"
-connect_image_version: "8.6"
+postgres_listen_addresses: "listen_addresses = 'localhost,{{ stage_server_ip }},{{ stage_private_server_ip }}'"
+connect_image_version: "8.5.47"
 iam_image_version: "latest"
 management_oidc_realm: "management"
@@ -367,9 +356,6 @@ argocd_admin_password: "argocd-admin"
 argo_keycloak_client_secret: "{{ argo_keycloak_client_secret_vault }}"
 argocd_server_admin_password: "{{ argocd_server_admin_password_vault }}"
-awx_admin_username: "awx-admin"
-awx_admin_password: "{{ awx_admin_password_vault }}"
 netgo_msteams_hook_cd: "{{ netgo_msteams_hook_cd_vault }}"
 netgo_msteams_hook_alerting: "{{ netgo_msteams_hook_alerting_vault }}"
@@ -377,7 +363,7 @@ management_oidc_client_secret: "{{ management_oidc_client_secret_vault }}"
 # smardigo automation DEV gpg key
 # https://git.dev-at.de/smardigo-hetzner/communication-keys/
-# push mirror: https://{{ stage }}-gitea-01.smardigo.digital/communication-keys.git
+# push mirror: https://dev-gitea-01.smardigo.digital/gitea-admin/communication-keys/
 gpg_key_smardigo_automation__private: '{{ gpg_key_smardigo_automation__private__vault }}'
 iam_opentracing_jaeger_enabled: true

File diff suppressed because it is too large

@@ -12,5 +12,3 @@ node_exporter_enabled: false
 shared_service_network: "10.2.0.0/16"
 shared_service_hosts: []
-traefik_admin_username: "traefik-admin"

@@ -1,22 +1,18 @@
 $ANSIBLE_VAULT;1.1;AES256
-38663233373062663161366637373233653833663531383237653432633832363036393236653231
-6136663865393830306533376665343733383565366333630a303664306465393566383663323666
-31663735623036363431346561616538623534636334633438366238653936383335303430613932
-6461346332313639340a626139353538326461633133396163393464393335373866356133333038
-30656133346362393635663566383938633663303662623136373537353462333239366331376462
-64633239373639356463363464376564663162393064623635623033633966653139303766383437
-63393832376561646330343637633761653232656238383636333963646332303734303539373730
-37613833313332393663656466316639326164306636663861363530636338633337633833343630
-36333636633164613130653732616236646663626332613234306530616565626666343335616565
-37303464396237323261643236633264633838626236373734396535643466373035346436376133
-63623765663134373261343431366261666565303631376533303465383161366135383263326663
-35323766306238396430343965653335323437663161326233623066356464316434633234303162
-35626634383366303436343038336336333963326530326161336462326535376264343564396231
-32323662323839353939653065306261636338643139613933323634666633313636353864396166
-35383633353735383430303930303437393563323264656439353730353839616561373639336664
-31663237343136353564636366643865363464656534393832383531393532646166643637326337
-38306139663863653131386263336138643831303031396537373835613731393834386261356435
-39333331353635363633396337643234396231323463306465323636343539353232353464333236
-31396139383137666536663365393362393832656336653535626430333033353737633661663366
-65633130663937373861616131353631326135396366623231366131333432326662653365373134
-37303734383038346530393866613965663262373638313536663863356563383732
+34376237343736386538353235346231326462313534643130616532633535613331643236353764
+3737383533313861373030313237366131356438393333350a323230316663346634636634353239
+61326262653334646539626464646663383164666166306162646166333462383833333832353461
+3437663431653566650a383632653134343238393762333131613633313036636536343831333630
+34633361373264376263303364353531636434356263663965626639616666633861636463383637
+34333838663834666532366564396566313739386262633335313335386661646166363636323766
+35363535353664346463336566663163303333663065613532623265303262396531303831653636
+65353565353233626331356666343932333539356331303161303062316433633761623132333033
+65376632376266336361363832613064323861393366313763316434316264663562616134353766
+62643165633030363237636632386166396538666337616430323534313062333965336233333836
+36306637323764333233666239336331373763633737623666393466376163313738393036336232
+34613536336336663837353031323665323733313634313731326537333938396361373435366435
+32643338346635633962346537393338653464383431396432343932373439386230613537356134
+64386165363233636237656364396333336261613037323136363630613533353639646439303337
+31626663393335343962663033646135333366623738346436393764353438383264666666653635
+64643462656332653361313766656633616134373166333163346131616334343161616235633666
+3366

@@ -1,4 +0,0 @@
keycloak_https_whitelisted_ips:
- 195.200.47.243/32 # DEV-230 - sparda berlin
- 195.200.47.244/32 # DEV-230 - sparda berlin
- 92.42.192.157/32 # MOB-28 - mobene

@@ -58,18 +58,12 @@ shared_service_pg_slave_ip: "{{ stage_server_infos
   | list
   | first
   | default('-') }}"
-shared_service_maria_1st_ip: "{{ stage_server_infos
+shared_service_maria_ip: "{{ stage_server_infos
   | selectattr('name', 'match', stage + '-maria-01' )
   | map(attribute='private_ip')
   | list
   | first
   | default('-') }}"
-shared_service_maria_2nd_ip: "{{ stage_server_infos
-  | selectattr('name', 'match', stage + '-maria-02' )
-  | map(attribute='private_ip')
-  | list
-  | first
-  | default('-') }}"
 shared_service_keycloak_ip: "{{ stage_server_infos
   | selectattr('name', 'match', stage + '-keycloak-01' )
   | map(attribute='private_ip')
@@ -145,14 +139,12 @@ shared_service_management_ip: "{{ stage_server_infos
 shared_service_kube_ip: "{{ stage_private_ingress_loadbalancer_ip | default('-') }}"
-shared_service_maria_1st_hostname: "{{ stage }}-maria-01"
-shared_service_maria_2nd_hostname: "{{ stage }}-maria-02"
+shared_service_maria_hostname: "{{ stage }}-maria-01"
 shared_service_postgres_01_hostname: "{{ stage }}-postgres-01"
 shared_service_elastic_stack_01_hostname: "{{ stage }}-elastic-stack-elastic-01"
 shared_service_elastic_stack_02_hostname: "{{ stage }}-elastic-stack-elastic-02"
 shared_service_elastic_stack_03_hostname: "{{ stage }}-elastic-stack-elastic-03"
 shared_service_elastic_stack_logstash_01_hostname: "{{ stage }}-elastic-stack-logstash-01"
-shared_service_elastic_stack_kibana_01_hostname: "{{ stage }}-elastic-stack-kibana-01"
 kube_master_01_hostname: "{{ stage }}-kube-master-01.{{ domain }}"
 kube_master_02_hostname: "{{ stage }}-kube-master-02.{{ domain }}"
@@ -200,12 +192,8 @@ shared_service_hosts: [
     name: "{{ shared_service_elastic_stack_logstash_01_hostname }}"
   },
   {
-    ip: "{{ shared_service_maria_1st_ip }}",
-    name: "{{ shared_service_maria_1st_hostname }}"
-  },
-  {
-    ip: "{{ shared_service_maria_2nd_ip }}",
-    name: "{{ shared_service_maria_2nd_hostname }}"
+    ip: "{{ shared_service_maria_ip }}",
+    name: "{{ shared_service_maria_hostname }}"
   },
   {
     ip: "{{ shared_service_pg_master_ip }}",
@ -302,8 +290,9 @@ harbor_oidc_client_secret: "{{ docker_registry_oidc_client_secret_vault }}"
harbor_oidc_admin_username: "harbor-admin" harbor_oidc_admin_username: "harbor-admin"
harbor_oidc_admin_password: "{{ harbor_oidc_admin_password_vault }}" harbor_oidc_admin_password: "{{ harbor_oidc_admin_password_vault }}"
postgres_listen_addresses: "listen_addresses = 'localhost,{{ stage_server_ip }},{{ stage_private_server_ip }}'"
connect_image_version: "8.6" connect_image_version: "8.5.47"
iam_image_version: "latest" iam_image_version: "latest"
management_oidc_realm: "management" management_oidc_realm: "management"
@ -367,9 +356,6 @@ argocd_admin_password: "{{ argocd_admin_password_vault }}"
argo_keycloak_client_secret: "{{ argo_keycloak_client_secret_vault }}" argo_keycloak_client_secret: "{{ argo_keycloak_client_secret_vault }}"
argocd_server_admin_password: "{{ argocd_server_admin_password_vault }}" argocd_server_admin_password: "{{ argocd_server_admin_password_vault }}"
awx_admin_username: "awx-admin"
awx_admin_password: "{{ awx_admin_password_vault }}"
netgo_msteams_hook_cd: "{{ netgo_msteams_hook_cd_vault }}" netgo_msteams_hook_cd: "{{ netgo_msteams_hook_cd_vault }}"
netgo_msteams_hook_alerting: "{{ netgo_msteams_hook_alerting_vault }}" netgo_msteams_hook_alerting: "{{ netgo_msteams_hook_alerting_vault }}"

File diff suppressed because it is too large

@ -58,18 +58,12 @@ shared_service_pg_slave_ip: "{{ stage_server_infos
| list | list
| first | first
| default('-') }}" | default('-') }}"
shared_service_maria_1st_ip: "{{ stage_server_infos shared_service_maria_ip: "{{ stage_server_infos
| selectattr('name', 'match', stage + '-maria-01' ) | selectattr('name', 'match', stage + '-maria-01' )
| map(attribute='private_ip') | map(attribute='private_ip')
| list | list
| first | first
| default('-') }}" | default('-') }}"
shared_service_maria_2nd_ip: "{{ stage_server_infos
| selectattr('name', 'match', stage + '-maria-02' )
| map(attribute='private_ip')
| list
| first
| default('-') }}"
shared_service_keycloak_ip: "{{ stage_server_infos shared_service_keycloak_ip: "{{ stage_server_infos
| selectattr('name', 'match', stage + '-keycloak-01' ) | selectattr('name', 'match', stage + '-keycloak-01' )
| map(attribute='private_ip') | map(attribute='private_ip')
@ -145,14 +139,12 @@ shared_service_management_ip: "{{ stage_server_infos
shared_service_kube_ip: "{{ stage_private_ingress_loadbalancer_ip | default('-') }}" shared_service_kube_ip: "{{ stage_private_ingress_loadbalancer_ip | default('-') }}"
shared_service_maria_1st_hostname: "{{ stage }}-maria-01" shared_service_maria_hostname: "{{ stage }}-maria-01"
shared_service_maria_2nd_hostname: "{{ stage }}-maria-02"
shared_service_postgres_01_hostname: "{{ stage }}-postgres-01" shared_service_postgres_01_hostname: "{{ stage }}-postgres-01"
shared_service_elastic_stack_01_hostname: "{{ stage }}-elastic-stack-elastic-01" shared_service_elastic_stack_01_hostname: "{{ stage }}-elastic-stack-elastic-01"
shared_service_elastic_stack_02_hostname: "{{ stage }}-elastic-stack-elastic-02" shared_service_elastic_stack_02_hostname: "{{ stage }}-elastic-stack-elastic-02"
shared_service_elastic_stack_03_hostname: "{{ stage }}-elastic-stack-elastic-03" shared_service_elastic_stack_03_hostname: "{{ stage }}-elastic-stack-elastic-03"
shared_service_elastic_stack_logstash_01_hostname: "{{ stage }}-elastic-stack-logstash-01" shared_service_elastic_stack_logstash_01_hostname: "{{ stage }}-elastic-stack-logstash-01"
shared_service_elastic_stack_kibana_01_hostname: "{{ stage }}-elastic-stack-kibana-01"
kube_master_01_hostname: "{{ stage }}-kube-master-01.{{ domain }}" kube_master_01_hostname: "{{ stage }}-kube-master-01.{{ domain }}"
kube_master_02_hostname: "{{ stage }}-kube-master-02.{{ domain }}" kube_master_02_hostname: "{{ stage }}-kube-master-02.{{ domain }}"
@ -200,12 +192,8 @@ shared_service_hosts: [
name: "{{ shared_service_elastic_stack_logstash_01_hostname }}" name: "{{ shared_service_elastic_stack_logstash_01_hostname }}"
}, },
{ {
ip: "{{ shared_service_maria_1st_ip }}", ip: "{{ shared_service_maria_ip }}",
name: "{{ shared_service_maria_1st_hostname }}" name: "{{ shared_service_maria_hostname }}"
},
{
ip: "{{ shared_service_maria_2nd_ip }}",
name: "{{ shared_service_maria_2nd_hostname }}"
}, },
{ {
ip: "{{ shared_service_pg_master_ip }}", ip: "{{ shared_service_pg_master_ip }}",
@ -302,8 +290,9 @@ harbor_oidc_client_secret: "{{ docker_registry_oidc_client_secret_vault }}"
harbor_oidc_admin_username: "harbor-admin" harbor_oidc_admin_username: "harbor-admin"
harbor_oidc_admin_password: "{{ harbor_oidc_admin_password_vault }}" harbor_oidc_admin_password: "{{ harbor_oidc_admin_password_vault }}"
postgres_listen_addresses: "listen_addresses = 'localhost,{{ stage_server_ip }},{{ stage_private_server_ip }}'"
connect_image_version: "8.6" connect_image_version: "8.5.47"
iam_image_version: "latest" iam_image_version: "latest"
management_oidc_realm: "management" management_oidc_realm: "management"
@ -367,9 +356,6 @@ argocd_admin_password: "{{ argocd_admin_password_vault }}"
argo_keycloak_client_secret: "{{ argo_keycloak_client_secret_vault }}" argo_keycloak_client_secret: "{{ argo_keycloak_client_secret_vault }}"
argocd_server_admin_password: "{{ argocd_server_admin_password_vault }}" argocd_server_admin_password: "{{ argocd_server_admin_password_vault }}"
awx_admin_username: "awx-admin"
awx_admin_password: "{{ awx_admin_password_vault }}"
netgo_msteams_hook_cd: "{{ netgo_msteams_hook_cd_vault }}" netgo_msteams_hook_cd: "{{ netgo_msteams_hook_cd_vault }}"
netgo_msteams_hook_alerting: "{{ netgo_msteams_hook_alerting_vault }}" netgo_msteams_hook_alerting: "{{ netgo_msteams_hook_alerting_vault }}"

File diff suppressed because it is too large

@ -47,6 +47,11 @@
loop: "{{ hcloud_firewall_objects }}" loop: "{{ hcloud_firewall_objects }}"
loop_control: loop_control:
loop_var: firewall_object loop_var: firewall_object
# set the variable awx_related=true (e.g. via --extra-vars) to trigger this playbook part;
# an example invocation follows the task below
#
# this has to be implemented via a switch because nodes may still be missing
# the first time the playbook is executed
# #
- name: "Generate awx-related hcloud firewall rules" - name: "Generate awx-related hcloud firewall rules"
block: block:
@ -71,57 +76,21 @@
name: hcloud name: hcloud
tasks_from: configure-firewall2 tasks_from: configure-firewall2
vars: vars:
awx_source_ips: '{{ k8s_worker_node_ips }}' src_ips: '{{ k8s_worker_node_ips }}'
loop: "{{ hcloud_firewall_objects_awx }}" loop: "{{ hcloud_firewall_objects_awx }}"
loop_control: loop_control:
loop_var: firewall_object loop_var: firewall_object
when:
- awx_related is defined
- awx_related
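# example invocation (the playbook name setup.yml is an assumption based on the pipeline stage names):
#   ansible-playbook setup.yml -e awx_related=true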
- name: "Setup hcloud firewalls for database backup..." - name: "Setup hcloud firewalls for database backup stuff..."
include_role: include_role:
name: hcloud name: hcloud
tasks_from: configure-firewall2 tasks_from: configure-firewall2
vars: loop: "{{ hcloud_firewall_objects_backup }}"
awx_source_ips: '{{ k8s_worker_node_ips }}' loop_control:
loop: "{{ hcloud_firewall_objects_backup }}" loop_var: firewall_object
loop_control: when:
loop_var: firewall_object - backup_related is defined
- backup_related
- name: "Setup hcloud firewalls for gitea..."
include_role:
name: hcloud
tasks_from: configure-firewall2
vars:
awx_source_ips: '{{ k8s_worker_node_ips }}'
loop: "{{ hcloud_firewall_objects_gitea }}"
loop_control:
loop_var: firewall_object
- name: "Setup hcloud firewalls for keycloak..."
include_role:
name: hcloud
tasks_from: configure-firewall2
vars:
awx_source_ips: '{{ k8s_worker_node_ips }}'
loop: "{{ hcloud_firewall_objects_keycloak }}"
loop_control:
loop_var: firewall_object
- name: "Setup hcloud firewalls for kibana..."
include_role:
name: hcloud
tasks_from: configure-firewall2
vars:
awx_source_ips: '{{ k8s_worker_node_ips }}'
loop: "{{ hcloud_firewall_objects_kibana }}"
loop_control:
loop_var: firewall_object
- name: "Setup hcloud firewalls for management..."
include_role:
name: hcloud
tasks_from: configure-firewall2
vars:
awx_source_ips: '{{ k8s_worker_node_ips }}'
loop: "{{ hcloud_firewall_objects_management }}"
loop_control:
loop_var: firewall_object

@ -2,4 +2,4 @@
hetzner_server_labels: "stage={{ stage }} service=connect tenant=bdev" hetzner_server_labels: "stage={{ stage }} service=connect tenant=bdev"
hetzner_server_type: cpx21 hetzner_server_type: cx31

@ -1,5 +0,0 @@
---
hetzner_server_labels: "stage={{ stage }} service=connect tenant=bdev"
hetzner_server_type: cpx21

@ -1,4 +1,3 @@
--- ---
hetzner_server_type: cpx21
server_type: "master" server_type: "master"

@ -1,4 +1,3 @@
--- ---
hetzner_server_type: cpx21
server_type: "slave" server_type: "slave"

@ -1,3 +0,0 @@
---
hetzner_server_type: cpx21

@ -102,55 +102,33 @@
name: postgresql name: postgresql
state: started state: started
# wait_for can no longer be used because SSL encryption was enabled for postgres connections in DEV-382
- name: "Smardigo Patchday: check if postgres is listing on net internal ip address" - name: "Smardigo Patchday: check if postgres is listing on net internal ip address"
become: no ansible.builtin.wait_for:
community.postgresql.postgresql_ping: delay: 15
timeout: 180
port: 5432 port: 5432
ssl_mode: require host: '{{ stage_server_ip }}'
login_host: '{{ stage_private_server_ip }}'
register: check_postgres register: check_postgres
ignore_errors: yes
- name: "Smardigo Patchday: error-handling - ensure postgres started and check listing on net internal ip address" - name: "Smardigo Patchday: restart postgres and check listing on net internal ip address again"
block: block:
- name: "Smardigo Patchday: error-handling - ensure service(s) started" - name: "Smardigo Patchday: stop service(s)"
ansible.builtin.systemd: ansible.builtin.systemd:
name: postgresql name: postgresql
state: started state: restarted
- name: "Smardigo Patchday: error-handling - check if postgres is listing on net internal ip address" - name: "Smardigo Patchday: check if postgres is listing on net internal ip address"
become: no ansible.builtin.wait_for:
community.postgresql.postgresql_ping: delay: 15
timeout: 180
port: 5432 port: 5432
ssl_mode: require host: '{{ stage_server_ip }}'
login_host: '{{ stage_private_server_ip }}' register: check_postgres
register: check_postgres_again failed_when: check_postgres_again.failed
retries: 5
failed_when: not check_postgres_again.is_available
rescue:
- name: "Smardigo Patchday: error-handling - send mail to DEVOPS-DL"
delegate_to: '{{ stage }}-mail-01'
community.general.mail:
host: localhost
port: 25
to: '{{ devops_email_address }}'
subject: "patchday( {{ lookup('pipe','date +%Y-%m-%d_%H:%M') }} ) problem report for {{ inventory_hostname }}"
body: |
Dear Sir or Madam,
I have to inform you that {{ inventory_hostname }} isn't listening on {{ stage_private_server_ip }} anymore.
Please check what happened and fix it, little padawan ;)
kind regards,
your automation-bofh
when: when:
- not check_postgres.is_available - check_postgres.failed
- hosts: all,!elastic,!postgres,!k8s_cluster - hosts: all,!elastic,!postgres,!k8s_cluster
serial: 10 serial: 10

@ -1,251 +0,0 @@
---
# restores remote database backup
# - postgres
# - executed on stage specific server: {{ stage }}-restore-postgres-01
# - restores a server from full-backup
# - mariadb
# - executed on stage specific server: {{ stage }}-restore-maria-01
# - restores a server from full-backup
# Parameters:
# playbook inventory
# stage := the name of the stage (e.g. dev, int, qa, prod)
# database_engine := the database engine to restore a backup for (e.g. postgres, maria)
# smardigo message callback
# scope_id := (scope id of the management process)
# process_instance_id := (process instance id of the management process)
# smardigo_management_action := (smardigo management action name of the management process)
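# example invocation (inventory path is an assumption; the playbook name matches the awx job template of the same name):
#   ansible-playbook -i inventory restore-remote-database-backup.yml -e "stage=dev database_engine=postgres"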
#############################################################
# Creating inventory dynamically for given parameters
#############################################################
- hosts: localhost
connection: local
gather_facts: false
pre_tasks:
- name: "Check if ansible version is at least 2.10.x"
assert:
that:
- ansible_version.full is version('2.10', '>=')
msg: "The ansible version must be at least 2.10 (found {{ ansible_version.full }})"
# add virtual server to load stage specific variables as context
- name: "Add <{{ stage }}-virtual-host-to-read-groups-vars> to hosts"
add_host:
name: "{{ stage }}-virtual-host-to-read-groups-vars"
groups:
- "stage_{{ stage }}"
changed_when: False
tasks:
- name: "Add {{ database_engine }} servers to hosts if necessary"
add_host:
name: "{{ stage }}-restore-{{ database_engine }}-01"
groups:
- "stage_{{ stage }}"
- 'restore'
changed_when: False
- name: "Add 'backup' servers to hosts if necessary"
add_host:
name: "{{ stage }}-backup-01"
groups:
- "stage_{{ stage }}"
- backup
changed_when: False
#############################################################
# Create restore server(s)
#############################################################
- hosts: "restore"
serial: "{{ serial_number | default(1) }}"
gather_facts: false
remote_user: root
roles:
- role: hcloud
vars:
sma_digitalocean_ttl: 60 # set to 60s to reduce DNS caching problems with internal IT when debugging ansible problems ;)
#############################################################
# Provisioning server(s) for created inventory
#############################################################
- hosts: "restore"
serial: "{{ serial_number | default(1) }}"
remote_user: root
vars:
ansible_ssh_host: "{{ stage_server_domain }}"
pre_tasks:
- name: "Import autodiscover pre-tasks"
import_tasks: tasks/autodiscover_pre_tasks.yml
become: false
tags:
- always
roles:
- role: common
- role: filebeat
when: filebeat_enabled | default(True)
- role: node_exporter
when: node_exporter_enabled | default(True)
- role: restore_{{ database_engine }}
#############################################################
# add restore specific firewall rule
#############################################################
- hosts: "{{ stage }}-virtual-host-to-read-groups-vars"
serial: "{{ serial_number | default(1) }}"
gather_facts: false
connection: local
vars:
hcloud_firewall_objects_backup:
-
name: "{{ stage }}-restore-ssh-access"
state: present
rules:
-
direction: in
protocol: tcp
port: '22'
source_ips:
- "{{ lookup('community.general.dig', groups['backup'][0] + '.' + domain ) }}/32"
destination_ips: []
description: null
apply_to:
-
type: label_selector
label_selector:
selector: 'service=restore'
tasks:
- name: "Add hcloud firewall rule(s)"
include_role:
name: hcloud
tasks_from: configure-firewall2
loop: "{{ hcloud_firewall_objects_backup }}"
loop_control:
loop_var: firewall_object
#############################################################
# Syncing backups from backup server to restore server
#############################################################
- hosts: "backup"
serial: "{{ serial_number | default(5) }}"
gather_facts: false
vars:
backupserver_system_user: 'backuphamster'
ansible_ssh_host: "{{ stage_server_domain }}"
tasks:
# I could not get the <synchronize> module up and running
# to sync data from remote server A to remote server B (see the sketch after this play)
- name: "Syncing remote backups"
become: yes
become_user: '{{ backupserver_system_user }}'
vars:
database_server_ip: "{{ groups['restore'][0] }}.{{ domain }}"
shell: '/home/{{ backupserver_system_user }}/push_backups_to_restore_server.sh {{ database_server_ip }} {{ stage }} {{ database_engine }}'
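(The shell workaround above replaces the <synchronize> module; for reference, the usual remote-to-remote pattern is to delegate the task to the source host. A minimal, untested sketch with illustrative paths, run against the restore hosts:
- name: "Sketch: push backups via ansible.posix.synchronize"
  ansible.posix.synchronize:
    mode: push
    src: '/home/{{ backupserver_system_user }}/backups/{{ stage }}/{{ database_engine }}/'
    dest: '/home/backupuser/backups/{{ stage }}/{{ database_engine }}/'
  delegate_to: "{{ groups['backup'][0] }}"
)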
#############################################################
# Restoring from backup
#############################################################
- hosts: "restore"
serial: "{{ serial_number | default(1) }}"
gather_facts: false
vars:
ansible_ssh_host: "{{ stage_server_domain }}"
tasks:
- name: "Triggering restore"
become: yes
shell: '/root/restore.sh {{ stage }}'
- name: "Check for test data on postgres"
block:
- name: "Querying postgres ..."
become: yes
become_user: postgres
community.postgresql.postgresql_query:
db: dummytestdb
query: SELECT movie FROM movie_quotes WHERE quote = %(quote_val)s
named_args:
quote_val: 'Shall we play'
register: query_output
- assert:
that:
- 'query_output.query_all_results | first | selectattr("movie","match","wargames") | length == 1'
when:
- database_engine == 'postgres'
- name: "Check for test data on mariadb"
block:
- name: "Querying mariadb ..."
become: yes
become_user: root
community.mysql.mysql_query:
login_unix_socket: /run/mysqld/mysqld.sock
login_db: dummytestdb
query: SELECT movie FROM movie_quotes WHERE quote = %s
positional_args:
- 'Shall we play'
register: query_output
- assert:
that:
- 'query_output.query_result | first | selectattr("movie","match","wargames") | length == 1'
when:
- database_engine == 'maria'
#############################################################
# Deleting servers/domains for created inventory
#############################################################
- hosts: "restore"
serial: "{{ serial_number | default(5) }}"
gather_facts: false
tasks:
- name: "Delete server <{{ inventory_hostname }}>"
include_role:
name: hcloud
tasks_from: _set_server_state
vars:
- server_state: "absent"
- name: "Delete DNS entry <{{ inventory_hostname }}> for <{{ domain }}>"
include_role:
name: sma_digitalocean
tasks_from: _remove_dns
vars:
record_to_remove: '{{ inventory_hostname }}'
#############################################################
# Sending smardigo management message to process
#############################################################
- hosts: "{{ stage }}-virtual-host-to-read-groups-vars"
serial: "{{ serial_number | default(1) }}"
gather_facts: false
connection: local
run_once: true
vars:
connect_jwt_username: "{{ management_admin_username }}"
tasks:
- name: "Sending smardigo management message to <{{ smardigo_management_url }}>"
include_tasks: tasks/smardigo_management_message.yml

@ -1,32 +0,0 @@
#!/bin/bash
#
# pushes the latest encrypted database backup for a given stage/engine to the restore server
#
REMOTE_SYSTEM_USER=backupuser
DATABASE_SERVER_IP=$1
STAGE=$2
DATABASE_ENGINE=$3
# currently it defaults to today's date
DATE=$(date +%F)
LOCAL_BACKUP_DIR="${HOME}/backups/${STAGE}/${DATABASE_ENGINE}"
BACKUP_FILE_FOR_TRANSFER=$(find "${LOCAL_BACKUP_DIR}/${DATE}/" -name '*.gz.gpg' | tail -n 1)
REMOTE_BACKUP_DIR="/home/${REMOTE_SYSTEM_USER}/backups/${STAGE}/${DATABASE_ENGINE}"
DEST_DIR="${REMOTE_BACKUP_DIR}/${DATE}/"
# avoid "REMOTE HOST IDENTIFICATION HAS CHANGED" - errors due to dynamic created server on restore process
ssh-keygen -f "/home/backuphamster/.ssh/known_hosts" -R ${DATABASE_SERVER_IP}
SSH_OPTIONS='-o StrictHostKeyChecking=no'
# the mkdir below is needed because the rsync option --mkpath is not available in rsync 3.1.3
ssh ${SSH_OPTIONS} ${REMOTE_SYSTEM_USER}@${DATABASE_SERVER_IP} "mkdir -p ${DEST_DIR}"
rsync -v -e "ssh ${SSH_OPTIONS}" $BACKUP_FILE_FOR_TRANSFER ${REMOTE_SYSTEM_USER}@${DATABASE_SERVER_IP}:${DEST_DIR}
BKP_FILE_TRANSFERRED=$(echo $BACKUP_FILE_FOR_TRANSFER | awk -F / '{ print $NF}')
ssh ${SSH_OPTIONS} ${REMOTE_SYSTEM_USER}@${DATABASE_SERVER_IP} "test -f ${DEST_DIR}${BKP_FILE_TRANSFERRED}"
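# example invocation (arguments: database server IP, stage, database engine; values are illustrative):
#   ./push_backups_to_restore_server.sh 203.0.113.10 dev postgres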

@ -59,7 +59,7 @@
- name: "Remove outdated users" - name: "Remove outdated users"
user: name={{ item }} state=absent remove=yes user: name={{ item }} state=absent remove=yes
with_items: "{{ current_users.stdout_lines }}" with_items: "{{ current_users.stdout_lines }}"
when: not ((item in default_users) or (item in smardigo_plattform_users)) when: not ((item in default_plattform_users) or (item in smardigo_plattform_users))
tags: tags:
- users - users
@ -97,13 +97,24 @@
tags: tags:
- users - users
- name: "Update available package list" - name: "Create stuff for backups on database servers"
apt: block:
update_cache: yes - name: "Create system user for remote_backup"
become: yes
ansible.builtin.user:
name: '{{ backupuser_username }}'
comment: "user for backup"
shell: /bin/bash
- name: "Add SSH pub key to auth_keys"
authorized_key:
user: '{{ backupuser_username }}'
key: '{{ backupuser_ssh_pubkey }}'
when:
- inventory_hostname in groups['postgres'] or
inventory_hostname in groups['maria']
tags: tags:
- install - users
- upgrade
when: ansible_distribution == "Ubuntu"
- name: "Ensure docker configuration directory exists" - name: "Ensure docker configuration directory exists"
file: file:
@ -268,13 +279,3 @@
state: present state: present
tags: tags:
- config - config
- name: "configure ssh_hardening"
include_role:
# include role from collection called 'devsec'
name: devsec.hardening.ssh_hardening
apply:
tags:
- ssh_hardening
tags:
- ssh_hardening
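(The devsec.hardening collection referenced above is published on Ansible Galaxy and can be installed with:
ansible-galaxy collection install devsec.hardening
)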

@ -2,6 +2,7 @@
### tags: ### tags:
### update_connections ### update_connections
### update_configuration
- name: "Checking connect is running on <{{ connect_base_url }}>" - name: "Checking connect is running on <{{ connect_base_url }}>"
delegate_to: 127.0.0.1 delegate_to: 127.0.0.1
@ -18,6 +19,7 @@
until: connect_profile_info.status in [200] until: connect_profile_info.status in [200]
tags: tags:
- update_connections - update_connections
- update_configuration
- name: "Reading connections from <{{ connect_base_url }}>" - name: "Reading connections from <{{ connect_base_url }}>"
delegate_to: 127.0.0.1 delegate_to: 127.0.0.1
@ -31,6 +33,7 @@
register: connect_connections_result register: connect_connections_result
tags: tags:
- update_connections - update_connections
- update_configuration
- name: "Reading connection ids from <{{ connect_base_url }}>" - name: "Reading connection ids from <{{ connect_base_url }}>"
set_fact: set_fact:
@ -39,6 +42,7 @@
querystr: "[[*].id]" querystr: "[[*].id]"
tags: tags:
- update_connections - update_connections
- update_configuration
- name: "Printing connection ids for <{{ connect_base_url }}>" - name: "Printing connection ids for <{{ connect_base_url }}>"
debug: debug:
@ -47,6 +51,7 @@
- debug - debug
tags: tags:
- update_connections - update_connections
- update_configuration
- name: "Creating/Updating connections on <{{ connect_base_url }}>" - name: "Creating/Updating connections on <{{ connect_base_url }}>"
delegate_to: 127.0.0.1 delegate_to: 127.0.0.1
@ -65,3 +70,4 @@
changed_when: true changed_when: true
tags: tags:
- update_connections - update_connections
- update_configuration

@ -73,3 +73,10 @@
pull: yes pull: yes
tags: tags:
- update_deployment - update_deployment
- name: "Configure connect connections"
include_tasks: connections.yml
when:
smardigo_auth_token_value is defined
tags:
- always

@ -29,7 +29,7 @@ connect_environment: [
"SPRING_PROFILES_INCLUDE: \"{{ spring_profiles_include | default('swagger') }}\"", "SPRING_PROFILES_INCLUDE: \"{{ spring_profiles_include | default('swagger') }}\"",
"RIBBON_DISPLAY_ON_ACTIVE_PROFILES: \"{{ ribbon_display_on_active_profiles | default('dev') }}\"", "RIBBON_DISPLAY_ON_ACTIVE_PROFILES: \"{{ ribbon_display_on_active_profiles | default('dev') }}\"",
"DATASOURCE_URL: \"jdbc:postgresql://{{ connect_postgres_host }}:{{ service_port_postgres }}/{{ connect_postgres_database }}?sslmode=require\"", "DATASOURCE_URL: \"jdbc:postgresql://{{ connect_postgres_host }}:{{ service_port_postgres }}/{{ connect_postgres_database }}\"",
"DATASOURCE_USERNAME: \"{{ connect_postgres_username }}\"", "DATASOURCE_USERNAME: \"{{ connect_postgres_username }}\"",
"DATASOURCE_PASSWORD: \"{{ connect_postgres_password }}\"", "DATASOURCE_PASSWORD: \"{{ connect_postgres_password }}\"",
"FILE_WHITELIST_URL: \"{{ connect_whitelist_url | default('') }}\"", "FILE_WHITELIST_URL: \"{{ connect_whitelist_url | default('') }}\"",
@ -99,10 +99,6 @@ connect_environment: [
"OPENTRACING_JAEGER_LOG_SPANS: \"{{ connect_opentracing_jaeger_log_spans | default(false) }}\"", "OPENTRACING_JAEGER_LOG_SPANS: \"{{ connect_opentracing_jaeger_log_spans | default(false) }}\"",
"OPENTRACING_JAEGER_SERVICE_NAME: \"{{ connect_opentracing_jaeger_service_name | default(connect_id) }}\"", "OPENTRACING_JAEGER_SERVICE_NAME: \"{{ connect_opentracing_jaeger_service_name | default(connect_id) }}\"",
"OPENTRACING_JAEGER_HTTP_SENDER_URL: \"{{ connect_opentracing_jaeger_http_sender_url | default() }}\"", "OPENTRACING_JAEGER_HTTP_SENDER_URL: \"{{ connect_opentracing_jaeger_http_sender_url | default() }}\"",
"CONFIG_DELETE_SCOPE_ENABLED: \"{{ connect_config_delete_scope_enabled | default(false) }}\"",
"CONFIG_LOCAL_IMPORT_ENABLED: \"{{ connect_config_local_import_enabled | default(false) }}\"",
"SMA_WORKFLOW_HEATMAP_ENABLED: \"{{ connect_workflow_heatmap_enabled | default(false) }}\"",
] ]
connect_docker: { connect_docker: {

@ -27,23 +27,6 @@ wordpress_docker: {
}, },
], ],
services: [ services: [
{
name: "mariaproxy",
image_name: "haproxytech/haproxy-alpine",
image_version: "2.2.24",
ports: [
{
"external": "16666",
"internal": "6666",
},
],
volumes: [
'"./config/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro"',
],
networks: [
'"back-tier"',
],
},
{ {
name: "{{ wordpress_id }}", name: "{{ wordpress_id }}",
image_name: "{{ wordpress_image_name }}", image_name: "{{ wordpress_image_name }}",
@ -59,7 +42,6 @@ wordpress_docker: {
"WORDPRESS_CONFIG_EXTRA: |", "WORDPRESS_CONFIG_EXTRA: |",
" define( 'WP_HOME', 'https://{{ wordpress_base_url }}' );", " define( 'WP_HOME', 'https://{{ wordpress_base_url }}' );",
" define( 'WP_SITEURL', 'https://{{ wordpress_base_url }}' );", " define( 'WP_SITEURL', 'https://{{ wordpress_base_url }}' );",
" define( 'MYSQL_CLIENT_FLAGS', MYSQLI_CLIENT_SSL | MYSQLI_CLIENT_SSL_DONT_VERIFY_SERVER_CERT );",
"AUTH_API: \"https://{{ shared_service_keycloak_hostname }}\"", "AUTH_API: \"https://{{ shared_service_keycloak_hostname }}\"",
"RESOURCE_API: \"https://{{ connect_base_url }}\"", "RESOURCE_API: \"https://{{ connect_base_url }}\"",
"REALM_ID: \"{{ current_realm_name }}\"", "REALM_ID: \"{{ current_realm_name }}\"",

@ -5,4 +5,4 @@
- name: "Setup maria for {{ inventory_hostname }}" - name: "Setup maria for {{ inventory_hostname }}"
include_role: include_role:
name: maria name: maria
tasks_from: _create_database tasks_from: _create-database

@ -1,3 +0,0 @@
---
upload_directory: "{{ backup_directory }}"

@ -1,11 +0,0 @@
---
### tags:
- name: "Export database <{{ target_database }}> to <{{ upload_directory }}/{{ database_backup_file }}>"
community.mysql.mysql_db:
name: "{{ target_database }}"
state: dump
target: "/{{ upload_directory }}/{{ database_backup_file }}"
config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}"

@ -31,7 +31,6 @@ gitea_environment: [
"GITEA__database__NAME: \"{{ gitea_postgres_database }}\"", "GITEA__database__NAME: \"{{ gitea_postgres_database }}\"",
"GITEA__database__USER: \"{{ gitea_postgres_database }}\"", "GITEA__database__USER: \"{{ gitea_postgres_database }}\"",
"GITEA__database__PASSWD: \"{{ gitea_postgres_password }}\"", "GITEA__database__PASSWD: \"{{ gitea_postgres_password }}\"",
"GITEA__database__SSL_MODE: \"require\"",
"GITEA__server__DOMAIN: \"{{ stage_server_domain }}\"", "GITEA__server__DOMAIN: \"{{ stage_server_domain }}\"",
"GITEA__server__SSH_DOMAIN: \"{{ stage_server_domain }}\"", "GITEA__server__SSH_DOMAIN: \"{{ stage_server_domain }}\"",

@ -162,4 +162,3 @@
systemd: systemd:
name: harbor name: harbor
state: started state: started
enabled: yes

@ -1,5 +1,3 @@
--- ---
server_state: "present" server_state: "present"
max_retries: 15
retry_delay: 60

@ -1,61 +1,14 @@
--- ---
- name: "Block to handle hetzner server state in case of problems"
block:
- name: "Increment the retry count"
set_fact:
retry_count: "{{ retry_count | default(0) | int + 1 }}"
- name: "Checking state for server <{{ inventory_hostname }}> is <{{ server_state }}>" - name: "Checking state for server <{{ inventory_hostname }}> is <{{ server_state }}>"
hetzner.hcloud.hcloud_server: hetzner.hcloud.hcloud_server:
api_token: "{{ hetzner_authentication_ansible }}" api_token: "{{ hetzner_authentication_ansible }}"
name: "{{ inventory_hostname }}" name: "{{ inventory_hostname }}"
server_type: "{{ hetzner_server_type }}" server_type: "{{ hetzner_server_type }}"
image: "{{ hetzner_server_image }}" image: "{{ hetzner_server_image }}"
ssh_keys: "{{ hetzner_ssh_keys }}" ssh_keys: "{{ hetzner_ssh_keys }}"
labels: "{{ hetzner_server_labels }}" labels: "{{ hetzner_server_labels }}"
location: nbg1 location: nbg1
state: "{{ server_state }}" state: "{{ server_state }}"
delegate_to: 127.0.0.1 delegate_to: 127.0.0.1
become: false become: false
async: 300
poll: 5
register: hcloud_response
ignore_errors: yes
- name: "Block - DEBUG: hcloud_response"
debug:
msg: '{{ hcloud_response.msg }}'
when:
- hcloud_response.msg is defined
- name: "Ensure Server is STARTED when server_state=present"
hetzner.hcloud.hcloud_server:
api_token: "{{ hetzner_authentication_ansible }}"
name: "{{ inventory_hostname }}"
state: "started"
delegate_to: 127.0.0.1
become: false
async: 150
poll: 15
register: hcloud_response
when:
- server_state == 'present'
rescue:
- name: "RESCUE - fail: Maximum retries reached"
fail:
msg: "max_retries of {{ max_retries }} reached. Plz check."
when: retry_count | int == max_retries | int
- name: "RESCUE-fail DEBUG: hcloud_response"
debug:
msg: '{{ hcloud_response.msg }}'
- name: "RESCUE: wait_for {{ retry_delay }} sec. between retries"
wait_for:
timeout: "{{ retry_delay }}"
delegate_to: localhost
become: false
- name: "Include _set_server one time again => increase retry_count"
include_tasks: _set_server_state.yml
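(The removed block retries by re-including the task file and incrementing retry_count; a more compact alternative, sketched here under the assumption that the failures are transient, is Ansible's built-in until/retries loop:
- name: "Sketch: retry server state changes with until"
  hetzner.hcloud.hcloud_server:
    api_token: "{{ hetzner_authentication_ansible }}"
    name: "{{ inventory_hostname }}"
    state: "{{ server_state }}"
    # server_type, image, ssh_keys, labels and location as in the task above
  register: hcloud_response
  until: hcloud_response is succeeded
  retries: "{{ max_retries | default(15) }}"
  delay: "{{ retry_delay | default(60) }}"
  delegate_to: 127.0.0.1
  become: false
)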

@ -6,7 +6,7 @@
community.mysql.mysql_db: community.mysql.mysql_db:
name: "{{ target_database }}" name: "{{ target_database }}"
state: absent state: absent
login_unix_socket: /var/run/mysqld/mysqld.sock config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}" login_password: "{{ mysql_root_password }}"
- name: "Import database from <{{ upload_directory }}/{{ database_backup_file }}> to <{{ target_database }}>" - name: "Import database from <{{ upload_directory }}/{{ database_backup_file }}> to <{{ target_database }}>"
@ -14,5 +14,5 @@
name: "{{ target_database }}" name: "{{ target_database }}"
state: import state: import
target: "/{{ upload_directory }}/{{ database_backup_file }}" target: "/{{ upload_directory }}/{{ database_backup_file }}"
login_unix_socket: /var/run/mysqld/mysqld.sock config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}" login_password: "{{ mysql_root_password }}"

@ -37,7 +37,7 @@ keycloak_docker: {
"DB_USER: \"{{ keycloak_postgres_username }}\"", "DB_USER: \"{{ keycloak_postgres_username }}\"",
"DB_PASSWORD: \"{{ keycloak_postgres_password }}\"", "DB_PASSWORD: \"{{ keycloak_postgres_password }}\"",
"DB_ADDR: \"{{ keycloak_postgres_host }}\"", "DB_ADDR: \"{{ keycloak_postgres_host }}\"",
"JDBC_PARAMS: \"sslmode=require\"",
"JAVA_OPTS_APPEND: \"-Dkeycloak.profile.feature.docker=enabled\"", "JAVA_OPTS_APPEND: \"-Dkeycloak.profile.feature.docker=enabled\"",
], ],
networks: [ networks: [

@ -6,128 +6,3 @@ kibana_image_version: "7.16.1"
kibana_advanced_settings: kibana_advanced_settings:
changes: changes:
truncate:maxHeight: 0 truncate:maxHeight: 0
prometheus_es_exporter__username: '{{ stage }}-prometheus-es-exporter'
prometheus_es_exporter__password: '{{ prometheus_es_exporter__password_vault }}'
prometheus_es_exporter__email: 'nso.devops@netgo.de'
kibana_api_endpoint: '{{ shared_service_elastic_stack_kibana_01_hostname }}-kibana.{{ domain }}'
kibana_technical_users:
-
elastic_users:
-
username: '{{ prometheus_es_exporter__username }}'
roles:
- '{{ prometheus_es_exporter__username }}'
full_name: ''
password: '{{ prometheus_es_exporter__password }}'
email: '{{ prometheus_es_exporter__email }}'
enabled: true
elastic_state: present
elastic_roles:
-
elastic_state: present
name: '{{ prometheus_es_exporter__username }}'
elasticsearch:
cluster: []
indices:
- names:
- '{{ stage }}-*'
privileges:
- read
- read_cross_cluster
- view_index_metadata
allow_restricted_indices: false
run_as: []
kibana:
- base: []
feature:
advancedSettings:
- all
dashboard:
- all
discover:
- all
indexPatterns:
- all
savedObjectsManagement:
- all
visualize:
- all
spaces:
- '{{ prometheus_es_exporter__username }}'
elastic_spaces:
-
elastic_state: present
id: &es_space_name '{{ prometheus_es_exporter__username }}'
name: '{{ prometheus_es_exporter__username }}'
description: ''
disabledFeatures:
- canvas
- maps
- ml
- visualize
- enterpriseSearch
- logs
- infrastructure
- apm
- uptime
- observabilityCases
- siem
- monitoring
- fleet
- stackAlerts
- actions
- osquery
- savedObjectsTagging
elastic_index_patterns:
-
attributes:
fieldAttrs: '{}'
fields: "[]"
runtimeFieldMap: "{}"
timeFieldName: "@timestamp"
title: '{{ stage }}-management-*-connect-*'
typeMeta: "{}"
references: []
elastic_state: present
-
attributes:
fieldAttrs: '{}'
fields: "[]"
runtimeFieldMap: "{}"
timeFieldName: "@timestamp"
title: 'uncategorized-*'
typeMeta: "{}"
references: []
elastic_state: present
-
attributes:
fieldAttrs: '{}'
fields: "[]"
runtimeFieldMap: "{}"
timeFieldName: "@timestamp"
title: '{{ stage }}-*-authlog-*'
typeMeta: "{}"
references: []
elastic_state: present
-
attributes:
fieldAttrs: '{}'
fields: "[]"
runtimeFieldMap: "{}"
timeFieldName: "@timestamp"
title: '{{ stage }}-*-syslog-*'
typeMeta: "{}"
references: []
elastic_state: present
-
attributes:
fieldAttrs: '{}'
fields: "[]"
runtimeFieldMap: "{}"
timeFieldName: "@timestamp"
title: '{{ stage }}-monitoring-*'
typeMeta: "{}"
references: []
elastic_state: present

@ -10,7 +10,7 @@
- name: "Dashboards: Get all searches in elasticsearch" - name: "Dashboards: Get all searches in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/_find?per_page=10000&type={{ es_object_type }}" url: "https://{{ api_endpoint }}{{ api_path }}/_find?per_page=10000&type={{ es_object_type }}"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -47,7 +47,7 @@
- name: "Dashboards: Get all searches in elasticsearch" - name: "Dashboards: Get all searches in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}/s/{{ es_space }}/api/saved_objects/_find?per_page=10000&type=search" url: "https://{{ api_endpoint }}/s/{{ es_space }}/api/saved_objects/_find?per_page=10000&type=search"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -121,7 +121,7 @@
- name: "Create {{ es_object_type }} <<{{ elastic_dashboard.attributes.title }}>>" - name: "Create {{ es_object_type }} <<{{ elastic_dashboard.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -140,7 +140,7 @@
- name: "Update {{ es_object_type }} <<{{ elastic_dashboard.attributes.title }}>>" - name: "Update {{ es_object_type }} <<{{ elastic_dashboard.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: 'https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_dashboard_object[0]["id"] }}' url: 'https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_dashboard_object[0]["id"] }}'
method: PUT method: PUT
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -159,7 +159,7 @@
- name: "DELETE {{ es_object_type }} <<{{ elastic_dashboard.attributes.title }}>>" - name: "DELETE {{ es_object_type }} <<{{ elastic_dashboard.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: 'https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_dashboard_object[0]["id"] }}' url: 'https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_dashboard_object[0]["id"] }}'
method: DELETE method: DELETE
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -3,53 +3,45 @@
set_fact: set_fact:
api_path: '/s/{{ es_space }}/api/saved_objects' api_path: '/s/{{ es_space }}/api/saved_objects'
es_object_type: 'index-pattern' es_object_type: 'index-pattern'
index_pattern_exists: False indexpattern_exists: False
elastic_index_pattern_cleaned: {} elastic_indexpattern_cleaned: {}
- name: "Get all index patterns in elasticsearch" - name: "Get all indexpatterns in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/_find?per_page=10000&type={{ es_object_type }}" url: "https://{{ api_endpoint }}{{ api_path }}/_find?per_page=10000&type=index-pattern"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
password: "{{ elastic_admin_password }}" password: "{{ elastic_admin_password }}"
force_basic_auth: yes force_basic_auth: yes
register: all_index_patterns register: all_indexpatterns
become: false become: false
- name: "Lookup index pattern <{{ elastic_index_pattern.attributes.title }}>" - name: "Lookup index pattern object if exists"
set_fact: set_fact:
lookup_indexpattern_object: '{{ all_index_patterns.json | community.general.json_query(querystr1) | first | community.general.json_query(indexpattern_query) }}' lookup_indexpattern_object: '{{ all_indexpatterns.json | community.general.json_query(querystr1) | first | community.general.json_query(indexpattern_query) }}'
vars: vars:
querystr1: "[saved_objects[*]]" querystr1: "[saved_objects[*]]"
indexpattern_query: "[?attributes.title=='{{ elastic_index_pattern.attributes.title }}']" indexpattern_query: "[?attributes.title=='{{ elastic_indexpattern.attributes.title }}']"
- name: "Set switch VAR" - name: "Set switch VAR"
set_fact: set_fact:
index_pattern_exists: True indexpattern_exists: True
when: when:
- lookup_indexpattern_object | length > 0 - lookup_indexpattern_object | length > 0
- name: "Drop not needed key from dict" - name: "Drop not needed key from dict"
set_fact: set_fact:
elastic_index_pattern_cleaned: "{{ elastic_index_pattern_cleaned | combine({item.key: item.value}) }}" elastic_indexpattern_cleaned: "{{ elastic_indexpattern_cleaned | combine({item.key: item.value}) }}"
with_dict: '{{ elastic_index_pattern }}' with_dict: '{{ elastic_indexpattern }}'
when: when:
- item.key not in ['elastic_state'] - item.key not in ['elastic_state']
- name: "Create <{{ es_object_type }}> <{{ elastic_index_pattern.attributes.title }}>" - name: "Create {{ es_object_type }} <<{{ elastic_indexpattern.attributes.title }}>>"
debug:
msg: "{{ elastic_index_pattern_cleaned }}"
become: false
when:
- not index_pattern_exists
- elastic_index_pattern.elastic_state == 'present'
- name: "Create <{{ es_object_type }}> <{{ elastic_index_pattern.attributes.title }}>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -59,16 +51,16 @@
Content-Type: application/json Content-Type: application/json
kbn-xsrf: true kbn-xsrf: true
body_format: json body_format: json
body: '{{ elastic_index_pattern_cleaned | to_json }}' body: '{{ elastic_indexpattern_cleaned | to_json }}'
become: false become: false
when: when:
- not index_pattern_exists - not indexpattern_exists
- elastic_index_pattern.elastic_state == 'present' - elastic_indexpattern.elastic_state == 'present'
- name: "Update {{ es_object_type }} <<{{ elastic_index_pattern.attributes.title }}>>" - name: "Update {{ es_object_type }} <<{{ elastic_indexpattern.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: 'https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_indexpattern_object[0]["id"] }}' url: 'https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_indexpattern_object[0]["id"] }}'
method: PUT method: PUT
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -78,16 +70,16 @@
Content-Type: application/json Content-Type: application/json
kbn-xsrf: true kbn-xsrf: true
body_format: json body_format: json
body: '{{ elastic_index_pattern_cleaned | to_json }}' body: '{{ elastic_indexpattern_cleaned | to_json }}'
become: false become: false
when: when:
- index_pattern_exists - indexpattern_exists
- elastic_index_pattern.elastic_state == 'present' - elastic_indexpattern.elastic_state == 'present'
- name: "DELETE {{ es_object_type }} <<{{ elastic_index_pattern.attributes.title }}>>" - name: "DELETE {{ es_object_type }} <<{{ elastic_indexpattern.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: 'https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_indexpattern_object[0]["id"] }}' url: 'https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_indexpattern_object[0]["id"] }}'
method: DELETE method: DELETE
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -98,5 +90,5 @@
kbn-xsrf: true kbn-xsrf: true
become: false become: false
when: when:
- index_pattern_exists - indexpattern_exists
- elastic_index_pattern.elastic_state == 'absent' - elastic_indexpattern.elastic_state == 'absent'
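(All of these tasks drive Kibana's saved-objects HTTP API through the uri module; the same lookup can be reproduced by hand, with endpoint, space and credentials as placeholders:
curl -s -u 'admin:secret' -H 'kbn-xsrf: true' 'https://kibana.example.com/s/default/api/saved_objects/_find?per_page=10000&type=index-pattern'
)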

@ -8,7 +8,7 @@
- name: "Get all roles in elasticsearch" - name: "Get all roles in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}" url: "https://{{ api_endpoint }}{{ api_path }}"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -39,7 +39,7 @@
- name: "Create role <<{{ elastic_role.name }}>>" - name: "Create role <<{{ elastic_role.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_role.name }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_role.name }}"
method: PUT method: PUT
status_code: [204] status_code: [204]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -58,7 +58,7 @@
- name: "Update role <<{{ elastic_role.name }}>>" - name: "Update role <<{{ elastic_role.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_role.name }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_role.name }}"
method: PUT method: PUT
status_code: [204] status_code: [204]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -77,7 +77,7 @@
- name: "DELETE role <<{{ elastic_role.name }}>>" - name: "DELETE role <<{{ elastic_role.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_role.name }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_role.name }}"
method: DELETE method: DELETE
status_code: [204] status_code: [204]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -10,7 +10,7 @@
- name: "Get all searches in elasticsearch" - name: "Get all searches in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/_find?per_page=10000&type=search" url: "https://{{ api_endpoint }}{{ api_path }}/_find?per_page=10000&type=search"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -46,7 +46,7 @@
- name: "Get all indexpatterns in elasticsearch" - name: "Get all indexpatterns in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}/s/{{ es_space }}/api/saved_objects/_find?per_page=10000&type=index-pattern" url: "https://{{ api_endpoint }}/s/{{ es_space }}/api/saved_objects/_find?per_page=10000&type=index-pattern"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -109,7 +109,7 @@
- name: "Create {{ es_object_type }} <<{{ elastic_search.attributes.title }}>>" - name: "Create {{ es_object_type }} <<{{ elastic_search.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -128,7 +128,7 @@
- name: "Update {{ es_object_type }} <<{{ elastic_search.attributes.title }}>>" - name: "Update {{ es_object_type }} <<{{ elastic_search.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: 'https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_search_object[0]["id"] }}' url: 'https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_search_object[0]["id"] }}'
method: PUT method: PUT
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -147,7 +147,7 @@
- name: "DELETE {{ es_object_type }} <<{{ elastic_search.attributes.title }}>>" - name: "DELETE {{ es_object_type }} <<{{ elastic_search.attributes.title }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: 'https://{{ kibana_api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_search_object[0]["id"] }}' url: 'https://{{ api_endpoint }}{{ api_path }}/{{ es_object_type }}/{{ lookup_search_object[0]["id"] }}'
method: DELETE method: DELETE
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -8,7 +8,7 @@
- name: "Get all spaces in elasticsearch" - name: "Get all spaces in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}" url: "https://{{ api_endpoint }}{{ api_path }}"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -43,7 +43,7 @@
- name: "Create space <<{{ elastic_space.name }}>>" - name: "Create space <<{{ elastic_space.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}" url: "https://{{ api_endpoint }}{{ api_path }}"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -62,7 +62,7 @@
- name: "Update space <<{{ elastic_space.name }}>>" - name: "Update space <<{{ elastic_space.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_space.name }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_space.name }}"
method: PUT method: PUT
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -81,7 +81,7 @@
- name: "Update space advanced settings <<{{ elastic_space.name }}>>" - name: "Update space advanced settings <<{{ elastic_space.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}/s/{{ elastic_space.name }}/api/kibana/settings" url: "https://{{ api_endpoint }}/s/{{ elastic_space.name }}/api/kibana/settings"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -100,7 +100,7 @@
- name: "DELETE space <<{{ elastic_space.name }}>>" - name: "DELETE space <<{{ elastic_space.name }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_space.name }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_space.name }}"
method: DELETE method: DELETE
status_code: [204] status_code: [204]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -9,7 +9,7 @@
- name: "Get all users in elasticsearch" - name: "Get all users in elasticsearch"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}" url: "https://{{ api_endpoint }}{{ api_path }}"
method: GET method: GET
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -48,7 +48,7 @@
- name: "Create user <<{{ elastic_user.username }}>>" - name: "Create user <<{{ elastic_user.username }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_user.username }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_user.username }}"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -67,7 +67,7 @@
- name: "Update user <<{{ elastic_user.username }}>>" - name: "Update user <<{{ elastic_user.username }}>>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_user.username }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_user.username }}"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"
@ -86,7 +86,7 @@
- name: "DELETE user << elastic_user.username >>" - name: "DELETE user << elastic_user.username >>"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/{{ elastic_user.username }}" url: "https://{{ api_endpoint }}{{ api_path }}/{{ elastic_user.username }}"
method: DELETE method: DELETE
status_code: [204] status_code: [204]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -1,5 +1,5 @@
--- ---
- name: "Importing kibana objects to <{{ kibana_api_endpoint }}>" - name: "Importing kibana objects to <{{ api_endpoint }}>"
include_tasks: import_service_ojects.yml include_tasks: import_service_ojects.yml
loop: "{{ es_index_pattern_services }}" loop: "{{ es_index_pattern_services }}"
loop_control: loop_control:
@ -9,7 +9,7 @@
- name: "Setting default index pattern" - name: "Setting default index pattern"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}/s/{{ stage }}-{{ tenant_id }}/api/kibana/settings" url: "https://{{ api_endpoint }}/s/{{ stage }}-{{ tenant_id }}/api/kibana/settings"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -1,34 +0,0 @@
---
- name: "Do some stuff in elastic with spaces ... "
include_role:
name: kibana
tasks_from: _configure_spaces.yml
loop: "{{ technical_user.elastic_spaces }}"
loop_control:
loop_var: elastic_space
- name: "Do some stuff in elastic with roles ..."
include_role:
name: kibana
tasks_from: _configure_roles.yml
loop: "{{ technical_user.elastic_roles }}"
loop_control:
loop_var: elastic_role
- name: "Do some stuff in elastic with users ..."
include_role:
name: kibana
tasks_from: _configure_users.yml
loop: "{{ technical_user.elastic_users }}"
loop_control:
loop_var: elastic_user
- name: "Do some stuff in elastic with index pattern ..."
vars:
es_space: "default"
include_role:
name: kibana
tasks_from: _configure_indexpattern.yml
loop: "{{ technical_user.elastic_index_patterns }}"
loop_control:
loop_var: elastic_index_pattern

@ -32,7 +32,7 @@
- name: "Importing service objects to kibana" - name: "Importing service objects to kibana"
delegate_to: localhost delegate_to: localhost
uri: uri:
url: "https://{{ kibana_api_endpoint }}{{ api_path }}/_import?overwrite=true" url: "https://{{ api_endpoint }}{{ api_path }}/_import?overwrite=true"
method: POST method: POST
status_code: [200] status_code: [200]
user: "{{ elastic_admin_username }}" user: "{{ elastic_admin_username }}"

@ -81,15 +81,3 @@
- update_certs - update_certs
- update_config - update_config
- update_deployment - update_deployment
- name: "Create ES objects for technical users"
include_tasks: add_technical_users.yml
loop: '{{ kibana_technical_users }}'
loop_control:
loop_var: technical_user
args:
apply:
tags:
- kibana-technical-users
tags:
- kibana-technical-users

@ -41,8 +41,6 @@ k8s_argocd_helm__release_values:
hostnames: hostnames:
- "{{ shared_service_gitea_hostname }}" - "{{ shared_service_gitea_hostname }}"
controller: controller:
logLevel: info
logFormat: json
metrics: metrics:
enabled: true enabled: true
serviceMonitor: serviceMonitor:
@ -51,8 +49,6 @@ k8s_argocd_helm__release_values:
additionalLabels: additionalLabels:
release: "{{ k8s_prometheus_helm__name }}" release: "{{ k8s_prometheus_helm__name }}"
repoServer: repoServer:
logLevel: info
logFormat: json
metrics: metrics:
enabled: true enabled: true
serviceMonitor: serviceMonitor:
@ -112,8 +108,6 @@ k8s_argocd_helm__release_values:
- mountPath: /gnupg-home - mountPath: /gnupg-home
name: gnupg-home name: gnupg-home
server: server:
logLevel: info
logFormat: json
config: config:
oidc.config: | oidc.config: |
name: Keycloak name: Keycloak
@ -154,11 +148,11 @@ k8s_argocd_helm__release_values:
hosts: hosts:
- "{{ k8s_argocd_helm__domain }}" - "{{ k8s_argocd_helm__domain }}"
additionalProjects: additionalProjects:
- name: bootstrap - name: infrastructure
namespace: '{{ k8s_argocd_helm__release_namespace }}' namespace: '{{ k8s_argocd_helm__release_namespace }}'
additionalLabels: {} additionalLabels: {}
additionalAnnotations: {} additionalAnnotations: {}
description: application declarations for bootstrapping k8s cluster with argo-cd description: apps needed for maintaining stuff
sourceRepos: sourceRepos:
- '*' - '*'
destinations: destinations:
@ -169,54 +163,77 @@ k8s_argocd_helm__release_values:
kind: '*' kind: '*'
orphanedResources: orphanedResources:
warn: false warn: false
- name: kube-system additionalApplications:
-
name: awx-operator
namespace: '{{ k8s_argocd_helm__release_namespace }}' namespace: '{{ k8s_argocd_helm__release_namespace }}'
additionalLabels: {} destination:
additionalAnnotations: {} namespace: awx
description: applications for kube-system namespace
sourceRepos:
- '*'
destinations:
- namespace: kube-system
server: https://kubernetes.default.svc server: https://kubernetes.default.svc
clusterResourceWhitelist: project: infrastructure
- group: '*' source:
kind: '*' path: config/default
orphanedResources: repoURL: https://{{ shared_service_gitea_hostname }}/argocd/awx-operator.git
warn: false targetRevision: '{{ awx_operator_revision | default(stage) }}'
- name: infrastructure syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
-
name: awx-smardigo
namespace: '{{ k8s_argocd_helm__release_namespace }}' namespace: '{{ k8s_argocd_helm__release_namespace }}'
additionalLabels: {} destination:
additionalAnnotations: {} namespace: awx
description: infrastructure applications
sourceRepos:
- '*'
destinations:
- namespace: '*'
server: https://kubernetes.default.svc server: https://kubernetes.default.svc
clusterResourceWhitelist: project: infrastructure
- group: '*' source:
kind: '*' path: apps/{{ stage }}/awx
orphanedResources: repoURL: https://{{ shared_service_gitea_hostname }}/argocd/argocd.git
warn: false targetRevision: '{{ awx_smardigo_revision | default(stage) }}'
additionalApplications: syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- -
name: bootstrap name: jaeger-operator
namespace: '{{ k8s_argocd_helm__release_namespace }}' namespace: '{{ k8s_argocd_helm__release_namespace }}'
destination: destination:
namespace: bootstrap namespace: observability
server: https://kubernetes.default.svc server: https://kubernetes.default.svc
project: bootstrap project: infrastructure
source: source:
path: config/default path: config/default
repoURL: https://{{ shared_service_gitea_hostname }}/argocd/jaeger-operator.git
targetRevision: '{{ jaeger_operator_revision | default(stage) }}'
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
-
name: jaeger
namespace: '{{ k8s_argocd_helm__release_namespace }}'
destination:
namespace: jaeger
server: https://kubernetes.default.svc
project: infrastructure
source:
path: apps/{{ stage }}/jaeger
repoURL: https://{{ shared_service_gitea_hostname }}/argocd/argocd.git repoURL: https://{{ shared_service_gitea_hostname }}/argocd/argocd.git
targetRevision: '{{ awx_smardigo_revision | default(stage) }}' targetRevision: '{{ jaeger_smardigo_revision | default(stage) }}'
syncPolicy: syncPolicy:
automated: automated:
prune: true prune: true
selfHeal: true selfHeal: true
syncOptions: syncOptions:
- CreateNamespace=true - CreateNamespace=true
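For orientation: each entry under additionalApplications on the master side is rendered by the argo-cd Helm chart into an Application resource. A minimal sketch of what the awx-operator entry above resolves to, assuming the chart's stock templating (the Jinja expressions are filled in by Ansible before the chart ever sees them):

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: awx-operator
  namespace: "{{ k8s_argocd_helm__release_namespace }}"
spec:
  project: infrastructure
  source:
    repoURL: https://{{ shared_service_gitea_hostname }}/argocd/awx-operator.git
    path: config/default
    targetRevision: "{{ awx_operator_revision | default(stage) }}"
  destination:
    server: https://kubernetes.default.svc
    namespace: awx
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true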
dex:
enabled: false
redis: redis:
metrics: metrics:
enabled: true enabled: true
@ -225,10 +242,8 @@ k8s_argocd_helm__release_values:
namespace: "{{ k8s_argocd_helm__release_namespace }}" namespace: "{{ k8s_argocd_helm__release_namespace }}"
additionalLabels: additionalLabels:
release: "{{ k8s_prometheus_helm__name }}" release: "{{ k8s_prometheus_helm__name }}"
dex:
enabled: false
applicationSet:
enabled: false
configs: configs:
secret: secret:
argocdServerAdminPassword: '{{ argocd_server_admin_password | password_hash("bcrypt") }}' argocdServerAdminPassword: '{{ argocd_server_admin_password | password_hash("bcrypt") }}'
k8s_argocd__crd_applicationset_version: v0.4.0

@ -1,4 +1,6 @@
--- ---
awx_admin_username: "awx-admin"
awx_admin_password: "{{ awx_admin_password_vault }}"
awx_ansible_username: ansible awx_ansible_username: ansible
awx_ansible_password: ansible awx_ansible_password: ansible
@ -12,7 +14,6 @@ awx_job_templates:
- name: "create-database-backup" - name: "create-database-backup"
- name: "create-kibana-objects" - name: "create-kibana-objects"
- name: "create-realm" - name: "create-realm"
- name: "create-remote-database-backup"
- name: "create-server" - name: "create-server"
- name: "create-service" - name: "create-service"
- name: "import-database" - name: "import-database"
@ -21,6 +22,5 @@ awx_job_templates:
- name: "remove-server" - name: "remove-server"
- name: "remove-service" - name: "remove-service"
- name: "restore-database-backup" - name: "restore-database-backup"
- name: "restore-remote-database-backup"
- name: "update-monitoring" - name: "update-monitoring"
- name: "update-service-state" - name: "update-service-state"

@ -25,6 +25,27 @@
tags: tags:
- awx - awx
- name: "Get awx admin password from k8s secret object"
become: yes
kubernetes.core.k8s_info:
api_version: v1
kind: Secret
name: awx-admin-password
namespace: awx
register: awx_admin_creds
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- awx
- name: "Define some VARs"
set_fact:
awx_admin_password: '{{ ( awx_admin_creds.resources | first ).data.password | b64decode }}'
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- awx
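A note on the lookup above: the secret stores the password base64-encoded, so the b64decode filter is required. A minimal sketch for sanity-checking the decoded value (task name and assertion are illustrative, not part of the original play):

- name: "Verify decoded awx admin password (illustrative)"
  ansible.builtin.assert:
    that:
      - awx_admin_password is defined
      - awx_admin_password | length > 0
    fail_msg: "awx-admin-password secret did not decode to a usable value"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
  tags:
    - awx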
- name: "Authenticating with awx server" - name: "Authenticating with awx server"
delegate_to: localhost delegate_to: localhost
uri: uri:

@ -1,4 +1,4 @@
--- ---
k8s_namespace: "default" k8s_namespace: "default"
k8s_docker_registry_key: "harbor-pull-secret-key" k8s_docker_registry_key: "{{ stage }}-harbor-key"

@ -20,18 +20,3 @@ connect_connections:
authType: "BASIC_AUTH" authType: "BASIC_AUTH"
username: "{{ harbor_admin_username }}" username: "{{ harbor_admin_username }}"
password: "{{ harbor_admin_password }}" password: "{{ harbor_admin_password }}"
current_realm_users_base:
- username: "{{ management_admin_username }}"
password: "{{ management_admin_password }}"
email: "{{ connect_admin_email }}"
requiredActions: []
current_realm_password_policy: ''
connect_config_delete_scope_enabled: true
connect_config_local_import_enabled: true
connect_configurations:
- backup
- provisioning
- template

@ -1,64 +0,0 @@
---
### tags:
### update_configurations
- name: "Running all block tasks on locahost"
delegate_to: 127.0.0.1
become: false
block:
- name: "Checking connect is running on <{{ connect_base_url }}>"
uri:
url: "{{ http_s }}://{{ connect_base_url }}/api/profile-info"
method: GET
headers:
"Smardigo-User-Token": "{{ smardigo_auth_token_value }}"
status_code: [200]
register: connect_profile_info
delay: 15
retries: 10
until: connect_profile_info.status in [200]
tags:
- update_configurations
- name: "Creating archives of smardigo configuration"
ansible.builtin.tempfile:
state: directory
suffix: temp
register: temp
tags:
- update_configurations
- name: "Creating archives of smardigo configuration"
archive:
path: "./smardigo/{{ item }}"
dest: "{{ temp.path }}/{{ item }}.zip"
format: zip
mode: '0644'
with_items: "{{ connect_configurations }}"
tags:
- update_configurations
- name: "Upload configuration zip file to <{{ connect_base_url }}>"
uri:
url: "{{ http_s }}://{{ connect_base_url }}/api/v1/config/import-zip"
method: POST
headers:
Smardigo-User-Token: "{{ smardigo_auth_token_value }}"
body_format: form-multipart
body:
file:
filename: "{{ temp.path }}/{{ item }}.zip"
mime_type: "application/zip"
status_code: [200]
register: config_import_result
with_items: "{{ connect_configurations }}"
tags:
- update_configurations
- name: "Upload configuration zip file to <{{ connect_base_url }}>"
debug:
msg: '{{ config_import_result }}'
tags:
- update_configurations

@ -1,8 +1,7 @@
--- ---
### tags: ### tags:
### update_connections ### update_configuration
### update_configurations
- name: "Creating smardigo user token" - name: "Creating smardigo user token"
smardigo_user_token: smardigo_user_token:
@ -25,27 +24,17 @@
name: connect_postgres name: connect_postgres
vars: vars:
ansible_ssh_host: "{{ stage }}-postgres-01.{{ domain }}" ansible_ssh_host: "{{ stage }}-postgres-01.{{ domain }}"
tags:
- always
- name: "Create realm for <{{ inventory_hostname }}> if necessary" - name: "Create realm for <{{ inventory_hostname }}> if necessary"
include_role: include_role:
name: connect_realm name: connect_realm
tags:
- always
- name: "Create connect for <{{ inventory_hostname }}> if necessary" - name: "Create connect for <{{ inventory_hostname }}> if necessary"
include_role: include_role:
name: connect name: connect
- name: "Configure connect connections"
include_tasks: connections.yml
when:
smardigo_auth_token_value is defined
tags: tags:
- update_connections - always
- name: "Configure connect configurations"
include_tasks: configurations.yml
vars:
local_folder_name: "/tmp/smardigo"
when:
smardigo_auth_token_value is defined
tags:
- update_configurations

@ -1,14 +1,5 @@
--- ---
ansible_managed: 'do not edit manually - file powered by ansible'
mariadb_server_version: '10.6'
mariadb_server_global_my_cnf: '/etc/mysql/my.cnf'
mariadb_server_config_dir: '/etc/mysql/mariadb.conf.d'
my_cnf_file: '/root/.my.cnf' my_cnf_file: '/root/.my.cnf'
database_engine: maria database_engine: maria
backup_dest_dir: "{{ backup_directory }}/{{ database_engine }}/{{ get_current_date }}" backup_dest_dir: "{{ backup_directory }}/{{ database_engine }}/{{ ansible_date_time.date }}"
backup_status_file: '{{ backup_dest_dir }}/backup_finished' backup_status_file: '{{ backup_dest_dir }}/backup_finished'
mysql_root_username: "{{ mysql_root_username_vault }}"
mysql_root_password: "{{ mysql_root_password_vault }}"

@ -1,52 +0,0 @@
-- MySQL dump 10.19 Distrib 10.3.34-MariaDB, for debian-linux-gnu (x86_64)
--
-- Host: localhost Database: stararchitekt
-- ------------------------------------------------------
-- Server version 10.3.34-MariaDB-0ubuntu0.20.04.1
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `movie_quotes`
--
DROP TABLE IF EXISTS `movie_quotes`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `movie_quotes` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`movie` varchar(255) DEFAULT NULL,
`quote` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `movie_quotes`
--
LOCK TABLES `movie_quotes` WRITE;
/*!40000 ALTER TABLE `movie_quotes` DISABLE KEYS */;
INSERT INTO `movie_quotes` VALUES (1,'wargames','Shall we play');
/*!40000 ALTER TABLE `movie_quotes` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2022-03-31 9:46:16

@ -1,12 +0,0 @@
---
- name: "prometheus-mysqld-exporter restart"
service:
name: prometheus-mysqld-exporter
state: restarted
enabled: yes
- name: "restart mysql"
service:
name: mariadb
state: restarted
enabled: yes

@ -18,37 +18,13 @@
user={{ mysql_root_username }} user={{ mysql_root_username }}
password={{ mysql_root_password }} password={{ mysql_root_password }}
- name: "Block: gpg stuff" # there is no ansible module already in place
block:
- name: Create temp dir
ansible.builtin.tempfile:
state: directory
suffix: gitcheckout
path: /tmp
register: tempdir
- name: "Checkout repo for gpg communication-keys"
ansible.builtin.git:
repo: 'https://{{ gituser | default("gitea-admin") | urlencode }}:{{ gitea_admin_password | urlencode }}@{{ stage }}-gitea-01.smardigo.digital/gitea-admin/communication-keys.git'
dest: '{{ tempdir.path }}'
version: master
# there is no ansible gpg module already in place
# linting violation needs to be whitelisted
- name: "Importing stage specific automation gpg-key" # noqa command-instead-of-shell
shell: 'gpg --import {{ tempdir.path }}/smardigo_automation_{{ stage }}.gpg.pub'
# there is no ansible module already in place for (mariabackup|gpg)
# so using shell module # so using shell module
- name: "Creating mariabackup ... + doing async check if successful or not" - name: "Creating mariabackup ... + doing async check if successful or not"
become: yes become: yes
vars:
backup_file: '{{ backup_dest_dir }}/mariabackupstream_{{ current_date_time }}.gz'
shell: | shell: |
set -o pipefail set -o pipefail
/usr/bin/mariabackup --defaults-file={{ my_cnf_file }} --backup --stream=xbstream | gzip > {{ backup_file }} && \ /usr/bin/mariabackup --defaults-file={{ my_cnf_file }} --backup --stream=xbstream | gzip > {{ backup_dest_dir }}/mariabackupstream_{{ current_date_time }}.gz
gpg --encrypt --recipient "smardigo automation {{ stage | upper }}" --trust-model always {{ backup_file }} && \
rm {{ backup_file }}
args: args:
executable: /bin/bash executable: /bin/bash
async: 3600 # allows duration for task up to 3600sec async: 3600 # allows duration for task up to 3600sec
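On the master side the backup is gzipped in place; the feature branch additionally gpg-encrypts it and deletes the plaintext (gpg writes <file>.gpg by default). A minimal verification sketch, assuming the matching private key is in root's keyring as it is on the restore servers further below; the target path follows the same naming convention but is illustrative:

- name: "Verify encrypted mariabackup stream decrypts and gunzips cleanly (illustrative)"
  become: yes
  vars:
    backup_file_encrypted: '{{ backup_dest_dir }}/mariabackupstream_{{ current_date_time }}.gz.gpg'
  shell: |
    set -o pipefail
    gpg --batch --decrypt '{{ backup_file_encrypted }}' | gunzip > /dev/null
  args:
    executable: /bin/bash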
@ -70,8 +46,8 @@
become: yes become: yes
ansible.builtin.file: ansible.builtin.file:
path: '{{ backup_dest_dir }}' path: '{{ backup_dest_dir }}'
owner: '{{ backupuser_user_name }}' owner: '{{ backupuser_username }}'
group: '{{ backupuser_user_name }}' group: '{{ backupuser_username }}'
recurse: yes recurse: yes
- name: "Remove {{ my_cnf_file }} file" - name: "Remove {{ my_cnf_file }} file"

@ -6,7 +6,7 @@
collation: "{{ item.collation | default('utf8_general_ci') }}" collation: "{{ item.collation | default('utf8_general_ci') }}"
encoding: "{{ item.encoding | default('utf8') }}" encoding: "{{ item.encoding | default('utf8') }}"
state: "{{ item.state | default('present') }}" state: "{{ item.state | default('present') }}"
login_unix_socket: /var/run/mysqld/mysqld.sock config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}" login_password: "{{ mysql_root_password }}"
with_items: "{{ mysql_databases }}" with_items: "{{ mysql_databases }}"
@ -18,7 +18,7 @@
state: "{{ item.state | default('present') }}" state: "{{ item.state | default('present') }}"
append_privs: "{{ item.append_privs | default('no') }}" append_privs: "{{ item.append_privs | default('no') }}"
encrypted: "{{ item.encrypted | default('no') }}" encrypted: "{{ item.encrypted | default('no') }}"
login_unix_socket: /var/run/mysqld/mysqld.sock config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}" login_password: "{{ mysql_root_password }}"
host: "{{ item.host }}" host: "{{ item.host }}"
with_items: "{{ mysql_users }}" with_items: "{{ mysql_users }}"

@ -1,40 +0,0 @@
---
# task bundle simply copied from main.yml
# TODO: migrate to https://github.com/cloudalchemy/ansible-mysqld_exporter
- name: "Install prometheus-mysqld-exporter"# noqa package-latest
package:
name: "{{ item }}"
state: latest
with_items:
- prometheus-mysqld-exporter
- name: Ensure prometheus user for prometheus-mysqld-exporter exists
community.mysql.mysql_user:
name: "prometheus"
priv: "*.*:PROCESS,REPLICATION CLIENT,SELECT"
login_unix_socket: /run/mysqld/mysqld.sock
login_password: "{{ mysql_root_password }}"
register: mysql_exporter_user_creds
notify: prometheus-mysqld-exporter restart
- name: Ensure prometheus-mysqld-exporter is configured
lineinfile:
regex: "^DATA_SOURCE_NAME="
line: 'DATA_SOURCE_NAME="prometheus@unix(/run/mysqld/mysqld.sock)/"'
path: /etc/default/prometheus-mysqld-exporter
register: mysql_exporter_data_source
notify: prometheus-mysqld-exporter restart
- name: Setup prometheus-mysqld-exporter interface bind
lineinfile:
path: /etc/default/prometheus-mysqld-exporter
regex: "^ARGS="
line: "ARGS=\"--web.listen-address='{{ stage_private_server_ip }}:{{ monitor_port_maria }}'\""
register: mysql_exporter_args
notify: prometheus-mysqld-exporter restart
- name: "Ensure prometheus-mysqld-exporter is running"
service:
name: prometheus-mysqld-exporter
state: started
enabled: yes

@ -2,77 +2,34 @@
### tags: ### tags:
- name: "Add apt-key for " - name: Update
ansible.builtin.apt_key: apt: update_cache=yes force_apt_get=yes cache_valid_time=3600
url: https://mariadb.org/mariadb_release_signing_key.asc
state: present
- name: "Add source repository for mariadb-server"
ansible.builtin.apt_repository:
repo: "deb [arch=amd64] https://ftp.agdsn.de/pub/mirrors/mariadb/repo/{{ mariadb_server_version }}/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} main"
state: present
- name: "Install MariaDB "# noqa package-latest - name: MariaDB | install # noqa package-latest
package: package:
name: "{{ item }}" name: "{{ item }}"
state: latest state: latest
with_items: with_items:
- mariadb-server - mariadb-server
- mariadb-backup
- python3-pymysql - python3-pymysql
- prometheus-mysqld-exporter
- name: "Set vars" - name: Stop prometheus-mysqld-exporter
set_fact: service:
cert_private_key: '{{ mariadb_server_config_dir }}/{{ inventory_hostname }}.{{ domain }}-key.pem' name: prometheus-mysqld-exporter
cert_public_key: '{{ mariadb_server_config_dir }}/{{ inventory_hostname }}.{{ domain }}-crt.pem' state: stopped
ca_cert: '{{ mariadb_server_config_dir }}/ca-certificate.pem' enabled: no
- name: "Include role for self-signed CA"
include_role:
name: selfsigned_ca
- name: "Create certs with selfsigned CA"
include_role:
name: selfsigned_ca
tasks_from: _create_cert
vars:
selfsigned_ca_cert_private_key: '{{ cert_private_key }}'
selfsigned_ca_cert_private_key_group: mysql
selfsigned_ca_cert_public_key: '{{ cert_public_key }}'
selfsigned_ca_cacert: '{{ ca_cert }}'
selfsigned_ca_cert_subject:
CN: '{{ inventory_hostname }}.{{ domain }}'
selfsigned_ca_cert_altnames:
- 'DNS:{{ inventory_hostname }}.{{ domain }}'
- 'DNS:{{ inventory_hostname }}'
selfsigned_ca_trigger_handler: restart mysql
- name: "Create global my.cnf for mariadb"
copy:
dest: '{{ mariadb_server_global_my_cnf }}'
owner: root
group: root
mode: '0644'
content: |
{{ ansible_managed | comment }}
!includedir /etc/mysql/mariadb.conf.d/
notify: restart mysql
- name: "Create mariadb cnf file" - name: Fix binding..
vars: ansible.builtin.lineinfile:
mariadb_server_bind_address: '{{ stage_private_server_ip }}' path: /etc/mysql/mariadb.conf.d/50-server.cnf
template: regexp: '^bind-address'
src: 50-server.cnf line: 'bind-address={{ ansible_all_ipv4_addresses | ansible.netcommon.ipaddr(shared_service_network) | first }}'
dest: '{{ mariadb_server_config_dir }}/'
mode: '0644'
owner: root
group: root
notify: restart mysql
- name: Ensure service is started - name: Ensure service is started
service: service:
name: mariadb name: mariadb
state: started state: restarted
enabled: yes enabled: yes
- name: Check if root password is set - name: Check if root password is set
@ -97,10 +54,9 @@
collation: "{{ item.collation | default('utf8_general_ci') }}" collation: "{{ item.collation | default('utf8_general_ci') }}"
encoding: "{{ item.encoding | default('utf8') }}" encoding: "{{ item.encoding | default('utf8') }}"
state: "{{ item.state | default('present') }}" state: "{{ item.state | default('present') }}"
login_unix_socket: /run/mysqld/mysqld.sock config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}" login_password: "{{ mysql_root_password }}"
with_items: "{{ mysql_databases }}" with_items: "{{ mysql_databases }}"
when: mysql_databases is defined
- name: Ensure MySQL users are present. - name: Ensure MySQL users are present.
community.mysql.mysql_user: community.mysql.mysql_user:
@ -110,36 +66,40 @@
state: "{{ item.state | default('present') }}" state: "{{ item.state | default('present') }}"
append_privs: "{{ item.append_privs | default('no') }}" append_privs: "{{ item.append_privs | default('no') }}"
encrypted: "{{ item.encrypted | default('no') }}" encrypted: "{{ item.encrypted | default('no') }}"
login_unix_socket: /run/mysqld/mysqld.sock config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}" login_password: "{{ mysql_root_password }}"
host: "{{ item.host }}" host: "{{ item.host }}"
with_items: "{{ mysql_users }}" with_items: "{{ mysql_users }}"
when: mysql_users is defined
- name: "Install promethues mysqld-exporter" - name: Ensure prometheus user for prometheus-mysqld-exporter exists
include_tasks: install_mysqld_exporter.yml community.mysql.mysql_user:
when: mariadb_server_with_mysqld_exporter | default(True) name: "prometheus"
priv: "*.*:PROCESS,REPLICATION CLIENT,SELECT"
config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
login_password: "{{ mysql_root_password }}"
- name: Ensure prometheus-mysqld-exporter is configured
lineinfile:
regex: "^DATA_SOURCE_NAME="
line: 'DATA_SOURCE_NAME="prometheus@unix(/run/mysqld/mysqld.sock)/"'
path: /etc/default/prometheus-mysqld-exporter
register: mysql_exporter_data_source
- name: Setup prometheus-mysqld-exporter interface bind
lineinfile:
path: /etc/default/prometheus-mysqld-exporter
regex: "^ARGS="
line: "ARGS=\"--web.listen-address='{{ stage_private_server_ip }}:{{ monitor_port_maria }}'\""
register: mysql_exporter_args
- name: Start prometheus-mysqld-exporter if needed
service:
name: prometheus-mysqld-exporter
state: started
enabled: yes
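The two lineinfile edits above amount to the following end state of /etc/default/prometheus-mysqld-exporter. Writing the file in one shot is an alternative sketch, shown only to make the final shape of the file explicit (this is not what the master branch does):

- name: "Write exporter defaults in one shot (alternative sketch)"
  ansible.builtin.copy:
    dest: /etc/default/prometheus-mysqld-exporter
    mode: '0644'
    content: |
      DATA_SOURCE_NAME="prometheus@unix(/run/mysqld/mysqld.sock)/"
      ARGS="--web.listen-address='{{ stage_private_server_ip }}:{{ monitor_port_maria }}'"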
- name: 'Ensures <{{ backup_directory }}> directory exists' - name: 'Ensures <{{ backup_directory }}> directory exists'
file: file:
state: directory state: directory
path: '{{ backup_directory }}' path: '{{ backup_directory }}'
mode: 0755 mode: 0755
- name: "Copy testdb.sql to ensure test DB"
copy:
src: '{{ item }}'
dest: '/tmp/{{ item }}'
mode: '0444'
owner: root
group: root
loop:
- testdb.sql
- name: "Ensure test DB"
community.mysql.mysql_db:
login_unix_socket: /run/mysqld/mysqld.sock
login_password: "{{ mysql_root_password }}"
name: dummytestdb
state: import
target: /tmp/testdb.sql

@ -1,25 +0,0 @@
{{ ansible_managed | comment }}
[server]
[mysqld]
pid-file = /run/mysqld/mysqld.pid
basedir = /usr
bind-address = {{ mariadb_server_bind_address }}
expire_logs_days = 10
character-set-server = utf8mb4
collation-server = utf8mb4_general_ci
[embedded]
[mariadb]
require_secure_transport = on
ssl_key = {{ cert_private_key }}
ssl_cert = {{ cert_public_key }}
ssl_ca = {{ ca_cert }}
ssl = on
tls_version = TLSv1.2,TLSv1.3
ssl_cipher = TLSv1.2,TLSv1.3
[mariadb-10.6]

@ -16,354 +16,8 @@ default_shared_buffers: 256MB
database_state: present database_state: present
postgres_homedir: '/var/lib/postgresql'
postgres_listen_addresses: "listen_addresses = 'localhost,{{ stage_private_server_ip }}'" postgres_listen_addresses: "listen_addresses = 'localhost,{{ stage_private_server_ip }}'"
postgres_base_config:
-
regex: "^data_directory"
line: "data_directory = '/var/lib/postgresql/{{ default_postgres_version }}/main'"
-
regex: "^hba_file"
line: "hba_file = '/etc/postgresql/{{ default_postgres_version }}/main/pg_hba.conf'"
-
regex: "^ident_file"
line: "ident_file = '/etc/postgresql/{{ default_postgres_version }}/main/pg_ident.conf'"
-
regex: "^external_pid_file"
line: "external_pid_file = '/var/run/postgresql/{{ default_postgres_version }}-main.pid'"
-
regex: "^port"
line: "port = 5432"
-
regex: "^unix_socket_directories"
line: "unix_socket_directories = '/var/run/postgresql'"
-
regex: "^dynamic_shared_memory_type"
line: "dynamic_shared_memory_type = posix"
-
regex: "^max_wal_size"
line: "max_wal_size = 1GB"
-
regex: "^min_wal_size"
line: "min_wal_size = 80MB"
-
regex: "^log_line_prefix"
line: "log_line_prefix = '%m [%p] %q%u@%d '"
-
regex: "^log_timezone"
line: "log_timezone = 'Etc/UTC'"
-
regex: "^cluster_name"
line: "cluster_name = '{{ default_postgres_version }}/main'"
-
regex: "^stats_temp_directory"
line: "stats_temp_directory = '/var/run/postgresql/{{ default_postgres_version }}-main.pg_stat_tmp'"
-
regex: "^datestyle"
line: "datestyle = 'iso, mdy'"
-
regex: "^timezone"
line: "timezone = 'Etc/UTC'"
-
regex: "^lc_messages"
line: "lc_messages = 'en_US.UTF-8'"
-
regex: "^lc_monetary"
line: "lc_monetary = 'en_US.UTF-8'"
-
regex: "^lc_numeric"
line: "lc_numeric = 'en_US.UTF-8'"
-
regex: "^lc_time"
line: "lc_time = 'en_US.UTF-8'"
-
regex: "^default_text_search_config"
line: "default_text_search_config = 'pg_catalog.english'"
-
regex: "^include_dir"
line: "include_dir = 'conf.d'"
-
regex: "^wal_level"
line: "wal_level = replica"
-
regex: "^max_wal_senders"
line: "max_wal_senders = 10"
-
regex: "^archive_mode"
line: "archive_mode = on"
-
regex: "^archive_command"
line: "archive_command = 'cp -f %p /postgresql/replication/%f'"
-
regex: "^wal_keep_size"
line: "wal_keep_size = 16"
-
regex: "^listen_addresses = "
line: "{{ postgres_listen_addresses }}"
-
regex: "^max_connections"
line: "max_connections = {{ default_max_connections }}"
-
regex: "^shared_buffers"
line: "shared_buffers = {{ default_shared_buffers }}"
postgres_ssl_config:
-
regex: "^ssl = on"
line: "ssl = on"
-
regex: "^ssl_key_file"
line: "ssl_key_file = '{{ cert_private_key }}'"
-
regex: "^ssl_cert_file"
line: "ssl_cert_file = '{{ cert_public_key }}'"
-
regex: "^ssl_ca_file"
line: "ssl_ca_file = '{{ ca_cert }}'"
-
regex: "^ssl_ciphers"
line: "ssl_ciphers = 'HIGH'"
postgres_config: '{{ postgres_base_config + ( postgres_ssl_config | default([]) ) }}'
postgres_exporter_user: postgres
postgres_exporter_group: "{{ postgres_exporter_user }}"
postgres_exporter_dir: "/opt/"
postgres_exporter_version: "0.10.1"
postgres_exporter_checksum: "sha256:5344afe06a90c3cbd52803d56031bfcbcff78b56448e16c9228697ea0a2577b7"
postgres_exporter_dist: "postgres_exporter-{{ postgres_exporter_version }}.linux-amd64"
postgres_exporter_download_url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_version }}/{{ postgres_exporter_dist }}.tar.gz"
postgres_exporter_binary: "{{ postgres_exporter_dir }}{{ postgres_exporter_dist }}/postgres_exporter"
postgres_exporter_datasource_uri: "postgres@:5432/postgres?host=/var/run/postgresql"
postgres_exporter_home: "{{ '/var/lib/pgsql' if ansible_os_family == 'RedHat' else '/var/lib/postgresql' }}"
postgres_exporter_flags:
- "--web.listen-address='{{ stage_private_server_ip }}:{{ monitor_port_postgres }}'"
- '--auto-discover-databases'
- '--extend.query-path={{ postgres_exporter_home }}/queries.yml'
postgres_exporter_config_file: /etc/default/postgres_exporter
# got several queries from here:
# https://raw.githubusercontent.com/bdellegrazie/ansible-role-postgres_exporter/b01ae2aae53e02a0778ce6c06361cfb6af2a50c2/files/queries.yml
postgres_exporter_additional_queries:
pg_replication:
query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag"
master: true
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind master in seconds"
pg_replication_wal_files:
query: "SELECT COUNT(*) FROM pg_ls_dir('pg_wal') as count"
master: true
metrics:
- wal_files_count:
usage: "COUNTER"
description: "Number of WAL files"
pg_postmaster:
query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
master: true
metrics:
- start_time_seconds:
usage: "GAUGE"
description: "Time at which postmaster started"
pg_stat_user_tables:
query: "SELECT current_database() datname, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, n_mod_since_analyze, COALESCE(last_vacuum, '1970-01-01Z'), COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, COALESCE(last_analyze, '1970-01-01Z') as last_analyze, COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, vacuum_count, autovacuum_count, analyze_count, autoanalyze_count FROM pg_stat_user_tables"
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- seq_scan:
usage: "COUNTER"
description: "Number of sequential scans initiated on this table"
- seq_tup_read:
usage: "COUNTER"
description: "Number of live rows fetched by sequential scans"
- idx_scan:
usage: "COUNTER"
description: "Number of index scans initiated on this table"
- idx_tup_fetch:
usage: "COUNTER"
description: "Number of live rows fetched by index scans"
- n_tup_ins:
usage: "COUNTER"
description: "Number of rows inserted"
- n_tup_upd:
usage: "COUNTER"
description: "Number of rows updated"
- n_tup_del:
usage: "COUNTER"
description: "Number of rows deleted"
- n_tup_hot_upd:
usage: "COUNTER"
description: "Number of rows HOT updated (i.e., with no separate index update required)"
- n_live_tup:
usage: "GAUGE"
description: "Estimated number of live rows"
- n_dead_tup:
usage: "GAUGE"
description: "Estimated number of dead rows"
- n_mod_since_analyze:
usage: "GAUGE"
description: "Estimated number of rows changed since last analyze"
- last_vacuum:
usage: "GAUGE"
description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)"
- last_autovacuum:
usage: "GAUGE"
description: "Last time at which this table was vacuumed by the autovacuum daemon"
- last_analyze:
usage: "GAUGE"
description: "Last time at which this table was manually analyzed"
- last_autoanalyze:
usage: "GAUGE"
description: "Last time at which this table was analyzed by the autovacuum daemon"
- vacuum_count:
usage: "COUNTER"
description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)"
- autovacuum_count:
usage: "COUNTER"
description: "Number of times this table has been vacuumed by the autovacuum daemon"
- analyze_count:
usage: "COUNTER"
description: "Number of times this table has been manually analyzed"
- autoanalyze_count:
usage: "COUNTER"
description: "Number of times this table has been analyzed by the autovacuum daemon"
pg_statio_user_tables:
query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables"
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- heap_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table"
- heap_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table"
- idx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from all indexes on this table"
- idx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in all indexes on this table"
- toast_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table (if any)"
- toast_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table (if any)"
- tidx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table indexes (if any)"
- tidx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table indexes (if any)"
pg_database:
query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size_bytes FROM pg_database"
master: true
cache_seconds: 30
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
pg_stat_statements:
query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'"
master: true
metrics:
- rolname:
usage: "LABEL"
description: "Name of user"
- datname:
usage: "LABEL"
description: "Name of database"
- queryid:
usage: "LABEL"
description: "Query ID"
- calls:
usage: "COUNTER"
description: "Number of times executed"
- total_time_seconds:
usage: "COUNTER"
description: "Total time spent in the statement, in milliseconds"
- min_time_seconds:
usage: "GAUGE"
description: "Minimum time spent in the statement, in milliseconds"
- max_time_seconds:
usage: "GAUGE"
description: "Maximum time spent in the statement, in milliseconds"
- mean_time_seconds:
usage: "GAUGE"
description: "Mean time spent in the statement, in milliseconds"
- stddev_time_seconds:
usage: "GAUGE"
description: "Population standard deviation of time spent in the statement, in milliseconds"
- rows:
usage: "COUNTER"
description: "Total number of rows retrieved or affected by the statement"
- shared_blks_hit:
usage: "COUNTER"
description: "Total number of shared block cache hits by the statement"
- shared_blks_read:
usage: "COUNTER"
description: "Total number of shared blocks read by the statement"
- shared_blks_dirtied:
usage: "COUNTER"
description: "Total number of shared blocks dirtied by the statement"
- shared_blks_written:
usage: "COUNTER"
description: "Total number of shared blocks written by the statement"
- local_blks_hit:
usage: "COUNTER"
description: "Total number of local block cache hits by the statement"
- local_blks_read:
usage: "COUNTER"
description: "Total number of local blocks read by the statement"
- local_blks_dirtied:
usage: "COUNTER"
description: "Total number of local blocks dirtied by the statement"
- local_blks_written:
usage: "COUNTER"
description: "Total number of local blocks written by the statement"
- temp_blks_read:
usage: "COUNTER"
description: "Total number of temp blocks read by the statement"
- temp_blks_written:
usage: "COUNTER"
description: "Total number of temp blocks written by the statement"
- blk_read_time_seconds:
usage: "COUNTER"
description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)"
- blk_write_time_seconds:
usage: "COUNTER"
description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)"
database_engine: postgres database_engine: postgres
backup_dest_dir: "{{ backup_directory }}/{{ database_engine }}/{{ get_current_date }}" backup_dest_dir: "{{ backup_directory }}/{{ database_engine }}/{{ ansible_date_time.date }}"
backup_status_file: '{{ backup_dest_dir }}/backup_finished' backup_status_file: '{{ backup_dest_dir }}/backup_finished'

@ -1,60 +0,0 @@
--
-- PostgreSQL database dump
--
-- Dumped from database version 13.6 (Ubuntu 13.6-1.pgdg20.04+1)
-- Dumped by pg_dump version 13.6 (Ubuntu 13.6-1.pgdg20.04+1)
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
--
-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: -
--
CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public;
--
-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner:
--
COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions';
SET default_tablespace = '';
SET default_table_access_method = heap;
--
-- Name: movie_quotes; Type: TABLE; Schema: public; Owner: postgres
--
CREATE TABLE public.movie_quotes (
movie character varying(255),
quote character varying(255)
);
ALTER TABLE public.movie_quotes OWNER TO postgres;
--
-- Data for Name: movie_quotes; Type: TABLE DATA; Schema: public; Owner: postgres
--
COPY public.movie_quotes (movie, quote) FROM stdin;
wargames Shall we play
\.
--
-- PostgreSQL database dump complete
--

@ -1,10 +1,2 @@
--- ---
- name: "restart postgres"
service:
name: postgresql
state: restarted
- name: "restart postgres_exporter"
service:
name: postgres_exporter
state: restarted

@ -13,40 +13,14 @@
owner: postgres owner: postgres
group: postgres group: postgres
- name: "Block: gpg stuff" # there is no ansible module already in place
become: yes
become_user: postgres
block:
- name: Create temp dir
ansible.builtin.tempfile:
state: directory
suffix: gitcheckout
path: /tmp
register: tempdir
- name: "Checkout repo for gpg communication-keys"
ansible.builtin.git:
repo: 'https://{{ gituser | default("gitea-admin") | urlencode }}:{{ gitea_admin_password | urlencode }}@{{ stage }}-gitea-01.{{ domain }}/gitea-admin/communication-keys.git'
dest: '{{ tempdir.path }}'
version: master
# there is no ansible gpg module already in place
# linting violation needs to be whitelisted
- name: "Importing stage specific automation gpg-key" # noqa command-instead-of-shell
shell: 'gpg --import {{ tempdir.path }}/smardigo_automation_{{ stage }}.gpg.pub'
# there is no ansible module already in place for (pg_basebackup|gpg)
# so using shell module # so using shell module
- name: "Creating pg_basebackup ... + doing async check if successful or not" - name: "Creating pg_basebackup ... + doing async check if successful or not"
become: yes become: yes
become_user: postgres become_user: postgres
vars:
backup_file: '{{ backup_dest_dir }}/basebackup_{{ current_date_time }}.tar.gz'
shell: | shell: |
set -o pipefail set -o pipefail
/usr/bin/pg_basebackup -Ft -X fetch -D - | pigz -p 4 > {{ backup_file }} && \ /usr/bin/pg_basebackup -Ft -X fetch -D - | pigz -p 4 > {{ backup_dest_dir }}/basebackup_{{ current_date_time }}.tar.gz
gpg --encrypt --recipient "smardigo automation {{ stage | upper }}" --trust-model always {{ backup_file }} && \
rm {{ backup_file }}
args: args:
executable: /bin/bash executable: /bin/bash
async: 3600 # allows duration for task up to 3600sec async: 3600 # allows duration for task up to 3600sec
@ -68,6 +42,6 @@
become: yes become: yes
ansible.builtin.file: ansible.builtin.file:
path: '{{ backup_dest_dir }}' path: '{{ backup_dest_dir }}'
owner: '{{ backupuser_user_name }}' owner: '{{ backupuser_username }}'
group: '{{ backupuser_user_name }}' group: '{{ backupuser_username }}'
recurse: yes recurse: yes

@ -9,8 +9,8 @@
- name: "Updating pg_hba.conf entries for users/nodes/schemas" - name: "Updating pg_hba.conf entries for users/nodes/schemas"
lineinfile: lineinfile:
state: '{{ database_state }}' state: '{{ database_state }}'
regex: '^hostssl[ ]+{{ item.name }}[ ]+{{ item.name }}' regex: '^host[ ]+{{ item.name }}[ ]+{{ item.name }}'
line: 'hostssl {{ item.name }} {{ item.name }} {{ item.trusted_cidr_entry | default(shared_service_network) }} md5' line: 'host {{ item.name }} {{ item.name }} {{ item.trusted_cidr_entry | default(shared_service_network) }} md5'
path: /etc/postgresql/{{ default_postgres_version }}/main/pg_hba.conf path: /etc/postgresql/{{ default_postgres_version }}/main/pg_hba.conf
with_items: "{{ postgres_acls }}" with_items: "{{ postgres_acls }}"

@ -8,7 +8,7 @@
name: postgres name: postgres
uid: 2000 uid: 2000
group: postgres group: postgres
home: '{{ postgres_homedir }}' home: /var/lib/postgresql
system: true system: true
shell: /bin/bash shell: /bin/bash
@ -30,50 +30,36 @@
repo: deb http://apt.postgresql.org/pub/repos/apt {{ default_postgres_target_distribution }} main repo: deb http://apt.postgresql.org/pub/repos/apt {{ default_postgres_target_distribution }} main
state: present state: present
- name: "Ensuring several packages being installed" - name: "Ensuring postgres is installed"
apt: apt:
name: "{{ item }}" name: "postgresql-{{ default_postgres_version }}"
update_cache: yes update_cache: yes
cache_valid_time: 900 cache_valid_time: 900
state: present state: present
loop:
- 'postgresql-{{ default_postgres_version }}' - name: "Setup listen interfaces for postgresql instance"
- python3-psycopg2 lineinfile:
state: present
- name: "Set vars" regex: "^listen_addresses"
set_fact: line: "{{ postgres_listen_addresses }}"
cert_private_key: '{{ postgres_homedir }}/{{ inventory_hostname }}.{{ domain }}-key.pem' path: /etc/postgresql/{{ default_postgres_version }}/main/postgresql.conf
cert_public_key: '{{ postgres_homedir }}/{{ inventory_hostname }}.{{ domain }}-crt.pem' register: listen_addresses
ca_cert: '{{ postgres_homedir }}/ca-certificate.pem'
- name: "Setting 'max_connections={{ default_max_connections }}'"
- name: "Include role for self-signed CA" lineinfile:
include_role: state: present
name: selfsigned_ca regex: "^max_connections"
line: "max_connections = {{ default_max_connections }}"
- name: "Create certs with selfsigned CA" path: /etc/postgresql/{{ default_postgres_version }}/main/postgresql.conf
include_role: register: max_connections
name: selfsigned_ca
tasks_from: _create_cert - name: "Setting 'shared_buffers={{ default_shared_buffers }}'"
vars:
selfsigned_ca_cert_private_key: '{{ cert_private_key }}'
selfsigned_ca_cert_private_key_group: postgres
selfsigned_ca_cert_public_key: '{{ cert_public_key }}'
selfsigned_ca_cacert: '{{ ca_cert }}'
selfsigned_ca_cert_subject:
CN: '{{ inventory_hostname }}.{{ domain }}'
selfsigned_ca_cert_altnames:
- 'DNS:{{ inventory_hostname }}.{{ domain }}'
- 'DNS:{{ inventory_hostname }}'
# selfsigned_ca_trigger_handler: restart postgres
- name: "Ensure postgresql.conf via evil lineinfile..."
lineinfile: lineinfile:
state: present state: present
regex: "{{ item.regex }}" regex: "^shared_buffers"
line: "{{ item.line }}" line: "shared_buffers = {{ default_shared_buffers }}"
path: /etc/postgresql/{{ default_postgres_version }}/main/postgresql.conf path: /etc/postgresql/{{ default_postgres_version }}/main/postgresql.conf
loop: '{{ postgres_config }}' register: shared_buffers
notify: restart postgres
- name: "Creating archive directory if necessary" - name: "Creating archive directory if necessary"
file: file:
@ -83,11 +69,83 @@
group: postgres group: postgres
mode: "g+s" mode: "g+s"
- name: "Install postgres exporter via include_task" - name: "Install prometheus postgres exporter..."
include_tasks: install_postgres_exporter.yml apt:
args: name: "prometheus-postgres-exporter"
apply: update_cache: yes
tags: cache_valid_time: 900
- postgres-exporter state: present
tags:
- postgres-exporter - name: Setup prometheus-postgres-exporter interface bind
lineinfile:
path: /etc/default/prometheus-postgres-exporter
regex: "^ARGS="
line: "ARGS=\"--web.listen-address='{{ stage_private_server_ip }}:{{ monitor_port_postgres }}'\""
- name: "Set DATA_SOURCE_NAME in /etc/default/prometheus-postgres-exporter"
lineinfile:
state: present
regex: "^DATA_SOURCE_URI"
line: "DATA_SOURCE_URI='postgres@:5432/postgres?host=/var/run/postgresql'"
path: /etc/default/prometheus-postgres-exporter
- name: "Set PG_EXPORTER_AUTO_DISCOVER_DATABASES in /etc/default/prometheus-postgres-exporter"
lineinfile:
state: present
regex: "^PG_EXPORTER_AUTO_DISCOVER_DATABASES"
line: "PG_EXPORTER_AUTO_DISCOVER_DATABASES=true"
path: /etc/default/prometheus-postgres-exporter
- name: "Set PG_EXPORTER_EXTEND_QUERY_PATH in /etc/default/prometheus-postgres-exporter"
lineinfile:
state: present
regex: "^PG_EXPORTER_EXTEND_QUERY_PATH"
line: "PG_EXPORTER_EXTEND_QUERY_PATH=/metrics/queries.yaml"
path: /etc/default/prometheus-postgres-exporter
- name: "Ensure /metrics directory exists"
file:
state: directory
path: /metrics
mode: '0755'
- name: "Ensure /metrics/queries.yaml exists"
copy:
src: pg-exporter-queries.yml
dest: /metrics/queries.yaml
mode: '0755'
- name: Check role prometheus exists # noqa command-instead-of-shell no-changed-when
become: yes
become_user: postgres
shell: "/usr/bin/psql -Atc \"SELECT count(rolname) FROM pg_roles where rolname='prometheus'\""
register: role_check
ignore_errors: yes
- name: "Copy prometheus_postgres_exporter init script"
copy:
src: init.sql
dest: /tmp/prometheus_postgres_exporter.sql
mode: '0755'
when: "role_check.stdout == '0' and server_type == 'master'"
- name: "Execute prometheus_postgres_exporter init script" # noqa command-instead-of-shell
become: true
become_user: postgres
shell: "psql -f /tmp/prometheus_postgres_exporter.sql"
when: "role_check.stdout == '0' and server_type == 'master'"
- name: "Delete prometheus_postgres_exporter init script"
file: path="/tmp/prometheus_postgres_exporter.sql" state=absent
when: "role_check.stdout == '0' and server_type == 'master'"
- name: "Restarting postgres if necessary" # noqa no-handler
service:
name: postgresql
state: restarted
when: listen_addresses.changed or max_connections.changed or shared_buffers.changed
- name: "Restart prometheus postgres exporter .."
service:
name: prometheus-postgres-exporter
state: restarted
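Since the master-side exporter binds to stage_private_server_ip on monitor_port_postgres, a quick reachability probe can close out the play. A minimal sketch (this task is illustrative and not part of the diff):

- name: "Smoke-test prometheus-postgres-exporter endpoint (illustrative)"
  ansible.builtin.uri:
    url: "http://{{ stage_private_server_ip }}:{{ monitor_port_postgres }}/metrics"
    status_code: 200
  register: exporter_probe
  until: exporter_probe.status == 200
  retries: 3
  delay: 5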

@ -1,82 +0,0 @@
---
- name: "Delete package <prometheus postgres exporter>"
apt:
name: "prometheus-postgres-exporter"
state: absent
- name: "Check if version is already installed"
ansible.builtin.stat:
path: "{{ postgres_exporter_dir }}/{{ postgres_exporter_dist }}/postgres_exporter"
register: check_pg_exp
- name: "Download and extract pg_exporter"
unarchive:
src: "{{ postgres_exporter_download_url }}"
dest: "{{ postgres_exporter_dir }}"
owner: root
group: root
mode: "u=rwx,g=rx,o=rx"
remote_src: true
creates: "{{ postgres_exporter_dir }}/{{ postgres_exporter_dist }}/postgres_exporter"
when:
- not check_pg_exp.stat.exists
- name: "Create systemd service file"
become: true
template:
src: "postgres_exporter.systemd.j2"
dest: "/etc/systemd/system/postgres_exporter.service"
owner: root
group: root
mode: "u=rw,go=r"
notify:
- restart postgres_exporter
- name: "Create Config for postgres_exporter"
template:
src: "postgres_exporter.default.conf.j2"
dest: "/etc/default/postgres_exporter"
owner: root
group: "{{ postgres_exporter_group }}"
mode: "u=rw,g=r,o="
notify: restart postgres_exporter
- name: "Create file for additional queries"
copy:
dest: '{{ postgres_exporter_home }}/queries.yml'
owner: root
group: '{{ postgres_exporter_group }}'
mode: '0644'
content: "{{ lookup('vars','postgres_exporter_additional_queries') | to_nice_yaml }}"
notify: restart postgres_exporter
- name: "Ensure postgres_exporter up and running"
service:
name: postgres_exporter
state: started
enabled: yes
daemon_reload: yes
- name: Check role prometheus exists # noqa command-instead-of-shell no-changed-when
become: yes
become_user: postgres
shell: "/usr/bin/psql -Atc \"SELECT count(rolname) FROM pg_roles where rolname='prometheus'\""
register: role_check
ignore_errors: yes
- name: "Copy prometheus_postgres_exporter init script"
copy:
src: init.sql
dest: /tmp/prometheus_postgres_exporter.sql
mode: '0755'
when: "role_check.stdout == '0' and server_type == 'master'"
- name: "Execute prometheus_postgres_exporter init script" # noqa command-instead-of-shell
become: true
become_user: postgres
shell: "psql -f /tmp/prometheus_postgres_exporter.sql"
when: "role_check.stdout == '0' and server_type == 'master'"
- name: "Delete prometheus_postgres_exporter init script"
file: path="/tmp/prometheus_postgres_exporter.sql" state=absent
when: "role_check.stdout == '0' and server_type == 'master'"

@ -5,8 +5,6 @@
# Minimal requirements for postgres # Minimal requirements for postgres
- name: Include Base Requirements - name: Include Base Requirements
include_tasks: base-requirements.yml include_tasks: base-requirements.yml
tags:
- postgres-exporter
# Master requirements for postgres # Master requirements for postgres
- name: Include Master Requirements - name: Include Master Requirements

@ -157,33 +157,3 @@
shell: "/usr/bin/psql -Atc \"SELECT pg_create_physical_replication_slot('pgstandby1');\"" shell: "/usr/bin/psql -Atc \"SELECT pg_create_physical_replication_slot('pgstandby1');\""
ignore_errors: yes # noqa ignore-errors ignore_errors: yes # noqa ignore-errors
when: replication_slot_check.stdout == "0" when: replication_slot_check.stdout == "0"
# only needed in case of install from scratch
- name: "Ensure test db stuff"
block:
- name: "Copy testdb.sql to ensure test DB"
copy:
src: '{{ item }}'
dest: '/tmp/{{ item }}'
mode: '0444'
owner: postgres
group: postgres
loop:
- testdb.sql
- name: "Ensure test DB"
become: yes
become_user: postgres
community.postgresql.postgresql_db:
name: dummytestdb
- name: "Ensure content for test DB"
become: yes
become_user: postgres
community.postgresql.postgresql_db:
name: dummytestdb
state: restore
target: /tmp/testdb.sql
when: postgres_ensure_testdb | default(False)

@ -1,7 +0,0 @@
{% if postgres_exporter_datasource_name is defined %}
DATA_SOURCE_NAME="{{ postgres_exporter_datasource_name }}"
{% endif %}
{% if postgres_exporter_datasource_uri is defined %}
DATA_SOURCE_URI="{{ postgres_exporter_datasource_uri }}"
{% endif %}
FLAGS="{{ postgres_exporter_flags | join(' ') }}"

@ -1,16 +0,0 @@
[Unit]
Description=postgres_exporter - Exporter for machine metrics.
Documentation=https://github.com/prometheus/postgres_exporter
After=network.target
[Service]
User={{ postgres_exporter_user }}
Group={{ postgres_exporter_group }}
EnvironmentFile={{ postgres_exporter_config_file }}
ExecStart={{ postgres_exporter_binary }} $FLAGS
SyslogIdentifier=postgres_exporter
Restart=always
[Install]
WantedBy=multi-user.target

@ -1,2 +0,0 @@
---
mariadb_server_with_mysqld_exporter: False

@ -1,29 +0,0 @@
#!/bin/bash
#
#
#
#
STAGE=$1
DATADIR='/var/lib/mysql'
DATE=$(date +%F)
systemctl stop mariadb
rm -rf ${DATADIR}_moved
mv ${DATADIR} ${DATADIR}_moved
mkdir -p ${DATADIR}
LOCAL_BACKUP_DIR="/home/backupuser/backups/${STAGE}/maria"
BACKUP_FILE_ENCRYPTED=$(find "${LOCAL_BACKUP_DIR}/${DATE}/" -name '*.gz.gpg' | head -n 1)
# --batch => avoids error: >> gpg: cannot open '/dev/tty': No such device or address <<
gpg --batch --decrypt "$BACKUP_FILE_ENCRYPTED" | gunzip | mbstream --directory ${DATADIR} -x --parallel=2
mariabackup --prepare --target-dir=${DATADIR}
chown -R mysql:mysql ${DATADIR}
systemctl start mariadb
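restore.sh takes the stage as its only positional argument and expects the newest encrypted dump under /home/backupuser/backups/<stage>/maria/<date>. A hypothetical invocation from a play on the restore server:

- name: "Run mariadb restore for the current stage (hypothetical)"
  become: yes
  ansible.builtin.command: /root/restore.sh {{ stage }}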

@ -1,43 +0,0 @@
---
# DEV-375
# "fixes" error for mysql-connect as root-user
# it's just a restore server ...
- name: "Ensure passwordless mysql-connect for root "
copy:
dest: '/root/.my.cnf'
owner: root
group: root
mode: '0600'
content: |
[client]
user={{ mysql_root_username }}
password={{ mysql_root_password }}
- name: "Install mariadb-server via include_role"
include_role:
name: maria
- name: "Copy restore script to restore server"
copy:
src: restore.sh
dest: '/root/restore.sh'
mode: '0750'
owner: root
group: root
- name: "Create file for gpg secret key"
become: yes
copy:
dest: '/root/gpg_private_key'
mode: '0600'
owner: 'root'
group: 'root'
content: |
{{ gpg_key_smardigo_automation__private }}
- name: "Import private gpg key" # noqa command-instead-of-shell
become: yes
shell: 'gpg --import /root/gpg_private_key'
register: gpg_import
changed_when:
- gpg_import.rc != 0

@ -1,6 +0,0 @@
[mysqld]
ssl_key = {{ cert_private_key }}
ssl_cert = {{ cert_public_key }}
ssl_ca = {{ ca_cert }}
tls_version = TLSv1.2,TLSv1.3
ssl_cipher = TLSv1.2,TLSv1.3

@ -1,30 +0,0 @@
#!/bin/bash
#
#
#
STAGE=$1
DATADIR='/var/lib/postgresql/13/main'
DATE=$(date +%F)
PG_USER=postgres
PG_GROUP=postgres
systemctl stop postgresql
rm -rf ${DATADIR}_moved
mv ${DATADIR} ${DATADIR}_moved
mkdir -p ${DATADIR}
LOCAL_BACKUP_DIR="/home/backupuser/backups/${STAGE}/postgres"
BACKUP_FILE_ENCRYPTED=$(find "${LOCAL_BACKUP_DIR}/${DATE}/" -name '*.gz.gpg' | head -n 1)
# --batch => avoids error: >> gpg: cannot open '/dev/tty': No such device or address <<
gpg --batch --decrypt "$BACKUP_FILE_ENCRYPTED" | tar -xz -C ${DATADIR}
chmod 0700 ${DATADIR}
chown -R ${PG_USER}:${PG_GROUP} ${DATADIR}
systemctl start postgresql

@ -1,29 +0,0 @@
---
- name: "Install postgres via include_role"
include_role:
name: postgres
- name: "Copy restore script to restore server"
copy:
src: restore.sh
dest: '/root/restore.sh'
mode: 0754
owner: root
group: root
- name: "Create file for gpg secret key"
become: yes
copy:
dest: '/root/gpg_private_key'
mode: '0600'
owner: 'root'
group: 'root'
content: |
{{ gpg_key_smardigo_automation__private }}
- name: "Import private gpg key" # noqa command-instead-of-shell
become: yes
shell: 'gpg --import /root/gpg_private_key'
register: gpg_import
changed_when:
- gpg_import.rc != 0

@ -1,3 +0,0 @@
---
selfsigned_ca_dir: '/etc/ssl/selfsigned_ca'
selfsigned_ca_private_key_passphrase: '{{ selfsigned_ca_private_key_passphrase_vault }}'

Some files were not shown because too many files have changed in this diff.
