Compare commits
No commits in common. 'feature/DEV-380' and 'master' have entirely different histories.
feature/DEV-380 ... master
@@ -1,9 +1,3 @@
#!/bin/bash

if [ "x$1" == "x" ]; then
  echo "Stage as param \$1 is missing. exit"
  exit 1
fi

docker run -v `pwd`/templates/elastic-certs:/certs -v `pwd`/templates/elastic-certs/$1-instances.yaml:/usr/share/elasticsearch/config/certificates/$1-instances.yml docker.elastic.co/elasticsearch/elasticsearch:7.12.0 /bin/sh "/certs/certutil.sh" $1
@@ -1,99 +0,0 @@
---

# Parameters:
#   playbook inventory
#     stage := the name of the stage (e.g. dev, int, qa, prod)
#     tenant_id := (unique key for the tenant, e.g. customer)
#     cluster_name := (business name for the cluster, e.g. product, department)
#     cluster_size := (WIP node count for the cluster)
#     cluster_service := (service to setup, e.g. 'connect', ...)
#     cluster_features := (optional features to use, e.g. ['wordpress', 'resubmission', ...])
#     database_backup_file := the dump file to export, has to be on the database server under /tmp (e.g. wordpress_portal.sql)
#     target_database := (optional) the database to export (see {{ connect_wordpress_maria_database }})
#   smardigo message callback
#     scope_id := (scope id of the management process)
#     process_instance_id := (process instance id of the management process)
#     smardigo_management_action := (smardigo management action name of the management process)

#############################################################
# Creating inventory dynamically for given parameters
#############################################################

- hosts: localhost
  connection: local
  gather_facts: false

  pre_tasks:
    - name: "Check if ansible version is at least 2.10.x"
      assert:
        that:
          - ansible_version.major >= 2
          - ansible_version.minor >= 10
        msg: "The ansible version has to be at least 2.10, found {{ ansible_version.full }}"

    # add virtual server to load stage specific variables as context
    - name: "Add <{{ stage }}-virtual-host-to-read-groups-vars> to hosts"
      add_host:
        name: "{{ stage }}-virtual-host-to-read-groups-vars"
        groups:
          - "stage_{{ stage }}"
      changed_when: False

  tasks:
    - name: Add maria servers to hosts if necessary
      add_host:
        name: "{{ stage }}-maria-01"
        groups:
          - "stage_{{ stage }}"
          - "{{ item }}"
      changed_when: False
      with_items: "{{ cluster_features }}"
      when: item in ['connect_wordpress']

#############################################################
# exporting database backups for created inventory
#############################################################

- hosts: "stage_{{ stage }}:!{{ stage }}-virtual-host-to-read-groups-vars"
  serial: "{{ serial_number | default(1) }}"
  remote_user: root
  vars:
    ansible_ssh_host: "{{ stage_server_domain }}"

  pre_tasks:
    - name: "export autodiscover pre-tasks"
      import_tasks: tasks/autodiscover_pre_tasks.yml
      become: false
      tags:
        - always

  roles:
    - role: export_maria_database
      vars:
        database_backup_file: "{{ stage }}-{{ tenant_id }}-{{ cluster_name }}-wordpress.sql.gz"
      when:
        - "'connect_wordpress' in group_names"
        - "target_database is defined"

    - role: export_maria_database
      vars:
        target_database: "{{ connect_wordpress_maria_database }}"
        database_backup_file: "{{ stage }}-{{ tenant_id }}-{{ cluster_name }}-wordpress.sql.gz"
      when:
        - "'connect_wordpress' in group_names"

#############################################################
# Sending smardigo management message to process
#############################################################

- hosts: "{{ stage }}-virtual-host-to-read-groups-vars"
  serial: "{{ serial_number | default(1) }}"
  gather_facts: false
  connection: local
  run_once: true
  vars:
    connect_jwt_username: "{{ management_admin_username }}"

  tasks:
    - name: "Sending smardigo management message to <{{ smardigo_management_url }}>"
      include_tasks: tasks/smardigo_management_message.yml
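A side note on the version assert in the playbook above: checking major and minor separately would wrongly reject a hypothetical 3.0 release. A hedged alternative (a sketch, not part of this diff) using Ansible's built-in version test:

- name: "Check if ansible version is at least 2.10.x"
  assert:
    that:
      - ansible_version.full is version('2.10', '>=')
    msg: "Ansible >= 2.10 required, found {{ ansible_version.full }}"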
@@ -1,60 +0,0 @@
---
# Parameters:
#   playbook inventory
#     stage := the name of the stage (e.g. dev, int, qa, prod)
#   environment variable
#     GITLAB_API_TOKEN := Access token from gitlab

#############################################################
# Creating inventory dynamically for given parameters
#############################################################

- hosts: localhost
  gather_facts: false
  connection: local

  tasks:
    - name: Add hosts
      add_host:
        name: "{{ stage }}-gitlab"
        groups: "{{ ['stage_' + stage ] }}"

#############################################################
# Creating gitlab mirrors for current stage
#############################################################

- hosts: "stage_{{ stage }}"
  serial: "{{ serial_number | default(1) }}"
  gather_facts: false
  connection: local
  vars:
    projects:
      - id: 1210
        name: argocd
      - id: 1216
        name: operator-awx
      - id: 1212
        name: operator-jaeger
      - id: 1231
        name: operator-knative
      - id: 1233
        name: smardigo-awx
      - id: 1232
        name: smardigo-jaeger

  pre_tasks:
    - name: "Add repository remote mirror to project"
      delegate_to: 127.0.0.1
      become: false
      uri:
        url: "https://git.dev-at.de/api/v4/projects/{{ item.id }}/remote_mirrors"
        method: POST
        body_format: json
        body:
          enabled: true
          only_protected_branches: true
          url: "https://{{ gitea_admin_username }}:{{ gitea_admin_password }}@{{ shared_service_gitea_hostname }}/argocd/{{ item.name }}.git"
        headers:
          PRIVATE-TOKEN: "{{ lookup('env', 'GITLAB_API_TOKEN') }}"
        status_code: [201]
      loop: "{{ projects }}"
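Because the POST above insists on status 201, a re-run fails once a mirror already exists. A possible guard (sketch only, assuming GitLab's GET /projects/:id/remote_mirrors endpoint) is to list existing mirrors first:

- name: "List existing remote mirrors (sketch)"
  uri:
    url: "https://git.dev-at.de/api/v4/projects/{{ item.id }}/remote_mirrors"
    method: GET
    headers:
      PRIVATE-TOKEN: "{{ lookup('env', 'GITLAB_API_TOKEN') }}"
  register: existing_mirrors
  loop: "{{ projects }}"

The POST task could then carry a when: condition that compares the registered mirror URLs against the Gitea target and skips known ones.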
@@ -1,269 +0,0 @@
---
hcloud_firewall_objects:
  -
    name: "{{ stage }}-default"
    state: present
    rules:
      -
        direction: in
        protocol: icmp
        port: ''
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: ICMP allowed
      -
        direction: in
        protocol: tcp
        port: '22'
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: SSH allowed
      -
        direction: in
        protocol: tcp
        port: '80'
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: HTTP allowed
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: HTTPS allowed
      -
        direction: in
        protocol: tcp
        port: 'any'
        source_ips: '{{ ip_whitelist_admins }}'
        destination_ips: []
        description: TCP - allow work from home without VPN
      -
        direction: in
        protocol: udp
        port: 'any'
        source_ips: '{{ ip_whitelist_admins }}'
        destination_ips: []
        description: UDP - allow work from home without VPN
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'stage={{ stage }}'
  -
    name: "{{ stage }}-monitoring"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '9080-9085'
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: 'Server/Service Monitoring'
      -
        direction: in
        protocol: tcp
        port: '9001'
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: 'PgAdmin'
      -
        direction: in
        protocol: tcp
        port: '9187'
        source_ips: '{{ ip_whitelist }}'
        destination_ips: []
        description: 'Postgres-Exporter'
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'stage={{ stage }}'
  -
    name: "{{ stage }}-monitoring-extern-https"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips:
          - "{{ lookup('community.general.dig', 'dev-blackbox-01.smardigo.digital' ) }}/32"
        destination_ips: []
        description: null
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'service=connect'
      -
        type: label_selector
        label_selector:
          selector: 'service=keycloak'

hcloud_firewall_objects_awx:
  -
    name: "{{ stage }}-awx-ssh-access-for-k8s-nodes"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '22'
        source_ips: "{{ awx_source_ips }}"
        destination_ips: []
        description: null
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'stage={{ stage }}'

hcloud_firewall_objects_backup:
  -
    name: "{{ stage }}-backup-ssh-access"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '22'
        source_ips:
          - "{{ offsite_storage_server_ip }}"
        destination_ips: []
        description: null
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'service=backup'

hcloud_firewall_objects_gitea:
  -
    name: "{{ stage }}-access-to-gitea"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ ip_whitelist }}"
        destination_ips: []
        description: "Allow access for whitelisted ips"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + awx_source_ips }}"
        destination_ips: []
        description: "Allow access for kubernetes worker nodes"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + (gitea_https_whitelisted_ips | default([])) }}"
        destination_ips: []
        description: "Allow access for custom whitelisted ips"
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'service=gitea'

hcloud_firewall_objects_keycloak:
  -
    name: "{{ stage }}-access-to-keycloak"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ ip_whitelist }}"
        destination_ips: []
        description: "Allow access for whitelisted ips"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + awx_source_ips }}"
        destination_ips: []
        description: "Allow access for kubernetes worker nodes"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + (keycloak_https_whitelisted_ips | default([])) }}"
        destination_ips: []
        description: "Allow access for custom whitelisted ips"
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'service=keycloak'

hcloud_firewall_objects_kibana:
  -
    name: "{{ stage }}-access-to-kibana"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ ip_whitelist }}"
        destination_ips: []
        description: "Allow access for whitelisted ips"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + awx_source_ips }}"
        destination_ips: []
        description: "Allow access for kubernetes worker nodes"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + (kibana_https_whitelisted_ips | default([])) }}"
        destination_ips: []
        description: "Allow access for custom whitelisted ips"
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'service=kibana'

hcloud_firewall_objects_management:
  -
    name: "{{ stage }}-access-to-management"
    state: present
    rules:
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ ip_whitelist }}"
        destination_ips: []
        description: "Allow access for whitelisted ips"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + awx_source_ips }}"
        destination_ips: []
        description: "Allow access for kubernetes worker nodes"
      -
        direction: in
        protocol: tcp
        port: '443'
        source_ips: "{{ [shared_service_network] + (management_https_whitelisted_ips | default([])) }}"
        destination_ips: []
        description: "Allow access for custom whitelisted ips"
    apply_to:
      -
        type: label_selector
        label_selector:
          selector: 'service=connect,tenant=management'
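The role consuming these objects is not part of this diff; a later hunk calls include_role: hcloud with tasks_from: configure-firewall2 and loop_var: firewall_object. A minimal sketch of what such a task might look like, assuming the hetzner.hcloud.hcloud_firewall module (how the apply_to label selectors get attached is not shown in the diff and is left out here):

- name: "Ensure firewall <{{ firewall_object.name }}> (sketch)"
  hetzner.hcloud.hcloud_firewall:
    api_token: "{{ hetzner_authentication_ansible }}"
    name: "{{ firewall_object.name }}"
    rules: "{{ firewall_object.rules }}"
    state: "{{ firewall_object.state }}"
  delegate_to: 127.0.0.1
  become: false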
@@ -1,11 +1,9 @@
---
# TODO: needs to be removed after story DEV-361 is finished
hetzner_server_type: "{{ hetzner_server_type_bastelserver | default('cx21') }}"
hetzner_server_labels: "stage={{ stage }} service=backup"
hetzner_server_labels: "stage={{ stage }} service=bastelserver"

docker_enabled: false
traefik_enabled: false
filebeat_enabled: false

custom_plattform_users:
  - backuphamster
node_exporter_enabled: false
@@ -1,3 +0,0 @@
---

connect_workflow_heatmap_enabled: "true"
@@ -1,15 +0,0 @@
---

hetzner_server_type: "{{ hetzner_server_type_restore_database | default('cpx21') }}"
hetzner_server_labels: "stage={{ stage }} service=restore database_engine={{ database_engine | default('') }} manual=''"

docker_enabled: false
traefik_enabled: false
filebeat_enabled: false

custom_plattform_users:
  - '{{ backupuser_user_name }}'

# postgresql related
# defining type of server (master|slave|restore)
server_type: restore
@@ -1,3 +1,3 @@
---

awx_hetzner_ansible_revision: "main"
awx_hetzner_ansible_revision: "master"
File diff suppressed because it is too large
@@ -1,22 +1,18 @@
$ANSIBLE_VAULT;1.1;AES256
38663233373062663161366637373233653833663531383237653432633832363036393236653231
6136663865393830306533376665343733383565366333630a303664306465393566383663323666
31663735623036363431346561616538623534636334633438366238653936383335303430613932
6461346332313639340a626139353538326461633133396163393464393335373866356133333038
30656133346362393635663566383938633663303662623136373537353462333239366331376462
64633239373639356463363464376564663162393064623635623033633966653139303766383437
63393832376561646330343637633761653232656238383636333963646332303734303539373730
37613833313332393663656466316639326164306636663861363530636338633337633833343630
36333636633164613130653732616236646663626332613234306530616565626666343335616565
37303464396237323261643236633264633838626236373734396535643466373035346436376133
63623765663134373261343431366261666565303631376533303465383161366135383263326663
35323766306238396430343965653335323437663161326233623066356464316434633234303162
35626634383366303436343038336336333963326530326161336462326535376264343564396231
32323662323839353939653065306261636338643139613933323634666633313636353864396166
35383633353735383430303930303437393563323264656439353730353839616561373639336664
31663237343136353564636366643865363464656534393832383531393532646166643637326337
38306139663863653131386263336138643831303031396537373835613731393834386261356435
39333331353635363633396337643234396231323463306465323636343539353232353464333236
31396139383137666536663365393362393832656336653535626430333033353737633661663366
65633130663937373861616131353631326135396366623231366131333432326662653365373134
37303734383038346530393866613965663262373638313536663863356563383732
34376237343736386538353235346231326462313534643130616532633535613331643236353764
3737383533313861373030313237366131356438393333350a323230316663346634636634353239
61326262653334646539626464646663383164666166306162646166333462383833333832353461
3437663431653566650a383632653134343238393762333131613633313036636536343831333630
34633361373264376263303364353531636434356263663965626639616666633861636463383637
34333838663834666532366564396566313739386262633335313335386661646166363636323766
35363535353664346463336566663163303333663065613532623265303262396531303831653636
65353565353233626331356666343932333539356331303161303062316433633761623132333033
65376632376266336361363832613064323861393366313763316434316264663562616134353766
62643165633030363237636632386166396538666337616430323534313062333965336233333836
36306637323764333233666239336331373763633737623666393466376163313738393036336232
34613536336336663837353031323665323733313634313731326537333938396361373435366435
32643338346635633962346537393338653464383431396432343932373439386230613537356134
64386165363233636237656364396333336261613037323136363630613533353639646439303337
31626663393335343962663033646135333366623738346436393764353438383264666666653635
64643462656332653361313766656633616134373166333163346131616334343161616235633666
3366
@@ -1,4 +0,0 @@
keycloak_https_whitelisted_ips:
  - 195.200.47.243/32 # DEV-230 - sparda berlin
  - 195.200.47.244/32 # DEV-230 - sparda berlin
  - 92.42.192.157/32  # MOB-28 - mobene
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,5 +0,0 @@
---

hetzner_server_labels: "stage={{ stage }} service=connect tenant=bdev"

hetzner_server_type: cpx21
@@ -1,4 +1,3 @@
---

hetzner_server_type: cpx21
server_type: "master"
@@ -1,4 +1,3 @@
---

hetzner_server_type: cpx21
server_type: "slave"
@@ -1,3 +0,0 @@
---

hetzner_server_type: cpx21
@@ -1,251 +0,0 @@
---

# restores remote database backup
# - postgres
#   - executed on stage specific server: {{ stage }}-restore-postgres-01
#   - restores a server from full-backup
# - mariadb
#   - executed on stage specific server: {{ stage }}-restore-maria-01
#   - restores a server from full-backup

# Parameters:
#   playbook inventory
#     stage := the name of the stage (e.g. dev, int, qa, prod)
#     database_engine := the database engine to restore a backup for (e.g. postgres, maria)
#   smardigo message callback
#     scope_id := (scope id of the management process)
#     process_instance_id := (process instance id of the management process)
#     smardigo_management_action := (smardigo management action name of the management process)

#############################################################
# Creating inventory dynamically for given parameters
#############################################################

- hosts: localhost
  connection: local
  gather_facts: false

  pre_tasks:
    - name: "Check if ansible version is at least 2.10.x"
      assert:
        that:
          - ansible_version.major >= 2
          - ansible_version.minor >= 10
        msg: "The ansible version has to be at least 2.10, found {{ ansible_version.full }}"

    # add virtual server to load stage specific variables as context
    - name: "Add <{{ stage }}-virtual-host-to-read-groups-vars> to hosts"
      add_host:
        name: "{{ stage }}-virtual-host-to-read-groups-vars"
        groups:
          - "stage_{{ stage }}"
      changed_when: False

  tasks:
    - name: "Add {{ database_engine }} servers to hosts if necessary"
      add_host:
        name: "{{ stage }}-restore-{{ database_engine }}-01"
        groups:
          - "stage_{{ stage }}"
          - 'restore'
      changed_when: False
    - name: "Add 'backup' servers to hosts if necessary"
      add_host:
        name: "{{ stage }}-backup-01"
        groups:
          - "stage_{{ stage }}"
          - backup
      changed_when: False

#############################################################
# Create restore server(s)
#############################################################

- hosts: "restore"
  serial: "{{ serial_number | default(1) }}"
  gather_facts: false
  remote_user: root

  roles:
    - role: hcloud
      vars:
        sma_digitalocean_ttl: 60 # set it to 60sec to reduce DNS caching problems with internal IT in case of debugging ansible problems ;)

#############################################################
# Provisioning server(s) for created inventory
#############################################################

- hosts: "restore"
  serial: "{{ serial_number | default(1) }}"
  remote_user: root
  vars:
    ansible_ssh_host: "{{ stage_server_domain }}"

  pre_tasks:
    - name: "Import autodiscover pre-tasks"
      import_tasks: tasks/autodiscover_pre_tasks.yml
      become: false
      tags:
        - always

  roles:
    - role: common

    - role: filebeat
      when: filebeat_enabled | default(True)

    - role: node_exporter
      when: node_exporter_enabled | default(True)

    - role: restore_{{ database_engine }}

#############################################################
# add restore specific firewall rule
#############################################################

- hosts: "{{ stage }}-virtual-host-to-read-groups-vars"
  serial: "{{ serial_number | default(1) }}"
  gather_facts: false
  connection: local
  vars:
    hcloud_firewall_objects_backup:
      -
        name: "{{ stage }}-restore-ssh-access"
        state: present
        rules:
          -
            direction: in
            protocol: tcp
            port: '22'
            source_ips:
              - "{{ lookup('community.general.dig', groups['backup'][0] + '.' + domain ) }}/32"
            destination_ips: []
            description: null
        apply_to:
          -
            type: label_selector
            label_selector:
              selector: 'service=restore'

  tasks:
    - name: "Add hcloud firewall rule(s)"
      include_role:
        name: hcloud
        tasks_from: configure-firewall2
      loop: "{{ hcloud_firewall_objects_backup }}"
      loop_control:
        loop_var: firewall_object

#############################################################
# Syncing backups from backup server to restore server
#############################################################

- hosts: "backup"
  serial: "{{ serial_number | default(5) }}"
  gather_facts: false
  vars:
    backupserver_system_user: 'backuphamster'
    ansible_ssh_host: "{{ stage_server_domain }}"
  tasks:
    # I could not get it up and running with the <synchronize> module
    # to sync data from remote server A to remote server B
    - name: "Syncing remote backups"
      become: yes
      become_user: '{{ backupserver_system_user }}'
      vars:
        database_server_ip: "{{ groups['restore'][0] }}.{{ domain }}"
      shell: '/home/{{ backupserver_system_user }}/push_backups_to_restore_server.sh {{ database_server_ip }} {{ stage }} {{ database_engine }}'
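On the <synchronize> comment above: remote-to-remote copies with ansible.posix.synchronize usually work by targeting the destination host and delegating the task to the source host, so rsync runs on the source. A hedged sketch (it assumes SSH trust from the backup host to the restore host, which the script workaround below sets up by hand):

- hosts: restore
  tasks:
    - name: "Push backups from the backup server (sketch)"
      ansible.posix.synchronize:
        src: "/home/backuphamster/backups/{{ stage }}/{{ database_engine }}/"
        dest: "/home/backupuser/backups/{{ stage }}/{{ database_engine }}/"
        mode: push
      delegate_to: "{{ groups['backup'][0] }}"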

#############################################################
# Restoring from backup
#############################################################

- hosts: "restore"
  serial: "{{ serial_number | default(1) }}"
  gather_facts: false
  vars:
    ansible_ssh_host: "{{ stage_server_domain }}"
  tasks:
    - name: "Triggering restore"
      become: yes
      shell: '/root/restore.sh {{ stage }}'

    - name: "Check for test data on postgres"
      block:

        - name: "Querying postgres ..."
          become: yes
          become_user: postgres
          community.postgresql.postgresql_query:
            db: dummytestdb
            query: SELECT movie FROM movie_quotes WHERE quote = %(quote_val)s
            named_args:
              quote_val: 'Shall we play'
          register: query_output

        - assert:
            that:
              - 'query_output.query_all_results | first | selectattr("movie","match","wargames") | length == 1'

      when:
        - database_engine == 'postgres'

    - name: "Check for test data on mariadb"
      block:

        - name: "Querying mariadb ..."
          become: yes
          become_user: root
          community.mysql.mysql_query:
            login_unix_socket: /run/mysqld/mysqld.sock
            login_db: dummytestdb
            query: SELECT movie FROM movie_quotes WHERE quote = %s
            positional_args:
              - 'Shall we play'
          register: query_output

        - assert:
            that:
              - 'query_output.query_result | first | selectattr("movie","match","wargames") | length == 1'

      when:
        - database_engine == 'maria'

#############################################################
# Deleting servers/domains for created inventory
#############################################################

- hosts: "restore"
  serial: "{{ serial_number | default(5) }}"
  gather_facts: false

  tasks:
    - name: "Delete server <{{ inventory_hostname }}>"
      include_role:
        name: hcloud
        tasks_from: _set_server_state
      vars:
        server_state: "absent"

    - name: "Delete DNS entry <{{ inventory_hostname }}> for <{{ domain }}>"
      include_role:
        name: sma_digitalocean
        tasks_from: _remove_dns
      vars:
        record_to_remove: '{{ inventory_hostname }}'

#############################################################
# Sending smardigo management message to process
#############################################################

- hosts: "{{ stage }}-virtual-host-to-read-groups-vars"
  serial: "{{ serial_number | default(1) }}"
  gather_facts: false
  connection: local
  run_once: true
  vars:
    connect_jwt_username: "{{ management_admin_username }}"

  tasks:
    - name: "Sending smardigo management message to <{{ smardigo_management_url }}>"
      include_tasks: tasks/smardigo_management_message.yml
@@ -1,32 +0,0 @@
#!/bin/bash
#
#
#

REMOTE_SYSTEM_USER=backupuser
DATABASE_SERVER_IP=$1
STAGE=$2
DATABASE_ENGINE=$3

# currently it defaults to today's date
DATE=$(date +%F)

LOCAL_BACKUP_DIR="${HOME}/backups/${STAGE}/${DATABASE_ENGINE}"
BACKUP_FILE_FOR_TRANSFER=$(find "${LOCAL_BACKUP_DIR}/${DATE}/" -name '*.gz.gpg' | tail -n 1)

REMOTE_BACKUP_DIR="/home/${REMOTE_SYSTEM_USER}/backups/${STAGE}/${DATABASE_ENGINE}"
DEST_DIR="${REMOTE_BACKUP_DIR}/${DATE}/"

# avoid "REMOTE HOST IDENTIFICATION HAS CHANGED" errors due to dynamically created servers in the restore process
ssh-keygen -f "/home/backuphamster/.ssh/known_hosts" -R ${DATABASE_SERVER_IP}

SSH_OPTIONS='-o StrictHostKeyChecking=no'

# needed because the rsync option --mkpath is unknown in rsync version 3.1.3
ssh ${SSH_OPTIONS} ${REMOTE_SYSTEM_USER}@${DATABASE_SERVER_IP} "mkdir -p ${DEST_DIR}"

rsync -v -e "ssh ${SSH_OPTIONS}" $BACKUP_FILE_FOR_TRANSFER ${REMOTE_SYSTEM_USER}@${DATABASE_SERVER_IP}:${DEST_DIR}

BKP_FILE_TRANSFERRED=$(echo $BACKUP_FILE_FOR_TRANSFER | awk -F / '{ print $NF}')

ssh ${SSH_OPTIONS} ${REMOTE_SYSTEM_USER}@${DATABASE_SERVER_IP} "test -f ${DEST_DIR}${BKP_FILE_TRANSFERRED}"
@@ -1,3 +0,0 @@
---

upload_directory: "{{ backup_directory }}"
@@ -1,11 +0,0 @@
---

### tags:

- name: "Export database <{{ target_database }}> to <{{ upload_directory }}/{{ database_backup_file }}>"
  community.mysql.mysql_db:
    name: "{{ target_database }}"
    state: dump
    target: "/{{ upload_directory }}/{{ database_backup_file }}"
    config_file: "/etc/mysql/mariadb.conf.d/50-client.cnf"
    login_password: "{{ mysql_root_password }}"
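A side note on the dump task above: community.mysql.mysql_db with state: dump compresses automatically when the target filename ends in .gz, .bz2 or .xz, so the *-wordpress.sql.gz targets passed in by the playbook arrive gzip-compressed without extra flags. A minimal hedged example:

- name: "Dump and gzip in one step (sketch)"
  community.mysql.mysql_db:
    name: "{{ target_database }}"
    state: dump
    target: "/tmp/{{ target_database }}.sql.gz"  # .gz suffix triggers compression
    login_password: "{{ mysql_root_password }}"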
@@ -1,61 +1,14 @@
---
- name: "Block to handle hetzner server state in case of problems"
  block:
    - name: "Increment the retry count"
      set_fact:
        retry_count: "{{ retry_count | default(0) | int + 1 }}"

    - name: "Checking state for server <{{ inventory_hostname }}> is <{{ server_state }}>"
      hetzner.hcloud.hcloud_server:
        api_token: "{{ hetzner_authentication_ansible }}"
        name: "{{ inventory_hostname }}"
        server_type: "{{ hetzner_server_type }}"
        image: "{{ hetzner_server_image }}"
        ssh_keys: "{{ hetzner_ssh_keys }}"
        labels: "{{ hetzner_server_labels }}"
        location: nbg1
        state: "{{ server_state }}"
      delegate_to: 127.0.0.1
      become: false
      async: 300
      poll: 5
      register: hcloud_response
      ignore_errors: yes

    - name: "Block - DEBUG: hcloud_response"
      debug:
        msg: '{{ hcloud_response.msg }}'
      when:
        - hcloud_response.msg is defined

    - name: "Ensure Server is STARTED when server_state=present"
      hetzner.hcloud.hcloud_server:
        api_token: "{{ hetzner_authentication_ansible }}"
        name: "{{ inventory_hostname }}"
        state: "started"
      delegate_to: 127.0.0.1
      become: false
      async: 150
      poll: 15
      register: hcloud_response
      when:
        - server_state == 'present'

  rescue:
    - name: "RESCUE - fail: Maximum retries reached"
      fail:
        msg: "max_retries of {{ max_retries }} reached. Please check."
      when: retry_count | int == max_retries | int

    - name: "RESCUE-fail DEBUG: hcloud_response"
      debug:
        msg: '{{ hcloud_response.msg }}'

    - name: "RESCUE: wait_for {{ retry_delay }} sec. between retries"
      wait_for:
        timeout: "{{ retry_delay }}"
      delegate_to: localhost
      become: false

    - name: "Include _set_server_state.yml one more time => increases retry_count"
      include_tasks: _set_server_state.yml
- name: "Checking state for server <{{ inventory_hostname }}> is <{{ server_state }}>"
  hetzner.hcloud.hcloud_server:
    api_token: "{{ hetzner_authentication_ansible }}"
    name: "{{ inventory_hostname }}"
    server_type: "{{ hetzner_server_type }}"
    image: "{{ hetzner_server_image }}"
    ssh_keys: "{{ hetzner_ssh_keys }}"
    labels: "{{ hetzner_server_labels }}"
    location: nbg1
    state: "{{ server_state }}"
  delegate_to: 127.0.0.1
  become: false
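The new, shorter side of this hunk drops the recursive include/rescue retry loop entirely. If retries were still wanted, Ansible's built-in until loop is a more compact option — a sketch reusing the max_retries and retry_delay variables from the removed version:

- name: "Ensure server state with built-in retries (sketch)"
  hetzner.hcloud.hcloud_server:
    api_token: "{{ hetzner_authentication_ansible }}"
    name: "{{ inventory_hostname }}"
    state: "{{ server_state }}"
  delegate_to: 127.0.0.1
  become: false
  register: hcloud_response
  retries: "{{ max_retries }}"
  delay: "{{ retry_delay }}"
  until: hcloud_response is succeeded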
@@ -0,0 +1,2 @@
---

@@ -1,34 +0,0 @@
---
- name: "Do some stuff in elastic with spaces ..."
  include_role:
    name: kibana
    tasks_from: _configure_spaces.yml
  loop: "{{ technical_user.elastic_spaces }}"
  loop_control:
    loop_var: elastic_space

- name: "Do some stuff in elastic with roles ..."
  include_role:
    name: kibana
    tasks_from: _configure_roles.yml
  loop: "{{ technical_user.elastic_roles }}"
  loop_control:
    loop_var: elastic_role

- name: "Do some stuff in elastic with users ..."
  include_role:
    name: kibana
    tasks_from: _configure_users.yml
  loop: "{{ technical_user.elastic_users }}"
  loop_control:
    loop_var: elastic_user

- name: "Do some stuff in elastic with index pattern ..."
  vars:
    es_space: "default"
  include_role:
    name: kibana
    tasks_from: _configure_indexpattern.yml
  loop: "{{ technical_user.elastic_index_patterns }}"
  loop_control:
    loop_var: elastic_index_pattern
@@ -1,4 +1,4 @@
---

k8s_namespace: "default"
k8s_docker_registry_key: "harbor-pull-secret-key"
k8s_docker_registry_key: "{{ stage }}-harbor-key"
@@ -1,64 +0,0 @@
---

### tags:
###   update_configurations

- name: "Running all block tasks on localhost"
  delegate_to: 127.0.0.1
  become: false
  block:

    - name: "Checking connect is running on <{{ connect_base_url }}>"
      uri:
        url: "{{ http_s }}://{{ connect_base_url }}/api/profile-info"
        method: GET
        headers:
          "Smardigo-User-Token": "{{ smardigo_auth_token_value }}"
        status_code: [200]
      register: connect_profile_info
      delay: 15
      retries: 10
      until: connect_profile_info.status in [200]
      tags:
        - update_configurations

    - name: "Creating temp directory for smardigo configuration archives"
      ansible.builtin.tempfile:
        state: directory
        suffix: temp
      register: temp
      tags:
        - update_configurations

    - name: "Creating archives of smardigo configuration"
      archive:
        path: "./smardigo/{{ item }}"
        dest: "{{ temp.path }}/{{ item }}.zip"
        format: zip
        mode: '0644'
      with_items: "{{ connect_configurations }}"
      tags:
        - update_configurations

    - name: "Upload configuration zip file to <{{ connect_base_url }}>"
      uri:
        url: "{{ http_s }}://{{ connect_base_url }}/api/v1/config/import-zip"
        method: POST
        headers:
          Smardigo-User-Token: "{{ smardigo_auth_token_value }}"
        body_format: form-multipart
        body:
          file:
            filename: "{{ temp.path }}/{{ item }}.zip"
            mime_type: "application/zip"
        status_code: [200]
      register: config_import_result
      with_items: "{{ connect_configurations }}"
      tags:
        - update_configurations

    - name: "Show configuration import result"
      debug:
        msg: '{{ config_import_result }}'
      tags:
        - update_configurations
@@ -0,0 +1 @@
---
@@ -1,52 +0,0 @@
-- MySQL dump 10.19  Distrib 10.3.34-MariaDB, for debian-linux-gnu (x86_64)
--
-- Host: localhost    Database: stararchitekt
-- ------------------------------------------------------
-- Server version	10.3.34-MariaDB-0ubuntu0.20.04.1

/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;

--
-- Table structure for table `movie_quotes`
--

DROP TABLE IF EXISTS `movie_quotes`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `movie_quotes` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `movie` varchar(255) DEFAULT NULL,
  `quote` varchar(255) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;

--
-- Dumping data for table `movie_quotes`
--

LOCK TABLES `movie_quotes` WRITE;
/*!40000 ALTER TABLE `movie_quotes` DISABLE KEYS */;
INSERT INTO `movie_quotes` VALUES (1,'wargames','Shall we play');
/*!40000 ALTER TABLE `movie_quotes` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;

/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;

-- Dump completed on 2022-03-31  9:46:16
@@ -1,12 +0,0 @@
---
- name: "prometheus-mysqld-exporter restart"
  service:
    name: prometheus-mysqld-exporter
    state: restarted
    enabled: yes

- name: "restart mysql"
  service:
    name: mariadb
    state: restarted
    enabled: yes
@@ -1,40 +0,0 @@
---
# task bundle simply copied from main.yml
# TODO: migrate to https://github.com/cloudalchemy/ansible-mysqld_exporter
- name: "Install prometheus-mysqld-exporter" # noqa package-latest
  package:
    name: "{{ item }}"
    state: latest
  with_items:
    - prometheus-mysqld-exporter

- name: Ensure prometheus user for prometheus-mysqld-exporter exists
  community.mysql.mysql_user:
    name: "prometheus"
    priv: "*.*:PROCESS,REPLICATION CLIENT,SELECT"
    login_unix_socket: /run/mysqld/mysqld.sock
    login_password: "{{ mysql_root_password }}"
  register: mysql_exporter_user_creds
  notify: prometheus-mysqld-exporter restart

- name: Ensure prometheus-mysqld-exporter is configured
  lineinfile:
    regex: "^DATA_SOURCE_NAME="
    line: 'DATA_SOURCE_NAME="prometheus@unix(/run/mysqld/mysqld.sock)/"'
    path: /etc/default/prometheus-mysqld-exporter
  register: mysql_exporter_data_source
  notify: prometheus-mysqld-exporter restart

- name: Setup prometheus-mysqld-exporter interface bind
  lineinfile:
    path: /etc/default/prometheus-mysqld-exporter
    regex: "^ARGS="
    line: "ARGS=\"--web.listen-address='{{ stage_private_server_ip }}:{{ monitor_port_maria }}'\""
  register: mysql_exporter_args
  notify: prometheus-mysqld-exporter restart

- name: "Ensure prometheus-mysqld-exporter is running"
  service:
    name: prometheus-mysqld-exporter
    state: started
    enabled: yes
@@ -1,25 +0,0 @@
{{ ansible_managed | comment }}

[server]

[mysqld]

pid-file = /run/mysqld/mysqld.pid
basedir = /usr
bind-address = {{ mariadb_server_bind_address }}
expire_logs_days = 10
character-set-server = utf8mb4
collation-server = utf8mb4_general_ci

[embedded]

[mariadb]
require_secure_transport = on
ssl_key = {{ cert_private_key }}
ssl_cert = {{ cert_public_key }}
ssl_ca = {{ ca_cert }}
ssl = on
tls_version = TLSv1.2,TLSv1.3
ssl_cipher = TLSv1.2,TLSv1.3

[mariadb-10.6]
@@ -1,60 +0,0 @@
--
-- PostgreSQL database dump
--

-- Dumped from database version 13.6 (Ubuntu 13.6-1.pgdg20.04+1)
-- Dumped by pg_dump version 13.6 (Ubuntu 13.6-1.pgdg20.04+1)

SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;

--
-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: -
--

CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public;


--
-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner:
--

COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions';


SET default_tablespace = '';

SET default_table_access_method = heap;

--
-- Name: movie_quotes; Type: TABLE; Schema: public; Owner: postgres
--

CREATE TABLE public.movie_quotes (
    movie character varying(255),
    quote character varying(255)
);


ALTER TABLE public.movie_quotes OWNER TO postgres;

--
-- Data for Name: movie_quotes; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.movie_quotes (movie, quote) FROM stdin;
wargames	Shall we play
\.


--
-- PostgreSQL database dump complete
--
@@ -1,82 +0,0 @@
---
- name: "Delete package <prometheus-postgres-exporter>"
  apt:
    name: "prometheus-postgres-exporter"
    state: absent

- name: "Check if version is already installed"
  ansible.builtin.stat:
    path: "{{ postgres_exporter_dir }}/{{ postgres_exporter_dist }}/postgres_exporter"
  register: check_pg_exp

- name: "Download and extract pg_exporter"
  unarchive:
    src: "{{ postgres_exporter_download_url }}"
    dest: "{{ postgres_exporter_dir }}"
    owner: root
    group: root
    mode: "u=rwx,g=rx,o=rx"
    remote_src: true
    creates: "{{ postgres_exporter_dir }}/{{ postgres_exporter_dist }}/postgres_exporter"
  when:
    - not check_pg_exp.stat.exists

- name: "Create systemd service file"
  become: true
  template:
    src: "postgres_exporter.systemd.j2"
    dest: "/etc/systemd/system/postgres_exporter.service"
    owner: root
    group: root
    mode: "u=rw,go=r"
  notify:
    - restart postgres_exporter

- name: "Create Config for postgres_exporter"
  template:
    src: "postgres_exporter.default.conf.j2"
    dest: "/etc/default/postgres_exporter"
    owner: root
    group: "{{ postgres_exporter_group }}"
    mode: "u=rw,g=r,o="
  notify: restart postgres_exporter

- name: "Create file for additional queries"
  copy:
    dest: '{{ postgres_exporter_home }}/queries.yml'
    owner: root
    group: '{{ postgres_exporter_group }}'
    mode: '0644'
    content: "{{ lookup('vars','postgres_exporter_additional_queries') | to_nice_yaml }}"
  notify: restart postgres_exporter

- name: "Ensure postgres_exporter up and running"
  service:
    name: postgres_exporter
    state: started
    enabled: yes
    daemon_reload: yes

- name: Check role prometheus exists # noqa command-instead-of-shell no-changed-when
  become: yes
  become_user: postgres
  shell: "/usr/bin/psql -Atc \"SELECT count(rolname) FROM pg_roles where rolname='prometheus'\""
  register: role_check
  ignore_errors: yes

- name: "Copy prometheus_postgres_exporter init script"
  copy:
    src: init.sql
    dest: /tmp/prometheus_postgres_exporter.sql
    mode: '0755'
  when: "role_check.stdout == '0' and server_type == 'master'"

- name: "Execute prometheus_postgres_exporter init script" # noqa command-instead-of-shell
  become: true
  become_user: postgres
  shell: "psql -f /tmp/prometheus_postgres_exporter.sql"
  when: "role_check.stdout == '0' and server_type == 'master'"

- name: "Delete prometheus_postgres_exporter init script"
  file:
    path: "/tmp/prometheus_postgres_exporter.sql"
    state: absent
  when: "role_check.stdout == '0' and server_type == 'master'"
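The shell-based psql role check above could also lean on the community.postgresql collection that already appears elsewhere in this diff; a hedged sketch:

- name: "Check role prometheus exists (sketch)"
  become: yes
  become_user: postgres
  community.postgresql.postgresql_query:
    query: "SELECT count(rolname) AS cnt FROM pg_roles WHERE rolname = 'prometheus'"
  register: role_check_q

The later when: conditions would then compare role_check_q.query_result[0].cnt instead of parsing role_check.stdout.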
@@ -1,7 +0,0 @@
{% if postgres_exporter_datasource_name is defined %}
DATA_SOURCE_NAME="{{ postgres_exporter_datasource_name }}"
{% endif %}
{% if postgres_exporter_datasource_uri is defined %}
DATA_SOURCE_URI="{{ postgres_exporter_datasource_uri }}"
{% endif %}
FLAGS="{{ postgres_exporter_flags | join(' ') }}"
@@ -1,16 +0,0 @@
[Unit]
Description=postgres_exporter - Prometheus exporter for PostgreSQL server metrics.
Documentation=https://github.com/prometheus/postgres_exporter
After=network.target

[Service]
User={{ postgres_exporter_user }}
Group={{ postgres_exporter_group }}
EnvironmentFile={{ postgres_exporter_config_file }}
ExecStart={{ postgres_exporter_binary }} $FLAGS

SyslogIdentifier=postgres_exporter
Restart=always

[Install]
WantedBy=multi-user.target
@@ -1,2 +0,0 @@
---
mariadb_server_with_mysqld_exporter: False
@@ -1,29 +0,0 @@
#!/bin/bash
#
#
#
#

STAGE=$1

DATADIR='/var/lib/mysql'
DATE=$(date +%F)


systemctl stop mariadb

rm -rf ${DATADIR}_moved
mv ${DATADIR} ${DATADIR}_moved
mkdir -p ${DATADIR}

LOCAL_BACKUP_DIR="/home/backupuser/backups/${STAGE}/maria"
BACKUP_FILE_ENCRYPTED=$(find "${LOCAL_BACKUP_DIR}/${DATE}/" -name '*.gz.gpg' | head -n 1)

# --batch => avoid error: >> gpg: cannot open '/dev/tty': No such device or address <<
gpg --batch --decrypt $BACKUP_FILE_ENCRYPTED | gunzip | mbstream --directory ${DATADIR} -x --parallel=2

mariabackup --prepare --target-dir=${DATADIR}

chown -R mysql:mysql ${DATADIR}

systemctl start mariadb
@@ -1,43 +0,0 @@
---
# DEV-375
# "fixes" error for mysql-connect as root-user
# it's just a restore server ...
- name: "Ensure passwordless mysql-connect for root"
  copy:
    dest: '/root/.my.cnf'
    owner: root
    group: root
    mode: '0600'
    content: |
      [client]
      user={{ mysql_root_username }}
      password={{ mysql_root_password }}

- name: "Install mariadb-server via include_role"
  include_role:
    name: maria

- name: "Copy restore script to restore server"
  copy:
    src: restore.sh
    dest: '/root/restore.sh'
    mode: '0750'
    owner: root
    group: root

- name: "Create file for gpg secret key"
  become: yes
  copy:
    dest: '/root/gpg_private_key'
    mode: '0600'
    owner: 'root'
    group: 'root'
    content: |
      {{ gpg_key_smardigo_automation__private }}

- name: "Import private gpg key" # noqa command-instead-of-shell
  become: yes
  shell: 'gpg --import /root/gpg_private_key'
  register: gpg_import
  changed_when:
    - gpg_import.rc != 0
@@ -1,6 +0,0 @@
[mysqld]
ssl_key = {{ cert_private_key }}
ssl_cert = {{ cert_public_key }}
ssl_ca = {{ ca_cert }}
tls_version = TLSv1.2,TLSv1.3
ssl_cipher = TLSv1.2,TLSv1.3
@@ -1,30 +0,0 @@
#!/bin/bash
#
#
#

STAGE=$1

DATADIR='/var/lib/postgresql/13/main'
DATE=$(date +%F)

PG_USER=postgres
PG_GROUP=postgres

systemctl stop postgresql

rm -rf ${DATADIR}_moved

mv ${DATADIR} ${DATADIR}_moved
mkdir -p ${DATADIR}

LOCAL_BACKUP_DIR="/home/backupuser/backups/${STAGE}/postgres"
BACKUP_FILE_ENCRYPTED=$(find "${LOCAL_BACKUP_DIR}/${DATE}/" -name '*.gz.gpg' | head -n 1)

# --batch => avoid error: >> gpg: cannot open '/dev/tty': No such device or address <<
gpg --batch --decrypt $BACKUP_FILE_ENCRYPTED | tar -xz -C ${DATADIR}

chmod 0700 ${DATADIR}
chown -R ${PG_USER}:${PG_GROUP} ${DATADIR}

systemctl start postgresql
@@ -1,29 +0,0 @@
---
- name: "Install postgres via include_role"
  include_role:
    name: postgres

- name: "Copy restore script to restore server"
  copy:
    src: restore.sh
    dest: '/root/restore.sh'
    mode: '0754'
    owner: root
    group: root

- name: "Create file for gpg secret key"
  become: yes
  copy:
    dest: '/root/gpg_private_key'
    mode: '0600'
    owner: 'root'
    group: 'root'
    content: |
      {{ gpg_key_smardigo_automation__private }}

- name: "Import private gpg key" # noqa command-instead-of-shell
  become: yes
  shell: 'gpg --import /root/gpg_private_key'
  register: gpg_import
  changed_when:
    - gpg_import.rc != 0
@@ -1,3 +0,0 @@
---
selfsigned_ca_dir: '/etc/ssl/selfsigned_ca'
selfsigned_ca_private_key_passphrase: '{{ selfsigned_ca_private_key_passphrase_vault }}'
Some files were not shown because too many files have changed in this diff