feat: added traefik and node_exporter

master
Sven Ketelsen 5 years ago
parent 7eefe6b28f
commit c63d557861

@@ -22,3 +22,7 @@ Create/Start servers for stage-dev
# Provisioning
ansible-playbook -i stage-dev setup.yml --vault-password-file ~/vault-pass -u root
# TODO
212.121.131.106 - Siemansdamm - IPFire

@@ -2,18 +2,24 @@
send_status_messages: true
domain: smardigo.digital
use_ssl: true
http_s: "http{{ use_ssl | ternary('s', '') }}"
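# http_s renders to "https" when use_ssl is true, otherwise "http"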
service_prefix: ''
service_suffix: ''
service_name: "{{ inventory_hostname }}"
stage_server_hostname: "{{ inventory_hostname }}"
domain: smardigo.digital
service_url: "{{ service_name }}.{{ domain }}"
stage_server_name: "{{ inventory_hostname }}"
stage_server_hostname: "{{ inventory_hostname }}"
stage_server_url_host: "{{ stage_server_name }}.{{ domain }}"
stage_server_url: "{{ http_s }}://{{ stage_server_name }}.{{ domain }}"
ansible_ssh_host: "{{ inventory_hostname }}.{{ domain }}"
admin_user: "administrator"
admin_user: "root"
sudo_groups: [
{
@@ -45,12 +51,58 @@ default_plattform_users:
smardigo_plattform_users:
- 'sven.ketelsen'
docker_owner: "{{ admin_user }}"
docker_group: "{{ admin_user }}"
service_base_path: '/etc/smardigo'
# TODO we need a company email address
lets_encrypt_email: "sven.ketelsen@arxes-tolina.de"
# TODO place the caddy config file relative to the system instead of the docker folder
caddy_base_path: '{{ service_base_path }}/caddy'
caddy_config_file_path: 'config/caddy/Caddyfile'
caddy_config_file_path_full: '{{ caddy_base_path }}/{{ caddy_config_file_path }}'
caddy_landing_page_file_path: 'config/static_files/index.html'
caddy_landing_page_file_path_full: '{{ caddy_base_path }}/{{ caddy_landing_page_file_path }}'
caddy_landing_page_service_table_file_path: 'config/static_files/service_table.json'
caddy_landing_page_service_table_file_path_full: '{{ caddy_base_path }}/{{ caddy_landing_page_service_table_file_path }}'
caddy_landing_page_service_table_folder_path_full: '{{ caddy_base_path }}/config/static_files/'
http_port: "80"
https_port: "443"
service_port: "8080"
management_port: "8081"
service_port_cadvisor: "8080"
service_port_elasticsearch: "9200"
service_port_iam: "8082"
service_port_keycloak: "8080"
service_port_kibana: "5601"
service_port_logstash: "5044"
service_port_mssql: "1433"
service_port_node_exporter: "9100"
service_port_postgres: "5432"
service_port_portainer: "9000"
service_port_pgadmin: "9001"
service_port_phpmyadmin: "9002"
service_port_sonarqube: "9000"
monitor_port_service: "9081"
monitor_port_system: "9082"
monitor_port_docker: "9083"
admin_port_traefik: "9080"
hetzner_server_type: cx11
hetzner_server_image: ubuntu-20.04
hetzner_ssh_keys:
- sven.ketelsen@arxes-tolina.de
#reverse_proxy_admin_username: "< see vault >"
#reverse_proxy_admin_password: "< see vault >"
#mattermost_hook_smardigo: "< see vault >"
#hetzner_authentication_token: "< see vault >"
#digitalocean_authentication_token: "< see vault >"

@@ -1,20 +1,30 @@
$ANSIBLE_VAULT;1.1;AES256
(re-encrypted vault ciphertext omitted)

@@ -0,0 +1,39 @@
---
- name: 'Insert/Update caddy configuration in {{ caddy_config_file_path_full }}'
blockinfile:
marker: '# {mark} managed by ansible (reverse proxy config for {{ current_service }})'
path: '{{ caddy_config_file_path_full }}'
state: "{{ 'present' if reverse_proxy == 'caddy' else 'absent' }}"
create: yes
block: |
{% for service in current_services %}
{{ http_s }}://{{ service.external }} {
proxy / {{ service.internal }} {
transparent
}
tls {{ caddy_tls }}
{% if service.basicauth is defined %}
basicauth {{ service.basicauth }}
{% endif %}
}
{% endfor %}
tags:
- update_deployment
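# For illustration (hypothetical values): with use_ssl=true,
# caddy_tls="admin@example.org" and a service entry
# {external: "kibana.example.org", internal: "kibana:5601"},
# the managed block would render roughly as:
#   https://kibana.example.org {
#     proxy / kibana:5601 {
#       transparent
#     }
#     tls admin@example.org
#   }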
- name: "Stop caddy"
shell: docker-compose down
args:
chdir: '{{ service_base_path }}/caddy'
ignore_errors: yes
when: reverse_proxy == 'caddy'
tags:
- update_deployment
- name: "Start caddy"
shell: docker-compose up -d
args:
chdir: '{{ service_base_path }}/caddy'
when: reverse_proxy == 'caddy'
tags:
- update_deployment

@@ -0,0 +1,55 @@
---
- name: "Check if landing page service table exists"
stat:
path: "{{ caddy_landing_page_service_table_file_path_full }}"
register: check_caddy_landing_page_service_table_file
tags:
- update_deployment
- name: "Read landing page service table data"
slurp:
src: "{{ caddy_landing_page_service_table_file_path_full }}"
register: landing_page_service_table_plain
when: check_caddy_landing_page_service_table_file.stat.exists
tags:
- update_deployment
- name: "Set landing page service table as variable"
set_fact:
landing_page_service_table: "{{ landing_page_service_table_plain['content'] | b64decode }}"
when: check_caddy_landing_page_service_table_file.stat.exists
tags:
- update_deployment
- name: "Read landing page service table data"
set_fact:
landing_page_service_table: []
when: not check_caddy_landing_page_service_table_file.stat.exists
tags:
- update_deployment
- name: "Update landing page service table variable"
set_fact:
landing_page_service_table: "{{ ([item] + landing_page_service_table) | unique(attribute='current_name') }}"
with_items: "{{ current_services }}"
tags:
- update_deployment
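# Prepending [item] makes the freshly deployed entry win when
# unique(attribute='current_name') deduplicates (unique keeps the first
# occurrence), e.g. a new {current_name: traefik, current_version: v2.4}
# replaces an old {current_name: traefik, current_version: v2.3}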
- name: 'Ensure {{ caddy_landing_page_service_table_folder_path_full }} directory exists'
file:
state: directory
path: '{{ caddy_landing_page_service_table_folder_path_full }}'
tags:
- update_deployment
- update_config
- name: "Write landing page service table"
copy:
content: "{{ landing_page_service_table | to_nice_json }}"
dest: "{{ caddy_landing_page_service_table_file_path_full }}"
owner: "{{ docker_owner }}"
group: "{{ docker_group }}"
mode: 0644
tags:
- update_deployment

@@ -0,0 +1,58 @@
---
- name: 'Ensure {{ current_base_path }}/{{ current_destination }} directory exists'
file:
state: directory
path: '{{ current_base_path }}/{{ current_destination }}'
tags:
- update_deployment
- update_config
- name: 'Ensure directory structure for {{ current_config }} exists'
file:
path: "{{ current_base_path }}/{{ current_destination }}/{{ item.path }}"
state: directory
owner: "{{ current_owner }}"
group: "{{ current_group }}"
mode: 0755
with_filetree: "templates/{{ current_config }}"
when: item.state == "directory"
tags:
- update_config
- name: Ensure docker files are populated from templates
template:
src: "{{ item.src }}"
dest: "{{ current_base_path }}/{{ current_destination }}/{{ item.path | regex_replace('\\.j2$', '') }}"
owner: "{{ current_owner }}"
group: "{{ current_group }}"
mode: 0644
with_filetree: "templates/_docker"
when: item.state == 'file' and item.src is match('.*\.j2$')
tags:
- update_deployment
- update_config
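# with_filetree mirrors the whole templates tree and regex_replace strips the
# .j2 suffix, so e.g. templates/_docker/docker-compose.yml.j2 is rendered to
# {{ current_base_path }}/{{ current_destination }}/docker-compose.yml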
- name: Ensure config template files are populated from templates
template:
src: "{{ item.src }}"
dest: "{{ current_base_path }}/{{ current_destination }}/{{ item.path | regex_replace('\\.j2$', '') }}"
owner: "{{ current_owner }}"
group: "{{ current_group }}"
mode: 0644
with_filetree: "templates/{{ current_config }}"
when: item.state == 'file' and item.src is match('.*\.j2$')
tags:
- update_config
- name: Ensure config files are populated from templates
copy:
src: "{{ item.src }}"
dest: "{{ current_base_path }}/{{ current_destination }}/{{ item.path }}"
owner: "{{ current_owner }}"
group: "{{ current_group }}"
mode: 0644
with_filetree: "templates/{{ current_config }}"
when: item.state == 'file' and item.src is not match('.*\.j2$')
tags:
- update_config

@@ -0,0 +1,14 @@
---
- name: "Register variable for docker networks"
shell: docker network ls
register: docker_networks
changed_when: false
- name: "Docker network create back-tier"
shell: docker network create back-tier
when: docker_networks.stdout.find("back-tier") == -1
- name: "Docker network create front-tier"
shell: docker network create front-tier
when: docker_networks.stdout.find("front-tier") == -1
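# Note: stdout.find() is a substring test, so any network whose name merely
# contains "back-tier"/"front-tier" would also suppress creation; an exact
# check could key on the return code of `docker network inspect <name>`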

@@ -0,0 +1,123 @@
# $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
Include /etc/ssh/sshd_config.d/*.conf
#Port 22
#AddressFamily any
ListenAddress 5.9.148.23
ListenAddress 212.121.131.106
#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
#LogLevel INFO
# Authentication:
#LoginGraceTime 2m
PermitRootLogin yes
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
#PubkeyAuthentication yes
# Expect .ssh/authorized_keys2 to be disregarded by default in future.
#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
PasswordAuthentication no
#PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin yes".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# PermitTTY no
# ForceCommand cvs server

@@ -8,11 +8,5 @@
- name: restart ssh
service:
name: sshd
state: restarted
- name: reload NetworkManager
service:
name: NetworkManager
state: reloaded

@@ -104,15 +104,6 @@
tags:
- users
#- name: "Set up authorized keys as administrator"
# authorized_key:
# user: administrator
# state: present
# key: "{{ lookup('file', '{{ inventory_dir }}/keys/{{ item }}/id_rsa.pub') }}"
# loop: '{{ smardigo_plattform_users }}'
# tags:
# - users
- name: "Ensure docker configuration directory exists" - name: "Ensure docker configuration directory exists"
file: file:
path: '/home/{{ item }}/.docker/' path: '/home/{{ item }}/.docker/'
@@ -186,33 +177,14 @@
tags:
- config
#- name: "Make sure line 'dns=none' is set in /etc/NetworkManager/NetworkManager.conf" - name: sshd configuration file update
# ini_file: template:
# path: /etc/NetworkManager/NetworkManager.conf src: 'configs/sshd/sshd_config.j2'
# state: present dest: '/etc/ssh/sshd_config'
# no_extra_spaces: yes backup: yes
# section: main mode: 0644
# option: dns notify:
# value: none - restart ssh
# owner: root
# group: root
# mode: 0644
# notify:
# - reload NetworkManager
# tags:
# - config
#- name: "Deploy resolv.conf template"
# template:
# src: resolv.conf.j2
# dest: /etc/resolv.conf
# owner: root
# group: root
# mode: 0644
# notify:
# - reload NetworkManager
# tags:
# - config
# elasticsearch production mode requirements
- name: "Set vm.max_map_count"

@@ -2,8 +2,8 @@
### tags:
- name: Create a new server {{ inventory_hostname }}
- name: Create new server {{ inventory_hostname }}
hcloud_server:
hetzner.hcloud.hcloud_server:
api_token: "{{ hetzner_authentication_token }}"
name: "{{ inventory_hostname }}"
server_type: "{{ hetzner_server_type }}"
@@ -12,6 +12,25 @@
state: present
delegate_to: 127.0.0.1
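# Note: the hcloud_* tasks run on the controller (delegate_to: 127.0.0.1) and
# need the `hcloud` Python package installed there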
#- name: Create management network and server {{ inventory_hostname }}
# hetzner.hcloud.hcloud_server_network:
# api_token: "{{ hetzner_authentication_token }}"
# network: "management"
# server: "{{ inventory_hostname }}"
# state: present
# delegate_to: 127.0.0.1
#- hetzner.hcloud.hcloud_firewall:
# api_token: "{{ hetzner_authentication_token }}"
# name: "{{ inventory_hostname }}"
# rules:
# - direction: in
# protocol: icmp
# source_ips:
# - 212.121.131.106/0
# state: present
# delegate_to: 127.0.0.1
- name: Gather current server info
hcloud_server_info:
api_token: "{{ hetzner_authentication_token }}"

@@ -0,0 +1,68 @@
---
node_exporter_id: "{{ service_name }}-node-exporter"
node_exporter_image_name: "prom/node-exporter"
node_exporter_image_version: "v1.1.2"
node_exporter_docker: {
networks: [
{
name: front-tier,
external: true,
},
],
services: [
{
name: "{{ node_exporter_id }}",
image_name: "{{ node_exporter_image_name }}",
image_version: "{{ node_exporter_image_version }}",
command: [
'"--path.procfs=/host/proc"',
'"--path.sysfs=/host/sys"',
'"--no-collector.systemd"',
'"--no-collector.logind"',
'"--no-collector.ntp"',
'"--no-collector.bonding"',
'"--no-collector.bcache"',
'"--no-collector.arp"',
'"--no-collector.edac"',
'"--no-collector.infiniband"',
'"--no-collector.ipvs"',
'"--no-collector.mdadm"',
'"--no-collector.nfs"',
'"--no-collector.nfsd"',
'"--no-collector.wifi"',
'"--no-collector.hwmon"',
'"--no-collector.conntrack"',
'"--no-collector.timex"',
'"--no-collector.zfs"',
'"--collector.tcpstat"',
'"--collector.interrupts"',
'"--collector.meminfo_numa"',
'"--collector.processes"',
'"--collector.textfile"',
'"--collector.textfile.directory=/rootfs/textfiles"',
'"--collector.filesystem.ignored-mount-points"',
'"^/(sys|proc|dev|host|etc|run|run/lock|boot|var/lib/docker|run/docker/netns|var/lib/docker/aufs)($$|/)"',
],
labels: [
'"traefik.enable=true"',
'"traefik.http.routers.{{ node_exporter_id }}.service={{ node_exporter_id }}"',
'"traefik.http.routers.{{ node_exporter_id }}.rule=Host(`{{ service_url }}`)"',
'"traefik.http.routers.{{ node_exporter_id }}.entrypoints=admin-system"',
'"traefik.http.routers.{{ node_exporter_id }}.tls=true"',
'"traefik.http.routers.{{ node_exporter_id }}.tls.certresolver=letsencrypt"',
'"traefik.http.services.{{ node_exporter_id }}.loadbalancer.server.port={{ service_port_node_exporter }}"',
],
volumes: [
'"/proc:/host/proc:ro"',
'"/sys:/host/sys:ro"',
'"/:/rootfs:ro"',
],
networks: [
'"front-tier"'
]
}
]
}
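# Quoting note: each list entry carries its own double quotes ('"..."')
# because docker-compose.yml.j2 interpolates the items verbatim; the inner
# quotes become the YAML string quoting in the rendered compose file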

@@ -0,0 +1,61 @@
---
- name: "Send mattermost messsge"
uri:
url: "{{ mattermost_hook_smardigo }}"
method: POST
body: "{{ lookup('template','mattermost-deploy-start.json.j2') }}"
body_format: json
headers:
Content-Type: "application/json"
delegate_to: 127.0.0.1
become: false
when:
- send_status_messages
- name: "Check docker networks"
include_role:
name: _docker
tasks_from: networks
- name: "Check if node-exporter/docker-compose.yml exists"
stat:
path: '{{ service_base_path }}/node-exporter/docker-compose.yml'
register: check_docker_compose_file
- name: "Stop node-exporter"
shell: docker-compose down
args:
chdir: '{{ service_base_path }}/node-exporter'
when: check_docker_compose_file.stat.exists
ignore_errors: yes
- name: "Deploy service configuration for node-exporter"
include_role:
name: _deploy
tasks_from: configs
vars:
current_config: "node-exporter"
current_base_path: "{{ service_base_path }}"
current_destination: "node-exporter"
current_owner: "{{ docker_owner }}"
current_group: "{{ docker_group }}"
current_docker: "{{ node_exporter_docker }}"
- name: "Start node-exporter"
shell: docker-compose up -d
args:
chdir: '{{ service_base_path }}/node-exporter'
- name: "Send mattermost messsge"
uri:
url: "{{ mattermost_hook_smardigo }}"
method: POST
body: "{{ lookup('template','mattermost-deploy-end.json.j2') }}"
body_format: json
headers:
Content-Type: "application/json"
delegate_to: 127.0.0.1
become: false
when:
- send_status_messages

@@ -0,0 +1,77 @@
---
traefik_id: "{{ service_name }}-traefik"
traefik_image_name: "traefik"
traefik_image_version: "v2.4"
traefik_docker: {
networks: [
{
name: front-tier,
external: 'true',
},
],
services: [
{
name: "{{ traefik_id }}",
image_name: "{{ traefik_image_name }}",
image_version: "{{ traefik_image_version }}",
environment: [
'DO_AUTH_TOKEN: "{{ digitalocean_authentication_token }}"',
],
volumes: [
'"./acme.json:/acme.json"',
'"./traefik.toml:/traefik.toml:ro"',
'"./traefik_dynamic.toml:/traefik_dynamic.toml:ro"',
'"/var/run/docker.sock:/var/run/docker.sock:ro"',
'"./config/static_files:/var/www/static_files:ro"',
],
networks: [
'"front-tier"'
],
ports: [
{
external: "0.0.0.0:{{ http_port }}",
internal: "{{ http_port }}"
},
{
external: "0.0.0.0:{{ https_port }}",
internal: "{{ https_port }}"
},
{
external: "0.0.0.0:{{ service_port_portainer }}",
internal: "{{ service_port_portainer }}"
},
{
external: "0.0.0.0:{{ service_port_pgadmin }}",
internal: "{{ service_port_pgadmin }}"
},
{
external: "0.0.0.0:{{ service_port_phpmyadmin }}",
internal: "{{ service_port_phpmyadmin }}"
},
{
external: "0.0.0.0:{{ admin_port_traefik }}",
internal: "{{ admin_port_traefik }}"
},
{
external: "0.0.0.0:{{ monitor_port_service }}",
internal: "{{ monitor_port_service }}"
},
{
external: "0.0.0.0:{{ monitor_port_system }}",
internal: "{{ monitor_port_system }}"
},
{
external: "0.0.0.0:{{ monitor_port_docker }}",
internal: "{{ monitor_port_docker }}"
},
],
dns: [
'"8.8.8.8"',
'"8.8.8.4"',
],
}
]
}

@@ -0,0 +1,91 @@
---
- name: "Send mattermost messsge"
uri:
url: "{{ mattermost_hook_smardigo }}"
method: POST
body: "{{ lookup('template','mattermost-deploy-start.json.j2') }}"
body_format: json
headers:
Content-Type: "application/json"
delegate_to: 127.0.0.1
become: false
when:
- send_status_messages
- name: "Check docker networks"
include_role:
name: _docker
tasks_from: networks
- name: "Check if traefik/docker-compose.yml exists"
stat:
path: '{{ service_base_path }}/traefik/docker-compose.yml'
register: check_docker_compose_file
- name: "Stop traefik"
shell: docker-compose down
args:
chdir: '{{ service_base_path }}/traefik'
when: check_docker_compose_file.stat.exists
ignore_errors: yes
- name: "Deploy service configuration for traefik"
include_role:
name: _deploy
tasks_from: configs
vars:
current_config: "traefik"
current_base_path: "{{ service_base_path }}"
current_destination: "traefik"
current_owner: "{{ docker_owner }}"
current_group: "{{ docker_group }}"
current_docker: "{{ caddy_docker }}"
- name: "Ensure acme.json exists"
copy:
content: ""
dest: '{{ service_base_path }}/traefik/acme.json'
force: no
owner: "{{ docker_owner }}"
group: "{{ docker_group }}"
mode: '0600'
- name: "Update landing page"
include_role:
name: _deploy
tasks_from: caddy_landing_page
vars:
current_services: []
- name: "Update landing page for traefik"
include_role:
name: _deploy
tasks_from: caddy_landing_page
vars:
current_services: [
{
current_name: "traefik",
current_url: "{{ http_s }}://{{ stage_server_url_host }}:{{ admin_port_traefik }}",
current_version: "{{ traefik_image_version }}",
current_date: "{{ ansible_date_time.iso8601 }}",
},
]
- name: "Start traefik"
shell: docker-compose up -d
args:
chdir: '{{ service_base_path }}/traefik'
- name: "Send mattermost messsge"
uri:
url: "{{ mattermost_hook_smardigo }}"
method: POST
body: "{{ lookup('template','mattermost-deploy-end.json.j2') }}"
body_format: json
headers:
Content-Type: "application/json"
delegate_to: 127.0.0.1
become: false
when:
- send_status_messages

@@ -39,3 +39,13 @@
- role: common
tags:
- common
- role: node_exporter
when: node_exporter_enabled | default(True)
tags:
- node_exporter
- role: traefik
when: traefik_enabled | default(True)
tags:
- traefik
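# either role can be skipped per run, e.g.:
#   ansible-playbook -i stage-dev setup.yml -e node_exporter_enabled=false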

@@ -2,6 +2,10 @@
dev-elastic-stack-01
dev-elastic-stack-02
dev-elastic-stack-03
dev-prometheus-01
[prometheus]
dev-prometheus-01
[stage_dev:children]
hcloud

@@ -0,0 +1,141 @@
version: '3.7'
{# ################################################## networks #}
{% if
current_docker.networks is defined
and (current_docker.networks|length>0)
%}
networks:
{% for network in current_docker.networks %}
{{ network.name }}:
external: {{ network.external | default('false') }}
{% endfor %}
{% endif %}
{# ################################################## networks #}
{# ################################################### volumes #}
{% if
current_docker.volumes is defined
and (current_docker.volumes|length>0)
%}
volumes:
{% for volume in current_docker.volumes %}
{{ volume.name }}: {}
{% endfor %}
{% endif %}
{# ################################################### volumes #}
{# ################################################## services #}
services:
{% for service in current_docker.services %}
{{ service.name }}:
image: "{{ service.image_name }}:{{ service.image_version }}"
container_name: "{{ service.name }}"
restart: {{ service.restart | default('always') }}
{% if
service.user is defined
%}
user: {{ service.user }}
{% endif %}
{# ########################################## command #}
{% if
service.command is defined
and (service.command|length>0)
%}
command: [
{% for item in service.command %}
{{ item }},
{% endfor %}
]
{% endif %}
{# ########################################## command #}
{# ###################################### labels #}
{% if
service.labels is defined
and (service.labels|length>0)
%}
labels:
{% for item in service.labels %}
- {{ item }}
{% endfor %}
{% endif %}
{# ###################################### labels #}
{# ###################################### environment #}
{% if
service.environment is defined
and (service.environment|length>0)
%}
environment:
{% for item in service.environment %}
{{ item }}
{% endfor %}
{% endif %}
{# ###################################### environment #}
{# ######################################### env_file #}
{% if
service.env_file is defined
and (service.env_file|length>0)
%}
env_file:
{% for item in service.env_file %}
- {{ item }}
{% endfor %}
{% endif %}
{# ######################################## env_file #}
{# ######################################### volumes #}
{% if
service.volumes is defined
and (service.volumes|length>0)
%}
volumes:
{% for item in service.volumes %}
- {{ item }}
{% endfor %}
{% endif %}
{# ######################################### volumes #}
{# ########################################## dns #}
{% if
service.dns is defined
and (service.dns|length>0)
%}
dns:
{% for item in service.dns %}
- {{ item }}
{% endfor %}
{% endif %}
{# ########################################## dns #}
{# ######################################## networks #}
{% if
service.networks is defined
and (service.networks|length>0)
%}
networks:
{% for item in service.networks %}
- {{ item }}
{% endfor %}
{% endif %}
{# ######################################## networks #}
{# ##################################### extra_hosts #}
{% if
service.extra_hosts is defined
and (service.extra_hosts|length>0)
%}
extra_hosts:
{% for item in service.extra_hosts %}
- {{ item.hostname }}:{{ item.ip }}
{% endfor %}
{% endif %}
{# ##################################### extra_hosts #}
{# ########################################### ports #}
{% if
service.ports is defined
and (service.ports|length>0)
%}
ports:
{% for item in service.ports %}
- {{ item.external }}:{{ item.internal }}
{% endfor %}
{% endif %}
{# ########################################### ports #}
{% endfor %}
{# ################################################## services #}
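{# Illustration (hypothetical input): a service map such as
   {name: "demo", image_name: "prom/node-exporter", image_version: "v1.1.2", networks: ['"front-tier"']}
   renders roughly to:
     services:
       demo:
         image: "prom/node-exporter:v1.1.2"
         container_name: "demo"
         restart: always
         networks:
           - "front-tier"
#}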

@@ -0,0 +1,115 @@
[log]
# level = "DEBUG"
format = "json"
[accessLog]
format = "json"
# [accessLog.filters]
# statusCodes = ["200", "300-302"]
# [accessLog.fields]
# defaultMode = "keep"
[accessLog.fields.names]
"StartLocal" = "drop"
# "ClientAddr" = "drop"
# "ClientHost" = "drop"
# "ClientPort" = "drop"
"ClientUsername" = "drop"
[api]
dashboard = true
[metrics]
[metrics.prometheus]
manualRouting = true
addServicesLabels = true
addEntryPointsLabels = true
buckets = [0.1,0.3,1.2,5.0]
entryPoint = "admin-service"
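# manualRouting = true disables the built-in /metrics router, so the metrics
# endpoint is only reachable via the explicit router declared in
# traefik_dynamic.toml (service "prometheus@internal")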
[entryPoints]
### http -> https ###
[entryPoints.web]
address = ":{{ http_port }}"
[entryPoints.web.http.redirections.entryPoint]
to = "websecure"
scheme = "https"
### production service port ###
[entryPoints.websecure]
address = ":{{ https_port }}"
### portainer as a service for the host ###
[entryPoints.service-portainer]
address = ":{{ service_port_portainer }}"
[entryPoints.service-portainer.http.redirections.entryPoint]
to = "service-portainer"
scheme = "https"
### postgres admin service for the host ###
[entryPoints.admin-postgres]
address = ":{{ service_port_pgadmin }}"
[entryPoints.admin-postgres.http.redirections.entryPoint]
to = "admin-postgres"
scheme = "https"
### mysql admin service for the host ###
[entryPoints.admin-mysql]
address = ":{{ service_port_phpmyadmin }}"
[entryPoints.admin-mysql.http.redirections.entryPoint]
to = "admin-mysql"
scheme = "https"
### traefik admin service for the host ###
[entryPoints.admin-traefik]
address = ":{{ admin_port_traefik }}"
[entryPoints.admin-traefik.http.redirections.entryPoint]
to = "admin-traefik"
scheme = "https"
### system monitoring port - host metrics ###
[entryPoints.admin-system]
address = ":{{ monitor_port_system }}"
[entryPoints.admin-system.http.redirections.entryPoint]
to = "admin-system"
scheme = "https"
### system monitoring port - docker metrics ###
[entryPoints.admin-docker]
address = ":{{ monitor_port_docker }}"
[entryPoints.admin-docker.http.redirections.entryPoint]
to = "admin-docker"
scheme = "https"
### service monitoring port - metrics for all served services ###
[entryPoints.admin-service]
address = ":{{ monitor_port_service }}"
[entryPoints.admin-service.http.redirections.entryPoint]
to = "admin-service"
scheme = "https"
[certificatesResolvers.letsencrypt.acme]
email = "{{ lets_encrypt_email }}"
storage = "acme.json"
{% if letsencrypt_caserver_directory_url is defined %}
caserver = "{{ letsencrypt_caserver_directory_url }}"
{% endif %}
[certificatesResolvers.letsencrypt.acme.dnsChallenge]
provider = "digitalocean"
resolvers = ["8.8.8.8:53"]
[certificatesResolvers.letsencrypt-http.acme]
email = "{{ lets_encrypt_email }}"
storage = "acme.json"
{% if letsencrypt_caserver_directory_url is defined %}
caserver = "{{ letsencrypt_caserver_directory_url }}"
{% endif %}
[certificatesResolvers.letsencrypt-http.acme.httpChallenge]
entrypoint = "web"
[providers.docker]
watch = true
network = "front-tier"
exposedByDefault = false
[providers.file]
filename = "traefik_dynamic.toml"

@@ -0,0 +1,23 @@
# secure admin resources with basic authentication
[http.middlewares.traefik-auth.basicAuth]
users = [
"{{ reverse_proxy_admin_username }}:{{ reverse_proxy_admin_password }}"
]
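# the password is expected as an htpasswd-style hash (e.g. generated with
# `openssl passwd -apr1`), not as plaintext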
# admin api (dashboard, rest api, ...)
[http.routers.api]
rule = "Host(`{{ stage_server_name }}.{{ domain }}`)"
entrypoints = ["admin-traefik"]
middlewares = ["traefik-auth"]
service = "api@internal"
[http.routers.api.tls]
certResolver = "letsencrypt"
# metrics api (prometheus)
[http.routers.metrics]
rule = "Host(`{{ stage_server_name }}.{{ domain }}`) && Path(`/metrics`)"
entrypoints = ["admin-service"]
middlewares = ["traefik-auth"]
service = "prometheus@internal"
[http.routers.metrics.tls]
certResolver = "letsencrypt"