---
# defaults file for prometheus
app_main_port: "9090"
app_group: "{{ app_user }}"
version: '3.7'

volumes:
  prometheus_data: {}

services:
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus/:/etc/prometheus/
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--web.config.file=/etc/prometheus/web.yml'
    ports:
      - 127.0.0.1:${APP_MAIN_PORT:-9090}:9090
    restart: always
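# Note (sketch, not part of the role): APP_MAIN_PORT above is resolved by
# Compose variable interpolation and falls back to 9090. It can be pinned
# with an illustrative .env file placed next to docker-compose.yml:
#
#   # .env
#   APP_MAIN_PORT=9090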
---
# handlers file for prometheus
- name: reload nginx prometheus
  service:
    name: nginx
    state: reloaded
---
- name: Create or update Let's Encrypt certificate
  import_role:
    name: _letsencrypt_certificate
  when: app_domain is defined and app_domain != ""

- name: "create user {{ app_user }}"
  import_role:
    name: _user
  vars:
    user_name: "{{ app_user }}"
    user_password: "{{ app_user_password }}"
  when: app_user is defined
- name: "directory for www logs mounted in jail"
file:
state: directory
path: "{{ app_instance_root }}/../logs"
mode: 0711
owner: "{{ app_user }}"
group: "{{ app_group }}"
when: app_user is defined
- name: "template nginx_app.j2 {{ app_instance_id }}"
template:
src: "nginx_app.j2"
dest: "/etc/nginx/sites-available/{{ app_instance_id }}.conf"
notify: reload nginx prometheus
tags:
- rev_proxy
- name: "Prometheus conf directory"
file:
path: "{{ app_instance_root }}/prometheus"
state: directory
tags:
- prometheus_installation
- name: "Move configuration file of Prometheus"
template:
src: "prometheus.yml.j2"
dest: "{{ app_instance_root }}/prometheus/prometheus.yml"
backup: yes
tags:
- prometheus_installation
- name: "Move configuration file of Prometheus"
template:
src: "web.yml.j2"
dest: "{{ app_instance_root }}/prometheus/web.yml"
backup: yes
tags:
- prometheus_installation
- name: "Copy node_exporter targets file"
copy:
src: "~/.pm_monitoring/node_exporter.json"
dest: "{{ app_instance_root }}/prometheus/node_exporter.json"
mode: 0755
tags:
- prometheus_installation
- name: "Copy services targets file"
copy:
src: "~/.pm_monitoring/services.json"
dest: "{{ app_instance_root }}/prometheus/services.json"
mode: 0755
tags:
- prometheus_installation
- name: "copy docker-compose {{ app_instance_id }}"
copy:
src: "docker-compose.yml"
dest: "{{ app_instance_root }}/docker-compose.yml"
tags:
- prometheus_installation
- name: "start prometheus environment"
docker_compose:
project_src: "{{ app_instance_root }}"
state: present
tags:
- prometheus_installation
- name: log rotate
  import_role:
    name: _app_logrotate

- name: "enable site for {{ app_domain }}"
  file:
    state: link
    path: "/etc/nginx/sites-enabled/{{ app_instance_id }}.conf"
    src: "/etc/nginx/sites-available/{{ app_instance_id }}.conf"
  notify: reload nginx prometheus

- name: Add monit
  import_role:
    name: _app_monit
  when: monit_request is defined and monit_request != ''
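# Optional post-install check — a sketch, not part of the original role.
# Prometheus exposes a /-/healthy endpoint; once web.yml basic auth is in
# place the probe must authenticate. Variable names reuse this role's vars.
- name: "wait for prometheus to answer on its health endpoint"
  uri:
    url: "http://127.0.0.1:{{ app_main_port }}/-/healthy"
    user: "admin"
    password: "{{ clear_admin_pwd }}"
    force_basic_auth: yes
  register: prom_health
  retries: 5
  delay: 3
  until: prom_health.status == 200
  tags:
    - prometheus_installation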
---
- name: "set user home var "
set_fact:
app_user_home: "/home/{{ app_user }}"
tags:
- setpath
- name: "set instance root"
set_fact:
app_instance_root: "{{ app_user_home }}/{{ app_instance_id }}"
tags:
- setpath
- name: "set instance root"
set_fact:
run_user: "{{ app_user }}"
tags:
- setpath
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
- import_tasks: uninstall.yml
when: app_run == 'uninstall'
map $http_user_agent $log_ua {
    ~Monit 0;
    default 1;
}

server {
    listen 80;
    listen [::]:80;
    server_name {{ app_domain | mandatory }};
    # enforce https
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name {{ app_domain }};

    ssl_certificate /etc/letsencrypt/live/{{ app_domain }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ app_domain }}/privkey.pem;

    # Add headers to serve security related headers
    # Before enabling Strict-Transport-Security headers please read into this
    # topic first.
    # add_header Strict-Transport-Security "max-age=15768000;
    # includeSubDomains; preload;";
    #
    # WARNING: Only add the preload option once you read about
    # the consequences in https://hstspreload.org/. This option
    # will add the domain to a hardcoded list that is shipped
    # in all major browsers and getting removed from this list
    # could take several months.
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection "1; mode=block";
    add_header X-Robots-Tag all; # https://developers.google.com/search/docs/advanced/robots/robots_meta_tag
    add_header X-Download-Options noopen;
    add_header X-Permitted-Cross-Domain-Policies none;
    add_header Strict-Transport-Security "max-age=15768000";

    # Enable gzip but do not remove ETag headers
    gzip on;
    gzip_vary on;
    gzip_comp_level 4;
    gzip_min_length 256;
    gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
    gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;

    access_log {{ www_log }}/{{ app_instance_id }}/access.log combined if=$log_ua;
    error_log {{ www_log }}/{{ app_instance_id }}/error.log;

    # set max upload size
    client_max_body_size 512M;
    fastcgi_buffers 64 4K;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_pass http://localhost:{{ app_main_port }};
    }
}
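# Sketch (not part of the template): after the role renders and symlinks this
# vhost, the configuration can be validated by hand before the
# "reload nginx prometheus" handler fires:
#   nginx -t && systemctl reload nginx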
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  # - "first.rules"
  # - "second.rules"

scrape_configs:
  - job_name: prometheus
    basic_auth:
      username: "admin"
      password: "{{ clear_admin_pwd }}"
    static_configs:
      - targets: ['localhost:{{ app_main_port }}']
  - job_name: node_exporter
    basic_auth:
      username: "admin"
      password: "{{ admin_node_exporter_pwd }}"
    file_sd_configs:
      - files:
          - node_exporter.json
  - job_name: services
    basic_auth:
      username: "metrics"
      password: "{{ admin_services_pwd }}"
    file_sd_configs:
      - files:
          - services.json
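# The node_exporter.json and services.json files referenced above follow
# Prometheus' standard file_sd (file-based service discovery) format. An
# illustrative node_exporter.json — hosts and labels here are placeholders,
# not shipped with the role:
#   [
#     {
#       "targets": ["node1.example.org:9100"],
#       "labels": { "env": "prod" }
#     }
#   ]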
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: '{{ smtp_host }}:{{ smtp_port }}'
  smtp_from: '{{ smtp_from }}'
  smtp_auth_username: '{{ smtp_user }}'
  smtp_auth_password: 'password'

# The directory from which notification templates are read.
templates:
  - '/etc/alertmanager/template/*.tmpl'

# The root route on which each incoming alert enters.
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  #
  # To aggregate by all possible labels use '...' as the sole label name.
  # This effectively disables aggregation entirely, passing through all
  # alerts as-is. This is unlikely to be what you want, unless you have
  # a very low alert volume or your upstream notification system performs
  # its own grouping. Example: group_by: [...]
  group_by: ['alertname', 'cluster', 'service']

  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' to send the initial notification.
  # This way ensures that multiple alerts for the same group that start
  # firing shortly after one another are batched together on the first
  # notification.
  group_wait: 30s

  # When the first notification was sent, wait 'group_interval' to send a
  # batch of new alerts that started firing for that group.
  group_interval: 5m

  # If an alert has successfully been sent, wait 'repeat_interval' to
  # resend them.
  repeat_interval: 3h

  # A default receiver
  receiver: team-X-mails

  # All the above attributes are inherited by all child routes and can be
  # overwritten on each.

  # The child route trees.
  routes:
    # This route performs a regular expression match on alert labels to
    # catch alerts that are related to a list of services.
    - matchers:
        - service=~"foo1|foo2|baz"
      receiver: team-X-mails
      # The service has a sub-route for critical alerts; any alerts
      # that do not match, i.e. severity != critical, fall back to the
      # parent node and are sent to 'team-X-mails'.
      routes:
        - matchers:
            - severity="critical"
          receiver: team-X-pager
    - matchers:
        - service="files"
      receiver: team-Y-mails
      routes:
        - matchers:
            - severity="critical"
          receiver: team-Y-pager
    # This route handles all alerts coming from a database service. If there's
    # no team to handle it, it defaults to the DB team.
    - matchers:
        - service="database"
      receiver: team-DB-pager
      # Also group alerts by affected database.
      group_by: [alertname, cluster, database]
      routes:
        - matchers:
            - owner="team-X"
          receiver: team-X-pager
          continue: true
        - matchers:
            - owner="team-Y"
          receiver: team-Y-pager

# Inhibition rules allow muting a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
  - source_matchers: [severity="critical"]
    target_matchers: [severity="warning"]
    # Apply inhibition if the alertname is the same.
    # CAUTION:
    #   If all label names listed in `equal` are missing
    #   from both the source and target alerts,
    #   the inhibition rule will apply!
    equal: [alertname, cluster, service]

receivers:
  - name: 'team-X-mails'
    email_configs:
      - to: 'team-X+alerts@example.org'
  - name: 'team-X-pager'
    email_configs:
      - to: 'team-X+alerts-critical@example.org'
    pagerduty_configs:
      - service_key: <team-X-key>
  - name: 'team-Y-mails'
    email_configs:
      - to: 'team-Y+alerts@example.org'
  - name: 'team-Y-pager'
    pagerduty_configs:
      - service_key: <team-Y-key>
  - name: 'team-DB-pager'
    pagerduty_configs:
      - service_key: <team-DB-key>
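# Sketch (not part of this file): a rendered Alertmanager configuration can
# be syntax-checked with amtool, which ships with Alertmanager:
#   amtool check-config /etc/alertmanager/alertmanager.yml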
basic_auth_users:
  admin: {{ admin_pwd }}
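# Note (assumption, not stated in the role): Prometheus' web configuration
# expects bcrypt hashes as basic_auth_users values, so admin_pwd should
# already hold a bcrypt hash. A hedged Ansible sketch (requires passlib):
#   - set_fact:
#       admin_pwd: "{{ clear_admin_pwd | password_hash('bcrypt') }}"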
---
- hosts: localhost
  remote_user: root
  roles:
    - prometheus
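# Illustrative invocation (the playbook filename is an assumption); the play
# targets localhost, so no inventory is needed:
#   ansible-playbook test.yml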
---
# vars file for prometheus
@@ -47,7 +47,7 @@ server {
     error_log {{ www_log | mandatory }}/{{ app_instance_id }}/error.log;
     # set max upload size
-    # client_max_body_size 512M;
+    client_max_body_size 512M;
     location / {
         proxy_pass http://backend{{ app_instance_id }}/;
@@ -63,4 +63,4 @@ server {
         proxy_redirect off;
     }
-}
\ No newline at end of file
+}
@@ -38,3 +38,21 @@
   import_role:
     name: _app_monit
   when: monit_request is defined and monit_request != ''
+
+- name: "cron to stop uptime-kuma during the night"
+  cron:
+    name: "stop uptime"
+    hour: "{{ uptime_stop_hour }}"
+    minute: "{{ uptime_stop_minute }}"
+    job: "/usr/bin/docker stop uptime-kuma"
+  tags:
+    - cron_uptime
+
+- name: "cron to start uptime-kuma after the night"
+  cron:
+    name: "start uptime"
+    hour: "{{ uptime_start_hour | mandatory }}"
+    minute: "{{ uptime_start_minute | mandatory }}"
+    job: "/usr/bin/docker start uptime-kuma"
+  tags:
+    - cron_uptime
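# Sketch (assumption, not in the diff): the cron hour/minute variables above
# would be supplied via inventory or defaults, e.g. stop at 01:00, start 06:00:
#   uptime_stop_hour: "1"
#   uptime_stop_minute: "0"
#   uptime_start_hour: "6"
#   uptime_start_minute: "0"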
---
-# defaults file for cvat
+# defaults file for WEBLATE_MT_MYMEMORY_ENABLED
app_main_port: "8090"
app_group: "{{ app_user }}"
app_email: "{{ smtp_user }}"
app_from_email: "{{ smtp_user }}"
database_type: "postgres_docker"
database_user: "{{ app_instance_id }}_usr"
database_name: "{{ app_instance_id }}_db"
...
version: '3'
services:
  weblate:
-    image: weblate/weblate
+    image: weblate/weblate:${APP_VERSION}
    tmpfs:
      - /app/cache
    volumes:
...
---
-# handlers file for cvat
+# handlers file for weblate
- name: reload nginx weblate
  service:
    name: nginx
    state: reloaded
@@ -5,6 +5,10 @@
     name: _app_log_inventory
   vars:
     log_type: "install"
+  when: not ansible_check_mode
+
+- import_role:
+    name: _app_backup
 - name: Create or update Let's Encrypt certificate
   import_role:
...
---
-# tasks file for cvat
+# tasks file for weblate
- name: "set user home var"
  set_fact:
...