diff --git a/mysqld_exporter/defaults/main.yml b/mysqld_exporter/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7d0f4545079342109ff95b42e0bea954da73094d
--- /dev/null
+++ b/mysqld_exporter/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+mysqld_exporter_data_source: >-
+  prometheus:nopassword@unix(/run/mysqld/mysqld.sock)/
+mysqld_exporter_args: ""
diff --git a/mysqld_exporter/handlers/main.yml b/mysqld_exporter/handlers/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0c1a1c1143c0504c6607e99777d853d23a9f9ddf
--- /dev/null
+++ b/mysqld_exporter/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Restart mysqld_exporter
+  systemd:
+    name: prometheus-mysqld-exporter.service
+    state: restarted
diff --git a/mysqld_exporter/tasks/main.yml b/mysqld_exporter/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e84070c33c913484db2c8dfbf2fed349ea959ae4
--- /dev/null
+++ b/mysqld_exporter/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+
+- name: Install mysqld_exporter
+  apt:
+    name: prometheus-mysqld-exporter
+    state: present
+  when: ansible_distribution_major_version|int >= 10
+  tags:
+    - prometheus
+    - prometheus-exporter
+
+- name: Install mysqld_exporter (stretch)
+  apt:
+    name: prometheus-mysqld-exporter
+    state: present
+    default_release: stretch-backports
+  when: ansible_distribution_major_version|int == 9
+  tags:
+    - prometheus
+    - prometheus-exporter
+
+- name: Configure mysqld_exporter
+  template:
+    src: prometheus-mysqld-exporter.j2
+    dest: /etc/default/prometheus-mysqld-exporter
+  notify:
+    - Restart mysqld_exporter
+  tags:
+    - prometheus
+    - prometheus-exporter
+    - config
+
+- name: Configure Prometheus server to scrape us
+  template:
+    src: scrape.yml.j2
+    dest: "/etc/prometheus/scrape/mysqld_{{ ansible_fqdn }}.yml"
+  delegate_to: "{{ prometheus_host }}"
+  tags:
+    - prometheus
+    - prometheus-exporter
+    - config
diff --git a/mysqld_exporter/templates/prometheus-mysqld-exporter.j2 b/mysqld_exporter/templates/prometheus-mysqld-exporter.j2
new file mode 100644
index 0000000000000000000000000000000000000000..048c4a9e2fd2b35ee1013d3bc3dc088cf8f06074
--- /dev/null
+++ b/mysqld_exporter/templates/prometheus-mysqld-exporter.j2
@@ -0,0 +1,122 @@
+# By default the connection string will be read from
+# $HOME/my.cnf or from the file specified with the -config.my-cnf parameter.
+
+# To set a connection string from the environment instead, uncomment one of the
+# following lines.
+
+# Using UNIX domain sockets and authentication:
+# DATA_SOURCE_NAME="prometheus:nopassword@unix(/run/mysqld/mysqld.sock)/"
+
+# Using a TCP connection and password authentication:
+# DATA_SOURCE_NAME="login:password@(hostname:port)/dbname"
+
+DATA_SOURCE_NAME="{{ mysqld_exporter_data_source }}"
+
+# Note the user must be granted enough privileges for the exporter to run.
+# Example to create a user to connect with the UNIX socket:
+#
+# CREATE USER IF NOT EXISTS 'prometheus'@'localhost' IDENTIFIED VIA unix_socket;
+# GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'prometheus'@'localhost';
+
+# Set the command-line arguments to pass to the exporter.
+ARGS='{{ mysqld_exporter_args }}'
+
+# Usage of prometheus-mysqld-exporter:
+#   -collect.auto_increment.columns
+#       Collect auto_increment columns and max values from information_schema
+#   --exporter.lock_wait_timeout=2
+#       Set a lock_wait_timeout on the connection to avoid long metadata
+#       locking.
+#   --exporter.log_slow_filter
+#       Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not
+#       supported by Oracle MySQL.
+#   --collect.heartbeat.database="heartbeat"
+#       Database from where to collect heartbeat data
+#   --collect.heartbeat.table="heartbeat"
+#       Table from where to collect heartbeat data
+#   --collect.info_schema.processlist.min_time=0
+#       Minimum time a thread must be in each state to be counted
+#   --collect.info_schema.tables.databases="*"
+#       The list of databases to collect table stats for, or '*' for all
+#   --collect.perf_schema.eventsstatements.limit=250
+#       Limit the number of events statements digests by response time
+#   --collect.perf_schema.eventsstatements.timelimit=86400
+#       Limit how old the 'last_seen' events statements can be, in seconds
+#   --collect.perf_schema.eventsstatements.digest_text_limit=120
+#       Maximum length of the normalized statement text
+#   --collect.perf_schema.file_instances.filter=".*"
+#       RegEx file_name filter for performance_schema.file_summary_by_instance
+#   --collect.perf_schema.file_instances.remove_prefix="/var/lib/mysql/"
+#       Remove path prefix in performance_schema.file_summary_by_instance
+#   --web.listen-address=":9104"
+#       Address to listen on for web interface and telemetry.
+#   --web.telemetry-path="/metrics"
+#       Path under which to expose metrics.
+#   --config.my-cnf="$HOME/.my.cnf"
+#       Path to .my.cnf file to read MySQL credentials from.
+#   --collect.global_variables
+#       Collect from SHOW GLOBAL VARIABLES
+#   --collect.slave_status
+#       Collect from SHOW SLAVE STATUS
+#   --collect.info_schema.processlist
+#       Collect current thread state counts from the
+#       information_schema.processlist
+#   --collect.info_schema.tables
+#       Collect metrics from information_schema.tables
+#   --collect.info_schema.innodb_tablespaces
+#       Collect metrics from information_schema.innodb_sys_tablespaces
+#   --collect.info_schema.innodb_metrics
+#       Collect metrics from information_schema.innodb_metrics
+#   --collect.auto_increment.columns
+#       Collect auto_increment columns and max values from information_schema
+#   --collect.global_status
+#       Collect from SHOW GLOBAL STATUS
+#   --collect.perf_schema.tableiowaits
+#       Collect metrics from performance_schema.table_io_waits_summary_by_table
+#   --collect.perf_schema.indexiowaits
+#       Collect metrics from
+#       performance_schema.table_io_waits_summary_by_index_usage
+#   --collect.perf_schema.tablelocks
+#       Collect metrics from
+#       performance_schema.table_lock_waits_summary_by_table
+#   --collect.perf_schema.eventsstatements
+#       Collect metrics from
+#       performance_schema.events_statements_summary_by_digest
+#   --collect.perf_schema.eventswaits
+#       Collect metrics from
+#       performance_schema.events_waits_summary_global_by_event_name
+#   --collect.perf_schema.file_events
+#       Collect metrics from performance_schema.file_summary_by_event_name
+#   --collect.perf_schema.file_instances
+#       Collect metrics from performance_schema.file_summary_by_instance
+#   --collect.binlog_size
+#       Collect the current size of all registered binlog files
+#   --collect.info_schema.userstats
+#       If running with userstat=1, set to true to collect user statistics
+#   --collect.info_schema.clientstats
+#       If running with userstat=1, set to true to collect client statistics
+#   --collect.info_schema.tablestats
+#       If running with userstat=1, set to true to collect table statistics
+#   --collect.info_schema.innodb_cmp
+#       Collect metrics from information_schema.innodb_cmp
+#   --collect.info_schema.innodb_cmpmem
+#       Collect metrics from information_schema.innodb_cmpmem
+#   --collect.info_schema.query_response_time
+#       Collect query response time distribution if query_response_time_stats
+#       is ON.
+#   --collect.engine_tokudb_status
+#       Collect from SHOW ENGINE TOKUDB STATUS
+#   --collect.perf_schema.replication_group_member_stats
+#       Collect metrics from performance_schema.replication_group_member_stats
+#   --collect.heartbeat
+#       Collect from heartbeat
+#   --collect.slave_hosts
+#       Scrape information from 'SHOW SLAVE HOSTS'
+#   --collect.engine_innodb_status
+#       Collect from SHOW ENGINE INNODB STATUS
+#   --log.level="info"
+#       Only log messages with the given severity or above. Valid levels:
+#       [debug, info, warn, error, fatal]
+#   --log.format="logger:stderr"
+#       Set the log target and format. Example:
+#       "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"
diff --git a/mysqld_exporter/templates/scrape.yml.j2 b/mysqld_exporter/templates/scrape.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..76c2deed127d691b776b0e671d2c280ff5a92152
--- /dev/null
+++ b/mysqld_exporter/templates/scrape.yml.j2
@@ -0,0 +1,4 @@
+- targets:
+    - "{{ ansible_fqdn }}:9104"
+  labels:
+    job: mysqld
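
Example playbook wiring for the role, as a minimal sketch: the inventory group "db_servers", the hostname "prometheus.example.org", and the commented TCP DSN are illustrative assumptions, not part of the role itself. The only required extra variable is prometheus_host, which must point at the Prometheus server that owns /etc/prometheus/scrape/.

# site.yml (hypothetical example; adjust hosts and prometheus_host to your inventory)
- hosts: db_servers
  vars:
    # Prometheus server the scrape snippet is delegated to (assumed inventory name).
    prometheus_host: prometheus.example.org
    # Override the default UNIX-socket DSN if the exporter should connect over TCP instead:
    # mysqld_exporter_data_source: "exporter:secret@(127.0.0.1:3306)/"
  roles:
    - mysqld_exporter

Once the handler has restarted the exporter, metrics should be reachable on port 9104 of the database host (for example with curl against http://HOST:9104/metrics), and the Prometheus server will pick up the new target provided it loads /etc/prometheus/scrape/*.yml via file_sd_configs, which this role assumes but does not configure.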