使用docker部署grafana+prometheus配置
docker-compose-monitor.yml
version: '2'

networks:
  monitor:
    driver: bridge

services:
  influxdb:
    image: influxdb:latest
    container_name: tig-influxdb
    ports:
      - "18083:8083"
      - "18086:8086"
      - "18090:8090"
    env_file:
      - 'env.influxdb'
    volumes:
      # Data persistency
      # sudo mkdir -p ./influxdb/data
      - ./influxdb/data:/var/lib/influxdb
      # Set the container timezone to UTC+8 (China Standard Time)
      - ./timezone:/etc/timezone:ro
      - ./localtime:/etc/localtime:ro
    restart: unless-stopped  # restart automatically unless explicitly stopped

  telegraf:
    image: telegraf:latest
    container_name: tig-telegraf
    links:
      - influxdb
    volumes:
      - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
      - ./timezone:/etc/timezone:ro
      - ./localtime:/etc/localtime:ro
    restart: unless-stopped

  prometheus:
    image: prom/prometheus
    container_name: prometheus
    hostname: prometheus
    restart: always
    volumes:
      - /home/qa/docker/grafana/prometheus.yml:/etc/prometheus/prometheus.yml
      - /home/qa/docker/grafana/node_down.yml:/etc/prometheus/node_down.yml
    ports:
      - "9090:9090"
    networks:
      - monitor

  alertmanager:
    image: prom/alertmanager
    container_name: alertmanager
    hostname: alertmanager
    restart: always
    volumes:
      - /home/qa/docker/grafana/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    ports:
      - "9093:9093"
    networks:
      - monitor

  grafana:
    image: grafana/grafana:6.7.4
    container_name: grafana
    hostname: grafana
    restart: always
    ports:
      - "13000:3000"
    networks:
      - monitor

  node-exporter:
    image: quay.io/prometheus/node-exporter
    container_name: node-exporter
    hostname: node-exporter
    restart: always
    ports:
      - "9100:9100"
    networks:
      - monitor

  cadvisor:
    image: google/cadvisor:latest
    container_name: cadvisor
    hostname: cadvisor
    restart: always
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    ports:
      - "18080:8080"
    networks:
      - monitor
alertmanager.yml
global:
  resolve_timeout: 5m
  # SMTP settings — the quoted Chinese values below are placeholders
  # (sender address / username / password); fill in real credentials.
  smtp_from: '郵箱'
  smtp_smarthost: 'smtp.exmail.qq.com:25'
  smtp_auth_username: '郵箱'
  smtp_auth_password: '密碼'
  smtp_require_tls: false
  smtp_hello: 'qq.com'

route:
  group_by: ['alertname']
  group_wait: 5s
  group_interval: 5s
  repeat_interval: 5m
  receiver: 'email'

receivers:
  - name: 'email'
    email_configs:
      # "收件郵箱" is a placeholder for the recipient address.
      - to: '收件郵箱'
        send_resolved: true

# Suppress 'warning' alerts when a matching 'critical' alert is already firing.
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'dev', 'instance']
prometheus.yml
global:
  scrape_interval: 15s  # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s  # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['192.168.32.117:9093']
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "node_down.yml"
  # - "node-exporter-alert-rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # IO storage node group
  - job_name: 'io'
    scrape_interval: 8s
    static_configs:  # port is the one node-exporter listens on
      - targets: ['192.168.32.117:9100']
      - targets: ['192.168.32.196:9100']
      - targets: ['192.168.32.136:9100']
      - targets: ['192.168.32.193:9100']
      - targets: ['192.168.32.153:9100']
      - targets: ['192.168.32.185:9100']
      - targets: ['192.168.32.190:19100']
      - targets: ['192.168.32.192:9100']
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'cadvisor'
    static_configs:  # port is the one cadvisor listens on
      - targets: ['192.168.32.117:18080']
      - targets: ['192.168.32.193:8080']
      - targets: ['192.168.32.153:8080']
      - targets: ['192.168.32.185:8080']
      - targets: ['192.168.32.190:18080']
      - targets: ['192.168.32.192:18080']
node_down.yml
groups:
  - name: node_down
    rules:
      # Fires when any scrape target has been unreachable for 1 minute.
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          user: test
        annotations:
          summary: 'Instance {{ $labels.instance }} down'
          description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minutes.'

      # Available memory below 10%
      - alert: 剩余內(nèi)存小于10%
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of memory (instance {{ $labels.instance }})
          description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

      # Available disk space below 10% (read-write filesystems only)
      - alert: 剩余磁盤小于10%
        expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of disk space (instance {{ $labels.instance }})
          description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

      # CPU load above 80% (100 minus average idle time over 2 minutes)
      - alert: CPU負(fù)載 > 80%
        expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host high CPU load (instance {{ $labels.instance }})
          description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
告警:https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring
官網(wǎng)儀表盤:https://grafana.com/grafana/dashboards/
到此這篇關(guān)于docker部署grafana+prometheus配置的文章就介紹到這了,更多相關(guān)docker部署grafana+prometheus內(nèi)容請搜索腳本之家以前的文章或繼續(xù)瀏覽下面的相關(guān)文章希望大家以后多多支持腳本之家!
相關(guān)文章
Docker創(chuàng)建tomcat容器實例后無法訪問(HTTP狀態(tài)404)
本文主要介紹了Docker創(chuàng)建tomcat容器實例后無法訪問,HTTP狀態(tài)顯示404,文中通過示例代碼介紹的非常詳細(xì),對大家的學(xué)習(xí)或者工作具有一定的參考學(xué)習(xí)價值,需要的朋友們下面隨著小編來一起學(xué)習(xí)學(xué)習(xí)吧2024-07-07
安裝Docker時執(zhí)行yum?install?-y?yum-utils報錯解決辦法
在安裝工具時使用yum命令報錯,問題通常是服務(wù)器無法連接網(wǎng)絡(luò),解決此問題需配置鏡像源,文中通過代碼介紹的非常詳細(xì),對大家的學(xué)習(xí)或者工作具有一定的參考借鑒價值,需要的朋友可以參考下2024-11-11
jenkins中通過Publish Over SSH插件將項目部署到遠程機器上的講解說明
今天小編就為大家分享一篇關(guān)于jenkins中通過Publish Over SSH插件將項目部署到遠程機器上的講解說明,小編覺得內(nèi)容挺不錯的,現(xiàn)在分享給大家,具有很好的參考價值,需要的朋友一起跟隨小編來看看吧2019-02-02
Docker 網(wǎng)絡(luò)模式(四種)詳細(xì)介紹
這篇文章主要介紹了Docker 網(wǎng)絡(luò)模式詳細(xì)介紹的相關(guān)資料,這里提供了四種網(wǎng)絡(luò)模式的介紹,Docker 作為輕量級容器技術(shù),很多比較不錯的功能,網(wǎng)絡(luò)不是多好,這里就整理下,需要的朋友可以參考下2016-11-11

