mkdir -p /apps/prometheus/2.33.1
cd /apps/prometheus/2.33.1
wget https://github.com/prometheus/prometheus/releases/download/v2.33.1/prometheus-2.33.1.linux-amd64.tar.gz
tar -xzf prometheus-2.33.1.linux-amd64.tar.gz --strip-components 1
mkdir -p /apps/prometheus/2.33.1/data
Edit the prometheus.yml file; its content is as follows:
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']
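Before starting the server, you can sanity-check the configuration with the promtool binary that ships in the same tarball (a quick check, assuming you are still in /apps/prometheus/2.33.1):
./promtool check config prometheus.yml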
cd /apps/prometheus/2.33.1
# Run in the foreground
./prometheus
# Run in the background (create the logs directory first)
mkdir -p /apps/prometheus/2.33.1/logs
./prometheus --config.file=prometheus.yml --web.enable-lifecycle > /apps/prometheus/2.33.1/logs/prometheus.log 2>&1 &
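To confirm the server is up, you can query its built-in health and readiness endpoints (assuming the default listen address of localhost:9090):
curl http://localhost:9090/-/healthy
curl http://localhost:9090/-/ready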
vim /etc/systemd/system/prometheus.service
[Unit]
Description=Prometheus Monitoring System
Documentation=https://prometheus.io/docs/
After=network.target
[Service]
#User=root
#Group=root
Type=simple
# Startup command
ExecStart=/apps/prometheus/2.33.1/prometheus --config.file=/apps/prometheus/2.33.1/prometheus.yml --web.enable-lifecycle
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
[Install]
WantedBy=multi-user.target
# Reload systemd unit files
systemctl daemon-reload
# Enable start on boot
systemctl enable prometheus
# Start
systemctl start prometheus
# Check status
systemctl status prometheus
# Stop
systemctl stop prometheus
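Because the unit defines ExecReload with kill -HUP, configuration changes can be picked up without a restart. For example, after editing prometheus.yml:
systemctl reload prometheus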
docker pull prom/prometheus:v2.33.1
docker run -p 9090:9090 -v /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus:v2.33.1
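If you also want the TSDB data to survive container restarts, a variant of the command above might look like the sketch below; the named volume prometheus-data is hypothetical, and /prometheus is the data path the official image uses (the same path mounted in the docker-compose setup further down):
docker run -d --name prometheus -p 9090:9090 \
  -v prometheus-data:/prometheus \
  -v /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
  prom/prometheus:v2.33.1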
mkdir -p /apps/prometheus/{conf,data,rules}
Set permissions on the data directory:
chmod -R 777 /apps/prometheus/data/
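A less permissive alternative (assuming the official prom/prometheus image, which runs as the nobody user, UID/GID 65534) is to give that user ownership of the data directory instead of opening it to everyone:
chown -R 65534:65534 /apps/prometheus/data/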
vim /apps/prometheus/docker-compose.yml
version: "3"
services:
  prometheus:
    image: prom/prometheus:v2.33.1
    container_name: prometheus-prometheus
    hostname: prometheus
    restart: always
    volumes:
      - /apps/prometheus/data:/prometheus
      - /apps/prometheus/rules:/etc/prometheus/rules
      - /apps/prometheus/conf/prometheus.yml:/etc/prometheus/prometheus.yml
    ports:
      - 9090:9090
    command: "--config.file=/etc/prometheus/prometheus.yml --web.enable-lifecycle"
networks:
  default:
    external:
      name: prometheus
Note: the --web.enable-lifecycle flag above enables hot reloading of the configuration. With it set, you can reload the configuration file with:
curl -X POST http://<server-IP>:<Prometheus-port>/-/reload
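To confirm a reload actually took effect, you can ask the server for the configuration it is currently running (assuming it is reachable on localhost:9090):
curl http://localhost:9090/api/v1/status/config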
3. Edit the prometheus.yml file
vim /apps/prometheus/conf/prometheus.yml
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.

#alerting:
#  alertmanagers:
#    - static_configs:
#        - targets: ['localhost:9093']

rule_files:
  - "/etc/prometheus/rules/*.yml"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
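Since rule_files points at /etc/prometheus/rules/*.yml (mounted from /apps/prometheus/rules on the host), any rule file placed there is picked up on the next reload. A minimal sketch, using a hypothetical recording rule file named example.yml:
vim /apps/prometheus/rules/example.yml
groups:
  - name: example
    rules:
      - record: job:up:sum
        expr: sum by (job) (up)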
Check whether the prometheus Docker network referenced by the compose file already exists:
docker network list
If it does not exist, create it:
docker network create prometheus --subnet 10.21.22.0/24
cd /apps/prometheus
docker-compose up -d
cd /apps/prometheus
docker-compose ps
docker-compose logs -f
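Once the container is up, you can verify that the scrape target is being collected by querying the HTTP API (assuming the host's port 9090 as mapped above):
curl http://localhost:9090/api/v1/targets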