```yaml
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["localhost:9090"]

  - job_name: "clash"
    metrics_path: /metrics
    scrape_interval: 1s
    static_configs:
      - targets: ["127.0.0.1:2112"]
```
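With the scrape targets defined above, it is worth confirming that the Clash metrics endpoint actually responds before pointing Prometheus at it. A quick check, assuming the exporter is listening on `127.0.0.1:2112` as configured:

```shell
# Fetch the first few exposed metrics from the Clash exporter.
curl -s http://127.0.0.1:2112/metrics | head -n 20
```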
```shell
vim first_rules.yml
```
```yaml
groups:
  - name: discard_destination
    rules:
      - record: source_policy_type:clash_network_traffic_bytes_total:sum
        expr: sum without (destination, job) (clash_network_traffic_bytes_total)
```
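Once the rule file is saved, the setup can be sanity-checked end to end. A sketch, assuming `promtool` (bundled with the Prometheus release) is on your PATH and Prometheus is reachable at `localhost:9090` as configured above:

```shell
# Validate the rule file, then the main config that references it.
promtool check rules first_rules.yml
promtool check config prometheus.yml

# After (re)starting Prometheus, query the recorded series via the HTTP API.
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=source_policy_type:clash_network_traffic_bytes_total:sum'
```

The record name follows the usual `level:metric:operations` naming convention for recording rules, and `sum without (destination, job)` drops the high-cardinality `destination` label while keeping the remaining labels on `clash_network_traffic_bytes_total`.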