---
# My global config
global:
  scrape_interval: "15s"      # Scrape targets every 15 seconds. Default is every 1 minute.
  evaluation_interval: "15s"  # Evaluate rules every 15 seconds. Default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: "{{ ansible_hostname }}"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped
  # from this config.
  - job_name: "prometheus"
    # metrics_path defaults to "/metrics"
    # scheme defaults to "http".
    static_configs:
      # Scrape the local Prometheus instance. Note: 0.0.0.0 is a bind address,
      # not a scrape target — use localhost to reach the server itself.
      - targets: ["localhost:9090"]

  - job_name: "netdata"
    metrics_path: "/api/v1/allmetrics"
    params:
      # format: prometheus | prometheus_all_hosts
      # Use `prometheus_all_hosts` if you want Prometheus to set the `instance`
      # label to your hostname instead of the IP.
      format: ["prometheus_all_hosts"]
      # source: as-collected | raw | average | sum | volume
      # default is: average
      source: ["average"]
      # server name for this Prometheus — the default is the client IP;
      # used by Netdata to uniquely identify this scraper.
      #server: ["prometheus1"]
    honor_labels: true
    static_configs:
      - targets: ["metrics-target:19999"]