Prometheus

Open the following ports: 9090, 9100, 9000, 9001 and 10901-10908.
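How exactly the ports are opened depends on your firewall; as an example, with ufw the list above translates to:

sudo ufw allow 9090/tcp          # Prometheus web UI
sudo ufw allow 9100/tcp          # Node Exporter
sudo ufw allow 9000/tcp          # MinIO S3 API
sudo ufw allow 9001/tcp          # MinIO console
sudo ufw allow 10901:10908/tcp   # Thanos gRPC/HTTP ports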

Determine which build you need by checking the CPU architecture (a small mapping snippet follows the list below):

uname -m
  • x86_64 → you need the AMD64 build (Intel/AMD processor).
  • aarch64 or arm64 → you need the ARM64 build (e.g. Raspberry Pi 4/5, Apple M1/M2, AWS Graviton).
  • armv7l or armhf → you need the ARMv7 (32-bit) build.
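If you want to derive the archive name from the uname -m output automatically, a small shell sketch like the following works; the naming scheme is the one used on the Prometheus releases page, adjust the version as needed:

ARCH=$(uname -m)
case "$ARCH" in
  x86_64)         SUFFIX=linux-amd64 ;;
  aarch64|arm64)  SUFFIX=linux-arm64 ;;
  armv7l|armhf)   SUFFIX=linux-armv7 ;;
  *)              echo "unknown architecture: $ARCH" ;;
esac
echo "prometheus-3.1.0.${SUFFIX}.tar.gz"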
				
cd /opt
sudo wget https://github.com/prometheus/prometheus/releases/download/v3.1.0/prometheus-3.1.0.linux-amd64.tar.gz
				
sudo tar xvf prometheus-3.1.0.linux-amd64.tar.gz
sudo mv prometheus-3.1.0.linux-amd64 prometheus
cd prometheus
				
nano /opt/prometheus/prometheus.yml
				
global:
  scrape_interval: 15s

# Note: storage settings (data path, block durations, retention) are not valid
# prometheus.yml options; they are passed as command-line flags in the systemd
# unit below (--storage.tsdb.path, --storage.tsdb.retention.time, ...).

scrape_configs:
  - job_name: 'node'
    static_configs:
      - targets: ['localhost:9100']  # Node Exporter
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']  # Prometheus itself
				
nano /etc/systemd/system/prometheus.service
				
[Unit]
Description=Prometheus
Wants=network-online.target
After=network-online.target

[Service]
User=root
Group=root
Type=simple
ExecStart=/opt/prometheus/prometheus \
  --config.file=/opt/prometheus/prometheus.yml \
  --storage.tsdb.path=/opt/prometheus/data \
  --storage.tsdb.retention.time=30d \
  --web.enable-admin-api

[Install]
WantedBy=multi-user.target
				
sudo systemctl daemon-reload
sudo systemctl enable prometheus
sudo systemctl start prometheus
				
systemctl status prometheus

Prometheus should now be reachable at http://<server-ip>:9090.
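A quick check from the shell (Prometheus exposes simple health and readiness endpoints):

curl -s http://localhost:9090/-/healthy
curl -s http://localhost:9090/-/ready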

Install Node Exporter for system metrics

				
cd /opt
sudo wget https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-amd64.tar.gz
sudo tar xvf node_exporter-1.8.2.linux-amd64.tar.gz
sudo mv node_exporter-1.8.2.linux-amd64 node_exporter
				
nano /etc/systemd/system/node_exporter.service
				
[Unit]
Description=Node Exporter
Wants=network-online.target
After=network-online.target

[Service]
User=root
Group=root
Type=simple
ExecStart=/opt/node_exporter/node_exporter

[Install]
WantedBy=multi-user.target
				
sudo systemctl daemon-reload
sudo systemctl enable node_exporter
sudo systemctl start node_exporter
				
systemctl status node_exporter

It is then reachable at http://<server-ip>:9100/metrics.
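To verify it from the shell, fetch a few metrics:

curl -s http://localhost:9100/metrics | grep "^node_" | head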

Set up MinIO (S3 storage)

				
cd /usr/local/bin
sudo wget https://dl.min.io/server/minio/release/linux-amd64/minio
sudo chmod +x minio
				
sudo mkdir -p /mnt/minio
sudo chown -R $USER:$USER /mnt/minio
				
nano /etc/systemd/system/minio.service
				
[Unit]
Description=MinIO Storage
After=network.target

[Service]
User=root
Group=root
ExecStart=/usr/local/bin/minio server /mnt/minio --console-address ":9001"
Restart=always

[Install]
WantedBy=multi-user.target
				
sudo systemctl daemon-reload
sudo systemctl enable minio
sudo systemctl start minio

The MinIO console is reachable at http://<server-ip>:9001 with the default admin credentials minioadmin:minioadmin; the S3 API listens on port 9000.
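To check from the shell that the server is up, MinIO's liveness endpoint on the S3 port can be queried:

curl -I http://localhost:9000/minio/health/live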

Create a bucket named thanos via the web GUI.
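Alternatively, once the MinIO client mc is set up (see further below), the bucket can also be created from the shell:

mc mb local/thanos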

Set up Thanos

Thanos extends Prometheus with long-term storage and downsampling.

Start the Thanos Sidecar

				
cd /opt
sudo wget https://github.com/thanos-io/thanos/releases/download/v0.37.2/thanos-0.37.2.linux-amd64.tar.gz
sudo tar xvf thanos-0.37.2.linux-amd64.tar.gz
sudo mv thanos-0.37.2.linux-amd64 thanos
cd thanos
				
nano /etc/systemd/system/thanos-sidecar.service
				
[Unit]
Description=Thanos Sidecar
After=network.target

[Service]
User=root
# Explicit ports so the Sidecar does not collide with the Thanos Store
# (both would otherwise default to gRPC port 10901).
ExecStart=/opt/thanos/thanos sidecar \
  --tsdb.path /opt/prometheus/data \
  --prometheus.url http://localhost:9090 \
  --objstore.config-file /opt/thanos/minio.yaml \
  --grpc-address 0.0.0.0:10907 \
  --http-address 0.0.0.0:10908
Restart=always

[Install]
WantedBy=multi-user.target
				
nano /opt/thanos/minio.yaml
				
type: S3
config:
  bucket: "thanos"
  endpoint: "localhost:9000"
  access_key: "minioadmin"
  secret_key: "minioadmin"
  insecure: true
				
sudo systemctl daemon-reload
sudo systemctl enable thanos-sidecar
sudo systemctl start thanos-sidecar
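Whether the sidecar started cleanly and can reach Prometheus and MinIO is easiest to check via its unit status and the journal:

systemctl status thanos-sidecar
journalctl -u thanos-sidecar -e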

				
			

Start the Thanos Store for queries

				
nano /etc/systemd/system/thanos-store.service
				
[Unit]
Description=Thanos Store
After=network.target

[Service]
User=root
ExecStart=/opt/thanos/thanos store \
  --data-dir /opt/thanos/store \
  --objstore.config-file /opt/thanos/minio.yaml \
  --grpc-address 0.0.0.0:10901 \
  --http-address 0.0.0.0:10906
Restart=always

[Install]
WantedBy=multi-user.target
				
sudo systemctl daemon-reload
sudo systemctl enable thanos-store
sudo systemctl start thanos-store

Start the Thanos Querier

				
nano /etc/systemd/system/thanos-querier.service
				
[Unit]
Description=Thanos Querier
After=network.target

[Service]
User=root
# Query both the Store (historical data from MinIO, gRPC 10901) and the
# Sidecar (recent data still held by Prometheus, gRPC 10907).
ExecStart=/opt/thanos/thanos query \
  --grpc-address 0.0.0.0:10903 \
  --http-address 0.0.0.0:10904 \
  --store localhost:10901 \
  --store localhost:10907
Restart=always

[Install]
WantedBy=multi-user.target
				
sudo systemctl daemon-reload
sudo systemctl enable thanos-querier
sudo systemctl start thanos-querier
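Note that downsampling itself is not done by the Querier but by the Thanos Compactor, which the steps above do not set up. If you want downsampled data in the bucket, a minimal unit sketch could look like this (the data dir and HTTP port 10902 are assumptions; exactly one Compactor instance may run against a bucket):

[Unit]
Description=Thanos Compactor
After=network.target

[Service]
User=root
ExecStart=/opt/thanos/thanos compact \
  --data-dir /opt/thanos/compact \
  --objstore.config-file /opt/thanos/minio.yaml \
  --http-address 0.0.0.0:10902 \
  --wait
Restart=always

[Install]
WantedBy=multi-user.target

Enable and start it the same way as the other units (daemon-reload, enable, start).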

				
			

Install the MinIO Client (mc)

				
wget https://dl.min.io/client/mc/release/linux-amd64/mc
sudo chmod +x mc
sudo mv mc /usr/local/bin/
				
mc --version
				
mc alias set local http://localhost:9000 minioadmin minioadmin
				
mc alias list

Check that the bucket created earlier in the web UI can be found. If you used a different bucket name, it has to be changed in the config /opt/thanos/minio.yaml.

				
mc ls local
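Once the sidecar has uploaded the first completed 2h block, its contents show up in the bucket as block directories (ULID names):

mc ls local/thanos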

				
			

Auto-restart and retention time

				
nano /etc/prometheus/prometheus.yml

# Sample config for Prometheus.

global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
      monitor: 'example'

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets: ['localhost:9093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    scrape_timeout: 5s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']

  - job_name: node
    # If prometheus-node-exporter is installed, grab stats about the local
    # machine by default.
    static_configs:
      - targets: ['localhost:9100']
# via the node exporter
  - job_name: 'gitlab_node_exporter'
    basic_auth:
      username: jotho
      password: 20Nas20?jo
    static_configs:
      - targets: ['212.227.202.85:1901']

  - job_name: 'gitlab_internal_exporter'
    static_configs:
      - targets: ['git.uckermark.consulting']
    metrics_path: '/-/metrics'

  - job_name: 'gitlab_usage_trends'
    scheme: https
    # metrics_path must be a path; the access token is passed as a query parameter
    metrics_path: '/-/metrics'
    params:
      token: ['KEVtgkzpkfsZzyo-ufEy']
    static_configs:
      - targets: ['git.uckermark.consulting']
    #authorization:
    #  type: 'Bearer'
    #  credentials: 'glpat-r3G8kFr7xgoj9xTrRgv2'
    # alternative: /api/v4/application/statistics

  - job_name: 'node_exporter_cueline_dev'
    static_configs:
      - targets: ['dev.cueline.app:1900']

  - job_name: 'pl.uckermark.consulting'
    static_configs:
      - targets: ['pl.uckermark.consulting:1900']

  - job_name: 'cueline.app'
    static_configs:
      - targets: ['87.106.153.120:1900']

				
			

The default retention time is how long Prometheus keeps data locally. It can be set with the flag

--storage.tsdb.retention.time=120d

For keeping data longer than that, Thanos (as set up above) is the recommended approach.

The retention time can also be configured in the Prometheus service file, or in /etc/default/prometheus if that file exists (typical for package-based installs).
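With a package-based install, /etc/default/prometheus usually just defines an ARGS variable that the packaged unit passes to the binary; the retention flag would then go there (a sketch; check your unit's ExecStart to confirm it reads ARGS):

# /etc/default/prometheus
ARGS="--storage.tsdb.retention.time=120d"

Otherwise, set the flag directly in the service unit, for example: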

				
[Unit]
Description=Prometheus Monitoring System
Wants=network-online.target
After=network-online.target

[Service]
User=prometheus
Group=prometheus
Type=simple
ExecStart=/usr/local/bin/prometheus \
  --config.file=/etc/prometheus/prometheus.yml \
  --storage.tsdb.path=/var/lib/prometheus \
  --web.listen-address=":60102" \
  --storage.tsdb.retention.time=120d

Restart=always

[Install]
WantedBy=multi-user.target
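
After changing the unit or the defaults file, reload systemd and restart Prometheus so the new retention setting takes effect:

sudo systemctl daemon-reload
sudo systemctl restart prometheus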