Commit aac27b5a83 by MSWS, 2025-03-15 20:27:58 -07:00 (parent 10a5e32fef).
7 changed files with 73 additions and 10 deletions.

View File

@@ -11,7 +11,7 @@ services:
TZ: "Europe/Berlin" #change Time Zone if needed
NEXTCLOUD_ADMIN_USER: "admin"
NEXTCLOUD_ADMIN_PASSWORD: ${NEXTCLOUD_ADMIN_PASSWORD}
NEXTCLOUD_TRUSTED_DOMAINS: "drive.msws.xyz"
NEXTCLOUD_TRUSTED_DOMAINS: "drive.msws.xyz drive.local.msws.xyz"
MYSQL_PASSWORD: ${MYSQL_PASSWORD}
MYSQL_DATABASE: ${MYSQL_DATABASE}
MYSQL_USER: ${MYSQL_USER}

View File

@@ -9,24 +9,40 @@ services:
volumes:
- grafana-data:/var/lib/grafana
networks:
- monolith_default
- cloudflared
- grafana
prometheus:
image: prom/prometheus:latest
container_name: prometheus
restart: always
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/etc/prometheus/
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.retention.time=365d'
networks:
- monolith_default
- cloudflared
- grafana
node_exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
command:
- '--path.rootfs=/host'
pid: host
restart: unless-stopped
volumes:
- '/:/host:ro,rslave'
networks:
- grafana
volumes:
grafana-data:
prometheus-data:
networks:
monolith_default:
cloudflared:
external: true
grafana:

grafana/prometheus.yml — new file, 39 lines
View File

@@ -0,0 +1,39 @@
# my global config
global:
  scrape_interval: 15s  # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s  # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["localhost:9090"]

  # node_exporter runs on the shared "grafana" docker network (see compose file),
  # so the container name resolves as a hostname.
  - job_name: "node"
    static_configs:
      - targets: ["node_exporter:9100"]

  - job_name: "wakapi"
    scrape_interval: 1m
    metrics_path: /api/metrics
    # NOTE(review): `bearer_token` is deprecated in Prometheus (since 2.26) in
    # favor of:
    #   authorization:
    #     credentials: <token>
    # Also, this is a secret committed to the repo — prefer `bearer_token_file`
    # (or `authorization.credentials_file`) pointing at a file kept out of VCS,
    # and rotate this token. The trailing "Cg==" suggests the value was
    # base64-encoded with a trailing newline — confirm the API accepts it as-is.
    bearer_token: 'ZmNmZjkxZDktODU0YS00NDRiLWE2N2YtZTRkZjY5ZGZiZDI4Cg=='
    static_configs:
      - targets: ["wakapi:3002"]

View File

@@ -8,6 +8,8 @@ services:
- /home/iboaz/jellyfin/config:/config
- /home/iboaz/jellyfin/cache:/cache
- /mnt/usb/media/jellyfin:/usbmedia
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- type: bind
source: /home/iboaz/jellyfin/media
target: /media

View File

@@ -2,20 +2,23 @@ services:
pihole:
container_name: pihole
image: pihole/pihole:latest
restart: unless-stopped
network_mode: "host"
network_mode: host
environment:
TZ: "America/Los_Angeles" # Change this to your timezone
WEBPASSWORD: ${WEB_PASSWORD} # Set the Pi-hole admin password
DNSMASQ_LISTENING: "all"
PIHOLE_DNS_: "1.1.1.1;1.0.0.1" # Upstream DNS (Cloudflare)
VIRTUAL_HOST: "pi.hole"
FTLCONF_webserver_port: 3141
volumes:
- etc-pihole:/etc/pihole
- etc-dnsmasq:/etc/dnsmasq.d
cap_add:
- NET_ADMIN
- SYS_NICE
volumes:
etc-pihole:
etc-dnsmasq:
networks:
cloudflared:
external: true

View File

@@ -7,13 +7,14 @@ services:
ports:
- 443:443
- 22:22
- 80:80
environment:
- PUID=1000
- PGID=1000
- "TZ=Europe/Berlin"
- "URL=msws.xyz"
- "SUBDOMAINS=wildcard"
- "EXTRA_DOMAINS=*.code.msws.xyz"
- "EXTRA_DOMAINS=*.code.msws.xyz,*.local.msws.xyz"
- "VALIDATION=dns"
- "DNSPLUGIN=cloudflare"
- "EMAIL=imodmaker@gmail.com"

View File

@@ -5,8 +5,10 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
listen 80;
listen [::]:80;
server_name code.* "~^[0-9]{1,10}\.code\..*$";
server_name local.code.* code.* "~^[0-9]{1,10}\.code\..*$";
include /config/nginx/ssl.conf;