Initial commit - Runtipi Appstore with Wazuh 4.14.1
Some checks failed
Test / test (push) Has been cancelled

- Added Wazuh 4.14.1 SIEM/XDR application for Runtipi
- Simplified init scripts following official Wazuh Docker patterns
- Complete documentation in French (description.md)
- Health check diagnostic script (wazuh-health-check.sh)
- SSL/TLS certificates auto-generation
- Whoami test application included

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Gui-Gos
2026-01-02 12:26:29 +01:00
commit 46122d5a7f
30 changed files with 2584 additions and 0 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

45
.github/workflows/renovate.yml vendored Normal file
View File

@@ -0,0 +1,45 @@
# Runs the Renovate dependency-update bot, either manually (with a chosen
# log verbosity) or on a nightly schedule.
name: Renovate
on:
  workflow_dispatch:
    inputs:
      log_level:
        type: choice
        description: Log level
        default: INFO
        options:
          - DEBUG
          - INFO
          - WARN
          - ERROR
          - FATAL
  schedule:
    # Every day at 02:00 UTC.
    - cron: 0 2 * * *
jobs:
  renovate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install node
        uses: actions/setup-node@v4
        with:
          node-version: "22"
      - name: Install bun
        uses: oven-sh/setup-bun@v2
      # Cache globally-installed bun packages so the Renovate install below is
      # fast on repeat runs; the key is pinned to the Renovate major version.
      - name: Cache Bun global packages
        uses: actions/cache@v4
        with:
          path: ~/.bun/install/global
          key: ${{ runner.os }}-bun-global-renovate-40
          restore-keys: |
            ${{ runner.os }}-bun-global-
      - name: Install Renovate
        run: bun install -g renovate@40
      # The log level falls back to INFO on scheduled runs, because
      # workflow inputs only exist for workflow_dispatch triggers.
      - name: Run renovate
        run: LOG_LEVEL=${{ github.event.inputs.log_level || 'INFO' }} renovate --token ${{ secrets.GITHUB_TOKEN }} ${{ github.repository }}

23
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,23 @@
# Validates every app in the store (schema tests in __tests__/apps.test.ts)
# on pushes and pull requests targeting main.
name: Test
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
      - name: Install dependencies
        run: bun install
      - name: Run tests
        run: bun test

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
node_modules/

13
LICENSE Normal file
View File

@@ -0,0 +1,13 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

31
README.md Normal file
View File

@@ -0,0 +1,31 @@
# Example App Store Template
This repository serves as a template for creating your own custom app store for the Runtipi platform. Use this as a starting point to create and share your own collection of applications.
## Repository Structure
- **apps/**: Contains individual app directories
- Each app has its own folder (e.g., `whoami/`) with the following structure:
- `config.json`: App configuration file
- `docker-compose.json`: Docker setup for the app
- `metadata/`: Contains app visuals and descriptions
- `description.md`: Markdown description of the app
- `logo.jpg`: App logo image
- **tests/**: Contains test files for the app store
- `apps.test.ts`: Test suite for validating apps
## Getting Started
This repository is intended to serve as a template for creating your own app store. Follow these steps to get started:
1. Click the "Use this template" button to create a new repository based on this template
2. Customize the apps or add your own app folders in the `apps/` directory
3. Test your app store by using it with Runtipi
## Documentation
For detailed instructions on creating your own app store, please refer to the official guide:
[Create Your Own App Store Guide](https://runtipi.io/docs/guides/create-your-own-app-store)

77
__tests__/apps.test.ts Normal file
View File

@@ -0,0 +1,77 @@
import { expect, test, describe } from "bun:test";
import { appInfoSchema, dynamicComposeSchema } from '@runtipi/common/schemas'
import { fromError } from 'zod-validation-error';
import fs from 'node:fs'
import path from 'node:path'
/**
 * List the names of all app directories under `<cwd>/apps`.
 *
 * Fully async: the previous version blocked the event loop with
 * `fs.statSync` inside an async function. `fs.promises.stat` follows
 * symlinks exactly like `statSync` did, so symlinked app directories
 * are still included.
 */
const getApps = async (): Promise<string[]> => {
  const appsRoot = path.join(process.cwd(), 'apps')
  const entries = await fs.promises.readdir(appsRoot)
  const isDir = await Promise.all(
    entries.map(async (entry) => (await fs.promises.stat(path.join(appsRoot, entry))).isDirectory()),
  )
  return entries.filter((_, index) => isDir[index])
};
/**
 * Read `apps/<app>/<file>` relative to the current working directory.
 * Returns the file's UTF-8 content, or `null` when it cannot be read
 * (missing file, permission error, …).
 */
const getFile = async (app: string, file: string) => {
  const target = path.join(process.cwd(), 'apps', app, file)
  try {
    return await fs.promises.readFile(target, 'utf-8')
  } catch {
    return null
  }
}
describe("each app should have the required files", async () => {
  // Every app folder must ship these four files for Runtipi to load it.
  const requiredFiles = ['config.json', 'docker-compose.json', 'metadata/logo.jpg', 'metadata/description.md']
  for (const app of await getApps()) {
    for (const file of requiredFiles) {
      test(`app ${app} should have ${file}`, async () => {
        expect(await getFile(app, file)).not.toBeNull()
      })
    }
  }
})
describe("each app should have a valid config.json", async () => {
  const apps = await getApps()
  for (const app of apps) {
    test(`app ${app} should have a valid config.json`, async () => {
      const fileContent = await getFile(app, 'config.json')
      // Parse up front so a malformed file produces a clear, app-specific
      // failure instead of an anonymous SyntaxError thrown from inside the
      // safeParse argument. `|| '{}'` also covers empty files.
      let raw: unknown
      try {
        raw = JSON.parse(fileContent || '{}')
      } catch (err) {
        throw new Error(`config.json for app ${app} is not valid JSON: ${err}`)
      }
      // NOTE(review): `urn` is omitted from the schema — presumably injected
      // by the Runtipi runtime rather than stored in the repo; confirm.
      const parsed = appInfoSchema.omit({ urn: true }).safeParse(raw)
      if (!parsed.success) {
        const validationError = fromError(parsed.error);
        console.error(`Error parsing config.json for app ${app}:`, validationError.toString());
      }
      expect(parsed.success).toBe(true)
    })
  }
})
describe("each app should have a valid docker-compose.json", async () => {
  const apps = await getApps()
  for (const app of apps) {
    test(`app ${app} should have a valid docker-compose.json`, async () => {
      const fileContent = await getFile(app, 'docker-compose.json')
      // Parse up front so a malformed file produces a clear, app-specific
      // failure instead of an anonymous SyntaxError thrown from inside the
      // safeParse argument. `|| '{}'` also covers empty files.
      let raw: unknown
      try {
        raw = JSON.parse(fileContent || '{}')
      } catch (err) {
        throw new Error(`docker-compose.json for app ${app} is not valid JSON: ${err}`)
      }
      const parsed = dynamicComposeSchema.safeParse(raw)
      if (!parsed.success) {
        const validationError = fromError(parsed.error);
        console.error(`Error parsing docker-compose.json for app ${app}:`, validationError.toString());
      }
      expect(parsed.success).toBe(true)
    })
  }
});

BIN
apps/.DS_Store vendored Normal file

Binary file not shown.

BIN
apps/wazuh-runtipi/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,57 @@
{
"name": "Wazuh",
"id": "wazuh",
"available": true,
"exposable": true,
"dynamic_config": true,
"port": 5601,
"tipi_version": 2,
"version": "4.14.1",
"categories": ["security"],
"short_desc": "Plateforme XDR / SIEM open source.",
"description": "Wazuh est une plateforme de sécurité open source (XDR / SIEM) qui déploie la stack Docker single-node officielle : indexer, manager et dashboard Wazuh 4.14.1.",
"author": "Wazuh",
"source": "https://github.com/wazuh/wazuh-docker",
"website": "https://wazuh.com/",
"form_fields": [
{
"type": "text",
"env_variable": "INDEXER_USERNAME",
"label": "Indexer admin username",
"default": "admin",
"required": true
},
{
"type": "password",
"env_variable": "INDEXER_PASSWORD",
"label": "Indexer admin password",
"default": "admin",
"required": true
},
{
"type": "text",
"env_variable": "DASHBOARD_USERNAME",
"label": "Dashboard username (internal OpenSearch user)",
"default": "kibanaserver",
"required": true
},
{
"type": "password",
"env_variable": "DASHBOARD_PASSWORD",
"label": "Dashboard password (internal OpenSearch user)",
"default": "kibanaserver",
"required": true
},
{
"type": "password",
"env_variable": "API_PASSWORD",
"label": "Wazuh API password (user: wazuh-wui)",
"default": "MyS3cr37P450r.*-",
"required": true
}
],
"supported_architectures": [
"amd64",
"arm64"
]
}

BIN
apps/wazuh-runtipi/data/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,12 @@
# Node list for the Wazuh certificate generator: one TLS certificate is
# issued per entry below. The names match the service hostnames used on the
# Docker network (wazuh.indexer / wazuh.manager / wazuh.dashboard).
nodes:
  # OpenSearch indexer node certificate.
  indexer:
    - name: wazuh.indexer
      ip: wazuh.indexer
  # Wazuh manager (server) certificate.
  server:
    - name: wazuh.manager
      ip: wazuh.manager
  # Dashboard certificate.
  dashboard:
    - name: wazuh.dashboard
      ip: wazuh.dashboard
  # Admin client certificate (used by securityadmin.sh).
  admin:
    - name: admin

View File

@@ -0,0 +1,569 @@
#!/bin/bash
# wazuh-health-check.sh
# Complete diagnostic script for Wazuh on Runtipi.
# Version: 1.0 (2025-12-27)
# Usage: ./wazuh-health-check.sh

# ANSI color codes used throughout the report output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# --- Configuration ----------------------------------------------------------
# Auto-detect container prefix from running containers.
# Runtipi uses format: wazuh-runtipi_REPO-NAME-wazuh-SERVICE-1
# We need to extract: wazuh-runtipi_REPO-NAME
# Note: REPO-NAME can contain hyphens (e.g., synode-it)
WAZUH_PREFIX=$(docker ps -a --format '{{.Names}}' 2>/dev/null | grep -E "wazuh-runtipi.*-wazuh-" | head -1 | sed -E 's/^(.*)-wazuh-.*/\1/' || echo "wazuh-runtipi")

# Auto-detect the instance data directory under app-data (runtime data).
# app-data holds the running containers' data; apps holds the immutable
# repository sources.
# Search pattern: /opt/runtipi/app-data/REPO-NAME/wazuh-runtipi/data
DATA_DIR=$(find /opt/runtipi/app-data -maxdepth 3 -type d -name "wazuh-runtipi" 2>/dev/null | head -1)
if [ -n "$DATA_DIR" ]; then
  DATA_DIR="$DATA_DIR/data"
fi

# Fallback: expand the wildcard ourselves and keep the first path that is a
# real directory. (The previous `echo glob | awk '{print $1}'` returned the
# literal, unexpanded pattern when nothing matched, and its 2>/dev/null
# redirection on `echo` was a no-op.)
if [ -z "$DATA_DIR" ] || [ ! -d "$DATA_DIR" ]; then
  for candidate in /opt/runtipi/app-data/*/wazuh-runtipi/data; do
    if [ -d "$candidate" ]; then
      DATA_DIR="$candidate"
      break
    fi
  done
fi
SECURITY_DIR="$DATA_DIR/indexer-security"

echo -e "${CYAN}=========================================${NC}"
echo -e "${CYAN} WAZUH HEALTH CHECK - $(date +%Y-%m-%d\ %H:%M:%S)${NC}"
echo -e "${CYAN}=========================================${NC}"
echo ""
echo -e "${BLUE}Configuration:${NC}"
echo -e " Container prefix: ${YELLOW}$WAZUH_PREFIX${NC}"
echo -e " Data directory: ${YELLOW}$DATA_DIR${NC}"
echo -e " Security directory: ${YELLOW}$SECURITY_DIR${NC}"
echo ""
# print_section TITLE — print a three-line section banner:
# a colored rule, the title, and a closing rule.
print_section() {
  local rule="${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
  echo -e "$rule"
  echo -e "${BLUE}$1${NC}"
  echo -e "$rule"
}
# Function to check service
# check_service SERVICE — find the first container whose name matches
# "<prefix>…<service>" and print its state on one line.
# Returns 0 when the container is running (healthy, or has no healthcheck)
# or exited cleanly (code 0); returns 1 in every other case.
check_service() {
  local service=$1
  # `docker ps -a` also lists stopped containers, so exited one-shot
  # services are still found.
  local container_name=$(docker ps -a --format '{{.Names}}' | grep -E "${WAZUH_PREFIX}.*${service}" | head -1)
  if [ -z "$container_name" ]; then
    echo -e "${RED}✗ Container not found${NC}"
    return 1
  fi
  local status=$(docker inspect --format='{{.State.Status}}' "$container_name" 2>/dev/null)
  # The Health template fails for containers without a HEALTHCHECK; the
  # `|| echo` fallback turns that into a readable sentinel value.
  local health=$(docker inspect --format='{{.State.Health.Status}}' "$container_name" 2>/dev/null || echo "no healthcheck")
  echo -ne " $service: "
  if [ "$status" = "running" ]; then
    if [ "$health" = "healthy" ]; then
      echo -e "${GREEN}✓ Running & Healthy${NC}"
      return 0
    elif [ "$health" = "no healthcheck" ]; then
      echo -e "${YELLOW}⚠ Running (no healthcheck)${NC}"
      return 0
    else
      echo -e "${YELLOW}⚠ Running but $health${NC}"
      return 1
    fi
  elif [ "$status" = "exited" ]; then
    # One-shot init containers legitimately exit with code 0.
    local exit_code=$(docker inspect --format='{{.State.ExitCode}}' "$container_name" 2>/dev/null)
    if [ "$exit_code" = "0" ]; then
      echo -e "${GREEN}✓ Exited successfully (code 0)${NC}"
      return 0
    else
      echo -e "${RED}✗ Exited with code $exit_code${NC}"
      return 1
    fi
  else
    echo -e "${RED}✗ Status: $status${NC}"
    return 1
  fi
}
# 1. Services Health Check
print_section "1. SERVICES HEALth CHECK" >/dev/null 2>&1 || true # (no-op guard removed below)
print_section "1. SERVICES HEALTH CHECK"
echo ""
SERVICES_OK=0
SERVICES_FAILED=0
# Standard services use the generic check; indexer-init is special-cased below.
for service in certs indexer manager dashboard; do
  if check_service "$service"; then
    ((SERVICES_OK++))
  else
    ((SERVICES_FAILED++))
  fi
done
# Special check for indexer-init (runs with tail -f to stay alive - Runtipi requirement)
echo -ne " indexer-init: "
INIT_CONTAINER=$(docker ps -a --format '{{.Names}}' | grep -E "${WAZUH_PREFIX}.*indexer-init" | head -1)
if [ -n "$INIT_CONTAINER" ]; then
  INIT_STATUS=$(docker inspect --format='{{.State.Status}}' "$INIT_CONTAINER" 2>/dev/null)
  INIT_HEALTH=$(docker inspect --format='{{.State.Health.Status}}' "$INIT_CONTAINER" 2>/dev/null || echo "no healthcheck")
  INIT_RESTARTING=$(docker inspect --format='{{.State.Restarting}}' "$INIT_CONTAINER" 2>/dev/null)
  # Check if .init-complete marker exists (the init script creates it after
  # securityadmin.sh succeeds).
  INIT_COMPLETE_EXISTS=$([ -f "$SECURITY_DIR/.init-complete" ] && echo "yes" || echo "no")
  if [ "$INIT_STATUS" = "running" ] && [ "$INIT_HEALTH" = "healthy" ] && [ "$INIT_COMPLETE_EXISTS" = "yes" ]; then
    echo -e "${GREEN}✓ Running & Healthy (init complete)${NC}"
    ((SERVICES_OK++))
  elif [ "$INIT_RESTARTING" = "true" ] || [ "$INIT_STATUS" = "restarting" ]; then
    echo -e "${RED}✗ Restarting in loop${NC}"
    echo -e " ${YELLOW}⚠ This indicates a problem with security initialization${NC}"
    ((SERVICES_FAILED++))
  elif [ "$INIT_STATUS" = "running" ] && [ "$INIT_COMPLETE_EXISTS" = "no" ]; then
    echo -e "${YELLOW}⚠ Running but initialization not complete yet${NC}"
    ((SERVICES_FAILED++))
  elif [ "$INIT_STATUS" = "running" ]; then
    echo -e "${YELLOW}⚠ Running but $INIT_HEALTH${NC}"
    ((SERVICES_FAILED++))
  else
    echo -e "${RED}✗ Status: $INIT_STATUS${NC}"
    ((SERVICES_FAILED++))
  fi
else
  echo -e "${RED}✗ Container not found${NC}"
  ((SERVICES_FAILED++))
fi
echo ""
echo -e "Summary: ${GREEN}$SERVICES_OK OK${NC} | ${RED}$SERVICES_FAILED FAILED${NC}"
echo ""
# 1b. Container Logs for All Services
print_section "1b. CONTAINER LOGS (Last 50 lines)"
echo ""
# Display logs for all Wazuh containers, indented by one space for readability.
# NOTE(review): the "indexer" pattern also matches the indexer-init container;
# `head -1` keeps whichever `docker ps -a` lists first — confirm the intended
# container wins, or the indexer block may show init logs.
for service in certs indexer indexer-init manager dashboard; do
  container_name=$(docker ps -a --format '{{.Names}}' | grep -E "${WAZUH_PREFIX}.*${service}" | head -1)
  if [ -n "$container_name" ]; then
    status=$(docker inspect --format='{{.State.Status}}' "$container_name" 2>/dev/null)
    health=$(docker inspect --format='{{.State.Health.Status}}' "$container_name" 2>/dev/null || echo "no healthcheck")
    echo -e "${CYAN}═══════════════════════════════════════════════════${NC}"
    echo -e "${CYAN}Container: ${YELLOW}$container_name${NC}"
    echo -e "${CYAN}Status: ${YELLOW}$status${NC} | Health: ${YELLOW}$health${NC}"
    echo -e "${CYAN}═══════════════════════════════════════════════════${NC}"
    docker logs --tail 50 "$container_name" 2>&1 | sed 's/^/ /'
    echo ""
  fi
done
# 2. Disk Usage Check
print_section "2. DISK USAGE CHECK"
echo ""
# Re-check DATA_DIR exists (in case the earlier detection failed).
# BUGFIX: the data dir sits 3 levels below app-data
# (app-data/REPO/wazuh-runtipi/data), so -maxdepth must be 3 — with the
# previous -maxdepth 2 this fallback could never match anything.
if [ ! -d "$DATA_DIR" ]; then
  DATA_DIR=$(find /opt/runtipi/app-data -maxdepth 3 -type d -path "*/wazuh-runtipi/data" 2>/dev/null | head -1)
  SECURITY_DIR="$DATA_DIR/indexer-security"
fi
# For disk usage, measure the parent directory (wazuh-runtipi), not just /data.
if [ -d "$DATA_DIR" ] && [ -n "$DATA_DIR" ]; then
  # Parent directory (strip the trailing /data component).
  APP_DIR=$(dirname "$DATA_DIR")
  SIZE_HUMAN=$(du -sh "$APP_DIR" 2>/dev/null | awk '{print $1}')
  SIZE_GB=$(du -sb "$APP_DIR" 2>/dev/null | awk '{print int($1/1024/1024/1024)}')
  echo -e " App directory: $APP_DIR"
  echo -ne " Size: $SIZE_HUMAN ($SIZE_GB GB) - "
  # Thresholds: a healthy install is expected around ~5GB; >40GB usually
  # means the indexer is stuck in a write loop (Bug #1).
  if [ "$SIZE_GB" -gt 40 ]; then
    echo -e "${RED}⚠ WARNING: Excessive size! Expected ~5GB${NC}"
    echo -e " ${YELLOW}Possible indexer infinite loop - check Bug #1${NC}"
  elif [ "$SIZE_GB" -gt 20 ]; then
    echo -e "${YELLOW}⚠ INFO: Higher than expected (~5GB)${NC}"
  else
    echo -e "${GREEN}✓ OK (expected ~5GB)${NC}"
  fi
else
  echo -e "${RED}✗ Data directory not found: $DATA_DIR${NC}"
fi
echo ""
# 3. Security Files Check
print_section "3. SECURITY FILES CHECK"
echo ""
# The OpenSearch security config set that indexer-init must have copied.
REQUIRED_FILES=(
  "config.yml"
  "roles.yml"
  "roles_mapping.yml"
  "internal_users.yml"
  "action_groups.yml"
  "tenants.yml"
  "nodes_dn.yml"
  "whitelist.yml"
)
FILES_OK=0
FILES_MISSING=0
# Re-derive SECURITY_DIR in case DATA_DIR was re-detected in section 2.
if [ ! -d "$SECURITY_DIR" ]; then
  SECURITY_DIR="$DATA_DIR/indexer-security"
fi
if [ -d "$SECURITY_DIR" ] && [ -n "$SECURITY_DIR" ]; then
  echo -e " Security directory: $SECURITY_DIR"
  echo ""
  for file in "${REQUIRED_FILES[@]}"; do
    echo -ne " $file: "
    if [ -f "$SECURITY_DIR/$file" ]; then
      echo -e "${GREEN}✓ Present${NC}"
      ((FILES_OK++))
    else
      echo -e "${RED}✗ MISSING${NC}"
      ((FILES_MISSING++))
    fi
  done
  echo ""
  # Derive the total from the array instead of a hard-coded "8" so the
  # summary stays correct if the required-file list ever changes.
  echo -e "Summary: ${GREEN}$FILES_OK/${#REQUIRED_FILES[@]} files present${NC}"
  if [ "$FILES_MISSING" -gt 0 ]; then
    echo -e "${RED}$FILES_MISSING files missing - Bug #4 not fixed!${NC}"
  fi
else
  echo -e "${RED}✗ Security directory not found: $SECURITY_DIR${NC}"
fi
echo ""
# 4. Network Connectivity Check
print_section "4. NETWORK CONNECTIVITY CHECK"
echo ""
# `docker ps` WITHOUT -a: only running containers are eligible here.
DASHBOARD_CONTAINER=$(docker ps --format '{{.Names}}' | grep -E "${WAZUH_PREFIX}.*dashboard" | head -1)
# `grep -v "init"` excludes the indexer-init container from the match.
INDEXER_CONTAINER=$(docker ps --format '{{.Names}}' | grep -E "${WAZUH_PREFIX}.*indexer" | grep -v "init" | head -1)
if [ -n "$DASHBOARD_CONTAINER" ] && [ -n "$INDEXER_CONTAINER" ]; then
  echo -e " Testing dashboard → indexer connectivity..."
  echo ""
  # DNS resolution inside the dashboard container.
  echo -ne " DNS resolution (wazuh.indexer): "
  if docker exec "$DASHBOARD_CONTAINER" getent hosts wazuh.indexer &>/dev/null; then
    echo -e "${GREEN}✓ OK${NC}"
  else
    echo -e "${RED}✗ FAILED${NC}"
  fi
  # HTTP connectivity: both 200 and 401 prove the indexer answers TLS+HTTP
  # (401 just means credentials were not supplied).
  echo -ne " HTTP connectivity: "
  HTTP_CODE=$(docker exec "$DASHBOARD_CONTAINER" curl -k -s -o /dev/null -w "%{http_code}" https://wazuh.indexer:9200 2>/dev/null || echo "000")
  if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "401" ]; then
    echo -e "${GREEN}✓ OK (HTTP $HTTP_CODE)${NC}"
  elif [ "$HTTP_CODE" = "503" ]; then
    echo -e "${RED}✗ FAILED (HTTP 503 - Service Unavailable)${NC}"
    echo -e " ${YELLOW}This indicates Bug #4 - Security not initialized${NC}"
  else
    echo -e "${YELLOW}⚠ Unexpected (HTTP $HTTP_CODE)${NC}"
  fi
  # Network check (containers can be on multiple networks).
  echo -ne " Shared network: "
  DASH_NETS=$(docker inspect "$DASHBOARD_CONTAINER" --format='{{range $k, $v := .NetworkSettings.Networks}}{{$k}} {{end}}')
  IDX_NETS=$(docker inspect "$INDEXER_CONTAINER" --format='{{range $k, $v := .NetworkSettings.Networks}}{{$k}} {{end}}')
  # Find the first network both containers are attached to.
  COMMON_NET=""
  for net in $IDX_NETS; do
    if echo "$DASH_NETS" | grep -q "$net"; then
      COMMON_NET="$net"
      break
    fi
  done
  if [ -n "$COMMON_NET" ]; then
    echo -e "${GREEN}✓ OK ($COMMON_NET)${NC}"
  else
    echo -e "${RED}✗ FAILED${NC}"
    echo -e " ${YELLOW}Dashboard networks: $DASH_NETS${NC}"
    echo -e " ${YELLOW}Indexer networks: $IDX_NETS${NC}"
  fi
else
  echo -e "${RED}✗ Cannot test - dashboard or indexer not running${NC}"
fi
echo ""
# 5. Dashboard Config Check
# Uses $DASHBOARD_CONTAINER detected in section 4.
print_section "5. DASHBOARD CONFIGURATION CHECK"
echo ""
if [ -n "$DASHBOARD_CONTAINER" ]; then
  CONFIG_FILE="/usr/share/wazuh-dashboard/config/custom/opensearch_dashboards.yml"
  echo -ne " Config file exists: "
  if docker exec "$DASHBOARD_CONTAINER" test -f "$CONFIG_FILE" 2>/dev/null; then
    echo -e "${GREEN}✓ YES${NC}"
    # test -s: file exists AND has a size greater than zero.
    echo -ne " Config has content: "
    if docker exec "$DASHBOARD_CONTAINER" test -s "$CONFIG_FILE" 2>/dev/null; then
      echo -e "${GREEN}✓ YES${NC}"
      echo -ne " opensearch.hosts configured: "
      if docker exec "$DASHBOARD_CONTAINER" grep -q "opensearch.hosts:" "$CONFIG_FILE" 2>/dev/null; then
        # Echo the actual hosts line so a wrong URL is visible at a glance.
        HOSTS_LINE=$(docker exec "$DASHBOARD_CONTAINER" grep "opensearch.hosts:" "$CONFIG_FILE" 2>/dev/null)
        echo -e "${GREEN}$HOSTS_LINE${NC}"
      else
        echo -e "${RED}✗ NOT FOUND${NC}"
      fi
    else
      echo -e "${RED}✗ EMPTY${NC}"
      echo -e " ${YELLOW}Dashboard config file is empty - check entrypoint script${NC}"
    fi
  else
    echo -e "${RED}✗ NO${NC}"
    echo -e " ${YELLOW}Dashboard config not created - check entrypoint script${NC}"
  fi
else
  echo -e "${RED}✗ Dashboard container not running${NC}"
fi
echo ""
# 6. Manager Config Check
print_section "6. MANAGER CONFIGURATION CHECK"
echo ""
MANAGER_CONTAINER=$(docker ps --format '{{.Names}}' | grep -E "${WAZUH_PREFIX}.*manager" | head -1)
if [ -n "$MANAGER_CONTAINER" ]; then
  OSSEC_CONF="/var/ossec/etc/ossec.conf"
  OSSEC_CUSTOM="/var/ossec/etc/custom/ossec.conf"
  echo -ne " Main config exists: "
  if docker exec "$MANAGER_CONTAINER" test -f "$OSSEC_CONF" 2>/dev/null; then
    echo -e "${GREEN}✓ YES${NC}"
  else
    echo -e "${RED}✗ NO${NC}"
  fi
  echo -ne " Custom config exists: "
  if docker exec "$MANAGER_CONTAINER" test -f "$OSSEC_CUSTOM" 2>/dev/null; then
    echo -e "${GREEN}✓ YES${NC}"
  else
    echo -e "${RED}✗ NO${NC}"
  fi
  # A symlinked ossec.conf means the real config lives under the persistent
  # /custom path — the "Bug #3" persistence fix.
  echo -ne " Main config is symlink: "
  if docker exec "$MANAGER_CONTAINER" test -L "$OSSEC_CONF" 2>/dev/null; then
    echo -e "${GREEN}✓ YES (Bug #3 fixed)${NC}"
  else
    echo -e "${YELLOW}⚠ NO (Bug #3 - config not persistent)${NC}"
  fi
else
  echo -e "${RED}✗ Manager container not running${NC}"
fi
echo ""
# 6b. Filebeat Check
# Uses $MANAGER_CONTAINER detected in section 6.
print_section "6b. FILEBEAT CHECK"
echo ""

# manager_env VAR — print the value of environment variable VAR from the
# manager container's configuration (empty when unset or container missing).
# Replaces four copies of the same inspect|grep|cut pipeline.
manager_env() {
  docker inspect "$MANAGER_CONTAINER" --format='{{range .Config.Env}}{{println .}}{{end}}' 2>/dev/null \
    | grep "^$1=" | cut -d= -f2
}

if [ -n "$MANAGER_CONTAINER" ]; then
  FILEBEAT_CONF="/etc/filebeat/filebeat.yml"
  # Check environment variables (official Wazuh method)
  echo "Environment Variables (Official Wazuh Method):"
  echo ""
  echo -ne " FILEBEAT_SSL_VERIFICATION_MODE: "
  SSL_VERIF=$(manager_env FILEBEAT_SSL_VERIFICATION_MODE)
  if [ "$SSL_VERIF" = "full" ]; then
    echo -e "${GREEN}✓ full${NC}"
  else
    echo -e "${RED}${SSL_VERIF:-not set}${NC}"
  fi
  echo -ne " SSL_CERTIFICATE_AUTHORITIES: "
  SSL_CA=$(manager_env SSL_CERTIFICATE_AUTHORITIES)
  if [ -n "$SSL_CA" ]; then
    echo -e "${GREEN}${SSL_CA}${NC}"
  else
    echo -e "${RED}✗ not set${NC}"
  fi
  echo -ne " SSL_CERTIFICATE: "
  SSL_CERT=$(manager_env SSL_CERTIFICATE)
  if [ -n "$SSL_CERT" ]; then
    echo -e "${GREEN}${SSL_CERT}${NC}"
  else
    echo -e "${RED}✗ not set${NC}"
  fi
  echo -ne " SSL_KEY: "
  SSL_KEY=$(manager_env SSL_KEY)
  if [ -n "$SSL_KEY" ]; then
    echo -e "${GREEN}${SSL_KEY}${NC}"
  else
    echo -e "${RED}✗ not set${NC}"
  fi
  echo ""
  echo "Generated Filebeat Configuration:"
  echo ""
  echo -ne " Filebeat config exists: "
  if docker exec "$MANAGER_CONTAINER" test -f "$FILEBEAT_CONF" 2>/dev/null; then
    echo -e "${GREEN}✓ YES${NC}"
    echo -ne " Config has indexer https URL: "
    if docker exec "$MANAGER_CONTAINER" grep -q "https://wazuh.indexer:9200" "$FILEBEAT_CONF" 2>/dev/null; then
      echo -e "${GREEN}✓ YES${NC}"
    else
      echo -e "${RED}✗ NO (indexer URL incorrect)${NC}"
    fi
    echo -ne " SSL verification enabled: "
    if docker exec "$MANAGER_CONTAINER" grep -qE "ssl\.verification_mode:\s*full" "$FILEBEAT_CONF" 2>/dev/null; then
      echo -e "${GREEN}✓ YES${NC}"
    else
      echo -e "${RED}✗ NO (SSL not configured in filebeat.yml)${NC}"
      echo -e " ${YELLOW}⚠ Check if cont-init.d/1-config-filebeat ran successfully${NC}"
    fi
    echo -ne " Seccomp fix for pthread: "
    if docker exec "$MANAGER_CONTAINER" grep -q "seccomp:" "$FILEBEAT_CONF" 2>/dev/null; then
      echo -e "${GREEN}✓ YES (pthread_create fix present)${NC}"
    else
      echo -e "${YELLOW}⚠ NO (may cause pthread_create errors)${NC}"
    fi
  else
    echo -e "${RED}✗ NO${NC}"
    echo -e " ${YELLOW}⚠ Filebeat config not generated - check init logs${NC}"
  fi
else
  echo -e "${RED}✗ Manager container not running${NC}"
fi
echo ""
# 6c. Known Errors Detection
print_section "6c. KNOWN ERRORS DETECTION"
echo ""
ERRORS_FOUND=0
if [ -n "$MANAGER_CONTAINER" ]; then
  echo "Scanning manager logs for known errors..."
  echo ""
  # Capture the logs ONCE; the previous version streamed the complete log
  # from the docker daemon four times, once per pattern.
  MANAGER_LOGS=$(docker logs "$MANAGER_CONTAINER" 2>&1)
  # pthread_create error
  echo -ne " pthread_create error: "
  if printf '%s\n' "$MANAGER_LOGS" | grep -q "pthread_create failed"; then
    echo -e "${RED}✗ FOUND${NC}"
    echo -e " ${YELLOW}Fix: Ensure filebeat.yml has seccomp configuration${NC}"
    ((ERRORS_FOUND++))
  else
    echo -e "${GREEN}✓ Not found${NC}"
  fi
  # x509 certificate error
  echo -ne " x509 certificate error: "
  if printf '%s\n' "$MANAGER_LOGS" | grep -q "x509: certificate signed by unknown authority"; then
    echo -e "${RED}✗ FOUND${NC}"
    echo -e " ${YELLOW}Fix: Check SSL configuration in filebeat.yml${NC}"
    ((ERRORS_FOUND++))
  else
    echo -e "${GREEN}✓ Not found${NC}"
  fi
  # SIGABRT crash
  echo -ne " SIGABRT crash: "
  if printf '%s\n' "$MANAGER_LOGS" | grep -q "SIGABRT"; then
    echo -e "${RED}✗ FOUND${NC}"
    echo -e " ${YELLOW}Usually caused by pthread_create error${NC}"
    ((ERRORS_FOUND++))
  else
    echo -e "${GREEN}✓ Not found${NC}"
  fi
  # Filebeat ownership error
  echo -ne " Filebeat ownership error: "
  if printf '%s\n' "$MANAGER_LOGS" | grep -q "must be owned by the user identifier"; then
    echo -e "${RED}✗ FOUND${NC}"
    echo -e " ${YELLOW}Fix: chown root:root && chmod 600 on filebeat.yml${NC}"
    ((ERRORS_FOUND++))
  else
    echo -e "${GREEN}✓ Not found${NC}"
  fi
  echo ""
  if [ $ERRORS_FOUND -gt 0 ]; then
    echo -e "${RED}Found $ERRORS_FOUND known error(s) in manager logs${NC}"
  else
    echo -e "${GREEN}No known errors detected in manager logs${NC}"
  fi
fi
echo ""
# 7. Final Summary
print_section "7. OVERALL HEALTH SUMMARY"
echo ""
ISSUES=0
# Check services (counters from section 1).
if [ "$SERVICES_FAILED" -gt 0 ]; then
  echo -e "${RED}✗ Services: $SERVICES_FAILED services have issues${NC}"
  ((ISSUES++))
else
  echo -e "${GREEN}✓ Services: All services healthy${NC}"
fi
# Check disk (SIZE_GB is only set when section 2 found the data directory).
if [ -n "$SIZE_GB" ] && [ "$SIZE_GB" -gt 20 ]; then
  echo -e "${YELLOW}⚠ Disk: Higher than expected usage ($SIZE_GB GB)${NC}"
  ((ISSUES++))
elif [ -n "$SIZE_GB" ]; then
  echo -e "${GREEN}✓ Disk: Usage normal (~5GB)${NC}"
else
  echo -e "${YELLOW}⚠ Disk: Could not determine usage${NC}"
fi
# Check security files (counter from section 3).
if [ "$FILES_MISSING" -gt 0 ]; then
  echo -e "${RED}✗ Security: $FILES_MISSING files missing${NC}"
  ((ISSUES++))
else
  echo -e "${GREEN}✓ Security: All 8 security files present${NC}"
fi
echo ""
echo -e "${CYAN}=========================================${NC}"
if [ "$ISSUES" -eq 0 ]; then
  echo -e "${GREEN}✓✓✓ WAZUH IS HEALTHY - PRODUCTION READY ✓✓✓${NC}"
else
  echo -e "${YELLOW}⚠⚠⚠ FOUND $ISSUES ISSUE(S) - CHECK ABOVE ⚠⚠⚠${NC}"
  echo ""
  echo -e "${CYAN}Troubleshooting:${NC}"
  echo " 1. Check logs above for detailed error messages"
  echo " 2. See metadata/description.md section 'TROUBLESHOOTING'"
  echo " 3. Verify docker-compose.json entrypoints are correct"
  echo " 4. Check container prefix detection: $WAZUH_PREFIX"
fi
echo -e "${CYAN}=========================================${NC}"
echo ""
# NOTE(review): the script always exits 0, even when issues were found —
# confirm no caller relies on the exit status before changing this.
exit 0

View File

@@ -0,0 +1,72 @@
#!/bin/sh
set -e

echo "CERTS_INIT: Starting certificate initialization..."

# Create all required directories
echo "CERTS_INIT: Creating directories..."
mkdir -p /indexer-data \
  /manager-api \
  /manager-logs \
  /manager-queue \
  /dashboard-config \
  /indexer-security

# Super-Janitor Sweep: remove any paths that Docker auto-created as
# directories when a bind-mount target file did not exist yet.
echo "CERTS_INIT: Starting Super-Janitor Sweep..."
for path in /certificates/*.pem \
  /certificates/*.key \
  /dashboard-config/opensearch_dashboards.yml \
  /indexer-security/config.yml \
  /indexer-security/nodes_dn.yml \
  /indexer-security/tenants.yml \
  /indexer-security/whitelist.yml \
  /indexer-security/roles.yml \
  /indexer-security/roles_mapping.yml \
  /indexer-security/internal_users.yml \
  /indexer-security/action_groups.yml; do
  if [ -d "$path" ]; then
    echo "CERTS_INIT: Purging fake directory: $path"
    rm -rf "$path"
  fi
done

# Generate certificates only on first boot (root-ca.pem is the sentinel).
if [ ! -f /certificates/root-ca.pem ]; then
  echo "CERTS_INIT: Generating new certificates..."
  /entrypoint.sh
else
  echo "CERTS_INIT: Certificates already exist, skipping generation"
fi

# Create short-name symlinks for easier reference
# (relative targets resolve inside /certificates).
echo "CERTS_INIT: Creating certificate symlinks..."
ln -sf wazuh.indexer.pem /certificates/indexer.pem
ln -sf wazuh.indexer-key.pem /certificates/indexer-key.pem
ln -sf wazuh.manager.pem /certificates/server.pem
ln -sf wazuh.manager-key.pem /certificates/server-key.pem
ln -sf wazuh.dashboard.pem /certificates/dashboard.pem
ln -sf wazuh.dashboard-key.pem /certificates/dashboard-key.pem

# Set correct ownership
# - 1000:1000 for indexer and dashboard (opensearch/kibana user)
# - 999:999 for manager directories (wazuh user in manager container)
echo "CERTS_INIT: Setting ownership and permissions..."
chown -R 1000:1000 /certificates \
  /indexer-data \
  /dashboard-config \
  /indexer-security
chown -R 999:999 /manager-api \
  /manager-logs \
  /manager-queue

# Set correct permissions
chmod 700 /certificates
chmod 644 /certificates/*.pem 2>/dev/null || true
# BUGFIX: private keys are named *-key.pem (not *.key), so the previous
# `chmod 600 /certificates/*.key` matched nothing and the blanket 644 above
# left every private key group/world-readable. Tighten the keys last.
chmod 600 /certificates/*-key.pem 2>/dev/null || true
echo "CERTS_INIT: Certificates ready"

# Keep container alive (Runtipi requirement)
tail -f /dev/null

View File

@@ -0,0 +1,134 @@
#!/bin/bash
set -e

echo "DASHBOARD_INIT: Starting dashboard initialization..."
CUSTOM_CONFIG="/usr/share/wazuh-dashboard/config/custom/opensearch_dashboards.yml"

# Ensure custom directory exists
echo "DASHBOARD_INIT: Ensuring custom config directory exists..."
mkdir -p /usr/share/wazuh-dashboard/config/custom

# Create a default config when the custom file is missing or empty (-s).
if [ ! -s "$CUSTOM_CONFIG" ]; then
  echo "DASHBOARD_INIT: Creating default dashboard config..."
  cat > "$CUSTOM_CONFIG" << EOF
server.host: 0.0.0.0
server.port: 5601
opensearch.hosts: https://wazuh.indexer:9200
opensearch.ssl.verificationMode: certificate
opensearch.username: ${DASHBOARD_USERNAME:-kibanaserver}
opensearch.password: ${DASHBOARD_PASSWORD:-kibanaserver}
opensearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
opensearch_security.multitenancy.enabled: false
opensearch_security.readonly_mode.roles: ["kibana_read_only"]
server.ssl.enabled: true
server.ssl.certificate: /usr/share/wazuh-dashboard/config/certs/dashboard.pem
server.ssl.key: /usr/share/wazuh-dashboard/config/certs/dashboard-key.pem
opensearch.ssl.certificateAuthorities: ["/usr/share/wazuh-dashboard/config/certs/root-ca.pem"]
uiSettings.overrides.defaultRoute: /app/wazuh
EOF
  echo "DASHBOARD_INIT: Default dashboard config created"
else
  echo "DASHBOARD_INIT: Custom dashboard config already exists, skipping"
fi

# Point the dashboard's default config path at the custom file.
if [ ! -L /usr/share/wazuh-dashboard/config/opensearch_dashboards.yml ]; then
  echo "DASHBOARD_INIT: Creating symlink to custom config..."
  rm -f /usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
  ln -s "$CUSTOM_CONFIG" /usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
else
  echo "DASHBOARD_INIT: Symlink already exists"
fi

# Handle keystore initialization idempotently to avoid interactive prompts
KEYSTORE_PATH="/usr/share/wazuh-dashboard/config/opensearch_dashboards.keystore"
if [ ! -f "$KEYSTORE_PATH" ]; then
  echo "DASHBOARD_INIT: Creating dashboard keystore..."
  # The --allow-root flag is often needed if running as root
  /usr/share/wazuh-dashboard/bin/opensearch-dashboards-keystore create 2>/dev/null || true
fi

# Start dashboard in background
echo "DASHBOARD_INIT: Configuration complete, starting dashboard..."
/entrypoint.sh &
DASHBOARD_PID=$!

# Give the process a moment to start and resolve its sub-processes
sleep 5

# Robust PID tracking: find the actual Node.js process if the entrypoint
# did not use exec.  `head -1` guards against pgrep returning several PIDs,
# which would break the single-PID `kill -0` checks below.
if ! kill -0 $DASHBOARD_PID 2>/dev/null; then
  echo "DASHBOARD_INIT: Main PID $DASHBOARD_PID died, checking for sub-processes..."
  ACTUAL_PID=$(pgrep -f "opensearch-dashboards" | head -1 || echo "")
  if [ -n "$ACTUAL_PID" ]; then
    DASHBOARD_PID=$ACTUAL_PID
    echo "DASHBOARD_INIT: Found actual dashboard process at PID $DASHBOARD_PID"
  else
    echo "DASHBOARD_INIT: ERROR - No dashboard process found!"
    exit 1
  fi
fi

# Periodic monitoring with migration lock detection
echo "MIGRATION_WATCHDOG: Monitoring dashboard startup (PID: $DASHBOARD_PID)..."
TIMEOUT=600 # 10 minutes — the indexer can be slow to come up
ELAPSED=0
MIGRATION_LOCK_DETECTED_AT=0
while [ $ELAPSED -lt $TIMEOUT ]; do
  # Check if dashboard process is still alive
  if ! kill -0 $DASHBOARD_PID 2>/dev/null; then
    echo "MIGRATION_WATCHDOG: ERROR - Dashboard process (PID: $DASHBOARD_PID) died after ${ELAPSED}s!"
    exit 1
  fi
  # Probe the dashboard API.  BUGFIX: the trailing `|| echo "000"` is
  # required — this script runs under `set -e`, and a failing command
  # substitution in an assignment would otherwise abort the whole watchdog
  # while the dashboard is still starting up (curl exits non-zero on
  # connection failures).
  API_STATUS=$(curl -sk -o /dev/null -w "%{http_code}" https://localhost:5601/api/status 2>/dev/null || echo "000")
  if [[ "$API_STATUS" == "200" || "$API_STATUS" == "302" || "$API_STATUS" == "401" ]]; then
    echo "MIGRATION_WATCHDOG: Dashboard is responding (HTTP $API_STATUS) after ${ELAPSED}s"
    # Keep monitoring until it is fully stable (HTTP 200).
    if [ "$API_STATUS" == "200" ]; then
      echo "MIGRATION_WATCHDOG: Dashboard is fully UP and Stable."
      break
    fi
  fi
  # Check for a migration lock (stuck .kibana_1 index).  Same set -e guard.
  HTTP_CODE=$(curl -sk -u "${INDEXER_USERNAME:-admin}:${INDEXER_PASSWORD:-admin}" -o /dev/null -w "%{http_code}" "https://wazuh.indexer:9200/.kibana_1" 2>/dev/null || echo "000")
  if [ "$HTTP_CODE" = "200" ] && [ $MIGRATION_LOCK_DETECTED_AT -eq 0 ]; then
    MIGRATION_LOCK_DETECTED_AT=$ELAPSED
    echo "MIGRATION_WATCHDOG: Detected .kibana_1 index at ${ELAPSED}s, waiting for natural migration..."
  fi
  if [ "$HTTP_CODE" = "200" ] && [ $MIGRATION_LOCK_DETECTED_AT -gt 0 ]; then
    STUCK_DURATION=$((ELAPSED - MIGRATION_LOCK_DETECTED_AT))
    if [ $STUCK_DURATION -ge 240 ]; then # give the natural migration 4 minutes
      echo "MIGRATION_WATCHDOG: Index stuck for ${STUCK_DURATION}s, cleaning up..."
      curl -sk -u "${INDEXER_USERNAME:-admin}:${INDEXER_PASSWORD:-admin}" -X DELETE "https://wazuh.indexer:9200/.kibana_1" 2>/dev/null || true
      # Exit 0 so the container restarts cleanly and retries the migration.
      kill $DASHBOARD_PID 2>/dev/null || true
      exit 0
    fi
  fi
  sleep 15
  ELAPSED=$((ELAPSED + 15))
  if [ $((ELAPSED % 60)) -eq 0 ]; then
    echo "MIGRATION_WATCHDOG: Still waiting for dashboard (${ELAPSED}s, Status: $API_STATUS)..."
  fi
done
echo "MIGRATION_WATCHDOG: Finalizing monitoring. Entering persistent loop."
# Keep watching the dashboard process so the container exits if it dies.
while kill -0 $DASHBOARD_PID 2>/dev/null; do
  sleep 60
done
echo "DASHBOARD_INIT: Process died. Exiting."
exit 1

View File

@@ -0,0 +1,56 @@
#!/bin/bash
# init-indexer-init.sh — one-shot OpenSearch security bootstrap for the Wazuh indexer.
# On first run, seeds persistent storage (/mnt/host-security) with the stock
# opensearch-security config files; waits for the indexer to answer over HTTPS;
# applies the security configuration with securityadmin.sh; then drops a marker
# file (.init-complete) consumed by the container healthcheck and stays alive.
set -e
echo "INDEXER_INIT: Starting security initialization..."
# Check if security files already exist (internal_users.yml acts as the sentinel).
if [ ! -f /mnt/host-security/internal_users.yml ]; then
    echo "INDEXER_INIT: Copying security configs..."
    SRC_PATH="/usr/share/wazuh-indexer/config/opensearch-security"
    for file in config.yml roles.yml roles_mapping.yml internal_users.yml action_groups.yml tenants.yml nodes_dn.yml whitelist.yml; do
        if [ -f "$SRC_PATH/$file" ]; then
            cp "$SRC_PATH/$file" /mnt/host-security/
            echo "INDEXER_INIT: Copied $file"
        else
            echo "INDEXER_INIT: $file not found, skipping"
        fi
    done
    echo "INDEXER_INIT: Security files ready"
else
    echo "INDEXER_INIT: Security files already exist, skipping copy"
fi
# securityadmin.sh needs JAVA_HOME; use the JDK bundled with the indexer image.
export JAVA_HOME=/usr/share/wazuh-indexer/jdk
# Wait for the indexer to be reachable over HTTPS.
# -o /dev/null keeps the HTTP response body out of the container logs, and
# --max-time bounds each attempt so a hung connection cannot stall the loop.
echo "INDEXER_INIT: Waiting for indexer to be available..."
until curl -ks -o /dev/null --max-time 10 https://wazuh.indexer:9200 -u "${INDEXER_USERNAME:-admin}:${INDEXER_PASSWORD:-admin}"; do
    echo "INDEXER_INIT: Indexer not ready, retrying in 5 seconds..."
    sleep 5
done
echo "INDEXER_INIT: Indexer is ready, initializing security..."
# Push the security configuration from /mnt/host-security into the cluster.
# -nhnv: do not verify that the cluster hostname matches the certificate.
/usr/share/wazuh-indexer/plugins/opensearch-security/tools/securityadmin.sh \
    -cd /mnt/host-security/ \
    -cacert /usr/share/wazuh-indexer/config/certs/root-ca.pem \
    -cert /usr/share/wazuh-indexer/config/certs/admin.pem \
    -key /usr/share/wazuh-indexer/config/certs/admin-key.pem \
    -h wazuh.indexer \
    -p 9200 \
    -nhnv
echo "INDEXER_INIT: Security initialization completed successfully"
# Create completion marker file (the docker-compose healthcheck tests for it).
touch /mnt/host-security/.init-complete
# Keep container alive (Runtipi requirement)
# Using tail -f /dev/null keeps the container in a healthy "running" state
echo "INDEXER_INIT: Initialization complete, container will remain alive"
tail -f /dev/null

View File

@@ -0,0 +1,100 @@
#!/bin/bash
# init-manager.sh — custom entrypoint for the Wazuh manager container.
# Spawns a background watchdog that makes ossec.conf persistent (symlinked into
# a host-mounted volume) once the official Wazuh /init has generated it, then
# execs /init so the stock startup sequence runs unchanged.
set -e
echo "MANAGER_INIT: Starting manager initialization..."
# ============================================================================
# OSSEC.CONF CONFIGURATION
# ============================================================================
# The official Wazuh /init script creates ossec.conf during initialization.
# We use a watchdog to copy it to custom storage for persistence after /init.
# OSSEC_CUSTOM lives under /var/ossec/etc/custom (host-mounted volume per
# docker-compose.json); OSSEC_DEFAULT is the path Wazuh actually reads.
OSSEC_CUSTOM="/var/ossec/etc/custom/ossec.conf"
OSSEC_DEFAULT="/var/ossec/etc/ossec.conf"
# Create custom directory if it doesn't exist
mkdir -p /var/ossec/etc/custom
# NOTE: Filebeat SSL configuration is now handled via environment variables:
# - FILEBEAT_SSL_VERIFICATION_MODE=full
# - SSL_CERTIFICATE_AUTHORITIES=/var/ossec/etc/certs/root-ca.pem
# - SSL_CERTIFICATE=/var/ossec/etc/certs/server.pem
# - SSL_KEY=/var/ossec/etc/certs/server-key.pem
# The official cont-init.d/1-config-filebeat script will generate the correct
# configuration automatically. No manual filebeat.yml management needed!
# ============================================================================
# POST-INIT WATCHDOG
# ============================================================================
# The Wazuh /init script creates ossec.conf during initialization.
# This watchdog waits for init completion, then makes ossec.conf persistent.
# It runs in a backgrounded subshell, spawned BEFORE the exec below, so it
# survives as an independent process while /init runs as the main one.
(
echo "WATCHDOG: Waiting for Wazuh services to be fully started..."
# Wait for wazuh-db to be running (not just starting)
# wazuh-db is one of the last services to start and needs a valid ossec.conf
# Poll every 2s for up to 180s.
TIMEOUT=180
ELAPSED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
# Check if wazuh-db process is running
if pgrep -x "wazuh-db" > /dev/null 2>&1; then
echo "WATCHDOG: wazuh-db is running, waiting additional 5s for stability..."
sleep 5
break
fi
sleep 2
ELAPSED=$((ELAPSED + 2))
# Progress message every 20s to avoid flooding the container logs.
if [ $((ELAPSED % 20)) -eq 0 ]; then
echo "WATCHDOG: Still waiting for wazuh-db to start (${ELAPSED}s elapsed)..."
fi
done
if [ $ELAPSED -ge $TIMEOUT ]; then
echo "WATCHDOG: WARNING - Timeout waiting for wazuh-db startup!"
echo "WATCHDOG: Will proceed anyway, but persistence may fail"
fi
# Now make ossec.conf persistent
# Only act when ossec.conf is a regular file, i.e. not already our symlink.
if [ -f "$OSSEC_DEFAULT" ] && [ ! -L "$OSSEC_DEFAULT" ]; then
echo "WATCHDOG: Making ossec.conf persistent..."
# If custom file doesn't exist or is empty, copy current to custom
if [ ! -s "$OSSEC_CUSTOM" ]; then
echo "WATCHDOG: Backing up current ossec.conf to custom storage..."
cp "$OSSEC_DEFAULT" "$OSSEC_CUSTOM"
fi
# Create symlink for persistence
echo "WATCHDOG: Creating symlink /var/ossec/etc/ossec.conf -> custom/ossec.conf"
rm -f "$OSSEC_DEFAULT"
ln -s "$OSSEC_CUSTOM" "$OSSEC_DEFAULT"
# Verify symlink was created
if [ -L "$OSSEC_DEFAULT" ]; then
echo "WATCHDOG: ✓ ossec.conf is now persistent (symlink verified)"
else
echo "WATCHDOG: ✗ ERROR - Failed to create symlink!"
fi
else
echo "WATCHDOG: ossec.conf already persistent (symlink exists)"
fi
echo "WATCHDOG: Initialization complete, entering monitoring mode"
# Keep watchdog alive
# (hourly sleep loop; exits only when the container is stopped)
while true; do
sleep 3600
done
) &
# ============================================================================
# START WAZUH
# ============================================================================
echo "MANAGER_INIT: Configuration complete, starting Wazuh..."
# Execute the original Wazuh entrypoint
# The cont-init.d/1-config-filebeat script will automatically configure Filebeat
# using the SSL environment variables we defined in docker-compose.json
# exec replaces this shell with /init, making it the container's main process.
exec /init

View File

@@ -0,0 +1,406 @@
{
"schemaVersion": 2,
"services": [
{
"name": "wazuh-certs",
"image": "wazuh/wazuh-certs-generator:0.0.3",
"hostname": "wazuh-certs-generator",
"entrypoint": [
"sh",
"/scripts/init-certs.sh"
],
"environment": [
{
"key": "CERT_TOOL_VERSION",
"value": "4.14"
}
],
"volumes": [
{
"hostPath": "${APP_DATA_DIR}/data/config",
"containerPath": "/config"
},
{
"hostPath": "${APP_DATA_DIR}/data/config/wazuh_ssl_certs",
"containerPath": "/certificates"
},
{
"hostPath": "${APP_DATA_DIR}/data/indexer-data",
"containerPath": "/indexer-data"
},
{
"hostPath": "${APP_DATA_DIR}/data/manager-api",
"containerPath": "/manager-api"
},
{
"hostPath": "${APP_DATA_DIR}/data/manager-logs",
"containerPath": "/manager-logs"
},
{
"hostPath": "${APP_DATA_DIR}/data/manager-queue",
"containerPath": "/manager-queue"
},
{
"hostPath": "${APP_DATA_DIR}/data/dashboard-config",
"containerPath": "/dashboard-config"
},
{
"hostPath": "${APP_DATA_DIR}/data/indexer-security",
"containerPath": "/indexer-security"
},
{
"hostPath": "${APP_DATA_DIR}/data/scripts",
"containerPath": "/scripts"
}
],
"healthCheck": {
"test": "test -f /certificates/root-ca.pem",
"interval": "5s",
"timeout": "5s",
"retries": 10
}
},
{
"name": "wazuh-indexer",
"image": "wazuh/wazuh-indexer:4.14.1",
"hostname": "wazuh.indexer",
"user": "1000:1000",
"depends_on": {
"wazuh-certs": {
"condition": "service_healthy"
}
},
"environment": [
{
"key": "OPENSEARCH_JAVA_OPTS",
"value": "-Xms1g -Xmx1g"
},
{
"key": "bootstrap.memory_lock",
"value": "true"
},
{
"key": "network.host",
"value": "wazuh.indexer"
},
{
"key": "node.name",
"value": "wazuh.indexer"
},
{
"key": "cluster.initial_cluster_manager_nodes",
"value": "wazuh.indexer"
},
{
"key": "node.max_local_storage_nodes",
"value": "1"
},
{
"key": "plugins.security.allow_default_init_securityindex",
"value": "true"
},
{
"key": "NODES_DN",
"value": "CN=wazuh.indexer,OU=Wazuh,O=Wazuh,L=California,C=US"
},
{
"key": "plugins.security.ssl.http.clientauth_mode",
"value": "OPTIONAL"
}
],
"ulimits": {
"memlock": {
"soft": -1,
"hard": -1
},
"nofile": {
"soft": 65536,
"hard": 65536
}
},
"volumes": [
{
"hostPath": "${APP_DATA_DIR}/data/indexer-data",
"containerPath": "/var/lib/wazuh-indexer"
},
{
"hostPath": "${APP_DATA_DIR}/data/config/wazuh_ssl_certs",
"containerPath": "/usr/share/wazuh-indexer/config/certs"
},
{
"hostPath": "${APP_DATA_DIR}/data/indexer-security",
"containerPath": "/usr/share/wazuh-indexer/opensearch-security"
}
],
"healthCheck": {
"test": "curl -ks https://wazuh.indexer:9200 -u ${INDEXER_USERNAME:-admin}:${INDEXER_PASSWORD:-admin}",
"interval": "10s",
"timeout": "5s",
"retries": 15,
"startPeriod": "120s"
}
},
{
"name": "wazuh-indexer-init",
"image": "wazuh/wazuh-indexer:4.14.1",
"hostname": "wazuh-indexer-init",
"depends_on": {
"wazuh-indexer": {
"condition": "service_healthy"
}
},
"entrypoint": [
"bash",
"/scripts/init-indexer-init.sh"
],
"volumes": [
{
"hostPath": "${APP_DATA_DIR}/data/config/wazuh_ssl_certs",
"containerPath": "/usr/share/wazuh-indexer/config/certs"
},
{
"hostPath": "${APP_DATA_DIR}/data/indexer-security",
"containerPath": "/mnt/host-security"
},
{
"hostPath": "${APP_DATA_DIR}/data/scripts",
"containerPath": "/scripts"
}
],
"healthCheck": {
"test": "test -f /mnt/host-security/.init-complete",
"interval": "5s",
"timeout": "5s",
"retries": 60,
"startPeriod": "120s"
}
},
{
"name": "wazuh-manager",
"image": "wazuh/wazuh-manager:4.14.1",
"hostname": "wazuh.manager",
"entrypoint": [
"bash",
"/scripts/init-manager.sh"
],
"depends_on": {
"wazuh-indexer-init": {
"condition": "service_healthy"
}
},
"environment": [
{
"key": "WAZUH_INDEXER_HOSTS",
"value": "wazuh.indexer:9200"
},
{
"key": "WAZUH_NODE_NAME",
"value": "manager"
},
{
"key": "WAZUH_CLUSTER_NODES",
"value": "wazuh.manager"
},
{
"key": "WAZUH_CLUSTER_BIND_ADDR",
"value": "wazuh.manager"
},
{
"key": "INDEXER_URL",
"value": "https://wazuh.indexer:9200"
},
{
"key": "INDEXER_USERNAME",
"value": "${INDEXER_USERNAME:-admin}"
},
{
"key": "INDEXER_PASSWORD",
"value": "${INDEXER_PASSWORD:-admin}"
},
{
"key": "FILEBEAT_SSL_VERIFICATION_MODE",
"value": "full"
},
{
"key": "SSL_CERTIFICATE_AUTHORITIES",
"value": "/var/ossec/etc/certs/root-ca.pem"
},
{
"key": "SSL_CERTIFICATE",
"value": "/var/ossec/etc/certs/server.pem"
},
{
"key": "SSL_KEY",
"value": "/var/ossec/etc/certs/server-key.pem"
},
{
"key": "API_USERNAME",
"value": "wazuh-wui"
},
{
"key": "API_PASSWORD",
"value": "${API_PASSWORD:-MyS3cr37P450r.*-}"
}
],
"ulimits": {
"memlock": {
"soft": -1,
"hard": -1
},
"nofile": {
"soft": 655360,
"hard": 655360
}
},
"addPorts": [
{
"containerPort": 1514,
"hostPort": 1514,
"tcp": true
},
{
"containerPort": 1515,
"hostPort": 1515,
"tcp": true
},
{
"containerPort": 514,
"hostPort": 514,
"udp": true
},
{
"containerPort": 55000,
"hostPort": 55000,
"tcp": true
}
],
"volumes": [
{
"hostPath": "${APP_DATA_DIR}/data/manager-api",
"containerPath": "/var/ossec/api/configuration"
},
{
"hostPath": "${APP_DATA_DIR}/data/manager-etc",
"containerPath": "/var/ossec/etc/custom"
},
{
"hostPath": "${APP_DATA_DIR}/data/manager-logs",
"containerPath": "/var/ossec/logs"
},
{
"hostPath": "${APP_DATA_DIR}/data/manager-queue",
"containerPath": "/var/ossec/queue"
},
{
"hostPath": "${APP_DATA_DIR}/data/config/wazuh_ssl_certs",
"containerPath": "/var/ossec/etc/certs"
},
{
"hostPath": "${APP_DATA_DIR}/data/scripts",
"containerPath": "/scripts"
}
],
"healthCheck": {
"test": "/var/ossec/bin/wazuh-control status | grep -E 'wazuh-db is running|wazuh-analysisd is running|wazuh-remoted is running' | wc -l | grep -q 3",
"interval": "30s",
"timeout": "10s",
"retries": 10,
"startPeriod": "180s"
}
},
{
"name": "wazuh-dashboard",
"image": "wazuh/wazuh-dashboard:4.14.1",
"hostname": "wazuh.dashboard",
"entrypoint": [
"bash",
"/scripts/init-dashboard.sh"
],
"isMain": true,
"internalPort": "5601",
"depends_on": {
"wazuh-indexer-init": {
"condition": "service_healthy"
},
"wazuh-manager": {
"condition": "service_healthy"
}
},
"environment": [
{
"key": "SERVER_HOST",
"value": "0.0.0.0"
},
{
"key": "OPENSEARCH_HOSTS",
"value": "https://wazuh.indexer:9200"
},
{
"key": "INDEXER_USERNAME",
"value": "${INDEXER_USERNAME:-admin}"
},
{
"key": "INDEXER_PASSWORD",
"value": "${INDEXER_PASSWORD:-admin}"
},
{
"key": "WAZUH_API_URL",
"value": "https://wazuh.manager"
},
{
"key": "DASHBOARD_USERNAME",
"value": "${DASHBOARD_USERNAME:-kibanaserver}"
},
{
"key": "DASHBOARD_PASSWORD",
"value": "${DASHBOARD_PASSWORD:-kibanaserver}"
},
{
"key": "API_USERNAME",
"value": "wazuh-wui"
},
{
"key": "API_PASSWORD",
"value": "${API_PASSWORD:-MyS3cr37P450r.*-}"
},
{
"key": "SERVER_SSL_CERTIFICATE",
"value": "/usr/share/wazuh-dashboard/config/certs/dashboard.pem"
},
{
"key": "SERVER_SSL_KEY",
"value": "/usr/share/wazuh-dashboard/config/certs/dashboard-key.pem"
},
{
"key": "OPENSEARCH_SSL_CERTIFICATE_AUTHORITIES",
"value": "/usr/share/wazuh-dashboard/config/certs/root-ca.pem"
}
],
"volumes": [
{
"hostPath": "${APP_DATA_DIR}/data/config/wazuh_ssl_certs",
"containerPath": "/usr/share/wazuh-dashboard/config/certs"
},
{
"hostPath": "${APP_DATA_DIR}/data/dashboard-config",
"containerPath": "/usr/share/wazuh-dashboard/config/custom"
},
{
"hostPath": "${APP_DATA_DIR}/data/dashboard-custom",
"containerPath": "/usr/share/wazuh-dashboard/plugins/wazuh/public/assets/custom"
},
{
"hostPath": "${APP_DATA_DIR}/data/scripts",
"containerPath": "/scripts"
}
],
"healthCheck": {
"test": "curl -ks https://localhost:5601/app/wazuh -o /dev/null -w '%{http_code}' | grep -qE '302|200' || exit 1",
"interval": "30s",
"timeout": "10s",
"retries": 15,
"startPeriod": "240s"
}
}
]
}

View File

@@ -0,0 +1,762 @@
# Wazuh 4.14.1 pour Runtipi
Déploiement de Wazuh, la plateforme open-source de sécurité unifiée (XDR et SIEM), sur Runtipi.
**Version:** Wazuh 4.14.1
**Plateforme:** Runtipi 4.6.5
**Status:** ✅ Stable
⚠️ **IMPORTANT SÉCURITÉ:** Les mots de passe par défaut DOIVENT être changés avant la mise en production. Voir la section "Configuration des Identifiants" ci-dessous.
---
## 📖 Qu'est-ce que Wazuh ?
**Wazuh** est une plateforme de sécurité open-source qui fournit:
- **SIEM (Security Information and Event Management)** - Centralisation et analyse des logs de sécurité
- **XDR (Extended Detection and Response)** - Détection et réponse aux menaces
- **Conformité** - Vérification de conformité (PCI DSS, HIPAA, GDPR, etc.)
- **Détection des vulnérabilités** - Scan et gestion des vulnérabilités
- **Détection d'intrusion** - Monitoring temps réel des fichiers système
- **Réponse aux incidents** - Automatisation des réponses de sécurité
### Ressources Officielles
- **Site Web:** https://wazuh.com/
- **Documentation:** https://documentation.wazuh.com/current/
- **GitHub Wazuh:** https://github.com/wazuh/wazuh
- **Wazuh Docker:** https://github.com/wazuh/wazuh-docker
- **Community:** https://groups.google.com/g/wazuh
---
## 🏗️ Architecture
Ce projet déploie une stack Wazuh complète sur Runtipi avec 5 conteneurs Docker:
```
┌──────────────────────────────────────────────────────────────┐
│ Votre Infrastructure │
│ │
│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
│ │ Serveur │ │ Serveur │ │ Desktop │ │ Cloud │ │
│ │ Linux │ │ Windows │ │ MacOS │ │ Instance │ │
│ └────┬─────┘ └────┬─────┘ └────┬─────┘ └────┬─────┘ │
│ │ │ │ │ │
│ │ Wazuh Agents (1514 / 1515) │ │
│ └──────────────┬──────────────┬──────────────┘ │
└───────────────────────┼──────────────┼───────────────────────┘
▼ │
┌───────────────────────┼──────────────┼───────────────────────┐
│ Stack Wazuh (Runtipi) │
│ │
│ ┌────────────────────────────────────────────────────────┐ │
│ │ Wazuh Manager (Ports 1514 / 1515 / 55000) │ │
│ │ - Collecte des événements │ │
│ │ - Analyse & corrélation │ │
│ │ - Règles de détection │ │
│ │ - API REST │ │
│ └────────────────────┬───────────────────────────────────┘ │
│ │ (Filebeat) │
│ ▼ │
│ ┌────────────────────────────────────────────────────────┐ │
│ │ Wazuh Indexer (OpenSearch - Port 9200) │ │
│ │ - Stockage & indexation │ │
│ │ - Recherches rapides │ │
│ │ - Statistiques / agrégations │ │
│ └────────────────────┬───────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌────────────────────────────────────────────────────────┐ │
│ │ Wazuh Dashboard (Port 5601) - Interface Web │ │
│ │ - Visualisation des alertes │ │
│ │ - Tableaux de bord │ │
│ │ - Gestion des agents │ │
│ │ - Configuration / Admin │ │
│ └────────────────────────────────────────────────────────┘ │
│ │
│ Services additionnels : │
│ • wazuh-certs : génération certificats SSL/TLS │
│ • wazuh-indexer-init : initialisation sécurité OpenSearch │
└──────────────────────────────────────────────────────────────┘
Accès via navigateur
https://VOTRE_IP:5601
```
### Composants
1. **Wazuh Manager** - Analyse les événements de sécurité collectés par les agents
2. **Wazuh Indexer** - Base de données OpenSearch pour stocker et indexer les événements
3. **Wazuh Dashboard** - Interface web pour visualiser et gérer la sécurité
4. **Wazuh Certs Generator** - Génère les certificats SSL/TLS pour les communications sécurisées
5. **Wazuh Indexer Init** - Initialise la configuration de sécurité d'OpenSearch
---
## 🚀 Installation
### Prérequis
- **Runtipi 4.6.5+** installé et fonctionnel
- **4GB RAM minimum** (8GB recommandés)
- **20GB espace disque** disponible
- **Accès root** au serveur Runtipi
**⚠️ IMPORTANT - Configuration système requise:**
Avant l'installation, exécutez cette commande sur votre serveur:
```bash
sudo sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" | sudo tee -a /etc/sysctl.conf
```
Cette configuration est **obligatoire** pour le bon fonctionnement d'OpenSearch (le moteur d'indexation de Wazuh). Sans elle, le conteneur `wazuh-indexer` ne démarrera pas correctement.
### Étape 1: Installer via Runtipi
1. **Ouvrir l'interface Runtipi** dans votre navigateur
2. **Aller dans "App Store"**
3. **Rechercher "Wazuh"**
4. **Cliquer sur "Install"**
5. **⚠️ IMPORTANT : Attendre 5 minutes complètes.**
> [!WARNING]
> Le déploiement de Wazuh est lourd. Le service `wazuh-dashboard` peut redémarrer plusieurs fois pendant les 5 premières minutes le temps que l'indexeur soit prêt.
> Si vous voyez le statut "Rebooting" ou "Starting", **ne touchez à rien** et patientez. C'est le comportement normal du script de surveillance (Watchdog).
C'est tout! L'installation est **100% automatique**
### Étape 2: Vérifier l'Installation
Connectez-vous en SSH au serveur Runtipi:
```bash
# Vérifier que tous les services sont démarrés et healthy
docker ps -a | grep wazuh
```
**Résultat attendu:**
Vous devriez voir 5 conteneurs:
- **wazuh-certs** - Up X min (healthy)
- **wazuh-indexer** - Up X min (healthy)
- **wazuh-indexer-init** - Up X min (healthy)
- **wazuh-manager** - Up X min (healthy)
- **wazuh-dashboard** - Up X min (healthy)
**Note:** Le conteneur `wazuh-indexer-init` reste running avec un health check (adaptation Runtipi). Il initialise la sécurité OpenSearch au démarrage, crée un fichier marker `.init-complete`, puis reste en veille. Le statut "healthy" confirme que l'initialisation est terminée.
### Étape 3: Accéder au Dashboard
1. **Ouvrir votre navigateur**
2. **Aller à:** `https://VOTRE_IP_SERVEUR:5601`
3. **Se connecter avec les credentials par défaut**
## 🔐 Configuration des Identifiants
### Variables d'Environnement
Lors de l'installation via Runtipi, vous pouvez configurer les identifiants dans l'interface GUI:
| Variable | Description | Valeur par défaut |
|----------------------|------------------------------------------------|----------------------|
| `INDEXER_USERNAME` | Nom d'utilisateur admin de l'indexer | `admin` |
| `INDEXER_PASSWORD` | Mot de passe admin de l'indexer | `admin` |
| `DASHBOARD_USERNAME` | Utilisateur interne dashboard→indexer | `kibanaserver` |
| `DASHBOARD_PASSWORD` | Mot de passe interne dashboard→indexer | `kibanaserver` |
| `API_PASSWORD` | Mot de passe de l'API REST (user: wazuh-wui) | `MyS3cr37P450r.*-` |
### Credentials par Défaut
Si vous utilisez les valeurs par défaut:
| Service | Username | Password | Usage |
|---------------|----------------|----------------------|--------------------------------|
| Dashboard | `admin` | `admin` | Interface web principale |
| API | `wazuh-wui` | `MyS3cr37P450r.*-` | API REST du manager |
| Kibanaserver | `kibanaserver` | `kibanaserver` | Connexion dashboard→indexer |
**⚠️ IMPORTANT:** Pour une utilisation en production, modifiez ces mots de passe **avant** l'installation via l'interface Runtipi!
### Étape 4: Changer les Mots de Passe (Recommandé pour Production)
#### Méthode 1: Avant l'Installation (Recommandé)
La manière la plus sûre est de définir des mots de passe forts **avant** l'installation via l'interface Runtipi:
1. Dans Runtipi, avant de cliquer sur "Install"
2. Configurer les variables d'environnement:
- `INDEXER_PASSWORD` - Mot de passe admin de l'indexer (au lieu de "admin")
- `DASHBOARD_PASSWORD` - Mot de passe dashboard→indexer (au lieu de "kibanaserver")
- `API_PASSWORD` - Mot de passe API REST (au lieu de "MyS3cr37P450r.*-")
3. Utiliser des mots de passe forts (minimum 12 caractères, majuscules, minuscules, chiffres, symboles)
4. Procéder à l'installation
#### Méthode 2: Après l'Installation
Si vous avez déjà installé Wazuh avec les mots de passe par défaut, vous devez les changer:
**A. Changer le mot de passe admin du dashboard:**
1. Se connecter au dashboard: `https://VOTRE_IP:5601`
2. Login: `admin` / `admin`
3. Cliquer sur le menu **☰** (hamburger) en haut à gauche
4. Aller dans **Security → Internal users**
5. Cliquer sur l'utilisateur **admin**
6. Cliquer sur **Edit**
7. Entrer un **nouveau mot de passe fort**
8. Cliquer sur **Save**
**B. Mettre à jour les variables d'environnement dans Runtipi:**
1. Dans l'interface Runtipi, aller dans l'application Wazuh
2. Cliquer sur "Settings" ou "Configuration"
3. Mettre à jour `INDEXER_PASSWORD` avec le nouveau mot de passe
4. Redémarrer l'application pour appliquer les changements
**C. Changer les autres utilisateurs internes (optionnel mais recommandé):**
Dans le dashboard, sous **Security → Internal users**, vous pouvez également modifier:
- `kibanaserver` - Utilisateur technique dashboard→indexer
- `wazuh-wui` - Utilisateur API REST
- `logstash` - Utilisateur Filebeat→Indexer (si utilisé)
**Note:** Après modification des mots de passe, assurez-vous de mettre à jour les variables d'environnement correspondantes dans Runtipi et redémarrer les conteneurs pour synchroniser les configurations.
---
## 🔍 Validation Post-Installation
### Diagnostic Automatique
Un script de diagnostic complet est fourni pour vérifier la santé de votre installation:
```bash
# Se connecter en SSH au serveur
ssh user@VOTRE_SERVEUR
# Exécuter le diagnostic
bash /opt/runtipi/app-data/*/wazuh-runtipi/data/debug/wazuh-health-check.sh
```
**Ce script vérifie automatiquement:**
- ✅ Santé de tous les services (healthy/unhealthy)
- ✅ Utilisation disque (~7-8GB attendu pour installation fraîche)
- ✅ Présence des 8 fichiers de sécurité OpenSearch
- ✅ Connectivité réseau entre conteneurs
- ✅ Configuration du dashboard et manager
- ✅ Variables d'environnement SSL Filebeat (méthode officielle)
- ✅ Initialisation de la sécurité OpenSearch
---
## 📱 Déployer des Agents Wazuh
Une fois Wazuh installé, vous devez déployer des agents sur vos serveurs/postes à surveiller.
### Architecture Agent ↔ Manager
```
Serveur/Desktop à surveiller Serveur Wazuh (Runtipi)
┌─────────────────────┐ ┌────────────────────┐
│ │ │ │
│ Wazuh Agent ─────┼──────────────►│ Wazuh Manager │
│ (Service) │ Port 1514 │ (Collecte) │
│ │ 1515 │ │
│ - Logs système │ │ - Analyse │
│ - Fichiers │ │ - Corrélation │
│ - Processus │ │ - Alertes │
│ - Réseau │ │ │
└─────────────────────┘ └────────────────────┘
```
### Agent Linux (Debian/Ubuntu)
```bash
# 1. Télécharger l'agent
wget https://packages.wazuh.com/4.x/apt/pool/main/w/wazuh-agent/wazuh-agent_4.14.1-1_amd64.deb
# 2. Installer
sudo dpkg -i wazuh-agent_4.14.1-1_amd64.deb
# 3. Configurer l'adresse du manager (remplacer VOTRE_IP par l'IP de votre serveur Runtipi)
sudo sed -i "s/<address>MANAGER_IP<\/address>/<address>VOTRE_IP<\/address>/" /var/ossec/etc/ossec.conf
# 4. Démarrer l'agent
sudo systemctl daemon-reload
sudo systemctl enable wazuh-agent
sudo systemctl start wazuh-agent
# 5. Vérifier le statut
sudo systemctl status wazuh-agent
```
### Agent Windows
1. **Télécharger** l'installeur: https://packages.wazuh.com/4.x/windows/wazuh-agent-4.14.1-1.msi
2. **Ouvrir PowerShell en tant qu'Administrateur** et exécuter:
```powershell
# Installer (remplacer VOTRE_IP par l'IP de votre serveur Runtipi)
msiexec /i wazuh-agent-4.14.1-1.msi /q WAZUH_MANAGER="VOTRE_IP"
# Démarrer le service
NET START WazuhSvc
# Vérifier le statut
Get-Service WazuhSvc
```
### Agent MacOS
```bash
# 1. Télécharger l'agent
curl -O https://packages.wazuh.com/4.x/macos/wazuh-agent-4.14.1-1.pkg
# 2. Installer
sudo installer -pkg wazuh-agent-4.14.1-1.pkg -target /
# 3. Configurer (remplacer VOTRE_IP)
sudo sed -i '' "s/<address>MANAGER_IP<\/address>/<address>VOTRE_IP<\/address>/" /Library/Ossec/etc/ossec.conf
# 4. Démarrer
sudo /Library/Ossec/bin/wazuh-control start
```
### Vérifier les Agents dans le Dashboard
1. **Se connecter au dashboard** Wazuh
2. **Cliquer sur** ☰ → **Agents**
3. **Vérifier** que vos agents apparaissent avec le statut **"Active"** (après 1-2 minutes)
Chaque agent doit montrer:
- **Status:** Active (point vert)
- **IP Address:** L'IP de la machine
- **Version:** 4.14.1
- **Last keep alive:** < 1 minute
---
## 🎯 Utilisation du Dashboard
### Sections Principales
1. **Overview / Vue d'ensemble**
- Résumé des alertes de sécurité
- Événements récents
- Top agents
- Statistiques globales
2. **Agents**
- Liste de tous les agents
- Statut (actif/déconnecté)
- Détails par agent
- Déploiement de nouveaux agents
3. **Security Events / Événements**
- Alertes de sécurité en temps réel
- Filtrage par sévérité, agent, règle
- Timeline des événements
- Détails complets des alertes
4. **Compliance / Conformité**
- PCI DSS
- GDPR
- HIPAA
- NIST 800-53
- CIS benchmarks
5. **Vulnerability Detection**
- CVE détectées sur vos systèmes
- Score CVSS
- Packages vulnérables
- Recommandations de mise à jour
6. **File Integrity Monitoring (FIM)**
- Changements de fichiers système
- Modifications non autorisées
- Ajouts/suppressions de fichiers
### Exemple: Voir les Alertes de Sécurité
1. **Menu** ☰ → **Security Events**
2. **Filtrer par sévérité:** Sélectionner "High" ou "Critical"
3. **Cliquer sur une alerte** pour voir les détails complets:
- Description de la menace
- Agent source
- Fichiers/processus impliqués
- Actions recommandées
- Contexte MITRE ATT&CK
---
## 🔧 Configuration Avancée
### Ports Utilisés
| Port | Protocole | Service | Usage |
|------|-----------|---------|-------|
| **5601** | HTTPS | Dashboard | Interface web (publique) |
| **9200** | HTTPS | Indexer | API OpenSearch (interne) |
| **1514** | TCP | Manager | Communication avec les agents (publique) |
| **1515** | TCP | Manager | Enrollment des agents (publique) |
| **514** | UDP | Manager | Collection Syslog |
| **55000** | HTTPS | Manager | API REST (interne) |
**Note:** Seuls les ports **5601**, **1514**, **1515** (Dashboard et les agents) sont à exposer publiquement. Les autres ports sont utilisés pour la communication interne entre les composants Wazuh et les agents.
### Limites de Ressources
Configuration par défaut des conteneurs:
| Conteneur | RAM Min | RAM Max | CPU |
|-----------|---------|---------|-----|
| **Indexer** | 1GB | 4GB | 2 cores |
| **Manager** | 512MB | 2GB | 1 core |
| **Dashboard** | 512MB | 1GB | 1 core |
Pour modifier, éditer le fichier `docker-compose.json` section `deploy.resources.limits`.
### Espace Disque
Utilisation disque normale: **~7-8GB** pour une installation fraîche
L'espace augmente avec:
- Nombre d'agents connectés
- Volume d'événements générés
- Période de rétention des logs (7 jours par défaut)
Pour surveiller:
```bash
# Vérifier la taille totale (incluant data/)
du -sh /opt/runtipi/app-data/*/wazuh-runtipi
# Vérifier uniquement les données persistantes
du -sh /opt/runtipi/app-data/*/wazuh-runtipi/data
```
### Personnalisation du Manager
Fichier de configuration principal: `/var/ossec/etc/ossec.conf`
Exemples de personnalisation:
- Règles de détection personnalisées
- Alertes par email
- Intégrations (Slack, PagerDuty, etc.)
- Configuration FIM (File Integrity Monitoring)
- Politique de rétention des logs
Voir la documentation officielle: https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/
---
## 🔄 Mises à Jour
Runtipi gère automatiquement les mises à jour de l'application Wazuh.
### Processus de Mise à Jour
1. **Runtipi détecte** une nouvelle version dans son repository
2. **Télécharge** le nouveau `docker-compose.json`
3. **Redémarre** les conteneurs avec la nouvelle configuration
4. **Préserve** vos données et configurations
**Aucune action manuelle requise!**
### Vérification Post-Mise à Jour
```bash
# Attendre 2-3 minutes après la mise à jour
# Vérifier que tous les services sont healthy
docker ps -a | grep wazuh
# Diagnostic complet
bash /opt/runtipi/app-data/*/wazuh-runtipi/data/debug/wazuh-health-check.sh
```
---
## 🆘 Dépannage
### Le Dashboard n'est pas Accessible
**Symptôme:** Impossible d'accéder à `https://VOTRE_IP:5601`
**Solutions:**
```bash
# 1. Vérifier que le conteneur dashboard est running
docker ps | grep dashboard
# 2. Vérifier les logs du dashboard
docker logs wazuh-runtipi_*-wazuh-dashboard-1
# 3. Vérifier que le port 5601 est bien exposé
docker port wazuh-runtipi_*-wazuh-dashboard-1
# 4. Tester depuis le serveur
curl -I http://localhost:5601 || curl -I https://localhost:5601
```
### Un Service est "Unhealthy"
**Symptôme:** `docker ps` montre un conteneur avec `(unhealthy)`
**Solutions:**
```bash
# 1. Voir les logs du service problématique
docker logs wazuh-runtipi_*-wazuh-SERVICE-1
# Exemples:
docker logs wazuh-runtipi_*-wazuh-indexer-1
docker logs wazuh-runtipi_*-wazuh-manager-1
docker logs wazuh-runtipi_*-wazuh-dashboard-1
# 2. Redémarrer le service
docker restart wazuh-runtipi_*-wazuh-SERVICE-1
# 3. Si le problème persiste, redémarrer toute la stack
docker restart $(docker ps -q --filter "name=wazuh-runtipi")
```
### Les Agents n'Apparaissent Pas
**Symptôme:** Agents installés mais invisibles dans le dashboard
**Vérifications:**
1. **Sur la machine agent:**
```bash
# Linux/MacOS
sudo /var/ossec/bin/agent-auth -m VOTRE_IP
sudo systemctl restart wazuh-agent
sudo tail -f /var/ossec/logs/ossec.log
# Windows (PowerShell Admin)
Restart-Service WazuhSvc
Get-Content "C:\Program Files (x86)\ossec-agent\ossec.log" -Tail 20 -Wait
```
2. **Sur le serveur Wazuh:**
```bash
# Vérifier les logs du manager
docker logs wazuh-runtipi_*-wazuh-manager-1 | grep -i "agent"
# Vérifier que les ports 1514/1515 sont bien ouverts
docker port wazuh-runtipi_*-wazuh-manager-1
```
### Utilisation Disque Anormalement Élevée
**Symptôme:** Plus de 20GB utilisés
**Solutions:**
```bash
# 1. Vérifier la taille actuelle
du -sh /opt/runtipi/app-data/*/wazuh-runtipi/data
# 2. Vérifier la taille des indices OpenSearch
docker exec wazuh-runtipi_*-wazuh-indexer-1 curl -k -u admin:admin https://localhost:9200/_cat/indices?v
# 3. Réduire la période de rétention (connexion dashboard)
# Settings → Indices → wazuh-alerts-* → Modifier la rétention
```
### "Index Pattern Warning" dans le health-check
**Symptôme:** Message `No template found for [wazuh-alerts-*]`
**Explication:** C'est **normal** pour une installation fraîche sans agents!
Les indices `wazuh-alerts-*` sont créés automatiquement quand:
1. Des agents Wazuh sont connectés
2. Ces agents génèrent des événements/alertes
3. Le manager envoie les données à l'indexer
**Solution:** Déployez votre premier agent Wazuh. L'alerte disparaîtra automatiquement.
---
## 📚 Documentation et Ressources
### Documentation Wazuh Officielle
- **Getting Started:** https://documentation.wazuh.com/current/getting-started/
- **User Manual:** https://documentation.wazuh.com/current/user-manual/
- **Installation Guide:** https://documentation.wazuh.com/current/installation-guide/
- **API Reference:** https://documentation.wazuh.com/current/user-manual/api/
- **Ruleset:** https://documentation.wazuh.com/current/user-manual/ruleset/
### Projets Source
- **Wazuh (Core):** https://github.com/wazuh/wazuh
- **Wazuh Docker:** https://github.com/wazuh/wazuh-docker
- **Wazuh Kubernetes:** https://github.com/wazuh/wazuh-kubernetes
- **Wazuh Documentation:** https://github.com/wazuh/wazuh-documentation
### Communauté et Support
- **Google Group:** https://groups.google.com/g/wazuh
- **Slack Community:** https://wazuh.com/community/join-us-on-slack/
- **GitHub Issues:** https://github.com/wazuh/wazuh/issues
### Formations et Certifications
- **Wazuh Free Training:** https://wazuh.com/platform/siem/
- **YouTube Channel:** https://www.youtube.com/@wazuh
---
## 📁 Structure du Projet
```
wazuh-runtipi/
├── docker-compose.json ← Configuration Docker Compose
├── config.json ← Configuration Runtipi
├── data/
│ ├── config/
│ │ └── certs.yml ← Configuration certificats SSL
│ │
│ ├── scripts/ ← Scripts d'initialisation
│ │ ├── init-certs.sh │ Génération certificats SSL
│ │ ├── init-indexer-init.sh │ Initialisation sécurité OpenSearch
│ │ ├── init-manager.sh │ Configuration manager + Filebeat
│ │ └── init-dashboard.sh │ Configuration dashboard + Watchdog
│ │
│ ├── debug/
│ │ └── wazuh-health-check.sh ← Script de diagnostic complet
│ │
│ ├── indexer-security/ ← Configuration sécurité OpenSearch
│ │ └── .gitkeep │ (Dossier vide - tous les fichiers
│ │ │ sont copiés depuis le Docker au
│ │ │ premier démarrage)
│ │
│ │ # Les 8 fichiers suivants sont automatiquement copiés
│ │ # depuis l'image wazuh-indexer lors du premier démarrage:
│ │ # - config.yml
│ │ # - roles.yml
│ │ # - roles_mapping.yml
│ │ # - internal_users.yml
│ │ # - action_groups.yml
│ │ # - tenants.yml
│ │ # - nodes_dn.yml
│ │ # - whitelist.yml
└── metadata/
├── description.md ← Documentation complète (ce fichier)
└── logo.jpg ← Logo de l'application
```
**Notes importantes:**
- **Scripts d'initialisation** dans `data/scripts/` sont montés dans les conteneurs et exécutés au démarrage
- **Architecture simple** : Un script init par conteneur (init-certs.sh, init-indexer-init.sh, init-manager.sh, init-dashboard.sh)
- **Configuration persistante** via symlinks vers les dossiers personnalisés
- **Filebeat** : Configuration automatique via variables d'environnement officielles (FILEBEAT_SSL_VERIFICATION_MODE, SSL_CERTIFICATE_AUTHORITIES, SSL_CERTIFICATE, SSL_KEY)
- **Dashboard Watchdog** surveille le démarrage et gère automatiquement les blocages de migration `.kibana_1`
- **Sécurité OpenSearch** : Les 8 fichiers dans `indexer-security/` sont automatiquement copiés au premier démarrage et **préservés** lors des mises à jour
---
## 🔧 Détails Techniques et Bonnes Pratiques
### Configuration Filebeat SSL (Méthode Officielle)
Cette implémentation utilise la **méthode officielle Wazuh** pour configurer Filebeat avec SSL, documentée sur [Docker Hub - Wazuh Manager](https://hub.docker.com/r/wazuh/wazuh-manager).
**Variables d'environnement utilisées:**
```bash
FILEBEAT_SSL_VERIFICATION_MODE=full
SSL_CERTIFICATE_AUTHORITIES=/var/ossec/etc/certs/root-ca.pem
SSL_CERTIFICATE=/var/ossec/etc/certs/server.pem
SSL_KEY=/var/ossec/etc/certs/server-key.pem
```
Le script officiel `cont-init.d/1-config-filebeat` de l'image Wazuh détecte automatiquement ces variables et génère la configuration Filebeat correcte. Cette approche est **préférable** à la création manuelle de `filebeat.yml` car :
- ✅ Respecte le workflow d'initialisation Wazuh natif
- ✅ Évite les conflits avec les scripts internes
- ✅ Simplifie la maintenance (moins de code personnalisé)
- ✅ Garantit la compatibilité avec les futures versions
### Initialisation et Boucles Logiques
Tous les scripts d'initialisation utilisent des **boucles logiques basées sur des conditions réelles** plutôt que des délais fixes, permettant une adaptation automatique à la vitesse de chaque machine :
**init-indexer-init.sh** :
```bash
# Attend la disponibilité réelle de l'API indexer
until curl -ks https://wazuh.indexer:9200; do
sleep 5
done
```
**init-manager.sh** :
```bash
# Vérifie existence ET contenu du fichier ossec.conf
while [ $ELAPSED -lt $TIMEOUT ]; do
if [ -f "$OSSEC_DEFAULT" ] && [ -s "$OSSEC_DEFAULT" ]; then
break
fi
sleep 2
done
```
**init-dashboard.sh** :
```bash
# Vérifie l'API dashboard toutes les 10s (au lieu de 60s fixes)
# Détecte le temps réel de blocage migration avant intervention
while [ $ELAPSED -lt $TIMEOUT ]; do
API_STATUS=$(curl -sk https://localhost:5601/api/status)
if [[ "$API_STATUS" == "200" ]]; then
break
fi
sleep 10
done
```
Cette approche garantit :
- 🚀 **Démarrage rapide** sur machines performantes
- ⏱️ **Patience suffisante** sur machines plus lentes
- 📊 **Logs précis** avec temps réels écoulés
### Gestion de la Migration Dashboard
Le watchdog du dashboard détecte et corrige automatiquement le problème connu de blocage de migration `.kibana_1` :
1. **Détection** : Vérifie la présence de l'index `.kibana_1` toutes les 10 secondes
2. **Patience** : Attend 180 secondes (3 minutes) avant d'intervenir
3. **Intervention** : Supprime l'index bloqué si nécessaire
4. **Restart** : Laisse Runtipi redémarrer automatiquement le dashboard
Cette logique évite les interventions prématurées tout en garantissant un démarrage réussi.
---
## 📄 Licence et Crédits
### Wazuh
- **Licence:** GPL v2
- **Copyright:** Wazuh, Inc.
- **Site Web:** https://wazuh.com/
### Ce Projet
- **Configuration Runtipi:** synode-it
- **Date:** 2025-12-27
- **Version:** 4.14.1
Ce projet est une configuration Docker Compose de Wazuh optimisée pour Runtipi. Il utilise les images Docker officielles de Wazuh et suit leurs bonnes pratiques de déploiement.
---
**🎉 Votre plateforme de sécurité Wazuh est maintenant prête à protéger votre infrastructure!**
Pour toute question, consultez la documentation officielle Wazuh ou rejoignez la communauté.

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

24
apps/whoami/config.json Normal file
View File

@@ -0,0 +1,24 @@
{
"name": "Whoami",
"id": "whoami",
"available": true,
"short_desc": "Tiny Go server that prints os information and HTTP request to output.",
"author": "traefik",
"port": 8382,
"categories": [
"utilities"
],
"description": "Tiny Go webserver that prints OS information and HTTP request to output.",
"tipi_version": 2,
"version": "v1.11.0",
"source": "https://github.com/traefik/whoami",
"exposable": true,
"supported_architectures": [
"arm64",
"amd64"
],
"created_at": 1745082405284,
"updated_at": 1745674974072,
"dynamic_config": true,
"form_fields": []
}

View File

@@ -0,0 +1,10 @@
{
"services": [
{
"name": "whoami",
"image": "traefik/whoami:v1.11.0",
"isMain": true,
"internalPort": "80"
}
]
}

View File

@@ -0,0 +1,43 @@
# Whoami
Tiny Go webserver that prints OS information and HTTP request to output.
## Usage
### Paths
#### `/[?wait=d]`
Returns the whoami information (request and network information).
The optional `wait` query parameter can be provided to tell the server to wait before sending the response.
The duration is expected in Go's [`time.Duration`](https://golang.org/pkg/time/#ParseDuration) format (e.g. `/?wait=100ms` to wait 100 milliseconds).
The optional `env` query parameter can be set to `true` to add the environment variables to the response.
#### `/api`
Returns the whoami information (and some extra information) as JSON.
The optional `env` query parameter can be set to `true` to add the environment variables to the response.
#### `/bench`
Always return the same response (`1`).
#### `/data?size=n[&unit=u]`
Creates a response with a size `n`.
The unit of measure, if specified, accepts the following values: `KB`, `MB`, `GB`, `TB` (optional, default: bytes).
#### `/echo`
WebSocket echo.
#### `/health`
Health check.
- `GET`, `HEAD`, ...: returns a response with the status code defined by the `POST`
- `POST`: changes the status code of the `GET` (`HEAD`, ...) response.

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

BIN
bun.lockb Executable file

Binary file not shown.

3
config.js Normal file
View File

@@ -0,0 +1,3 @@
export default {
allowedCommands: ["bun ./scripts/update-config.ts", "bun install && bun run test"],
};

25
package.json Normal file
View File

@@ -0,0 +1,25 @@
{
"name": "example-appstore",
"version": "1.0.0",
"description": "",
"main": "index.js",
"type": "module",
"scripts": {
"test": "bun test"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@types/bun": "latest",
"@types/node": "^22.14.1"
},
"dependencies": {
"@runtipi/common": "^0.8.0",
"bun": "^1.2.10",
"zod-validation-error": "^3.4.0"
},
"peerDependencies": {
"typescript": "^5.0.0"
}
}

62
renovate.json Normal file
View File

@@ -0,0 +1,62 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"automerge": false,
"extends": [
"config:recommended"
],
"addLabels": [
"renovate"
],
"enabledManagers": ["regex"],
"automergeStrategy": "rebase",
"customManagers": [
{
"customType": "regex",
"fileMatch": [
"^.*docker-compose\\.json$"
],
"matchStrings": [
"\"image\": \"(?<depName>.*?):(?<currentValue>.*?)\","
],
"datasourceTemplate": "docker"
}
],
"packageRules": [
{
"matchUpdateTypes": [
"minor",
"major",
"patch",
"pin",
"digest"
],
"automerge": false
},
{
"matchDepTypes": [
"devDependencies"
],
"automerge": false
},
{
"matchPackageNames": [
"mariadb",
"mysql",
"mongodb",
"postgres",
"redis"
],
"enabled": false
}
],
"postUpgradeTasks": {
"commands": [
"bun ./scripts/update-config.ts {{packageFile}} {{newVersion}}",
"bun install && bun run test"
],
"fileFilters": [
"**/*"
],
"executionMode": "update"
}
}

35
scripts/update-config.ts Normal file
View File

@@ -0,0 +1,35 @@
import path from "node:path";
import fs from "fs/promises";
const packageFile = process.argv[2];
const newVersion = process.argv[3];
type AppConfig = {
tipi_version: string;
version: string;
updated_at: number;
};
const updateAppConfig = async (packageFile: string, newVersion: string) => {
try {
const packageRoot = path.dirname(packageFile);
const configPath = path.join(packageRoot, "config.json");
const config = await fs.readFile(configPath, "utf-8");
const configParsed = JSON.parse(config) as AppConfig;
configParsed.tipi_version = configParsed.tipi_version + 1;
configParsed.version = newVersion;
configParsed.updated_at = new Date().getTime();
await fs.writeFile(configPath, JSON.stringify(configParsed, null, 2));
} catch (e) {
console.error(`Failed to update app config, error: ${e}`);
}
};
if (!packageFile || !newVersion) {
console.error("Usage: node update-config.js <packageFile> <newVersion>");
process.exit(1);
}
updateAppConfig(packageFile, newVersion);

24
tsconfig.json Normal file
View File

@@ -0,0 +1,24 @@
{
"compilerOptions": {
"esModuleInterop": true,
"skipLibCheck": true,
"target": "es2022",
"allowJs": true,
"resolveJsonModule": true,
"moduleDetection": "force",
"isolatedModules": true,
"verbatimModuleSyntax": true,
"strict": true,
"noUncheckedIndexedAccess": true,
"noImplicitOverride": true,
"module": "NodeNext",
"outDir": "dist",
"sourceMap": true,
"lib": [
"es2022"
]
},
"include": [
"**/*.ts",
],
}