diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 3a41019..70e04ae 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,13 +5,18 @@ }, "workspaceFolder": "${localWorkspaceFolder}", "workspaceMount": "source=${localWorkspaceFolder},target=${localWorkspaceFolder},type=bind", + "initializeCommand": "mkdir -p ${localEnv:HOME}/.local/spark/http-proxy/certs", "mounts": [ "source=${localEnv:HOME}/.config/gcloud,target=/home/vscode/.config/gcloud,type=bind", + "source=${localEnv:HOME}/.local/spark/http-proxy/certs,target=${localEnv:HOME}/.local/spark/http-proxy/certs,type=bind", "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind", "source=sparkci-bashhistory,target=/commandhistory,type=volume" ], + "containerEnv": { + "LOCAL_HOME": "${localEnv:HOME}" + }, "runArgs": [ - "--add-host=host.docker.internal:host-gateway", + "--add-host=host.docker.internal:host-gateway" ], "features": { "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {}, @@ -58,4 +63,4 @@ } } } -} \ No newline at end of file +} diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..a4cc228 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,45 @@ +--- +applyTo: "*.go" +--- + +You are a Golang and Devops expert. Your task is to write high-quality, idiomatic Go code that adheres to best practices in software development. +Focus on clarity, maintainability, and performance. Do not write code that is overly complex or difficult to understand. +Do not try to reach a result if it requires writing code that is not idiomatic or that does not follow best practices. +When in doubt, do not write code. Instead, ask for clarification or more information about the requirements. + +Please adhere to the principles of "Effective Go" to ensure the code is clear, idiomatic, and maintainable. Pay close attention to the following conventions: + +**1. Formatting:** +All code should be formatted with `gofmt`. Ensure that the output is consistent with the standard Go formatting. + +**2. Naming Conventions:** +* **Packages:** Use short, concise, and all-lowercase names. Avoid camelCase or snake_case. +* **Getters:** Method names for getters should not have a "Get" prefix. For a variable `owner`, the getter should be named `owner()`, not `Owner()`. +* **Interfaces:** Interfaces that are satisfied by a single method should be named by the method name plus the "-er" suffix (e.g., `Reader`, `Writer`). + +**3. Control Structures:** +* **For Loops:** Utilize the generalized `for` loop. Use the `for...range` clause for iterating over arrays, slices, strings, and maps. +* **Switch Statements:** Use the flexible and powerful `switch` statement. Remember that `switch` cases in Go do not fall through by default. + +**4. Data Handling:** +* **Allocation:** + * Use `new(T)` to allocate memory for a new zero-value of type T and return a pointer to it. + * Use `make(T, args)` to create slices, maps, and channels, and return an initialized (not zeroed) value of type T. +* **Composite Literals:** Use composite literals to create instances of structs, arrays, slices, and maps. Omit the type name from the elements of the literal when it is redundant. + +**5. General Principles:** +* Write idiomatic Go code. Do not simply translate code from other languages like C++, Java, or Python. +* Strive for simplicity and clarity. 
+* Keep comments concise and informative, explaining what the code *does*, not *how* it does it. + +--- +applyTo: "*.*" +--- + +When you want to create new documentation files, follow these steps: + +1. Create a new Markdown file in the appropriate directory, that is docs/. +2. Use the existing documentation files as a reference for structure and formatting. +3. Include relevant information, code snippets, and examples to illustrate the topic. +4. Follow the established naming conventions and directory structure. +5. Update any necessary configuration files (e.g., `mkdocs.yml`, `README.md`) to include the new documentation. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a2b52b9..1ae5e75 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Docker +name: CI/CD Pipeline on: push: @@ -10,7 +10,7 @@ on: env: REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} + BASE_IMAGE_NAME: ${{ github.repository }} jobs: test: @@ -25,15 +25,174 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Build image for testing + - name: Build services image for testing uses: docker/build-push-action@v5 with: context: . - file: ./Dockerfile + file: ./build/Dockerfile push: false - tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} - cache-from: type=gha - cache-to: type=gha,mode=max + tags: ${{ env.REGISTRY }}/${{ env.BASE_IMAGE_NAME }}-services:test + cache-from: type=gha,scope=services + cache-to: type=gha,mode=max,scope=services + + - name: Build Traefik image for testing + uses: docker/build-push-action@v5 + with: + context: ./build/traefik + file: ./build/traefik/Dockerfile + push: false + tags: ${{ env.REGISTRY }}/${{ env.BASE_IMAGE_NAME }}-traefik:test + cache-from: type=gha,scope=traefik + cache-to: type=gha,mode=max,scope=traefik + + - name: Build Prometheus image for testing + uses: docker/build-push-action@v5 + with: + context: ./build/prometheus + file: ./build/prometheus/Dockerfile + push: false + tags: ${{ env.REGISTRY }}/${{ env.BASE_IMAGE_NAME }}-prometheus:test + cache-from: type=gha,scope=prometheus + cache-to: type=gha,mode=max,scope=prometheus + + - name: Build Grafana image for testing + uses: docker/build-push-action@v5 + with: + context: ./build/grafana + file: ./build/grafana/Dockerfile + push: false + tags: ${{ env.REGISTRY }}/${{ env.BASE_IMAGE_NAME }}-grafana:test + cache-from: type=gha,scope=grafana + cache-to: type=gha,mode=max,scope=grafana + + - name: Install test dependencies + run: | + sudo apt-get update -qq + sudo apt-get install -y dnsutils curl + + - name: Run integration tests + run: | + chmod +x test/test.sh + make test + + - name: Validate compose configuration + run: | + docker compose config + docker compose build --dry-run || echo "Dry run not supported, skipping" + + - name: Upload test logs on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: test-logs-${{ github.run_id }} + path: | + test-logs/ + retention-days: 7 + + security-scan: + if: github.ref != 'refs/heads/main' + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@master + with: + scan-type: "fs" + scan-ref: "." 
+ format: "sarif" + output: "trivy-results.sarif" + + - name: Upload Trivy scan results + uses: github/codeql-action/upload-sarif@v3 + if: always() + with: + sarif_file: "trivy-results.sarif" + + dev-deploy: + runs-on: ubuntu-latest + # Deploy dev images from any branch that's not main (for testing) + if: github.ref != 'refs/heads/main' && github.event_name == 'push' + permissions: + contents: read + packages: write + strategy: + fail-fast: false + matrix: + include: + - name: traefik + context: ./build/traefik + dockerfile: ./build/traefik/Dockerfile + image_name: ${{ github.repository }}-traefik + cache_scope: traefik + - name: services + context: . + dockerfile: ./build/Dockerfile + image_name: ${{ github.repository }}-services + cache_scope: services + - name: prometheus + context: ./build/prometheus + dockerfile: ./build/prometheus/Dockerfile + image_name: ${{ github.repository }}-prometheus + cache_scope: prometheus + - name: grafana + context: ./build/grafana + dockerfile: ./build/grafana/Dockerfile + image_name: ${{ github.repository }}-grafana + cache_scope: grafana + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract branch name + id: branch + run: echo "branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT + + - name: Generate dev tags + id: dev-tags + run: | + BRANCH_NAME="${{ steps.branch.outputs.branch }}" + # Sanitize branch name for Docker tag (replace special chars with hyphens) + SAFE_BRANCH=$(echo "$BRANCH_NAME" | sed 's/[^a-zA-Z0-9._-]/-/g' | sed 's/--*/-/g' | sed 's/^-\|-$//g') + SHORT_SHA="${{ github.sha }}" + SHORT_SHA="${SHORT_SHA:0:7}" + + echo "dev_tag=${SAFE_BRANCH}" >> $GITHUB_OUTPUT + echo "dev_sha_tag=${SAFE_BRANCH}-${SHORT_SHA}" >> $GITHUB_OUTPUT + + - name: Build and push dev image (${{ matrix.name }}) + uses: docker/build-push-action@v5 + with: + context: ${{ matrix.context }} + file: ${{ matrix.dockerfile }} + platforms: linux/amd64,linux/arm64 + push: true + tags: | + ${{ env.REGISTRY }}/${{ matrix.image_name }}:${{ steps.dev-tags.outputs.dev_tag }} + ${{ env.REGISTRY }}/${{ matrix.image_name }}:${{ steps.dev-tags.outputs.dev_sha_tag }} + labels: | + org.opencontainers.image.title=${{ matrix.name }} HTTP Proxy (Dev) + org.opencontainers.image.description=Spark HTTP Proxy - ${{ matrix.name }} component (Development build from ${{ steps.branch.outputs.branch }}) + org.opencontainers.image.vendor=SparkFabrik + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.ref.name=${{ steps.branch.outputs.branch }} + cache-from: type=gha,scope=${{ matrix.cache_scope }} + cache-to: type=gha,mode=max,scope=${{ matrix.cache_scope }} deploy: runs-on: ubuntu-latest @@ -41,6 +200,31 @@ jobs: permissions: contents: read packages: write + security-events: write # For security scanning + strategy: + fail-fast: false # Continue building other images if one fails + matrix: + include: + - name: traefik + context: ./build/traefik + dockerfile: ./build/traefik/Dockerfile + image_name: ${{ github.repository }}-traefik + cache_scope: traefik + - name: services + context: . 
+ dockerfile: ./build/Dockerfile + image_name: ${{ github.repository }}-services + cache_scope: services + - name: prometheus + context: ./build/prometheus + dockerfile: ./build/prometheus/Dockerfile + image_name: ${{ github.repository }}-prometheus + cache_scope: prometheus + - name: grafana + context: ./build/grafana + dockerfile: ./build/grafana/Dockerfile + image_name: ${{ github.repository }}-grafana + cache_scope: grafana steps: - name: Checkout repository uses: actions/checkout@v4 @@ -59,20 +243,36 @@ jobs: id: meta uses: docker/metadata-action@v5 with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + images: ${{ env.REGISTRY }}/${{ matrix.image_name }} tags: | type=ref,event=branch type=sha,prefix={{branch}}- type=raw,value=latest,enable={{is_default_branch}} + labels: | + org.opencontainers.image.title=${{ matrix.name }} HTTP Proxy + org.opencontainers.image.description=Spark HTTP Proxy - ${{ matrix.name }} component + org.opencontainers.image.vendor=SparkFabrik - - name: Build and push Docker image + - name: Build and push Docker image (${{ matrix.name }}) uses: docker/build-push-action@v5 with: - context: . - file: ./Dockerfile + context: ${{ matrix.context }} + file: ${{ matrix.dockerfile }} platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max + cache-from: type=gha,scope=${{ matrix.cache_scope }} + cache-to: type=gha,mode=max,scope=${{ matrix.cache_scope }} + # Security and optimization + provenance: true + sbom: true + + - name: Run security scan + uses: docker/scout-action@v1 + if: github.event_name != 'pull_request' + with: + command: cves + image: ${{ env.REGISTRY }}/${{ matrix.image_name }}:latest + only-severities: critical,high + exit-code: true diff --git a/.gitignore b/.gitignore index 0f0f583..5b36169 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ join-networks.tar.gz +.env diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 8820846..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,14 +0,0 @@ -# 2.6.0 - -- Switch to the alpine branch of nginx-proxy. -- Update to golang 1.8. - -# 2.5.8 - -- increase `proxy_buffers` and `proxy_buffer_size` nginx params. - -# 2.5.7 - -- Parse port off of host if present. - -# End of Changelog. Ooops. diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 47c887b..0000000 --- a/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM golang:1.24 AS builder -ARG TARGETARCH -WORKDIR /go/src/github.com/sparkfabrik/http-proxy -COPY go.mod . -COPY go.sum . 
-RUN go mod download -COPY cmd/ ./cmd/ -COPY pkg/ ./pkg/ -RUN GOOS=linux GOARCH=$TARGETARCH CGO_ENABLED=0 go build -v -o join-networks ./cmd/join-networks -RUN GOOS=linux GOARCH=$TARGETARCH CGO_ENABLED=0 go build -v -o dns-server ./cmd/dns-server - -FROM jwilder/nginx-proxy:1.7-alpine -LABEL Author="Brian Palmer " - -RUN apk upgrade --no-cache \ - && apk add --no-cache --virtual=run-deps \ - su-exec \ - curl \ - bind-tools \ - && rm -rf /tmp/* \ - /var/cache/apk/* \ - /var/tmp/* - -COPY --from=builder /go/src/github.com/sparkfabrik/http-proxy/join-networks /app/join-networks -COPY --from=builder /go/src/github.com/sparkfabrik/http-proxy/dns-server /app/dns-server - -COPY Procfile /app/ - -# override nginx configs -COPY dinghy.nginx.conf /etc/nginx/conf.d/ - -# override nginx-proxy templating -COPY nginx.tmpl Procfile reload-nginx /app/ - -COPY htdocs /var/www/default/htdocs/ - -ENV DOMAIN_TLD=loc -ENV DNS_IP=127.0.0.1 -ENV DNS_PORT=19322 - -EXPOSE 19322 diff --git a/Makefile b/Makefile index cdb156b..70bd32d 100644 --- a/Makefile +++ b/Makefile @@ -1,36 +1,35 @@ DOCKER_IMAGE_NAME ?= sparkfabrik/http-proxy:latest +.PHONY: help docker-build docker-run docker-logs build test test-dns compose-up + help: ## Show help message @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' -docker-build: ## Build the Docker image - docker build -t $(DOCKER_IMAGE_NAME) . - -docker-run: docker-build ## Run the Docker container - docker rm -vf http-proxy || true - docker run -d -v /var/run/docker.sock:/tmp/docker.sock:ro \ - --name=http-proxy \ - -p 80:80 \ - -p 19322:19322/udp \ - -e CONTAINER_NAME=http-proxy \ - -e DNS_IP=127.0.0.1 \ - -e DOMAIN_TLD=loc \ - $(DOCKER_IMAGE_NAME) - -docker-logs: ## Show logs of the Docker container - docker logs -f http-proxy - -build: ## Build the go apps - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o join-networks ./cmd/join-networks - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o dns-server ./cmd/dns-server - -test-dns: ## Test DNS resolution (run in another terminal while dnsmasq is running) - @echo "Clear dns cache and restart mDNSResponder" - @sudo dscacheutil -flushcache && sudo killall -HUP mDNSResponder - @echo "Testing DNS resolution on port 19322:" - @echo "Testing hostmachine.loc:" - dig @127.0.0.1 -p 19322 hostmachine.loc - @echo "Testing any .loc domain:" - dig @127.0.0.1 -p 19322 test.loc - @echo "Testing specific domain with IP with dnscacheutil:" - dscacheutil -q host -a name test.loc +dev-up: ## Run the development environment (basic stack) + @echo "Starting development environment (basic stack)..." + @docker compose --profile metrics down -v + @docker compose up -d --build --remove-orphans + +dev-up-metrics: ## Run the development environment with monitoring stack + @echo "Starting development environment with monitoring..." + @docker compose --profile metrics down -v + @docker compose --profile metrics up -d --build --remove-orphans + +dev-down: ## Stop the development environment + @echo "Stopping development environment..." + @docker compose --profile metrics down -v + +dev-logs-join-networks: ## Show logs for the joined networks + @echo "Showing logs for the joined networks..." + @docker-compose logs -f join_networks + +test: ## Run integration tests + @echo "Running integration tests..." 
+ @chmod +x test/test.sh + @./test/test.sh + +compose-up: ## Run Traefik with Docker + @docker rm -vf http-proxy || true + @docker-compose up -d --remove-orphans + @cd build/traefik/test && \ + docker-compose up -d diff --git a/Procfile b/Procfile deleted file mode 100644 index f8c7f01..0000000 --- a/Procfile +++ /dev/null @@ -1,3 +0,0 @@ -nginx: nginx -dockergen: docker-gen -watch -only-exposed -notify-output -notify "/app/reload-nginx" /app/nginx.tmpl /etc/nginx/conf.d/default.conf -dns: /app/dns-server diff --git a/README.md b/README.md index 744423d..fee3eeb 100644 --- a/README.md +++ b/README.md @@ -3,351 +3,527 @@ [![GitHub Container Registry](https://img.shields.io/badge/ghcr.io-sparkfabrik%2Fhttp--proxy-blue)](https://ghcr.io/sparkfabrik/http-proxy) [![CI Pipeline](https://github.com/sparkfabrik/http-proxy/actions/workflows/ci.yml/badge.svg)](https://github.com/sparkfabrik/http-proxy/actions/workflows/ci.yml) -This is a refactored and enhanced version of the [codekitchen/dinghy-http-proxy](https://github.com/codekitchen/dinghy-http-proxy) project. +**Automatic HTTP routing for Docker containers** - A modern Traefik-based proxy that automatically discovers your containers and creates routing rules based on environment variables. -Spark HTTP Proxy is an HTTP Proxy and DNS server originally designed for -[Dinghy](https://github.com/codekitchen/dinghy) but enhanced for broader use cases and improved maintainability. +Perfect for local development environments, this proxy eliminates manual configuration by detecting containers with `VIRTUAL_HOST` environment variables and instantly making them accessible via custom domains. **Only explicitly configured containers are managed**, ensuring security by default. -The proxy is based on jwilder's excellent -[nginx-proxy](https://github.com/jwilder/nginx-proxy) project, with -modifications to make it more suitable for local development work. +## Features -A DNS resolver is also added. By default it will resolve all `*.docker` domains -to the Docker VM, but this can be changed. +- ๐Ÿš€ **Automatic Container Discovery** - Zero-configuration HTTP routing for containers with `VIRTUAL_HOST` environment variables or Traefik labels +- ๐ŸŒ **Built-in DNS Server** - Resolves custom domains (`.loc`, `.dev`, etc.) to localhost, eliminating manual `/etc/hosts` editing +- ๐ŸŒ **Dynamic Network Management** - Automatically joins Docker networks containing manageable containers for seamless routing +- ๐Ÿ” **Automatic HTTPS Support** - Provides both HTTP and HTTPS routes with auto-generated certificates and mkcert integration for trusted local certificates +- ๐Ÿ“Š **Monitoring Ready** - Optional Prometheus metrics and Grafana dashboards for traffic monitoring and performance analysis -## What's New in This Refactor +> **Note**: This is a refactored and enhanced version of the [codekitchen/dinghy-http-proxy](https://github.com/codekitchen/dinghy-http-proxy) project. Spark HTTP Proxy is an HTTP Proxy and DNS server originally designed for [Dinghy](https://github.com/codekitchen/dinghy) but enhanced for broader use cases and improved maintainability. -This version includes several improvements over the original dinghy-http-proxy: +## Quick Start -### Code Organization +To get started quickly, check the complete examples in the `example/` directory. The examples include ready-to-use Docker Compose configurations that demonstrate various use cases and configurations. 
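+
+For a minimal end-to-end check (a sketch only: the `whoami.loc` domain and the `traefik/whoami` test image are illustrative choices, and `.loc` resolution assumes the DNS setup described later in this README), you can start the proxy and attach a single container with `VIRTUAL_HOST`:
+
+```bash
+# Start the proxy stack from the repository root (basic stack, no monitoring)
+docker compose up -d
+
+# Run any container with a VIRTUAL_HOST variable; the proxy discovers it automatically
+docker run -d --name whoami -e VIRTUAL_HOST=whoami.loc traefik/whoami
+
+# Once .loc DNS resolution is configured (see "DNS Server" below), the app answers here:
+curl http://whoami.loc
+```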
-- **Go Project Structure**: Reorganized into standard Go project layout with `cmd/` and `pkg/` directories -- **Multiple Applications**: Split into separate applications: - - `cmd/dns-server/` - DNS server with security hardening - - `cmd/join-networks/` - Network management with robust retry logic -- **Shared Packages**: Created reusable packages in `pkg/`: - - `pkg/config/` - Centralized configuration management - - `pkg/logger/` - Shared logging utilities +## Container Configuration -### Security Enhancements +**Important**: Only containers with explicit configuration are automatically managed by the proxy. Containers without `VIRTUAL_HOST` environment variables or `traefik.*` labels are ignored to ensure security and prevent unintended exposure. -- **DNS Security**: DNS server now silently drops queries for non-configured TLD domains instead of responding with NXDOMAIN -- **Input Validation**: Enhanced validation throughout the codebase -- **Configuration Cleanup**: Removed unused environment variables for cleaner security posture +Add these environment variables to any container you want to be automatically routed: -### Network Management +```yaml +# docker-compose.yml +services: + myapp: + image: nginx:alpine + environment: + - VIRTUAL_HOST=myapp.local # Required: your custom domain + - VIRTUAL_PORT=8080 # Optional: defaults to exposed port or 80 + expose: + - "8080" +``` + +### Supported Patterns + +- **Single domain**: `VIRTUAL_HOST=myapp.local` +- **Multiple domains**: `VIRTUAL_HOST=app.local,api.local` +- **Wildcards**: `VIRTUAL_HOST=*.myapp.local` +- **Regex patterns**: `VIRTUAL_HOST=~^api\\..*\\.local$` + +## Container Management + +The proxy uses **opt-in container discovery** (`exposedByDefault: false`). Only containers with explicit configuration are managed: + +- **Dinghy**: Containers with `VIRTUAL_HOST=domain.local` environment variable +- **Traefik**: Containers with labels starting with `traefik.*` + +Unmanaged containers are ignored and never exposed. + +## Network Management -- **Robust Network Operations**: Advanced retry mechanisms with exponential backoff -- **Connectivity Validation**: Automatic connectivity checks during network operations -- **Rollback Capabilities**: Automatic rollback on operation failures to maintain consistent state -- **Smart Network Discovery**: Intelligent detection of active bridge networks +The proxy automatically joins Docker networks that contain manageable containers, enabling seamless routing without manual network configuration. This process is handled by the `join-networks` service. -### Development & Maintenance +๐Ÿ“– **[Detailed Network Joining Flow Documentation](docs/network-joining-flow.md)** - Complete technical documentation with flow diagrams explaining how automatic network discovery and joining works. -- **Enhanced Error Handling**: Comprehensive error handling and logging throughout -- **Graceful Shutdown**: Proper signal handling for clean shutdowns -- **Dry-run Mode**: Testing capabilities without making actual changes -- **Updated Dependencies**: Latest Go modules and security improvements +## DNS Server -## Project Structure +The HTTP proxy includes a **built-in DNS server** that automatically resolves configured domains to localhost, eliminating the need to manually edit `/etc/hosts` or configure system DNS. 
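+
+A quick way to confirm the DNS server is answering (a sketch assuming the default port `19322` and the default `loc` TLD; `myapp.loc` is just an example name) is to query it directly with `dig`:
+
+```bash
+# Ask the built-in DNS server for an arbitrary .loc name
+dig @127.0.0.1 -p 19322 myapp.loc +short
+# Expected answer while the proxy is running: 127.0.0.1
+```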
-This project follows the standard Go project layout: +### DNS Configuration +The DNS server supports both **Top-Level Domains (TLDs)** and **specific domains**: + +```yaml +# docker-compose.yml +services: + dns: + environment: + # Configure which domains to handle (comma-separated) + - HTTP_PROXY_DNS_TLDS=loc,dev # Handle any *.loc and *.dev domains + - HTTP_PROXY_DNS_TLDS=spark.loc,api.dev # Handle only specific domains + - HTTP_PROXY_DNS_TLDS=loc # Handle any *.loc domains (default) + + # Where to resolve domains (default: 127.0.0.1) + - HTTP_PROXY_DNS_TARGET_IP=127.0.0.1 + + # DNS server port (default: 19322) + - HTTP_PROXY_DNS_PORT=19322 ``` -โ”œโ”€โ”€ cmd/ # Main applications -โ”‚ โ”œโ”€โ”€ dns-server/ # DNS server with security hardening -โ”‚ โ”‚ โ””โ”€โ”€ main.go -โ”‚ โ””โ”€โ”€ join-networks/ # Network management application -โ”‚ โ””โ”€โ”€ main.go -โ”œโ”€โ”€ pkg/ # Shared packages -โ”‚ โ”œโ”€โ”€ config/ # Configuration management -โ”‚ โ”‚ โ””โ”€โ”€ config.go -โ”‚ โ””โ”€โ”€ logger/ # Logging utilities -โ”‚ โ””โ”€โ”€ logger.go -โ”œโ”€โ”€ Dockerfile # Multi-stage build for both applications -โ”œโ”€โ”€ Makefile # Build automation -โ”œโ”€โ”€ go.mod # Go module definition -โ””โ”€โ”€ ... # Other project files + +### DNS Usage Patterns + +#### TLD Support (Recommended) + +Configure TLDs to handle any subdomain automatically: + +```bash +# Environment: HTTP_PROXY_DNS_TLDS=loc +โœ… myapp.loc โ†’ 127.0.0.1 +โœ… api.loc โ†’ 127.0.0.1 +โœ… anything.loc โ†’ 127.0.0.1 +โŒ myapp.dev โ†’ Not handled ``` -### Applications +#### Multiple TLDs -- **DNS Server** (`cmd/dns-server/`): Provides DNS resolution for configured TLD domains with security hardening -- **Join Networks** (`cmd/join-networks/`): Manages Docker network connections with robust retry logic and connectivity validation +Support multiple development TLDs: -### Shared Packages +```bash +# Environment: HTTP_PROXY_DNS_TLDS=loc,dev,docker +โœ… myapp.loc โ†’ 127.0.0.1 +โœ… api.dev โ†’ 127.0.0.1 +โœ… service.docker โ†’ 127.0.0.1 +``` -- **Config** (`pkg/config/`): Centralized configuration management with environment variable support -- **Logger** (`pkg/logger/`): Shared logging utilities with consistent formatting +#### Specific Domains -## Configuration +Handle only specific domains for precise control: -### Exposed Ports +```bash +# Environment: HTTP_PROXY_DNS_TLDS=spark.loc,api.dev +โœ… spark.loc โ†’ 127.0.0.1 +โœ… api.dev โ†’ 127.0.0.1 +โŒ other.loc โ†’ Not handled +โŒ different.dev โ†’ Not handled +``` -The proxy will by default use the first port exposed by your container as the -HTTP port to proxy to. This can be overridden by setting the VIRTUAL_PORT -environment variable on the container to the desired HTTP port. +## Certificate Management -### Docker Compose Projects +When certificates are generated or updated, **restart the proxy** to load the new certificates: -The proxy will auto-generate a hostname based on the docker tags that -docker-compose adds to each container. This hostname is of the form -`..`. For instance, assuming the default `*.docker` TLD, -a "web" service in a "myapp" docker-compose project will be automatically made -available at http://web.myapp.docker/. +```bash +docker compose restart +``` -### Explicitly Setting a Hostname +## Advanced Configuration with Traefik Labels -As in the base nginx-proxy, you can configure a container's hostname by setting -the `VIRTUAL_HOST` environment variable in the container. 
+While `VIRTUAL_HOST` environment variables provide simple automatic routing, you can also use **Traefik labels** for more advanced configuration. Both methods work together seamlessly. -You can set the `VIRTUAL_HOST` -environment variable either with the `-e` option to docker or -the environment hash in docker-compose. For instance setting -`VIRTUAL_HOST=myrailsapp.docker` will make the container's exposed port -available at http://myrailsapp.docker/. +### Basic Traefik Labels Example -This will work even if dinghy auto-generates a hostname based on the -docker-compose tags. +```yaml +services: + myapp: + image: nginx:alpine + labels: + # Define the routing rule - which domain/path routes to this service + - "traefik.http.routers.myapp.rule=Host(`myapp.docker`)" -#### Multiple Hosts + # Specify which entrypoint to use (http = port 80) + - "traefik.http.routers.myapp.entrypoints=http" -If you need to support multiple virtual hosts for a container, you can separate each entry with commas. For example, `foo.bar.com,baz.bar.com,bar.com` and each host will be setup the same. + # Set the target port for load balancing + - "traefik.http.services.myapp.loadbalancer.server.port=80" +``` -Additionally you can customize the port for each host by appending a port -number: `foo.bar.com,baz.bar.com:3000`. Each name will point to its specified -port and any name without a port will use the default. +> **Note**: `traefik.enable=true` is **not required** since auto-discovery is always enabled in this proxy. -#### Wildcard Hosts +### Traefik Labels Breakdown -You can also use wildcards at the beginning and the end of host name, like `*.bar.com` or `foo.bar.*`. Or even a regular expression, which can be very useful in conjunction with a wildcard DNS service like [xip.io](http://xip.io), using `~^foo\.bar\..*\.xip\.io` will match `foo.bar.127.0.0.1.xip.io`, `foo.bar.10.0.2.2.xip.io` and all other given IPs. More information about this topic can be found in the nginx documentation about [`server_names`](http://nginx.org/en/docs/http/server_names.html). +| Label | Purpose | Example | +| ---------------- | -------------------------------------------- | ----------------------------------------------------------- | +| **Router Rule** | Defines which requests route to this service | `traefik.http.routers.myapp.rule=Host(\`myapp.docker\`)` | +| **Entrypoints** | Which proxy port to listen on | `traefik.http.routers.myapp.entrypoints=http` | +| **Service Port** | Target port on the container | `traefik.http.services.myapp.loadbalancer.server.port=8080` | -### Enabling CORS +### Understanding Traefik Core Concepts -You can set the `CORS_ENABLED` -environment variable either with the `-e` option to docker or -the environment hash in docker-compose. For instance setting -`CORS_ENABLED=true` will allow the container's web proxy to accept cross domain -requests. +To effectively use Traefik labels, it helps to understand the key concepts: -If you want to be more specific, you can also set `CORS_DOMAINS` (along with `CORS_ENABLED`) to specify the domains you want to whitelist. They need to be separated using comma. +#### **Entrypoints** - The "Front Door" -This is especially helpful when you have to deal with CORS with authenticated cross domain requests. +An **entrypoint** is where Traefik listens for incoming traffic. Think of it as the "front door" of your proxy. -More information on this topic on [MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers). 
+```yaml +# In our Traefik configuration: +entrypoints: + http: # โ† This is just a custom name! You can call it anything + address: ":80" # Listen on port 80 for HTTP traffic + websecure: # โ† Another custom name + address: ":443" # Listen on port 443 for HTTPS traffic (if configured) + api: # โ† You could even call it "api" or "http" or "frontend" + address: ":8080" # Listen on port 8080 +``` -### Subdomain Support +**Important**: `http` is just a **custom name** that we chose. You could name your entrypoints anything: -If you want your container to also be available at all subdomains to the given -domain, prefix a dot `.` to the provided hostname. For instance setting -`VIRTUAL_HOST=.myrailsapp.docker` will also make your app avaiable at -`*.myrailsapp.docker`. +- `http`, `https`, `frontend`, `api`, `public` - whatever makes sense to you! -This happens automatically for the auto-generated docker-compose hostnames. +When you specify `traefik.http.routers.myapp.entrypoints=http`, you're telling Traefik: -### SSL Support +> _"Route requests that come through the entrypoint named 'http' (which happens to be port 80) to my application"_ -SSL is supported using single host certificates using naming conventions. +The entrypoint name must match between: -To enable SSL, just put your certificates and privates keys in the `HOME/.dinghy/certs` directory -for any virtual hosts in use. The certificate and keys should be named after the virtual host with a `.crt` and -`.key` extension. For example, a container with `VIRTUAL_HOST=foo.bar.com.docker` should have a -`foo.bar.com.docker.crt` and `foo.bar.com.docker.key` file in the certs directory. +1. **Traefik configuration** (where you define `web: address: ":80"`) +2. **Container labels** (where you reference `entrypoints=web`) -#### How SSL Support Works +#### **Load Balancer** - The "Traffic Director" -The SSL cipher configuration is based on [mozilla nginx intermediate profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Nginx) which -should provide compatibility with clients back to Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1, -Windows XP IE8, Android 2.3, Java 7. The configuration also enables HSTS, and SSL -session caches. +The **load balancer** determines how traffic gets distributed to your actual application containers. -The default behavior for the proxy when port 80 and 443 are exposed is as follows: +```yaml +# This label creates a load balancer configuration: +- "traefik.http.services.myapp.loadbalancer.server.port=8080" +``` -- If a container has a usable cert, port 80 will redirect to 443 for that container so that HTTPS - is always preferred when available. -- If the container does not have a usable cert, port 80 will be used. +This tells Traefik: -To serve traffic in both SSL and non-SSL modes without redirecting to SSL, you can include the -environment variable `HTTPS_METHOD=noredirect` (the default is `HTTPS_METHOD=redirect`). You can also -disable the non-SSL site entirely with `HTTPS_METHOD=nohttp`. +> _"When routing to this service, send traffic to port 8080 on the container"_ -#### How to quickly generate self-signed certificates +#### **The Complete Flow** -You can generate self-signed certificates using `openssl`. 
+Here's how a request flows through Traefik: -```bash -openssl req -x509 -newkey rsa:2048 -keyout foo.bar.com.docker.key \ --out foo.bar.com.docker.crt -days 365 -nodes \ --subj "/C=US/ST=Oregon/L=Portland/O=Company Name/OU=Org/CN=foo.bar.com.docker" \ --config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=DNS:foo.bar.com.docker")) \ --reqexts SAN -extensions SAN +``` +1. [Browser] โ†’ http://myapp.docker + โ†“ +2. [Entrypoint :80] โ† "web" entrypoint receives the request + โ†“ +3. [Router] โ† Checks rule: Host(`myapp.docker`) โœ“ Match! + โ†“ +4. [Service] โ† Routes to the configured service + โ†“ +5. [Load Balancer] โ† Forwards to container port 8080 + โ†“ +6. [Container] โ† Your app receives the request ``` -To prevent your browser to emit warning regarding self-signed certificates, you can install them on your system as trusted certificates. +#### **Advanced Load Balancer Features** -## Using Outside of Dinghy +While we typically use simple port mapping, Traefik's load balancer supports much more: -Since this functionality is generally useful for local development work even -outside of Dinghy, this proxy now supports running standalone. +```yaml +services: + # Multiple container instances (automatic load balancing) + web-app: + image: nginx:alpine + deploy: + replicas: 3 # 3 instances of the same app + labels: + - "traefik.http.routers.webapp.rule=Host(`webapp.docker`)" + - "traefik.http.routers.webapp.entrypoints=web" + # Traefik automatically balances between all 3 instances! + + # Health check configuration + api-service: + image: myapi:latest + labels: + - "traefik.http.routers.api.rule=Host(`api.docker`)" + - "traefik.http.routers.api.entrypoints=web" + - "traefik.http.services.api.loadbalancer.server.port=3000" + # Configure health checks + - "traefik.http.services.api.loadbalancer.healthcheck.path=/health" + - "traefik.http.services.api.loadbalancer.healthcheck.interval=30s" +``` -#### Environment variables +#### **Why This Architecture Matters** -#### Environment Variables +This separation of concerns provides powerful flexibility: -The proxy supports several environment variables for customization: +- **Entrypoints**: Control _where_ Traefik listens (ports, protocols) +- **Routers**: Control _which_ requests go _where_ (domains, paths, headers) +- **Services**: Control _how_ traffic reaches your apps (ports, health checks, load balancing) -**DNS Server Configuration:** +Example of advanced routing: -- `DOMAIN_TLD` (default: `docker`) - The DNS server will only respond to domains with this TLD (e.g., `*.docker`) -- `DNS_IP` (default: `127.0.0.1`) - IP address that DNS queries should resolve to -- `LOG_LEVEL` (default: `info`) - Logging level (debug, info, warn, error) +```yaml +services: + # Same app, different routing based on subdomain + app-v1: + image: myapp:v1 + labels: + - "traefik.http.routers.app-v1.rule=Host(`v1.myapp.docker`)" + - "traefik.http.routers.app-v1.entrypoints=web" + - "traefik.http.services.app-v1.loadbalancer.server.port=8080" + + app-v2: + image: myapp:v2 + labels: + - "traefik.http.routers.app-v2.rule=Host(`v2.myapp.docker`)" + - "traefik.http.routers.app-v2.entrypoints=web" + - "traefik.http.services.app-v2.loadbalancer.server.port=8080" + + # Route 90% traffic to v1, 10% to v2 (canary deployment) + app-main: + image: myapp:v1 + labels: + - "traefik.http.routers.app-main.rule=Host(`myapp.docker`)" + - "traefik.http.routers.app-main.entrypoints=web" + - "traefik.http.services.app-main.loadbalancer.server.port=8080" + # Weight-based 
routing (advanced feature) + - "traefik.http.services.app-main.loadbalancer.server.weight=90" +``` -**Network Management:** +## HTTPS Support -- `CONTAINER_NAME` - Name of the container (used for network operations) +The proxy automatically exposes both HTTP and HTTPS for all applications configured with `VIRTUAL_HOST`. Both protocols are available without any additional configuration. -**Security Features:** +### Automatic HTTP and HTTPS Routes -- The DNS server now silently drops queries for non-configured TLD domains for enhanced security -- Network operations include automatic connectivity validation and rollback capabilities +When you set `VIRTUAL_HOST=myapp.local`, you automatically get: -### OS X +- **HTTP**: `http://myapp.local` (port 80) +- **HTTPS**: `https://myapp.local` (port 443) -You'll need the IP of your VM: +```yaml +services: + myapp: + image: nginx:alpine + environment: + - VIRTUAL_HOST=myapp.local # Creates both HTTP and HTTPS routes automatically +``` -- For docker-machine, run `docker-machine ip ` to get the IP. -- For Docker for Mac, you can use `127.0.0.1` as the IP, since it forwards docker ports to the host machine. +### Self-Signed Certificates -Then start the proxy: +Traefik automatically generates self-signed certificates for HTTPS routes. For trusted certificates in development, you can use mkcert to generate wildcard certificates. - docker run -d --restart=always \ - -v /var/run/docker.sock:/tmp/docker.sock:ro \ - -v ~/.dinghy/certs:/etc/nginx/certs \ - -p 80:80 -p 443:443 -p 19322:19322/udp \ - -e DNS_IP= -e CONTAINER_NAME=http-proxy \ - --name http-proxy \ - sparkfabrik/http-proxy +### Trusted Local Certificates with mkcert -You will also need to configure OS X to use the DNS resolver. To do this, create -a file `/etc/resolver/docker` (creating the `/etc/resolver` directory if it does -not exist) with these contents: +For browser-trusted certificates without warnings, generate wildcard certificates using [mkcert](https://github.com/FiloSottile/mkcert) (install with `brew install mkcert` on macOS): +```bash +# Install the local CA +mkcert -install + +# Create the certificates directory +mkdir -p ~/.local/spark/http-proxy/certs + +# Generate wildcard certificate for .loc domains +mkcert -cert-file ~/.local/spark/http-proxy/certs/wildcard.loc.pem \ + -key-file ~/.local/spark/http-proxy/certs/wildcard.loc-key.pem \ + "*.loc" + +# For complex multi-level domains, you can generate additional certificates: +# mkcert -cert-file ~/.local/spark/http-proxy/certs/sparkfabrik.loc.pem \ +# -key-file ~/.local/spark/http-proxy/certs/sparkfabrik.loc-key.pem \ +# "*.sparkfabrik.loc" ``` -nameserver -port 19322 + +#### Start the proxy + +The certificates will be automatically detected and loaded when you start the proxy: + +```bash +docker compose up -d ``` -You only need to do this step once, or when the VM's IP changes. +The Traefik container's entrypoint script scans `~/.local/spark/http-proxy/certs/` for certificate files and automatically generates the TLS configuration in `/traefik/dynamic/auto-tls.yml`. You don't need to manually edit any configuration files! -### Linux +Now your `.loc` domains will use trusted certificates! ๐ŸŽ‰ -For running Docker directly on a Linux host machine, the proxy can still be -useful for easy access to your development environments. 
Similar to OS X, start -the proxy: +โœ… `https://myapp.loc` - Trusted +โœ… `https://api.loc` - Trusted +โœ… `https://project.loc` - Trusted - docker run -d --restart=always \ - -v /var/run/docker.sock:/tmp/docker.sock:ro \ - -v ~/.dinghy/certs:/etc/nginx/certs \ - -p 80:80 -p 443:443 -p 19322:19322/udp \ - -e CONTAINER_NAME=http-proxy \ - --name http-proxy \ - sparkfabrik/http-proxy +**Note**: The `*.loc` certificate covers single-level subdomains. For multi-level domains like `app.project.sparkfabrik.loc`, generate additional certificates as shown in the commented example above. -The `DNS_IP` environment variable is not necessary when Docker is running -directly on the host, as it defaults to `127.0.0.1`. +#### How Certificate Matching Works -Different Linux distributions will require different steps for configuring DNS -resolution. The [Dory](https://github.com/FreedomBen/dory) project may be useful -here, it knows how to configure common distros for `dinghy-http-proxy`. +Traefik automatically matches certificates to incoming HTTPS requests using **SNI (Server Name Indication)**: -### Windows +1. **Certificate Detection**: The entrypoint script scans `/traefik/certs` and extracts domain information from each certificate's Subject Alternative Names (SAN) +2. **Automatic Matching**: When a browser requests `https://myapp.loc`, Traefik: -- For Docker for Windows, you can use `127.0.0.1` as the DNS IP. + - Receives the domain name via SNI + - Looks through available certificates for one that matches `myapp.loc` + - Finds the `*.loc` wildcard certificate and uses it + - Serves the HTTPS response with the trusted certificate -From Powershell: +3. **Wildcard Coverage**: -``` -docker run -d --restart=always ` - -v /var/run/docker.sock:/tmp/docker.sock:ro ` - -p 80:80 -p 443:443 -p 19322:19322/udp ` - -e CONTAINER_NAME=http-proxy ` - -e DNS_IP=127.0.0.1 ` - --name http-proxy ` - sparkfabrik/http-proxy -``` + - `*.loc` covers: `myapp.loc`, `api.loc`, `database.loc` + - `*.loc` does NOT cover: `sub.myapp.loc`, `api.project.loc` + - For multi-level domains, generate specific certificates like `*.project.loc` -From docker-compose: +4. **Fallback**: If no matching certificate is found, Traefik generates a self-signed certificate for that domain -``` -version: '2' +You can see which domains each certificate covers in the container logs when it starts up. + +### Using Traefik Labels Instead of VIRTUAL_HOST + +If you prefer to use Traefik labels instead of `VIRTUAL_HOST`, you can achieve the same HTTP and HTTPS routes manually: + +```yaml services: + myapp: + image: nginx:alpine + labels: + # HTTP router + - "traefik.http.routers.myapp.rule=Host(`myapp.local`)" + - "traefik.http.routers.myapp.entrypoints=http" + - "traefik.http.routers.myapp.service=myapp" + + # HTTPS router + - "traefik.http.routers.myapp-tls.rule=Host(`myapp.local`)" + - "traefik.http.routers.myapp-tls.entrypoints=https" + - "traefik.http.routers.myapp-tls.tls=true" + - "traefik.http.routers.myapp-tls.service=myapp" + + # Service configuration + - "traefik.http.services.myapp.loadbalancer.server.port=80" +``` + +This manual approach gives you the same result as `VIRTUAL_HOST=myapp.local` but with more control over the configuration. 
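+
+Whichever style you use, you can check that both routers answer without touching DNS by pinning the hostname with `curl --resolve` (a sketch: `myapp.local` mirrors the example above, and `-k` is only needed while the certificate is self-signed rather than mkcert-generated):
+
+```bash
+# HTTP router on port 80
+curl --resolve myapp.local:80:127.0.0.1 http://myapp.local
+
+# HTTPS router on port 443 (skip certificate verification for self-signed certs)
+curl -k --resolve myapp.local:443:127.0.0.1 https://myapp.local
+```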
+ +## Dinghy Layer Compatibility + +This HTTP proxy provides compatibility with the original [dinghy-http-proxy](https://github.com/codekitchen/dinghy-http-proxy) environment variables: + +### Supported Environment Variables + +| Variable | Support | Description | +| -------------- | ----------- | -------------------------------- | +| `VIRTUAL_HOST` | โœ… **Full** | Automatic HTTP and HTTPS routing | +| `VIRTUAL_PORT` | โœ… **Full** | Backend port configuration | + +### Migration Notes - http-proxy: - container_name: http-proxy - image: sparkfabrik/http-proxy +- **Security**: **`exposedByDefault: false`** ensures only containers with `VIRTUAL_HOST` or `traefik.*` labels are managed +- **HTTPS**: Unlike the original dinghy-http-proxy, HTTPS is automatically enabled for all `VIRTUAL_HOST` entries +- **Multiple domains**: Comma-separated domains in `VIRTUAL_HOST` work the same way +- **Container selection**: Unmanaged containers are completely ignored, preventing accidental exposure + +## DNS Server + +The HTTP proxy includes a **built-in DNS server** that automatically resolves configured domains to localhost, eliminating the need to manually edit `/etc/hosts` or configure system DNS. + +### DNS Configuration + +The DNS server supports both **Top-Level Domains (TLDs)** and **specific domains**: + +```yaml +# docker-compose.yml +services: + dns: environment: - - DNS_IP=127.0.0.1 - - CONTAINER_NAME=http-proxy - ports: - - "80:80" - - "443:443" - - "19322:19322/udp" - volumes: - - /var/run/docker.sock:/tmp/docker.sock:ro + # Configure which domains to handle (comma-separated) + - HTTP_PROXY_DNS_TLDS=loc,dev # Handle any *.loc and *.dev domains + - HTTP_PROXY_DNS_TLDS=spark.loc,api.dev # Handle only specific domains + - HTTP_PROXY_DNS_TLDS=loc # Handle any *.loc domains (default) + + # Where to resolve domains (default: 127.0.0.1) + - HTTP_PROXY_DNS_TARGET_IP=127.0.0.1 + + # DNS server port (default: 19322) + - HTTP_PROXY_DNS_PORT=19322 ``` -You will have to add the hosts to `C:\Windows\System32\drivers\etc\hosts` manually. 
There are various Powershell scripts available to help manage this: +### DNS Usage Patterns -- http://get-carbon.org/Set-HostsEntry.html -- https://gist.github.com/markembling/173887 +#### TLD Support (Recommended) -## Development +Configure TLDs to handle any subdomain automatically: -### Building the Project +```bash +# Environment: HTTP_PROXY_DNS_TLDS=loc +โœ… myapp.loc โ†’ 127.0.0.1 +โœ… api.loc โ†’ 127.0.0.1 +โœ… anything.loc โ†’ 127.0.0.1 +โŒ myapp.dev โ†’ Not handled +``` -The project uses a Makefile for build automation: +#### Multiple TLDs + +Support multiple development TLDs: ```bash -# Build both applications -make build +# Environment: HTTP_PROXY_DNS_TLDS=loc,dev,docker +โœ… myapp.loc โ†’ 127.0.0.1 +โœ… api.dev โ†’ 127.0.0.1 +โœ… service.docker โ†’ 127.0.0.1 +``` -# Build individual applications -make build-dns-server -make build-join-networks +#### Specific Domains -# Build Docker image -make docker-build +Handle only specific domains for precise control: -# Clean build artifacts -make clean +```bash +# Environment: HTTP_PROXY_DNS_TLDS=spark.loc,api.dev +โœ… spark.loc โ†’ 127.0.0.1 +โœ… api.dev โ†’ 127.0.0.1 +โŒ other.loc โ†’ Not handled +โŒ different.dev โ†’ Not handled ``` -### Testing +### System DNS Configuration -The applications support dry-run mode for testing: +To use the built-in DNS server, configure your system to use it for domain resolution: + +#### Linux (systemd-resolved) ```bash -# Test DNS server configuration -./build/dns-server -dry-run +# Configure systemd-resolved to use http-proxy DNS for .loc domains +sudo mkdir -p /etc/systemd/resolved.conf.d +sudo tee /etc/systemd/resolved.conf.d/http-proxy.conf > /dev/null < 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\")\n)", + "legendFormat": "[{{code}}] on {{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Most requested services", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 11, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 3, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "label_replace(\n 1 - (sum by (service)\n (rate(traefik_service_request_duration_seconds_bucket{le=\"1.2\",service=~\"$service.*\"}[$interval])) / sum by (service) \n 
(rate(traefik_service_request_duration_seconds_count{service=~\"$service.*\"}[$interval]))\n ) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\"\n)", + "legendFormat": "{{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Services failing SLO of 1200ms", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "label_replace(\n 1 - (sum by (service)\n (rate(traefik_service_request_duration_seconds_bucket{le=\"0.3\",service=~\"$service.*\"}[$interval])) / sum by (service) \n (rate(traefik_service_request_duration_seconds_count{service=~\"$service.*\"}[$interval]))\n ) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\"\n)", + "legendFormat": "{{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Services failing SLO of 300ms", + "type": "timeseries" + } + ], + "title": "SLO", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 16, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 8, + "x": 0, + "y": 19 + }, + "id": 17, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": 
"prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "topk(15,\n label_replace(\n sum by (service,method,code) \n (rate(traefik_service_requests_total{service=~\"$service.*\",code=~\"2..\",protocol=\"http\"}[$interval])) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\")\n)", + "legendFormat": "{{method}}[{{code}}] on {{service}}", + "range": true, + "refId": "A" + } + ], + "title": "2xx over $interval", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 8, + "x": 8, + "y": 19 + }, + "id": 18, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "topk(15,\n label_replace(\n sum by (service,method,code) \n (rate(traefik_service_requests_total{service=~\"$service.*\",code=~\"5..\",protocol=\"http\"}[$interval])) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\")\n)", + "legendFormat": "{{method}}[{{code}}] on {{service}}", + "range": true, + "refId": "A" + } + ], + "title": "5xx over $interval", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 8, + "x": 16, + "y": 19 + }, + "id": 19, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": 
{ + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "topk(15,\n label_replace(\n sum by (service,method,code) \n (rate(traefik_service_requests_total{service=~\"$service.*\",code!~\"2..|5..\",protocol=\"http\"}[$interval])) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\")\n)", + "legendFormat": "{{method}}[{{code}}] on {{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Other codes over $interval", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 31 + }, + "id": 20, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "topk(15,\n label_replace(\n sum by (service,method) \n (rate(traefik_service_requests_bytes_total{service=~\"$service.*\",protocol=\"http\"}[$interval])) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\")\n)", + "legendFormat": "{{method}} on {{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Requests Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 31 + }, + "id": 24, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + 
"type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "topk(15,\n label_replace(\n sum by (service,method) \n (rate(traefik_service_responses_bytes_total{service=~\"$service.*\",protocol=\"http\"}[$interval])) > 0,\n \"service\", \"$1\", \"service\", \"([^-]+-[^-]+).*\")\n)", + "legendFormat": "{{method}} on {{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Responses Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 39 + }, + "id": 21, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(traefik_open_connections{entrypoint=~\"$entrypoint\"}) by (entrypoint)\n", + "legendFormat": "{{entrypoint}}", + "range": true, + "refId": "A" + } + ], + "title": "Connections per Entrypoint", + "type": "timeseries" + } + ], + "title": "HTTP Details", + "type": "row" + } + ], + "refresh": false, + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "DS_PROMETHEUS", + "label": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "auto": true, + "auto_count": 30, + "auto_min": "1m", + "current": { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + "hide": 0, + "name": "interval", + "options": [ + { + "selected": true, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "2h", + "value": "2h" + }, + { + "selected": false, + "text": "4h", + "value": "4h" + }, + { + "selected": false, + "text": "8h", + "value": "8h" + } + ], + "query": "1m,5m,10m,30m,1h,2h,4h,8h", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": 
"label_values(traefik_open_connections, entrypoint)", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "entrypoint", + "options": [], + "query": { + "query": "label_values(traefik_open_connections, entrypoint)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(traefik_service_requests_total, service)", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "service", + "options": [], + "query": { + "query": "label_values(traefik_service_requests_total, service)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Traefik Official Standalone Dashboard", + "uid": "n5bu_kv45", + "version": 7, + "weekStart": "" +} \ No newline at end of file diff --git a/build/grafana/provisioning/dashboards/dashboard.yml b/build/grafana/provisioning/dashboards/dashboard.yml new file mode 100644 index 0000000..f5932e7 --- /dev/null +++ b/build/grafana/provisioning/dashboards/dashboard.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: "default" + orgId: 1 + folder: "" + type: file + disableDeletion: false + editable: true + allowUiUpdates: true + options: + path: /var/lib/grafana/dashboards diff --git a/build/grafana/provisioning/datasources/prometheus.yml b/build/grafana/provisioning/datasources/prometheus.yml new file mode 100644 index 0000000..1a57b69 --- /dev/null +++ b/build/grafana/provisioning/datasources/prometheus.yml @@ -0,0 +1,9 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true + editable: true diff --git a/build/prometheus/Dockerfile b/build/prometheus/Dockerfile new file mode 100644 index 0000000..52a5997 --- /dev/null +++ b/build/prometheus/Dockerfile @@ -0,0 +1,14 @@ +FROM prom/prometheus:v3.4.1 + +# Copy the Prometheus configuration +COPY prometheus.yml /etc/prometheus/prometheus.yml + +# Expose port 9090 (Prometheus default port) +EXPOSE 9090 + +# Use the default Prometheus entrypoint with our config +CMD ["--config.file=/etc/prometheus/prometheus.yml", \ + "--storage.tsdb.path=/prometheus", \ + "--web.console.libraries=/etc/prometheus/console_libraries", \ + "--web.console.templates=/etc/prometheus/consoles", \ + "--web.enable-lifecycle"] diff --git a/build/prometheus/prometheus.yml b/build/prometheus/prometheus.yml new file mode 100644 index 0000000..4e2c4ae --- /dev/null +++ b/build/prometheus/prometheus.yml @@ -0,0 +1,14 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: "traefik" + static_configs: + - targets: ["traefik:8082"] + scrape_interval: 5s + metrics_path: /metrics + + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] diff --git a/build/traefik/Dockerfile b/build/traefik/Dockerfile new file mode 100644 index 0000000..702858c --- /dev/null +++ b/build/traefik/Dockerfile @@ -0,0 +1,20 @@ +FROM traefik:v3.4 + +# Copy static configuration and entrypoint script +COPY traefik.yml /etc/traefik/traefik.yml +COPY entrypoint.sh /entrypoint.sh + +# Make entrypoint executable +RUN chmod +x /entrypoint.sh + +# Create directories for dynamic configuration and user certificates +RUN mkdir -p /traefik/dynamic 
/traefik/certs + +# Use custom entrypoint that processes certificates and starts Traefik +ENTRYPOINT ["/entrypoint.sh", "--configfile=/etc/traefik/traefik.yml"] + +# Expose ports +EXPOSE 80 443 8080 + +# Use the default traefik entrypoint +CMD ["traefik"] diff --git a/build/traefik/entrypoint.sh b/build/traefik/entrypoint.sh new file mode 100644 index 0000000..864ac74 --- /dev/null +++ b/build/traefik/entrypoint.sh @@ -0,0 +1,93 @@ +#!/bin/sh + +# Traefik entrypoint script that auto-generates TLS configuration from user certificates + +CERTS_DIR="/traefik/certs" +DYNAMIC_DIR="/traefik/dynamic" +TLS_CONFIG_FILE="${DYNAMIC_DIR}/auto-tls.yml" + +generate_tls_config() { + echo "Scanning for certificates in ${CERTS_DIR}..." + + # Check if certificates directory exists and has files + if [ ! -d "${CERTS_DIR}" ]; then + echo "No certificates directory found at ${CERTS_DIR}" + return + fi + + # Look for certificate files (both .pem and .crt extensions) + cert_files=$(find "${CERTS_DIR}" -name "*.pem" -o -name "*.crt" | grep -v "\-key") + + if [ -z "$cert_files" ]; then + echo "No certificate files found in ${CERTS_DIR}" + return + fi + + echo "Found certificates, generating TLS configuration..." + + # Start TLS configuration + cat > "${TLS_CONFIG_FILE}" << 'EOF' +# Auto-generated TLS configuration from user certificates +tls: + certificates: +EOF + + # Process each certificate file + for cert_file in $cert_files; do + # Get the basename without extension + cert_base=$(basename "$cert_file" .pem) + cert_base=$(basename "$cert_base" .crt) + + # Look for corresponding key file + key_file="" + for ext in pem crt key; do + possible_key="${CERTS_DIR}/${cert_base}-key.${ext}" + if [ -f "$possible_key" ]; then + key_file="$possible_key" + break + fi + + possible_key="${CERTS_DIR}/${cert_base}.key" + if [ -f "$possible_key" ]; then + key_file="$possible_key" + break + fi + done + + if [ -n "$key_file" ]; then + # Extract domains from certificate + domains=$(openssl x509 -in "$cert_file" -noout -text 2>/dev/null | \ + grep -A1 "Subject Alternative Name" | \ + grep "DNS:" | \ + sed 's/.*DNS://g' | \ + sed 's/,.*DNS:/ /g' | \ + sed 's/,.*//g' | \ + tr -d ' ') + + if [ -n "$domains" ]; then + echo " - Adding certificate: $(basename "$cert_file") for domains: $domains" + cat >> "${TLS_CONFIG_FILE}" << EOF + - certFile: ${cert_file} + keyFile: ${key_file} +EOF + else + echo " - Adding certificate: $(basename "$cert_file") (auto-detect domains)" + cat >> "${TLS_CONFIG_FILE}" << EOF + - certFile: ${cert_file} + keyFile: ${key_file} +EOF + fi + else + echo " - Warning: No key file found for certificate $(basename "$cert_file")" + fi + done + + echo "TLS configuration written to ${TLS_CONFIG_FILE}" +} + +# Generate TLS configuration from user certificates +generate_tls_config + +# Start Traefik with the original arguments +echo "Starting Traefik..." 
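# For illustration (hypothetical file names, not part of this script): with a
# certificate pair /traefik/certs/example.loc.pem and
# /traefik/certs/example.loc-key.pem mounted into the container, the generated
# /traefik/dynamic/auto-tls.yml would look roughly like:
#
#   tls:
#     certificates:
#       - certFile: /traefik/certs/example.loc.pem
#         keyFile: /traefik/certs/example.loc-key.pem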
+exec traefik "$@" diff --git a/build/traefik/traefik.yml b/build/traefik/traefik.yml new file mode 100644 index 0000000..36acf36 --- /dev/null +++ b/build/traefik/traefik.yml @@ -0,0 +1,102 @@ +# Traefik configuration for local development +# Equivalent to nginx settings for generous timeouts and body sizes + +# Global configuration +global: + checkNewVersion: false + sendAnonymousUsage: false + +# API and dashboard +api: + dashboard: true + insecure: true + +# Logging +log: + level: INFO + +accessLog: {} + +# Metrics configuration for Prometheus +metrics: + prometheus: + addEntryPointsLabels: true + addServicesLabels: true + addRoutersLabels: true + buckets: + - 0.1 + - 0.3 + - 1.2 + - 5.0 + entryPoint: metrics + +# Entry points +entryPoints: + http: + address: ":80" + # Transport configuration for generous timeouts + transport: + respondingTimeouts: + # Equivalent to proxy_read_timeout 86400s (24 hours) + readTimeout: "86400s" + # Equivalent to proxy_send_timeout 86400s (24 hours) + writeTimeout: "86400s" + # Idle timeout for keep-alive connections + idleTimeout: "300s" + + https: + address: ":443" + # Transport configuration for generous timeouts (same as HTTP) + transport: + respondingTimeouts: + readTimeout: "86400s" + writeTimeout: "86400s" + idleTimeout: "300s" + + # Metrics endpoint for Prometheus + metrics: + address: ":8082" + +# Providers +providers: + docker: + endpoint: "unix:///var/run/docker.sock" + exposedByDefault: false + watch: true + + file: + # Watch the dynamic directory for changes from dinghy-layer and certificates + directory: "/traefik/dynamic" + watch: true + +# Global HTTP configuration to disable HSTS +http: + middlewares: + disable-hsts: + headers: + customResponseHeaders: + Strict-Transport-Security: "" + sslForceHost: false + sslRedirect: false + +# Servers transport configuration for backend connections +serversTransport: + insecureSkipVerify: true + # For large uploads and long-running requests + maxIdleConnsPerHost: 200 + +# TLS configuration for local development +tls: + options: + default: + # Allow older TLS versions for development + minVersion: "VersionTLS10" + # Disable HSTS for local development + sniStrict: false + # Cipher suites for development (less restrictive) + cipherSuites: + - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + - "TLS_RSA_WITH_AES_256_GCM_SHA384" + - "TLS_RSA_WITH_AES_128_GCM_SHA256" diff --git a/cmd/dinghy-layer/main.go b/cmd/dinghy-layer/main.go new file mode 100644 index 0000000..128fca7 --- /dev/null +++ b/cmd/dinghy-layer/main.go @@ -0,0 +1,483 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/client" + "github.com/sparkfabrik/http-proxy/pkg/config" + "github.com/sparkfabrik/http-proxy/pkg/logger" + "github.com/sparkfabrik/http-proxy/pkg/service" + "github.com/sparkfabrik/http-proxy/pkg/utils" + "gopkg.in/yaml.v3" +) + +const ( + // DefaultTraefikDynamicDir is the default directory for Traefik dynamic configuration files + DefaultTraefikDynamicDir = "/traefik/dynamic" + + // ConfigFilePermissions defines the permissions for config files + ConfigFilePermissions = 0644 + + // ConfigDirPermissions defines the permissions for config directories + ConfigDirPermissions = 0755 +) + +// CompatibilityLayer implements the 
service.EventHandler interface +type CompatibilityLayer struct { + dockerClient *client.Client + logger *logger.Logger + config *CompatibilityConfig +} + +// CompatibilityConfig holds the configuration for the compatibility layer +type CompatibilityConfig struct { + DryRun bool + LogLevel string + TraefikDynamicDir string +} + +// Validate checks if the configuration is valid +func (c *CompatibilityConfig) Validate() error { + if c.TraefikDynamicDir == "" { + return fmt.Errorf("traefik dynamic directory cannot be empty") + } + + return utils.ValidateLogLevel(c.LogLevel) +} + +// NewCompatibilityLayer creates a new CompatibilityLayer instance +func NewCompatibilityLayer(cfg *CompatibilityConfig) *CompatibilityLayer { + return &CompatibilityLayer{ + config: cfg, + } +} + +// GetName returns the service name +func (cl *CompatibilityLayer) GetName() string { + return "dinghy-compatibility" +} + +// SetDependencies sets the Docker client and logger from the service framework +func (cl *CompatibilityLayer) SetDependencies(dockerClient *client.Client, logger *logger.Logger) { + cl.dockerClient = dockerClient + cl.logger = logger +} + +// TraefikLabels represents the labels to be applied to containers +type TraefikLabels struct { + Enable string + Rule string + Port string + RouterName string + ServiceName string +} + +// ContainerInfo holds essential container information for processing +type ContainerInfo struct { + ID string + Name string + VirtualHost string + VirtualPort string + IsRunning bool +} + +// extractContainerInfo extracts relevant information from a container inspection +func (cl *CompatibilityLayer) extractContainerInfo(inspect types.ContainerJSON) ContainerInfo { + return ContainerInfo{ + ID: inspect.ID, + Name: strings.TrimPrefix(inspect.Name, "/"), + VirtualHost: utils.GetDockerEnvVar(inspect.Config.Env, "VIRTUAL_HOST"), + VirtualPort: utils.GetDockerEnvVar(inspect.Config.Env, "VIRTUAL_PORT"), + IsRunning: inspect.State.Running, + } +} + +// HandleInitialScan performs initial processing of existing containers +func (cl *CompatibilityLayer) HandleInitialScan(ctx context.Context) error { + containers, err := utils.RetryContainerList(ctx, cl.dockerClient, container.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list containers: %w", err) + } + + cl.logger.Info("Scanning existing containers", "count", len(containers)) + + for _, cont := range containers { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if err := cl.processContainer(ctx, cont.ID); err != nil { + cl.logger.Error("Failed to process container", + "error", err, + "container_id", utils.FormatDockerID(cont.ID), + "container_name", cont.Names) + // Continue processing other containers instead of failing fast + } + } + } + + return nil +} + +// HandleEvent processes a Docker event +func (cl *CompatibilityLayer) HandleEvent(ctx context.Context, event events.Message) error { + switch event.Action { + case "start": + return cl.processContainer(ctx, event.Actor.ID) + case "die": + return cl.removeTraefikConfig(event.Actor.ID) + default: + // Unhandled events are not an error, just log and continue + cl.logger.Debug("Unhandled container action", "action", event.Action, "container_id", utils.FormatDockerID(event.Actor.ID)) + return nil + } +} + +func main() { + ctx := context.Background() + + // Initialize configuration + cfg := &CompatibilityConfig{ + DryRun: config.GetEnvOrDefault("DRY_RUN", "false") == "true", + LogLevel: config.GetEnvOrDefault("LOG_LEVEL", "info"), + TraefikDynamicDir: 
config.GetEnvOrDefault("TRAEFIK_DYNAMIC_DIR", DefaultTraefikDynamicDir), + } + + // Validate configuration + if err := cfg.Validate(); err != nil { + fmt.Fprintf(os.Stderr, "Invalid configuration: %v\n", err) + os.Exit(1) + } + + // Create handler + handler := NewCompatibilityLayer(cfg) + + // Run service with shared framework + if err := service.RunWithSignalHandling(ctx, handler.GetName(), cfg.LogLevel, handler); err != nil { + fmt.Fprintf(os.Stderr, "Service failed: %v\n", err) + os.Exit(1) + } +} + +func (cl *CompatibilityLayer) processContainer(ctx context.Context, containerID string) error { + inspect, err := utils.RetryContainerInspect(ctx, cl.dockerClient, containerID) + if err != nil { + return fmt.Errorf("failed to inspect container %s: %w", containerID, err) + } + + // Extract container information + containerInfo := cl.extractContainerInfo(inspect) + + // Skip if container is not running + if !containerInfo.IsRunning { + cl.logger.Debug("Skipping non-running container", + "container_id", utils.FormatDockerID(containerID), + "container_name", containerInfo.Name) + return nil + } + + // Skip if no VIRTUAL_HOST found + if containerInfo.VirtualHost == "" { + cl.logger.Debug("Skipping container without VIRTUAL_HOST", + "container_id", utils.FormatDockerID(containerID), + "container_name", containerInfo.Name) + return nil + } + + // Skip if traefik labels are already set. + // Check for traefik labels (any label starting with "traefik.") + labels := inspect.Config.Labels + for label := range labels { + if strings.HasPrefix(label, "traefik.") { + cl.logger.Debug("Skipping container with existing Traefik label", + "container_id", utils.FormatDockerID(containerID), + "container_name", containerInfo.Name, + "label", label) + return nil + } + } + + cl.logger.Info("Found container with VIRTUAL_HOST", + "container_id", utils.FormatDockerID(containerID), + "container_name", containerInfo.Name, + "virtual_host", containerInfo.VirtualHost, + "virtual_port", containerInfo.VirtualPort) + + // Generate Traefik configuration + traefikConfig := cl.generateTraefikConfig(inspect, containerInfo) + + cl.logger.Info("Generated Traefik configuration", + "container_id", utils.FormatDockerID(containerID), + "routers", len(traefikConfig.HTTP.Routers), + "services", len(traefikConfig.HTTP.Services)) + + // Write Traefik configuration to file + return cl.writeTraefikConfig(containerID, traefikConfig) +} + +func (cl *CompatibilityLayer) generateTraefikConfig(inspect types.ContainerJSON, containerInfo ContainerInfo) *config.TraefikConfig { + traefikConfig := config.NewTraefikConfig() + + // Generate service name from container name + serviceName := generateServiceName(inspect.Name) + + // Parse VIRTUAL_HOST (can contain multiple hosts separated by commas) + hosts := parseVirtualHosts(containerInfo.VirtualHost) + + // Get container IP address + containerIP := getContainerIP(inspect) + if containerIP == "" { + cl.logger.Error("Could not determine container IP", "container_id", utils.FormatDockerID(inspect.ID)) + return traefikConfig + } + + for i, host := range hosts { + routerName := fmt.Sprintf("%s-%d", serviceName, i) + + // Set up router rule + var rule string + if isWildcardHost(host.hostname) { + // Handle wildcard hosts + rule = fmt.Sprintf("HostRegexp(`%s`)", convertWildcardToRegex(host.hostname)) + } else { + // Regular host + rule = fmt.Sprintf("Host(`%s`)", host.hostname) + } + + // Create HTTP router + httpRouter := &config.Router{ + Rule: rule, + Service: serviceName, + EntryPoints: []string{"http"}, 
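// For illustration (hypothetical container values): for a running container named
// "myapp_web_1" with VIRTUAL_HOST=myapp.loc, VIRTUAL_PORT=8080 and IP 172.18.0.5,
// the configuration assembled here and written by writeTraefikConfig would contain
// roughly the following (field names follow Traefik's dynamic-file format; the exact
// YAML depends on the pkg/config struct tags):
//
//   http:
//     routers:
//       myapp-web-1-0:
//         rule: Host(`myapp.loc`)
//         service: myapp-web-1
//         entryPoints: [http]
//       myapp-web-1-tls-0:
//         rule: Host(`myapp.loc`)
//         service: myapp-web-1
//         entryPoints: [https]
//         tls: {}
//     services:
//       myapp-web-1:
//         loadBalancer:
//           servers:
//             - url: http://172.18.0.5:8080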
+ } + traefikConfig.HTTP.Routers[routerName] = httpRouter + + // Create HTTPS router (always created now) + httpsRouterName := fmt.Sprintf("%s-tls-%d", serviceName, i) + httpsRouter := &config.Router{ + Rule: rule, + Service: serviceName, + EntryPoints: []string{"https"}, + TLS: &config.RouterTLSConfig{}, + } + traefikConfig.HTTP.Routers[httpsRouterName] = httpsRouter + } + + // Set up service + port := getEffectivePort(hosts, containerInfo.VirtualPort, inspect) + serverURL := fmt.Sprintf("http://%s:%s", containerIP, port) + + loadBalancer := &config.LoadBalancer{ + Servers: []config.Server{ + {URL: serverURL}, + }, + } + + traefikConfig.HTTP.Services[serviceName] = &config.Service{ + LoadBalancer: loadBalancer, + } + + return traefikConfig +} + +func getContainerIP(inspect types.ContainerJSON) string { + // Try to get IP from the first network + if inspect.NetworkSettings != nil && inspect.NetworkSettings.Networks != nil { + for _, network := range inspect.NetworkSettings.Networks { + if network.IPAddress != "" { + return network.IPAddress + } + } + } + return "" +} + +func getEffectivePort(hosts []virtualHost, virtualPort string, inspect types.ContainerJSON) string { + // Check if any host specifies a port + for _, host := range hosts { + if host.port != "" { + return host.port + } + } + + // Use VIRTUAL_PORT if specified + if virtualPort != "" { + return virtualPort + } + + // Fall back to default port detection + return getDefaultPort(inspect) +} + +func (cl *CompatibilityLayer) writeTraefikConfig(containerID string, cfg *config.TraefikConfig) error { + if cl.config.DryRun { + cl.logger.Info("DRY RUN: Would write Traefik config", + "container_id", utils.FormatDockerID(containerID), + "config_file", cl.configFileName(containerID)) + return nil + } + + // Ensure the dynamic config directory exists + if err := os.MkdirAll(cl.config.TraefikDynamicDir, ConfigDirPermissions); err != nil { + return fmt.Errorf("failed to create Traefik dynamic directory: %w", err) + } + + // Generate config file path + configFile := filepath.Join(cl.config.TraefikDynamicDir, cl.configFileName(containerID)) + + // Marshal config to YAML + configData, err := yaml.Marshal(cfg) + if err != nil { + return fmt.Errorf("failed to marshal Traefik config: %w", err) + } + + // Write config file + if err := os.WriteFile(configFile, configData, ConfigFilePermissions); err != nil { + return fmt.Errorf("failed to write Traefik config file: %w", err) + } + + cl.logger.Info("Wrote Traefik configuration", + "container_id", utils.FormatDockerID(containerID), + "config_file", configFile) + + return nil +} + +func (cl *CompatibilityLayer) removeTraefikConfig(containerID string) error { + if cl.config.DryRun { + cl.logger.Info("DRY RUN: Would remove Traefik config", + "container_id", utils.FormatDockerID(containerID), + "config_file", cl.configFileName(containerID)) + return nil + } + + configFile := filepath.Join(cl.config.TraefikDynamicDir, cl.configFileName(containerID)) + + // Check if file exists + if _, err := os.Stat(configFile); os.IsNotExist(err) { + cl.logger.Debug("Traefik config file does not exist", "config_file", configFile) + return nil + } + + // Remove config file + if err := os.Remove(configFile); err != nil { + return fmt.Errorf("failed to remove Traefik config file: %w", err) + } + + cl.logger.Info("Removed Traefik configuration", + "container_id", utils.FormatDockerID(containerID), + "config_file", configFile) + + return nil +} + +type virtualHost struct { + hostname string + port string +} + +func 
parseVirtualHosts(virtualHostEnv string) []virtualHost { + var hosts []virtualHost + + // Split by comma for multiple hosts + hostEntries := strings.Split(virtualHostEnv, ",") + + for _, entry := range hostEntries { + entry = strings.TrimSpace(entry) + if entry == "" { + continue + } + + // Check if port is specified (host:port format) + parts := strings.Split(entry, ":") + if len(parts) == 2 && isPort(parts[1]) { + hosts = append(hosts, virtualHost{ + hostname: parts[0], + port: parts[1], + }) + } else { + hosts = append(hosts, virtualHost{ + hostname: entry, + port: "", + }) + } + } + + return hosts +} + +func isPort(s string) bool { + port, err := strconv.Atoi(s) + return err == nil && port > 0 && port <= 65535 +} + +func isWildcardHost(hostname string) bool { + return strings.Contains(hostname, "*") || strings.HasPrefix(hostname, "~") +} + +func convertWildcardToRegex(hostname string) string { + if strings.HasPrefix(hostname, "~") { + // Already a regex, return as-is (remove the ~ prefix) + return strings.TrimPrefix(hostname, "~") + } + + // Convert wildcard to regex + regex := strings.ReplaceAll(hostname, ".", "\\.") + regex = strings.ReplaceAll(regex, "*", ".*") + return fmt.Sprintf("^%s$", regex) +} + +func generateServiceName(containerName string) string { + // Remove leading slash and sanitize name for Traefik + name := strings.TrimPrefix(containerName, "/") + // Replace invalid characters with hyphens + reg := regexp.MustCompile(`[^a-zA-Z0-9-]`) + name = reg.ReplaceAllString(name, "-") + // Remove consecutive hyphens + reg = regexp.MustCompile(`-+`) + name = reg.ReplaceAllString(name, "-") + // Trim hyphens from start and end + name = strings.Trim(name, "-") + + if name == "" { + name = "service" + } + + return name +} + +func getDefaultPort(inspect types.ContainerJSON) string { + // Get the first exposed port or return "80" as default + if inspect.Config.ExposedPorts != nil { + for port := range inspect.Config.ExposedPorts { + if strings.HasSuffix(string(port), "/tcp") { + return strings.TrimSuffix(string(port), "/tcp") + } + } + } + + // Check port bindings + if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil { + for port := range inspect.NetworkSettings.Ports { + if strings.HasSuffix(string(port), "/tcp") { + return strings.TrimSuffix(string(port), "/tcp") + } + } + } + + return "80" +} + +// configFileName returns the config file name for a container +func (cl *CompatibilityLayer) configFileName(containerID string) string { + return fmt.Sprintf("%s.yaml", utils.FormatDockerID(containerID)) +} diff --git a/cmd/dns-server/main.go b/cmd/dns-server/main.go index 1c1ca1d..af9fcf9 100644 --- a/cmd/dns-server/main.go +++ b/cmd/dns-server/main.go @@ -16,14 +16,23 @@ import ( ) type DNSServer struct { - customTLD string - targetIP string - port string - logger *logger.Logger + customDomains []string + targetIP string + port string + logger *logger.Logger } // handleDNSRequest processes incoming DNS queries func (s *DNSServer) handleDNSRequest(w dns.ResponseWriter, r *dns.Msg) { + // Only respond to queries for our configured domains/TLDs + // Security: Silently drop queries for domains we're not authoritative for + // This prevents DNS amplification attacks and reduces information leakage + if len(s.customDomains) == 0 { + s.logger.Debug("No custom domains/TLDs configured, dropping query") + return + } + + // First, validate that all questions are for domains we handle for _, question := range r.Question { name := strings.ToLower(question.Name) @@ -32,11 +41,20 
@@ func (s *DNSServer) handleDNSRequest(w dns.ResponseWriter, r *dns.Msg) { name, w.RemoteAddr())) - // Only respond to queries for our configured TLD - // Security: Silently drop queries for domains we're not authoritative for - // This prevents DNS amplification attacks and reduces information leakage - if !strings.HasSuffix(name, "."+s.customTLD+".") { - s.logger.Debug(fmt.Sprintf("Dropping query for %s (not our TLD: .%s)", name, s.customTLD)) + // Check if domain matches any configured domain/TLD + // Support both TLDs (e.g., "loc") and specific domains (e.g., "spark.loc") + domainWithoutDot := strings.TrimSuffix(name, ".") + found := false + + for _, domain := range s.customDomains { + // Check if it's an exact match or a subdomain + if domainWithoutDot == domain || strings.HasSuffix(domainWithoutDot, "."+domain) { + found = true + break + } + } + if !found { + s.logger.Debug(fmt.Sprintf("Dropping query for %s (not matching configured domains)", name)) return } } @@ -74,42 +92,47 @@ func (s *DNSServer) handleDNSRequest(w dns.ResponseWriter, r *dns.Msg) { func main() { var ( port = flag.String("port", "", "DNS server port (overrides config)") - customTLD = flag.String("tld", "", "Custom TLD to handle (overrides config)") + customTLD = flag.String("tld", "", "Custom domain/TLD to handle (overrides config)") targetIP = flag.String("ip", "", "IP address to resolve to (overrides config)") ) flag.Parse() // Load configuration cfg := config.Load() - log := logger.New("dns-server") + log := logger.NewWithLevel("dns-server", logger.LevelInfo) // Override config with command line flags if provided if *port != "" { cfg.DNSPort = *port } if *customTLD != "" { - cfg.DomainTLD = *customTLD + cfg.Domains = *customTLD } if *targetIP != "" { cfg.DNSIP = *targetIP } server := &DNSServer{ - customTLD: cfg.DomainTLD, - targetIP: cfg.DNSIP, - port: cfg.DNSPort, - logger: log, + customDomains: cfg.SplitDomains(), + targetIP: cfg.DNSIP, + port: cfg.DNSPort, + logger: log, + } + + if len(server.customDomains) == 0 { + log.Error("No domains/TLDs configured") + os.Exit(1) } // Validate target IP if net.ParseIP(cfg.DNSIP) == nil { - log.Error(fmt.Sprintf("Invalid target IP address: %s", cfg.DNSIP)) + log.Error("Invalid target IP address", "ip", cfg.DNSIP) os.Exit(1) } - log.Info(fmt.Sprintf("Starting DNS server on port %s", cfg.DNSPort)) - log.Info(fmt.Sprintf("Handling TLD: .%s", cfg.DomainTLD)) - log.Info(fmt.Sprintf("Resolving to: %s", cfg.DNSIP)) + log.Info("Starting DNS server", "port", cfg.DNSPort) + log.Info("Handling domains/TLDs", "domains", cfg.SplitDomains()) + log.Info("Resolving to", "target_ip", cfg.DNSIP) // Create DNS server dns.HandleFunc(".", server.handleDNSRequest) @@ -145,7 +168,7 @@ func main() { // Check for startup errors select { case err := <-errChan: - log.Error(err.Error()) + log.Error("Server startup failed", "error", err) os.Exit(1) case <-time.After(100 * time.Millisecond): } diff --git a/cmd/join-networks/main.go b/cmd/join-networks/main.go index 0de937a..09220ce 100644 --- a/cmd/join-networks/main.go +++ b/cmd/join-networks/main.go @@ -4,131 +4,121 @@ import ( "context" "flag" "fmt" - "log" "os" - "os/signal" "strings" - "syscall" - "time" + "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" - "github.com/docker/go-connections/nat" + "github.com/sparkfabrik/http-proxy/pkg/logger" + "github.com/sparkfabrik/http-proxy/pkg/service" + "github.com/sparkfabrik/http-proxy/pkg/utils" ) const ( bridgeDriverName = 
"bridge" defaultBridgeOption = "com.docker.network.bridge.default_bridge" defaultBridgeName = "bridge" - maxRetries = 3 - retryDelay = 2 * time.Second - stabilizationDelay = 1 * time.Second ) -// main sets up signal handling and runs the network join application -func main() { - containerName := flag.String("container-name", "", "the name of this docker container") - dryRun := flag.Bool("dry-run", false, "show what would be done without making changes") - flag.Parse() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - errChan := make(chan error, 1) - go func() { - errChan <- run(ctx, *containerName, *dryRun) - }() - - select { - case err := <-errChan: - if err != nil { - log.Fatalf("Error: %v", err) - } - log.Println("Application completed successfully") - case sig := <-sigChan: - log.Printf("Received signal %v, initiating graceful shutdown...", sig) - cancel() - - select { - case err := <-errChan: - if err != nil && err != context.Canceled { - log.Printf("Error during shutdown: %v", err) - } - case <-time.After(10 * time.Second): - log.Println("Shutdown timeout exceeded, forcing exit") - } +// NetworkJoiner manages automatic Docker network connections for the HTTP proxy container. +// It monitors Docker events and maintains optimal network connectivity by joining networks +// that contain manageable containers and leaving networks that become empty. +type NetworkJoiner struct { + dockerClient *client.Client + logger *logger.Logger + httpProxyContainerName string +} - log.Println("Application shut down gracefully") - } +// NetworkJoinerConfig holds configuration parameters for the NetworkJoiner service. +// HTTPProxyContainerName specifies which container to manage network connections for. 
+type NetworkJoinerConfig struct { + HTTPProxyContainerName string + LogLevel string } -// run executes the main application logic for joining/leaving networks -func run(ctx context.Context, containerName string, dryRun bool) error { - if strings.TrimSpace(containerName) == "" { - return fmt.Errorf("container-name is required") +// Validate checks if the configuration is valid +func (c *NetworkJoinerConfig) Validate() error { + if strings.TrimSpace(c.HTTPProxyContainerName) == "" { + return fmt.Errorf("container-name cannot be empty") } - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return fmt.Errorf("failed to create docker client: %w", err) - } - defer dockerClient.Close() + return utils.ValidateLogLevel(c.LogLevel) +} - containerID, err := getContainerID(ctx, dockerClient, containerName) - if err != nil { - return fmt.Errorf("failed to get container ID: %w", err) +// NewNetworkJoiner creates a new NetworkJoiner with configuration +func NewNetworkJoiner(cfg *NetworkJoinerConfig) *NetworkJoiner { + return &NetworkJoiner{ + httpProxyContainerName: cfg.HTTPProxyContainerName, } +} - preState, err := captureContainerNetworkState(ctx, dockerClient, containerID) - if err != nil { - return fmt.Errorf("failed to capture pre-operation state: %w", err) - } - log.Printf("Pre-operation state: %s", preState.summary()) +// GetName returns the service name for the EventHandler interface +func (nj *NetworkJoiner) GetName() string { + return "join-networks" +} - currentNetworks, err := getJoinedNetworks(ctx, dockerClient, containerID) - if err != nil { - return fmt.Errorf("failed to get current networks: %w", err) - } +// SetDependencies sets the Docker client and logger from the service framework +func (nj *NetworkJoiner) SetDependencies(dockerClient *client.Client, logger *logger.Logger) { + nj.dockerClient = dockerClient + nj.logger = logger +} - bridgeNetworks, err := getActiveBridgeNetworks(ctx, dockerClient, containerID) - if err != nil { - return fmt.Errorf("failed to get bridge networks: %w", err) - } +// HandleInitialScan scans all existing Docker bridge networks and connects the HTTP proxy +// to any networks that contain manageable containers (containers with VIRTUAL_HOST or traefik labels). +// This runs once at service startup to establish initial network connectivity. +func (nj *NetworkJoiner) HandleInitialScan(ctx context.Context) error { + nj.logger.Debug("Performing initial network scan and join") + return nj.performInitialNetworkJoin(ctx, nj.httpProxyContainerName) +} - defaultBridgeID, err := getDefaultBridgeNetworkID(ctx, dockerClient) - if err != nil { - log.Printf("Warning: could not identify default bridge network: %v", err) +// HandleEvent responds to Docker container lifecycle events to dynamically manage network connections. 
+// - Container 'start' events: Re-scans networks to join any new networks with manageable containers +// - Container 'die' events: Checks for empty networks (no manageable containers) and leaves them +// - Other events: Ignored to avoid unnecessary processing +func (nj *NetworkJoiner) HandleEvent(ctx context.Context, event events.Message) error { + action := string(event.Action) + switch action { + case "start": + return nj.handleContainerStart(ctx) + case "die": + return nj.handleContainerStop(ctx) + default: + nj.logger.Debug("Unhandled container action", "action", action) + return nil } +} - toJoin := getNetworksToJoin(currentNetworks, bridgeNetworks) - toLeave := getNetworksToLeave(currentNetworks, bridgeNetworks, defaultBridgeID) - - log.Printf("Plan: Currently in %d networks, found %d bridge networks, %d to join, %d to leave", - len(currentNetworks), len(bridgeNetworks), len(toJoin), len(toLeave)) +// ContainerInfo consolidates essential container state from Docker API inspection. +// Focuses on network connections to minimize API calls and provide network context. +type ContainerInfo struct { + ID string + Networks map[string]NetworkInfo +} - if dryRun { - log.Println("DRY RUN MODE - No changes will be made") - logPlannedOperations(ctx, dockerClient, toJoin, toLeave) - return nil - } +// NetworkOperation encapsulates a simple network management operation including +// the target container and planned join/leave operations. +type NetworkOperation struct { + HTTPProxyContainerName string + ContainerID string + ToJoin []string + ToLeave []string +} - if err := performNetworkOperationsWithRollback(ctx, dockerClient, containerName, containerID, toJoin, toLeave, preState); err != nil { - return fmt.Errorf("network operations failed: %w", err) - } +// NetworkSet represents a set of network IDs for cleaner set operations +type NetworkSet map[string]bool - log.Println("Network operations completed successfully") - return nil +// Contains checks if a network ID is in the set +func (ns NetworkSet) Contains(networkID string) bool { + return ns[networkID] } -type NetworkState struct { - Networks map[string]NetworkInfo - PortBindings nat.PortMap - HasExternal bool +// Add adds a network ID to the set +func (ns NetworkSet) Add(networkID string) { + ns[networkID] = true } +// NetworkInfo contains details about a network connection type NetworkInfo struct { ID string Name string @@ -136,261 +126,260 @@ type NetworkInfo struct { IP string } -func (ns *NetworkState) summary() string { - return fmt.Sprintf("networks=%d, ports=%d, external=%t", - len(ns.Networks), len(ns.PortBindings), ns.HasExternal) +// main parses command line arguments and runs the network join service +func main() { + containerName := flag.String("container-name", "http-proxy", "the name of this docker container") + logLevel := flag.String("log-level", "info", "log level (debug, info, warn, error)") + flag.Parse() + + // Create and validate configuration + cfg := &NetworkJoinerConfig{ + HTTPProxyContainerName: *containerName, + LogLevel: *logLevel, + } + + if err := cfg.Validate(); err != nil { + fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err) + os.Exit(1) + } + + // Create the handler + handler := NewNetworkJoiner(cfg) + + // Run the service using the shared service framework + ctx := context.Background() + if err := service.RunWithSignalHandling(ctx, "join-networks", cfg.LogLevel, handler); err != nil { + fmt.Fprintf(os.Stderr, "Service failed: %v\n", err) + os.Exit(1) + } } -// captureContainerNetworkState takes a 
snapshot of container's current network configuration -func captureContainerNetworkState(ctx context.Context, dockerClient *client.Client, containerID string) (*NetworkState, error) { - containerJSON, err := dockerClient.ContainerInspect(ctx, containerID) +// performInitialNetworkJoin orchestrates the network discovery and connection process. +// It inspects the HTTP proxy container's current state, discovers all bridge networks with +// manageable containers, calculates which networks to join/leave, and executes the operations. +func (nj *NetworkJoiner) performInitialNetworkJoin(ctx context.Context, containerProxy string) error { + // Get current container state + containerInfo, err := nj.getContainerInfo(ctx, containerProxy) if err != nil { - return nil, err + return fmt.Errorf("failed to get container info: %w", err) } - state := &NetworkState{ - Networks: make(map[string]NetworkInfo), - PortBindings: containerJSON.NetworkSettings.Ports, - HasExternal: false, + currentNetworks := make(NetworkSet) + for networkID := range containerInfo.Networks { + currentNetworks.Add(networkID) } - for networkName, networkData := range containerJSON.NetworkSettings.Networks { - state.Networks[networkData.NetworkID] = NetworkInfo{ - ID: networkData.NetworkID, - Name: networkName, - Gateway: networkData.Gateway, - IP: networkData.IPAddress, - } - if networkData.Gateway != "" { - state.HasExternal = true - } + bridgeNetworks, err := nj.getActiveBridgeNetworks(ctx, containerInfo.ID) + if err != nil { + return fmt.Errorf("failed to get bridge networks: %w", err) } - return state, nil -} + defaultBridgeID, err := nj.getDefaultBridgeNetworkID(ctx) + if err != nil { + nj.logger.Warn("Could not identify default bridge network", "error", err) + } -// performNetworkOperationsWithRollback executes network operations with automatic rollback on failure -func performNetworkOperationsWithRollback(ctx context.Context, dockerClient *client.Client, containerName, containerID string, - toJoin, toLeave []string, originalState *NetworkState) error { + toJoin := nj.getNetworksToJoin(currentNetworks, bridgeNetworks) + toLeave := nj.getNetworksToLeave(currentNetworks, bridgeNetworks, defaultBridgeID) - var operationsPerformed []func() error + nj.logger.Info("Network operation plan", + "current_networks", len(currentNetworks), + "bridge_networks", len(bridgeNetworks), + "to_join", len(toJoin), + "to_leave", len(toLeave)) - defer func() { - if r := recover(); r != nil { - log.Printf("PANIC during network operations, attempting rollback: %v", r) - rollbackOperations(operationsPerformed) - } - }() - - for _, networkID := range toJoin { - select { - case <-ctx.Done(): - log.Println("Shutdown signal received, stopping network operations") - return ctx.Err() - default: - } + // Create operation struct + operation := &NetworkOperation{ + HTTPProxyContainerName: containerProxy, + ContainerID: containerInfo.ID, + ToJoin: toJoin, + ToLeave: toLeave, + } - if err := safeJoinNetwork(ctx, dockerClient, containerName, networkID); err != nil { - log.Printf("Failed to join network %s, rolling back...", networkID[:12]) - rollbackOperations(operationsPerformed) - return err - } + return nj.performNetworkOperations(ctx, operation) +} - operationsPerformed = append(operationsPerformed, func() error { - return safeLeaveNetwork(ctx, dockerClient, containerName, networkID) - }) +// handleContainerStart responds to container start events by re-scanning all networks +// to detect newly created networks or networks that now contain manageable containers. 
+// This ensures the HTTP proxy can immediately route to new services without manual intervention. +func (nj *NetworkJoiner) handleContainerStart(ctx context.Context) error { + // Re-scan and join any new bridge networks + nj.logger.Debug("Container started, checking for new networks to join") + return nj.performInitialNetworkJoin(ctx, nj.httpProxyContainerName) +} - time.Sleep(stabilizationDelay) +// handleContainerStop responds to container stop events by identifying networks that +// no longer contain any manageable containers and safely disconnecting from them. +// This prevents the HTTP proxy from staying connected to unused networks, optimizing resource usage. +func (nj *NetworkJoiner) handleContainerStop(ctx context.Context) error { + nj.logger.Debug("Container stopped, checking for empty networks to leave") - if err := quickConnectivityCheck(ctx, dockerClient, containerID); err != nil { - log.Printf("Connectivity lost after joining %s, rolling back...", networkID[:12]) - rollbackOperations(operationsPerformed) - return fmt.Errorf("connectivity lost after joining network: %w", err) - } + // Get current container info + containerInfo, err := nj.getContainerInfo(ctx, nj.httpProxyContainerName) + if err != nil { + return fmt.Errorf("failed to get container info: %w", err) } - for _, networkID := range toLeave { - select { - case <-ctx.Done(): - log.Println("Shutdown signal received, stopping network operations") - return ctx.Err() - default: - } - - if err := ensureAlternativeConnectivity(ctx, dockerClient, containerID, networkID); err != nil { - log.Printf("Cannot leave network %s: would lose connectivity: %v", networkID[:12], err) + // Check each network the container is connected to + var networksToLeave []string + for networkID := range containerInfo.Networks { + // Skip default bridge network + defaultBridgeID, err := nj.getDefaultBridgeNetworkID(ctx) + if err == nil && networkID == defaultBridgeID { continue } - if err := safeLeaveNetwork(ctx, dockerClient, containerName, networkID); err != nil { - log.Printf("Warning: failed to leave network %s: %v", networkID[:12], err) + // Check if network has any manageable containers + hasActiveContainers, err := utils.HasManageableContainersInNetwork(ctx, nj.dockerClient, networkID, nj.httpProxyContainerName) + if err != nil { + nj.logger.Warn("Failed to check network for manageable containers", + "network_id", utils.FormatDockerID(networkID), "error", err) continue } - time.Sleep(stabilizationDelay) - - if err := quickConnectivityCheck(ctx, dockerClient, containerID); err != nil { - log.Printf("Connectivity lost after leaving %s, attempting to rejoin...", networkID[:12]) - if rejoinErr := safeJoinNetwork(ctx, dockerClient, containerName, networkID); rejoinErr != nil { - log.Printf("Failed to rejoin network %s: %v", networkID[:12], rejoinErr) - } - return fmt.Errorf("connectivity lost after leaving network: %w", err) + if !hasActiveContainers { + networksToLeave = append(networksToLeave, networkID) } } - finalState, err := captureContainerNetworkState(ctx, dockerClient, containerID) - if err != nil { - return fmt.Errorf("failed to capture final state: %w", err) - } - - log.Printf("Final state: %s", finalState.summary()) - - if !finalState.HasExternal && originalState.HasExternal { - return fmt.Errorf("lost external connectivity during operations") - } - - return nil -} - -// safeJoinNetwork connects a container to a network with retry logic -func safeJoinNetwork(ctx context.Context, dockerClient *client.Client, containerName, networkID 
string) error { - netName := getNetworkName(ctx, dockerClient, networkID) - log.Printf("Joining network %s (%s)", netName, networkID[:12]) - - return retryOperation(func() error { - return dockerClient.NetworkConnect(ctx, networkID, containerName, &network.EndpointSettings{}) - }, fmt.Sprintf("join network %s", networkID[:12])) -} + if len(networksToLeave) > 0 { + nj.logger.Info("Found empty networks to leave", "count", len(networksToLeave)) -// safeLeaveNetwork disconnects a container from a network with retry logic -func safeLeaveNetwork(ctx context.Context, dockerClient *client.Client, containerName, networkID string) error { - netName := getNetworkName(ctx, dockerClient, networkID) - log.Printf("Leaving network %s (%s)", netName, networkID[:12]) - - return retryOperation(func() error { - return dockerClient.NetworkDisconnect(ctx, networkID, containerName, true) - }, fmt.Sprintf("leave network %s", networkID[:12])) -} - -// retryOperation executes an operation with configurable retry attempts -func retryOperation(operation func() error, description string) error { - var lastErr error - - for attempt := 1; attempt <= maxRetries; attempt++ { - if err := operation(); err != nil { - lastErr = err - if attempt < maxRetries { - log.Printf("Attempt %d/%d failed for %s: %v, retrying in %v...", - attempt, maxRetries, description, err, retryDelay) - - timer := time.NewTimer(retryDelay) - <-timer.C - timer.Stop() - continue - } - } else { - if attempt > 1 { - log.Printf("Operation %s succeeded on attempt %d", description, attempt) + // Leave empty networks + for _, networkID := range networksToLeave { + if err := nj.safeLeaveNetwork(ctx, nj.httpProxyContainerName, networkID); err != nil { + nj.logger.Error("Failed to leave empty network", + "network_id", utils.FormatDockerID(networkID), "error", err) } - return nil } } - return fmt.Errorf("operation %s failed after %d attempts: %w", description, maxRetries, lastErr) + return nil } -// quickConnectivityCheck verifies that the container maintains network connectivity -func quickConnectivityCheck(ctx context.Context, dockerClient *client.Client, containerID string) error { - containerJSON, err := dockerClient.ContainerInspect(ctx, containerID) +// getContainerInfo performs a comprehensive Docker API inspection of the specified container, +// extracting network connections, port bindings, and connectivity status in a single API call. +// This optimizes performance by avoiding multiple API calls and provides complete container state. 
+func (nj *NetworkJoiner) getContainerInfo(ctx context.Context, containerName string) (*ContainerInfo, error) { + containerJSON, err := utils.RetryContainerInspect(ctx, nj.dockerClient, containerName) if err != nil { - return fmt.Errorf("failed to inspect container: %w", err) + return nil, fmt.Errorf("failed to inspect container %s: %w", containerName, err) } - networkCount := len(containerJSON.NetworkSettings.Networks) - if networkCount == 0 { - return fmt.Errorf("container has no network connections") - } + networks := make(map[string]NetworkInfo) - for _, networkData := range containerJSON.NetworkSettings.Networks { - if networkData.Gateway != "" && networkData.IPAddress != "" { - return nil + for networkName, networkData := range containerJSON.NetworkSettings.Networks { + networks[networkData.NetworkID] = NetworkInfo{ + ID: networkData.NetworkID, + Name: networkName, + Gateway: networkData.Gateway, + IP: networkData.IPAddress, } } - return fmt.Errorf("no external connectivity found") + return &ContainerInfo{ + ID: containerJSON.ID, + Networks: networks, + }, nil } -// ensureAlternativeConnectivity checks if leaving a network would break external connectivity -func ensureAlternativeConnectivity(ctx context.Context, dockerClient *client.Client, containerID, networkToLeave string) error { - containerJSON, err := dockerClient.ContainerInspect(ctx, containerID) - if err != nil { - return err - } - - externalConnections := 0 - for _, networkData := range containerJSON.NetworkSettings.Networks { - if networkData.NetworkID != networkToLeave && networkData.Gateway != "" { - externalConnections++ +// performNetworkOperations executes the planned network join/leave operations. +// Operations are performed in sequence: leave unwanted networks first, then join new networks. +// If any operation fails, the process exits to allow restart and recovery. +func (nj *NetworkJoiner) performNetworkOperations(ctx context.Context, op *NetworkOperation) error { + // Execute leave operations first + if len(op.ToLeave) > 0 { + if err := nj.executeLeaveOperations(ctx, op); err != nil { + return err } } - if externalConnections == 0 { - return fmt.Errorf("leaving this network would remove last external connection") + // Execute join operations + if len(op.ToJoin) > 0 { + return nj.executeJoinOperations(ctx, op) } return nil } -// rollbackOperations executes cleanup operations in reverse order -func rollbackOperations(operations []func() error) { - log.Printf("Rolling back %d operations...", len(operations)) +// executeJoinOperations connects the HTTP proxy to each specified network. +// If any operation fails, the process will exit and restart. 
+func (nj *NetworkJoiner) executeJoinOperations(ctx context.Context, op *NetworkOperation) error { + for _, networkID := range op.ToJoin { + if err := utils.CheckContext(ctx); err != nil { + return err + } - for i := len(operations) - 1; i >= 0; i-- { - if err := operations[i](); err != nil { - log.Printf("Rollback operation %d failed: %v", i, err) + if err := nj.safeJoinNetwork(ctx, op.HTTPProxyContainerName, networkID); err != nil { + nj.logger.Error("Failed to join network", "network_id", utils.FormatDockerID(networkID), "error", err) + return err } } + return nil } -// getNetworkName retrieves the human-readable name for a network ID -func getNetworkName(ctx context.Context, dockerClient *client.Client, networkID string) string { - if netResource, err := dockerClient.NetworkInspect(ctx, networkID, network.InspectOptions{}); err == nil { - return netResource.Name - } - return "unknown" -} +// executeLeaveOperations disconnects the HTTP proxy from specified networks. +// If any operation fails, the process will exit and restart. +func (nj *NetworkJoiner) executeLeaveOperations(ctx context.Context, op *NetworkOperation) error { + for _, networkID := range op.ToLeave { + if err := utils.CheckContext(ctx); err != nil { + return err + } -// logPlannedOperations displays what network operations would be performed in dry-run mode -func logPlannedOperations(ctx context.Context, dockerClient *client.Client, toJoin, toLeave []string) { - if len(toJoin) > 0 { - log.Println("Would JOIN networks:") - for _, networkID := range toJoin { - name := getNetworkName(ctx, dockerClient, networkID) - log.Printf(" - %s (%s)", name, networkID[:12]) + if err := nj.safeLeaveNetwork(ctx, op.HTTPProxyContainerName, networkID); err != nil { + nj.logger.Error("Failed to leave network", "network_id", utils.FormatDockerID(networkID), "error", err) + return err } } + return nil +} - if len(toLeave) > 0 { - log.Println("Would LEAVE networks:") - for _, networkID := range toLeave { - name := getNetworkName(ctx, dockerClient, networkID) - log.Printf(" - %s (%s)", name, networkID[:12]) - } +// safeJoinNetwork connects the HTTP proxy container to a specified network. +func (nj *NetworkJoiner) safeJoinNetwork(ctx context.Context, containerName, networkID string) error { + netName := nj.getNetworkName(ctx, networkID) + nj.logger.Info("Joining network", "name", netName, "id", utils.FormatDockerID(networkID)) + + err := utils.RetryNetworkConnect(ctx, nj.dockerClient, networkID, containerName, &network.EndpointSettings{}) + if err != nil { + nj.logger.Error("Failed to join network", "name", netName, "id", utils.FormatDockerID(networkID), "error", err) + return fmt.Errorf("failed to join network %s: %w", utils.FormatDockerID(networkID), err) } + + nj.logger.Debug("Successfully joined network", "name", netName, "id", utils.FormatDockerID(networkID)) + return nil } -// getContainerID retrieves the full container ID from a container name -func getContainerID(ctx context.Context, dockerClient *client.Client, containerName string) (string, error) { - containerJSON, err := dockerClient.ContainerInspect(ctx, containerName) +// safeLeaveNetwork disconnects the HTTP proxy container from a specified network. +// The 'force' flag ensures disconnection even if the container is running. 
+func (nj *NetworkJoiner) safeLeaveNetwork(ctx context.Context, containerName, networkID string) error { + netName := nj.getNetworkName(ctx, networkID) + nj.logger.Info("Leaving network", "name", netName, "id", utils.FormatDockerID(networkID)) + + err := nj.dockerClient.NetworkDisconnect(ctx, networkID, containerName, true) if err != nil { - return "", fmt.Errorf("failed to inspect container %s: %w", containerName, err) + nj.logger.Error("Failed to leave network", "name", netName, "id", utils.FormatDockerID(networkID), "error", err) + return fmt.Errorf("failed to leave network %s: %w", utils.FormatDockerID(networkID), err) } - return containerJSON.ID, nil + + nj.logger.Debug("Successfully left network", "name", netName, "id", utils.FormatDockerID(networkID)) + return nil +} + +// getNetworkName retrieves the human-readable name for a network ID for logging purposes. +// Falls back to a formatted ID if the network name cannot be determined, ensuring +// consistent logging even when networks are in transitional states. +func (nj *NetworkJoiner) getNetworkName(ctx context.Context, networkID string) string { + if netResource, err := utils.RetryNetworkInspect(ctx, nj.dockerClient, networkID, network.InspectOptions{}); err == nil { + return netResource.Name + } + return "unknown" } -// getDefaultBridgeNetworkID finds the ID of the default Docker bridge network -func getDefaultBridgeNetworkID(ctx context.Context, dockerClient *client.Client) (string, error) { - networks, err := dockerClient.NetworkList(ctx, network.ListOptions{}) +// getDefaultBridgeNetworkID identifies the Docker default bridge network by name and driver. +// The default bridge is excluded from automatic management because it contains system +// containers and should not be used for custom application routing. +func (nj *NetworkJoiner) getDefaultBridgeNetworkID(ctx context.Context) (string, error) { + networks, err := nj.dockerClient.NetworkList(ctx, network.ListOptions{}) if err != nil { return "", err } @@ -403,27 +392,14 @@ func getDefaultBridgeNetworkID(ctx context.Context, dockerClient *client.Client) return "", fmt.Errorf("default bridge network not found") } -// getJoinedNetworks returns the networks that the container is currently connected to -func getJoinedNetworks(ctx context.Context, dockerClient *client.Client, containerID string) (map[string]bool, error) { - networks := make(map[string]bool) +// getActiveBridgeNetworks discovers all Docker bridge networks that contain manageable containers. +// Scans each bridge network to identify containers with VIRTUAL_HOST environment variables +// or Traefik labels, excluding the HTTP proxy container itself and any non-manageable containers. 
+// Only considers containers that have dinghy env vars (VIRTUAL_HOST) or traefik labels +func (nj *NetworkJoiner) getActiveBridgeNetworks(ctx context.Context, containerID string) (NetworkSet, error) { + networks := make(NetworkSet) - containerJSON, err := dockerClient.ContainerInspect(ctx, containerID) - if err != nil { - return nil, fmt.Errorf("failed to inspect container %s: %w", containerID, err) - } - - for _, net := range containerJSON.NetworkSettings.Networks { - networks[net.NetworkID] = true - } - - return networks, nil -} - -// getActiveBridgeNetworks returns bridge networks that should be joined based on activity criteria -func getActiveBridgeNetworks(ctx context.Context, dockerClient *client.Client, containerID string) (map[string]bool, error) { - networks := make(map[string]bool) - - allNetworks, err := dockerClient.NetworkList(ctx, network.ListOptions{}) + allNetworks, err := nj.dockerClient.NetworkList(ctx, network.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to list networks: %w", err) } @@ -433,49 +409,72 @@ func getActiveBridgeNetworks(ctx context.Context, dockerClient *client.Client, c continue } - net, err := dockerClient.NetworkInspect(ctx, netOverview.ID, network.InspectOptions{}) + net, err := utils.RetryNetworkInspect(ctx, nj.dockerClient, netOverview.ID, network.InspectOptions{}) if err != nil { - log.Printf("Warning: failed to get info for network %s: %v", netOverview.ID, err) + nj.logger.Warn("Failed to get info for network", "network_id", netOverview.ID, "error", err) continue } - _, containsSelf := net.Containers[containerID] isDefaultBridge := net.Options[defaultBridgeOption] == "true" || net.Name == defaultBridgeName - hasMultipleContainers := len(net.Containers) > 1 - hasOtherContainers := len(net.Containers) == 1 && !containsSelf - if isDefaultBridge || hasMultipleContainers || hasOtherContainers { - networks[net.ID] = true - log.Printf("Including bridge network %s (%s) - Default: %t, MultipleContainers: %t, OtherContainers: %t", - net.Name, net.ID[:12], isDefaultBridge, hasMultipleContainers, hasOtherContainers) + // Always include default bridge + if isDefaultBridge { + networks.Add(net.ID) + nj.logger.Debug("Including default bridge network", + "name", net.Name, + "id", utils.FormatDockerID(net.ID)) + continue + } + + // For non-default networks, only include if they have manageable containers + hasManageableContainers, err := utils.HasManageableContainersInNetwork(ctx, nj.dockerClient, net.ID, containerID) + if err != nil { + nj.logger.Warn("Failed to check network for manageable containers", + "network_id", utils.FormatDockerID(net.ID), "error", err) + continue + } + + if hasManageableContainers { + networks.Add(net.ID) + nj.logger.Info("Including bridge network with manageable containers", + "name", net.Name, + "id", utils.FormatDockerID(net.ID)) + } else { + nj.logger.Debug("Skipping network without manageable containers", + "name", net.Name, + "id", utils.FormatDockerID(net.ID)) } } return networks, nil } -// getNetworksToJoin returns networks that the container should join -func getNetworksToJoin(currentNetworks, bridgeNetworks map[string]bool) []string { +// getNetworksToJoin calculates which bridge networks the HTTP proxy should connect to +// by comparing currently connected networks against networks containing manageable containers. +// Returns networks that have manageable containers but are not yet connected to the proxy. 
+func (nj *NetworkJoiner) getNetworksToJoin(currentNetworks, bridgeNetworks NetworkSet) []string { var networkIDs []string for networkID := range bridgeNetworks { - if !currentNetworks[networkID] { + if !currentNetworks.Contains(networkID) { networkIDs = append(networkIDs, networkID) } } return networkIDs } -// getNetworksToLeave returns networks that the container should leave, protecting the default bridge -func getNetworksToLeave(currentNetworks, bridgeNetworks map[string]bool, defaultBridgeID string) []string { +// getNetworksToLeave identifies networks the HTTP proxy should disconnect from because +// they no longer contain manageable containers. Excludes the default bridge network +// to maintain basic Docker connectivity and only disconnects from networks without manageable containers. +func (nj *NetworkJoiner) getNetworksToLeave(currentNetworks, bridgeNetworks NetworkSet, defaultBridgeID string) []string { var networkIDs []string for networkID := range currentNetworks { if networkID == defaultBridgeID { - log.Printf("Protecting default bridge network %s from disconnection", networkID[:12]) + nj.logger.Debug("Protecting default bridge network from disconnection", "network_id", utils.FormatDockerID(networkID)) continue } - if !bridgeNetworks[networkID] { + if !bridgeNetworks.Contains(networkID) { networkIDs = append(networkIDs, networkID) } } diff --git a/compose.yml b/compose.yml new file mode 100644 index 0000000..d0c2a46 --- /dev/null +++ b/compose.yml @@ -0,0 +1,93 @@ +services: + dinghy_layer: + build: + context: . + dockerfile: build/Dockerfile + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - traefik_dynamic:/traefik/dynamic + command: ["sh", "-c", "/usr/local/bin/dinghy-layer"] + labels: + - "traefik.enable=false" + + join_networks: + build: + context: . + dockerfile: build/Dockerfile + volumes: + - /var/run/docker.sock:/var/run/docker.sock + command: + ["sh", "-c", "/usr/local/bin/join-networks -container-name http-proxy"] + labels: + - "traefik.enable=false" + + dns: + build: + context: . 
+ dockerfile: build/Dockerfile + ports: + - "19322:19322/udp" + command: ["sh", "-c", "/usr/local/bin/dns-server"] + environment: + - HTTP_PROXY_DNS_TLDS=${HTTP_PROXY_DNS_TLDS:-loc} + - HTTP_PROXY_DNS_TARGET_IP=${HTTP_PROXY_DNS_TARGET_IP:-127.0.0.1} + - HTTP_PROXY_DNS_PORT=${HTTP_PROXY_DNS_PORT:-19322} + labels: + - "traefik.enable=false" + + traefik: + build: + context: ./build/traefik + dockerfile: Dockerfile + container_name: http-proxy + ports: + - "80:80" + - "443:443" + - "30000:8080" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + - traefik_dynamic:/traefik/dynamic + - "${LOCAL_HOME:-$HOME}/.local/spark/http-proxy/certs:/traefik/certs:ro" + labels: + - "traefik.enable=false" + + # Optional Prometheus service (enabled with --profile metrics) + prometheus: + build: + context: ./build/prometheus + dockerfile: Dockerfile + container_name: http-proxy-prometheus + ports: + - "9090" + volumes: + - prometheus_data:/prometheus + labels: + - "traefik.enable=false" + profiles: + - metrics + + # Optional Grafana service (enabled with --profile metrics) + grafana: + build: + context: ./build/grafana + dockerfile: Dockerfile + container_name: http-proxy-grafana + ports: + - "30001:3000" + environment: + - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin} + - GF_USERS_ALLOW_SIGN_UP=false + - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/var/lib/grafana/dashboards/traefik-official.json + volumes: + - grafana_data:/var/lib/grafana + labels: + - "traefik.enable=false" + depends_on: + - prometheus + profiles: + - metrics + +volumes: + traefik_dynamic: + prometheus_data: + grafana_data: diff --git a/dinghy.nginx.conf b/dinghy.nginx.conf deleted file mode 100644 index 2486f00..0000000 --- a/dinghy.nginx.conf +++ /dev/null @@ -1,7 +0,0 @@ -client_max_body_size 4g; -client_header_buffer_size 256k; -large_client_header_buffers 8 1024k; -proxy_read_timeout 86400s; -proxy_send_timeout 86400s; -proxy_buffers 8 1024k; -proxy_buffer_size 1024k; diff --git a/docs/dns-server.md b/docs/dns-server.md new file mode 100644 index 0000000..6f3d10d --- /dev/null +++ b/docs/dns-server.md @@ -0,0 +1,310 @@ +# DNS Server Configuration and Usage + +The HTTP proxy includes a **built-in DNS server** that automatically resolves configured domains to localhost, eliminating the need to manually edit `/etc/hosts` or configure system DNS. 
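+
+Under the hood the server applies one matching rule to every query: a name is answered if it exactly equals a configured domain or ends with a configured TLD, and everything else is rejected. The snippet below is an illustrative Go sketch of that rule only; the function name `matches` is ours and this is not the actual dns-server source:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// matches reports whether a queried name should be resolved:
+// it must equal a configured domain or end with a configured TLD.
+func matches(name string, entries []string) bool {
+	name = strings.TrimSuffix(strings.ToLower(name), ".")
+	for _, e := range entries {
+		e = strings.ToLower(strings.TrimSpace(e))
+		if name == e || strings.HasSuffix(name, "."+e) {
+			return true
+		}
+	}
+	return false
+}
+
+func main() {
+	entries := []string{"loc", "myproject.dev"} // e.g. HTTP_PROXY_DNS_TLDS=loc,myproject.dev
+	fmt.Println(matches("anything.loc.", entries)) // true: TLD match
+	fmt.Println(matches("myproject.dev", entries)) // true: exact domain match
+	fmt.Println(matches("other.dev", entries))     // false: not configured
+}
+```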
+
+## Features
+
+- **TLD Support**: Handle any domain with specific top-level domains (e.g., `*.loc`, `*.dev`)
+- **Specific Domain Support**: Handle only explicitly configured domains
+- **Mixed Configuration**: Support both TLDs and specific domains simultaneously
+- **Configurable Target**: Resolve domains to any IP address (default: `127.0.0.1`)
+- **Standard DNS Protocol**: Works with all DNS clients and system resolvers
+
+## Configuration
+
+### Environment Variables
+
+| Variable                   | Default     | Description                                                |
+| -------------------------- | ----------- | ---------------------------------------------------------- |
+| `HTTP_PROXY_DNS_TLDS`      | `loc`       | Comma-separated list of TLDs or specific domains to handle |
+| `HTTP_PROXY_DNS_TARGET_IP` | `127.0.0.1` | IP address to resolve all configured domains to            |
+| `HTTP_PROXY_DNS_PORT`      | `19322`     | UDP port for the DNS server to listen on                   |
+
+### Docker Compose Configuration
+
+```yaml
+services:
+  dns:
+    image: ghcr.io/sparkfabrik/http-proxy-services:latest
+    environment:
+      - HTTP_PROXY_DNS_TLDS=${HTTP_PROXY_DNS_TLDS:-loc}
+      - HTTP_PROXY_DNS_TARGET_IP=${HTTP_PROXY_DNS_TARGET_IP:-127.0.0.1}
+      - HTTP_PROXY_DNS_PORT=${HTTP_PROXY_DNS_PORT:-19322}
+    ports:
+      - "19322:19322/udp"
+```
+
+## Usage Patterns
+
+### 1. TLD Support (Recommended)
+
+Handle any subdomain of specific top-level domains:
+
+```bash
+# Configuration
+HTTP_PROXY_DNS_TLDS=loc
+
+# Resolves:
+✅ myapp.loc → 127.0.0.1
+✅ api.loc → 127.0.0.1
+✅ anything.loc → 127.0.0.1
+✅ sub.domain.loc → 127.0.0.1
+
+# Does not resolve:
+❌ myapp.dev → Not handled
+❌ example.com → Not handled
+```
+
+### 2. Multiple TLDs
+
+Support multiple development environments:
+
+```bash
+# Configuration
+HTTP_PROXY_DNS_TLDS=loc,dev,docker
+
+# Resolves:
+✅ myapp.loc → 127.0.0.1
+✅ api.dev → 127.0.0.1
+✅ service.docker → 127.0.0.1
+```
+
+### 3. Specific Domains
+
+Handle only explicitly configured domains:
+
+```bash
+# Configuration
+HTTP_PROXY_DNS_TLDS=spark.loc,api.dev
+
+# Resolves:
+✅ spark.loc → 127.0.0.1
+✅ api.dev → 127.0.0.1
+
+# Does not resolve:
+❌ other.loc → Not handled
+❌ spark.dev → Not handled
+❌ api.loc → Not handled
+```
+
+### 4. Mixed Configuration
+
+Combine TLDs and specific domains:
+
+```bash
+# Configuration
+HTTP_PROXY_DNS_TLDS=loc,myproject.dev
+
+# Resolves:
+✅ anything.loc → 127.0.0.1    # TLD match
+✅ myproject.dev → 127.0.0.1   # Specific domain match
+
+# Does not resolve:
+❌ other.dev → Not handled     # Not the specific domain
+❌ anything.com → Not handled  # Different TLD
+```
+
+## System Integration
+
+### macOS - Domain-Specific Resolution (Recommended)
+
+Configure your system to use the DNS server only for specific domains:
+
+```bash
+# Create resolver directory
+sudo mkdir -p /etc/resolver
+
+# Configure .loc domains
+echo "nameserver 127.0.0.1" | sudo tee /etc/resolver/loc
+echo "port 19322" | sudo tee -a /etc/resolver/loc
+
+# Configure .dev domains (if using multiple TLDs)
+echo "nameserver 127.0.0.1" | sudo tee /etc/resolver/dev
+echo "port 19322" | sudo tee -a /etc/resolver/dev
+```
+
+### Linux - systemd-resolved (Recommended)
+
+Configure systemd-resolved to use the DNS server for specific domains:
+
+```bash
+# Configure systemd-resolved to use http-proxy DNS for .loc domains
+sudo mkdir -p /etc/systemd/resolved.conf.d
+sudo tee /etc/systemd/resolved.conf.d/http-proxy.conf > /dev/null <<EOF
+[Resolve]
+DNS=127.0.0.1:19322
+Domains=~loc
+EOF
+
+# Apply the new configuration
+sudo systemctl restart systemd-resolved
+```
+
+### Command Line Testing
+
+```bash
+# Query the DNS server directly
+dig @127.0.0.1 -p 19322 myapp.loc
+
+# Alternative without the DNS server: add a hosts entry and test the proxy
+echo "127.0.0.1 myapp.loc" >> /etc/hosts
+curl http://myapp.loc
+```
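+
+For programmatic checks (for example in an integration test), the same lookup can be issued from Go with the `miekg/dns` client that this project already depends on. This is a minimal sketch, assuming the server is reachable on `127.0.0.1:19322` as configured above; it is not part of the dns-server source:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/miekg/dns"
+)
+
+func main() {
+	// Build the same A query that `dig myapp.loc` would send.
+	m := new(dns.Msg)
+	m.SetQuestion(dns.Fqdn("myapp.loc"), dns.TypeA)
+
+	// Exchange the query with the local dns-server over UDP.
+	c := new(dns.Client)
+	resp, _, err := c.Exchange(m, "127.0.0.1:19322")
+	if err != nil {
+		log.Fatalf("DNS query failed: %v", err)
+	}
+
+	// Print every A record in the answer; expect the configured target IP.
+	for _, rr := range resp.Answer {
+		if a, ok := rr.(*dns.A); ok {
+			fmt.Printf("myapp.loc resolves to %s\n", a.A)
+		}
+	}
+}
+```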
+### Browser Testing
+
+1. Configure system DNS as described above
+2. Open browser and navigate to `http://myapp.loc`
+3. Should resolve without editing hosts file
+
+## Troubleshooting
+
+### DNS Server Not Responding
+
+```bash
+# Check if DNS server is running
+docker compose ps dns
+
+# Check DNS server logs
+docker compose logs dns
+
+# Test DNS server accessibility
+nc -u -v 127.0.0.1 19322
+```
+
+### Domain Not Resolving
+
+```bash
+# Verify DNS server configuration
+docker compose exec dns env | grep DNS
+
+# Test specific domain directly
+dig @127.0.0.1 -p 19322 your-domain.loc
+
+# Check system DNS configuration
+scutil --dns | grep 127.0.0.1                  # macOS
+systemd-resolve --status | grep "127.0.0.1"    # Linux
+
+# Test systemd-resolved configuration (Linux)
+resolvectl query your-domain.loc  # Should use configured DNS
+resolvectl status                 # Show DNS servers per interface
+```
+
+### Performance Considerations
+
+- The DNS server is designed for development use
+- It handles standard DNS queries efficiently
+- For high-traffic scenarios, consider using system hosts file instead
+- Response time is typically < 1ms for configured domains
+
+## Security Notes
+
+- The DNS server only resolves configured domains
+- Unknown domains are rejected (NXDOMAIN response)
+- All configured domains resolve to the same target IP
+- No DNS forwarding or recursive resolution is performed
+- Suitable for development environments only
+
+## Integration Examples
+
+### Development Stack
+
+```yaml
+# docker-compose.yml
+services:
+  dns:
+    environment:
+      - HTTP_PROXY_DNS_TLDS=myproject.loc
+      - HTTP_PROXY_DNS_TARGET_IP=127.0.0.1
+
+  web:
+    environment:
+      - VIRTUAL_HOST=app.myproject.loc
+
+  api:
+    environment:
+      - VIRTUAL_HOST=api.myproject.loc
+```
+
+### Multi-Environment Setup
+
+```yaml
+# docker-compose.yml
+services:
+  dns:
+    environment:
+      - HTTP_PROXY_DNS_TLDS=dev,staging,loc
+      - HTTP_PROXY_DNS_TARGET_IP=127.0.0.1
+
+  # Development
+  app-dev:
+    environment:
+      - VIRTUAL_HOST=myapp.dev
+
+  # Staging
+  app-staging:
+    environment:
+      - VIRTUAL_HOST=myapp.staging
+```
+
+## Advanced Configuration
+
+### Custom Target IP
+
+Point domains to a different IP address:
+
+```yaml
+services:
+  dns:
+    environment:
+      - HTTP_PROXY_DNS_TLDS=loc
+      - HTTP_PROXY_DNS_TARGET_IP=192.168.1.100 # Point to another machine
+```
+
+### Custom Port
+
+Run DNS server on a different port:
+
+```yaml
+services:
+  dns:
+    environment:
+      - HTTP_PROXY_DNS_PORT=5353
+    ports:
+      - "5353:5353/udp"
+```
+
+### Health Checks
+
+Monitor DNS server health:
+
+```yaml
+services:
+  dns:
+    healthcheck:
+      test: ["CMD", "dig", "@127.0.0.1", "-p", "19322", "health.loc", "+short"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+```
diff --git a/docs/network-joining-flow.md b/docs/network-joining-flow.md
new file mode 100644
index 0000000..d2b1ce1
--- /dev/null
+++ b/docs/network-joining-flow.md
@@ -0,0 +1,268 @@
+# Network Joining Flow Documentation
+
+This document explains how the `join-networks` service automatically manages Docker network connections for the HTTP proxy container.
+
+## Overview
+
+The `join-networks` service ensures that the HTTP proxy container is automatically connected to all Docker networks that contain manageable containers (containers with `VIRTUAL_HOST` environment variables). This enables automatic routing without manual network configuration.
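+
+To make the "manageable container" criterion concrete, the check amounts to inspecting a container's environment and labels through the Docker API. A rough sketch with the Docker SDK is shown below; the helper name `isManageable` and the container name `my-container` are illustrative, and the real logic lives in the repository's utils helpers and may differ in detail:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/docker/docker/client"
+)
+
+// isManageable reports whether a container should be routed by the HTTP proxy:
+// it either declares a VIRTUAL_HOST environment variable or carries Traefik labels.
+func isManageable(ctx context.Context, cli *client.Client, containerID string) (bool, error) {
+	info, err := cli.ContainerInspect(ctx, containerID)
+	if err != nil {
+		return false, err
+	}
+
+	for _, env := range info.Config.Env {
+		if strings.HasPrefix(env, "VIRTUAL_HOST=") {
+			return true, nil
+		}
+	}
+	for label := range info.Config.Labels {
+		if strings.HasPrefix(label, "traefik.") {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func main() {
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		log.Fatal(err)
+	}
+	ok, err := isManageable(context.Background(), cli, "my-container") // hypothetical container name
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("manageable:", ok)
+}
+```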
+ +### Key Concepts + +- **HTTP Proxy Container**: The main Traefik container that routes HTTP traffic +- **Manageable Containers**: Any container with `VIRTUAL_HOST` environment variable or Traefik labels +- **Join-Networks Service**: A background service that monitors Docker events and manages network connections +- **Automatic Network Discovery**: The process of scanning Docker networks to find containers that need routing + +### How It Works + +1. **At Startup**: The service scans all Docker bridge networks and connects the HTTP proxy to networks containing manageable containers +2. **During Runtime**: When containers start/stop, the service automatically joins new networks or leaves empty ones +3. **Security**: Only explicitly configured containers (with `VIRTUAL_HOST` or Traefik labels) are considered for routing +4. **Fail-Fast**: If any network operation fails, the service exits and relies on container restart for recovery + +## Architecture Flow + +```mermaid +graph TD + A[Service Startup] --> B[Initial Network Scan] + B --> C[Get Container Info] + C --> D[Find Bridge Networks] + D --> E[Check Networks for Manageable Containers] + E --> F{Has Manageable Containers?} + F -->|Yes| G[Add to Join List] + F -->|No| H[Skip Network] + G --> I[Execute Network Operations] + H --> I + I --> J[Service Running - Listen for Events] + + J --> K{Docker Event} + K -->|Container Start| L[Handle Container Start] + K -->|Container Stop| M[Handle Container Stop] + K -->|Other| N[Ignore Event] + + L --> O[Re-scan Networks] + O --> P[Join New Networks] + P --> J + + M --> Q[Check Current Networks] + Q --> R{Network Empty?} + R -->|Yes| S[Leave Empty Network] + R -->|No| T[Keep Connection] + S --> J + T --> J + N --> J + + style A fill:#e1f5fe + style J fill:#f3e5f5 + style I fill:#e8f5e8 + style S fill:#fff3e0 +``` + +## HTTP Proxy and Container Interactions + +This sequence diagram shows how the HTTP proxy container interacts with other containers and networks: + +```mermaid +sequenceDiagram + participant HP as HTTP Proxy Container + participant JS as Join-Networks Service + participant DA as Docker API + participant N1 as Network: app-network + participant N2 as Network: api-network + participant C1 as App Container
(VIRTUAL_HOST=app.local) + participant C2 as API Container
(VIRTUAL_HOST=api.local) + participant C3 as Database Container
(no VIRTUAL_HOST) + + Note over HP,C3: Initial State: Containers and Networks Exist + + HP->>+JS: Service Startup + JS->>+DA: Get HTTP Proxy Container Info + DA-->>-JS: Current Networks: [bridge] + + JS->>+DA: List All Bridge Networks + DA-->>-JS: [bridge, app-network, api-network] + + Note over JS,C3: Scan each network for manageable containers + + JS->>+DA: Get Containers in app-network + DA-->>-JS: [C1 - has VIRTUAL_HOST] + + JS->>+DA: Get Containers in api-network + DA-->>-JS: [C2 - has VIRTUAL_HOST, C3 - no VIRTUAL_HOST] + + Note over JS: Decision: Join app-network & api-network
Skip bridge (default), Ignore C3 (not manageable) + + JS->>+DA: Connect HTTP Proxy to app-network + DA-->>-JS: โœ… Connected + + JS->>+DA: Connect HTTP Proxy to api-network + DA-->>-JS: โœ… Connected + + Note over HP,C3: HTTP Proxy now routes traffic to C1 and C2 + + rect rgb(230, 245, 255) + Note over HP,C3: New Container Scenario + C1->>+DA: Container Starts (new-app with VIRTUAL_HOST=new.local) + DA->>JS: Event: Container Start + JS->>+DA: Re-scan Networks + DA-->>-JS: Found new-app-network with manageable container + JS->>+DA: Connect HTTP Proxy to new-app-network + DA-->>-JS: โœ… Connected + Note over HP: HTTP Proxy now routes to new.local + end + + rect rgb(255, 245, 230) + Note over HP,C3: Container Stop Scenario + C2->>+DA: Container Stops + DA->>JS: Event: Container Stop + JS->>+DA: Check api-network for manageable containers + DA-->>-JS: Only C3 remains (not manageable) + JS->>+DA: Disconnect HTTP Proxy from api-network + DA-->>-JS: โœ… Disconnected + Note over HP: HTTP Proxy no longer routes to api-network + end + + Note over HP,C3: Result: HTTP Proxy automatically manages
network connections based on manageable containers +``` + +## Detailed Process Flow + +### 1. Initial Network Scan + +```mermaid +sequenceDiagram + participant S as Service + participant D as Docker API + participant N as Networks + participant C as Containers + + S->>D: Get HTTP Proxy Container Info + D-->>S: Container Details & Current Networks + + S->>D: List All Bridge Networks + D-->>N: Network List + + loop For Each Network + S->>D: List Containers in Network + D-->>C: Container List + S->>S: Check for Manageable Containers + alt Has Manageable Containers + S->>S: Add to Join List + else No Manageable Containers + S->>S: Skip Network + end + end + + S->>S: Calculate Network Operations + S->>D: Execute Network Operations + D-->>S: Success or Process Exit on Failure +``` + +### 2. Event-Driven Network Management + +```mermaid +stateDiagram-v2 + [*] --> Listening: Service Started + + Listening --> ContainerStart: Docker Event: Container Start + Listening --> ContainerStop: Docker Event: Container Stop + + ContainerStart --> ScanNetworks: Re-scan for new networks + ScanNetworks --> JoinNetworks: Found new networks with containers + JoinNetworks --> Listening: Networks joined successfully + + ContainerStop --> CheckNetworks: Check current network connections + CheckNetworks --> LeaveEmpty: Found empty networks + CheckNetworks --> Listening: All networks have containers + LeaveEmpty --> Listening: Empty networks left + + state ScanNetworks { + [*] --> FindBridge + FindBridge --> CheckContainers + CheckContainers --> [*] + } + + state CheckNetworks { + [*] --> GetCurrentNetworks + GetCurrentNetworks --> CheckEachNetwork + CheckEachNetwork --> [*] + } +``` + +### 3. Simplified Network Operations + +```mermaid +graph TD + A[Network Operation Request] --> B[Execute Operation] + B --> C{Success?} + C -->|Yes| D[Operation Complete] + C -->|No| E[Log Error & Exit Process] + + E --> F[Container Restart] + F --> G[Fresh Service Start] + + style A fill:#e3f2fd + style D fill:#e8f5e8 + style E fill:#ffebee + style F fill:#fff3e0 + style G fill:#e1f5fe +``` + +## Key Components + +### NetworkJoiner Service + +- **Purpose**: Manages automatic network joining/leaving for the HTTP proxy +- **Interface**: Implements `service.EventHandler` +- **Configuration**: Uses `NetworkJoinerConfig` with HTTP proxy container name + +### Network Discovery Process + +1. **Find Bridge Networks**: Lists all Docker bridge networks +2. **Filter Networks**: Excludes default bridge and non-bridge networks +3. **Check for Manageable Containers**: Looks for containers with `VIRTUAL_HOST` env vars or Traefik labels +4. 
**Calculate Operations**: Determines which networks to join/leave + +### Failure Handling Strategy + +- **Fail-Fast Approach**: Any network operation failure causes immediate process exit +- **Container Restart**: Relies on Docker/Kubernetes to restart the service automatically +- **Retry Logic**: Built into Docker API calls for transient failures +- **Clean State**: Each restart starts with a fresh scan of the current state + +### Event Handling + +- **Container Start**: Triggers network re-scan to join new networks +- **Container Stop**: Checks for empty networks that can be safely left +- **Filtering**: Only processes events for manageable containers + +## Configuration + +The service is configured via command-line flags: + +- `--container-name`: Name of the HTTP proxy container (default: "http-proxy") +- `--log-level`: Logging verbosity level (default: "info") + +### Internal Configuration Constants + +- **Max Retries**: 3 attempts for Docker API operations +- **Retry Delay**: 2-second delay between retry attempts +- **Bridge Driver**: Only processes "bridge" type networks +- **Default Bridge Protection**: Never disconnects from the default Docker bridge network + +## Error Handling + +The service uses a simplified, fail-fast error handling approach: + +- **Network Operation Failures**: Logged and process exits immediately +- **Docker API Errors**: Built-in retry logic with exponential backoff +- **Container Info Errors**: Process exits if critical information cannot be obtained +- **Service Recovery**: Container orchestration handles automatic restart and recovery + +## Benefits + +1. **Zero Configuration**: Automatically detects and connects to relevant networks +2. **Dynamic Management**: Responds to container lifecycle events in real-time +3. **Safety First**: Multiple safety checks prevent connectivity loss +4. **Efficient**: Only joins networks with manageable containers +5. **Resilient**: Handles failures with fail-fast approach and automatic restart recovery diff --git a/example/README.md b/example/README.md new file mode 100644 index 0000000..7ad1bcf --- /dev/null +++ b/example/README.md @@ -0,0 +1,230 @@ +# HTTP Proxy Stack Example + +This directory contains example configurations for using the http-proxy stack with pre-built images from GitHub Container Registry. + +## Available Images + +### Stable Release Images (recommended) + +- **`ghcr.io/sparkfabrik/http-proxy-traefik:latest`** - Traefik HTTP proxy +- **`ghcr.io/sparkfabrik/http-proxy-services:latest`** - Background services (dinghy-layer, join-networks, dns-server) + +### Development Images (for testing) + +Development images are built from feature branches and include: + +- **`ghcr.io/sparkfabrik/http-proxy-traefik:`** - Latest from branch +- **`ghcr.io/sparkfabrik/http-proxy-traefik:-`** - Specific commit +- **`ghcr.io/sparkfabrik/http-proxy-services:`** - Latest from branch +- **`ghcr.io/sparkfabrik/http-proxy-services:-`** - Specific commit + +To use development images, update the `compose.yml` tags accordingly: + +```yaml +# Example: Use images from 'feature/new-routing' branch +services: + dinghy_layer: + image: ghcr.io/sparkfabrik/http-proxy-services:feature-new-routing + # ... 
other services
+  traefik:
+    image: ghcr.io/sparkfabrik/http-proxy-traefik:feature-new-routing
+```
+
+## Files
+
+- **`compose.yml`** - The main HTTP proxy stack using stable published images
+- **`compose.examples.yml`** - Example applications demonstrating different routing configurations
+- **`html/index.html`** - Sample HTML file for the nginx example
+
+## Quick Start
+
+### 1. Basic HTTP Proxy
+
+```bash
+# Using Docker Compose directly
+docker compose up -d
+
+# Using the convenience script
+./bin/spark-http-proxy start
+
+# Check status
+docker compose ps
+```
+
+### 2. HTTP Proxy with Monitoring
+
+```bash
+# Using Docker Compose with profiles
+docker compose --profile metrics up -d
+
+# Using the convenience script (recommended)
+./bin/spark-http-proxy start-with-metrics
+
+# Check all services including monitoring
+docker compose ps
+```
+
+### 3. Access Services
+
+**Basic Stack:**
+
+- **Traefik Dashboard**: http://localhost:30000
+
+**With Monitoring:**
+
+- **Traefik Dashboard**: http://localhost:30000
+- **Grafana Dashboard**: http://localhost:30001 (admin/admin)
+- **Prometheus**: http://localhost:9090
+
+### 4. Convenience Commands
+
+```bash
+# Open dashboards in browser
+./bin/spark-http-proxy dashboard   # Traefik
+./bin/spark-http-proxy grafana     # Grafana (if running)
+./bin/spark-http-proxy prometheus  # Prometheus (if running)
+
+# Stop only monitoring services (keep proxy running)
+./bin/spark-http-proxy stop-metrics
+```
+
+### 5. Start Example Applications (Optional)
+
+```bash
+# Start example applications
+docker compose -f compose.examples.yml up -d
+
+# Check all services
+docker compose -f compose.examples.yml ps
+```
+
+**Example Apps Access:**
+
+- http://whoami-traefik.docker
+- http://whoami-virtual.docker
+- http://whoami-custom.docker
+- http://whoami-multi1.docker and http://whoami-multi2.docker
+- http://nginx.docker and http://www.nginx.docker
+- http://whoami-https.docker and https://whoami-https.docker (HTTPS example)
+
+## DNS Configuration
+
+To resolve `.docker` domains, configure your system DNS to use the proxy's DNS server:
+
+```bash
+# Add to /etc/resolver/docker (macOS)
+nameserver 127.0.0.1
+port 19322
+
+# Or add to /etc/resolv.conf (Linux)
+nameserver 127.0.0.1:19322
+```
+
+## Configuration Methods
+
+The example applications demonstrate different ways to configure routing:
+
+### 1. Traefik Labels (Recommended)
+
+```yaml
+services:
+  myapp:
+    image: myapp:latest
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.myapp.rule=Host(`myapp.docker`)"
+      - "traefik.http.routers.myapp.entrypoints=http"
+```
+
+### 2. VIRTUAL_HOST Environment Variable
+
+```yaml
+services:
+  myapp:
+    image: myapp:latest
+    environment:
+      - VIRTUAL_HOST=myapp.docker
+      - VIRTUAL_PORT=8080 # Optional, defaults to 80
+```
+
+### 3. Multi-domain VIRTUAL_HOST
+
+```yaml
+services:
+  myapp:
+    image: myapp:latest
+    environment:
+      - VIRTUAL_HOST=myapp.docker,api.myapp.docker,www.myapp.docker
+```
+
+## Adding Your Own Services
+
+To add your own services to be proxied:
+
+1. **Using a separate compose file** (recommended):
+
+   ```yaml
+   services:
+     myapp:
+       image: myapp:latest
+       environment:
+         - VIRTUAL_HOST=myapp.docker
+
+   networks:
+     default:
+       name: http-proxy_default
+       external: true
+   ```
+
+2. **Or add to the main compose.yml** and restart the stack.
+
+## Cleanup
+
+```bash
+# Stop example applications
+docker compose -f compose.examples.yml down
+
+# Stop the proxy stack
+docker compose down
+
+# Remove volumes (optional)
+docker compose down -v
+```
+
+## Troubleshooting
+
+### Service not accessible
+
+1. Check if the container is running: `docker compose ps`
+2. Verify DNS resolution: `dig myapp.docker @127.0.0.1 -p 19322`
+3. Check Traefik dashboard: http://localhost:30000
+4. View logs: `docker compose logs service-name`
+
+### DNS not working
+
+1. Verify DNS server is running: `docker compose ps dns`
+2. Test DNS server: `dig test.docker @127.0.0.1 -p 19322`
+3. Check system DNS configuration
+4. Follow the logs of all proxy services: `docker compose logs -f`
+
+For more troubleshooting information, see the main project README.
+
+## Notes
+
+- The proxy automatically discovers new containers with `VIRTUAL_HOST` or Traefik labels
+- No restart required when adding new services
+- DNS server provides automatic resolution for `.docker` domains
+- Traefik dashboard shows all configured routes and services
+- All infrastructure services are excluded from Traefik discovery (`traefik.enable=false`)
diff --git a/example/bin/spark-http-proxy b/example/bin/spark-http-proxy
new file mode 100755
index 0000000..cbe9bde
--- /dev/null
+++ b/example/bin/spark-http-proxy
@@ -0,0 +1,155 @@
+#!/usr/bin/env bash
+
+set -e
+
+# Configuration - can be overridden by environment variables
+export COMPOSE_PROJECT_NAME=${COMPOSE_PROJECT_NAME:-spark-http-proxy}
+export COMPOSE_FILE=${COMPOSE_FILE:-/opt/sparkdock/http-proxy-stack/compose.yml}
+
+# Fallback to local directory if system path doesn't exist
+if [ ! -f "$COMPOSE_FILE" ]; then
+  COMPOSE_FILE="$(dirname "$0")/../compose.yml"
+fi
+
+show_usage() {
+  echo "Usage: $0 <command> [options]"
+  echo "Commands:"
+  echo "  start                Start HTTP proxy (basic stack)"
+  echo "  start-with-metrics   Start HTTP proxy with monitoring stack"
+  echo "  stop-metrics         Stop only monitoring services"
+  echo "  clean                Stop all services and remove volumes"
+  echo "  dashboard            Open Traefik dashboard (http://localhost:30000)"
+  echo "  grafana              Open Grafana dashboard (http://localhost:30001)"
+  echo "  prometheus           Open Prometheus (http://localhost:9090)"
+  echo "  generate-mkcert      Generate SSL certificates"
+  echo "  up, down, logs...    Standard Docker Compose commands"
+  echo ""
+  echo "Examples:"
+  echo "  $0 start                 # Basic HTTP proxy only"
+  echo "  $0 start-with-metrics    # HTTP proxy + monitoring"
+  echo "  $0 stop-metrics          # Remove monitoring, keep proxy"
+  echo "  $0 clean                 # Stop everything"
+  echo "  $0 up --profile metrics  # Alternative metrics start"
+  echo ""
+  echo "Config: $COMPOSE_FILE"
+}
+
+generate_mkcert() {
+  local domain="$1"
+
+  if [ -z "$domain" ]; then
+    read -p "Enter domain name: " domain
+  fi
+
+  if [ -z "$domain" ]; then
+    echo "Error: Domain name required"
+    exit 1
+  fi
+
+  if ! command -v mkcert >/dev/null 2>&1; then
+    echo "Error: mkcert not installed"
+    exit 1
+  fi
+
+  local cert_dir="$HOME/.local/spark/http-proxy/certs"
+  mkdir -p "$cert_dir"
+
+  echo "Generating certificates for: $domain"
+  mkcert -cert-file "$cert_dir/$domain.pem" \
+    -key-file "$cert_dir/$domain-key.pem" \
+    "$domain"
+}
+
+open_dashboard() {
+  local url="http://localhost:30000"
+  echo "Opening Traefik dashboard: $url"
+
+  if command -v open >/dev/null 2>&1; then
+    open "$url"
+  elif command -v xdg-open >/dev/null 2>&1; then
+    xdg-open "$url"
+  else
+    echo "Cannot open browser automatically. Please visit: $url"
+    exit 1
+  fi
+}
+
+open_grafana() {
+  local url="http://localhost:30001"
+  echo "Opening Grafana dashboard: $url (admin/admin)"
+
+  if command -v open >/dev/null 2>&1; then
+    open "$url"
+  elif command -v xdg-open >/dev/null 2>&1; then
+    xdg-open "$url"
+  else
+    echo "Cannot open browser automatically. 
Please visit: $url" + echo "Login: admin/admin" + exit 1 + fi +} + +open_prometheus() { + local url="http://localhost:9090" + echo "Opening Prometheus: $url" + + if command -v open >/dev/null 2>&1; then + open "$url" + elif command -v xdg-open >/dev/null 2>&1; then + xdg-open "$url" + else + echo "Cannot open browser automatically. Please visit: $url" + exit 1 + fi +} + +case "$1" in + ""|"-h"|"--help") + show_usage + exit 0 + ;; + start) + echo "๐Ÿš€ Starting HTTP Proxy (basic stack)..." + # Stop everything first to ensure clean state + docker compose -f "$COMPOSE_FILE" --profile metrics down -v + docker compose -f "$COMPOSE_FILE" up -d --pull always + echo "โœ… HTTP Proxy running - Traefik Dashboard: http://localhost:30000" + echo "๐Ÿ’ก To add monitoring: $0 start-with-metrics" + ;; + start-with-metrics) + echo "๐Ÿš€ Starting HTTP Proxy with monitoring..." + # Stop everything first to ensure clean state + docker compose -f "$COMPOSE_FILE" --profile metrics down -v + docker compose -f "$COMPOSE_FILE" --profile metrics up -d --pull always + echo "โœ… HTTP Proxy + Monitoring running:" + echo " - Traefik Dashboard: http://localhost:30000" + echo " - Grafana Dashboard: http://localhost:30001 (admin/admin)" + echo " - Prometheus: http://localhost:9090" + ;; + stop-metrics) + echo "๐Ÿ›‘ Stopping monitoring services..." + docker compose -f "$COMPOSE_FILE" stop prometheus grafana + docker compose -f "$COMPOSE_FILE" rm -f prometheus grafana + echo "โœ… Monitoring stopped (HTTP Proxy still running)" + ;; + clean) + echo "๐Ÿงน Stopping all services and cleaning volumes..." + docker compose -f "$COMPOSE_FILE" --profile metrics down -v + echo "โœ… All services stopped and volumes removed" + ;; + dashboard) + open_dashboard + ;; + grafana) + open_grafana + ;; + prometheus) + open_prometheus + ;; + generate-mkcert) + generate_mkcert "$2" + ;; + *) + docker compose -f "$COMPOSE_FILE" "$@" + ;; +esac diff --git a/example/compose.examples.yml b/example/compose.examples.yml new file mode 100644 index 0000000..ae9170f --- /dev/null +++ b/example/compose.examples.yml @@ -0,0 +1,82 @@ +# Example Applications for HTTP Proxy Stack +# This file demonstrates different ways to configure applications to work with the http-proxy +# +# Prerequisites: +# 1. Start the http-proxy stack first: +# docker compose up -d +# 2. Configure DNS (optional but recommended): +# Set your system DNS to 127.0.0.1:19322 or use custom TLDs +# 3. Then start these examples: +# docker compose -f compose.examples.yml up -d +# +# DNS Configuration: +# The http-proxy includes a built-in DNS server that resolves .docker domains to localhost. 
+# You can configure different TLDs or specific domains using environment variables: +# - HTTP_PROXY_DNS_TLDS=docker,loc,dev (supports multiple TLDs) +# - HTTP_PROXY_DNS_TLDS=spark.loc,api.dev (supports specific domains) +# - HTTP_PROXY_DNS_TARGET_IP=127.0.0.1 (IP to resolve domains to) +# +# Access examples: +# - http://whoami-traefik.docker +# - http://whoami-virtual.docker +# - http://whoami-custom.docker +# - http://whoami-multi1.docker and http://whoami-multi2.docker +# - http://nginx.docker and http://www.nginx.docker + +services: + # Example 1: Using Traefik labels (recommended) + whoami-traefik: + image: traefik/whoami:latest + labels: + - "traefik.enable=true" + - "traefik.http.routers.whoami-traefik.rule=Host(`whoami-traefik.docker`)" + - "traefik.http.routers.whoami-traefik.entrypoints=http" + - "traefik.http.services.whoami-traefik.loadbalancer.server.port=80" + + # Example 2: Using VIRTUAL_HOST (dinghy/dinghy-http-proxy compatible) + whoami-virtual: + image: traefik/whoami:latest + environment: + - VIRTUAL_HOST=whoami-virtual.docker + + # Example 3: Using VIRTUAL_HOST with custom port + whoami-custom-port: + image: traefik/whoami:latest + environment: + - VIRTUAL_HOST=whoami-custom.docker + - VIRTUAL_PORT=80 + + # Example 4: Multi-domain VIRTUAL_HOST + whoami-multi: + image: traefik/whoami:latest + environment: + - VIRTUAL_HOST=whoami-multi1.docker,whoami-multi2.docker + + # Example 5: Web application with multiple domains + nginx-example: + image: nginx:alpine + environment: + - VIRTUAL_HOST=nginx.docker,www.nginx.docker + + # Example 6: VIRTUAL_HOST automatically creates both HTTP and HTTPS routes + whoami-automatic-https: + image: traefik/whoami:latest + environment: + - VIRTUAL_HOST=whoami-https.docker # Automatically available on both HTTP and HTTPS + + # Example 7: API with Traefik CORS labels (if needed) + whoami-cors: + image: traefik/whoami:latest + environment: + - VIRTUAL_HOST=api.docker + labels: + # Example CORS configuration using Traefik labels + - "traefik.http.middlewares.api-cors.headers.accesscontrolalloworiginlist=*" + - "traefik.http.middlewares.api-cors.headers.accesscontrolallowmethods=GET,OPTIONS,PUT,POST,DELETE,PATCH" + - "traefik.http.routers.api-http.middlewares=api-cors" + - "traefik.http.routers.api-https.middlewares=api-cors" + +networks: + default: + name: http-proxy_default + external: true diff --git a/example/compose.yml b/example/compose.yml new file mode 100644 index 0000000..6dec5b5 --- /dev/null +++ b/example/compose.yml @@ -0,0 +1,110 @@ +# HTTP Proxy Stack - Production Ready Configuration +# This demonstrates how to use the published images from GitHub Container Registry +# +# Usage: +# Basic HTTP Proxy: +# docker compose up -d +# +# With Monitoring (Prometheus + Grafana): +# docker compose --profile metrics up -d +# +# Access: +# - Traefik Dashboard: http://localhost:30000 +# - DNS Server: UDP port 19322 +# - HTTP Proxy: Port 80 +# - HTTPS Proxy: Port 443 (manual configuration required) +# - Grafana (optional): http://localhost:30001 (admin/admin) +# - Prometheus (optional): http://localhost:9090 + +services: + # Watches Docker events and generates Traefik configuration for VIRTUAL_HOST + dinghy_layer: + image: ghcr.io/sparkfabrik/http-proxy-services:feature-98_migrate_to_traefik + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - traefik_dynamic:/traefik/dynamic + command: ["sh", "-c", "/usr/local/bin/dinghy-layer"] + labels: + - "traefik.enable=false" + restart: unless-stopped + + # Automatically joins containers to the 
http-proxy network + join_networks: + image: ghcr.io/sparkfabrik/http-proxy-services:feature-98_migrate_to_traefik + volumes: + - /var/run/docker.sock:/var/run/docker.sock + command: + ["sh", "-c", "/usr/local/bin/join-networks -container-name http-proxy"] + labels: + - "traefik.enable=false" + restart: unless-stopped + + dns: + image: ghcr.io/sparkfabrik/http-proxy-services:feature-98_migrate_to_traefik + ports: + - "19322:19322/udp" + environment: + - HTTP_PROXY_DNS_TLDS=${HTTP_PROXY_DNS_TLDS:-loc} # Comma-separated list of TLDs or specific domains to handle + - HTTP_PROXY_DNS_TARGET_IP=${HTTP_PROXY_DNS_TARGET_IP:-127.0.0.1} # IP address to resolve domains to + - HTTP_PROXY_DNS_PORT=${HTTP_PROXY_DNS_PORT:-19322} # Port for DNS server to listen on + command: ["sh", "-c", "/usr/local/bin/dns-server"] + labels: + - "traefik.enable=false" + restart: unless-stopped + + # Main HTTP reverse proxy + traefik: + image: ghcr.io/sparkfabrik/http-proxy-traefik:feature-98_migrate_to_traefik + container_name: http-proxy + ports: + - "80:80" + - "443:443" + - "30000:8080" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + - traefik_dynamic:/traefik/dynamic + - "${HOME}/.local/spark/http-proxy/certs:/traefik/certs:ro" # Auto-load user certificates + labels: + - "traefik.enable=false" + restart: unless-stopped + + prometheus: + image: ghcr.io/sparkfabrik/http-proxy-prometheus:feature-98_migrate_to_traefik + container_name: http-proxy-prometheus + ports: + - "9090" + volumes: + - prometheus_data:/prometheus + labels: + - "traefik.enable=false" + restart: unless-stopped + profiles: + - metrics + + grafana: + image: ghcr.io/sparkfabrik/http-proxy-grafana:feature-98_migrate_to_traefik + container_name: http-proxy-grafana + ports: + - "30001:3000" + environment: + - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin} + - GF_USERS_ALLOW_SIGN_UP=false + - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/var/lib/grafana/dashboards/traefik-official.json + volumes: + - grafana_data:/var/lib/grafana + labels: + - "traefik.enable=false" + depends_on: + - prometheus + restart: unless-stopped + profiles: + - metrics + +volumes: + traefik_dynamic: + prometheus_data: + grafana_data: + +networks: + default: + name: http-proxy_default diff --git a/go.mod b/go.mod index ce666da..7222906 100644 --- a/go.mod +++ b/go.mod @@ -4,13 +4,15 @@ go 1.24 require ( github.com/docker/docker v27.5.1+incompatible - github.com/docker/go-connections v0.4.0 + github.com/docker/go-connections v0.5.0 github.com/miekg/dns v1.1.66 + gopkg.in/yaml.v3 v3.0.1 ) require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/containerd/log v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -18,11 +20,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/moby/term v0.5.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.36.0 // indirect @@ -31,7 +34,7 @@ require ( go.opentelemetry.io/otel/trace v1.36.0 // indirect golang.org/x/mod v0.24.0 // indirect golang.org/x/net v0.40.0 // indirect - golang.org/x/sync v0.13.0 // indirect + golang.org/x/sync v0.14.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/time v0.12.0 // indirect golang.org/x/tools v0.32.0 // indirect diff --git a/go.sum b/go.sum index c2261d9..569d730 100644 --- a/go.sum +++ b/go.sum @@ -1,20 +1,19 @@ -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -26,8 +25,6 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -36,26 +33,30 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5uk github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -87,7 +88,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -97,12 +97,11 @@ golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -112,7 +111,6 @@ golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -130,8 +128,10 @@ google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= 
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= diff --git a/htdocs/EmojiSymbols-Regular.woff b/htdocs/EmojiSymbols-Regular.woff deleted file mode 100755 index f07716f..0000000 Binary files a/htdocs/EmojiSymbols-Regular.woff and /dev/null differ diff --git a/htdocs/favicon.ico b/htdocs/favicon.ico deleted file mode 100644 index 3652a3b..0000000 Binary files a/htdocs/favicon.ico and /dev/null differ diff --git a/htdocs/index.html b/htdocs/index.html deleted file mode 100644 index 167a610..0000000 --- a/htdocs/index.html +++ /dev/null @@ -1,46 +0,0 @@ - - - - - Dinghy HTTP Proxy - - - - - -
- -
-
-
-
-

Welcome to the Dinghy HTTP Proxy

-
-

- - Read the configuration documentation to get started. -

-

If you expected to see your application at this URL, it may not be running, - or it may be improperly configured. -

-
-
- - - diff --git a/htdocs/main.css b/htdocs/main.css deleted file mode 100644 index cfef22f..0000000 --- a/htdocs/main.css +++ /dev/null @@ -1,769 +0,0 @@ -/* -EmojiSymbols Font (c)blockworks - Kenichi Kaneko -http://emojisymbols.com/ -*/ -@font-face { - font-family: "EmojiSymbols"; - src: url(/EmojiSymbols-Regular.woff) format("woff"); - text-decoration: none; - font-style: normal; -} - -.dinghy-boat { - font-family: Times, EmojiSymbols; -} - -html, body -{ - height: 100%; -} - -body -{ - margin: 0px; - padding: 0px; - background: #2056ac; - font-family: 'Questrial', sans-serif; - font-size: 12pt; - color: rgba(0,0,0,.6); -} - - -h1, h2, h3 -{ - margin: 0; - padding: 0; - color: #404040; -} - -p, ol, ul -{ - margin-top: 0; -} - -ol, ul -{ - padding: 0; - list-style: none; -} - -p -{ - line-height: 180%; -} - -strong -{ -} - -a -{ - color: #2056ac; -} - -a:hover -{ - text-decoration: none; -} - - -.container -{ - margin: 0px auto; - width: 1200px; -} - - -/*********************************************************************************/ -/* Form Style */ -/*********************************************************************************/ - - form - { - } - - form label - { - display: block; - text-align: left; - margin-bottom: 0.5em; - } - - form .submit - { - margin-top: 2em; - line-height: 1.5em; - font-size: 1.3em; - } - - form input.text, - form select, - form textarea - { - position: relative; - -webkit-appearance: none; - display: block; - border: 0; - background: #fff; - background: rgba(255,255,255,0.75); - width: 100%; - border-radius: 0.50em; - margin: 1em 0em; - padding: 1.50em 1em; - box-shadow: inset 0 0.1em 0.1em 0 rgba(0,0,0,0.05); - border: solid 1px rgba(0,0,0,0.15); - -moz-transition: all 0.35s ease-in-out; - -webkit-transition: all 0.35s ease-in-out; - -o-transition: all 0.35s ease-in-out; - -ms-transition: all 0.35s ease-in-out; - transition: all 0.35s ease-in-out; - font-size: 1em; - outline: none; - } - - form input.text:hover, - form select:hover, - form textarea:hover - { - } - - form input.text:focus, - form select:focus, - form textarea:focus - { - box-shadow: 0 0 2px 1px #E0E0E0; - background: #fff; - } - - form textarea - { - min-height: 12em; - } - - form .formerize-placeholder - { - color: #555 !important; - } - - form ::-webkit-input-placeholder - { - color: #555 !important; - } - - form :-moz-placeholder - { - color: #555 !important; - } - - form ::-moz-placeholder - { - color: #555 !important; - } - - form :-ms-input-placeholder - { - color: #555 !important; - } - - form ::-moz-focus-inner - { - border: 0; - } - - -/*********************************************************************************/ -/* Image Style */ -/*********************************************************************************/ - -.image -{ - display: inline-block; - border: 1px solid rgba(0,0,0,.1); -} - -.image img -{ - display: block; - width: 100%; -} - -.image-full -{ - display: block; - width: 100%; - margin: 0 0 3em 0; -} - -.image-left -{ - float: left; - margin: 0 2em 2em 0; -} - -.image-centered -{ - display: block; - margin: 0 0 2em 0; -} - -.image-centered img -{ - margin: 0 auto; - width: auto; -} - -/*********************************************************************************/ -/* List Styles */ -/*********************************************************************************/ - -ul.style1 -{ -} - - -/*********************************************************************************/ -/* Social Icon Styles */ 
-/*********************************************************************************/ - -ul.contact -{ - margin: 0; - padding: 2em 0em 0em 0em; - list-style: none; -} - -ul.contact li -{ - display: inline-block; - padding: 0em 0.10em; - font-size: 1em; -} - -ul.contact li span -{ - display: none; - margin: 0; - padding: 0; -} - -ul.contact li a -{ - color: #FFF; -} - -ul.contact li a:before -{ - display: inline-block; - background: #4C93B9; - width: 40px; - height: 40px; - line-height: 40px; - text-align: center; - color: rgba(255,255,255,1); -} - - -/*********************************************************************************/ -/* Button Style */ -/*********************************************************************************/ - -.button -{ - display: inline-block; - margin-top: 2em; - padding: 0.8em 2em; - background: #64ABD1; - line-height: 1.8em; - letter-spacing: 1px; - text-decoration: none; - font-size: 1em; - color: #FFF; -} - -.button:before -{ - display: inline-block; - background: #8DCB89; - margin-right: 1em; - width: 40px; - height: 40px; - line-height: 40px; - border-radius: 20px; - text-align: center; - color: #272925; -} - -.button-small -{ -} - -/*********************************************************************************/ -/* Heading Titles */ -/*********************************************************************************/ - -.title -{ - margin-bottom: 3em; -} - -.title h2 -{ - font-size: 2.8em; -} - -.title .byline -{ - font-size: 1.1em; - color: #6F6F6F#; -} - -/*********************************************************************************/ -/* Header */ -/*********************************************************************************/ - -#header-wrapper -{ - overflow: hidden; - background: #2056ac; -} - -#header -{ - text-align: center; -} - -/*********************************************************************************/ -/* Logo */ -/*********************************************************************************/ - -#logo -{ - padding: 8em 0em 4em 0em; -} - -#logo h1 -{ - display: block; - margin-bottom: 0.20em; - padding: 0.20em 0.9em; - font-size: 3.5em; -} - -#logo a -{ - text-decoration: none; - color: #FFF; -} - -#logo span -{ - text-transform: uppercase; - font-size: 2.90em; - color: rgba(255,255,255,1); -} - -#logo span a -{ - color: rgba(255,255,255,0.8); -} - - - -/*********************************************************************************/ -/* Menu */ -/*********************************************************************************/ - -#menu -{ - height: 60px; -} - -#menu ul -{ - display: inline-block; - padding: 0em 2em; - text-align: center; -} - -#menu li -{ - display: inline-block; -} - -#menu li a, #menu li span -{ - display: inline-block; - padding: 0em 1.5em; - text-decoration: none; - font-size: 0.90em; - font-weight: 600; - text-transform: uppercase; - line-height: 60px; - outline: 0; - color: #FFF; -} - -#menu li:hover a, #menu li.active a, #menu li.active span -{ - background: #FFF; - border-radius: 7px 7px 0px 0px; - color: #2056ac; -} - -#menu .current_page_item a -{ -} - - -/*********************************************************************************/ -/* Banner */ -/*********************************************************************************/ - -#banner -{ - padding-top: 5em; -} - -/*********************************************************************************/ -/* Wrapper */ -/*********************************************************************************/ - - -.wrapper -{ - overflow: hidden; 
- padding: 0em 0em 5em 0em; - background: #FFF; -} - -#wrapper1 -{ - background: #FFF; -} - -#wrapper2 -{ - overflow: hidden; - background: #F3F3F3; - padding: 5em 0em; - text-align: center; -} - -#wrapper3 -{ -} - -#wrapper4 -{ -} - -/*********************************************************************************/ -/* Welcome */ -/*********************************************************************************/ - -#welcome -{ - overflow: hidden; - width: 1000px; - padding: 6em 100px 0em 100px; - text-align: center; -} - -#welcome .content -{ - padding: 0em 8em; -} - -#welcome .title h2 -{ -} - -#welcome a, -#welcome strong -{ -} - -/*********************************************************************************/ -/* Page */ -/*********************************************************************************/ - -#page-wrapper -{ - overflow: hidden; - background: #2F1E28; - padding: 3em 0em 6em 0em; - text-align: center; -} - -#page -{ -} - -/*********************************************************************************/ -/* Content */ -/*********************************************************************************/ - -#content -{ - float: left; - width: 700px; - padding-right: 100px; - border-right: 1px solid rgba(0,0,0,.1); -} - -/*********************************************************************************/ -/* Sidebar */ -/*********************************************************************************/ - -#sidebar -{ - float: right; - width: 350px; -} - -/*********************************************************************************/ -/* Footer */ -/*********************************************************************************/ - -#footer -{ - overflow: hidden; - padding: 5em 0em; - background: #E3F0F7; - text-align: center; -} - - - -#footer .fbox1, -#footer .fbox2, -#footer .fbox3 -{ - float: left; - width: 320px; - padding: 0px 40px 0px 40px; -} - -#footer .icon -{ - display: block; - margin-bottom: 1em; - font-size: 3em; -} - - -#footer .title span -{ - color: rgba(255,255,255,0.4); -} - -/*********************************************************************************/ -/* Copyright */ -/*********************************************************************************/ - -#copyright -{ - overflow: hidden; - padding: 5em 0em; - border-top: 20px solid rgba(255,255,255,0.08); - text-align: center; -} - -#copyright p -{ - letter-spacing: 1px; - font-size: 0.90em; - color: rgba(255,255,255,0.6); -} - -#copyright a -{ - text-decoration: none; - color: rgba(255,255,255,0.8); -} - -/*********************************************************************************/ -/* Newsletter */ -/*********************************************************************************/ - -#newsletter -{ - overflow: hidden; - padding: 8em 0em; - background: #EDEDED; - text-align: center; -} - -#newsletter .title h2 -{ - color: rgba(0,0,0,0.8); -} - -#newsletter .content -{ - width: 600px; - margin: 0px auto; -} - -/*********************************************************************************/ -/* Portfolio */ -/*********************************************************************************/ - -#portfolio -{ - overflow: hidden; - padding-top: 5em; - border-top: 1px solid rgba(0,0,0,0.2); -} - -#portfolio .box -{ - text-align: center; - color: rgba(0,0,0,0.5); -} - -#portfolio h3 -{ - display: block; - padding-bottom: 1em; - font-size: 1em; - color: rgba(0,0,0,0.6); -} - -#portfolio .title -{ - text-align: center; -} - -#portfolio .title h2 -{ - color: rgba(0,0,0,0.8); -} - -.column1, 
-.column2, -.column3, -.column4 -{ - width: 282px; -} - -.column1, -.column2, -.column3 -{ - float: left; - margin-right: 24px; -} - -.column4 -{ - float: right; -} - -/*********************************************************************************/ -/* Three Columns */ -/*********************************************************************************/ - -#three-column -{ - overflow: hidden; - margin-top: 5em; - padding-top: 1em; - border-top: 1px solid rgba(0,0,0,0.2); - text-align: center; -} - -#three-column h2 -{ - margin: 1em 0em; - font-size: 1.5em; - font-weight: 700; -} - -#three-column .icon -{ - position: relative; - display: block; - margin: 0px auto 0.80em auto; - background: none; - line-height: 150px; - font-size: 4em; - width: 150px; - height: 150px; - border-radius: 100px; - border: 6px solid #67128F; - text-align: center; - color: #FFF; - -} - -#three-column #tbox1, -#three-column #tbox2, -#three-column #tbox3 -{ - float: left; - width: 320px; - padding: 80px 40px 80px 40px; -} - -#three-column .title -{ - text-align: center; -} - -#three-column .title h2 -{ - font-size: 1.60em; -} - -#three-column .title .byline -{ - padding-top: 0.50em; - font-size: 0.90em; - color: #858585; -} - -#three-column .arrow-down -{ - border-top-color: #292929; -} - - -ul.tools -{ - margin: 0; - padding: 0em 0em 0em 0em; - list-style: none; -} - -ul.tools li -{ - display: inline-block; - padding: 0em .2em; - font-size: 4em; -} - -ul.tools li span -{ - display: none; - margin: 0; - padding: 0; -} - -ul.tools li a -{ - color: #FFF; -} - -ul.tools li a:before -{ - display: inline-block; - background: #1ABC9C; - width: 120px; - height: 120px; - border-radius: 50%; - line-height: 120px; - text-align: center; - color: #FFFFFF; -} diff --git a/nginx.tmpl b/nginx.tmpl deleted file mode 100644 index 634a189..0000000 --- a/nginx.tmpl +++ /dev/null @@ -1,283 +0,0 @@ -{{ $CurrentContainer := where $ "Hostname" .Env.HOSTNAME | first }} -{{ $TLD := .Env.DOMAIN_TLD }} - -# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the -# scheme used to connect to this server -map $http_x_forwarded_proto $proxy_x_forwarded_proto { - default $http_x_forwarded_proto; - '' $scheme; -} - -# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any -# Connection header that may have been passed to this server -map $http_upgrade $proxy_connection { - default upgrade; - '' close; -} - -# Apply fix for very long server names -server_names_hash_bucket_size 128; - -# Default dhparam -ssl_dhparam /etc/nginx/dhparam/dhparam.pem; - -gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; - -log_format vhost '$host $remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent"'; - -access_log off; - -{{ if (exists "/etc/nginx/proxy.conf") }} -include /etc/nginx/proxy.conf; -{{ else }} -# HTTP 1.1 support -proxy_http_version 1.1; -proxy_buffering off; -proxy_set_header Host $http_host; -proxy_set_header Upgrade $http_upgrade; -proxy_set_header Connection $proxy_connection; -proxy_set_header X-Real-IP $remote_addr; -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto; -{{ end }} - -{{ $is_default_server_https := (and (exists (printf "/etc/nginx/certs/%s.crt" $TLD)) (exists (printf "/etc/nginx/certs/%s.key" $TLD))) }} - -{{ if $is_default_server_https }} - -server { - 
listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; -} - -server { - server_name _; - listen 443 http2 ssl; - - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA; - - ssl_prefer_server_ciphers on; - ssl_session_timeout 5m; - ssl_session_cache shared:SSL:50m; - - ssl_certificate /etc/nginx/certs/{{ (printf "%s.crt" $TLD) }}; - ssl_certificate_key /etc/nginx/certs/{{ (printf "%s.key" $TLD) }}; - - root /var/www/default/htdocs; - error_page 404 /index.html; -} - -{{ else }} - -server { - listen 80 default_server; - server_name _; - root /var/www/default/htdocs; - error_page 404 /index.html; -} - -{{end}} - -{{ define "server" }} - -{{/* Parse port off of host if present ("host:port"). */}} -{{ $host_port := split (trim .Host) ":" }} -{{ $host := index $host_port 0 }} -{{ $port := when (eq (len $host_port) 2) (last $host_port) nil }} - -{{/* Get the VIRTUAL_PROTO defined by containers w/ the same vhost, falling back to "http" */}} -{{ $proto := or (first (groupByKeys .Containers "Env.VIRTUAL_PROTO")) "http" }} - -upstream {{ $host }} { -{{ range $container := .Containers }} - {{ $port := coalesce $port $container.Env.VIRTUAL_PORT (first $container.Addresses).Port "80" }} - {{ $address := or (first $container.Addresses).IP (first $container.Networks).IP }} - server {{ $address }}:{{ $port }}; -{{ end }} -} - -{{/* Get the HTTPS_METHOD defined by containers w/ the same vhost, falling back to "redirect" */}} -{{ $https_method := or (first (groupByKeys .Containers "Env.HTTPS_METHOD")) "redirect" }} - -{{/* Get the first cert name defined by containers w/ the same vhost */}} -{{ $certName := (first (groupByKeys .Containers "Env.CERT_NAME")) }} - -{{/* Get the first cors enabled defined by containers w/ the same vhost */}} -{{ $corsEnabled := or (first (groupByKeys .Containers "Env.CORS_ENABLED")) "false" }} - -{{/* Use corsDomains if defined, wildcard otherwise */}} -{{ $corsDomains := or (first (groupByKeys .Containers "Env.CORS_DOMAINS")) "*" }} - -{{/* Get the best matching cert by name for the vhost. 
*/}} -{{ $vhostCert := (closest (dir "/etc/nginx/certs") (printf "%s.crt" $host))}} - -{{/* vhostCert is actually a filename so remove any suffixes since they are added later */}} -{{ $vhostCert := replace $vhostCert ".crt" "" -1 }} -{{ $vhostCert := replace $vhostCert ".key" "" -1 }} - -{{/* Use the cert specifid on the container or fallback to the best vhost match */}} -{{ $cert := (coalesce $certName $vhostCert) }} - -{{ $is_https := (and (ne $cert "") (exists (printf "/etc/nginx/certs/%s.crt" $cert)) (exists (printf "/etc/nginx/certs/%s.key" $cert))) }} - -{{ if $is_https }} - -{{ if eq $https_method "redirect" }} -server { - server_name {{ $host }}; - listen 80; - access_log /var/log/nginx/access.log vhost; - return 301 https://$host$request_uri; -} -{{ end }} - -server { - server_name {{ $host }}; - listen 443 http2 ssl; - access_log /var/log/nginx/access.log vhost; - - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA; - - ssl_prefer_server_ciphers on; - ssl_session_timeout 5m; - ssl_session_cache shared:SSL:50m; - - ssl_certificate /etc/nginx/certs/{{ (printf "%s.crt" $cert) }}; - ssl_certificate_key /etc/nginx/certs/{{ (printf "%s.key" $cert) }}; - - {{ if (exists (printf "/etc/nginx/certs/%s.dhparam.pem" $cert)) }} - ssl_dhparam {{ printf "/etc/nginx/certs/%s.dhparam.pem" $cert }}; - {{ end }} - - add_header Strict-Transport-Security "max-age={{ or (first (groupByKeys .Containers "Env.HSTS_MAX_AGE")) "31536000" }}"; - - {{ if (exists (printf "/etc/nginx/vhost.d/%s" $host)) }} - include {{ printf "/etc/nginx/vhost.d/%s" $host }}; - {{ else if (exists "/etc/nginx/vhost.d/default") }} - include /etc/nginx/vhost.d/default; - {{ end }} - - location / { - {{ if (parseBool $corsEnabled) }} - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' '{{ $corsDomains }}'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - # - # Custom headers and headers various browsers *should* be OK with but aren't - # - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - # - # Tell client that this pre-flight info is valid for 20 days - # - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - if ($request_method = 'POST') { - add_header 'Access-Control-Allow-Origin' '{{ $corsDomains }}'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - add_header 'Access-Control-Expose-Headers' 
'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - } - if ($request_method = 'GET') { - add_header 'Access-Control-Allow-Origin' '{{ $corsDomains }}'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - } - {{ end }} - proxy_pass {{ trim $proto }}://{{ trim $host }}; - {{ if (exists (printf "/etc/nginx/htpasswd/%s" $host)) }} - auth_basic "Restricted {{ $host }}"; - auth_basic_user_file {{ (printf "/etc/nginx/htpasswd/%s" $host) }}; - {{ end }} - {{ if (exists (printf "/etc/nginx/vhost.d/%s_location" $host)) }} - include {{ printf "/etc/nginx/vhost.d/%s_location" $host}}; - {{ else if (exists "/etc/nginx/vhost.d/default_location") }} - include /etc/nginx/vhost.d/default_location; - {{ end }} - } -} - -{{ end }} - -{{ if or (not $is_https) (eq $https_method "noredirect") }} - -server { - server_name {{ $host }}; - listen 80; - access_log /var/log/nginx/access.log vhost; - - {{ if (exists (printf "/etc/nginx/vhost.d/%s" $host)) }} - include {{ printf "/etc/nginx/vhost.d/%s" $host }}; - {{ else if (exists "/etc/nginx/vhost.d/default") }} - include /etc/nginx/vhost.d/default; - {{ end }} - - location / { - {{ if (parseBool $corsEnabled) }} - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' '{{ $corsDomains }}'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - # - # Custom headers and headers various browsers *should* be OK with but aren't - # - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - # - # Tell client that this pre-flight info is valid for 20 days - # - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - if ($request_method = 'POST') { - add_header 'Access-Control-Allow-Origin' '{{ $corsDomains }}'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - } - if ($request_method = 'GET') { - add_header 'Access-Control-Allow-Origin' '{{ $corsDomains }}'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - } - {{ end }} - proxy_pass {{ trim $proto }}://{{ trim $host }}; - {{ if (exists (printf "/etc/nginx/htpasswd/%s" $host)) }} - auth_basic "Restricted {{ $host }}"; - auth_basic_user_file {{ (printf "/etc/nginx/htpasswd/%s" $host) }}; - {{ end }} - {{ if (exists (printf 
"/etc/nginx/vhost.d/%s_location" $host)) }} - include {{ printf "/etc/nginx/vhost.d/%s_location" $host}}; - {{ else if (exists "/etc/nginx/vhost.d/default_location") }} - include /etc/nginx/vhost.d/default_location; - {{ end }} - } -} -{{ end }} -{{ end }} - -{{ $explicit := whereExist $ "Env.VIRTUAL_HOST" }} -{{ range $host, $containers := groupByMulti $explicit "Env.VIRTUAL_HOST" "," }} - {{ template "server" (dict "Containers" $containers "Host" $host) }} -{{ end }} - -{{ range $project, $projContainers := groupByLabel $ "com.docker.compose.project" }} - {{ range $service, $containers := groupByLabel $projContainers "com.docker.compose.service" }} - {{ $host := printf "%s.%s.%s" $service $project $TLD }} - {{ $dottedHost := printf ".%s" $host }} - {{/* Don't create the implicit host if an explicit VIRTUAL_HOST with the same name has been defined */}} - {{ if and (eq 0 (len (where $ "Env.VIRTUAL_HOST" $host))) (eq 0 (len (where $ "Env.VIRTUAL_HOST" $dottedHost))) }} - {{ $container := first $containers }} - {{ template "server" (dict "Containers" $containers "Host" $dottedHost) }} - {{ end }} - {{ end }} -{{ end }} diff --git a/pkg/config/config.go b/pkg/config/config.go index 7220dc2..b50e615 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -3,26 +3,39 @@ package config import ( "os" "strconv" + "strings" ) -// Config holds common configuration values used across the application -type Config struct { - DomainTLD string - DNSIP string - DNSPort string +// DnsServerConfig holds common configuration values used across the application +type DnsServerConfig struct { + Domains string // Comma-separated list of domains/TLDs to handle + DNSIP string + DNSPort string } // Load loads configuration from environment variables with defaults -func Load() *Config { - return &Config{ - DomainTLD: getEnvOrDefault("DOMAIN_TLD", "loc"), - DNSIP: getEnvOrDefault("DNS_IP", "127.0.0.1"), - DNSPort: getEnvOrDefault("DNS_PORT", "19322"), +func Load() *DnsServerConfig { + return &DnsServerConfig{ + Domains: GetEnvOrDefault("HTTP_PROXY_DNS_TLDS", "loc"), + DNSIP: GetEnvOrDefault("HTTP_PROXY_DNS_TARGET_IP", "127.0.0.1"), + DNSPort: GetEnvOrDefault("HTTP_PROXY_DNS_PORT", "19322"), } } -// getEnvOrDefault returns the environment variable value or a default if not set -func getEnvOrDefault(key, defaultValue string) string { +// SplitDomains splits the comma-separated domains/TLDs string into a slice +func (c *DnsServerConfig) SplitDomains() []string { + domains := []string{} + for _, domain := range strings.Split(c.Domains, ",") { + domain = strings.TrimSpace(domain) + if domain != "" { + domains = append(domains, domain) + } + } + return domains +} + +// GetEnvOrDefault returns the environment variable value or a default if not set +func GetEnvOrDefault(key, defaultValue string) string { if value := os.Getenv(key); value != "" { return value } diff --git a/pkg/config/traefik.go b/pkg/config/traefik.go new file mode 100644 index 0000000..e92f991 --- /dev/null +++ b/pkg/config/traefik.go @@ -0,0 +1,81 @@ +package config + +// TraefikConfig represents the structure for Traefik dynamic configuration +type TraefikConfig struct { + HTTP *HTTPConfig `yaml:"http,omitempty"` + TLS *TLSConfig `yaml:"tls,omitempty"` +} + +// HTTPConfig represents HTTP configuration +type HTTPConfig struct { + Routers map[string]*Router `yaml:"routers,omitempty"` + Services map[string]*Service `yaml:"services,omitempty"` + Middlewares map[string]*Middleware `yaml:"middlewares,omitempty"` +} + +// Router represents a Traefik 
router configuration +type Router struct { + Rule string `yaml:"rule,omitempty"` + Service string `yaml:"service,omitempty"` + EntryPoints []string `yaml:"entryPoints,omitempty"` + Middlewares []string `yaml:"middlewares,omitempty"` + TLS *RouterTLSConfig `yaml:"tls,omitempty"` +} + +// RouterTLSConfig represents TLS configuration for a router +type RouterTLSConfig struct { + // Empty struct enables TLS with auto-generated certificates +} + +// Middleware represents a Traefik middleware configuration +type Middleware struct { + Headers *HeadersMiddleware `yaml:"headers,omitempty"` +} + +// HeadersMiddleware represents headers middleware configuration +type HeadersMiddleware struct { + AccessControlAllowCredentials *bool `yaml:"accessControlAllowCredentials,omitempty"` + AccessControlAllowHeaders []string `yaml:"accessControlAllowHeaders,omitempty"` + AccessControlAllowMethods []string `yaml:"accessControlAllowMethods,omitempty"` + AccessControlAllowOriginList []string `yaml:"accessControlAllowOriginList,omitempty"` + AccessControlMaxAge *int64 `yaml:"accessControlMaxAge,omitempty"` + CustomRequestHeaders map[string]string `yaml:"customRequestHeaders,omitempty"` + CustomResponseHeaders map[string]string `yaml:"customResponseHeaders,omitempty"` +} + +// Service represents a Traefik service configuration +type Service struct { + LoadBalancer *LoadBalancer `yaml:"loadBalancer,omitempty"` +} + +// LoadBalancer represents a load balancer configuration +type LoadBalancer struct { + Servers []Server `yaml:"servers,omitempty"` +} + +// Server represents a server configuration +type Server struct { + URL string `yaml:"url,omitempty"` +} + +// TLSConfig represents TLS configuration for certificates +type TLSConfig struct { + Certificates []TLSCertificate `yaml:"certificates,omitempty"` +} + +// TLSCertificate represents a TLS certificate configuration +type TLSCertificate struct { + CertFile string `yaml:"certFile,omitempty"` + KeyFile string `yaml:"keyFile,omitempty"` +} + +// NewTraefikConfig creates a new Traefik configuration +func NewTraefikConfig() *TraefikConfig { + return &TraefikConfig{ + HTTP: &HTTPConfig{ + Routers: make(map[string]*Router), + Services: make(map[string]*Service), + Middlewares: make(map[string]*Middleware), + }, + } +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 668f9fe..2e79970 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,38 +1,98 @@ package logger import ( - "log" + "log/slog" "os" + "strings" ) // Logger provides structured logging for the application type Logger struct { - *log.Logger + *slog.Logger + component string } -// New creates a new logger instance -func New(prefix string) *Logger { +// LogLevel represents the logging level +type LogLevel string + +const ( + LevelDebug LogLevel = "debug" + LevelInfo LogLevel = "info" + LevelWarn LogLevel = "warn" + LevelError LogLevel = "error" +) + +// New creates a new structured logger instance +func New(component string) *Logger { + return NewWithLevel(component, LevelInfo) +} + +// NewWithLevel creates a new logger with specified log level +func NewWithLevel(component string, level LogLevel) *Logger { + var slogLevel slog.Level + switch level { + case LevelDebug: + slogLevel = slog.LevelDebug + case LevelWarn: + slogLevel = slog.LevelWarn + case LevelError: + slogLevel = slog.LevelError + default: + slogLevel = slog.LevelInfo + } + + // Create handler with JSON output for structured logging + opts := &slog.HandlerOptions{ + Level: slogLevel, + } + + var handler slog.Handler + if 
isJSONFormat() { + handler = slog.NewJSONHandler(os.Stdout, opts) + } else { + handler = slog.NewTextHandler(os.Stdout, opts) + } + + // Create logger with component field as the first attribute + logger := slog.New(handler).With("component", component) + return &Logger{ - Logger: log.New(os.Stdout, prefix+" ", log.LstdFlags), + Logger: logger, + component: component, } } -// Info logs an info message -func (l *Logger) Info(v ...interface{}) { - l.Println("[INFO]", v) +// isJSONFormat determines if we should use JSON logging format +// based on environment variables +func isJSONFormat() bool { + format := strings.ToLower(os.Getenv("LOG_FORMAT")) + return format == "json" +} + +// Info logs an info message with optional key-value pairs +func (l *Logger) Info(msg string, args ...interface{}) { + l.Logger.Info(msg, args...) } -// Error logs an error message -func (l *Logger) Error(v ...interface{}) { - l.Println("[ERROR]", v) +// Error logs an error message with optional key-value pairs +func (l *Logger) Error(msg string, args ...interface{}) { + l.Logger.Error(msg, args...) } -// Debug logs a debug message -func (l *Logger) Debug(v ...interface{}) { - l.Println("[DEBUG]", v) +// Debug logs a debug message with optional key-value pairs +func (l *Logger) Debug(msg string, args ...interface{}) { + l.Logger.Debug(msg, args...) } -// Warn logs a warning message -func (l *Logger) Warn(v ...interface{}) { - l.Println("[WARN]", v) +// Warn logs a warning message with optional key-value pairs +func (l *Logger) Warn(msg string, args ...interface{}) { + l.Logger.Warn(msg, args...) +} + +// With returns a new logger with the given key-value pairs added to all log entries +func (l *Logger) With(args ...interface{}) *Logger { + return &Logger{ + Logger: l.Logger.With(args...), + component: l.component, + } } diff --git a/pkg/service/docker_event_service.go b/pkg/service/docker_event_service.go new file mode 100644 index 0000000..fc07b72 --- /dev/null +++ b/pkg/service/docker_event_service.go @@ -0,0 +1,232 @@ +package service + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/sparkfabrik/http-proxy/pkg/logger" + "github.com/sparkfabrik/http-proxy/pkg/utils" +) + +const ( + // DefaultDockerTimeout is the default timeout for Docker operations + DefaultDockerTimeout = 30 * time.Second +) + +// EventHandler defines the interface for processing Docker events +type EventHandler interface { + // HandleInitialScan performs initial processing of existing containers + HandleInitialScan(ctx context.Context) error + + // HandleEvent processes a Docker event + HandleEvent(ctx context.Context, event events.Message) error + + // GetName returns the service name for logging + GetName() string + + // SetDependencies injects Docker client and logger + SetDependencies(client *client.Client, logger *logger.Logger) +} + +// Service represents a Docker-event-driven service +type Service struct { + client *client.Client + logger *logger.Logger + handler EventHandler + serviceName string +} + +// NewService creates a new Docker event-driven service +func NewService(ctx context.Context, serviceName string, logLevel string, handler EventHandler) (*Service, error) { + // Initialize logger + log := logger.NewWithLevel(serviceName, logger.LogLevel(logLevel)) + + // Initialize Docker client + dockerClient, err := client.NewClientWithOpts(client.FromEnv, 
client.WithAPIVersionNegotiation()) + if err != nil { + return nil, fmt.Errorf("failed to create Docker client: %w", err) + } + + // Test Docker connection with timeout + pingCtx, cancel := context.WithTimeout(ctx, DefaultDockerTimeout) + defer cancel() + + if _, err := dockerClient.Ping(pingCtx); err != nil { + dockerClient.Close() + return nil, fmt.Errorf("failed to connect to Docker daemon: %w", err) + } + + log.Debug("Successfully connected to Docker daemon") + + // Inject dependencies into handler + handler.SetDependencies(dockerClient, log) + + return &Service{ + client: dockerClient, + logger: log, + handler: handler, + serviceName: serviceName, + }, nil +} + +// GetDockerClient returns the Docker client for use by handlers +func (s *Service) GetDockerClient() *client.Client { + return s.client +} + +// GetLogger returns the logger for use by handlers +func (s *Service) GetLogger() *logger.Logger { + return s.logger +} + +// Close cleanly shuts down the service +func (s *Service) Close() error { + return s.client.Close() +} + +// Run starts the service with signal handling and event processing +func (s *Service) Run(ctx context.Context) error { + s.logger.Info("Starting service", "name", s.serviceName) + + // Setup signal handling + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + // Start the event loop + errChan := make(chan error, 1) + go func() { + errChan <- s.runEventLoop(ctx) + }() + + // Wait for shutdown signal or error + select { + case <-sigChan: + s.logger.Info("Received shutdown signal") + if err := s.Close(); err != nil { + s.logger.Error("Error while closing service", "error", err) + } + return context.Canceled + case err := <-errChan: + if err != nil { + s.logger.Error("Service error", "error", err) + return err + } + s.logger.Info("Service completed successfully") + return nil + } +} + +// runEventLoop handles the initial scan and Docker event processing +func (s *Service) runEventLoop(ctx context.Context) error { + // Initial scan of existing containers + s.logger.Debug("Performing initial scan") + if err := s.handler.HandleInitialScan(ctx); err != nil { + s.logger.Error("Initial scan failed", "error", err) + return err + } + + // Listen for Docker events + eventsChan, errChan := s.client.Events(ctx, events.ListOptions{ + Filters: filters.NewArgs( + filters.Arg("type", "container"), + filters.Arg("event", "start"), + filters.Arg("event", "die"), + ), + }) + + for { + select { + case <-ctx.Done(): + return nil + case event := <-eventsChan: + s.processEventSafely(ctx, event) + case err := <-errChan: + if err != nil { + s.logger.Error("Docker events error", "error", err) + // Reconnect and continue + time.Sleep(5 * time.Second) + eventsChan, errChan = s.client.Events(ctx, events.ListOptions{ + Filters: filters.NewArgs( + filters.Arg("type", "container"), + filters.Arg("event", "start"), + filters.Arg("event", "die"), + ), + }) + } + } + } +} + +// processEventSafely wraps event processing with proper error handling and logging +func (s *Service) processEventSafely(ctx context.Context, event events.Message) { + // Respect context cancellation + select { + case <-ctx.Done(): + s.logger.Debug("Context cancelled, skipping event processing") + return + default: + } + + if err := s.handler.HandleEvent(ctx, event); err != nil { + s.logger.Error("Failed to process event", + "error", err, + "action", event.Action, + "container_id", utils.FormatDockerID(event.Actor.ID)) + } +} + +// RunWithSignalHandling is a 
convenience function that sets up a complete service lifecycle +func RunWithSignalHandling(ctx context.Context, serviceName string, logLevel string, handler EventHandler) error { + service, err := NewService(ctx, serviceName, logLevel, handler) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to initialize %s: %v\n", serviceName, err) + os.Exit(1) + } + defer service.Close() + + serviceCtx, cancel := context.WithCancel(ctx) + defer cancel() + + // Setup signal handling + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + // Start the service + errChan := make(chan error, 1) + go func() { + errChan <- service.Run(serviceCtx) + }() + + // Wait for shutdown signal or error + select { + case err := <-errChan: + if err != nil && err != context.Canceled { + service.GetLogger().Error("Service failed", "error", err) + os.Exit(1) + } + service.GetLogger().Info("Service completed successfully") + case sig := <-sigChan: + service.GetLogger().Info("Received shutdown signal", "signal", sig) + cancel() + + // Wait for graceful shutdown with timeout + select { + case err := <-errChan: + if err != nil && err != context.Canceled { + service.GetLogger().Error("Error during shutdown", "error", err) + } + case <-time.After(10 * time.Second): + service.GetLogger().Warn("Shutdown timeout, forcing exit") + } + } + + service.GetLogger().Info("Shutting down gracefully") + return nil +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go new file mode 100644 index 0000000..0cf5a20 --- /dev/null +++ b/pkg/utils/utils.go @@ -0,0 +1,274 @@ +package utils + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" +) + +// RetryConfig configures retry behavior for operations +type RetryConfig struct { + // MaxAttempts is the maximum number of attempts (including the first one) + MaxAttempts int + // InitialDelay is the delay before the first retry + InitialDelay time.Duration + // MaxDelay is the maximum delay between retries + MaxDelay time.Duration + // BackoffMultiplier is the factor by which the delay increases after each retry + BackoffMultiplier float64 +} + +// DefaultRetryConfig returns a sensible default retry configuration for Docker operations +func DefaultRetryConfig() RetryConfig { + return RetryConfig{ + MaxAttempts: 3, + InitialDelay: 100 * time.Millisecond, + MaxDelay: 2 * time.Second, + BackoffMultiplier: 2.0, + } +} + +// RetryableFunc is a function that can be retried. It should return an error if the operation +// should be retried, or nil if successful. The context can be used to cancel the operation. 
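+//
+// A minimal sketch of passing a RetryableFunc to Retry (defined below),
+// where dockerClient is assumed to be an already initialized *client.Client
+// and the Docker ping is only an illustrative operation:
+//
+//	err := Retry(ctx, DefaultRetryConfig(), func(ctx context.Context) error {
+//		_, pingErr := dockerClient.Ping(ctx)
+//		return pingErr
+//	})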
+type RetryableFunc func(ctx context.Context) error + +// Retry executes a function with retry logic and exponential backoff +// It respects context cancellation and returns the last error encountered +func Retry(ctx context.Context, config RetryConfig, fn RetryableFunc) error { + if config.MaxAttempts <= 0 { + config.MaxAttempts = 1 + } + + var lastErr error + delay := config.InitialDelay + + for attempt := 1; attempt <= config.MaxAttempts; attempt++ { + // Check if context is cancelled before attempting + if err := CheckContext(ctx); err != nil { + return err + } + + lastErr = fn(ctx) + if lastErr == nil { + return nil // Success + } + + // Don't sleep after the last attempt + if attempt == config.MaxAttempts { + break + } + + // Calculate next delay with exponential backoff + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // Continue to next attempt + } + + // Update delay for next iteration + delay = time.Duration(float64(delay) * config.BackoffMultiplier) + if delay > config.MaxDelay { + delay = config.MaxDelay + } + } + + return fmt.Errorf("operation failed after %d attempts: %w", config.MaxAttempts, lastErr) +} + +// RetryContainerInspect wraps ContainerInspect with retry logic +func RetryContainerInspect(ctx context.Context, dockerClient *client.Client, containerID string) (types.ContainerJSON, error) { + var result types.ContainerJSON + + err := Retry(ctx, DefaultRetryConfig(), func(ctx context.Context) error { + var err error + result, err = dockerClient.ContainerInspect(ctx, containerID) + return err + }) + + return result, err +} + +// RetryContainerList wraps ContainerList with retry logic +func RetryContainerList(ctx context.Context, dockerClient *client.Client, options container.ListOptions) ([]types.Container, error) { + var result []types.Container + + err := Retry(ctx, DefaultRetryConfig(), func(ctx context.Context) error { + var err error + result, err = dockerClient.ContainerList(ctx, options) + return err + }) + + return result, err +} + +// RetryNetworkConnect wraps NetworkConnect with retry logic +func RetryNetworkConnect(ctx context.Context, dockerClient *client.Client, networkID, containerName string, config *network.EndpointSettings) error { + return Retry(ctx, DefaultRetryConfig(), func(ctx context.Context) error { + return dockerClient.NetworkConnect(ctx, networkID, containerName, config) + }) +} + +// RetryNetworkInspect wraps NetworkInspect with retry logic +func RetryNetworkInspect(ctx context.Context, dockerClient *client.Client, networkID string, options network.InspectOptions) (network.Inspect, error) { + var result network.Inspect + + err := Retry(ctx, DefaultRetryConfig(), func(ctx context.Context) error { + var err error + result, err = dockerClient.NetworkInspect(ctx, networkID, options) + return err + }) + + return result, err +} + +// FormatDockerID returns a shortened version of a Docker ID for logging +// This can be used for container IDs, network IDs, or any Docker resource ID +func FormatDockerID(id string) string { + if len(id) >= 12 { + return id[:12] + } + return id +} + +// GetDockerEnvVar extracts an environment variable value from a Docker container's env slice +// This is commonly used when inspecting containers to get specific environment variables +func GetDockerEnvVar(env []string, key string) string { + prefix := key + "=" + for _, e := range env { + if strings.HasPrefix(e, prefix) { + return strings.TrimPrefix(e, prefix) + } + } + return "" +} + +// ValidLogLevels contains the set of valid log levels 
+var ValidLogLevels = map[string]bool{ + "debug": true, + "info": true, + "warn": true, + "error": true, +} + +// ValidateLogLevel checks if the provided log level is valid +func ValidateLogLevel(level string) error { + if !ValidLogLevels[level] { + return fmt.Errorf("invalid log level %q, must be one of: debug, info, warn, error", level) + } + return nil +} + +// CheckContext returns an error if the context is cancelled +// This is useful for long-running operations that should respect cancellation +func CheckContext(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + return nil + } +} + +// ShouldManageContainer checks if a container should be managed based on dinghy env vars or traefik labels +// Returns true if the container has VIRTUAL_HOST environment variable or traefik labels +func ShouldManageContainer(env []string, labels map[string]string) bool { + // Check for dinghy VIRTUAL_HOST environment variable + if GetDockerEnvVar(env, "VIRTUAL_HOST") != "" { + return true + } + + // Check for traefik labels (any label starting with "traefik.") + for label := range labels { + if strings.HasPrefix(label, "traefik.") { + return true + } + } + + return false +} + +// GetRunningContainersInNetwork returns all running containers connected to the specified network, +// optionally excluding a container by name +func GetRunningContainersInNetwork(ctx context.Context, dockerClient *client.Client, networkID, excludeContainerName string) ([]types.Container, error) { + // Get all containers with retry logic + containers, err := RetryContainerList(ctx, dockerClient, container.ListOptions{All: true}) + if err != nil { + return nil, fmt.Errorf("failed to list containers: %w", err) + } + + var networkContainers []types.Container + for _, cont := range containers { + // Skip non-running containers + if cont.State != "running" { + continue + } + + // Skip if this is the excluded container + if excludeContainerName != "" && len(cont.Names) > 0 { + containerName := strings.TrimPrefix(cont.Names[0], "/") + if containerName == excludeContainerName { + continue + } + } + + // Check if this container is connected to the network + inspect, err := RetryContainerInspect(ctx, dockerClient, cont.ID) + if err != nil { + continue // Skip containers we can't inspect + } + + isConnected := false + for _, networkData := range inspect.NetworkSettings.Networks { + if networkData.NetworkID == networkID { + isConnected = true + break + } + } + + if isConnected { + networkContainers = append(networkContainers, cont) + } + } + + return networkContainers, nil +} + +// HasManageableContainersInNetwork checks if a network has any manageable containers, +// optionally excluding a specific container +func HasManageableContainersInNetwork(ctx context.Context, dockerClient *client.Client, networkID, excludeContainerName string) (bool, error) { + containers, err := GetRunningContainersInNetwork(ctx, dockerClient, networkID, excludeContainerName) + if err != nil { + return false, err + } + + for _, cont := range containers { + // Inspect the container to get env vars and labels + inspect, err := RetryContainerInspect(ctx, dockerClient, cont.ID) + if err != nil { + continue // Skip containers we can't inspect + } + + if ShouldManageContainer(inspect.Config.Env, inspect.Config.Labels) { + return true, nil + } + } + + return false, nil +} + +// SliceToSet converts a slice of strings to a map[string]struct{} for O(1) lookups +// This is useful for creating sets from slices where you only need to 
check existence +func SliceToSet(slice []string) map[string]struct{} { + set := make(map[string]struct{}) + for _, item := range slice { + set[item] = struct{}{} + } + return set +} diff --git a/reload-nginx b/reload-nginx deleted file mode 100755 index 6342c18..0000000 --- a/reload-nginx +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -e - -name=${CONTAINER_NAME:-dinghy_http_proxy} - -# Join any networks we haven't joined yet, so that we can talk to containers on -# those networks. This allows us to talk to containers created with -# docker-compose v2 configs. -/app/join-networks -container-name $name - -# Now that we can reach these other hosts, reload nginx. Order is important -# here, as nginx errors on startup if it can't resolve any of the specified -# reverse proxy hosts. -nginx -s reload diff --git a/test/test-certs.sh b/test/test-certs.sh new file mode 100755 index 0000000..7766318 --- /dev/null +++ b/test/test-certs.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +# Test script to verify certificate auto-detection functionality +# This script simulates the certificate setup and tests that the entrypoint script works correctly + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +TEMP_CERTS_DIR="${PROJECT_ROOT}/test/temp-certs" +TEMP_DYNAMIC_DIR="${PROJECT_ROOT}/test/temp-dynamic" + +echo "๐Ÿงช Testing certificate auto-detection..." + +# Clean up any previous test artifacts +rm -rf "${TEMP_CERTS_DIR}" "${TEMP_DYNAMIC_DIR}" +mkdir -p "${TEMP_CERTS_DIR}" "${TEMP_DYNAMIC_DIR}" + +# Create mock certificate files +echo "๐Ÿ“ Creating test certificate files..." +cat > "${TEMP_CERTS_DIR}/wildcard.loc.pem" << 'EOF' +-----BEGIN CERTIFICATE----- +MOCK_CERTIFICATE_DATA_FOR_TESTING +-----END CERTIFICATE----- +EOF + +cat > "${TEMP_CERTS_DIR}/wildcard.loc-key.pem" << 'EOF' +-----BEGIN PRIVATE KEY----- +MOCK_PRIVATE_KEY_DATA_FOR_TESTING +-----END PRIVATE KEY----- +EOF + +# Create test files for other formats +cat > "${TEMP_CERTS_DIR}/example.crt" << 'EOF' +-----BEGIN CERTIFICATE----- +MOCK_CERTIFICATE_DATA_FOR_TESTING_CRT +-----END CERTIFICATE----- +EOF + +cat > "${TEMP_CERTS_DIR}/example-key.pem" << 'EOF' +-----BEGIN PRIVATE KEY----- +MOCK_PRIVATE_KEY_DATA_FOR_TESTING_CRT +-----END PRIVATE KEY----- +EOF + +echo "๐Ÿ“ Test certificate files created:" +ls -la "${TEMP_CERTS_DIR}" + +# Test the entrypoint script logic (simulate it) +echo "๐Ÿ” Testing certificate detection logic..." + +# Simulate the entrypoint script behavior +CERTS_DIR="${TEMP_CERTS_DIR}" +DYNAMIC_DIR="${TEMP_DYNAMIC_DIR}" +TLS_CONFIG_FILE="${DYNAMIC_DIR}/auto-tls.yml" + +# Look for certificate files (both .pem and .crt extensions) +cert_files=$(find "${CERTS_DIR}" -name "*.pem" -o -name "*.crt" | grep -v "\-key" | head -10) + +if [ -z "$cert_files" ]; then + echo "โŒ ERROR: No certificate files found!" 
+ exit 1 +fi + +echo "โœ… Found certificate files: $cert_files" + +# Start TLS configuration +cat > "${TLS_CONFIG_FILE}" << 'EOF' +# Auto-generated TLS configuration from user certificates +tls: + certificates: +EOF + +# Process each certificate file +for cert_file in $cert_files; do + # Get the basename without extension + cert_base=$(basename "$cert_file" .pem) + cert_base=$(basename "$cert_base" .crt) + + # Look for corresponding key file + key_file="" + for ext in pem crt key; do + possible_key="${CERTS_DIR}/${cert_base}-key.${ext}" + if [ -f "$possible_key" ]; then + key_file="$possible_key" + break + fi + + possible_key="${CERTS_DIR}/${cert_base}.key" + if [ -f "$possible_key" ]; then + key_file="$possible_key" + break + fi + done + + if [ -n "$key_file" ]; then + # Extract domains from certificate (simulate the new logic) + domains=$(openssl x509 -in "$cert_file" -noout -text 2>/dev/null | \ + grep -A1 "Subject Alternative Name" | \ + grep "DNS:" | \ + sed 's/.*DNS://g' | \ + sed 's/,.*DNS:/ /g' | \ + sed 's/,.*//g' | \ + tr -d ' ') + + if [ -n "$domains" ]; then + echo " - Adding certificate: $(basename "$cert_file") for domains: $domains" + else + echo " - Adding certificate: $(basename "$cert_file") (auto-detect domains)" + fi + + cat >> "${TLS_CONFIG_FILE}" << EOF + - certFile: ${cert_file} + keyFile: ${key_file} +EOF + else + echo " - Warning: No key file found for certificate $(basename "$cert_file")" + fi +done + +echo "๐Ÿ“‹ Generated TLS configuration:" +cat "${TLS_CONFIG_FILE}" + +# Verify the configuration looks correct +if grep -q "certFile:" "${TLS_CONFIG_FILE}" && grep -q "keyFile:" "${TLS_CONFIG_FILE}"; then + echo "โœ… TLS configuration generated successfully!" + echo "โœ… Certificate auto-detection test PASSED!" +else + echo "โŒ TLS configuration appears invalid!" + echo "โŒ Certificate auto-detection test FAILED!" + exit 1 +fi + +# Clean up +echo "๐Ÿงน Cleaning up test files..." +rm -rf "${TEMP_CERTS_DIR}" "${TEMP_DYNAMIC_DIR}" + +echo "๐ŸŽ‰ All tests completed successfully!" diff --git a/test/test.sh b/test/test.sh new file mode 100755 index 0000000..4590e20 --- /dev/null +++ b/test/test.sh @@ -0,0 +1,650 @@ +#!/bin/bash + +# HTTP Proxy Integration Test Script +# Tests the refactored dinghy-layer and join-networks services + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging function +log() { + echo -e "${BLUE}[$(date '+%H:%M:%S')]${NC} $1" +} + +success() { + echo -e "${GREEN}โœ“${NC} $1" +} + +error() { + echo -e "${RED}โœ—${NC} $1" +} + +warning() { + echo -e "${YELLOW}โš ${NC} $1" +} + +# Test configuration +TEST_DOMAIN="spark.loc" +HTTP_PORT="80" + +# Container configurations +TRAEFIK_CONTAINER="test-traefik-app" +VIRTUAL_HOST_CONTAINER="test-virtual-host-app" +VIRTUAL_HOST_PORT_CONTAINER="test-virtual-host-port-app" +MULTI_VIRTUAL_HOST_CONTAINER="test-multi-virtual-host-app" + +# Hostname configurations for DNS testing +TRAEFIK_HOSTNAME="app1.${TEST_DOMAIN}" +VIRTUAL_HOST_HOSTNAME="app2.${TEST_DOMAIN}" +VIRTUAL_HOST_PORT_HOSTNAME="app3.${TEST_DOMAIN}" +MULTI_VIRTUAL_HOST_HOSTNAME1="app4.${TEST_DOMAIN}" +MULTI_VIRTUAL_HOST_HOSTNAME2="app5.${TEST_DOMAIN}" + +# Wait function +wait_for_container() { + local container_name=$1 + local max_attempts=30 + local attempt=1 + + log "Waiting for container ${container_name} to be ready..." 
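+    # Poll until the container appears in `docker ps` and answers an HTTP
+    # request on localhost from inside the container (checked every 2 seconds,
+    # up to max_attempts).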
+ + while [ $attempt -le $max_attempts ]; do + if docker ps --format "table {{.Names}}" | grep -q "^${container_name}$"; then + if docker exec "$container_name" curl -f -s http://localhost >/dev/null 2>&1; then + success "Container ${container_name} is ready" + return 0 + fi + fi + + sleep 2 + attempt=$((attempt + 1)) + done + + error "Container ${container_name} failed to become ready" + return 1 +} + +# HTTP test function +test_http_access() { + local hostname=$1 + local max_attempts=10 + local attempt=1 + + log "Testing HTTP access to ${hostname}..." + + while [ $attempt -le $max_attempts ]; do + if curl -f -s -H "Host: ${hostname}" http://localhost:${HTTP_PORT} >/dev/null 2>&1; then + success "HTTP access to ${hostname} works" + return 0 + fi + + sleep 3 + attempt=$((attempt + 1)) + done + + error "HTTP access to ${hostname} failed after ${max_attempts} attempts" + return 1 +} + +# Test DNS functionality +test_dns() { + local hostname="$1" + local expected_ip="127.0.0.1" + local dns_port="19322" + local should_resolve="$2" # Optional parameter: "should_resolve" or "should_not_resolve" + + # Default to should resolve if not specified + if [ -z "$should_resolve" ]; then + should_resolve="should_resolve" + fi + + # Check if dig is available + if ! command -v dig >/dev/null 2>&1; then + log "dig command not available, skipping DNS test for ${hostname}" + return 0 + fi + + log "Testing DNS resolution for ${hostname}..." + + # Test DNS resolution using dig with timeout and error handling + local result + local dig_exit_code + + # Capture both output and exit code + result=$(dig @127.0.0.1 -p $dns_port "$hostname" +short +time=2 +tries=1 2>/dev/null) + dig_exit_code=$? + + if [ "$should_resolve" = "should_not_resolve" ]; then + # This domain should NOT resolve + # For non-configured domains, the DNS server should either: + # 1. Return empty response (silently drop) + # 2. Return NXDOMAIN + # 3. Timeout (if the server drops the query) + if [ $dig_exit_code -ne 0 ] || [ -z "$result" ] || [[ "$result" == *"timed out"* ]] || [[ "$result" == *"connection refused"* ]]; then + success "DNS correctly rejected ${hostname} (not configured)" + return 0 + else + error "DNS incorrectly resolved ${hostname} to ${result} (should have been rejected)" + return 1 + fi + else + # This domain SHOULD resolve + if [ $dig_exit_code -ne 0 ]; then + error "DNS resolution failed for ${hostname} (exit code: ${dig_exit_code})" + return 1 + fi + + if [ -z "$result" ] || [[ "$result" == *"timed out"* ]] || [[ "$result" == *"connection refused"* ]]; then + error "DNS resolution failed for ${hostname} (no response or timeout)" + return 1 + fi + + # Clean up the result (remove any trailing dots or whitespace) + result=$(echo "$result" | tr -d '\n' | sed 's/\.$//') + + if [ "$result" = "$expected_ip" ]; then + success "DNS resolution for ${hostname} works (resolved to ${result})" + return 0 + else + error "DNS resolution for ${hostname} returned unexpected result: ${result} (expected ${expected_ip})" + return 1 + fi + fi +} + +# Check if DNS server is running and accessible +check_dns_server() { + local dns_port="19322" + local max_attempts=10 + local attempt=1 + + log "Checking if DNS server is accessible..." 
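+    # Query the local DNS service with dig until it answers; a "connection
+    # refused" reply means the server is not up yet, any other failure counts
+    # as "responding but the query failed".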
+ + while [ $attempt -le $max_attempts ]; do + # Try to query a simple domain - we don't care about the result, just that the server responds + if dig @127.0.0.1 -p $dns_port "test.spark.loc" +short +time=2 +tries=1 >/dev/null 2>&1; then + success "DNS server is accessible on port ${dns_port}" + return 0 + fi + + # Check if it's a connection refused (server not running) vs timeout (server running but not responding) + local test_result + test_result=$(dig @127.0.0.1 -p $dns_port "test.spark.loc" +short +time=1 +tries=1 2>&1) + + if [[ "$test_result" == *"connection refused"* ]]; then + log "DNS server not yet available (connection refused), waiting... (attempt ${attempt}/${max_attempts})" + else + log "DNS server responding but query failed, waiting... (attempt ${attempt}/${max_attempts})" + fi + + sleep 2 + attempt=$((attempt + 1)) + done + + error "DNS server is not accessible after ${max_attempts} attempts" + return 1 +} + +# Test all DNS functionality +test_all_dns() { + log "Testing DNS server functionality..." + log "==================================" + + # First, check if DNS server is accessible + if ! check_dns_server; then + error "DNS server is not accessible, skipping DNS tests" + return 1 + fi + + local dns_tests_passed=0 + local dns_tests_total=0 + + # Test 1: Basic hostname resolution (configured domains should resolve) + log "Test 1: Testing configured domain resolution..." + for hostname in "$TRAEFIK_HOSTNAME" "$VIRTUAL_HOST_HOSTNAME" "$VIRTUAL_HOST_PORT_HOSTNAME" "$MULTI_VIRTUAL_HOST_HOSTNAME1" "$MULTI_VIRTUAL_HOST_HOSTNAME2"; do + dns_tests_total=$((dns_tests_total + 1)) + if test_dns "$hostname" "should_resolve"; then + dns_tests_passed=$((dns_tests_passed + 1)) + fi + done + + # Test 2: TLD support - any subdomain of configured TLD should resolve + log "Test 2: Testing TLD support (any .spark.loc domain should resolve)..." + + local tld_test_domains=( + "test.spark.loc" + "example.spark.loc" + "api.test.spark.loc" + ) + + for hostname in "${tld_test_domains[@]}"; do + dns_tests_total=$((dns_tests_total + 1)) + if test_dns "$hostname" "should_resolve"; then + dns_tests_passed=$((dns_tests_passed + 1)) + fi + done + + # Test 3: Negative tests - domains that should NOT resolve + log "Test 3: Testing rejection of non-configured domains..." + + local negative_test_domains=( + "example.com" + "test.org" + "service.local" + "wrong.tld" + ) + + for hostname in "${negative_test_domains[@]}"; do + dns_tests_total=$((dns_tests_total + 1)) + if test_dns "$hostname" "should_not_resolve"; then + dns_tests_passed=$((dns_tests_passed + 1)) + fi + done + + # Test 4: Edge cases + log "Test 4: Testing edge cases..." + + # Test malformed domains (these should not resolve) + local edge_case_domains=( + "." + ".loc" + ) + + for hostname in "${edge_case_domains[@]}"; do + dns_tests_total=$((dns_tests_total + 1)) + if test_dns "$hostname" "should_not_resolve"; then + dns_tests_passed=$((dns_tests_passed + 1)) + fi + done + + # Test valid DNS format with trailing dot (should resolve) + log "Testing valid DNS format with trailing dot..." + dns_tests_total=$((dns_tests_total + 1)) + if test_dns "spark.loc." "should_resolve"; then + dns_tests_passed=$((dns_tests_passed + 1)) + fi + + log "DNS Test Results: ${dns_tests_passed}/${dns_tests_total} tests passed" + + if [ "$dns_tests_passed" -eq "$dns_tests_total" ]; then + success "All DNS tests passed!" 
+        return 0
+    else
+        error "Some DNS tests failed (${dns_tests_passed}/${dns_tests_total})"
+        return 1
+    fi
+}
+
+# Test DNS server with different configurations using docker compose
+test_dns_configurations() {
+    log "Testing DNS server with different configurations..."
+    log "=================================================="
+
+    local original_dir=$(pwd)
+    cd "$(dirname "$0")/.."
+
+    # Test configuration 1: Single TLD (loc)
+    log "Configuration Test 1: Single TLD (loc)"
+    test_with_dns_config "loc" "test.loc,example.loc" "example.com,test.org"
+
+    # Test configuration 2: Multiple TLDs (loc,dev)
+    log "Configuration Test 2: Multiple TLDs (loc,dev)"
+    test_with_dns_config "loc,dev" "test.loc,example.dev" "example.com,test.org"
+
+    # Test configuration 3: Specific domains (spark.loc,spark.dev)
+    log "Configuration Test 3: Specific domains (spark.loc,spark.dev)"
+    test_with_dns_config "spark.loc,spark.dev" "spark.loc,api.spark.loc,spark.dev,api.spark.dev" "other.loc,example.com"
+
+    cd "$original_dir"
+
+    # Restore original DNS configuration
+    unset HTTP_PROXY_DNS_TLDS
+    docker compose up -d --quiet-pull dns 2>/dev/null || true
+    sleep 3
+
+    success "DNS configuration tests completed"
+}
+
+# Helper function to test with a specific DNS configuration
+test_with_dns_config() {
+    local config="$1"
+    local should_resolve="$2"
+    local should_not_resolve="$3"
+
+    log "Testing with HTTP_PROXY_DNS_TLDS='${config}'"
+
+    # Set environment variable and restart DNS service
+    export HTTP_PROXY_DNS_TLDS="$config"
+    docker compose up -d --quiet-pull dns 2>/dev/null || true
+
+    # Wait for DNS service to be ready
+    sleep 5
+
+    if ! check_dns_server; then
+        warning "DNS server not accessible for config '${config}', skipping"
+        return 1
+    fi
+
+    local config_tests_passed=0
+    local config_tests_total=0
+
+    # Test domains that should resolve
+    IFS=',' read -ra RESOLVE_DOMAINS <<< "$should_resolve"
+    for domain in "${RESOLVE_DOMAINS[@]}"; do
+        config_tests_total=$((config_tests_total + 1))
+        if test_dns "$domain" "should_resolve" >/dev/null 2>&1; then
+            config_tests_passed=$((config_tests_passed + 1))
+        fi
+    done
+
+    # Test domains that should NOT resolve
+    IFS=',' read -ra NO_RESOLVE_DOMAINS <<< "$should_not_resolve"
+    for domain in "${NO_RESOLVE_DOMAINS[@]}"; do
+        config_tests_total=$((config_tests_total + 1))
+        if test_dns "$domain" "should_not_resolve" >/dev/null 2>&1; then
+            config_tests_passed=$((config_tests_passed + 1))
+        fi
+    done
+
+    log "Config test results for '${config}': ${config_tests_passed}/${config_tests_total}"
+
+    if [ "$config_tests_passed" -eq "$config_tests_total" ]; then
+        success "Configuration test passed for: ${config}"
+        return 0
+    else
+        warning "Configuration test failed for: ${config} (${config_tests_passed}/${config_tests_total})"
+        return 1
+    fi
+}
+
+# Test DNS on a specific port
+test_dns_on_port() {
+    local hostname="$1"
+    local port="$2"
+    local should_resolve="$3"
+    local expected_ip="127.0.0.1"
+
+    # Check if dig is available
+    if ! command -v dig >/dev/null 2>&1; then
+        return 0
+    fi
+
+    # Test DNS resolution using dig on specific port with error handling
+    local result
+    local dig_exit_code
+
+    # Capture both output and exit code
+    result=$(dig @127.0.0.1 -p "$port" "$hostname" +short +time=2 +tries=1 2>/dev/null)
+    dig_exit_code=$?
+
+    if [ "$should_resolve" = "should_not_resolve" ]; then
+        # This domain should NOT resolve
+        if [ $dig_exit_code -ne 0 ] || [ -z "$result" ] || [[ "$result" == *"timed out"* ]] || [[ "$result" == *"connection refused"* ]]; then
+            return 0
+        else
+            return 1
+        fi
+    else
+        # This domain should resolve
+        if [ $dig_exit_code -eq 0 ] && [ -n "$result" ] && [ "$result" = "$expected_ip" ]; then
+            return 0
+        else
+            return 1
+        fi
+    fi
+}
+
+cleanup() {
+    log "Cleaning up test containers..."
+
+    docker rm -f "$TRAEFIK_CONTAINER" 2>/dev/null || true
+    docker rm -f "$VIRTUAL_HOST_CONTAINER" 2>/dev/null || true
+    docker rm -f "$VIRTUAL_HOST_PORT_CONTAINER" 2>/dev/null || true
+    docker rm -f "$MULTI_VIRTUAL_HOST_CONTAINER" 2>/dev/null || true
+
+    success "Cleanup completed"
+}
+
+# Full stack cleanup and rebuild
+full_cleanup_and_rebuild() {
+    log "Full cleanup and rebuild of HTTP proxy stack..."
+    log "==============================================="
+
+    # Stop and remove all containers from the stack
+    log "Stopping and removing all stack containers..."
+    cd "$(dirname "$0")/.."
+    docker compose down --volumes --remove-orphans 2>/dev/null || true
+
+    # Remove any dangling containers that might interfere
+    docker rm -f "$TRAEFIK_CONTAINER" 2>/dev/null || true
+    docker rm -f "$VIRTUAL_HOST_CONTAINER" 2>/dev/null || true
+    docker rm -f "$VIRTUAL_HOST_PORT_CONTAINER" 2>/dev/null || true
+    docker rm -f "$MULTI_VIRTUAL_HOST_CONTAINER" 2>/dev/null || true
+
+    # Remove any dangling images from previous builds (optional, but ensures clean state)
+    log "Cleaning up dangling images..."
+    docker image prune -f >/dev/null 2>&1 || true
+
+    # Rebuild all images from scratch
+    log "Building all images from scratch..."
+    docker compose build --pull
+
+    success "Full cleanup and rebuild completed"
+}
+
+# Main test function
+main() {
+    log "Starting HTTP Proxy Integration Tests"
+    log "====================================="
+
+    # Check if we should skip rebuild
+    if [ "$1" = "--no-rebuild" ]; then
+        log "Skipping full rebuild (--no-rebuild flag detected)"
+        # Just cleanup test containers
+        cleanup
+    else
+        # Full cleanup and rebuild to ensure clean state
+        full_cleanup_and_rebuild
+    fi
+
+    # Step 1: Start the HTTP proxy stack
+    log "Starting HTTP proxy stack..."
+    cd "$(dirname "$0")/.."
+    docker compose up -d
+
+    # Wait for services to be ready
+    log "Waiting for proxy services to start..."
+    sleep 10
+
+    # Step 2: Create test containers
+    log "Creating test containers..."
+
+    # Container 1: Traefik labels
+    log "Creating container with Traefik labels: ${TRAEFIK_CONTAINER}"
+    docker run -d \
+        --name "$TRAEFIK_CONTAINER" \
+        --label "traefik.enable=true" \
+        --label "traefik.http.routers.${TRAEFIK_CONTAINER}.rule=Host(\`app1.${TEST_DOMAIN}\`)" \
+        --label "traefik.http.services.${TRAEFIK_CONTAINER}.loadbalancer.server.port=80" \
+        --network http-proxy_default \
+        nginx:alpine
+
+    # Container 2: VIRTUAL_HOST only
+    log "Creating container with VIRTUAL_HOST: ${VIRTUAL_HOST_CONTAINER}"
+    docker run -d \
+        --name "$VIRTUAL_HOST_CONTAINER" \
+        --env "VIRTUAL_HOST=app2.${TEST_DOMAIN}" \
+        nginx:alpine
+
+    # Container 3: VIRTUAL_HOST and VIRTUAL_PORT
+    log "Creating container with VIRTUAL_HOST and VIRTUAL_PORT: ${VIRTUAL_HOST_PORT_CONTAINER}"
+    docker run -d \
+        --name "$VIRTUAL_HOST_PORT_CONTAINER" \
+        --env "VIRTUAL_HOST=app3.${TEST_DOMAIN}" \
+        --env "VIRTUAL_PORT=80" \
+        nginx:alpine
+
+    # Container 4: Multiple comma-separated VIRTUAL_HOST values
+    log "Creating container with multiple VIRTUAL_HOST values: ${MULTI_VIRTUAL_HOST_CONTAINER}"
+    docker run -d \
+        --name "$MULTI_VIRTUAL_HOST_CONTAINER" \
+        --env "VIRTUAL_HOST=app4.${TEST_DOMAIN},app5.${TEST_DOMAIN}" \
+        --env "VIRTUAL_PORT=80" \
+        nginx:alpine
+
+    # Wait for containers to be ready
+    wait_for_container "$TRAEFIK_CONTAINER"
+    wait_for_container "$VIRTUAL_HOST_CONTAINER"
+    wait_for_container "$VIRTUAL_HOST_PORT_CONTAINER"
+    wait_for_container "$MULTI_VIRTUAL_HOST_CONTAINER"
+
+    # Give some time for the proxy to detect and configure routes
+    log "Waiting for proxy configuration to propagate..."
+    sleep 15
+
+    # Step 3: Test HTTP access
+    log "Testing HTTP access to all containers..."
+    log "========================================"
+
+    local test_passed=0
+    local test_total=5
+
+    # Test Traefik labeled container
+    if test_http_access "app1.${TEST_DOMAIN}"; then
+        test_passed=$((test_passed + 1))
+    fi
+
+    # Test VIRTUAL_HOST container
+    if test_http_access "app2.${TEST_DOMAIN}"; then
+        test_passed=$((test_passed + 1))
+    fi
+
+    # Test VIRTUAL_HOST + VIRTUAL_PORT container
+    if test_http_access "app3.${TEST_DOMAIN}"; then
+        test_passed=$((test_passed + 1))
+    fi
+
+    # Test multi-VIRTUAL_HOST container (first hostname)
+    if test_http_access "app4.${TEST_DOMAIN}"; then
+        test_passed=$((test_passed + 1))
+    fi
+
+    # Test multi-VIRTUAL_HOST container (second hostname)
+    if test_http_access "app5.${TEST_DOMAIN}"; then
+        test_passed=$((test_passed + 1))
+    fi
+
+    # Show detailed curl responses for debugging
+    log "Detailed HTTP responses:"
+    log "========================"
+
+    for app in app1 app2 app3 app4 app5; do
+        log "Testing ${app}.${TEST_DOMAIN}:"
+        # Check curl's exit status directly; piping straight into head would mask curl failures
+        if response=$(curl -f -s -H "Host: ${app}.${TEST_DOMAIN}" http://localhost:${HTTP_PORT}); then
+            echo "$response" | head -5
+            success "Response received from ${app}.${TEST_DOMAIN}"
+        else
+            error "No response from ${app}.${TEST_DOMAIN}"
+        fi
+        echo
+    done
+
+    # Show container logs for debugging
+    log "Container logs for debugging:"
+    log "============================="
+
+    echo "Dinghy Layer logs:"
+    docker compose logs --tail=10 dinghy_layer 2>/dev/null || true
+    echo
+
+    echo "Join Networks logs:"
+    docker compose logs --tail=10 join_networks 2>/dev/null || true
+    echo
+
+    echo "DNS Server logs:"
+    docker compose logs --tail=10 dns 2>/dev/null || true
+    echo
+
+    # Step 4: Test DNS functionality
+    if ! test_all_dns; then
+        error "DNS tests failed"
+        return 1
+    fi
+
+    # Step 5: Test DNS server configurations
+    log "Step 5: Testing DNS server configurations..."
+    log "============================================="
+
+    # Only run configuration tests if we have dig available
+    if command -v dig >/dev/null 2>&1; then
+        if ! test_dns_configurations; then
+            warning "DNS configuration tests failed, but continuing..."
+            # Don't fail the entire test suite for configuration tests
+        fi
+    else
+        log "Skipping DNS configuration tests (dig command not available)"
+    fi
+
+    # Final results
+    log "Test Results:"
+    log "============="
+    log "Passed: ${test_passed}/${test_total} HTTP tests"
+
+    if [ $test_passed -eq $test_total ]; then
+        success "All tests passed! HTTP proxy is working correctly."
+        return 0
+    else
+        error "Some tests failed. Check the logs above for details."
+        return 1
+    fi
+}
+
+# Always clean up test containers on exit (including interruption)
+trap cleanup EXIT
+
+# Check if help is requested
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "HTTP Proxy Integration Test Script"
+    echo ""
+    echo "Usage: $0 [options]"
+    echo ""
+    echo "Options:"
+    echo "  --no-rebuild    Skip full cleanup and rebuild (faster for development)"
+    echo "  --help, -h      Show this help message"
+    echo ""
+    echo "This script tests the HTTP proxy functionality by:"
+    echo "1. Full cleanup and rebuild of all Docker images (unless --no-rebuild)"
+    echo "2. Starting the HTTP proxy stack with docker compose"
+    echo "3. Creating test containers with different configurations:"
+    echo "   - Traefik labels"
+    echo "   - VIRTUAL_HOST environment variable"
+    echo "   - VIRTUAL_HOST + VIRTUAL_PORT environment variables"
+    echo "   - Multiple comma-separated VIRTUAL_HOST values"
+    echo "4. Testing HTTP access to all containers using curl"
+    echo "5. Testing DNS resolution with comprehensive coverage:"
+    echo "   - Basic hostname resolution for configured domains"
+    echo "   - TLD support (any subdomain of configured TLD should resolve)"
+    echo "   - Negative tests (non-configured domains should be rejected)"
+    echo "   - Edge cases and malformed domain handling"
+    echo "6. Testing different DNS server configurations using docker compose:"
+    echo "   - Single TLD: loc"
+    echo "   - Multiple TLDs: loc,dev"
+    echo "   - Specific domains: spark.loc,spark.dev"
+    echo ""
+    echo "All test containers use the domain suffix: ${TEST_DOMAIN}"
+    echo ""
+    echo "DNS Tests verify that the server:"
+    echo "- Resolves configured domains and their subdomains"
+    echo "- Rejects queries for non-configured domains (security)"
+    echo "- Handles both TLD patterns (*.loc) and specific domains (spark.loc)"
+    echo "- Supports comma-separated domain lists in HTTP_PROXY_DNS_TLDS environment variable"
+    exit 0
+fi
+
+# Run the main test
+main "$@"
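+
+# A minimal manual spot-check of the DNS behaviour exercised above (an assumed
+# workflow, not part of the automated suite): with the proxy stack running and
+# dnsutils installed, configured domains should answer with 127.0.0.1 on the
+# DNS port used by these tests, while non-configured domains should get no
+# answer. "myapp" below is an arbitrary, hypothetical subdomain.
+#
+#   dig @127.0.0.1 -p 19322 myapp.spark.loc +short   # expected: 127.0.0.1
+#   dig @127.0.0.1 -p 19322 example.com +short       # expected: no answer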