Switch to Caddy + Docker Registry

Author: continuist
Date: 2025-07-13 10:26:07 -04:00
Commit: 03dac72b90 (parent: 0c7b65ad70)
6 changed files with 292 additions and 288 deletions


@@ -53,13 +53,8 @@ jobs:
  timeout 15 bash -c 'until docker exec ci-dind docker version > /dev/null 2>&1; do echo "Waiting for Docker daemon inside DinD..."; sleep 5; done'
  echo "DinD container is ready"
- # Copy Harbor certificate to DinD container
- docker cp /etc/ssl/registry/registry.crt ci-dind:/usr/local/share/ca-certificates/
- docker exec ci-dind chown root:root /usr/local/share/ca-certificates/registry.crt
- docker exec ci-dind update-ca-certificates
- # Login to Harbor registry (using HTTPS port 443)
- echo "${{ secrets.HARBOR_CI_PASSWORD }}" | docker exec -i ci-dind docker login ${{ secrets.CI_HOST }}:443 -u ${{ secrets.HARBOR_CI_USER }} --password-stdin
+ # Login to Docker Registry (using HTTPS port 443)
+ echo "${{ secrets.REGISTRY_PASSWORD }}" | docker exec -i ci-dind docker login ${{ secrets.CI_HOST }}:443 -u ${{ secrets.REGISTRY_USER }} --password-stdin
  echo "DinD container setup complete"
  fi
@@ -78,46 +73,46 @@ jobs:
  docker exec ci-dind rm -rf /workspace/* /workspace/.* 2>/dev/null || true
  docker cp /tmp/ci-workspace/. ci-dind:/workspace/
- - name: Check and prepare Harbor base images
+ - name: Check and prepare Docker Registry base images
  run: |
  # Set environment variables
  export CI_HOST="${{ secrets.CI_HOST }}"
  export APP_NAME="${{ secrets.APP_NAME || 'sharenet' }}"
- export HARBOR_CI_USER="${{ secrets.HARBOR_CI_USER }}"
+ export REGISTRY_USER="${{ secrets.REGISTRY_USER }}"
- export HARBOR_CI_PASSWORD="${{ secrets.HARBOR_CI_PASSWORD }}"
+ export REGISTRY_PASSWORD="${{ secrets.REGISTRY_PASSWORD }}"
- # Login to Harbor
+ # Login to Docker Registry
- echo "Logging into Harbor registry..."
+ echo "Logging into Docker Registry..."
- echo "$HARBOR_CI_PASSWORD" | docker exec -i ci-dind docker login "$CI_HOST:443" -u "$HARBOR_CI_USER" --password-stdin
+ echo "$REGISTRY_PASSWORD" | docker exec -i ci-dind docker login "$CI_HOST:443" -u "$REGISTRY_USER" --password-stdin
- # Check if base images exist in Harbor, pull from Docker Hub if not
+ # Check if base images exist in Docker Registry, pull from Docker Hub if not
  BASE_IMAGES=("rust:1.75-slim" "node:20-slim" "postgres:15-alpine")
  for image in "${BASE_IMAGES[@]}"; do
  image_name=$(echo "$image" | cut -d: -f1)
  image_tag=$(echo "$image" | cut -d: -f2)
- harbor_image="$CI_HOST:443/$APP_NAME/$image_name:$image_tag"
+ registry_image="$CI_HOST:443/$APP_NAME/$image_name:$image_tag"
- echo "Checking if $harbor_image exists in Harbor..."
+ echo "Checking if $registry_image exists in Docker Registry..."
- # Try to pull from Harbor first
+ # Try to pull from Docker Registry first
- if docker exec ci-dind docker pull "$harbor_image" 2>/dev/null; then
+ if docker exec ci-dind docker pull "$registry_image" 2>/dev/null; then
- echo "✓ Found $harbor_image in Harbor"
+ echo "✓ Found $registry_image in Docker Registry"
  else
- echo "✗ $harbor_image not found in Harbor, pulling from Docker Hub..."
+ echo "✗ $registry_image not found in Docker Registry, pulling from Docker Hub..."
  # Pull from Docker Hub
  if docker exec ci-dind docker pull "$image"; then
  echo "✓ Successfully pulled $image from Docker Hub"
- # Tag for Harbor
+ # Tag for Docker Registry
- docker exec ci-dind docker tag "$image" "$harbor_image"
+ docker exec ci-dind docker tag "$image" "$registry_image"
- # Push to Harbor
+ # Push to Docker Registry
- if docker exec ci-dind docker push "$harbor_image"; then
+ if docker exec ci-dind docker push "$registry_image"; then
- echo "✓ Successfully pushed $harbor_image to Harbor"
+ echo "✓ Successfully pushed $registry_image to Docker Registry"
  else
- echo "✗ Failed to push $harbor_image to Harbor"
+ echo "✗ Failed to push $registry_image to Docker Registry"
  exit 1
  fi
  else
@@ -127,7 +122,7 @@ jobs:
  fi
  done
- echo "All base images are ready in Harbor!"
+ echo "All base images are ready in Docker Registry!"
  - name: Start testing environment
  run: |
@@ -332,11 +327,10 @@ jobs:
  - name: Make scripts executable
  run: chmod +x scripts/*.sh
- - name: Configure Docker for Harbor access
+ - name: Configure Docker for Docker Registry access
  run: |
- # Configure Docker to access Harbor registry on CI Linode (using HTTPS)
- # The Harbor certificate should already be installed on the production server
- # as described in the CI guide Step 13
+ # Configure Docker to access Docker Registry on CI Linode (using HTTPS)
+ # Since we're using Caddy with automatic HTTPS, no certificate configuration is needed
  # Wait for Docker to be ready
  timeout 30 bash -c 'until docker info; do sleep 1; done'
@@ -351,8 +345,8 @@ jobs:
  - name: Pull and deploy application
  run: |
- # Pull latest images from Harbor registry
+ # Pull latest images from Docker Registry
- echo "Pulling latest images from Harbor registry..."
+ echo "Pulling latest images from Docker Registry..."
  docker compose -f docker-compose.prod.yml pull
  # Deploy the application stack


@@ -8,7 +8,7 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
  ┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
  │  Forgejo Host   │    │  CI/CD Linode   │    │Production Linode│
  │  (Repository)   │    │ (Actions Runner)│    │ (Docker Deploy) │
- │                 │    │+ Harbor Registry│    │                 │
+ │                 │    │+ Docker Registry│    │                 │
  │                 │    │ + DinD Container│    │                 │
  └─────────────────┘    └─────────────────┘    └─────────────────┘
           │                      │                      │
@@ -23,7 +23,7 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
  1. **Code Push**: Developer pushes code to Forgejo repository
  2. **Automated Testing**: CI/CD Linode runs tests in isolated DinD environment
  3. **Image Building**: If tests pass, Docker images are built within DinD
- 4. **Registry Push**: Images are pushed to Harbor registry from DinD
+ 4. **Registry Push**: Images are pushed to Docker Registry from DinD
  5. **Production Deployment**: Production Linode pulls images and deploys
  6. **Health Check**: Application is verified and accessible
@@ -36,9 +36,9 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
  - ✅ **Fast cleanup** - just restart DinD container
  ### **For CI/CD Operations:**
- - ✅ **Zero resource contention** with Harbor
+ - ✅ **Zero resource contention** with Docker Registry
  - ✅ **Simple cleanup** - one-line container restart
- - ✅ **Perfect isolation** - CI/CD can't affect Harbor
+ - ✅ **Perfect isolation** - CI/CD can't affect Docker Registry
  - ✅ **Consistent environment** - same setup every time
  ### **For Maintenance:**
@@ -65,10 +65,9 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
  ### CI/CD Linode Features
  - Forgejo Actions runner for automated builds
  - **Docker-in-Docker (DinD) container** for isolated CI operations
- - Harbor container registry for image storage
- - Harbor web UI for image management
- - Built-in vulnerability scanning with Trivy
- - Role-based access control and audit logs
+ - Docker Registry with Caddy reverse proxy for image storage
+ - Unauthenticated pulls, authenticated pushes
+ - Automatic HTTPS with Caddy
  - Secure SSH communication with production
  - **Simplified cleanup** - just restart DinD container
@@ -85,7 +84,7 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
  - **Automated deployment** to production
  - **Rollback capability** with image versioning
  - **Health monitoring** and logging
- - **Zero resource contention** between CI/CD and Harbor
+ - **Zero resource contention** between CI/CD and Docker Registry
  ## Security Model and User Separation
@@ -654,183 +653,133 @@ sudo apt install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
  sudo usermod -aG docker CI_SERVICE_USER
  ```
- ### Step 5: Set Up Harbor Container Registry
+ ### Step 5: Set Up Docker Registry with Caddy
- Harbor provides a secure, enterprise-grade container registry with vulnerability scanning, role-based access control, and audit logging.
+ We'll set up a basic Docker Registry with Caddy as a reverse proxy, configured to allow unauthenticated pulls but require authentication for pushes.
- #### 5.1 Create Harbor Service User
+ #### 5.1 Create Registry Service User
  ```bash
- # Create dedicated user and group for Harbor
+ # Create dedicated user and group for Docker Registry
- sudo groupadd -r harbor
+ sudo groupadd -r registry
- sudo useradd -r -g harbor -s /bin/bash -m -d /opt/harbor harbor
+ sudo useradd -r -g registry -s /bin/bash -m -d /opt/registry registry
  # Set secure password for emergency access
- echo "harbor:$(openssl rand -base64 32)" | sudo chpasswd
+ echo "registry:$(openssl rand -base64 32)" | sudo chpasswd
- # Add harbor user to docker group
+ # Add registry user to docker group
- sudo usermod -aG docker harbor
+ sudo usermod -aG docker registry
- # Add CI_DEPLOY_USER to harbor group for monitoring access
+ # Add CI_DEPLOY_USER to registry group for monitoring access
- sudo usermod -aG harbor CI_DEPLOY_USER
+ sudo usermod -aG registry CI_DEPLOY_USER
- # Set proper permissions on /opt/harbor directory
+ # Set proper permissions on /opt/registry directory
- sudo chown harbor:harbor /opt/harbor
+ sudo chown registry:registry /opt/registry
- sudo chmod 755 /opt/harbor
+ sudo chmod 755 /opt/registry
  ```
- #### 5.2 Generate SSL Certificates
+ #### 5.2 Create Docker Compose Setup
  ```bash
- # Create system SSL directory for Harbor certificates
- sudo mkdir -p /etc/ssl/registry
+ # Create registry directory structure
+ sudo mkdir -p /opt/registry
+ sudo chown registry:registry /opt/registry
+ cd /opt/registry
- # Get your actual IP address
- YOUR_ACTUAL_IP=$(curl -4 -s ifconfig.me)
- echo "Your IP address is: $YOUR_ACTUAL_IP"
+ # Copy registry configuration from repository
+ # The registry folder contains the Docker Compose and Caddy configuration files
+ sudo cp /opt/APP_NAME/registry/docker-compose.registry.yml docker-compose.yml
+ sudo cp /opt/APP_NAME/registry/Caddyfile Caddyfile
- # Create OpenSSL configuration file with proper SANs
- sudo tee /etc/ssl/registry/openssl.conf << EOF
- [req]
- distinguished_name = req_distinguished_name
- req_extensions = v3_req
- prompt = no
- [req_distinguished_name]
- C = US
- ST = State
- L = City
- O = Organization
- CN = $YOUR_ACTUAL_IP
- [v3_req]
- keyUsage = keyEncipherment, dataEncipherment
- extendedKeyUsage = serverAuth
- subjectAltName = @alt_names
- [alt_names]
- IP.1 = $YOUR_ACTUAL_IP
- DNS.1 = $YOUR_ACTUAL_IP
- DNS.2 = localhost
- EOF
+ # Update Caddyfile with your actual IP address
+ sudo sed -i "s/registry.example.com/YOUR_CI_CD_IP/g" Caddyfile
+ # Create environment file for registry authentication
+ # First, create a secure password hash
+ REGISTRY_PASSWORD="your-secure-registry-password"
+ REGISTRY_PASSWORD_HASH=$(htpasswd -nbB registry-user "$REGISTRY_PASSWORD" | cut -d: -f2)
+ sudo tee .env << EOF
+ REGISTRY_USERNAME=registry-user
+ REGISTRY_PASSWORD_HASH=$REGISTRY_PASSWORD_HASH
+ EOF
- # Generate self-signed certificate with proper SANs
- sudo openssl req -x509 -newkey rsa:4096 -keyout /etc/ssl/registry/registry.key -out /etc/ssl/registry/registry.crt -days 365 -nodes -extensions v3_req -config /etc/ssl/registry/openssl.conf
- # Set proper permissions for harbor user
- sudo chown harbor:harbor /etc/ssl/registry/registry.key
- sudo chown harbor:harbor /etc/ssl/registry/registry.crt
- sudo chmod 600 /etc/ssl/registry/registry.key
- sudo chmod 644 /etc/ssl/registry/registry.crt
- sudo chmod 644 /etc/ssl/registry/openssl.conf
+ # Set proper permissions
+ sudo chown registry:registry .env
+ sudo chmod 600 .env
  ```
- #### 5.3 Configure Docker to Trust Harbor Registry
+ #### 5.3 Configure Docker Registry
  ```bash
- # Add the certificate to system CA certificates
- sudo cp /etc/ssl/registry/registry.crt /usr/local/share/ca-certificates/registry.crt
- sudo update-ca-certificates
- # Restart Docker to ensure it picks up the new CA certificates
- sudo systemctl restart docker
+ # Create registry data directory
+ sudo mkdir -p /opt/registry/data
+ sudo chown registry:registry /opt/registry/data
+ # Create registry configuration (no authentication needed - Caddy handles it)
+ sudo tee /opt/registry/config.yml << 'EOF'
+ version: 0.1
+ log:
+   level: debug
+ storage:
+   filesystem:
+     rootdirectory: /var/lib/registry
+   delete:
+     enabled: true
+ http:
+   addr: :5000
+   headers:
+     X-Content-Type-Options: [nosniff]
+ middleware:
+   repository:
+     - name: AwsEc2PublicBlock
+   storage:
+     - name: Redirect
+       options:
+         baseurl: https://YOUR_CI_CD_IP
+ EOF
+ # Set proper permissions
+ sudo chown registry:registry /opt/registry/config.yml
  ```
- #### 5.4 Install Harbor
+ #### 5.5 Start Docker Registry with Docker Compose
  ```bash
- # Switch to harbor user
- sudo su - harbor
- # Set DB_PASSWORD environment variable
- export DB_PASSWORD=$(openssl rand -base64 32 | tr -d "=+/" | cut -c1-25)
- # IMPORTANT: Save the DB_PASSWORD in your password manager for safekeeping
- echo "DB_PASSWORD: $DB_PASSWORD"
- # Download and install Harbor
- cd /opt/harbor
- # Switch to the CI_DEPLOY_USER
- sudo su - CI_DEPLOY_USER
- sudo wget https://github.com/goharbor/harbor/releases/download/v2.10.0/harbor-offline-installer-v2.10.0.tgz
- sudo tar -xzf harbor-offline-installer-v2.10.0.tgz
- cd harbor
- sudo cp harbor.yml.tmpl harbor.yml
- # Edit harbor.yml configuration
- sudo nano harbor.yml
- ```
- **Important**: In the `harbor.yml` file, update:
- - `hostname: YOUR_CI_CD_IP` (replace with your actual IP)
- - `certificate: /etc/ssl/registry/registry.crt`
- - `private_key: /etc/ssl/registry/registry.key`
- - `password: <the DB_PASSWORD generated above>`
- **Note**: The default Harbor admin password is "Harbor12345" and will be changed in Step 5.6
- ```bash
- # Run the following as the CI_DEPLOY_USER
- sudo su - CI_DEPLOY_USER
- cd /opt/harbor/harbor
- # Install Harbor with Trivy vulnerability scanner
- sudo ./prepare
- sudo ./install.sh --with-trivy
- sudo docker compose down
- sudo chown -R harbor:harbor harbor
- # Switch to the harbor user
- sudo su - harbor
- cd /opt/harbor/harbor
- # Run the following to patially adjust the permissions correctly for the harbor user
- ./install.sh --with-trivy
- # Exit harbor user shell to switch back to the CI_DEPLOY_USER
- exit
- cd /opt/harbor/harbor
- # Run the following to adjust the permissions for various en files
- sudo chown harbor:harbor common/config/jobservice/env
- sudo chown harbor:harbor common/config/db/env
- sudo chown harbor:harbor common/config/registryctl/env
- sudo chown harbor:harbor common/config/trivy-adapter/env
- sudo chown harbor:harbor common/config/core/env
- # Switch back to harbor user and bring Harbor back up
- sudo su - harbor
- cd /opt/harbor/harbor
+ # Switch to registry user
+ sudo su - registry
+ # Navigate to registry directory
+ cd /opt/registry
+ # Start the Docker Registry and Caddy services
  docker compose up -d
- # Verify that all Harbor containers are healthy
- docker compose ps -a
- # Verify using the Harbor API that all Harbor processes are healthy
- curl -k https://localhost/api/v2.0/health
+ # Verify services are running
+ docker compose ps
+ # Exit registry user shell
+ exit
  ```
- #### 5.5 Create Systemd Service
+ #### 5.6 Create Systemd Service for Docker Compose
  ```bash
- # Create systemd service file for Harbor
+ # Create systemd service file for Docker Registry with Docker Compose
- sudo tee /etc/systemd/system/harbor.service << EOF
+ sudo tee /etc/systemd/system/docker-registry.service << EOF
  [Unit]
- Description=Harbor Container Registry
+ Description=Docker Registry with Caddy
  After=docker.service
  Requires=docker.service
  [Service]
  Type=oneshot
  RemainAfterExit=yes
- User=harbor
+ User=registry
- Group=harbor
+ Group=registry
- WorkingDirectory=/opt/harbor/harbor
+ WorkingDirectory=/opt/registry
  ExecStart=/usr/bin/docker compose up -d
  ExecStop=/usr/bin/docker compose down
  ExecReload=/usr/bin/docker compose down && /usr/bin/docker compose up -d
@@ -839,32 +788,39 @@ ExecReload=/usr/bin/docker compose down && /usr/bin/docker compose up -d
  WantedBy=multi-user.target
  EOF
- # Enable and start Harbor service
+ # Enable and start Docker Registry service
  sudo systemctl daemon-reload
- sudo systemctl enable harbor.service
+ sudo systemctl enable docker-registry.service
- sudo systemctl start harbor.service
+ sudo systemctl start docker-registry.service
- # Monitor startup (can take 2-3 minutes)
+ # Monitor startup
- sudo journalctl -u harbor.service -f
+ sudo journalctl -u docker-registry.service -f
  ```
- #### 5.6 Configure Harbor Access
+ #### 5.7 Configure Registry Access
- 1. **Access Harbor Web UI**: Open `https://YOUR_CI_CD_IP` in your browser
- 2. **Login**: Username `admin`, Password `Harbor12345`
- 3. **Change admin password**: Click admin icon → Change Password
- 4. **Create project**: Projects → New Project → Name: `APP_NAME`, Access Level: `Public`
- 5. **Create CI user**: Administration → Users → New User → Username: `ci-user`, Password: `your-secure-password`
- 6. **Assign role**: Projects → `APP_NAME` → Members → + User → Select `ci-user`, Role: `Developer`
- #### 5.7 Test Harbor Setup
+ The Docker Registry is now configured with the following access model:
+ **Authentication Model:**
+ - **Pulls**: Unauthenticated (public read access)
+ - **Pushes**: Require authentication with `registry-user` credentials
+ **Registry Credentials:**
+ - **Username**: `registry-user`
+ - **Password**: The password you set in the environment file (default: `your-secure-registry-password`)
+ **Registry URL**: `https://YOUR_CI_CD_IP`
+ **Note**: The authentication is handled by Caddy using the environment variables in the `.env` file. The Docker Registry itself runs without authentication, but Caddy enforces authentication for push operations.
+ #### 5.8 Test Registry Setup
  ```bash
  # Switch to CI_SERVICE_USER for testing (CI_SERVICE_USER runs CI pipeline and Docker operations)
  sudo su - CI_SERVICE_USER
  # Test Docker login and push
- echo "your-secure-password" | docker login YOUR_CI_CD_IP -u ci-user --password-stdin
+ echo "your-secure-registry-password" | docker login YOUR_CI_CD_IP -u registry-user --password-stdin
  # Create and push test image
  echo "FROM alpine:latest" > /tmp/test.Dockerfile
@@ -888,10 +844,10 @@ exit
  ```
  **Expected behavior**:
- - ✅ Push requires authentication
+ - ✅ Push requires authentication with `registry-user` credentials
- - ✅ Pull works without authentication
+ - ✅ Pull works without authentication (public read access)
  - ✅ Unauthorized push is blocked
- - ✅ Web UI accessible at `https://YOUR_CI_CD_IP`
+ - ✅ Registry accessible at `https://YOUR_CI_CD_IP`
  ### Step 6: Install Forgejo Actions Runner
@@ -1160,31 +1116,16 @@ docker exec ci-dind docker version
  **Why CI_SERVICE_USER**: The CI_SERVICE_USER is in the docker group and runs the CI pipeline, so it needs direct access to the DinD container for seamless CI/CD operations.
- #### 7.2 Configure DinD for Harbor Registry
+ #### 7.2 Configure DinD for Docker Registry
  ```bash
  # Navigate to the application directory
  cd /opt/APP_NAME
- # Copy Harbor certificate to DinD container
- docker cp /etc/ssl/registry/registry.crt ci-dind:/usr/local/share/ca-certificates/
- # Fix certificate ownership (crucial for CA certificate trust)
- docker exec ci-dind chown root:root /usr/local/share/ca-certificates/registry.crt
- # Update CA certificates
- docker exec ci-dind update-ca-certificates
- # Restart DinD container to pick up new CA certificates
- docker restart ci-dind
- # Wait for DinD to be ready again
- sleep 30
- # Login to Harbor from within DinD
- echo "ci-user-password" | docker exec -i ci-dind docker login YOUR_CI_CD_IP -u ci-user --password-stdin
- # Test Harbor connectivity from DinD (using certificate trust)
+ # Login to Docker Registry from within DinD
+ echo "your-registry-password" | docker exec -i ci-dind docker login YOUR_CI_CD_IP -u registry-user --password-stdin
+ # Test Docker Registry connectivity from DinD
  docker exec ci-dind docker pull alpine:latest
  docker exec ci-dind docker tag alpine:latest YOUR_CI_CD_IP/APP_NAME/test:latest
  docker exec ci-dind docker push YOUR_CI_CD_IP/APP_NAME/test:latest
@@ -1242,32 +1183,32 @@ The CI/CD pipeline uses a three-stage approach with dedicated environments for e
  - Rust toolchain for backend testing and migrations
  - Node.js toolchain for frontend testing
  - **Network**: All containers communicate through `ci-cd-test-network`
- - **Setup**: DinD container created, Harbor certificate installed, Docker login performed, code cloned into DinD from Forgejo
+ - **Setup**: DinD container created, Docker Registry login performed, code cloned into DinD from Forgejo
  - **Cleanup**: Testing containers removed, DinD container kept running
  **Job 2 (Building) - Direct Docker Commands:**
- - **Purpose**: Image building and pushing to Harbor
+ - **Purpose**: Image building and pushing to Docker Registry
  - **Environment**: Same DinD container from Job 1
  - **Code Access**: Reuses code from Job 1, updates to latest commit
  - **Process**:
  - Uses Docker Buildx for efficient building
  - Builds backend and frontend images separately
- - Pushes images to Harbor registry
+ - Pushes images to Docker Registry
- - **Harbor Access**: Reuses Harbor authentication from Job 1
+ - **Registry Access**: Reuses Docker Registry authentication from Job 1
  - **Cleanup**: DinD container stopped and removed (clean slate for next run)
  **Job 3 (Deployment) - `docker-compose.prod.yml`:**
  - **Purpose**: Production deployment with pre-built images
  - **Environment**: Production runner on Production Linode
  - **Process**:
- - Pulls images from Harbor registry
+ - Pulls images from Docker Registry
  - Deploys complete application stack
  - Verifies all services are healthy
  - **Services**: PostgreSQL, backend, frontend, Nginx
  **Key Benefits:**
  - **🧹 Complete Isolation**: Each job has its own dedicated environment
- - **🚫 No Resource Contention**: Testing and building don't interfere with Harbor
+ - **🚫 No Resource Contention**: Testing and building don't interfere with Docker Registry
  - **⚡ Consistent Environment**: Same setup every time
  - **🎯 Purpose-Specific**: Each Docker Compose file serves a specific purpose
  - **🔄 Parallel Safety**: Jobs can run safely in parallel
@@ -1278,7 +1219,7 @@ The CI/CD pipeline uses a three-stage approach with dedicated environments for e
  # Test DinD functionality
  docker exec ci-dind docker run --rm alpine:latest echo "DinD is working!"
- # Test Harbor integration
+ # Test Docker Registry integration
  docker exec ci-dind docker pull alpine:latest
  docker exec ci-dind docker tag alpine:latest YOUR_CI_CD_IP/APP_NAME/dind-test:latest
  docker exec ci-dind docker push YOUR_CI_CD_IP/APP_NAME/dind-test:latest
@@ -1290,11 +1231,11 @@ docker exec ci-dind docker rmi YOUR_CI_CD_IP/APP_NAME/dind-test:latest
  **Expected Output**:
  - DinD container should be running and accessible
  - Docker commands should work inside DinD
- - Harbor push/pull should work from DinD
+ - Docker Registry push/pull should work from DinD
  #### 7.4 Production Deployment Architecture
- The production deployment uses a separate Docker Compose file (`docker-compose.prod.yml`) that pulls built images from the Harbor registry and deploys the complete application stack.
+ The production deployment uses a separate Docker Compose file (`docker-compose.prod.yml`) that pulls built images from the Docker Registry and deploys the complete application stack.
  **Production Stack Components:**
  - **PostgreSQL**: Production database with persistent storage
@@ -1304,12 +1245,12 @@ The production deployment uses a separate Docker Compose file (`docker-compose.p
  **Deployment Flow:**
  1. **Production Runner**: Runs on Production Linode with `production` label
- 2. **Image Pull**: Pulls latest images from Harbor registry on CI Linode
+ 2. **Image Pull**: Pulls latest images from Docker Registry on CI Linode
  3. **Stack Deployment**: Uses `docker-compose.prod.yml` to deploy complete stack
  4. **Health Verification**: Ensures all services are healthy before completion
  **Key Benefits:**
- - **🔄 Image Registry**: Centralized image storage in Harbor
+ - **🔄 Image Registry**: Centralized image storage in Docker Registry
  - **📦 Consistent Deployment**: Same images tested in CI are deployed to production
  - **⚡ Fast Deployment**: Only pulls changed images
  - **🛡️ Rollback Capability**: Can easily rollback to previous image versions
@@ -1348,11 +1289,11 @@ sudo ufw --force enable
  sudo ufw default deny incoming
  sudo ufw default allow outgoing
  sudo ufw allow ssh
- sudo ufw allow 443/tcp # Harbor registry (public read access)
+ sudo ufw allow 443/tcp # Docker Registry via Caddy (public read access)
  ```
  **Security Model**:
- - **Port 443 (Harbor)**: Public read access for public projects, authenticated write access
+ - **Port 443 (Docker Registry)**: Public read access, authenticated write access
  - **SSH**: Restricted to your IP addresses
  - **All other ports**: Blocked
@@ -1660,43 +1601,18 @@ ls -la /opt/APP_NAME
  - Sets proper ownership for the PROD_SERVICE_USER
  - Ensures the directory exists before the CI workflow runs
- ### Step 13: Configure Docker for Harbor Access
+ ### Step 13: Configure Docker for Docker Registry Access
- **Important**: The Production Linode needs to be able to pull Docker images from the Harbor registry on the CI/CD Linode. We need to configure Docker to trust the Harbor SSL certificate.
+ **Important**: The Production Linode needs to be able to pull Docker images from the Docker Registry on the CI/CD Linode. Since we're using Caddy with automatic HTTPS, no additional certificate configuration is needed.
  ```bash
- # Add Harbor certificate to system CA certificates
- sudo mkdir -p /usr/local/share/ca-certificates
- # Copy Harbor certificate from CI Linode to local machine, then to Production Linode
- # First, from your local machine, copy the certificate from CI Linode:
- scp CI_DEPLOY_USER@YOUR_CI_CD_IP:/etc/ssl/registry/registry.crt ./
- # Then copy it to the Production Linode:
- scp registry.crt PROD_DEPLOY_USER@YOUR_PRODUCTION_IP:/tmp/
- # Remove the cert from your local machine as no longer needed
- rm registry.crt
- # Now on the Production Linode, move it to the correct location:
- sudo mv /tmp/registry.crt /usr/local/share/ca-certificates/
- # Fix certificate ownership (crucial for CA certificate trust)
- sudo chown root:root /usr/local/share/ca-certificates/registry.crt
- # Update CA certificates
- sudo update-ca-certificates
- # Restart Docker to apply changes
- sudo systemctl restart docker
  # Change to the PROD_SERVICE_USER
  sudo su - PROD_SERVICE_USER
- # Test that the certificate is working by pulling an image from Harbor
+ # Test that Docker can pull images from the Docker Registry
  docker pull YOUR_CI_CD_IP/APP_NAME/test:latest
- # If the pull succeeds, the certificate is working correctly
+ # If the pull succeeds, the Docker Registry is accessible
  # Change back to PROD_DEPLOY_USER
  exit
@@ -1705,21 +1621,13 @@ exit
  **Important**: Replace `YOUR_CI_CD_IP` with your actual CI/CD Linode IP address.
  **What this does**:
- - **Copies Harbor certificate**: Transfers the SSL certificate from CI Linode to Production Linode via your local machine
- - **Configures certificate trust**: Properly sets up Harbor certificate trust in Docker
- - **Fixes ownership issues**: Ensures certificate has correct ownership for CA trust
- - **Updates CA certificates**: Makes the certificate available to all applications
- - **Restarts Docker**: Applies the new configuration
- - **Tests certificate**: Verifies that Docker can successfully pull images from Harbor
+ - **Tests Docker Registry access**: Verifies that Docker can successfully pull images from the Docker Registry
+ - **No certificate configuration needed**: Caddy handles HTTPS automatically
+ - **Simple setup**: No complex certificate management required
- **Note**: Since you don't have direct SSH access between the Linodes, you'll need to copy the certificate through your local machine using the deployment users:
- 1. From your local machine: `scp CI_DEPLOY_USER@YOUR_CI_CD_IP:/etc/ssl/registry/registry.crt ./`
- 2. Then: `scp registry.crt PROD_DEPLOY_USER@YOUR_PRODUCTION_IP:/tmp/`
- 3. On Production Linode: `sudo mv /tmp/registry.crt /usr/local/share/ca-certificates/`
  ### Step 14: Set Up Forgejo Runner for Production Deployment
- **Important**: The Production Linode needs a Forgejo runner to execute the deployment job from the CI/CD workflow. This runner will pull images from Harbor and deploy using `docker-compose.prod.yml`.
+ **Important**: The Production Linode needs a Forgejo runner to execute the deployment job from the CI/CD workflow. This runner will pull images from Docker Registry and deploy using `docker-compose.prod.yml`.
  #### 14.1 Download Runner
@@ -1870,7 +1778,7 @@ sudo journalctl -u forgejo-runner.service -f --no-pager
  When the workflow runs, it will:
- 1. Pull the latest Docker images from Harbor registry
+ 1. Pull the latest Docker images from Docker Registry
  2. Use the `docker-compose.prod.yml` file to deploy the application stack
  3. Create the necessary environment variables for production deployment
  4. Verify that all services are healthy after deployment
@@ -1882,7 +1790,7 @@ The production runner will automatically handle the deployment process when you
  The `docker-compose.prod.yml` file is specifically designed for production deployment and differs from development setups:
  **Key Features**:
- - **Image-based deployment**: Uses pre-built images from Harbor registry instead of building from source
+ - **Image-based deployment**: Uses pre-built images from Docker Registry instead of building from source
  - **Production networking**: All services communicate through a dedicated `sharenet-network`
  - **Health checks**: Each service includes health checks to ensure proper startup order
  - **Nginx reverse proxy**: Includes Nginx for SSL termination, load balancing, and security headers
@@ -1896,7 +1804,7 @@ The `docker-compose.prod.yml` file is specifically designed for production deplo
  4. **Nginx**: Reverse proxy that serves the frontend and proxies API requests to backend
  **Deployment Process**:
- 1. The production runner pulls the latest images from Harbor registry
+ 1. The production runner pulls the latest images from Docker Registry
  2. Creates environment variables for the deployment
  3. Runs `docker compose -f docker-compose.prod.yml up -d`
  4. Waits for all services to be healthy
@@ -2010,7 +1918,7 @@ docker compose --version
  #### 16.2 Test Harbor Access
  ```bash
- # Test pulling an image from the CI/CD Harbor registry
+ # Test pulling an image from the CI/CD Docker Registry
  docker pull YOUR_CI_CD_IP/APP_NAME/test:latest
  ```
@@ -2027,20 +1935,20 @@ docker pull YOUR_CI_CD_IP/APP_NAME/test:latest
  Go to your Forgejo repository and add these secrets in **Settings → Secrets and Variables → Actions**:
  **Required Secrets:**
- - `CI_HOST`: Your CI/CD Linode IP address (used for Harbor registry access)
+ - `CI_HOST`: Your CI/CD Linode IP address (used for Docker Registry access)
  - `PRODUCTION_IP`: Your Production Linode IP address
  - `PROD_DEPLOY_USER`: The production deployment user name (e.g., `prod-deploy`)
  - `PROD_SERVICE_USER`: The production service user name (e.g., `prod-service`)
  - `APP_NAME`: Your application name (e.g., `sharenet`)
  - `POSTGRES_PASSWORD`: A strong password for the PostgreSQL database
- - `HARBOR_CI_USER`: Harbor username for CI operations (e.g., `ci-user`)
+ - `REGISTRY_USER`: Docker Registry username for CI operations (e.g., `registry-user`)
- - `HARBOR_CI_PASSWORD`: Harbor password for CI operations (the password you set for ci-user)
+ - `REGISTRY_PASSWORD`: Docker Registry password for CI operations (the password you set in the environment file, default: `your-secure-registry-password`)
  **Optional Secrets (for domain users):**
  - `DOMAIN`: Your domain name (e.g., `example.com`)
  - `EMAIL`: Your email for SSL certificate notifications
- **Note**: This setup uses custom Dockerfiles for testing environments with base images stored in Harbor registry. The CI pipeline automatically checks if base images exist in Harbor and pulls them from Docker Hub only when needed, eliminating rate limiting issues and providing better control over the testing environment.
+ **Note**: This setup uses custom Dockerfiles for testing environments with base images stored in Docker Registry. The CI pipeline automatically checks if base images exist in Docker Registry and pulls them from Docker Hub only when needed, eliminating rate limiting issues and providing better control over the testing environment.
  ### Step 18: Test Complete Pipeline
@@ -2060,24 +1968,24 @@ The pipeline should execute these steps in order:
  4. **Test Frontend**: Run frontend tests in isolated environment
  5. **Build Backend**: Build backend Docker image in DinD
  6. **Build Frontend**: Build frontend Docker image in DinD
- 7. **Push to Registry**: Push images to Harbor registry from DinD
+ 7. **Push to Registry**: Push images to Docker Registry from DinD
  8. **Deploy to Production**: Deploy to production server
- #### 18.3 Check Harbor
+ #### 18.3 Check Docker Registry
  ```bash
  # On CI/CD Linode
  cd /opt/APP_NAME
- # Check if new images were pushed (using correct Harbor port 443)
+ # Check if new images were pushed (using correct registry port 443)
  curl -k https://localhost:443/v2/_catalog
- # Check specific repository tags (using correct Harbor API structure)
+ # Check specific repository tags
  curl -k https://localhost:443/v2/APP_NAME/backend/tags/list
  curl -k https://localhost:443/v2/APP_NAME/frontend/tags/list
- # Alternative: Check Harbor web UI
- # Open https://YOUR_CI_CD_IP in your browser and navigate to Projects → APP_NAME
+ # Alternative: Check registry via Caddy
+ # Open https://YOUR_CI_CD_IP in your browser
  ```
  #### 18.4 Verify Production Deployment
@@ -2213,7 +2121,7 @@ tail -f /tmp/monitor.log
  You have successfully set up a complete CI/CD pipeline with:
  - ✅ **Automated testing** on every code push in isolated DinD environment
- - ✅ **Docker image building** and Harbor registry storage
+ - ✅ **Docker image building** and Docker Registry storage
  - ✅ **Automated deployment** to production
  - ✅ **Health monitoring** and logging
  - ✅ **Backup and cleanup** automation

registry/Caddyfile (new file, 25 lines)

@@ -0,0 +1,25 @@
(registry_auth) {
    basicauth {
        {env.REGISTRY_USERNAME} {env.REGISTRY_PASSWORD_HASH}
    }
}

https://registry.example.com {
    import registry_auth
    reverse_proxy registry:5000

    header {
        X-Content-Type-Options nosniff
    }

    @push method POST PUT PATCH DELETE
    handle @push {
        import registry_auth
        reverse_proxy registry:5000
    }

    @pull method GET HEAD OPTIONS
    handle @pull {
        reverse_proxy registry:5000
    }
}
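The `{env.REGISTRY_PASSWORD_HASH}` placeholder expects a bcrypt hash rather than the plain password. A minimal sketch of producing one with `htpasswd`, mirroring the command used in the setup guide (the username and password shown are the guide's placeholders):

```bash
# htpasswd comes from apache2-utils on Debian/Ubuntu.
# -nbB prints a bcrypt "user:hash" entry to stdout instead of writing a file;
# cut keeps only the hash portion for the .env file.
REGISTRY_PASSWORD="your-secure-registry-password"
REGISTRY_PASSWORD_HASH=$(htpasswd -nbB registry-user "$REGISTRY_PASSWORD" | cut -d: -f2)

# Values consumed by the caddy service via env_file:
printf 'REGISTRY_USERNAME=%s\nREGISTRY_PASSWORD_HASH=%s\n' registry-user "$REGISTRY_PASSWORD_HASH"
```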

registry/README.md (new file, 39 lines)

@@ -0,0 +1,39 @@
# Docker Registry Configuration
This folder contains the configuration files for the Docker Registry setup used in the CI/CD pipeline.
## Files
- **`docker-compose.registry.yml`**: Docker Compose configuration for the registry and Caddy reverse proxy
- **`Caddyfile`**: Caddy configuration for HTTPS and authentication
- **`README.md`**: This documentation file
## Architecture
The registry setup uses:
- **Docker Registry**: Basic registry for storing Docker images
- **Caddy**: Reverse proxy with automatic HTTPS and authentication
- **Environment Variables**: For authentication credentials
## Authentication Model
- **Pulls**: Unauthenticated (public read access)
- **Pushes**: Require authentication with `registry-user` credentials
## Configuration
The setup is configured through:
1. **Environment Variables**: Stored in `.env` file (created during setup)
2. **Caddyfile**: Handles HTTPS and authentication
3. **Docker Compose**: Orchestrates the registry and Caddy services
## Usage
The registry is automatically set up during the CI/CD pipeline setup process. The configuration files are copied from this folder to the registry server and customized with the appropriate IP address and credentials.
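A rough usage sketch, assuming the registry is reachable at `YOUR_CI_CD_IP` and the credentials from the `.env` file are in place (the image names are illustrative):

```bash
# Pulls need no login (public read access)
docker pull YOUR_CI_CD_IP/APP_NAME/backend:latest

# Pushes require the registry-user credentials
echo "your-secure-registry-password" | docker login YOUR_CI_CD_IP -u registry-user --password-stdin
docker tag alpine:latest YOUR_CI_CD_IP/APP_NAME/test:latest
docker push YOUR_CI_CD_IP/APP_NAME/test:latest
```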
## Security
- Authentication is handled by Caddy using environment variables
- HTTPS is automatically managed by Caddy
- Registry data is persisted in Docker volumes
- Environment file contains sensitive credentials and should be properly secured
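As a minimal sketch of that last point, following the ownership and permissions the setup guide applies to the environment file:

```bash
# Restrict the .env file to the registry service user
sudo chown registry:registry /opt/registry/.env
sudo chmod 600 /opt/registry/.env
```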

registry/docker-compose.registry.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
version: '3'

services:
  registry:
    image: registry:2
    container_name: registry
    networks:
      - sharenet-ci
    volumes:
      - registry-data:/var/lib/registry
    ports:
      - "127.0.0.1:5000:5000"  # Localhost only

  caddy:
    image: caddy:latest
    container_name: caddy
    depends_on:
      - registry
    networks:
      - sharenet-ci
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    env_file:
      - .env

volumes:
  registry-data:
  caddy_data:
  caddy_config:

networks:
  sharenet-ci:
    driver: bridge
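As a quick sanity check after bringing the stack up (a sketch; this is the same catalog probe the monitoring script uses, and `-k` is only needed while the client does not yet trust the certificate):

```bash
cd /opt/registry
docker compose up -d
docker compose ps

# The registry API should answer through Caddy on port 443
curl -sk https://localhost/v2/_catalog
```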


@@ -111,12 +111,12 @@ monitor_ci_cd() {
echo "Uptime: $(uptime)" echo "Uptime: $(uptime)"
echo echo
log_info "Harbor Status:" log_info "Docker Registry Status:"
if docker ps --format "table {{.Names}}\t{{.Status}}" | grep -q harbor; then if docker ps --format "table {{.Names}}\t{{.Status}}" | grep -q registry; then
log_success "Harbor containers are running" log_success "Docker Registry containers are running"
docker ps --format "table {{.Names}}\t{{.Status}}" | grep harbor docker ps --format "table {{.Names}}\t{{.Status}}" | grep registry
else else
log_error "Harbor containers are not running" log_error "Docker Registry containers are not running"
fi fi
echo echo
@ -144,10 +144,10 @@ monitor_ci_cd() {
# Registry health check # Registry health check
log_info "Registry Health Check:" log_info "Registry Health Check:"
if curl -s -f -k https://localhost/api/v2.0/health > /dev/null 2>&1; then if curl -s -f -k https://localhost/v2/_catalog > /dev/null 2>&1; then
log_success "Harbor registry is accessible" log_success "Docker Registry is accessible"
else else
log_error "Harbor registry is not accessible" log_error "Docker Registry is not accessible"
fi fi
} }