Clean up steps for configuring DinD container to work with Harbor in CI workflow

continuist 2025-06-30 21:40:32 -04:00
parent 4a4eddbb72
commit 6e07ea8d0f
2 changed files with 207 additions and 350 deletions

@@ -7,7 +7,7 @@ on:
branches: [ main ]
env:
REGISTRY: ${{ secrets.CI_HOST }}:5000
REGISTRY: ${{ secrets.CI_HOST }}:80
IMAGE_NAME: ${{ secrets.APP_NAME || 'sharenet' }}
jobs:
@@ -21,21 +21,52 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup DinD Environment
run: |
# Check if DinD container already exists and is running
if ! docker ps --format "table {{.Names}}" | grep -q "^ci-dind$"; then
echo "Starting new DinD container..."
# Start DinD container for isolated CI operations
docker run -d \
--name ci-dind \
--privileged \
-p 2375:2375 \
-e DOCKER_TLS_CERTDIR="" \
docker:dind
# Wait for DinD to be ready
echo "Waiting for DinD container to be ready..."
timeout 60 bash -c 'until docker exec ci-dind docker version; do sleep 2; done'
# Copy Harbor certificate to DinD container
docker cp /etc/ssl/registry/registry.crt ci-dind:/usr/local/share/ca-certificates/
docker exec ci-dind update-ca-certificates
# Login to Harbor registry
echo "${{ secrets.HARBOR_CI_PASSWORD }}" | docker exec -i ci-dind docker login ${{ secrets.CI_HOST }}:80 -u ${{ secrets.HARBOR_CI_USER }} --password-stdin
echo "DinD container setup complete"
else
echo "DinD container already running, reusing existing setup"
# Verify DinD is still working
docker exec ci-dind docker version
fi
- name: Setup Containerized Testing Environment
run: |
# Start testing environment using dedicated compose file
docker compose -f docker-compose.test.yml up -d
# Start testing environment using dedicated compose file inside DinD
docker exec ci-dind docker compose -f /workspace/docker-compose.test.yml up -d
# Wait for all services to be ready
echo "Waiting for testing environment to be ready..."
timeout 120 bash -c 'until docker compose -f docker-compose.test.yml ps | grep -q "healthy" && docker compose -f docker-compose.test.yml ps | grep -q "Up"; do sleep 2; done'
timeout 120 bash -c 'until docker exec ci-dind docker compose -f /workspace/docker-compose.test.yml ps | grep -q "healthy" && docker exec ci-dind docker compose -f /workspace/docker-compose.test.yml ps | grep -q "Up"; do sleep 2; done'
# Verify all containers are running
docker compose -f docker-compose.test.yml ps
docker exec ci-dind docker compose -f /workspace/docker-compose.test.yml ps
- name: Install SQLx CLI in Rust container
run: |
docker exec ci-cd-test-rust cargo install sqlx-cli --no-default-features --features postgres
docker exec ci-dind docker exec ci-cd-test-rust cargo install sqlx-cli --no-default-features --features postgres
- name: Validate migration files
env:
@@ -43,16 +74,16 @@ jobs:
run: |
# Wait for PostgreSQL to be ready
echo "Waiting for PostgreSQL to be ready..."
timeout 60 bash -c 'until pg_isready -h localhost -p 5432 -U postgres; do sleep 1; done'
timeout 60 bash -c 'until docker exec ci-dind docker exec ci-cd-test-postgres pg_isready -h localhost -p 5432 -U postgres; do sleep 1; done'
# Create test database if it doesn't exist
docker exec ci-cd-test-rust sqlx database create --database-url "$DATABASE_URL" || true
docker exec ci-dind docker exec ci-cd-test-rust sqlx database create --database-url "$DATABASE_URL" || true
# Run initial migrations to set up the database
docker exec ci-cd-test-rust sqlx migrate run --database-url "$DATABASE_URL" || true
docker exec ci-dind docker exec ci-cd-test-rust sqlx migrate run --database-url "$DATABASE_URL" || true
# Validate migration files
docker exec ci-cd-test-rust ./scripts/validate_migrations.sh --verbose
docker exec ci-dind docker exec ci-cd-test-rust ./scripts/validate_migrations.sh --verbose
- name: Run backend tests
working-directory: ./backend
@@ -60,27 +91,27 @@ jobs:
DATABASE_URL: postgres://postgres:postgres@localhost:5432/sharenet_test
run: |
# Run tests with increased parallelism for Rust
docker exec ci-cd-test-rust cargo test --all --jobs 4
docker exec ci-cd-test-rust cargo clippy --all -- -D warnings
docker exec ci-cd-test-rust cargo fmt --all -- --check
docker exec ci-dind docker exec ci-cd-test-rust cargo test --all --jobs 4
docker exec ci-dind docker exec ci-cd-test-rust cargo clippy --all -- -D warnings
docker exec ci-dind docker exec ci-cd-test-rust cargo fmt --all -- --check
- name: Install frontend dependencies
run: |
docker exec ci-cd-test-node npm ci
docker exec ci-dind docker exec ci-cd-test-node npm ci
- name: Run frontend tests
run: |
docker exec ci-cd-test-node npm run lint
docker exec ci-cd-test-node npm run type-check
docker exec ci-cd-test-node npm run build
docker exec ci-dind docker exec ci-cd-test-node npm run lint
docker exec ci-dind docker exec ci-cd-test-node npm run type-check
docker exec ci-dind docker exec ci-cd-test-node npm run build
- name: Cleanup Containerized Environment
- name: Cleanup Testing Environment
if: always()
run: |
# Stop and remove all testing containers
docker compose -f docker-compose.test.yml down
# Stop and remove all testing containers (but keep DinD running)
docker exec ci-dind docker compose -f /workspace/docker-compose.test.yml down
# Job 2: Building - Uses DinD for isolated image building and pushing
# Job 2: Building - Build and push Docker images using same DinD
build-and-push:
name: Build and Push Docker Images (DinD)
needs: [test]
@@ -91,61 +122,54 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup DinD environment
- name: Set up Docker Buildx in DinD
run: |
# Start DinD container for isolated Docker operations
docker run -d \
--name ci-cd-build-dind \
--privileged \
-p 2375:2375 \
-e DOCKER_TLS_CERTDIR="" \
docker:dind
# Wait for DinD to be ready
echo "Waiting for DinD container to be ready..."
timeout 60 bash -c 'until docker exec ci-cd-build-dind docker version; do sleep 2; done'
# Configure Docker for Harbor registry (needed for pushing images)
docker exec ci-cd-build-dind sh -c 'echo "{\"insecure-registries\": [\"${{ secrets.CI_HOST }}:5000\"]}" > /etc/docker/daemon.json'
docker exec ci-cd-build-dind sh -c 'kill -HUP 1'
# Wait for Docker daemon to reload
sleep 5
# Verify DinD is working
docker exec ci-cd-build-dind docker version
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Set up Docker Buildx inside the existing DinD container
docker exec ci-dind docker buildx create --use --name ci-builder || true
docker exec ci-dind docker buildx inspect --bootstrap
- name: Build and push backend image
uses: docker/build-push-action@v5
with:
context: ./backend
file: ./backend/Dockerfile
push: true
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/backend:${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64
run: |
# Build and push backend image using DinD
docker exec ci-dind docker buildx build \
--platform linux/amd64 \
--tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/backend:${{ github.sha }} \
--push \
--cache-from type=gha \
--cache-to type=gha,mode=max \
-f ./backend/Dockerfile \
./backend
- name: Build and push frontend image
uses: docker/build-push-action@v5
with:
context: ./frontend
file: ./frontend/Dockerfile
push: true
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/frontend:${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64
run: |
# Build and push frontend image using DinD
docker exec ci-dind docker buildx build \
--platform linux/amd64 \
--tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/frontend:${{ github.sha }} \
--push \
--cache-from type=gha \
--cache-to type=gha,mode=max \
-f ./frontend/Dockerfile \
./frontend
- name: Cleanup DinD environment
- name: Cleanup Build Environment
if: always()
run: |
# Stop and remove DinD container
docker stop ci-cd-build-dind || true
docker rm ci-cd-build-dind || true
# Clean up test containers but keep DinD running for reuse
docker exec ci-dind docker system prune -f || true
# Check if DinD needs restart due to resource accumulation
DISK_USAGE=$(docker exec ci-dind df -h /var/lib/docker 2>/dev/null | tail -1 | awk '{print $5}' | sed 's/%//' || echo "0")
echo "DinD disk usage: ${DISK_USAGE}%"
# Restart DinD if disk usage is high (>80%)
if [ "$DISK_USAGE" -gt 80 ]; then
echo "WARNING: High disk usage (${DISK_USAGE}%), restarting DinD container..."
docker restart ci-dind
echo "DinD container restarted"
else
echo "Disk usage acceptable (${DISK_USAGE}%), keeping DinD running"
fi
# Job 3: Deployment - Runs directly on production runner (no DinD needed)
deploy:
@@ -177,7 +201,7 @@ jobs:
- name: Configure Docker for Harbor access
run: |
# Configure Docker to access Harbor registry on CI Linode
echo '{"insecure-registries": ["${{ secrets.CI_HOST }}:5000"]}' | sudo tee /etc/docker/daemon.json
echo '{"insecure-registries": ["${{ secrets.CI_HOST }}:80"]}' | sudo tee /etc/docker/daemon.json
sudo systemctl restart docker
# Wait for Docker to be ready

@@ -478,7 +478,23 @@ sudo usermod -aG docker SERVICE_USER
### Step 5: Set Up Harbor Container Registry
#### 5.1 Generate SSL Certificates
Harbor provides a secure, enterprise-grade container registry with vulnerability scanning, role-based access control, and audit logging.
#### 5.1 Create Harbor Service User
```bash
# Create dedicated user and group for Harbor
sudo groupadd -r harbor
sudo useradd -r -g harbor -s /bin/bash -m -d /opt/harbor harbor
# Set a random password so the account is not left passwordless (the value is not recorded; use sudo su - harbor for access)
echo "harbor:$(openssl rand -base64 32)" | sudo chpasswd
# Add harbor user to docker group
sudo usermod -aG docker harbor
```
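A quick spot-check that the account came out as intended (illustrative; exact output varies by distro):

```bash
# Confirm the harbor user exists with the expected home directory
getent passwd harbor

# Confirm the docker group membership took effect
id harbor
```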
#### 5.2 Generate SSL Certificates
```bash
# Create system SSL directory for Harbor certificates
@@ -516,20 +532,15 @@ EOF
# Generate self-signed certificate with proper SANs
sudo openssl req -x509 -newkey rsa:4096 -keyout /etc/ssl/registry/registry.key -out /etc/ssl/registry/registry.crt -days 365 -nodes -extensions v3_req -config /etc/ssl/registry/openssl.conf
# Set proper permissions
# Set proper permissions for harbor user
sudo chown harbor:harbor /etc/ssl/registry/registry.key
sudo chown harbor:harbor /etc/ssl/registry/registry.crt
sudo chmod 600 /etc/ssl/registry/registry.key
sudo chmod 644 /etc/ssl/registry/registry.crt
sudo chmod 644 /etc/ssl/registry/openssl.conf
```
**Important**: The certificate is now generated with proper Subject Alternative Names (SANs) including your IP address, which is required for TLS certificate validation by Docker and other clients.
**Note**: The permissions are set to:
- `registry.key`: `600` (owner read/write only) - private key must be secure
- `registry.crt`: `644` (owner read/write, group/others read) - certificate can be read by services
- `openssl.conf`: `644` (owner read/write, group/others read) - configuration file for reference
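Before moving on, it is worth confirming both the SANs and the file modes; a quick check using the same openssl invocation as the troubleshooting section below:

```bash
# The Subject Alternative Name block should list your CI/CD IP
openssl x509 -in /etc/ssl/registry/registry.crt -noout -text | grep -A 2 "Subject Alternative Name"

# Ownership and modes should match the list above
ls -l /etc/ssl/registry/
```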
#### 5.1.1 Configure Docker to Trust Harbor Registry
#### 5.3 Configure Docker to Trust Harbor Registry
```bash
# Add the certificate to system CA certificates
@@ -549,301 +560,126 @@ EOF
sudo systemctl restart docker
```
**Important**: Replace `YOUR_CI_CD_IP` with your actual CI/CD Linode IP address. This configuration tells Docker to trust your Harbor registry and allows Docker login to work properly.
**Important**: Replace `YOUR_CI_CD_IP` with your actual CI/CD Linode IP address.
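A minimal way to confirm the certificate actually landed in the system trust store (a sketch assuming a Debian/Ubuntu-style `ca-certificates` bundle path):

```bash
# A self-signed certificate verifies against the bundle only if update-ca-certificates picked it up
openssl verify -CAfile /etc/ssl/certs/ca-certificates.crt /etc/ssl/registry/registry.crt
# Expected output: /etc/ssl/registry/registry.crt: OK
```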
#### 5.2 Generate Secure Passwords and Secrets
#### 5.4 Install Harbor
```bash
# Set environment variables for Harbor
# Switch to harbor user
sudo su - harbor
# Set environment variables
export HARBOR_HOSTNAME=$YOUR_ACTUAL_IP
export HARBOR_ADMIN_PASSWORD="Harbor12345"
# Generate secure database password for Harbor
export DB_PASSWORD=$(openssl rand -base64 32 | tr -d "=+/" | cut -c1-25)
# Generate secure secrets for Harbor
export CORE_SECRET=$(openssl rand -hex 16)
export JOBSERVICE_SECRET=$(openssl rand -hex 16)
echo "Generated secrets:"
echo "DB_PASSWORD: $DB_PASSWORD"
echo "CORE_SECRET: $CORE_SECRET"
echo "JOBSERVICE_SECRET: $JOBSERVICE_SECRET"
# Save secrets securely for future reference
cat > /opt/APP_NAME/harbor-secrets.txt << EOF
# Harbor Secrets - KEEP THESE SECURE!
# Generated on: $(date)
# CI/CD IP: $YOUR_ACTUAL_IP
HARBOR_HOSTNAME=$HARBOR_HOSTNAME
HARBOR_ADMIN_PASSWORD=$HARBOR_ADMIN_PASSWORD
DB_PASSWORD=$DB_PASSWORD
CORE_SECRET=$CORE_SECRET
JOBSERVICE_SECRET=$JOBSERVICE_SECRET
# IMPORTANT: Store this file securely and keep a backup!
# You will need these secrets for:
# - Harbor upgrades
# - Database troubleshooting
# - Disaster recovery
# - Service restoration
EOF
# Set secure permissions on secrets file
chmod 600 /opt/APP_NAME/harbor-secrets.txt
echo "Secrets saved to /opt/APP_NAME/harbor-secrets.txt"
echo "IMPORTANT: Keep this file secure and backed up!"
```
**Important**:
- Change the default passwords for production use. The default admin password is `Harbor12345`; change it immediately after first login.
- The generated secrets (`CORE_SECRET` and `JOBSERVICE_SECRET`) are cryptographically secure random values used for encrypting sensitive data.
- Store these secrets securely as they will be needed for Harbor upgrades or troubleshooting.
- **CRITICAL**: The secrets file contains sensitive information. Keep it secure and backed up!
#### 5.3 Install Harbor Using Official Installer
```bash
# Switch to DEPLOY_USER (who has sudo access)
sudo su - DEPLOY_USER
cd /opt/APP_NAME
# Download Harbor 2.10.0 offline installer
sudo wget https://github.com/goharbor/harbor/releases/download/v2.10.0/harbor-offline-installer-v2.10.0.tgz
sudo tar -xzf harbor-offline-installer-v2.10.0.tgz
# Download and install Harbor
cd /opt/harbor
wget https://github.com/goharbor/harbor/releases/download/v2.10.0/harbor-offline-installer-v2.10.0.tgz
tar -xzf harbor-offline-installer-v2.10.0.tgz
cd harbor
sudo cp harbor.yml.tmpl harbor.yml
cp harbor.yml.tmpl harbor.yml
# Edit harbor.yml configuration
sudo nano harbor.yml
nano harbor.yml
```
**Important**: In the `harbor.yml` file, update the following variables:
**Important**: In the `harbor.yml` file, update the following (a scripted alternative is sketched after this list):
- `hostname: YOUR_CI_CD_IP` (replace with your actual IP)
- `certificate: /etc/ssl/registry/registry.crt`
- `private_key: /etc/ssl/registry/registry.key`
- `password: <the DB_PASSWORD generated in Step 5.2>`
**Note**: Leave `harbor_admin_password` as `Harbor12345` for now. This will be changed at first login through the UI after launching Harbor.
#### 5.4 Prepare and Install Harbor
- `password: <the DB_PASSWORD generated above>`
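If you prefer to script the edit instead of using nano, something like the following works against the stock template (a sketch; the `root123` database default and exact key layout are assumptions about the v2.10.0 template, so review the file afterwards):

```bash
# Non-interactive edits to harbor.yml; $HARBOR_HOSTNAME and $DB_PASSWORD were exported above
sed -i "s/^hostname:.*/hostname: $HARBOR_HOSTNAME/" harbor.yml
sed -i "s|certificate: .*|certificate: /etc/ssl/registry/registry.crt|" harbor.yml
sed -i "s|private_key: .*|private_key: /etc/ssl/registry/registry.key|" harbor.yml
sed -i "s/password: root123/password: $DB_PASSWORD/" harbor.yml
```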
```bash
# Prepare Harbor configuration
sudo ./prepare
# Install Harbor with Trivy vulnerability scanner
sudo ./install.sh --with-trivy
cd ..
# Change harbor folder permissions recursively to SERVICE_USER
sudo chown -R SERVICE_USER:SERVICE_USER harbor
# Switch to SERVICE_USER to run installation again as non-root
sudo su - SERVICE_USER
cd /opt/APP_NAME/harbor
# Install Harbor as SERVICE_USER (permissions are partially adjusted correctly)
./prepare
./install.sh --with-trivy
# Exit SERVICE_USER shell
# Exit harbor user shell
exit
```
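Once `install.sh` finishes, the Harbor compose stack should already be running; a quick sanity check before wiring up systemd (sketch):

```bash
# All Harbor services (core, portal, registry, db, redis, jobservice, trivy, nginx) should show as Up
sudo docker compose -f /opt/harbor/harbor/docker-compose.yml ps
```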
#### 5.5 Fix Permission Issues
#### 5.5 Create Systemd Service
```bash
# Switch back to DEPLOY_USER to adjust the permissions for various env files
cd /opt/APP_NAME/harbor
# Create systemd service file for Harbor
sudo tee /etc/systemd/system/harbor.service << EOF
[Unit]
Description=Harbor Container Registry
After=docker.service
Requires=docker.service
sudo chown SERVICE_USER:SERVICE_USER common/config/jobservice/env
sudo chown SERVICE_USER:SERVICE_USER common/config/db/env
sudo chown SERVICE_USER:SERVICE_USER common/config/registryctl/env
sudo chown SERVICE_USER:SERVICE_USER common/config/trivy-adapter/env
sudo chown SERVICE_USER:SERVICE_USER common/config/core/env
[Service]
Type=oneshot
RemainAfterExit=yes
User=harbor
Group=harbor
WorkingDirectory=/opt/harbor/harbor
ExecStart=/usr/bin/docker compose up -d
ExecStop=/usr/bin/docker compose down
ExecReload=/bin/sh -c '/usr/bin/docker compose down && /usr/bin/docker compose up -d'
# Exit DEPLOY_USER shell
exit
[Install]
WantedBy=multi-user.target
EOF
# Enable and start Harbor service
sudo systemctl daemon-reload
sudo systemctl enable harbor.service
sudo systemctl start harbor.service
# Monitor startup (can take 2-3 minutes)
sudo journalctl -u harbor.service -f
```
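Once the unit reports active, a minimal health probe (this is the same `/api/v2.0/health` endpoint used elsewhere in this guide; `-k` skips verification of the self-signed certificate):

```bash
# Confirm the unit is active and Harbor answers over HTTPS
systemctl is-active harbor.service
curl -k https://localhost/api/v2.0/health
# Expected: a JSON body reporting each component as "healthy"
```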
#### 5.6 Test Harbor Installation
#### 5.6 Configure Harbor Access
1. **Access Harbor Web UI**: Open `https://YOUR_CI_CD_IP` in your browser
2. **Login**: Username `admin`, Password `Harbor12345`
3. **Change admin password**: Click admin icon → Change Password
4. **Create project**: Projects → New Project → Name: `APP_NAME`, Access Level: `Public`
5. **Create CI user**: Administration → Users → New User → Username: `ci-user`, Password: `your-secure-password`
6. **Assign role**: Projects → `APP_NAME` → Members → + User → Select `ci-user`, Role: `Developer` (an API-based alternative is sketched below)
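The same setup can be scripted against Harbor's v2 REST API instead of the web UI. A hedged sketch (endpoints, payloads, and the Developer `role_id` are taken from the Harbor v2 API and should be double-checked against your version):

```bash
# Create the public project
curl -k -u admin:Harbor12345 -X POST "https://YOUR_CI_CD_IP/api/v2.0/projects" \
  -H "Content-Type: application/json" \
  -d '{"project_name": "APP_NAME", "metadata": {"public": "true"}}'

# Create the CI user (use your own secure password)
curl -k -u admin:Harbor12345 -X POST "https://YOUR_CI_CD_IP/api/v2.0/users" \
  -H "Content-Type: application/json" \
  -d '{"username": "ci-user", "email": "ci@example.com", "realname": "CI User", "password": "your-secure-password"}'

# Add ci-user to the project as Developer (role_id 2 = Developer)
curl -k -u admin:Harbor12345 -X POST "https://YOUR_CI_CD_IP/api/v2.0/projects/APP_NAME/members" \
  -H "Content-Type: application/json" \
  -d '{"role_id": 2, "member_user": {"username": "ci-user"}}'
```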
#### 5.7 Test Harbor Setup
```bash
# Switch to SERVICE_USER
sudo su - SERVICE_USER
# Switch to DEPLOY_USER for testing
sudo su - DEPLOY_USER
cd /opt/harbor/harbor
# Test Docker login and push
docker login YOUR_CI_CD_IP:80 -u ci-user -p "your-secure-password"
# Verify you can stop Harbor. All Harbor containers should stop.
docker compose down
# Verify you can bring Harbor back up. All Harbor containers should start back up.
docker compose up -d
# Exit SERVICE_USER shell
exit
```
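Since Harbor is now managed by systemd, the same stop/start cycle can also be exercised through the unit, which is what production restarts will use (sketch):

```bash
# Stop via systemd; the harbor-* containers should disappear
sudo systemctl stop harbor.service
sudo docker ps --filter "name=harbor" --format '{{.Names}}'

# Start again and re-check health
sudo systemctl start harbor.service
curl -k https://localhost/api/v2.0/health
```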
**Important**: Harbor startup can take 2-3 minutes as it initializes the database and downloads vulnerability databases. The health check will ensure all services are running properly.
#### 5.7 Wait for Harbor Startup
```bash
# Monitor Harbor startup progress
cd /opt/APP_NAME/harbor
docker compose logs -f
```
**Expected output**: You should see logs from all Harbor services (core, database, redis, registry, portal, nginx, jobservice, trivy) starting up. Wait until you see "Harbor has been installed and started successfully" or similar success messages.
#### 5.8 Test Harbor Setup
```bash
# Check if all Harbor containers are running
cd /opt/APP_NAME/harbor
docker compose ps
# Test Harbor API (HTTPS)
curl -k https://localhost/api/v2.0/health
# Test Harbor UI (HTTPS)
curl -k -I https://localhost
# Expected output: HTTP/1.1 200 OK
```
**Important**: All Harbor services should show as "Up" in the `docker compose ps` output. The health check should return a JSON response indicating all services are healthy.
#### 5.9 Access Harbor Web UI
1. **Open your browser** and navigate to: `https://YOUR_CI_CD_IP`
2. **Login with default credentials**:
- Username: `admin`
- Password: `Harbor12345` (or your configured password)
3. **Change the admin password**:
- Click on the user icon "admin" in the top right corner of the UI
- Click "Change Password" from the dropdown menu
- Enter your current password and a new secure password
- Click "OK" to save the new password
#### 5.10 Configure Harbor for Public Read, Authenticated Write
1. **Create Application Project**:
- Go to **Projects** → **New Project**
- Set **Project Name**: `APP_NAME` (replace with your actual application name)
- Set **Access Level**: `Public`
- Click **OK**
2. **Create a User for CI/CD**:
- Go to **Administration** → **Users** → **New User**
- Set **Username**: `ci-user`
- Set **Email**: `ci@example.com`
- Set **Password**: `your-secure-password`
- Click **OK**
3. **Assign Project Role to ci-user**:
- Go to **Projects** → **APP_NAME** → **Members** → **+ User**
- Select **User**: `ci-user`
- Set **Role**: `Developer`
- Click **OK**
**Note**: With a public project, anyone can pull images without authentication, but only authenticated users (like `ci-user`) can push images. This balances ease of use for deployments with security for image management.
#### 5.11 Test Harbor Authentication and Access Model
```bash
# Test Docker login to Harbor
docker login YOUR_CI_CD_IP
# Enter: ci-user and your-secure-password
# Create a test image
# Create and push test image
echo "FROM alpine:latest" > /tmp/test.Dockerfile
echo "RUN echo 'Hello from Harbor test image'" >> /tmp/test.Dockerfile
docker build -f /tmp/test.Dockerfile -t YOUR_CI_CD_IP:80/APP_NAME/test:latest /tmp
docker push YOUR_CI_CD_IP:80/APP_NAME/test:latest
# Build and tag test image for APP_NAME project
docker build -f /tmp/test.Dockerfile -t YOUR_CI_CD_IP/APP_NAME/test:latest /tmp
# Push to Harbor (requires authentication)
docker push YOUR_CI_CD_IP/APP_NAME/test:latest
# Test public pull (no authentication required)
docker logout YOUR_CI_CD_IP
docker pull YOUR_CI_CD_IP/APP_NAME/test:latest
# Verify the image was pulled successfully
docker images | grep APP_NAME/test
# Test public pull (no authentication)
docker logout YOUR_CI_CD_IP:80
docker pull YOUR_CI_CD_IP:80/APP_NAME/test:latest
# Test that unauthorized push is blocked
echo "FROM alpine:latest" > /tmp/unauthorized.Dockerfile
echo "RUN echo 'This push should fail'" >> /tmp/unauthorized.Dockerfile
docker build -f /tmp/unauthorized.Dockerfile -t YOUR_CI_CD_IP/APP_NAME/unauthorized:latest /tmp
docker push YOUR_CI_CD_IP/APP_NAME/unauthorized:latest
docker build -f /tmp/unauthorized.Dockerfile -t YOUR_CI_CD_IP:80/APP_NAME/unauthorized:latest /tmp
docker push YOUR_CI_CD_IP:80/APP_NAME/unauthorized:latest
# Expected: This should fail with authentication error
# Clean up test images
docker rmi YOUR_CI_CD_IP/APP_NAME/test:latest
docker rmi YOUR_CI_CD_IP/APP_NAME/unauthorized:latest
# Clean up
docker rmi YOUR_CI_CD_IP:80/APP_NAME/test:latest
docker rmi YOUR_CI_CD_IP:80/APP_NAME/unauthorized:latest
exit
```
**Expected behavior**:
- ✅ **Push requires authentication**: `docker push` only works when logged in
- ✅ **Pull works without authentication**: `docker pull` works without login for public projects
- ✅ **Unauthorized push is blocked**: `docker push` fails when not logged in
- ✅ **Web UI accessible**: Harbor UI is available at `https://YOUR_CI_CD_IP`
- ✅ Push requires authentication
- ✅ Pull works without authentication
- ✅ Unauthorized push is blocked
- ✅ Web UI accessible at `https://YOUR_CI_CD_IP`
#### 5.12 Harbor Access Model Summary
Your Harbor registry is now configured with the following access model:
**APP_NAME Project**:
- ✅ **Pull (read)**: No authentication required
- ✅ **Push (write)**: Requires authentication
- ✅ **Web UI**: Accessible to view images
**Security Features**:
- ✅ **Vulnerability scanning**: Automatic CVE scanning with Trivy (an on-demand scan is sketched below)
- ✅ **Role-based access control**: Different user roles (admin, developer, guest)
- ✅ **Audit logs**: Complete trail of all operations
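The Trivy scanning mentioned above can also be kicked off on demand; a sketch against the v2 API (the artifact path assumes the `test:latest` image pushed earlier, and the endpoint should be verified against your Harbor version):

```bash
# Trigger a vulnerability scan of the test image (admin credentials assumed)
curl -k -u admin:YOUR_ADMIN_PASSWORD -X POST \
  "https://YOUR_CI_CD_IP/api/v2.0/projects/APP_NAME/repositories/test/artifacts/latest/scan"
# Results appear under the artifact's "Vulnerabilities" tab in the UI
```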
#### 5.13 Troubleshooting Common Harbor Issues
**Certificate Issues**:
```bash
# If you get "tls: failed to verify certificate" errors:
# 1. Verify certificate has proper SANs
openssl x509 -in /etc/ssl/registry/registry.crt -text -noout | grep -A 5 "Subject Alternative Name"
# 2. Regenerate certificate if SANs are missing
sudo openssl req -x509 -newkey rsa:4096 -keyout /etc/ssl/registry/registry.key -out /etc/ssl/registry/registry.crt -days 365 -nodes -extensions v3_req -config /etc/ssl/registry/openssl.conf
# 3. Restart Harbor and Docker
cd /opt/APP_NAME/harbor && docker compose down && docker compose up -d
sudo systemctl restart docker
```
**Connection Issues**:
```bash
# If you get "connection refused" errors:
# 1. Check if Harbor is running
docker compose ps
# 2. Check Harbor logs
docker compose logs
# 3. Verify ports are open
netstat -tuln | grep -E ':(80|443)'
```
**Docker Configuration Issues**:
```bash
# If Docker still can't connect after certificate fixes:
# 1. Verify Docker daemon configuration
cat /etc/docker/daemon.json
# 2. Check if certificate is in system CA store
@@ -1147,9 +983,7 @@ sudo docker run -d \
-e DOCKER_TLS_CERTDIR="" \
docker:dind
# Wait for DinD to be ready
echo "Waiting for DinD container to be ready..."
timeout 60 bash -c 'until sudo docker exec ci-dind docker version; do sleep 2; done'
# Give DinD a minute or two to come up (the Docker daemon inside the container needs time to start)
# Test DinD connectivity
sudo docker exec ci-dind docker version
@@ -1169,22 +1003,17 @@ sudo docker exec ci-dind docker version
# Navigate to the application directory
cd /opt/APP_NAME
# Configure Docker daemon in DinD for Harbor registry
sudo docker exec ci-dind sh -c 'echo "{\"insecure-registries\": [\"localhost:5000\"]}" > /etc/docker/daemon.json'
# Copy Harbor certificate to DinD container
sudo docker cp /etc/ssl/registry/registry.crt ci-dind:/usr/local/share/ca-certificates/
sudo docker exec ci-dind update-ca-certificates
# Reload Docker daemon in DinD
sudo docker exec ci-dind sh -c 'kill -HUP 1'
# Wait for Docker daemon to reload
sleep 5
# Test Harbor connectivity from DinD
# Test Harbor connectivity from DinD (using certificate trust)
sudo docker exec ci-dind docker pull alpine:latest
sudo docker exec ci-dind docker tag alpine:latest localhost:5000/test/alpine:latest
sudo docker exec ci-dind docker push localhost:5000/test/alpine:latest
sudo docker exec ci-dind docker tag alpine:latest YOUR_CI_CD_IP:80/test/alpine:latest
sudo docker exec ci-dind docker push YOUR_CI_CD_IP:80/test/alpine:latest
# Clean up test image
sudo docker exec ci-dind docker rmi localhost:5000/test/alpine:latest
sudo docker exec ci-dind docker rmi YOUR_CI_CD_IP:80/test/alpine:latest
```
**What this does**:
@@ -1204,16 +1033,18 @@ The CI/CD pipeline uses a three-stage approach with dedicated environments for e
- Rust toolchain for backend testing and migrations
- Node.js toolchain for frontend testing
- **Network**: All containers communicate through `ci-cd-test-network`
- **Cleanup**: `docker compose -f docker-compose.test.yml down`
- **Setup**: DinD container created, Harbor certificate installed, Docker login performed
- **Cleanup**: Testing containers removed, DinD container kept running
**Job 2 (Building) - Direct Docker Commands:**
- **Purpose**: Isolated image building and pushing to Harbor
- **Environment**: Single DinD container for Docker operations
- **Purpose**: Image building and pushing to Harbor
- **Environment**: Same DinD container from Job 1
- **Process**:
- Uses Docker Buildx for efficient building
- Builds backend and frontend images separately
- Pushes images to Harbor registry
- **Cleanup**: Simple container stop/remove
- **Harbor Access**: Reuses Harbor authentication from Job 1
- **Cleanup**: Docker resources pruned inside DinD; the container is kept running and restarted only when disk usage climbs past 80% (see the check below)
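One way to see the reuse in action: the credentials written by Job 1's `docker login` persist inside the DinD container, so Job 2 can confirm they are present before building (illustrative):

```bash
# The auths entry for the registry should already exist from Job 1
docker exec ci-dind cat /root/.docker/config.json
```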
**Job 3 (Deployment) - `docker-compose.prod.yml`:**
- **Purpose**: Production deployment with pre-built images
@@ -1239,11 +1070,11 @@ docker exec ci-dind docker run --rm alpine:latest echo "DinD is working!"
# Test Harbor integration
docker exec ci-dind docker pull alpine:latest
docker exec ci-dind docker tag alpine:latest localhost:5000/test/dind-test:latest
docker exec ci-dind docker push localhost:5000/test/dind-test:latest
docker exec ci-dind docker tag alpine:latest YOUR_CI_CD_IP:80/test/dind-test:latest
docker exec ci-dind docker push YOUR_CI_CD_IP:80/test/dind-test:latest
# Clean up test
docker exec ci-dind docker rmi localhost:5000/test/dind-test:latest
docker exec ci-dind docker rmi YOUR_CI_CD_IP:80/test/dind-test:latest
```
**Expected Output**:
@@ -1800,12 +1631,14 @@ curl http://localhost:3001/health
Go to your Forgejo repository and add these secrets in **Settings → Secrets and Variables → Actions**:
**Required Secrets:**
- `CI_CD_IP`: Your CI/CD Linode IP address
- `CI_HOST`: Your CI/CD Linode IP address (used for Harbor registry access)
- `PRODUCTION_IP`: Your Production Linode IP address
- `DEPLOY_USER`: The deployment user name (e.g., `deploy`, `ci`, `admin`)
- `SERVICE_USER`: The service user name (e.g., `appuser`, `service`, `app`)
- `APP_NAME`: Your application name (e.g., `sharenet`, `myapp`)
- `POSTGRES_PASSWORD`: A strong password for the PostgreSQL database
- `HARBOR_CI_USER`: Harbor username for CI operations (e.g., `ci-user`)
- `HARBOR_CI_PASSWORD`: Harbor password for CI operations (the password you set for ci-user)
**Optional Secrets (for domain users):**
- `DOMAIN`: Your domain name (e.g., `example.com`)