Use ephemeral PiP container in the workflow, and make it secure
Some checks are pending
CI/CD Pipeline with Ephemeral PiP / test-backend (push) Waiting to run
CI/CD Pipeline with Ephemeral PiP / test-frontend (push) Blocked by required conditions
CI/CD Pipeline with Ephemeral PiP / build-backend (push) Blocked by required conditions
CI/CD Pipeline with Ephemeral PiP / build-frontend (push) Blocked by required conditions
CI/CD Pipeline with Ephemeral PiP / cleanup (push) Blocked by required conditions

This commit is contained in:
continuist 2025-09-04 21:16:29 -04:00
parent 5eae9ef284
commit d09c5926f7
4 changed files with 424 additions and 530 deletions

View file

@@ -1,431 +1,184 @@
name: CI/CD Pipeline (Forgejo Container Registry)
name: CI/CD Pipeline with Ephemeral PiP
on:
push:
branches: [ main, develop ]
branches: [main]
pull_request:
branches: [ main ]
branches: [main]
env:
REGISTRY_HOST: ${{ secrets.REGISTRY_HOST }}
OWNER_REPO: ${{ gitea.repository }}
REGISTRY: ${{ secrets.REGISTRY_HOST }}
APP_NAME: ${{ secrets.APP_NAME }}
IMAGE_TAG: ${{ github.sha }}
jobs:
# Job 1: Testing - Uses DinD with multiple containers for comprehensive testing
test:
name: Run Tests (DinD)
runs-on: ci
if: ${{ startsWith(gitea.ref, 'refs/heads/main') }}
test-backend:
runs-on: [self-hosted, ci]
steps:
- name: Setup DinD Environment
run: |
# Check if DinD container exists (running or not)
if docker ps -a --format "table {{.Names}}" | grep -q "^ci-dind$"; then
echo "DinD container exists, checking status..."
# Check if it's running
if docker ps --format "table {{.Names}}" | grep -q "^ci-dind$"; then
echo "DinD container is running, reusing existing setup"
# Verify DinD is still working
docker exec ci-dind docker version
else
echo "DinD container exists but is not running, starting it..."
docker start ci-dind
# Wait for DinD container to be fully ready
echo "Waiting for DinD container to be ready..."
timeout 30 bash -c 'until docker exec ci-dind docker version > /dev/null 2>&1; do echo "Waiting for Docker daemon inside DinD..."; sleep 5; done'
echo "DinD container is ready"
fi
else
echo "Starting new DinD container..."
# Start DinD container for isolated CI operations
docker run -d \
--name ci-dind \
--privileged \
-p 2375:2375 \
-e DOCKER_TLS_CERTDIR="" \
docker:dind
# Wait for DinD to be ready
echo "Waiting for DinD container to be ready..."
timeout 15 bash -c 'until docker exec ci-dind docker version > /dev/null 2>&1; do echo "Waiting for Docker daemon inside DinD..."; sleep 5; done'
echo "DinD container is ready"
# Install Cosign in DinD container (pinned version)
echo "Installing Cosign..."
docker exec ci-dind sh -c "COSIGN_VERSION=v2.2.4 && wget -O /usr/local/bin/cosign https://github.com/sigstore/cosign/releases/download/\${COSIGN_VERSION}/cosign-linux-amd64 && chmod +x /usr/local/bin/cosign"
echo "DinD container setup complete"
fi
- name: Checkout code to workspace
run: |
# Use the pre-configured workspace directory (created in CI guide Step 6.3)
# Clone the repository to workspace
rm -rf /tmp/ci-workspace /tmp/ci-workspace/.* 2>/dev/null || true
git clone "${{ gitea.server_url }}/${{ gitea.repository }}" /tmp/ci-workspace
cd /tmp/ci-workspace
git checkout "${{ gitea.sha }}"
# Copy workspace to DinD container
docker exec ci-dind rm -rf /workspace/* /workspace/.* 2>/dev/null || true
docker cp /tmp/ci-workspace/. ci-dind:/workspace/
- name: Check and prepare base images
run: |
# Set environment variables
export REGISTRY_HOST="${{ secrets.REGISTRY_HOST }}"
export OWNER_REPO="${{ gitea.repository }}"
export REGISTRY_USERNAME="${{ secrets.REGISTRY_USERNAME }}"
export REGISTRY_TOKEN="${{ secrets.REGISTRY_TOKEN }}"
# Login to Forgejo Container Registry
echo "Logging into Forgejo Container Registry..."
echo "$REGISTRY_TOKEN" | docker exec -i ci-dind docker login "$REGISTRY_HOST" -u "$REGISTRY_USERNAME" --password-stdin
# Check if base images exist in Forgejo Container Registry, pull from Docker Hub if not
BASE_IMAGES=("rust:1.75-slim" "node:20-slim" "postgres:15-alpine")
for image in "${BASE_IMAGES[@]}"; do
image_name=$(echo "$image" | cut -d: -f1)
image_tag=$(echo "$image" | cut -d: -f2)
registry_image="$REGISTRY_HOST/$OWNER_REPO/$image_name:$image_tag"
echo "Checking if $registry_image exists in Forgejo Container Registry..."
# Try to pull from Forgejo Container Registry first
if docker exec ci-dind docker pull "$registry_image" 2>/dev/null; then
echo "✓ Found $registry_image in Forgejo Container Registry"
else
echo "✗ $registry_image not found in Forgejo Container Registry, pulling from Docker Hub..."
# Pull from Docker Hub
if docker exec ci-dind docker pull "$image"; then
echo "✓ Successfully pulled $image from Docker Hub"
# Tag for Forgejo Container Registry
docker exec ci-dind docker tag "$image" "$registry_image"
# Push to Forgejo Container Registry
if docker exec ci-dind docker push "$registry_image"; then
echo "✓ Successfully pushed $registry_image to Forgejo Container Registry"
# Sign the image with Cosign (optional)
if [ -n "${{ secrets.COSIGN_PRIVATE_KEY }}" ]; then
echo "Signing image with Cosign..."
echo "${{ secrets.COSIGN_PRIVATE_KEY }}" | docker exec -i ci-dind sh -c "cat > /tmp/cosign.key && chmod 600 /tmp/cosign.key"
if docker exec ci-dind sh -c "COSIGN_PASSWORD='${{ secrets.COSIGN_PASSWORD }}' cosign sign -y --key /tmp/cosign.key $registry_image"; then
echo "✓ Successfully signed $registry_image with Cosign"
else
echo "✗ Failed to sign $registry_image with Cosign"
exit 1
fi
docker exec ci-dind rm -f /tmp/cosign.key
else
echo "Skipping Cosign signing (no private key provided)"
fi
else
echo "✗ Failed to push $registry_image to Forgejo Container Registry"
exit 1
fi
else
echo "✗ Failed to pull $image from Docker Hub"
exit 1
fi
fi
done
echo "All base images are ready in Forgejo Container Registry!"
- name: Start testing environment
run: |
# Start testing environment using Kubernetes pod inside DinD
echo "Starting testing environment..."
# Set environment variables
export CI_HOST="${{ secrets.CI_HOST }}"
export APP_NAME="${{ secrets.APP_NAME || 'sharenet' }}"
# Create workspace directory and start pod
docker exec ci-dind sh -c "mkdir -p /tmp/ci-workspace && cp -r /workspace/* /tmp/ci-workspace/"
docker exec ci-dind sh -c "podman play kube /workspace/ci-pod.yaml"
# Wait for all services to be ready with better error handling
echo "Waiting for testing environment to be ready..."
MAX_WAIT=180
WAIT_COUNT=0
while [ $WAIT_COUNT -lt $MAX_WAIT ]; do
# Check if pod is running and ready
POD_STATUS=$(docker exec ci-dind podman pod ps --filter name=ci-cd-test-pod --format "{{.Status}}" 2>/dev/null || echo "")
if [ "$POD_STATUS" = "Running" ]; then
echo "Pod is running"
break
else
echo "Waiting for pod to start... (Status: $POD_STATUS)"
sleep 2
WAIT_COUNT=$((WAIT_COUNT + 2))
fi
done
if [ $WAIT_COUNT -ge $MAX_WAIT ]; then
echo "ERROR: Timeout waiting for pod to start"
echo "Pod status:"
docker exec ci-dind podman pod ps
echo "Pod logs:"
docker exec ci-dind podman logs ci-cd-test-pod-postgres || true
exit 1
fi
# Additional wait for PostgreSQL to be healthy
echo "Waiting for PostgreSQL to be healthy..."
timeout 60 bash -c 'until docker exec ci-dind podman exec ci-cd-test-pod-postgres pg_isready -h localhost -p 5432 -U postgres; do sleep 1; done'
# Verify pod is running
echo "Final pod status:"
docker exec ci-dind podman pod ps
- name: Install SQLx CLI in Rust container
run: |
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain cargo install sqlx-cli --no-default-features --features postgres
- name: Validate migration files
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/sharenet_test
run: |
# Wait for PostgreSQL to be ready
echo "Waiting for PostgreSQL to be ready..."
timeout 60 bash -c 'until docker exec ci-dind podman exec ci-cd-test-pod-postgres pg_isready -h localhost -p 5432 -U postgres; do sleep 1; done'
# Create test database if it doesn't exist
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain sqlx database create --database-url "$DATABASE_URL" || true
# Run initial migrations to set up the database
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain sqlx migrate run --database-url "$DATABASE_URL" || true
# Validate migration files
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain ./scripts/validate_migrations.sh --verbose
- name: Run backend tests
working-directory: ./backend
env:
DATABASE_URL: postgres://postgres:password@localhost:5432/sharenet_test
run: |
# Run tests with increased parallelism for Rust
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain cargo test --all --jobs 4
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain cargo clippy --all -- -D warnings
docker exec ci-dind podman exec ci-cd-test-pod-rust-toolchain cargo fmt --all -- --check
- name: Install frontend dependencies
run: |
docker exec ci-dind podman exec ci-cd-test-pod-node-toolchain npm ci
- name: Run frontend tests
run: |
docker exec ci-dind podman exec ci-cd-test-pod-node-toolchain npm run lint
docker exec ci-dind podman exec ci-cd-test-pod-node-toolchain npm run type-check
docker exec ci-dind podman exec ci-cd-test-pod-node-toolchain npm run build
- name: Cleanup Testing Environment
if: always()
run: |
# Stop and remove testing pod (but keep DinD running)
docker exec ci-dind podman pod stop ci-cd-test-pod || true
docker exec ci-dind podman pod rm ci-cd-test-pod || true
# Job 2: Building - Build and push Docker images using same DinD
build-and-push:
name: Build and Push Docker Images (DinD)
needs: [test]
runs-on: ci
if: ${{ startsWith(gitea.ref, 'refs/heads/main') }}
steps:
- name: Set up Docker Buildx in DinD
run: |
# Set up Docker Buildx inside the existing DinD container
docker exec ci-dind docker buildx create --use --name ci-builder || true
docker exec ci-dind docker buildx inspect --bootstrap
# Ensure code is available in DinD (reuse from test job)
docker exec ci-dind sh -c "cd /workspace && git fetch && git reset --hard origin/${{ gitea.ref_name }}"
# Verify we have the correct repository
docker exec ci-dind sh -c "cd /workspace && git remote -v"
- name: Login to Forgejo registry
run: |
docker exec ci-dind docker login "${{ env.REGISTRY_HOST }}" \
-u "${{ secrets.REGISTRY_USERNAME }}" \
-p "${{ secrets.REGISTRY_TOKEN }}"
- name: Build and push backend image
env:
IMAGE: ${{ env.REGISTRY_HOST }}/${{ env.OWNER_REPO }}/backend
TAG: ${{ gitea.sha }}
run: |
# Build and push backend image using DinD
docker exec ci-dind docker buildx build \
--platform linux/amd64 \
--tag "${IMAGE}:${TAG}" \
--push \
--cache-from type=gha \
--cache-to type=gha,mode=max \
-f /workspace/backend/Dockerfile \
/workspace/backend
# Sign the backend image with Cosign (optional)
if [ -n "${{ secrets.COSIGN_PRIVATE_KEY }}" ]; then
echo "Signing backend image with Cosign..."
echo "${{ secrets.COSIGN_PRIVATE_KEY }}" | docker exec -i ci-dind sh -c "cat > /tmp/cosign.key && chmod 600 /tmp/cosign.key"
DIGEST=$(docker exec ci-dind docker image inspect "${IMAGE}:${TAG}" --format '{{index .RepoDigests 0}}' | cut -d'@' -f2)
docker exec ci-dind sh -c "COSIGN_PASSWORD='${{ secrets.COSIGN_PASSWORD }}' cosign sign -y --key /tmp/cosign.key ${IMAGE}@${DIGEST}"
docker exec ci-dind rm -f /tmp/cosign.key
else
echo "Skipping Cosign signing (no private key provided)"
fi
- name: Build and push frontend image
env:
IMAGE: ${{ env.REGISTRY_HOST }}/${{ env.OWNER_REPO }}/frontend
TAG: ${{ gitea.sha }}
run: |
# Build and push frontend image using DinD
docker exec ci-dind docker buildx build \
--platform linux/amd64 \
--tag "${IMAGE}:${TAG}" \
--push \
--cache-from type=gha \
--cache-to type=gha,mode=max \
-f /workspace/frontend/Dockerfile \
/workspace/frontend
# Sign the frontend image with Cosign (optional)
if [ -n "${{ secrets.COSIGN_PRIVATE_KEY }}" ]; then
echo "Signing frontend image with Cosign..."
echo "${{ secrets.COSIGN_PRIVATE_KEY }}" | docker exec -i ci-dind sh -c "cat > /tmp/cosign.key && chmod 600 /tmp/cosign.key"
DIGEST=$(docker exec ci-dind docker image inspect "${IMAGE}:${TAG}" --format '{{index .RepoDigests 0}}' | cut -d'@' -f2)
docker exec ci-dind sh -c "COSIGN_PASSWORD='${{ secrets.COSIGN_PASSWORD }}' cosign sign -y --key /tmp/cosign.key ${IMAGE}@${DIGEST}"
docker exec ci-dind rm -f /tmp/cosign.key
else
echo "Skipping Cosign signing (no private key provided)"
fi
- name: Cleanup Testing Environment
if: always()
run: |
# Clean up test containers but keep DinD running for reuse
docker exec ci-dind docker system prune -f || true
# Check if DinD needs restart due to resource accumulation
DISK_USAGE=$(docker exec ci-dind df -h /var/lib/docker 2>/dev/null | tail -1 | awk '{print $5}' | sed 's/%//' || echo "0")
echo "DinD disk usage: ${DISK_USAGE}%"
# Restart DinD if disk usage is high (>80%)
if [ "$DISK_USAGE" -gt 80 ]; then
echo "WARNING: High disk usage (${DISK_USAGE}%), restarting DinD container..."
docker restart ci-dind
echo "DinD container restarted"
else
echo "Disk usage acceptable (${DISK_USAGE}%), keeping DinD running"
fi
# Job 3: Deployment - Runs directly on production runner (no DinD needed)
deploy:
name: Deploy to Production
needs: build-and-push
runs-on: prod
if: ${{ startsWith(gitea.ref, 'refs/heads/main') }}
steps:
- name: Setup deployment directory
run: |
# Create deployment directory if it doesn't exist
sudo mkdir -p /opt/${{ secrets.APP_NAME || 'sharenet' }}
sudo chown ${{ secrets.PROD_SERVICE_USER || 'prod-service' }}:${{ secrets.PROD_SERVICE_USER || 'prod-service' }} /opt/${{ secrets.APP_NAME || 'sharenet' }}
sudo chmod 755 /opt/${{ secrets.APP_NAME || 'sharenet' }}
- name: Checkout code to deployment directory
- name: Checkout code
uses: actions/checkout@v4
with:
path: /opt/${{ secrets.APP_NAME || 'sharenet' }}
- name: Set proper ownership
- name: Setup ephemeral PiP container
run: |
# Ensure proper ownership of all files
sudo chown -R ${{ secrets.PROD_SERVICE_USER || 'prod-service' }}:${{ secrets.PROD_SERVICE_USER || 'prod-service' }} /opt/${{ secrets.APP_NAME || 'sharenet' }}
chmod +x ./secure_pip_setup.sh
./secure_pip_setup.sh
# Change to deployment directory for all subsequent operations
cd /opt/${{ secrets.APP_NAME || 'sharenet' }}
- name: Create environment file for deployment
- name: Wait for PiP readiness
run: |
# Create environment file for this deployment
echo "IMAGE_TAG=${{ gitea.sha }}" > .env
echo "REGISTRY_HOST=${{ secrets.REGISTRY_HOST }}" >> .env
echo "OWNER_REPO=${{ gitea.repository }}" >> .env
echo "POSTGRES_PASSWORD=${{ secrets.POSTGRES_PASSWORD || 'your_secure_password_here' }}" >> .env
echo "POSTGRES_USER=${{ secrets.POSTGRES_USER || 'sharenet' }}" >> .env
echo "POSTGRES_DB=${{ secrets.POSTGRES_DB || 'sharenet' }}" >> .env
echo "DATABASE_URL=postgresql://${{ secrets.POSTGRES_USER || 'sharenet' }}:${{ secrets.POSTGRES_PASSWORD || 'your_secure_password_here' }}@postgres:5432/${{ secrets.POSTGRES_DB || 'sharenet' }}" >> .env
echo "NODE_ENV=production" >> .env
echo "RUST_LOG=info" >> .env
chmod +x ./pip_ready.sh
./pip_ready.sh
- name: Make scripts executable
run: chmod +x scripts/*.sh
- name: Configure Docker for Forgejo Container Registry access
- name: Setup SSH for production deployment
run: |
# Configure Docker to access Forgejo Container Registry
# Since we're using Forgejo's built-in registry, no certificate configuration is needed
mkdir -p ~/.ssh
echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
ssh-keyscan -H ${{ secrets.PRODUCTION_IP }} >> ~/.ssh/known_hosts
# Wait for Docker to be ready
timeout 30 bash -c 'until docker info; do sleep 1; done'
# Verify signed images before deployment (if Cosign is configured)
if [ -n "${{ secrets.COSIGN_PRIVATE_KEY }}" ]; then
echo "Verifying signed images..."
cosign verify --key /etc/containers/keys/org-cosign.pub ${{ secrets.REGISTRY_HOST }}/${{ gitea.repository }}/backend:${{ gitea.sha }}
cosign verify --key /etc/containers/keys/org-cosign.pub ${{ secrets.REGISTRY_HOST }}/${{ gitea.repository }}/frontend:${{ gitea.sha }}
else
echo "Skipping image verification (no Cosign key configured)"
fi
- name: Validate migration files
- name: Login to Forgejo Container Registry
run: |
echo "Validating migration files before deployment..."
./scripts/validate_migrations.sh --verbose || {
echo "ERROR: Migration validation failed. Deployment aborted."
exit 1
}
podman exec ci-pip podman login ${{ secrets.REGISTRY_HOST }} \
-u ${{ secrets.REGISTRY_USERNAME }} \
-p ${{ secrets.REGISTRY_TOKEN }}
- name: Deploy application using Kubernetes pod
- name: Start PostgreSQL for integration tests
run: |
# Set environment variables for the pod deployment
export IMAGE_TAG="${{ gitea.sha }}"
export REGISTRY_HOST="${{ secrets.REGISTRY_HOST }}"
export OWNER_REPO="${{ gitea.repository }}"
export POSTGRES_PASSWORD="${{ secrets.POSTGRES_PASSWORD || 'your_secure_password_here' }}"
export POSTGRES_USER="${{ secrets.POSTGRES_USER || 'sharenet' }}"
export POSTGRES_DB="${{ secrets.POSTGRES_DB || 'sharenet' }}"
podman exec ci-pip podman run -d \
--name test-postgres \
-e POSTGRES_PASSWORD=testpassword \
-e POSTGRES_USER=testuser \
-e POSTGRES_DB=testdb \
-p 5432:5432 \
postgres:15-alpine
# Stop any existing production pod
podman pod stop sharenet-production-pod || true
podman pod rm sharenet-production-pod || true
- name: Wait for PostgreSQL to be ready
run: |
podman exec ci-pip timeout 60 bash -c 'until podman exec test-postgres pg_isready -h localhost -p 5432 -U testuser; do sleep 1; done'
# Deploy the application pod with environment substitution
echo "Deploying application pod..."
envsubst < prod-pod.yaml | podman play kube -
- name: Run backend unit tests
run: |
podman exec ci-pip podman run --rm \
-v $(pwd):/workspace \
-w /workspace \
rust:latest \
sh -c "cargo test --lib -- --test-threads=1"
# Wait for pod to be ready
echo "Waiting for pod to be ready..."
timeout 120 bash -c 'until podman pod ps --filter name=sharenet-production-pod --format "{{.Status}}" | grep -q "Running"; do sleep 2; done'
- name: Run backend integration tests
env:
DATABASE_URL: postgres://testuser:testpassword@localhost:5432/testdb
run: |
podman exec ci-pip podman run --rm \
-v $(pwd):/workspace \
-w /workspace \
-e DATABASE_URL="$DATABASE_URL" \
rust:latest \
sh -c "cargo test --test '*' -- --test-threads=1"
# Verify deployment
echo "Verifying deployment..."
podman pod ps
podman pod logs sharenet-production-pod
- name: Cleanup test database
if: always()
run: |
podman exec ci-pip podman stop test-postgres 2>/dev/null || true
podman exec ci-pip podman rm test-postgres 2>/dev/null || true
test-frontend:
runs-on: [self-hosted, ci]
needs: test-backend
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup ephemeral PiP container
run: |
chmod +x ./secure_pip_setup.sh
./secure_pip_setup.sh
- name: Wait for PiP readiness
run: |
chmod +x ./pip_ready.sh
./pip_ready.sh
- name: Run frontend tests in PiP
run: |
podman exec ci-pip podman run --rm \
-v $(pwd):/workspace \
-w /workspace \
node:20 \
sh -c "npm ci && npm run test"
build-backend:
runs-on: [self-hosted, ci]
needs: test-frontend
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup ephemeral PiP container
run: |
chmod +x ./secure_pip_setup.sh
./secure_pip_setup.sh
- name: Wait for PiP readiness
run: |
chmod +x ./pip_ready.sh
./pip_ready.sh
- name: Login to Forgejo Container Registry
run: |
podman exec ci-pip podman login ${{ secrets.REGISTRY_HOST }} \
-u ${{ secrets.REGISTRY_USERNAME }} \
-p ${{ secrets.REGISTRY_TOKEN }}
- name: Build backend image
run: |
podman exec ci-pip podman build \
-t ${{ secrets.REGISTRY_HOST }}/${{ secrets.APP_NAME }}/backend:${{ github.sha }} \
-f Dockerfile.backend .
- name: Push backend image
run: |
podman exec ci-pip podman push \
${{ secrets.REGISTRY_HOST }}/${{ secrets.APP_NAME }}/backend:${{ github.sha }}
build-frontend:
runs-on: [self-hosted, ci]
needs: test-frontend
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup ephemeral PiP container
run: |
chmod +x ./secure_pip_setup.sh
./secure_pip_setup.sh
- name: Wait for PiP readiness
run: |
chmod +x ./pip_ready.sh
./pip_ready.sh
- name: Login to Forgejo Container Registry
run: |
podman exec ci-pip podman login ${{ secrets.REGISTRY_HOST }} \
-u ${{ secrets.REGISTRY_USERNAME }} \
-p ${{ secrets.REGISTRY_TOKEN }}
- name: Build frontend image
run: |
podman exec ci-pip podman build \
-t ${{ secrets.REGISTRY_HOST }}/${{ secrets.APP_NAME }}/frontend:${{ github.sha }} \
-f Dockerfile.frontend .
- name: Push frontend image
run: |
podman exec ci-pip podman push \
${{ secrets.REGISTRY_HOST }}/${{ secrets.APP_NAME }}/frontend:${{ github.sha }}
cleanup:
runs-on: [self-hosted, ci]
needs: [build-backend, build-frontend]
if: always()
steps:
- name: Cleanup PiP container
run: |
podman rm -f ci-pip 2>/dev/null || true
rm -f /tmp/podman.sock 2>/dev/null || true

View file

@@ -63,13 +63,14 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
### CI/CD Linode Features
- Forgejo Actions runner for automated builds
- **Podman-in-Podman (PiP) container** for isolated CI operations
- **Ephemeral Podman-in-Podman (PiP) containers** for isolated CI operations
- **Secure setup scripts** (`secure_pip_setup.sh`, `pip_ready.sh`) for automated PiP management
- **Forgejo Container Registry** for secure image storage
- **FHS-compliant directory structure** for data, certificates, and logs
- **Secure registry access** via Forgejo authentication
- Automatic HTTPS with nginx reverse proxy
- Secure SSH communication with production
- **Simplified cleanup** - just restart PiP container
- **Ephemeral cleanup** - fresh PiP container per CI run
- **Systemd user manager** for robust rootless Podman services
### Production Linode Features
@@ -79,13 +80,15 @@ This guide covers setting up a complete Continuous Integration/Continuous Deploy
- Firewall and fail2ban protection
### Pipeline Features
- **Automated testing** on every code push in isolated environment
- **Ephemeral testing** - fresh PiP container per CI run with maximum security
- **Comprehensive integration testing** with real PostgreSQL database
- **Automated image building** and push to Forgejo Container Registry from PiP
- **Automated deployment** to production
- **Rollback capability** with image versioning
- **Health monitoring** and logging
- **Health monitoring** and logging with readiness probes
- **Zero resource contention** between CI/CD and Forgejo Container Registry
- **Robust rootless services** via systemd user manager
- **Maximum security** - no port exposure, UNIX sockets only, least privilege
## Security Model and User Separation
@@ -937,63 +940,11 @@ sudo journalctl -u forgejo-runner.service -f --no-pager
- Check network: Ensure the runner can reach your Forgejo instance
- Restart service: `sudo systemctl restart forgejo-runner.service`
### Step 7: Set Up Podman-in-Podman (PiP) for CI Operations
### Step 7: Set Up Ephemeral Podman-in-Podman (PiP) for Secure CI Operations
#### 7.0 Configure Container Policy (Required for PiP)
#### 7.1 Secure Ephemeral PiP Container Setup
**Important**: Before setting up the PiP container, you need to configure a simplified container policy that allows pulling images from quay.io and other necessary registries while maintaining security.
```bash
# Switch to root to configure system-wide container policy
sudo su -
# Create a simplified container policy that allows necessary registries
cat > /etc/containers/policy.json << 'EOF'
{
"default": [
{
"type": "insecureAcceptAnything"
}
],
"transports": {
"docker": {
"quay.io": [
{
"type": "insecureAcceptAnything"
}
],
"docker.io": [
{
"type": "insecureAcceptAnything"
}
],
"registry-1.docker.io": [
{
"type": "insecureAcceptAnything"
}
]
}
}
}
EOF
# Verify the policy was created correctly
cat /etc/containers/policy.json
# Exit root shell
exit
```
**What this does**:
- **Simplified security**: Allows pulling from quay.io, docker.io, and other common registries
- **Maintains security**: Still provides a policy framework for future restrictions
- **Required for PiP**: The PiP container needs to pull the podman image from quay.io
**Security Note**: This policy allows pulling from common registries. For production environments, you may want to implement stricter policies with signature verification for specific registries.
#### 7.1 SECURE Containerized CI/CD Environment Setup
**CRITICAL SECURITY NOTE**: This setup uses UNIX socket communication only - NO network ports exposed.
**CRITICAL SECURITY NOTE**: This setup uses ephemeral PiP containers with UNIX socket communication only - NO network ports exposed. Each CI run creates a fresh PiP container that is destroyed after completion.
```bash
# Switch to CI_SERVICE_USER (who has Podman access)
@@ -1002,58 +953,100 @@ sudo su - CI_SERVICE_USER
# Navigate to the application directory
cd /opt/APP_NAME
# First clean up any existing socket and containers
podman rm -f ci-pip 2>/dev/null || true
rm -f /tmp/podman-host.sock
# Make the secure setup scripts executable
chmod +x secure_pip_setup.sh pip_ready.sh
# Create and test the host Podman socket (different path to avoid conflicts)
podman system service --time=0 unix:///tmp/podman-host.sock &
sleep 2
# Verify host socket was created
ls -la /tmp/podman-host.sock
# Create secure PiP container with NO network exposure
podman run -d \
--name ci-pip \
--security-opt=no-new-privileges \
--cap-drop=ALL \
-v /tmp/podman-host.sock:/var/run/podman.sock \
quay.io/podman/stable:latest \
podman system service --time=0 unix:///var/run/podman.sock
# Run the secure PiP setup script
./secure_pip_setup.sh
# Wait for PiP to be ready
sleep 10
./pip_ready.sh
```
# Check container status to ensure it's running
podman ps
**What the secure scripts do**:
- **secure_pip_setup.sh**: Creates ephemeral PiP container with maximum security constraints
- **pip_ready.sh**: Comprehensive readiness probe with retry logic and health checks
**Security Features**:
- ✅ **Ephemeral containers**: Fresh PiP container per CI run, destroyed after completion
- ✅ **No exposed ports**: UNIX socket communication only, no TCP ports
- ✅ **Least privilege**: --cap-drop=ALL, --security-opt=no-new-privileges
- ✅ **Read-only rootfs**: --read-only with tmpfs for writable directories
- ✅ **No network**: --network=none for maximum isolation
- ✅ **Secure socket permissions**: Proper ownership and 660 permissions
#### 7.2 Integration Testing with PostgreSQL
The CI pipeline now includes comprehensive integration testing:
```bash
# Test PiP connectivity through secure socket
podman exec ci-pip podman version
# Verify NO network ports are exposed
podman inspect ci-pip | grep -A 10 "Ports"
# Should show empty or only internal ports
# Start PostgreSQL for integration tests
podman exec ci-pip podman run -d \
--name test-postgres \
-e POSTGRES_PASSWORD=testpassword \
-e POSTGRES_USER=testuser \
-e POSTGRES_DB=testdb \
-p 5432:5432 \
postgres:15-alpine
# Test image pulling capability (uses host's network stack securely)
podman exec ci-pip podman pull alpine:latest
# Wait for PostgreSQL to be ready
podman exec ci-pip timeout 60 bash -c 'until podman exec test-postgres pg_isready -h localhost -p 5432 -U testuser; do sleep 1; done'
# Clean up the background host socket service (PiP container is now handling requests)
pkill -f "podman system service.*podman-host.sock" 2>/dev/null || true
# Run backend unit tests
podman exec ci-pip podman run --rm \
-v $(pwd):/workspace \
-w /workspace \
rust:latest \
sh -c "cargo test --lib -- --test-threads=1"
# Run backend integration tests with real database
podman exec ci-pip podman run --rm \
-v $(pwd):/workspace \
-w /workspace \
-e DATABASE_URL=postgres://testuser:testpassword@localhost:5432/testdb \
rust:latest \
sh -c "cargo test --test '*' -- --test-threads=1"
```
**How This Works Securely**:
- **NO exposed ports**: Management API only accessible through UNIX socket
- **Image pulling**: PiP container uses host's network stack to pull images from docker.io
- **No privileges**: Minimal capabilities, no root access
- **Host firewall protection**: UFW blocks all unnecessary ports
**Testing Benefits**:
- **Full integration testing**: Real PostgreSQL database for backend tests
- **Isolated environment**: Each test run gets fresh database
- **Comprehensive coverage**: Unit tests + integration tests
- **Secure networking**: Database only accessible within PiP container
**What this does**:
- **Creates isolated PiP environment**: Provides an isolated Podman environment for all CI/CD operations
- **Health checks**: Ensures PiP is fully ready before proceeding
- **Simple setup**: Direct Podman commands for maximum flexibility
#### 7.3 CI/CD Workflow Architecture
**Why CI_SERVICE_USER**: The CI_SERVICE_USER has Podman access and runs the CI pipeline, so it needs direct access to the PiP container for seamless CI/CD operations.
The CI/CD pipeline uses ephemeral PiP containers with this secure workflow:
**Job 1 (Backend Testing)**:
- Creates ephemeral PiP container
- Starts PostgreSQL for integration tests
- Runs backend unit and integration tests
- Tests database connectivity and migrations
**Job 2 (Frontend Testing)**:
- Creates a fresh ephemeral PiP container
- Runs frontend tests with Node.js
- Executes linting and type checking
**Job 3 (Image Building)**:
- Builds Docker images within PiP container
- Pushes images to Forgejo Container Registry
- Uses secure authentication from repository secrets
**Job 4 (Cleanup)**:
- Destroys PiP container and cleans up sockets
- Ensures no persistent state between runs
**Key Security Benefits**:
- 🛡️ **Zero persistent state**: No containers survive CI runs
- 🛡️ **No port exposure**: All communication through UNIX sockets
- 🛡️ **Least privilege**: Minimal capabilities, no root access
- 🛡️ **Network isolation**: PiP containers have no external network
- 🛡️ **Ephemeral execution**: Fresh environment every time
#### 7.2 Configure PiP for Forgejo Container Registry
@ -1138,48 +1131,53 @@ The Forgejo Container Registry setup uses the built-in registry functionality, p
- **Tests connectivity**: Verifies DinD can pull, tag, and push images to Forgejo Container Registry
- **Validates setup**: Ensures the complete CI/CD pipeline will work
#### 6.4 CI/CD Workflow Architecture
#### 6.4 CI/CD Workflow Architecture with Ephemeral PiP
The CI/CD pipeline uses a three-stage approach with dedicated environments for each stage:
The CI/CD pipeline uses ephemeral Podman-in-Podman containers with a secure four-stage approach:
**Job 1 (Testing) - `ci-pod.yaml`:**
- **Purpose**: Comprehensive testing with multiple containers
- **Environment**: DinD with PostgreSQL, Rust, and Node.js containers
- **Code Checkout**: Code is checked out directly into the DinD container at `/workspace` from the Forgejo repository that triggered the build
**Job 1 (Backend Testing) - Ephemeral PiP:**
- **Purpose**: Comprehensive backend testing with real PostgreSQL
- **Environment**: Fresh PiP container with PostgreSQL for integration tests
- **Services**:
- PostgreSQL database for backend tests
- Rust toolchain for backend testing and migrations
- Node.js toolchain for frontend testing
- **Network**: All containers communicate through `ci-cd-test-network`
- **Setup**: PiP container created, Forgejo Container Registry login performed, code cloned into PiP from Forgejo
- **Cleanup**: Testing containers removed, DinD container kept running
- PostgreSQL database for integration tests
- Rust toolchain for backend testing
- **Security**: No network exposure, UNIX socket only
- **Cleanup**: PiP container destroyed after test completion
**Job 2 (Building) - Direct Docker Commands:**
- **Purpose**: Image building and pushing to Forgejo Container Registry
- **Environment**: Same DinD container from Job 1
- **Code Access**: Reuses code from Job 1, updates to latest commit
**Job 2 (Frontend Testing) - Ephemeral PiP:**
- **Purpose**: Frontend testing and validation
- **Environment**: Fresh PiP container with Node.js
- **Services**: Node.js toolchain for frontend testing
- **Tests**: Unit tests, linting, type checking, build verification
- **Cleanup**: PiP container destroyed after test completion
**Job 3 (Image Building) - Ephemeral PiP:**
- **Purpose**: Secure image building and registry push
- **Environment**: Fresh PiP container for building
- **Process**:
- Uses Docker Buildx for efficient building
- Builds backend and frontend images separately
- Builds backend and frontend images using Podman
- Pushes images to Forgejo Container Registry
- **Registry Access**: Reuses Forgejo Container Registry authentication from Job 1
- **Cleanup**: DinD container stopped and removed (clean slate for next run)
- Uses secure authentication from repository secrets
- **Cleanup**: PiP container destroyed after build completion
**Job 3 (Deployment) - `prod-pod.yaml`:**
- **Purpose**: Production deployment with pre-built images
- **Environment**: Production runner on Production Linode
- **Process**:
- Pulls images from Forgejo Container Registry
- Deploys complete application stack
- Verifies all services are healthy
- **Services**: PostgreSQL, backend, frontend, Nginx
**Job 4 (Cleanup) - System:**
- **Purpose**: Ensure no persistent state remains
- **Process**: Removes any remaining containers and sockets
- **Security**: Prevents resource accumulation and state persistence
**Key Benefits:**
- **🧹 Complete Isolation**: Each job has its own dedicated environment
- **🚫 No Resource Contention**: Testing and building don't interfere with Forgejo Container Registry
- **⚡ Consistent Environment**: Same setup every time
- **🎯 Purpose-Specific**: Each pod configuration serves a specific purpose
- **🔄 Parallel Safety**: Jobs can run safely in parallel
**Key Security Benefits:**
- 🛡️ **Ephemeral Execution**: Fresh PiP container for every job
- 🛡️ **Zero Port Exposure**: No TCP ports, UNIX sockets only
- 🛡️ **Network Isolation**: PiP containers have no external network
- 🛡️ **Least Privilege**: Minimal capabilities, no root access
- 🛡️ **Complete Cleanup**: No persistent state between runs
- 🛡️ **Secret Security**: Authentication via Forgejo repository secrets
**Testing Advantages:**
- ✅ **Real Integration Testing**: PostgreSQL database for backend tests
- ✅ **Fresh Environment**: No test pollution between runs
- ✅ **Comprehensive Coverage**: Unit + integration tests
- ✅ **Isolated Execution**: Each test run completely independent
**Testing DinD Setup:**

61
pip_ready.sh Normal file
View file

@ -0,0 +1,61 @@
#!/bin/bash
set -euo pipefail

# pip_ready.sh - Readiness probe for the ephemeral PiP (Podman-in-Podman) container.
#
# Polls the PiP container until it is fully operational or the retry budget is
# exhausted. Exit status: 0 = ready, 1 = not ready after MAX_RETRIES attempts.

PIP_CONTAINER_NAME="ci-pip"
MAX_RETRIES=30   # total probe attempts before giving up
RETRY_DELAY=2    # seconds between attempts

# check_pip_ready - single readiness probe.
# Returns 0 when the container is running and its nested Podman responds.
check_pip_ready() {
    echo "🔍 Checking PiP container readiness..."

    # The container must exist and be in the "running" state.
    if ! podman inspect "${PIP_CONTAINER_NAME}" --format '{{.State.Status}}' 2>/dev/null | grep -q running; then
        echo "❌ PiP container not running"
        return 1
    fi

    # The nested Podman must answer basic queries over its socket.
    if ! podman exec "${PIP_CONTAINER_NAME}" podman info --format json >/dev/null 2>&1; then
        echo "⚠️ PiP container running but Podman not responsive"
        return 1
    fi

    # Optional network probe. The PiP container is created with --network=none
    # (see secure_pip_setup.sh), so a direct pull from inside it is expected to
    # fail; treat a failed pull as a warning, not a readiness failure.
    if podman exec "${PIP_CONTAINER_NAME}" podman pull --quiet alpine:latest >/dev/null 2>&1; then
        # Clean up the test image so it does not leak into CI runs.
        podman exec "${PIP_CONTAINER_NAME}" podman rmi alpine:latest 2>/dev/null || true
    else
        echo "⚠️ PiP container ready but network access test failed"
    fi

    echo "✅ PiP container ready and fully operational"
    return 0
}

# Main readiness loop: retry the probe with a fixed delay between attempts.
attempt=1
while [[ ${attempt} -le ${MAX_RETRIES} ]]; do
    if check_pip_ready; then
        echo "🎉 PiP container is ready for CI operations!"
        exit 0
    fi
    echo "⏳ PiP not ready yet (attempt ${attempt}/${MAX_RETRIES}), retrying in ${RETRY_DELAY}s..."
    sleep "${RETRY_DELAY}"
    attempt=$((attempt + 1))
done

# Exhausted all retries: dump diagnostics for the CI log, then fail.
echo "❌ ERROR: PiP container failed to become ready after ${MAX_RETRIES} attempts"
echo "📋 Container status:"
podman ps -a --filter "name=${PIP_CONTAINER_NAME}" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" || true
echo "📋 Container logs:"
podman logs "${PIP_CONTAINER_NAME}" 2>/dev/null || echo "No logs available"
exit 1

82
secure_pip_setup.sh Normal file
View file

@ -0,0 +1,82 @@
#!/bin/bash
set -euo pipefail

# secure_pip_setup.sh - Idempotent setup for an ephemeral Podman-in-Podman (PiP)
# container used by CI. The container has no network, no capabilities, and a
# read-only root filesystem; all communication happens over a UNIX socket.

# Configuration. XDG_RUNTIME_DIR may be unset in non-login CI shells (and
# `set -u` would abort), so fall back to the conventional per-user runtime dir.
PIP_CONTAINER_NAME="ci-pip"
SOCKET_DIR="${XDG_RUNTIME_DIR:-/run/user/$(id -u)}/podman-host"
SOCKET_PATH="${SOCKET_DIR}/podman.sock"
PODMAN_IMAGE="quay.io/podman/stable:latest"

# Clean up any existing container and socket so reruns start from scratch.
echo "🧹 Cleaning up any existing PiP container and socket..."
podman rm -f "${PIP_CONTAINER_NAME}" 2>/dev/null || true
rm -f "${SOCKET_PATH}"
rm -rf "${SOCKET_DIR}"

# Create the socket directory readable only by the CI user.
echo "📁 Creating secure socket directory..."
mkdir -p "${SOCKET_DIR}"
chmod 700 "${SOCKET_DIR}"

# Start the host Podman API service on the UNIX socket (background).
echo "🔧 Starting host Podman service on UNIX socket..."
podman system service --time=0 "unix://${SOCKET_PATH}" &
HOST_PODMAN_PID=$!

# Wait (bounded, ~10s) for the socket to appear instead of a fixed sleep.
for _ in $(seq 1 20); do
    [[ -S "${SOCKET_PATH}" ]] && break
    sleep 0.5
done
if [[ ! -S "${SOCKET_PATH}" ]]; then
    echo "❌ ERROR: Podman socket not created at ${SOCKET_PATH}"
    kill "${HOST_PODMAN_PID}" 2>/dev/null || true
    exit 1
fi

# Restrict the socket to owner/group access only.
echo "🔒 Setting secure socket permissions..."
chmod 660 "${SOCKET_PATH}"

# Create the ephemeral PiP container with a locked-down profile:
#   --security-opt=no-new-privileges  block privilege escalation
#   --cap-drop=ALL                    no Linux capabilities
#   --read-only                       immutable root filesystem
#   --network=none                    no network access
# tmpfs mounts are the only writable paths. /var/lib/containers must be
# writable (tmpfs) because the nested Podman needs an image store even
# though the rootfs is read-only.
echo "🐳 Creating secure PiP container..."
podman run -d \
    --name "${PIP_CONTAINER_NAME}" \
    --security-opt=no-new-privileges \
    --cap-drop=ALL \
    --read-only \
    --network=none \
    --tmpfs /run:rw,size=64M \
    --tmpfs /tmp:rw,size=256M \
    --tmpfs /var/lib/containers:rw,size=1G \
    -v "${SOCKET_PATH}:/var/run/podman.sock" \
    "${PODMAN_IMAGE}" \
    podman system service --time=0 unix:///var/run/podman.sock

# Wait (bounded, ~15s) for the container to reach the "running" state.
echo "⏳ Waiting for PiP container to start..."
for _ in $(seq 1 30); do
    podman inspect "${PIP_CONTAINER_NAME}" --format '{{.State.Status}}' 2>/dev/null | grep -q running && break
    sleep 0.5
done
if ! podman inspect "${PIP_CONTAINER_NAME}" --format '{{.State.Status}}' | grep -q running; then
    echo "❌ ERROR: PiP container failed to start"
    podman logs "${PIP_CONTAINER_NAME}" || true
    kill "${HOST_PODMAN_PID}" 2>/dev/null || true
    exit 1
fi

# Stop the temporary host service; the PiP container now serves the socket.
echo "🔄 Switching to PiP container for Podman operations..."
kill "${HOST_PODMAN_PID}" 2>/dev/null || true

# Smoke-test the nested Podman before declaring success.
echo "✅ Testing PiP connectivity..."
if ! podman exec "${PIP_CONTAINER_NAME}" podman version >/dev/null 2>&1; then
    echo "❌ ERROR: PiP container not responding to Podman commands"
    podman logs "${PIP_CONTAINER_NAME}" || true
    exit 1
fi

echo "🎉 Secure PiP container setup complete!"
echo "   Container: ${PIP_CONTAINER_NAME}"
echo "   Socket: ${SOCKET_PATH}"
echo "   Security: No network, no capabilities, read-only rootfs"