Refactored to add various scripts to the repository instead of having the user manually create them
Some checks are pending
CI/CD Pipeline / Test Backend (push) Waiting to run
CI/CD Pipeline / Test Frontend (push) Waiting to run
CI/CD Pipeline / Build and Push Docker Images (push) Blocked by required conditions
CI/CD Pipeline / Deploy to Production (push) Blocked by required conditions

This commit is contained in:
continuist 2025-06-28 11:43:57 -04:00
parent 61117b6fa6
commit 41f0bd8c4f
8 changed files with 1438 additions and 170 deletions

View file

@ -39,6 +39,9 @@ jobs:
toolchain: stable toolchain: stable
override: true override: true
- name: Install SQLx CLI
run: cargo install sqlx-cli --no-default-features --features postgres
- name: Cache Rust dependencies - name: Cache Rust dependencies
uses: actions/cache@v3 uses: actions/cache@v3
with: with:
@ -50,8 +53,30 @@ jobs:
restore-keys: | restore-keys: |
${{ runner.os }}-cargo- ${{ runner.os }}-cargo-
- name: Make scripts executable
run: chmod +x scripts/*.sh
- name: Validate migration files
env:
DATABASE_URL: postgres://postgres:postgres@localhost:5432/${{ secrets.APP_NAME || 'sharenet' }}_test
run: |
# Wait for PostgreSQL to be ready
echo "Waiting for PostgreSQL to be ready..."
timeout 60 bash -c 'until pg_isready -h localhost -p 5432 -U postgres; do sleep 1; done'
# Create test database if it doesn't exist
sqlx database create --database-url "$DATABASE_URL" || true
# Run initial migrations to set up the database
sqlx migrate run --database-url "$DATABASE_URL" || true
# Validate migration files
./scripts/validate_migrations.sh --verbose
- name: Run backend tests - name: Run backend tests
working-directory: ./backend working-directory: ./backend
env:
DATABASE_URL: postgres://postgres:postgres@localhost:5432/${{ secrets.APP_NAME || 'sharenet' }}_test
run: | run: |
cargo test --all cargo test --all
cargo clippy --all -- -D warnings cargo clippy --all -- -D warnings
@ -128,6 +153,15 @@ jobs:
if: github.ref == 'refs/heads/main' if: github.ref == 'refs/heads/main'
steps: steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install SQLx CLI
run: cargo install sqlx-cli --no-default-features --features postgres
- name: Make scripts executable
run: chmod +x scripts/*.sh
- name: Deploy to production server - name: Deploy to production server
uses: appleboy/ssh-action@v1.0.3 uses: appleboy/ssh-action@v1.0.3
with: with:
@ -154,5 +188,12 @@ jobs:
# Make scripts executable # Make scripts executable
chmod +x scripts/*.sh chmod +x scripts/*.sh
# Validate migrations before deployment
echo "Validating migration files before deployment..."
./scripts/validate_migrations.sh --verbose || {
echo "ERROR: Migration validation failed. Deployment aborted."
exit 1
}
# Run deployment using the new deployment script # Run deployment using the new deployment script
./scripts/deploy.sh deploy ./scripts/deploy.sh deploy

View file

@ -747,73 +747,87 @@ forgejo-runner list
### Step 8: Set Up Monitoring and Cleanup ### Step 8: Set Up Monitoring and Cleanup
#### 8.1 Create Monitoring Script #### 8.1 Monitoring Script
**Important**: The repository includes a pre-configured monitoring script in the `scripts/` directory that can be used for both CI/CD and production monitoring.
**Repository Script**:
- `scripts/monitor.sh` - Comprehensive monitoring script with support for both CI/CD and production environments
**To use the repository monitoring script**:
```bash ```bash
cat > ~/monitor.sh << 'EOF' # Clone the repository if not already done
#!/bin/bash git clone https://your-forgejo-instance/your-username/APP_NAME.git /tmp/monitoring-setup
cd /tmp/monitoring-setup
echo "=== CI/CD Server Status ===" # Make the script executable
echo "Date: $(date)" chmod +x scripts/monitor.sh
echo "Uptime: $(uptime)"
echo ""
echo "=== Docker Status ===" # Test CI/CD monitoring
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" ./scripts/monitor.sh --type ci-cd
echo ""
echo "=== Registry Status ===" # Test production monitoring (if you have a production setup)
cd /opt/registry ./scripts/monitor.sh --type production
docker-compose ps
echo ""
echo "=== Actions Runner Status ===" # Clean up
sudo systemctl status forgejo-runner.service --no-pager cd /
echo "" rm -rf /tmp/monitoring-setup
```
echo "=== System Resources ==="
echo "CPU Usage:"
top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1
echo "Memory Usage:"
free -h | grep Mem
echo "Disk Usage:"
df -h /
echo ""
echo "=== Recent Logs ==="
docker-compose logs --tail=10
EOF
**Alternative: Create a local copy for convenience**:
```bash
# Copy the script to your home directory for easy access
cp /tmp/monitoring-setup/scripts/monitor.sh ~/monitor.sh
chmod +x ~/monitor.sh chmod +x ~/monitor.sh
# Test the local copy
~/monitor.sh --type ci-cd
``` ```
#### 8.2 Create Cleanup Script **Note**: The repository script is more comprehensive and includes proper error handling, colored output, and support for both CI/CD and production environments. It automatically detects the environment and provides appropriate monitoring information.
#### 8.2 Cleanup Script
**Important**: The repository includes a pre-configured cleanup script in the `scripts/` directory that can be used for both CI/CD and production cleanup operations.
**Repository Script**:
- `scripts/cleanup.sh` - Comprehensive cleanup script with support for both CI/CD and production environments
**To use the repository cleanup script**:
```bash ```bash
cat > ~/cleanup.sh << 'EOF' # Clone the repository if not already done
#!/bin/bash git clone https://your-forgejo-instance/your-username/APP_NAME.git /tmp/cleanup-setup
cd /tmp/cleanup-setup
echo "Cleaning up old Docker images..." # Make the script executable
chmod +x scripts/cleanup.sh
# Remove unused images # Test CI/CD cleanup (dry run first)
docker image prune -f ./scripts/cleanup.sh --type ci-cd --dry-run
# Remove unused volumes # Run CI/CD cleanup
docker volume prune -f ./scripts/cleanup.sh --type ci-cd
# Remove unused networks # Test production cleanup (dry run first)
docker network prune -f ./scripts/cleanup.sh --type production --dry-run
# Remove old registry images (keep last 10 tags per repository) # Clean up
cd /opt/registry cd /
docker-compose exec registry registry garbage-collect /etc/docker/registry/config.yml rm -rf /tmp/cleanup-setup
echo "Cleanup complete!"
EOF
chmod +x ~/cleanup.sh
``` ```
**Alternative: Create a local copy for convenience**:
```bash
# Copy the script to your home directory for easy access
cp /tmp/cleanup-setup/scripts/cleanup.sh ~/cleanup.sh
chmod +x ~/cleanup.sh
# Test the local copy (dry run)
~/cleanup.sh --type ci-cd --dry-run
```
**Note**: The repository script is more comprehensive and includes proper error handling, colored output, dry-run mode, and support for both CI/CD and production environments. It automatically detects the environment and provides appropriate cleanup operations.
#### 8.3 Test Cleanup Script #### 8.3 Test Cleanup Script
```bash ```bash
@ -823,8 +837,11 @@ docker pull nginx:latest
docker tag alpine:latest test-cleanup:latest docker tag alpine:latest test-cleanup:latest
docker tag nginx:latest test-cleanup2:latest docker tag nginx:latest test-cleanup2:latest
# Test cleanup with dry run first
./scripts/cleanup.sh --type ci-cd --dry-run
# Run the cleanup script # Run the cleanup script
./cleanup.sh ./scripts/cleanup.sh --type ci-cd
# Verify cleanup worked # Verify cleanup worked
echo "Checking remaining images:" echo "Checking remaining images:"
@ -844,15 +861,31 @@ docker network ls
- Remaining images should be minimal (only actively used ones) - Remaining images should be minimal (only actively used ones)
**If something goes wrong**: **If something goes wrong**:
- Check script permissions: `ls -la ~/cleanup.sh` - Check script permissions: `ls -la scripts/cleanup.sh`
- Verify Docker access: `docker ps` - Verify Docker access: `docker ps`
- Check registry access: `cd /opt/registry && docker-compose ps` - Check registry access: `cd /opt/registry && docker-compose ps`
- Run manually: `bash -x ~/cleanup.sh` - Run manually: `bash -x scripts/cleanup.sh`
#### 8.4 Set Up Automated Cleanup #### 8.4 Set Up Automated Cleanup
```bash ```bash
(crontab -l 2>/dev/null; echo "0 3 * * * /home/SERVICE_USER/cleanup.sh") | crontab - # Create a cron job to run cleanup daily at 3 AM using the repository script
(crontab -l 2>/dev/null; echo "0 3 * * * cd /tmp/cleanup-setup && ./scripts/cleanup.sh --type ci-cd >> /tmp/cleanup.log 2>&1") | crontab -
# Verify the cron job was added
crontab -l
```
**What this does:**
- **Runs automatically**: The cleanup script runs every day at 3:00 AM
- **Frequency**: Daily cleanup to prevent disk space issues
- **Logging**: All cleanup output is logged to `/tmp/cleanup.log`
- **What it cleans**: Unused Docker images, volumes, networks, and registry images
**Alternative: Use a local copy for automated cleanup**:
```bash
# If you created a local copy, use that instead
(crontab -l 2>/dev/null; echo "0 3 * * * ~/cleanup.sh --type ci-cd >> ~/cleanup.log 2>&1") | crontab -
``` ```
### Step 9: Configure Firewall ### Step 9: Configure Firewall
@ -1154,18 +1187,31 @@ IMAGE_NAME=APP_NAME
IMAGE_TAG=latest IMAGE_TAG=latest
# Database Configuration # Database Configuration
DATABASE_URL=postgresql://SERVICE_USER:your_secure_password_here@postgres:5432/APP_NAME POSTGRES_DB=sharenet
POSTGRES_USER=sharenet
DATABASE_URL=postgresql://sharenet:your_secure_password_here@postgres:5432/sharenet
# Application Configuration # Application Configuration
NODE_ENV=production NODE_ENV=production
RUST_LOG=info RUST_LOG=info
RUST_BACKTRACE=1
EOF EOF
``` ```
**Important**: Replace `YOUR_CI_CD_IP` with your actual CI/CD Linode IP address. **Important**: Replace `YOUR_CI_CD_IP` with your actual CI/CD Linode IP address.
**Default Environment Variables** (from `docker-compose.yml`):
- `POSTGRES_DB=sharenet` - PostgreSQL database name
- `POSTGRES_USER=sharenet` - PostgreSQL username
- `POSTGRES_PASSWORD=changeme` - PostgreSQL password (should be changed)
- `REGISTRY=your-username/sharenet` - Docker registry path (used as fallback)
- `IMAGE_NAME=your-username/sharenet` - Docker image name (used as fallback)
- `IMAGE_TAG=latest` - Docker image tag (used as fallback)
**Note**: The database user and database name can be controlled via the `POSTGRES_USER` and `POSTGRES_DB` secrets in your Forgejo repository settings. If you set these secrets, they will override the default values used in this environment file. **Note**: The database user and database name can be controlled via the `POSTGRES_USER` and `POSTGRES_DB` secrets in your Forgejo repository settings. If you set these secrets, they will override the default values used in this environment file.
**Security Note**: Always change the default `POSTGRES_PASSWORD` from `changeme` to a strong, unique password in production.
#### 18.4 Verify Repository Contents #### 18.4 Verify Repository Contents
```bash ```bash
@ -1193,86 +1239,96 @@ head -20 .forgejo/workflows/ci.yml
**Expected output**: You should see the `docker-compose.yml` file, `nginx/nginx.conf` file, `.forgejo/workflows/ci.yml` file, and other project files from your repository. **Expected output**: You should see the `docker-compose.yml` file, `nginx/nginx.conf` file, `.forgejo/workflows/ci.yml` file, and other project files from your repository.
#### 18.5 Create Deployment Script #### 18.5 Deployment Scripts
**Important**: The repository includes pre-configured deployment scripts in the `scripts/` directory that are used by the CI/CD pipeline. These scripts handle safe production deployments with database migrations, backups, and rollback capabilities.
**Repository Scripts** (used by CI/CD pipeline):
- `scripts/deploy.sh` - Main deployment script with migration support
- `scripts/deploy-local.sh` - Local development deployment script
- `scripts/migrate.sh` - Database migration management
- `scripts/validate_migrations.sh` - Migration validation
- `scripts/monitor.sh` - Comprehensive monitoring script for both CI/CD and production environments
- `scripts/cleanup.sh` - Comprehensive cleanup script for both CI/CD and production environments
- `scripts/backup.sh` - Comprehensive backup script for both CI/CD and production environments
**To use the repository deployment scripts**:
```bash ```bash
cat > /opt/APP_NAME/deploy.sh << 'EOF' # The scripts are already available in the cloned repository
#!/bin/bash cd /opt/APP_NAME
# Deployment script for APP_NAME # Make the scripts executable
set -e chmod +x scripts/deploy.sh scripts/deploy-local.sh
echo "Deploying APP_NAME..." # Test local deployment
./scripts/deploy-local.sh status
# Pull latest code from repository # Run local deployment
git pull origin main ./scripts/deploy-local.sh deploy
# Pull latest images # Test production deployment (dry run)
docker-compose pull ./scripts/deploy.sh check
# Stop existing containers # Run production deployment
docker-compose down ./scripts/deploy.sh deploy
# Start new containers
docker-compose up -d
# Clean up old images
docker image prune -f
# Verify deployment
echo "Verifying deployment..."
sleep 10 # Give containers time to start
if docker-compose ps | grep -q "Up"; then
echo "Deployment successful! All containers are running."
docker-compose ps
else
echo "Deployment failed! Some containers are not running."
docker-compose ps
docker-compose logs --tail=20
exit 1
fi
echo "Deployment complete!"
EOF
chmod +x /opt/APP_NAME/deploy.sh
``` ```
#### 18.6 Create Backup Script **Alternative: Create a local copy for convenience**:
```bash ```bash
cat > /opt/APP_NAME/backup.sh << 'EOF' # Copy the local deployment script to the application directory for easy access
#!/bin/bash cp scripts/deploy-local.sh /opt/APP_NAME/deploy-local.sh
chmod +x /opt/APP_NAME/deploy-local.sh
# Backup script for APP_NAME # Test the local copy
set -e cd /opt/APP_NAME
./deploy-local.sh status
BACKUP_DIR="/opt/APP_NAME/backups"
DATE=$(date +%Y%m%d_%H%M%S)
mkdir -p $BACKUP_DIR
# Backup database
docker-compose exec -T postgres pg_dump -U ${POSTGRES_USER:-sharenet} ${POSTGRES_DB:-sharenet} > $BACKUP_DIR/db_backup_$DATE.sql
# Backup configuration files
tar -czf $BACKUP_DIR/config_backup_$DATE.tar.gz .env docker-compose.yml nginx/
# Keep only last 7 days of backups
find $BACKUP_DIR -name "*.sql" -mtime +7 -delete
find $BACKUP_DIR -name "*.tar.gz" -mtime +7 -delete
echo "Backup completed: $BACKUP_DIR"
EOF
chmod +x /opt/APP_NAME/backup.sh
``` ```
**Note**: The repository scripts are more comprehensive and include proper error handling, colored output, and multiple commands. The `scripts/deploy.sh` is used by the CI/CD pipeline and includes database migration handling, backup creation, and rollback capabilities. The `scripts/deploy-local.sh` is designed for local development deployments and includes status checking, restart, and log viewing capabilities.
#### 18.6 Backup Script
**Important**: The repository includes a pre-configured backup script in the `scripts/` directory that can be used for both CI/CD and production backup operations.
**Repository Script**:
- `scripts/backup.sh` - Comprehensive backup script with support for both CI/CD and production environments
**To use the repository backup script**:
```bash
# The script is already available in the cloned repository
cd /opt/APP_NAME
# Make the script executable
chmod +x scripts/backup.sh
# Test production backup (dry run first)
./scripts/backup.sh --type production --app-name APP_NAME --dry-run
# Run production backup
./scripts/backup.sh --type production --app-name APP_NAME
# Test CI/CD backup (dry run first)
./scripts/backup.sh --type ci-cd --app-name APP_NAME --dry-run
```
**Alternative: Create a local copy for convenience**:
```bash
# Copy the script to the application directory for easy access
cp scripts/backup.sh /opt/APP_NAME/backup-local.sh
chmod +x /opt/APP_NAME/backup-local.sh
# Test the local copy (dry run)
cd /opt/APP_NAME
./backup-local.sh --type production --app-name APP_NAME --dry-run
```
**Note**: The repository script is more comprehensive and includes proper error handling, colored output, dry-run mode, and support for both CI/CD and production environments. It automatically detects the environment and provides appropriate backup operations.
#### 18.6.1 Set Up Automated Backup Scheduling #### 18.6.1 Set Up Automated Backup Scheduling
```bash ```bash
# Create a cron job to run backups daily at 2 AM # Create a cron job to run backups daily at 2 AM using the repository script
(crontab -l 2>/dev/null; echo "0 2 * * * /opt/APP_NAME/backup.sh >> /opt/APP_NAME/backup.log 2>&1") | crontab - (crontab -l 2>/dev/null; echo "0 2 * * * cd /opt/APP_NAME && ./scripts/backup.sh --type production --app-name APP_NAME >> /opt/APP_NAME/backup.log 2>&1") | crontab -
# Verify the cron job was added # Verify the cron job was added
crontab -l crontab -l
@ -1282,12 +1338,12 @@ crontab -l
- **Runs automatically**: The backup script runs every day at 2:00 AM - **Runs automatically**: The backup script runs every day at 2:00 AM
- **Frequency**: Daily backups to ensure minimal data loss - **Frequency**: Daily backups to ensure minimal data loss
- **Logging**: All backup output is logged to `/opt/APP_NAME/backup.log` - **Logging**: All backup output is logged to `/opt/APP_NAME/backup.log`
- **Retention**: The script automatically keeps only the last 7 days of backups - **Retention**: The script automatically keeps only the last 7 days of backups (configurable)
**To test the backup manually:** **To test the backup manually:**
```bash ```bash
cd /opt/APP_NAME cd /opt/APP_NAME
./backup.sh ./scripts/backup.sh --type production --app-name APP_NAME
``` ```
**To view backup logs:** **To view backup logs:**
@ -1295,49 +1351,49 @@ cd /opt/APP_NAME
tail -f /opt/APP_NAME/backup.log tail -f /opt/APP_NAME/backup.log
``` ```
#### 18.7 Create Monitoring Script **Alternative: Use a local copy for automated backup**:
```bash ```bash
cat > /opt/APP_NAME/monitor.sh << 'EOF' # If you created a local copy, use that instead
#!/bin/bash (crontab -l 2>/dev/null; echo "0 2 * * * cd /opt/APP_NAME && ./backup-local.sh --type production --app-name APP_NAME >> /opt/APP_NAME/backup.log 2>&1") | crontab -
# Monitoring script for APP_NAME
echo "=== APP_NAME Application Status ==="
echo
echo "Container Status:"
docker-compose ps
echo
echo "Recent Logs:"
docker-compose logs --tail=20
echo
echo "System Resources:"
echo "CPU Usage:"
top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1
echo
echo "Memory Usage:"
free -h
echo
echo "Disk Usage:"
df -h
echo
echo "Network Connections:"
netstat -tuln | grep -E ':(80|443|3000|3001)'
EOF
chmod +x /opt/APP_NAME/monitor.sh
``` ```
#### 18.7 Monitoring Script
**Important**: The repository includes a pre-configured monitoring script in the `scripts/` directory that can be used for production monitoring.
**Repository Script**:
- `scripts/monitor.sh` - Comprehensive monitoring script with support for both CI/CD and production environments
**To use the repository monitoring script**:
```bash
# The script is already available in the cloned repository
cd /opt/APP_NAME
# Make the script executable
chmod +x scripts/monitor.sh
# Test production monitoring
./scripts/monitor.sh --type production --app-name APP_NAME
```
**Alternative: Create a local copy for convenience**:
```bash
# Copy the script to the application directory for easy access
cp scripts/monitor.sh /opt/APP_NAME/monitor-local.sh
chmod +x /opt/APP_NAME/monitor-local.sh
# Test the local copy
cd /opt/APP_NAME
./monitor-local.sh --type production --app-name APP_NAME
```
**Note**: The repository script is more comprehensive and includes proper error handling, colored output, health checks, and automatic environment detection. It provides better monitoring information than a simple local script.
#### 18.7.1 Set Up Automated Monitoring #### 18.7.1 Set Up Automated Monitoring
```bash ```bash
# Create a cron job to run monitoring every 5 minutes # Create a cron job to run monitoring every 5 minutes using the repository script
(crontab -l 2>/dev/null; echo "*/5 * * * * /opt/APP_NAME/monitor.sh >> /opt/APP_NAME/monitor.log 2>&1") | crontab - (crontab -l 2>/dev/null; echo "*/5 * * * * cd /opt/APP_NAME && ./scripts/monitor.sh --type production --app-name APP_NAME >> /opt/APP_NAME/monitor.log 2>&1") | crontab -
# Verify the cron job was added # Verify the cron job was added
crontab -l crontab -l
@ -1347,12 +1403,12 @@ crontab -l
- **Runs automatically**: The monitoring script runs every 5 minutes - **Runs automatically**: The monitoring script runs every 5 minutes
- **Frequency**: Every 5 minutes to catch issues quickly - **Frequency**: Every 5 minutes to catch issues quickly
- **Logging**: All monitoring output is logged to `/opt/APP_NAME/monitor.log` - **Logging**: All monitoring output is logged to `/opt/APP_NAME/monitor.log`
- **What it monitors**: Container status, recent logs, CPU/memory/disk usage, network connections - **What it monitors**: Container status, recent logs, CPU/memory/disk usage, network connections, health checks
**To test the monitoring manually:** **To test the monitoring manually:**
```bash ```bash
cd /opt/APP_NAME cd /opt/APP_NAME
./monitor.sh ./scripts/monitor.sh --type production --app-name APP_NAME
``` ```
**To view monitoring logs:** **To view monitoring logs:**
@ -1579,8 +1635,8 @@ curl -I http://your-domain.com
# Test HTTPS access # Test HTTPS access
curl -I https://your-domain.com curl -I https://your-domain.com
# Test application endpoints # Test application health endpoint (checks backend services)
curl -I https://your-domain.com/api/health curl https://your-domain.com/health
``` ```
**If you don't have a domain (IP access only):** **If you don't have a domain (IP access only):**
@ -1588,20 +1644,31 @@ curl -I https://your-domain.com/api/health
# Test HTTP access via IP # Test HTTP access via IP
curl -I http://YOUR_PRODUCTION_IP curl -I http://YOUR_PRODUCTION_IP
# Test application endpoints # Test application health endpoint (checks backend services)
curl -I http://YOUR_PRODUCTION_IP/api/health curl http://YOUR_PRODUCTION_IP/health
``` ```
**Expected health endpoint response:**
```json
{
"status": "healthy",
"service": "sharenet-api",
"timestamp": "2024-01-01T12:00:00Z"
}
```
**Note**: The `/health` endpoint now proxies to the backend service and returns actual service status. If the backend is not running, this endpoint will return an error, making it a true health check for the application.
### Step 27: Test Monitoring ### Step 27: Test Monitoring
```bash ```bash
# On CI/CD server # On CI/CD server
cd /opt/registry cd /opt/registry
./monitor.sh ./scripts/monitor.sh --type ci-cd
# On Production server # On Production server
cd /opt/APP_NAME cd /opt/APP_NAME
./monitor.sh ./scripts/monitor.sh --type production --app-name APP_NAME
``` ```
### Step 28: Test Registry Access ### Step 28: Test Registry Access
@ -1683,11 +1750,11 @@ curl -I http://YOUR_CI_CD_IP:8080
```bash ```bash
# On CI/CD server # On CI/CD server
cd /opt/registry cd /opt/registry
./monitor.sh ./scripts/monitor.sh --type ci-cd
# On Production server # On Production server
cd /opt/APP_NAME cd /opt/APP_NAME
./monitor.sh ./scripts/monitor.sh --type production --app-name APP_NAME
``` ```
#### Weekly Maintenance #### Weekly Maintenance
@ -1696,12 +1763,29 @@ cd /opt/APP_NAME
2. **Review logs**: `docker-compose logs --tail=100` 2. **Review logs**: `docker-compose logs --tail=100`
3. **Update system**: `sudo apt update && sudo apt upgrade` 3. **Update system**: `sudo apt update && sudo apt upgrade`
4. **Test backups**: Verify backup files exist and are recent 4. **Test backups**: Verify backup files exist and are recent
```bash
# On Production server
cd /opt/APP_NAME
./scripts/backup.sh --type production --app-name APP_NAME --dry-run
# Check backup directory
ls -la backups/
```
#### Monthly Maintenance #### Monthly Maintenance
1. **Review security**: Check firewall rules and fail2ban status 1. **Review security**: Check firewall rules and fail2ban status
2. **Update certificates**: Ensure SSL certificates are valid (domain users only) 2. **Update certificates**: Ensure SSL certificates are valid (domain users only)
3. **Clean up old images**: Remove unused Docker images 3. **Clean up old images**: Run the cleanup script to remove unused Docker images
```bash
# On CI/CD server
cd /opt/registry
./scripts/cleanup.sh --type ci-cd
# On Production server
cd /opt/APP_NAME
./scripts/cleanup.sh --type production
```
4. **Review monitoring**: Check application performance and logs 4. **Review monitoring**: Check application performance and logs
5. **Verify registry access**: Test registry connectivity and authentication 5. **Verify registry access**: Test registry connectivity and authentication
@ -1743,4 +1827,4 @@ Your complete CI/CD pipeline is now ready! The setup includes:
- **IP-only users**: Access via `http://YOUR_PRODUCTION_IP` - **IP-only users**: Access via `http://YOUR_PRODUCTION_IP`
- **Registry UI**: Access via `http://YOUR_CI_CD_IP:8080` - **Registry UI**: Access via `http://YOUR_CI_CD_IP:8080`
For ongoing maintenance and troubleshooting, refer to the troubleshooting section and monitoring scripts provided in this guide. For ongoing maintenance and troubleshooting, refer to the troubleshooting section and monitoring scripts provided in this guide.

View file

@ -106,8 +106,13 @@ http {
# Health check endpoint # Health check endpoint
location /health { location /health {
access_log off; access_log off;
return 200 "healthy\n"; proxy_pass http://backend/health;
add_header Content-Type text/plain; proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
add_header Content-Type application/json;
} }
} }
} }

273
scripts/backup.sh Executable file
View file

@ -0,0 +1,273 @@
#!/bin/bash

# Sharenet Backup Script
# This script creates backups of databases and configuration files
# Run with --help for usage details (see show_help below).

set -e

# Colors for output (ANSI escape sequences consumed by the log_* helpers).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration — every setting can be overridden via the environment
# or via the CLI flags parsed at the bottom of the script.
BACKUP_TYPE="${BACKUP_TYPE:-production}" # production or ci-cd
APP_NAME="${APP_NAME:-sharenet}"
# NOTE: the literal "APP_NAME" placeholder in this default path is replaced
# with the actual $APP_NAME value after argument parsing (end of script).
BACKUP_DIR="${BACKUP_DIR:-/opt/APP_NAME/backups}"
RETENTION_DAYS="${RETENTION_DAYS:-7}"    # days of backups to keep
DRY_RUN="${DRY_RUN:-false}"              # 'true' = print actions, do nothing
# Functions
# Logging helpers: each prints a color-coded severity tag followed by the
# caller's message. %b processes the escape sequences in the color vars
# (and the message), matching the original `echo -e` behavior.
log_info() {
  printf '%b[INFO]%b %b\n' "$BLUE" "$NC" "$1"
}

log_success() {
  printf '%b[SUCCESS]%b %b\n' "$GREEN" "$NC" "$1"
}

log_warning() {
  printf '%b[WARNING]%b %b\n' "$YELLOW" "$NC" "$1"
}

log_error() {
  printf '%b[ERROR]%b %b\n' "$RED" "$NC" "$1"
}
# Print usage/help text to stdout. The here-doc delimiter is deliberately
# unquoted so that $0 expands to the script's invocation path in the examples.
show_help() {
    cat << EOF
Sharenet Backup Script
Usage: $0 [OPTIONS]
Options:
  --type TYPE        Backup type: production or ci-cd (default: production)
  --app-name NAME    Application name (default: sharenet)
  --backup-dir DIR   Backup directory (default: /opt/APP_NAME/backups)
  --retention DAYS   Number of days to keep backups (default: 7)
  --dry-run          Show what would be done without executing
  --help             Show this help message
Environment Variables:
  BACKUP_TYPE        Set backup type (production/ci-cd)
  APP_NAME           Set application name
  BACKUP_DIR         Set backup directory path
  RETENTION_DAYS     Set retention period in days
  DRY_RUN            Set to 'true' for dry run mode
Examples:
  $0                    # Backup production environment
  $0 --type ci-cd       # Backup CI/CD environment
  $0 --app-name myapp   # Backup specific application
  $0 --dry-run          # Show what would be backed up
  DRY_RUN=true $0       # Dry run mode
EOF
}
# Dump the PostgreSQL database (via the docker-compose 'postgres' service)
# into $BACKUP_DIR/db_backup_<timestamp>.sql.
# Reads:  DRY_RUN, BACKUP_DIR, POSTGRES_USER, POSTGRES_DB
# Exits non-zero if not run from the application directory, if the postgres
# container is down, or if the resulting dump is missing/empty.
backup_database() {
    log_info "Backing up database..."

    if [ "$DRY_RUN" = "true" ]; then
        log_warning "DRY RUN MODE - No changes will be made"
        echo "Would run: docker-compose exec -T postgres pg_dump -U \${POSTGRES_USER:-sharenet} \${POSTGRES_DB:-sharenet} > \$BACKUP_DIR/db_backup_\$DATE.sql"
        return
    fi

    # Check if we're in the application directory
    if [ ! -f "docker-compose.yml" ]; then
        log_error "docker-compose.yml not found. Please run this script from the application directory."
        exit 1
    fi

    # Check if postgres container is running
    if ! docker-compose ps | grep -q "postgres.*Up"; then
        log_error "PostgreSQL container is not running"
        exit 1
    fi

    # Create backup directory if it doesn't exist
    mkdir -p "$BACKUP_DIR"

    # Separate declaration from assignment so a failing $(date) is not
    # masked by `local` always returning 0.
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    local backup_file="$BACKUP_DIR/db_backup_${timestamp}.sql"

    # Backup database (expansions quoted to survive unusual values).
    log_info "Creating database backup: $backup_file"
    docker-compose exec -T postgres pg_dump -U "${POSTGRES_USER:-sharenet}" "${POSTGRES_DB:-sharenet}" > "$backup_file"

    # Verify the dump exists and is non-empty before declaring success.
    if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
        log_success "Database backup created: $backup_file"
        log_info "Backup size: $(du -h "$backup_file" | cut -f1)"
    else
        log_error "Database backup failed or is empty"
        exit 1
    fi
}
# Archive the configuration files that exist (.env, docker-compose.yml,
# nginx/) into $BACKUP_DIR/config_backup_<timestamp>.tar.gz.
# Reads:  DRY_RUN, BACKUP_DIR
# Returns early (with a warning) when no configuration files are present;
# exits non-zero if the archive is missing/empty afterwards.
backup_configuration() {
    log_info "Backing up configuration files..."

    if [ "$DRY_RUN" = "true" ]; then
        log_warning "DRY RUN MODE - No changes will be made"
        echo "Would run: tar -czf \$BACKUP_DIR/config_backup_\$DATE.tar.gz .env docker-compose.yml nginx/"
        return
    fi

    # Create backup directory if it doesn't exist
    mkdir -p "$BACKUP_DIR"

    # Separate declaration from assignment so a failing $(date) is not masked.
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    local backup_file="$BACKUP_DIR/config_backup_${timestamp}.tar.gz"

    # Collect only the files that actually exist. An array keeps each path
    # as a distinct argument (no reliance on word-splitting).
    local -a files_to_backup=()
    [ -f ".env" ] && files_to_backup+=(".env")
    [ -f "docker-compose.yml" ] && files_to_backup+=("docker-compose.yml")
    [ -d "nginx" ] && files_to_backup+=("nginx/")

    if [ "${#files_to_backup[@]}" -eq 0 ]; then
        log_warning "No configuration files found to backup"
        return
    fi

    # Backup configuration files
    log_info "Creating configuration backup: $backup_file"
    tar -czf "$backup_file" "${files_to_backup[@]}"

    # Verify the archive exists and is non-empty before declaring success.
    if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
        log_success "Configuration backup created: $backup_file"
        log_info "Backup size: $(du -h "$backup_file" | cut -f1)"
    else
        log_error "Configuration backup failed or is empty"
        exit 1
    fi
}
# Delete database (*.sql) and configuration (*.tar.gz) backups older than
# $RETENTION_DAYS days from $BACKUP_DIR, reporting how many were removed.
# Reads: DRY_RUN, BACKUP_DIR, RETENTION_DAYS
cleanup_old_backups() {
    log_info "Cleaning up old backups (keeping last $RETENTION_DAYS days)..."

    if [ "$DRY_RUN" = "true" ]; then
        log_warning "DRY RUN MODE - No changes will be made"
        echo "Would run: find \$BACKUP_DIR -name \"*.sql\" -mtime +$RETENTION_DAYS -delete"
        echo "Would run: find \$BACKUP_DIR -name \"*.tar.gz\" -mtime +$RETENTION_DAYS -delete"
        return
    fi

    # Count files before cleanup (declaration split from assignment so a
    # failing command substitution is not masked by `local`).
    local sql_count tar_count
    sql_count=$(find "$BACKUP_DIR" -name "*.sql" 2>/dev/null | wc -l)
    tar_count=$(find "$BACKUP_DIR" -name "*.tar.gz" 2>/dev/null | wc -l)

    # Remove old database backups ("|| true": a missing backup dir is fine).
    find "$BACKUP_DIR" -name "*.sql" -mtime +"$RETENTION_DAYS" -delete 2>/dev/null || true

    # Remove old configuration backups
    find "$BACKUP_DIR" -name "*.tar.gz" -mtime +"$RETENTION_DAYS" -delete 2>/dev/null || true

    # Count files after cleanup and report the difference.
    local sql_count_after tar_count_after
    sql_count_after=$(find "$BACKUP_DIR" -name "*.sql" 2>/dev/null | wc -l)
    tar_count_after=$(find "$BACKUP_DIR" -name "*.tar.gz" 2>/dev/null | wc -l)

    log_info "Cleaned up $((sql_count - sql_count_after)) old database backups"
    log_info "Cleaned up $((tar_count - tar_count_after)) old configuration backups"
}
# Full production backup: database dump, then configuration archive, then
# retention cleanup. Must be run from the application directory (where
# docker-compose.yml lives); aborts otherwise.
backup_production() {
    log_info "Backing up production environment..."

    # Guard clause: refuse to run outside the application directory.
    [ -f "docker-compose.yml" ] || {
        log_error "docker-compose.yml not found. Please run this script from the application directory."
        exit 1
    }

    backup_database
    backup_configuration
    cleanup_old_backups

    log_success "Production backup completed successfully"
}
# CI/CD backup: only configuration (and registry-related) files matter here;
# there is no application database to dump.
backup_ci_cd() {
    log_info "Backing up CI/CD environment..."
    log_info "CI/CD backup focuses on configuration and registry data..."

    # Skip the archive step entirely when none of the known config
    # artifacts are present (condition inverted via De Morgan).
    if [ ! -f "docker-compose.yml" ] && [ ! -f ".env" ] && [ ! -d "nginx" ]; then
        log_warning "No configuration files found for CI/CD backup"
    else
        backup_configuration
    fi

    cleanup_old_backups

    log_success "CI/CD backup completed successfully"
}
# ---------------------------------------------------------------------------
# CLI argument parsing — flags override the corresponding environment vars.
# ---------------------------------------------------------------------------
while [[ $# -gt 0 ]]; do
    case "$1" in
        --type)
            BACKUP_TYPE="$2"
            shift 2
            ;;
        --app-name)
            APP_NAME="$2"
            shift 2
            ;;
        --backup-dir)
            BACKUP_DIR="$2"
            shift 2
            ;;
        --retention)
            RETENTION_DAYS="$2"
            shift 2
            ;;
        --dry-run)
            DRY_RUN="true"
            shift
            ;;
        --help|-h)
            show_help
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            show_help
            exit 1
            ;;
    esac
done

# Substitute the literal "APP_NAME" placeholder in the backup path.
# Bash pattern substitution avoids spawning sed and is safe even when
# $APP_NAME contains characters special to sed (e.g. '/' or '&').
BACKUP_DIR="${BACKUP_DIR//APP_NAME/$APP_NAME}"

# Dispatch to the routine matching the requested backup type.
case "$BACKUP_TYPE" in
    production)
        backup_production
        ;;
    ci-cd)
        backup_ci_cd
        ;;
    *)
        log_error "Invalid backup type: $BACKUP_TYPE"
        log_error "Valid types: production, ci-cd"
        exit 1
        ;;
esac

log_success "Backup completed successfully"

209
scripts/cleanup.sh Executable file
View file

@ -0,0 +1,209 @@
#!/bin/bash
# Sharenet Cleanup Script
# This script cleans up Docker resources and registry images
# Abort immediately if any command exits non-zero.
set -e
# Colors for output (ANSI escape sequences; NC resets the terminal color)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration (overridable via environment or the CLI flags parsed below)
CLEANUP_TYPE="${CLEANUP_TYPE:-ci-cd}" # ci-cd or production
REGISTRY_DIR="${REGISTRY_DIR:-/opt/registry}" # where the registry compose stack lives
DRY_RUN="${DRY_RUN:-false}" # 'true' prints actions without executing them
# Functions
# Logging helpers: tagged, colorized output (%b expands the color escapes).
log_info() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
log_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print usage information to stdout (the heredoc delimiter is unquoted, so
# $0 expands to the invoked script name).
show_help() {
    cat << EOF
Sharenet Cleanup Script
Usage: $0 [OPTIONS]
Options:
    --type TYPE        Cleanup type: ci-cd or production (default: ci-cd)
    --registry-dir DIR Registry directory (default: /opt/registry)
    --dry-run          Show what would be done without executing
    --help             Show this help message
Environment Variables:
    CLEANUP_TYPE       Set cleanup type (ci-cd/production)
    REGISTRY_DIR       Set registry directory path
    DRY_RUN            Set to 'true' for dry run mode
Examples:
    $0                      # Cleanup CI/CD environment
    $0 --type production    # Cleanup production environment
    $0 --dry-run            # Show what would be cleaned
    DRY_RUN=true $0         # Dry run mode
EOF
}
# Prune unused Docker images, volumes and networks.
# Honors DRY_RUN: prints the commands instead of executing them.
cleanup_docker_resources() {
    log_info "Cleaning up Docker resources..."
    if [ "$DRY_RUN" = "true" ]; then
        log_warning "DRY RUN MODE - No changes will be made"
        echo "Would run: docker image prune -f"
        echo "Would run: docker volume prune -f"
        echo "Would run: docker network prune -f"
        return
    fi
    # Prune each resource kind in turn; messages match the resource name.
    local resource
    for resource in image volume network; do
        log_info "Removing unused Docker ${resource}s..."
        docker "$resource" prune -f
    done
    log_success "Docker resources cleanup completed"
}
# Garbage-collect the local Docker registry (CI/CD hosts only).
# Directory-sensitive commands run in subshells so the caller's working
# directory is never changed (the previous version leaked a `cd` into the
# caller, affecting everything run after this function).
cleanup_registry() {
    if [ "$CLEANUP_TYPE" != "ci-cd" ]; then
        log_info "Skipping registry cleanup (not CI/CD environment)"
        return
    fi
    log_info "Cleaning up registry images..."
    if [ ! -d "$REGISTRY_DIR" ]; then
        log_warning "Registry directory not found: $REGISTRY_DIR"
        return
    fi
    if [ "$DRY_RUN" = "true" ]; then
        log_warning "DRY RUN MODE - No changes will be made"
        echo "Would run: cd $REGISTRY_DIR && docker-compose exec registry registry garbage-collect /etc/docker/registry/config.yml"
        return
    fi
    # Skip when the registry container is not up.
    if ! (cd "$REGISTRY_DIR" && docker-compose ps) | grep -q "registry.*Up"; then
        log_warning "Registry is not running, skipping registry cleanup"
        return
    fi
    # Garbage collection reclaims space from unreferenced blobs; it does NOT
    # trim tags (the old "keep last 10 tags" comment was inaccurate).
    log_info "Running registry garbage collection..."
    (cd "$REGISTRY_DIR" && docker-compose exec -T registry registry garbage-collect /etc/docker/registry/config.yml)
    log_success "Registry cleanup completed"
}
# Production host cleanup: stop the stack, prune Docker resources, restart.
# Outside an application checkout only the generic Docker pruning runs.
cleanup_production() {
    log_info "Cleaning up production environment..."
    if [ ! -f "docker-compose.yml" ]; then
        log_warning "Not in application directory (docker-compose.yml not found)"
        cleanup_docker_resources
        return
    fi
    log_info "Found docker-compose.yml, cleaning up application resources..."
    if [ "$DRY_RUN" = "true" ]; then
        log_warning "DRY RUN MODE - No changes will be made"
        echo "Would run: docker-compose down"
        echo "Would run: docker image prune -f"
        return
    fi
    # Stop the stack so its resources can be reclaimed, prune, then restart.
    log_info "Stopping application containers..."
    docker-compose down
    cleanup_docker_resources
    log_info "Starting application containers..."
    docker-compose up -d
    log_success "Production cleanup completed"
}
# CI/CD host cleanup: generic Docker pruning followed by registry GC.
cleanup_ci_cd() {
    log_info "Cleaning up CI/CD environment..."
    cleanup_docker_resources
    cleanup_registry
    log_success "CI/CD cleanup completed"
}
# Parse command line arguments.
# Value-taking options now fail fast with a clear message when the value is
# missing (previously `shift 2` with one argument left aborted the script
# with a cryptic "shift count out of range" error under `set -e`).
while [[ $# -gt 0 ]]; do
    case $1 in
        --type|--registry-dir)
            if [[ $# -lt 2 ]]; then
                log_error "Option $1 requires a value"
                exit 1
            fi
            if [[ $1 == "--type" ]]; then
                CLEANUP_TYPE="$2"
            else
                REGISTRY_DIR="$2"
            fi
            shift 2
            ;;
        --dry-run)
            DRY_RUN="true"
            shift
            ;;
        --help|-h)
            show_help
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            show_help
            exit 1
            ;;
    esac
done
# Main cleanup logic: dispatch on the requested environment type.
case "$CLEANUP_TYPE" in
    production) cleanup_production ;;
    ci-cd) cleanup_ci_cd ;;
    *)
        log_error "Invalid cleanup type: $CLEANUP_TYPE"
        log_error "Valid types: production, ci-cd"
        exit 1
        ;;
esac
log_success "Cleanup completed successfully"

197
scripts/deploy-local.sh Executable file
View file

@ -0,0 +1,197 @@
#!/bin/bash
# Sharenet Local Deployment Script
# This script handles local development deployments
# Abort immediately if any command exits non-zero.
set -e

# Configuration
APP_NAME="sharenet"
DOCKER_COMPOSE_FILE="docker-compose.yml"

# ANSI color codes for tagged log output (NC resets the color)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers; %b expands the embedded escape sequences.
log_info()    { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
log_success() { printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { printf '%b\n' "${YELLOW}[WARNING]${NC} $1"; }
log_error()   { printf '%b\n' "${RED}[ERROR]${NC} $1"; }
# Pre-deployment checks
# Verify the environment is deployable: compose file present, Docker daemon
# reachable, docker-compose on PATH. Exits non-zero on the first failure.
pre_deployment_checks() {
    log_info "Running pre-deployment checks..."
    if [ ! -f "$DOCKER_COMPOSE_FILE" ]; then
        log_error "docker-compose.yml not found in current directory"
        log_error "Current directory: $(pwd)"
        exit 1
    fi
    docker info >/dev/null 2>&1 || {
        log_error "Docker is not running or not accessible"
        exit 1
    }
    command -v docker-compose >/dev/null 2>&1 || {
        log_error "docker-compose is not installed or not in PATH"
        exit 1
    }
    log_success "Pre-deployment checks passed"
}
# Pull latest code (if in git repository)
# Tries the 'main' branch first, then 'master'; a failed pull is non-fatal.
pull_latest_code() {
    if [ ! -d ".git" ]; then
        log_info "Not a git repository, skipping code pull"
        return
    fi
    log_info "Pulling latest code from repository..."
    if git pull origin main 2>/dev/null || git pull origin master 2>/dev/null; then
        log_success "Code updated successfully"
    else
        log_warning "Could not pull latest code (not a git repository or no remote configured)"
    fi
}
# Pull latest images
# Fetch the newest images for every compose service; abort on failure.
pull_images() {
    log_info "Pulling latest Docker images..."
    docker-compose pull || {
        log_error "Failed to pull images"
        exit 1
    }
    log_success "Images pulled successfully"
}
# Stop existing containers
# A failed shutdown is logged as a warning, not treated as fatal.
stop_containers() {
    log_info "Stopping existing containers..."
    if ! docker-compose down; then
        log_warning "Some containers may not have stopped cleanly"
    else
        log_success "Containers stopped successfully"
    fi
}
# Start new containers
# Bring the stack up detached; on failure dump recent logs and abort.
start_containers() {
    log_info "Starting new containers..."
    docker-compose up -d || {
        log_error "Failed to start containers"
        log_error "Recent logs:"
        docker-compose logs --tail=20
        exit 1
    }
    log_success "Containers started successfully"
}
# Verify deployment
# Give containers a moment to come up, then confirm at least one compose
# service reports "Up"; on failure dump diagnostics and exit non-zero.
verify_deployment() {
    log_info "Verifying deployment..."
    # Wait for containers to start
    sleep 10
    if ! docker-compose ps | grep -q "Up"; then
        log_error "Deployment failed! Some containers are not running."
        docker-compose ps
        log_error "Recent logs:"
        docker-compose logs --tail=20
        exit 1
    fi
    log_success "Deployment successful! All containers are running."
    docker-compose ps
}
# Clean up old images
# Prune dangling images left behind by the deployment; failures are non-fatal.
cleanup_images() {
    log_info "Cleaning up old Docker images..."
    if ! docker image prune -f; then
        log_warning "Image cleanup had some issues"
    else
        log_success "Old images cleaned up successfully"
    fi
}
# Show service status
# Print container status plus the last few log lines for a quick overview.
show_status() {
    log_info "Current service status:"
    docker-compose ps
    echo
    log_info "Recent logs:"
    docker-compose logs --tail=10
}
# Main deployment process
# Dispatches on the first CLI argument (defaults to "deploy"):
#   deploy  - full pipeline: checks, code/image pull, restart, verify, prune
#   status  - print container status and recent logs
#   restart - stop/start/verify without pulling anything
#   logs    - tail and follow compose logs
main() {
    local command="${1:-deploy}"
    case "$command" in
        deploy)
            log_info "Starting local deployment for $APP_NAME..."
            pre_deployment_checks
            pull_latest_code
            pull_images
            stop_containers
            start_containers
            verify_deployment
            cleanup_images
            log_success "Local deployment completed successfully"
            ;;
        status)
            log_info "Checking service status..."
            if [ -f "$DOCKER_COMPOSE_FILE" ]; then
                show_status
            else
                log_error "docker-compose.yml not found in current directory"
                exit 1
            fi
            ;;
        restart)
            log_info "Restarting services..."
            pre_deployment_checks
            stop_containers
            start_containers
            verify_deployment
            log_success "Services restarted successfully"
            ;;
        logs)
            log_info "Showing recent logs..."
            if [ -f "$DOCKER_COMPOSE_FILE" ]; then
                # -f follows the log stream until interrupted
                docker-compose logs --tail=50 -f
            else
                log_error "docker-compose.yml not found in current directory"
                exit 1
            fi
            ;;
        *)
            log_error "Unknown command: $command"
            echo "Usage: $0 {deploy|status|restart|logs}"
            echo ""
            echo "Commands:"
            echo "  deploy  - Deploy the application (default)"
            echo "  status  - Show current service status"
            echo "  restart - Restart all services"
            echo "  logs    - Show and follow recent logs"
            exit 1
            ;;
    esac
}
# Handle interrupts gracefully
# Convert Ctrl-C / TERM into a logged failure so aborted deploys are visible.
trap 'log_error "Deployment interrupted"; exit 1' INT TERM
# Run main function
main "$@"

197
scripts/monitor.sh Executable file
View file

@ -0,0 +1,197 @@
#!/bin/bash
# Sharenet Monitoring Script
# This script monitors the application status and system resources
# Abort immediately if any command exits non-zero.
set -e
# Colors for output (ANSI escape sequences; NC resets the terminal color)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration (overridable via environment or the CLI flags parsed below)
APP_NAME="${APP_NAME:-sharenet}"
MONITOR_TYPE="${MONITOR_TYPE:-production}" # production or ci-cd
# Functions
# Logging helpers: tagged, colorized output (%b expands the color escapes).
log_info() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
log_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print usage information to stdout (the heredoc delimiter is unquoted, so
# $0 expands to the invoked script name).
show_help() {
    cat << EOF
Sharenet Monitoring Script
Usage: $0 [OPTIONS]
Options:
    --type TYPE        Monitoring type: production or ci-cd (default: production)
    --app-name NAME    Application name (default: sharenet)
    --help             Show this help message
Environment Variables:
    MONITOR_TYPE       Set monitoring type (production/ci-cd)
    APP_NAME           Set application name
Examples:
    $0                      # Monitor production environment
    $0 --type ci-cd         # Monitor CI/CD environment
    $0 --app-name myapp     # Monitor specific application
    MONITOR_TYPE=ci-cd $0   # Monitor CI/CD environment
EOF
}
# Report the production host's status: container state, recent logs,
# CPU/memory/disk usage, listening application ports, and an HTTP health probe.
monitor_production() {
    log_info "=== $APP_NAME Production Environment Status ==="
    echo "Date: $(date)"
    echo "Uptime: $(uptime)"
    echo
    # Check if we're in the application directory
    if [ -f "docker-compose.yml" ]; then
        log_info "Container Status:"
        if docker-compose ps; then
            log_success "Docker Compose is running"
        else
            log_error "Docker Compose is not running"
        fi
        echo
        log_info "Recent Application Logs:"
        docker-compose logs --tail=20
        echo
    else
        log_warning "Not in application directory (docker-compose.yml not found)"
    fi
    log_info "System Resources:"
    echo "CPU Usage:"
    # Prints only the user-CPU field from top's summary line (no label).
    top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1
    echo
    echo "Memory Usage:"
    free -h
    echo
    echo "Disk Usage:"
    df -h
    echo
    echo "Network Connections:"
    # Ports 80/443 plus 3000/3001 — presumably frontend/backend; verify against the compose file
    netstat -tuln | grep -E ':(80|443|3000|3001)' || log_warning "No application ports found"
    echo
    # Health check
    # NOTE(review): assumes the app serves /health on localhost port 80 — confirm proxy config
    log_info "Health Check:"
    if curl -s -f http://localhost/health > /dev/null 2>&1; then
        log_success "Application health check passed"
    else
        log_error "Application health check failed"
    fi
}
# Report the CI/CD host's status: running containers, the local Docker
# registry stack, the Forgejo actions runner service, and system resources.
monitor_ci_cd() {
    log_info "=== CI/CD Server Status ==="
    echo "Date: $(date)"
    echo "Uptime: $(uptime)"
    echo
    log_info "Docker Status:"
    docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
    echo
    log_info "Registry Status:"
    # NOTE(review): path is hardcoded here while the cleanup script makes it
    # configurable via REGISTRY_DIR — consider unifying.
    if [ -d "/opt/registry" ]; then
        cd /opt/registry
        docker-compose ps
        cd - > /dev/null
    else
        log_warning "Registry directory not found"
    fi
    echo
    log_info "Actions Runner Status:"
    if systemctl is-active --quiet forgejo-runner.service; then
        log_success "Forgejo runner is running"
        systemctl status forgejo-runner.service --no-pager
    else
        log_error "Forgejo runner is not running"
    fi
    echo
    log_info "System Resources:"
    echo "CPU Usage:"
    # Prints only the user-CPU field from top's summary line (no label).
    top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1
    echo
    echo "Memory Usage:"
    free -h | grep Mem
    echo
    echo "Disk Usage:"
    df -h /
    echo
    # Registry health check
    # Registry is assumed to listen on localhost:5000 (default registry port).
    log_info "Registry Health Check:"
    if curl -s -f http://localhost:5000/v2/_catalog > /dev/null 2>&1; then
        log_success "Registry is accessible"
    else
        log_error "Registry is not accessible"
    fi
}
# Parse command line arguments.
# Value-taking options now fail fast with a clear message when the value is
# missing (previously `shift 2` with one argument left aborted the script
# with a cryptic "shift count out of range" error under `set -e`).
while [[ $# -gt 0 ]]; do
    case $1 in
        --type|--app-name)
            if [[ $# -lt 2 ]]; then
                log_error "Option $1 requires a value"
                exit 1
            fi
            if [[ $1 == "--type" ]]; then
                MONITOR_TYPE="$2"
            else
                APP_NAME="$2"
            fi
            shift 2
            ;;
        --help|-h)
            show_help
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            show_help
            exit 1
            ;;
    esac
done
# Main monitoring logic: dispatch on the requested environment type.
case "$MONITOR_TYPE" in
    production) monitor_production ;;
    ci-cd) monitor_ci_cd ;;
    *)
        log_error "Invalid monitor type: $MONITOR_TYPE"
        log_error "Valid types: production, ci-cd"
        exit 1
        ;;
esac
log_success "Monitoring completed"

262
scripts/validate_migrations.sh Executable file
View file

@ -0,0 +1,262 @@
#!/bin/bash
# Sharenet Migration Validation Script
# This script validates that all migration files referenced in the database exist in the filesystem
# Abort immediately if any command exits non-zero.
set -e
# Colors for output (ANSI escape sequences; NC resets the terminal color)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
# Path to the SQL migration files, relative to the repository root.
MIGRATIONS_DIR="backend/migrations"
# Database connection string; may also be supplied via --database-url.
DATABASE_URL="${DATABASE_URL:-}"
# 'true' enables detailed per-file output.
VERBOSE="${VERBOSE:-false}"
# Functions
# Logging helpers: tagged, colorized output (%b expands the color escapes).
log_info() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
log_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Print usage information to stdout (the heredoc delimiter is unquoted, so
# $0 expands to the invoked script name).
show_help() {
    cat << EOF
Sharenet Migration Validation Script
Usage: $0 [OPTIONS]
Options:
    --database-url URL    Database connection string
    --verbose             Show detailed output
    --help                Show this help message
Environment Variables:
    DATABASE_URL          Database connection string
    VERBOSE               Set to 'true' for verbose output
Examples:
    $0                                  # Validate with default database
    $0 --database-url postgres://...    # Validate with specific database
    VERBOSE=true $0                     # Show detailed output
EOF
}
# Ensure required tooling and inputs exist: the sqlx CLI must be on PATH
# and the migrations directory must be present. Exits non-zero otherwise.
check_dependencies() {
    log_info "Checking dependencies..."
    command -v sqlx &> /dev/null || {
        log_error "sqlx CLI not found. Install with: cargo install sqlx-cli"
        exit 1
    }
    [ -d "$MIGRATIONS_DIR" ] || {
        log_error "Migrations directory not found: $MIGRATIONS_DIR"
        exit 1
    }
    log_success "Dependencies check passed"
}
# Verify DATABASE_URL is set and the database answers a sqlx query.
check_database_connection() {
    log_info "Testing database connection..."
    [ -n "$DATABASE_URL" ] || {
        log_error "DATABASE_URL environment variable is required"
        exit 1
    }
    sqlx migrate info --database-url "$DATABASE_URL" >/dev/null 2>&1 || {
        log_error "Cannot connect to database. Check DATABASE_URL and network connectivity"
        log_error "DATABASE_URL: $DATABASE_URL"
        exit 1
    }
    log_success "Database connection successful"
}
# Cross-check migrations applied in the database (via `sqlx migrate info`)
# against the .sql files on disk. Fails when an applied migration has no
# matching file, a file violates the YYYYMMDDHHMMSS_description.sql naming
# convention, or two files share a timestamp.
validate_migration_files() {
    log_info "Validating migration files..."
    local has_errors=false
    local missing_files=()
    local extra_files=()  # NOTE(review): declared but never populated — candidate for removal
    # Get list of applied migrations from database
    # NOTE(review): assumes `sqlx migrate info` prints lines starting with a
    # 14-digit version and that awk's $1 is exactly that version — confirm
    # against the installed sqlx-cli output format.
    local applied_migrations=$(sqlx migrate info --database-url "$DATABASE_URL" | grep -E '^[0-9]{14}' | awk '{print $1}' || true)
    if [ "$VERBOSE" = "true" ]; then
        log_info "Applied migrations in database:"
        echo "$applied_migrations" | while read migration; do
            if [ -n "$migration" ]; then
                echo " - $migration"
            fi
        done
    fi
    # Check that all applied migrations exist as files
    # (here-string keeps this loop in the current shell, so has_errors and
    # missing_files updates are preserved)
    while IFS= read -r migration; do
        if [ -n "$migration" ]; then
            local migration_file=$(find "$MIGRATIONS_DIR" -name "${migration}_*.sql" -print -quit)
            if [ -z "$migration_file" ]; then
                log_error "Missing migration file for applied migration: $migration"
                missing_files+=("$migration")
                has_errors=true
            elif [ "$VERBOSE" = "true" ]; then
                log_success "Found migration file: $migration_file"
            fi
        fi
    done <<< "$applied_migrations"
    # Check for migration files that don't match applied migrations
    local migration_files=$(find "$MIGRATIONS_DIR" -name "*.sql" -exec basename {} \; | sort)
    if [ "$VERBOSE" = "true" ]; then
        log_info "Migration files in filesystem:"
        echo "$migration_files" | while read file; do
            if [ -n "$file" ]; then
                echo " - $file"
            fi
        done
    fi
    # Validate migration file naming convention
    while IFS= read -r file; do
        if [ -n "$file" ]; then
            if [[ ! "$file" =~ ^[0-9]{14}_.*\.sql$ ]]; then
                log_error "Invalid migration file naming convention: $file"
                log_error "Expected format: YYYYMMDDHHMMSS_description.sql"
                has_errors=true
            fi
        fi
    done <<< "$migration_files"
    # Check for duplicate migration timestamps
    local timestamps=$(find "$MIGRATIONS_DIR" -name "*.sql" -exec basename {} \; | sed 's/^\([0-9]\{14\}\)_.*\.sql$/\1/' | sort)
    local duplicate_timestamps=$(echo "$timestamps" | uniq -d)
    if [ -n "$duplicate_timestamps" ]; then
        log_error "Duplicate migration timestamps found:"
        # (this pipeline runs in a subshell; it only logs, so that is fine)
        echo "$duplicate_timestamps" | while read timestamp; do
            if [ -n "$timestamp" ]; then
                log_error " - $timestamp"
                find "$MIGRATIONS_DIR" -name "${timestamp}_*.sql" -exec basename {} \;
            fi
        done
        has_errors=true
    fi
    # Summary
    if [ "$has_errors" = true ]; then
        log_error "Migration validation failed!"
        if [ ${#missing_files[@]} -gt 0 ]; then
            log_error "Missing migration files:"
            for file in "${missing_files[@]}"; do
                log_error " - $file"
            done
        fi
        exit 1
    else
        log_success "All migration files are valid and consistent"
    fi
}
# Scan every migration file for content red flags: CREATE TABLE with no
# semicolon anywhere (likely truncated), DROP TABLE (warn only), and
# DROP DATABASE (fatal). Exits non-zero on any fatal finding.
validate_migration_content() {
    log_info "Validating migration file content..."
    local failed=false
    local name path
    while IFS= read -r name; do
        [ -n "$name" ] || continue
        path="$MIGRATIONS_DIR/$name"
        if grep -q "CREATE TABLE" "$path" && ! grep -q ";" "$path"; then
            log_warning "Migration file $name may be missing semicolons"
        fi
        if grep -q "DROP TABLE" "$path"; then
            log_warning "Migration file $name contains DROP TABLE - ensure this is intentional"
        fi
        if grep -q "DROP DATABASE" "$path"; then
            log_error "Migration file $name contains DROP DATABASE - this is not allowed"
            failed=true
        fi
    done < <(find "$MIGRATIONS_DIR" -name "*.sql" -exec basename {} \; | sort)
    if [ "$failed" = true ]; then
        log_error "Migration content validation failed!"
        exit 1
    fi
    log_success "Migration content validation passed"
}
# Main validation function
# Parses CLI options, then runs all validation stages in dependency order.
main() {
    local database_url=""
    # Parse command line arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            --database-url)
                # Require a value after the flag instead of silently
                # mis-shifting (which aborted cryptically under `set -e`).
                if [[ $# -lt 2 ]]; then
                    log_error "Option --database-url requires a value"
                    exit 1
                fi
                database_url="$2"
                shift 2
                ;;
            --verbose)
                VERBOSE="true"
                shift
                ;;
            --help|-h)
                show_help
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                show_help
                exit 1
                ;;
        esac
    done
    # A CLI-provided URL overrides the environment variable
    if [ -n "$database_url" ]; then
        export DATABASE_URL="$database_url"
    fi
    log_info "Starting migration validation..."
    check_dependencies
    check_database_connection
    validate_migration_files
    validate_migration_content
    log_success "Migration validation completed successfully"
}
# Handle interrupts gracefully
# Convert Ctrl-C / TERM into a logged failure so partial validation is visible.
trap 'log_error "Validation interrupted"; exit 1' INT TERM
# Run main function
main "$@"