new deploy workflow v0 #49

Workflow file for this run

# Name of the GitHub Actions workflow - shown in the Actions tab
name: Build and Push to Amazon ECR

# Defines when this workflow will run
on:
  push:
    # Only run on pushes to main branch
    branches:
      - main
    # Only trigger when files in the listed paths change
    # This prevents unnecessary deployments when unrelated parts of the repo change
    paths:
      - "generate/**"
      - "rvc/**"
      - "start_docker_compose_prod.sh"
      - "docker-compose.yml"
      - ".github/workflows/deploy-ec2.yml"
  # workflow_dispatch allows manual triggering of just the deploy job
  workflow_dispatch:
    inputs:
      run_deploy:
        description: "Run deploy job"
        required: true
        default: true
        type: boolean
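# A manual deploy-only run can be started from the Actions tab or, assuming
# this file is saved as .github/workflows/deploy-ec2.yml (the path listed
# above), with the GitHub CLI:
#
#   gh workflow run deploy-ec2.yml -f run_deploy=true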
jobs:
  # This job determines which build jobs should run based on changed files
  changes:
    name: Detect Changes
    runs-on: ubuntu-latest
    # Skip this job for manual triggers
    if: github.event_name == 'push'
    outputs:
      brainrot: ${{ steps.filter.outputs.generate }}
      rvc: ${{ steps.filter.outputs.rvc }}
    steps:
      - uses: actions/checkout@v3
      - uses: dorny/paths-filter@v2
        id: filter
        with:
          filters: |
            generate:
              - 'generate/**'
            rvc:
              - 'rvc/**'
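  # dorny/paths-filter exposes each filter result as the string 'true' or
  # 'false', which is why the build jobs below compare against the literal 'true'.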
  build-brainrot:
    name: Build and Push Brainrot
    needs: [changes]
    # Only run if generate files changed (the changes job is skipped on manual
    # triggers, so this job is skipped then as well)
    if: needs.changes.outputs.brainrot == 'true'
    runs-on: ubuntu-latest
    environment: production
    steps:
      # Step 1: Get the code
      - name: Checkout
        uses: actions/checkout@v3
      # Step 2: Free up disk space
      - name: Free up disk space
        run: |
          echo "Freeing up disk space on CI system"
          df -h
          echo "Removing large directories"
          sudo rm -rf /opt/hostedtoolcache
          df -h
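      # /opt/hostedtoolcache holds the runner's preinstalled tool versions
      # (Python, Node, Go, ...) and is one of the largest directories on the
      # hosted image, so removing it reclaims several GB.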
      # Step 3: Set up Docker Buildx
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      # Step 4: Set up AWS credentials for the rest of this job
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
      # Step 5: Log into Amazon ECR
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1
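      # amazon-ecr-login exposes the registry hostname as
      # steps.login-ecr.outputs.registry; the build step below uses it to
      # construct the image tags.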
      # Step 6: Create .env file from secrets
      - name: Create .env file
        run: |
          cd generate
          cat << EOF > .env
          JORDAN_PETERSON_VOICE_ID=${{ secrets.JORDAN_PETERSON_VOICE_ID }}
          JOE_ROGAN_VOICE_ID=${{ secrets.JOE_ROGAN_VOICE_ID }}
          BARACK_OBAMA_VOICE_ID=${{ secrets.BARACK_OBAMA_VOICE_ID }}
          KAMALA_HARRIS_VOICE_ID=${{ secrets.KAMALA_HARRIS_VOICE_ID }}
          BEN_SHAPIRO_VOICE_ID=${{ secrets.BEN_SHAPIRO_VOICE_ID }}
          ANDREW_TATE_VOICE_ID=${{ secrets.ANDREW_TATE_VOICE_ID }}
          JOE_BIDEN_VOICE_ID=${{ secrets.JOE_BIDEN_VOICE_ID }}
          DONALD_TRUMP_VOICE_ID=${{ secrets.DONALD_TRUMP_VOICE_ID }}
          GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}
          OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
          NEETS_API_KEY=${{ secrets.NEETS_API_KEY }}
          DB_HOST=${{ secrets.DB_HOST }}
          DB_USER=${{ secrets.DB_USER }}
          DB_PORT=${{ secrets.DB_PORT }}
          DB_PASSWORD=${{ secrets.DB_PASSWORD }}
          DB_URL=${{ secrets.DB_URL }}
          DB_NAME=${{ secrets.DB_NAME }}
          AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}
          EOF
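      # The .env file lands inside ./generate, the build context below, so
      # the image build can pick it up (assumption: the Dockerfile copies or
      # reads it - that part lives outside this workflow file).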
      # Step 7: Build and push the Docker image
      - name: Build and push Docker image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: brainrot
        uses: docker/build-push-action@v4
        with:
          context: ./generate
          platforms: linux/amd64
          push: true
          tags: |
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ github.sha }}
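      # Pushing both :latest and a commit-SHA tag lets the EC2 host pull
      # :latest while keeping every build addressable for rollbacks.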
  build-rvc:
    name: Build and Push RVC
    needs: [changes]
    # Only run if rvc files changed
    if: needs.changes.outputs.rvc == 'true'
    runs-on: ubuntu-latest
    environment: production
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      # Free up disk space before building the large RVC image
      - name: Free up disk space
        run: |
          echo "Freeing up disk space on CI system"
          df -h
          # Remove unnecessary large directories
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /usr/local/lib/android
          sudo rm -rf /opt/ghc
          sudo rm -rf /usr/local/share/powershell
          sudo rm -rf /usr/share/swift
          sudo rm -rf /usr/local/.ghcup
          sudo rm -rf /usr/lib/jvm
          sudo rm -rf /opt/hostedtoolcache
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
          # Remove unused packages
          sudo apt-get autoremove -y
          sudo apt-get clean -y
          # Purge large packages that aren't needed for the build
          sudo apt-get purge -y azure-cli google-cloud-sdk dotnet* firefox google-chrome-stable
          sudo apt-get purge -y '^ghc-*' '^php*' '^mysql*' mono-complete
          sudo apt-get purge -y llvm* powershell ruby-full postgresql*
          # Additional aggressive cleanup
          sudo rm -rf /usr/local/share/boost
          sudo rm -rf /usr/local/lib/node_modules
          sudo rm -rf /usr/local/share/cmake*
          sudo rm -rf /usr/local/share/man
          sudo rm -rf /var/lib/apt/lists/*
          # Clean Docker images if Docker is installed
          if command -v docker &> /dev/null; then
            docker system prune -a -f --volumes
          fi
          # Final cleanup
          sudo apt-get autoremove -y
          sudo apt-get clean -y
          # Show available space after cleanup
          df -h
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
      # Download model assets and logs from S3 into the build context
      - name: Download assets and logs from S3
        run: |
          # Remove existing directories (if they exist)
          rm -rf ./rvc/assets ./rvc/logs
          # Create directories
          mkdir -p ./rvc/assets ./rvc/logs
          # Download from S3
          aws s3 sync s3://brainrot-weights/assets/ ./rvc/assets/
          aws s3 sync s3://brainrot-weights/logs/ ./rvc/logs/
          # Print status for debugging
          echo "Downloaded $(find ./rvc/assets -type f | wc -l) files to assets directory"
          echo "Downloaded $(find ./rvc/logs -type f | wc -l) files to logs directory"
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1
      - name: Build and push Docker image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: rvc
        uses: docker/build-push-action@v4
        with:
          context: ./rvc
          platforms: linux/amd64
          push: true
          tags: |
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ github.sha }}
  # Deploy job that runs after any builds that were triggered
  deploy:
    name: Deploy to EC2
    runs-on: ubuntu-latest
    environment: production
    # Only depend on builds that actually ran
    needs:
      - changes
      - build-brainrot
      - build-rvc
    # Because the expression contains always(), GitHub does not prepend the
    # implicit success() check, so deploy can run even when some needed jobs
    # were skipped. On push, it waits for build-brainrot/build-rvc only if
    # the corresponding filter matched.
    if: |
      (github.event_name == 'workflow_dispatch' && github.event.inputs.run_deploy == 'true') ||
      (github.event_name == 'push' && always() &&
      (needs.changes.result == 'success') &&
      (needs.changes.outputs.brainrot != 'true' || needs.build-brainrot.result == 'success') &&
      (needs.changes.outputs.rvc != 'true' || needs.build-rvc.result == 'success'))
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
      # Copy deployment script to EC2
      - name: Copy deployment script
        uses: appleboy/scp-action@v0.1.4
        with:
          host: ${{ secrets.EC2_HOST_IP }}
          username: ec2-user
          key: ${{ secrets.EC2_SSH_KEY }}
          port: "22"
          source: "start_docker_compose_prod.sh"
          target: "/home/ec2-user/"
          overwrite: true
      # Deploy to EC2 via SSH
      - name: Deploy to EC2
        uses: appleboy/ssh-action@v1.0.0
        env:
          AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
          AWS_ACCESS_KEY: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_KEY: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
        with:
          host: ${{ secrets.EC2_HOST_IP }}
          port: "22"
          username: ec2-user
          key: ${{ secrets.EC2_SSH_KEY }}
          envs: AWS_ACCOUNT_ID,AWS_ACCESS_KEY,AWS_SECRET_KEY
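          # envs forwards the three variables defined under env: above into
          # the remote shell session used by the script below.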
          script: |
            # Install Docker if not already installed
            if ! command -v docker &> /dev/null; then
              echo "Installing Docker..."
              sudo yum update -y
              sudo yum install -y docker
              sudo systemctl enable docker
              sudo systemctl start docker
              sudo usermod -aG docker ec2-user
            fi
            # Install Docker Compose if not already installed
            if ! command -v docker-compose &> /dev/null; then
              echo "Installing Docker Compose..."
              sudo curl -L "https://github.com/docker/compose/releases/download/v2.20.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
              sudo chmod +x /usr/local/bin/docker-compose
              sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
            fi
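            # Note: this installs the standalone docker-compose binary; newer
            # Docker releases ship Compose V2 as a CLI plugin invoked as
            # `docker compose`. The standalone binary keeps the legacy
            # `docker-compose` name (assumption: start_docker_compose_prod.sh
            # invokes docker-compose).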
            # Make sure Docker is running
            sudo systemctl start docker
            # Aggressive disk cleanup before deployment
            echo "Cleaning up disk space..."
            # Remove unused Docker data
            docker system prune -a -f --volumes
            # Remove all stopped containers
            docker rm $(docker ps -a -q) || true
            # Remove all unused images
            docker rmi $(docker images -q) || true
            # Clean package manager cache
            sudo yum clean all
            # Remove temporary files
            sudo rm -rf /tmp/*
            # Show available disk space
            df -h
            # Configure AWS credentials for ECR access
            mkdir -p ~/.aws
            cat > ~/.aws/credentials << EOL
            [default]
            aws_access_key_id=$AWS_ACCESS_KEY
            aws_secret_access_key=$AWS_SECRET_KEY
            region=us-east-1
            EOL
            # Set AWS environment variables for this session
            export AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY
            export AWS_SECRET_ACCESS_KEY=$AWS_SECRET_KEY
            export AWS_DEFAULT_REGION=us-east-1
            export AWS_ACCOUNT_ID=$AWS_ACCOUNT_ID
            # Make the script executable and run it
            chmod +x /home/ec2-user/start_docker_compose_prod.sh
            echo "Running Docker Compose deployment script"
            /home/ec2-user/start_docker_compose_prod.sh