
separate deploy job #48

Workflow file for this run

# Name of the GitHub Actions workflow - shown in the Actions tab
name: Build and Push to Amazon ECR

# Defines when this workflow will run
on:
  push:
    # Only run on pushes to main branch
    branches:
      - main
    # Only trigger when deployment-relevant files change
    # This prevents unnecessary deployments when other parts of the repo change
    paths:
      - "generate/**"
      - "rvc/**"
      - "start_docker_compose_prod.sh"
      - "docker-compose.yml"
      - ".github/workflows/deploy-ec2.yml"

jobs:
  build-brainrot:
    name: Build and Push Brainrot
    runs-on: ubuntu-latest
    environment: production
    steps:
      # Step 1: Get the code
      - name: Checkout
        uses: actions/checkout@v3

      # Step 2: Free up disk space
      - name: Free up disk space
        run: |
          echo "Freeing up disk space on CI system"
          df -h
          echo "Removing large directories"
          sudo rm -rf /opt/hostedtoolcache
          df -h
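      # (/opt/hostedtoolcache holds the runner's preinstalled tool versions
      # and is usually one of the largest removable directories on a hosted
      # runner - often several GB, though the exact size varies by image.)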
      # Step 3: Set up Docker Buildx
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # Step 4: Set up AWS credentials for this job
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1

      # Step 5: Log into Amazon ECR
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1
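      # The id on the login step lets later steps read its outputs:
      # steps.login-ecr.outputs.registry resolves to the account's registry
      # URL, e.g. <account-id>.dkr.ecr.us-east-1.amazonaws.com.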
      # Step 6: Create .env file from secrets
      - name: Create .env file
        run: |
          cd generate
          cat << EOF > .env
          JORDAN_PETERSON_VOICE_ID=${{ secrets.JORDAN_PETERSON_VOICE_ID }}
          JOE_ROGAN_VOICE_ID=${{ secrets.JOE_ROGAN_VOICE_ID }}
          BARACK_OBAMA_VOICE_ID=${{ secrets.BARACK_OBAMA_VOICE_ID }}
          KAMALA_HARRIS_VOICE_ID=${{ secrets.KAMALA_HARRIS_VOICE_ID }}
          BEN_SHAPIRO_VOICE_ID=${{ secrets.BEN_SHAPIRO_VOICE_ID }}
          ANDREW_TATE_VOICE_ID=${{ secrets.ANDREW_TATE_VOICE_ID }}
          JOE_BIDEN_VOICE_ID=${{ secrets.JOE_BIDEN_VOICE_ID }}
          DONALD_TRUMP_VOICE_ID=${{ secrets.DONALD_TRUMP_VOICE_ID }}
          GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}
          OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
          NEETS_API_KEY=${{ secrets.NEETS_API_KEY }}
          DB_HOST=${{ secrets.DB_HOST }}
          DB_USER=${{ secrets.DB_USER }}
          DB_PORT=${{ secrets.DB_PORT }}
          DB_PASSWORD=${{ secrets.DB_PASSWORD }}
          DB_URL=${{ secrets.DB_URL }}
          DB_NAME=${{ secrets.DB_NAME }}
          AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}
          EOF
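      # The .env file is written into generate/ before the build, making it
      # part of the Docker build context; this assumes the Dockerfile in
      # generate/ copies (or otherwise reads) .env into the image.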
      # Step 7: Build and push the Docker image
      - name: Build and push Docker image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: brainrot
        uses: docker/build-push-action@v4
        with:
          context: ./generate
          platforms: linux/amd64
          push: true
          tags: |
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ github.sha }}
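      # Two tags per image: ":latest" is a moving pointer (presumably what
      # the deploy script pulls), while the commit-SHA tag is an immutable
      # reference useful for rollbacks. The step-level env is readable here
      # because the env context is available inside "with" expressions.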
  build-rvc:
    name: Build and Push RVC
    runs-on: ubuntu-latest
    environment: production
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      # Free up as much disk space as possible before the build
      - name: Free up disk space
        run: |
          echo "Freeing up disk space on CI system"
          df -h
          # Remove unnecessary large directories
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /usr/local/lib/android
          sudo rm -rf /opt/ghc
          sudo rm -rf /usr/local/share/powershell
          sudo rm -rf /usr/share/swift
          sudo rm -rf /usr/local/.ghcup
          sudo rm -rf /usr/lib/jvm
          sudo rm -rf /opt/hostedtoolcache
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
          # Remove unused packages
          sudo apt-get autoremove -y
          sudo apt-get clean -y
          # Purge large packages that aren't needed for the build
          sudo apt-get purge -y azure-cli google-cloud-sdk dotnet* firefox google-chrome-stable
          sudo apt-get purge -y '^ghc-*' '^php*' '^mysql*' mono-complete
          sudo apt-get purge -y llvm* powershell ruby-full postgresql*
          # Final cleanup
          sudo apt-get autoremove -y
          sudo apt-get clean -y
          # Show available space after cleanup
          df -h
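      # Caution: apt-get purge exits non-zero if a listed package name cannot
      # be resolved at all, which would fail this step; appending "|| true"
      # to the purge lines is a common guard if that ever bites.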
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1

      # Download assets and logs from S3 into the build context
      - name: Download assets and logs from S3
        run: |
          # Remove existing directories (if they exist)
          rm -rf ./rvc/assets ./rvc/logs
          # Create directories
          mkdir -p ./rvc/assets ./rvc/logs
          # Download from S3
          aws s3 sync s3://brainrot-weights/assets/ ./rvc/assets/
          aws s3 sync s3://brainrot-weights/logs/ ./rvc/logs/
          # Print status for debugging
          echo "Downloaded $(find ./rvc/assets -type f | wc -l) files to assets directory"
          echo "Downloaded $(find ./rvc/logs -type f | wc -l) files to logs directory"
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1

      - name: Build and push Docker image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: rvc
        uses: docker/build-push-action@v4
        with:
          context: ./rvc
          platforms: linux/amd64
          push: true
          tags: |
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
            ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:${{ github.sha }}
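  # build-brainrot and build-rvc declare no dependency on each other, so they
  # run in parallel; the deploy job below fans in on both via "needs" and
  # starts only after both images have been pushed.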
  deploy:
    name: Deploy to EC2
    runs-on: ubuntu-latest
    environment: production
    needs: [build-brainrot, build-rvc]
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1

      - name: Copy deployment script
        uses: appleboy/scp-action@v0.1.4
        with:
          host: ${{ secrets.EC2_HOST_IP }}
          username: ec2-user
          key: ${{ secrets.EC2_SSH_KEY }}
          port: "22"
          source: "start_docker_compose_prod.sh"
          target: "/home/ec2-user/"
          overwrite: true
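      # The script must exist on the host before the SSH step below runs it;
      # scp-action pushes it over the same SSH credentials, and
      # overwrite: true replaces any copy left by a previous deploy.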
      - name: Deploy to EC2
        uses: appleboy/ssh-action@v1.0.0
        env:
          AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
          AWS_ACCESS_KEY: ${{ secrets.ACTIONS_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_KEY: ${{ secrets.ACTIONS_AWS_SECRET_ACCESS_KEY }}
        with:
          host: ${{ secrets.EC2_HOST_IP }}
          port: "22"
          username: ec2-user
          key: ${{ secrets.EC2_SSH_KEY }}
          envs: AWS_ACCOUNT_ID,AWS_ACCESS_KEY,AWS_SECRET_KEY
          script: |
            # Install Docker if not already installed
            if ! command -v docker &> /dev/null; then
              echo "Installing Docker..."
              sudo yum update -y
              sudo yum install -y docker
              sudo systemctl enable docker
              sudo systemctl start docker
              sudo usermod -aG docker ec2-user
            fi
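            # Note: group membership added via usermod -aG only applies to
            # new login sessions, so this same session cannot rely on it;
            # Docker is managed through sudo/systemctl here instead.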
            # Install Docker Compose if not already installed
            if ! command -v docker-compose &> /dev/null; then
              echo "Installing Docker Compose..."
              sudo curl -L "https://github.com/docker/compose/releases/download/v2.20.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
              sudo chmod +x /usr/local/bin/docker-compose
              sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
            fi
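            # This fetches the standalone Compose v2 binary so the legacy
            # "docker-compose" command exists (newer Docker ships the same
            # functionality as the "docker compose" plugin). Caveat: v2
            # release assets are named in lowercase (docker-compose-linux-x86_64),
            # so the capitalized output of "uname -s" may not match an asset
            # name - worth checking if this download ever 404s.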
            # Make sure Docker is running
            sudo systemctl start docker
            # Configure AWS credentials for ECR access
            mkdir -p ~/.aws
            cat > ~/.aws/credentials << EOL
            [default]
            aws_access_key_id=$AWS_ACCESS_KEY
            aws_secret_access_key=$AWS_SECRET_KEY
            region=us-east-1
            EOL
            # Set AWS environment variables for this session
            export AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY
            export AWS_SECRET_ACCESS_KEY=$AWS_SECRET_KEY
            export AWS_DEFAULT_REGION=us-east-1
            export AWS_ACCOUNT_ID=$AWS_ACCOUNT_ID
            # Make the script executable and run it
            chmod +x /home/ec2-user/start_docker_compose_prod.sh
            echo "Running Docker Compose deployment script"
            /home/ec2-user/start_docker_compose_prod.sh
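            # start_docker_compose_prod.sh itself is not shown in this run;
            # given the variables exported above, it presumably authenticates
            # to ECR using AWS_ACCOUNT_ID and pulls/starts the compose stack.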