Merged
Changes from 16 commits
32 changes: 4 additions & 28 deletions .github/scripts/pause.sh
@@ -1,5 +1,6 @@
#!/bin/bash
# This script pauses AWS resources (ECS service and RDS Aurora cluster) in the current AWS account.
# This script pauses AWS resources (ECS service) in the current AWS account.
# Note: DynamoDB doesn't require pausing like RDS as it's pay-per-request

set -e # Exit on error

@@ -28,29 +29,7 @@ function validate_args() {
fi
}

# Check if Aurora DB cluster exists and get its status
function check_aurora_cluster() {
local cluster_id="${STACK_PREFIX}-aurora-${ENVIRONMENT}"
local status=$(aws rds describe-db-clusters --db-cluster-identifier "$cluster_id" \
--query 'DBClusters[0].Status' --output text 2>/dev/null || echo "false")
echo "$status"
}

# Pause Aurora DB cluster if available
function pause_aurora_cluster() {
local cluster_id="${STACK_PREFIX}-aurora-${ENVIRONMENT}"
local status=$1

if [ "$status" = "false" ]; then
echo "Skipping Aurora pause operation: DB cluster does not exist"
return
elif [ "$status" = "available" ]; then
echo "Pausing Aurora cluster: $cluster_id"
aws rds stop-db-cluster --db-cluster-identifier "$cluster_id" --no-cli-pager --output json
else
echo "DB cluster is not in an available state. Current state: $status"
fi
}

# Check if ECS cluster exists
function check_ecs_cluster() {
@@ -92,16 +71,13 @@ function pause_ecs_service() {
# Main execution
validate_args

# Check and pause Aurora cluster
aurora_status=$(check_aurora_cluster)
[ "$aurora_status" = "false" ] || echo "Aurora cluster status: $aurora_status"

# Check and pause ECS service
ecs_status=$(check_ecs_cluster)
[ "$ecs_status" = "INACTIVE" ] || echo "ECS cluster status: $ecs_status"

# Perform pause operations
pause_ecs_service "$ecs_status"
pause_aurora_cluster "$aurora_status"

echo "Pause completed. Note: DynamoDB doesn't require pausing as it uses pay-per-request billing."

echo "Pause operations completed"
59 changes: 17 additions & 42 deletions .github/scripts/resume.sh
@@ -1,5 +1,6 @@
#!/bin/bash
# This script resumes AWS resources (ECS service and RDS Aurora cluster) in the specified AWS account.
# This script resumes AWS resources (ECS service) in the specified AWS account.
# Note: DynamoDB doesn't require resuming like RDS as it's always available

set -e # Exit on error

@@ -25,44 +26,28 @@ check_parameters() {
exit 1
fi
}

# Function to check if DB cluster exists and get its status
check_db_cluster() {
local prefix=$1
local env=$2
local cluster_id="${prefix}-aurora-${env}"
local status=$(aws rds describe-db-clusters --db-cluster-identifier ${cluster_id} --query 'DBClusters[0].Status' --output text 2>/dev/null || echo "not-found")
# Check if ECS cluster exists
function check_ecs_cluster() {
local cluster_name="ecs-cluster-${STACK_PREFIX}-node-api-${ENVIRONMENT}"
local status=$(aws ecs describe-clusters --clusters "$cluster_name" \
--query 'clusters[0].status' --output text 2>/dev/null || echo "INACTIVE")
echo "$status"
}

# Function to start DB cluster
start_db_cluster() {
local prefix=$1
local env=$2
local cluster_id="${prefix}-aurora-${env}"

echo "Starting DB cluster ${cluster_id}..."
aws rds start-db-cluster --db-cluster-identifier ${cluster_id} --no-cli-pager --output json

echo "Waiting for DB cluster to be available..."
if ! aws rds wait db-cluster-available --db-cluster-identifier ${cluster_id}; then
echo "Timeout waiting for DB cluster to become available"
return 1
fi

echo "DB cluster is now available"
return 0
}

# Function to resume ECS service
resume_ecs_service() {
local prefix=$1
local env=$2
local cluster="ecs-cluster-${prefix}-node-api-${env}"
local service="${prefix}-node-api-${env}-service"
local cluster_status=$3

if [ "$cluster_status" != "ACTIVE" ]; then
echo "Skipping ECS resume operation: Cluster $cluster does not exist"
return
fi
echo "Resuming ECS service ${service} on cluster ${cluster}..."

# Update scaling policy
aws application-autoscaling register-scalable-target \
--service-namespace ecs \
@@ -90,21 +75,11 @@ main() {
local prefix=$2

echo "Starting to resume resources for environment: ${env} with stack prefix: ${prefix}"

# Check DB cluster status
local db_status=$(check_db_cluster "$prefix" "$env")

if [ "$db_status" == "not-found" ]; then
echo "Skipping resume operation, DB cluster does not exist"
return 0
elif [ "$db_status" == "stopped" ]; then
start_db_cluster "$prefix" "$env" || return 1
else
echo "DB cluster is not in a stopped state. Current state: $db_status"
fi

# Resume ECS service
resume_ecs_service "$prefix" "$env"
# Check ECS cluster status
ecs_status=$(check_ecs_cluster)
[ "$ecs_status" = "INACTIVE" ] || echo "ECS cluster status: $ecs_status"
# Resume ECS service (DynamoDB is always available)
resume_ecs_service "$prefix" "$env" "$ecs_status"

echo "Resources have been resumed successfully"
}
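The `register-scalable-target` call in `resume_ecs_service` is truncated by the collapsed hunk above. A minimal sketch of how restoring capacity typically looks; the min/max values here are assumptions, since the real numbers live in the elided lines:

```bash
# Sketch only: re-register the autoscaling bounds so the service scales back up.
# Capacity values are placeholders; the actual values are in the collapsed diff.
aws application-autoscaling register-scalable-target \
  --service-namespace ecs \
  --scalable-dimension ecs:service:DesiredCount \
  --resource-id "service/${cluster}/${service}" \
  --min-capacity 1 \
  --max-capacity 3
```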
2 changes: 1 addition & 1 deletion .github/workflows/.builds.yml
@@ -22,7 +22,7 @@ jobs:
strategy:
matrix:
# Only building frontend containers to run PR based e2e tests
package: [backend, migrations, frontend]
package: [backend, frontend]
timeout-minutes: 10
steps:
- uses: bcgov/action-builder-ghcr@v4.0.0
3 changes: 0 additions & 3 deletions .github/workflows/.deployer.yml
@@ -84,7 +84,6 @@ jobs:
env:
target_env: ${{ inputs.environment_name }}
aws_license_plate: ${{ secrets.AWS_LICENSE_PLATE }}
flyway_image: ghcr.io/${{github.repository}}/migrations:${{inputs.tag}}
api_image: ghcr.io/${{github.repository}}/backend:${{inputs.tag}}
app_env: ${{inputs.app_env}}
stack_prefix: ${{ inputs.stack_prefix }}
@@ -98,7 +97,6 @@ jobs:
env:
target_env: ${{ inputs.environment_name }}
aws_license_plate: ${{ secrets.AWS_LICENSE_PLATE }}
flyway_image: ghcr.io/${{github.repository}}/migrations:${{inputs.tag}}
api_image: ghcr.io/${{github.repository}}/backend:${{inputs.tag}}
app_env: ${{inputs.app_env}}
stack_prefix: ${{ inputs.stack_prefix }}
@@ -115,7 +113,6 @@ jobs:
env:
target_env: ${{ inputs.environment_name }}
aws_license_plate: ${{ secrets.AWS_LICENSE_PLATE }}
flyway_image: ghcr.io/${{github.repository}}/migrations:${{inputs.tag}}
api_image: ghcr.io/${{github.repository}}/backend:${{inputs.tag}}
app_env: ${{inputs.app_env}}
stack_prefix: ${{ inputs.stack_prefix }}
1 change: 1 addition & 0 deletions .github/workflows/.e2e.yml
@@ -26,6 +26,7 @@ jobs:
BACKEND_IMAGE: ghcr.io/${{ github.repository }}/backend:${{ inputs.tag }}
FLYWAY_IMAGE: ghcr.io/${{ github.repository }}/migrations:${{ inputs.tag }}
FRONTEND_IMAGE: ghcr.io/${{ github.repository }}/frontend:${{ inputs.tag }}
IS_OFFLINE: 'true' # run the backend in offline mode
run: docker compose up -d --wait
continue-on-error: true
- name: Docker Compose Logs
12 changes: 0 additions & 12 deletions .github/workflows/.tests.yml
@@ -16,18 +16,6 @@ jobs:
if: ${{ ! github.event.pull_request.draft }}
runs-on: ubuntu-24.04
timeout-minutes: 5
services:
postgres:
image: postgis/postgis:17-3.5 # Updated to PostgreSQL 17 with PostGIS 3.5
env:
POSTGRES_PASSWORD: default
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
steps:
- uses: bcgov-nr/action-test-and-analyse@v1.3.0
env:
148 changes: 148 additions & 0 deletions AWS-DEPLOY.md
@@ -0,0 +1,148 @@
# How to Deploy to AWS Using Terraform

## Prerequisites

1. A BCGov AWS account/namespace.

## Console (UI) steps to set up the GitHub secrets for Terraform deployment

1. [Log in to the console via IDIR MFA](https://login.nimbus.cloud.gov.bc.ca/)
2. Navigate to IAM and click on Policies in the left-hand menu.
3. Click the `Create policy` button, switch from Visual to JSON, and paste the snippet below:

```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "IAM",
"Effect": "Allow",
"Action": ["iam:*"],
"Resource": ["*"]
},
{
"Sid": "S3",
"Effect": "Allow",
"Action": ["s3:*"],
"Resource": ["*"]
},
{
"Sid": "Cloudfront",
"Effect": "Allow",
"Action": ["cloudfront:*"],
"Resource": ["*"]
},
{
"Sid": "ecs",
"Effect": "Allow",
"Action": ["ecs:*"],
"Resource": "*"
},
{
"Sid": "ecr",
"Effect": "Allow",
"Action": ["ecr:*"],
"Resource": "*"
},
{
"Sid": "Dynamodb",
"Effect": "Allow",
"Action": ["dynamodb:*"],
"Resource": ["*"]
},
{
"Sid": "APIgateway",
"Effect": "Allow",
"Action": ["apigateway:*"],
"Resource": ["*"]
},
{
"Sid": "Cloudwatch",
"Effect": "Allow",
"Action": ["cloudwatch:*"],
"Resource": "*"
},
{
"Sid": "EC2",
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": "*"
},
{
"Sid": "Autoscaling",
"Effect": "Allow",
"Action": ["autoscaling:*"],
"Resource": "*"
},
{
"Sid": "KMS",
"Effect": "Allow",
"Action": ["kms:*"],
"Resource": "*"
},
{
"Sid": "SecretsManager",
"Effect": "Allow",
"Action": ["secretsmanager:*"],
"Resource": "*"
},
{
"Sid": "CloudWatchLogs",
"Effect": "Allow",
"Action": ["logs:*"],
"Resource": "*"
},
{
"Sid": "WAF",
"Effect": "Allow",
"Action": ["wafv2:*"],
"Resource": "*"
},
{
"Sid": "ELB",
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": "*"
},
{
"Sid": "AppAutoScaling",
"Effect": "Allow",
"Action": ["application-autoscaling:*"],
"Resource": "*"
}

]
}
```
4. Create a role by clicking the `Create role` button and selecting the Custom trust policy radio button.
5. Paste the JSON below after modifying it to set the role's trust relationship with your GitHub repo (`<repo_name>`, e.g. `bcgov/quickstart-aws-containers`):

```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::<account_number>:oidc-provider/token.actions.githubusercontent.com"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringLike": {
"token.actions.githubusercontent.com:sub": "repo:<repo_name>:*"
},
"ForAllValues:StringEquals": {
"token.actions.githubusercontent.com:aud": "sts.amazonaws.com",
"token.actions.githubusercontent.com:iss": "https://token.actions.githubusercontent.com"
}
}
}
]
}
```
6. Click the Next button, search for the policy created above, and attach it by checking its checkbox.
7. Give the role a name, e.g. `GHA_CI_CD`, and click the `Create role` button.
8. After the role is created, copy its ARN; it will look like `arn:aws:iam::<account_number>:role/<role_name>`, where `role_name` is the name given in step 7.
9. Paste this value into GitHub secrets, as a repository secret or environment secret based on your needs. The key to use is `AWS_DEPLOY_ROLE_ARN`.
10. Paste the license plate value (6 alphanumeric characters, e.g. `ab9okj`), without the environment suffix, as a repository secret. The key to use is `AWS_LICENSE_PLATE`.
11. After this the GitHub Actions workflows will be able to deploy the stack to AWS; for a CLI alternative to these console steps, see the sketch below.
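For reference, the console steps above map roughly onto the following CLI calls. This is a sketch under assumptions: the policy name and local file paths are examples, and the final step presumes the `gh` CLI is installed and authenticated.

```bash
# Sketch of the console steps as CLI equivalents; names and paths are examples.
# Assumes the permissions policy JSON was saved as deploy-policy.json and the
# trust policy JSON as trust-policy.json.
aws iam create-policy \
  --policy-name GHA-terraform-deploy \
  --policy-document file://deploy-policy.json

aws iam create-role \
  --role-name GHA_CI_CD \
  --assume-role-policy-document file://trust-policy.json

aws iam attach-role-policy \
  --role-name GHA_CI_CD \
  --policy-arn "arn:aws:iam::<account_number>:policy/GHA-terraform-deploy"

# Store the role ARN as a GitHub repository secret (requires the gh CLI).
gh secret set AWS_DEPLOY_ROLE_ARN \
  --body "arn:aws:iam::<account_number>:role/GHA_CI_CD"
```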