Configure CI/CD Pipeline
Implement automated continuous integration and deployment pipelines that deliver software reliably, following industry best practices.
CI/CD Overview
Modern CI/CD pipelines automate the build, test, and deployment process, enabling teams to deliver software rapidly and reliably.
Pipeline Stages
- Source: Code repository triggers
- Build: Compile and package application
- Test: Unit, integration, and security tests
- Analyze: Code quality and vulnerability scanning
- Package: Create deployable artifacts
- Deploy: Progressive deployment to environments
- Monitor: Post-deployment verification
GitHub Actions Pipeline
Complete CI/CD Workflow
# .github/workflows/cicd.yml
name: CI/CD Pipeline

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]
  workflow_dispatch:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  # Build and Test
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18'
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Run linting
        run: npm run lint
      - name: Run unit tests
        run: npm run test:unit
      - name: Run integration tests
        run: npm run test:integration
      - name: Generate coverage report
        run: npm run coverage
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
      - name: Build application
        run: npm run build
      - name: Upload build artifacts
        uses: actions/upload-artifact@v3
        with:
          name: build-artifacts
          path: dist/

  # Security Scanning
  security:
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: 'fs'
          scan-ref: '.'
          format: 'sarif'
          output: 'trivy-results.sarif'
      - name: Upload Trivy scan results
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: 'trivy-results.sarif'
      - name: Run Snyk security scan
        uses: snyk/actions/node@master
        env:
          SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}

  # Build Docker Image
  docker:
    runs-on: ubuntu-latest
    needs: [build, security]
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Log in to Container Registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # the raw full-SHA tag is what the deploy jobs below reference
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha,prefix={{branch}}-
            type=raw,value=${{ github.sha }}
      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

  # Deploy to Staging
  deploy-staging:
    runs-on: ubuntu-latest
    needs: docker
    if: github.ref == 'refs/heads/develop'
    environment:
      name: staging
      url: https://staging.example.com
    steps:
      - name: Deploy to Kubernetes
        uses: azure/k8s-deploy@v4
        with:
          namespace: staging
          manifests: |
            k8s/deployment.yaml
            k8s/service.yaml
          images: |
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}

  # Deploy to Production
  deploy-production:
    runs-on: ubuntu-latest
    needs: docker
    if: github.ref == 'refs/heads/main'
    environment:
      name: production
      url: https://example.com
    steps:
      - name: Deploy to Production
        uses: azure/k8s-deploy@v4
        with:
          namespace: production
          manifests: |
            k8s/deployment.yaml
            k8s/service.yaml
          images: |
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
          strategy: canary
          percentage: 20
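Because the workflow also declares a workflow_dispatch trigger, it can be started manually. A minimal sketch using the GitHub CLI, assuming gh is installed and authenticated for the repository (the file name cicd.yml matches the header comment above):

#!/bin/bash
# Manually trigger the pipeline and follow the resulting run with the GitHub CLI.
gh workflow run cicd.yml --ref develop        # start the workflow on the develop branch
gh run list --workflow=cicd.yml --limit 5     # list the most recent runs of this workflow
gh run watch                                  # interactively follow a run's progress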
Jenkins Pipeline
Declarative Pipeline
// Jenkinsfile
pipeline {
    agent {
        kubernetes {
            yaml '''
                apiVersion: v1
                kind: Pod
                spec:
                  containers:
                  - name: maven
                    image: maven:3.8-openjdk-11
                    command: ['sleep', '99999']
                  - name: docker
                    image: docker:20.10
                    command: ['sleep', '99999']
                    volumeMounts:
                    - name: docker-sock
                      mountPath: /var/run/docker.sock
                  volumes:
                  - name: docker-sock
                    hostPath:
                      path: /var/run/docker.sock
            '''
        }
    }

    environment {
        DOCKER_REGISTRY = 'registry.example.com'
        APP_NAME = 'myapp'
        SONAR_HOST = credentials('sonar-host')
        SONAR_TOKEN = credentials('sonar-token')
    }

    stages {
        stage('Checkout') {
            steps {
                checkout scm
                script {
                    env.GIT_COMMIT = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
                    env.GIT_BRANCH = sh(returnStdout: true, script: 'git rev-parse --abbrev-ref HEAD').trim()
                }
            }
        }

        stage('Build') {
            steps {
                container('maven') {
                    sh 'mvn clean compile'
                }
            }
        }

        stage('Test') {
            parallel {
                stage('Unit Tests') {
                    steps {
                        container('maven') {
                            sh 'mvn test'
                            junit 'target/surefire-reports/*.xml'
                        }
                    }
                }
                stage('Integration Tests') {
                    steps {
                        container('maven') {
                            sh 'mvn verify -Pintegration-tests'
                        }
                    }
                }
            }
        }

        stage('Code Analysis') {
            steps {
                container('maven') {
                    withSonarQubeEnv('SonarQube') {
                        sh """
                            mvn sonar:sonar \
                              -Dsonar.host.url=${SONAR_HOST} \
                              -Dsonar.login=${SONAR_TOKEN} \
                              -Dsonar.projectKey=${APP_NAME} \
                              -Dsonar.projectName=${APP_NAME}
                        """
                    }
                }
                timeout(time: 10, unit: 'MINUTES') {
                    waitForQualityGate abortPipeline: true
                }
            }
        }

        stage('Build Docker Image') {
            steps {
                container('docker') {
                    script {
                        docker.build("${DOCKER_REGISTRY}/${APP_NAME}:${GIT_COMMIT}")
                    }
                }
            }
        }

        stage('Push to Registry') {
            when {
                branch 'main'
            }
            steps {
                container('docker') {
                    script {
                        docker.withRegistry("https://${DOCKER_REGISTRY}", 'docker-credentials') {
                            docker.image("${DOCKER_REGISTRY}/${APP_NAME}:${GIT_COMMIT}").push()
                            docker.image("${DOCKER_REGISTRY}/${APP_NAME}:${GIT_COMMIT}").push('latest')
                        }
                    }
                }
            }
        }

        stage('Deploy') {
            when {
                branch 'main'
            }
            steps {
                script {
                    kubernetesDeploy(
                        configs: 'k8s/*.yaml',
                        kubeconfigId: 'kubeconfig',
                        enableConfigSubstitution: true
                    )
                }
            }
        }
    }

    post {
        always {
            cleanWs()
        }
        success {
            slackSend(
                color: 'good',
                message: "Build Successful: ${env.JOB_NAME} - ${env.BUILD_NUMBER}"
            )
        }
        failure {
            slackSend(
                color: 'danger',
                message: "Build Failed: ${env.JOB_NAME} - ${env.BUILD_NUMBER}"
            )
        }
    }
}
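Before committing Jenkinsfile changes, the declarative syntax can be checked against the controller's built-in linter. A sketch using the documented pipeline-model-converter/validate endpoint; the controller URL and credentials are placeholders, and most installations also require a CSRF crumb as shown:

#!/bin/bash
# Validate a declarative Jenkinsfile against a running Jenkins controller.
JENKINS_URL="https://jenkins.example.com"   # assumed controller URL
AUTH="user:api-token"                       # assumed user and API token

# Fetch a CSRF crumb, then post the local Jenkinsfile to the built-in linter.
CRUMB=$(curl -s -u "$AUTH" "$JENKINS_URL/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,\":\",//crumb)")
curl -s -u "$AUTH" -X POST -H "$CRUMB" -F "jenkinsfile=<Jenkinsfile" "$JENKINS_URL/pipeline-model-converter/validate"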
GitLab CI/CD
Complete Pipeline Configuration
# .gitlab-ci.yml
variables:
  DOCKER_DRIVER: overlay2
  DOCKER_TLS_CERTDIR: "/certs"
  CONTAINER_TEST_IMAGE: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
  CONTAINER_RELEASE_IMAGE: $CI_REGISTRY_IMAGE:latest

stages:
  - build
  - test
  - security
  - package
  - deploy

# Build Stage
build:
  stage: build
  image: node:18-alpine
  cache:
    key: ${CI_COMMIT_REF_SLUG}
    paths:
      - node_modules/
  script:
    - npm ci --cache .npm --prefer-offline
    - npm run build
  artifacts:
    paths:
      - dist/
    expire_in: 1 hour

# Test Stage
test:unit:
  stage: test
  image: node:18-alpine
  needs: ["build"]
  script:
    - npm ci --cache .npm --prefer-offline
    - npm run test:unit
  coverage: '/Lines\s*:\s*(\d+\.\d+)%/'
  artifacts:
    reports:
      junit: junit.xml
      coverage_report:
        coverage_format: cobertura
        path: coverage/cobertura-coverage.xml

test:integration:
  stage: test
  image: node:18-alpine
  needs: ["build"]
  services:
    - postgres:14
    - redis:7
  variables:
    POSTGRES_DB: test
    POSTGRES_USER: test
    POSTGRES_PASSWORD: test
    DATABASE_URL: "postgresql://test:test@postgres:5432/test"
    REDIS_URL: "redis://redis:6379"
  script:
    - npm ci --cache .npm --prefer-offline
    - npm run test:integration

# Security Scanning
security:dependencies:
  stage: security
  image: node:18-alpine
  needs: []
  script:
    - npm audit --production --audit-level=moderate
  allow_failure: true
# SAST: `include:` is a top-level keyword, so the template is pulled in here and
# the `sast` job it defines is overridden to run in the security stage.
include:
  - template: Security/SAST.gitlab-ci.yml

sast:
  stage: security
  needs: []
security:secrets:
  stage: security
  image: trufflesecurity/trufflehog:latest
  needs: []
  script:
    - trufflehog git file://. --since-commit HEAD~5 --only-verified

# Package Stage
package:docker:
  stage: package
  image: docker:24.0.5
  needs: ["build", "test:unit", "test:integration"]
  services:
    - docker:24.0.5-dind
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
  script:
    - docker build --pull -t $CONTAINER_TEST_IMAGE .
    - docker push $CONTAINER_TEST_IMAGE
    - |
      if [[ "$CI_COMMIT_BRANCH" == "$CI_DEFAULT_BRANCH" ]]; then
        docker tag $CONTAINER_TEST_IMAGE $CONTAINER_RELEASE_IMAGE
        docker push $CONTAINER_RELEASE_IMAGE
      fi

# Deploy Stages
deploy:staging:
  stage: deploy
  image: bitnami/kubectl:latest
  needs: ["package:docker"]
  environment:
    name: staging
    url: https://staging.example.com
  only:
    - develop
  script:
    - kubectl config use-context $KUBE_CONTEXT
    - kubectl set image deployment/app app=$CONTAINER_TEST_IMAGE -n staging
    - kubectl rollout status deployment/app -n staging

deploy:production:
  stage: deploy
  image: bitnami/kubectl:latest
  needs: ["package:docker"]
  environment:
    name: production
    url: https://example.com
  only:
    - main
  when: manual
  script:
    - kubectl config use-context $KUBE_CONTEXT
    - kubectl set image deployment/app app=$CONTAINER_RELEASE_IMAGE -n production
    - kubectl rollout status deployment/app -n production

# Rollback job
rollback:production:
  stage: deploy
  image: bitnami/kubectl:latest
  environment:
    name: production
  when: manual
  only:
    - main
  script:
    - kubectl config use-context $KUBE_CONTEXT
    - kubectl rollout undo deployment/app -n production
Testing Strategy
Test Pyramid
| Test Type | Scope | Speed | Quantity |
|---|---|---|---|
| Unit Tests | Single function/class | Milliseconds | Thousands |
| Integration Tests | Component interactions | Seconds | Hundreds |
| E2E Tests | Full user scenarios | Minutes | Dozens |
| Performance Tests | Load and stress | Minutes to hours | Few |
Test Automation
// jest.config.js — Jest configuration for JavaScript/TypeScript projects
module.exports = {
  collectCoverageFrom: [
    'src/**/*.{js,jsx,ts,tsx}',
    '!src/**/*.d.ts',
    '!src/index.js',
    '!src/serviceWorker.js',
  ],
  coverageThreshold: {
    global: {
      branches: 80,
      functions: 80,
      lines: 80,
      statements: 80,
    },
  },
  testMatch: [
    '<rootDir>/src/**/__tests__/**/*.{js,jsx,ts,tsx}',
    '<rootDir>/src/**/*.{spec,test}.{js,jsx,ts,tsx}',
  ],
  transform: {
    '^.+\\.(js|jsx|ts|tsx)$': 'babel-jest',
  },
  moduleNameMapper: {
    '^@/(.*)$': '<rootDir>/src/$1',
  },
};
Infrastructure as Code
Terraform Pipeline
# terraform-pipeline.yml
name: Terraform Pipeline

on:
  # Plan on pull requests; the apply step below only runs on pushes to main.
  pull_request:
    paths:
      - 'terraform/**'
  push:
    branches: [main]
    paths:
      - 'terraform/**'

env:
  TF_VERSION: '1.5.0'
  TF_WORKING_DIR: './terraform'

jobs:
  terraform:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Setup Terraform
        uses: hashicorp/setup-terraform@v2
        with:
          terraform_version: ${{ env.TF_VERSION }}
      - name: Terraform Format Check
        run: terraform fmt -check -recursive
        working-directory: ${{ env.TF_WORKING_DIR }}
      - name: Terraform Init
        run: terraform init
        working-directory: ${{ env.TF_WORKING_DIR }}
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      - name: Terraform Validate
        run: terraform validate
        working-directory: ${{ env.TF_WORKING_DIR }}
      - name: Terraform Plan
        run: terraform plan -out=tfplan
        working-directory: ${{ env.TF_WORKING_DIR }}
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      - name: Terraform Show
        run: terraform show -no-color tfplan
        working-directory: ${{ env.TF_WORKING_DIR }}
      - name: Terraform Apply
        if: github.ref == 'refs/heads/main'
        run: terraform apply -auto-approve tfplan
        working-directory: ${{ env.TF_WORKING_DIR }}
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
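The same static checks the pipeline runs can be executed locally before opening a pull request. A minimal sketch, assuming the layout above with the configuration under ./terraform; no backend credentials are needed because backend initialization is skipped:

#!/bin/bash
# Run Terraform's formatting and validation checks locally, mirroring the CI steps.
set -e
cd terraform
terraform fmt -check -recursive   # same check as the pipeline's format step
terraform init -backend=false     # fetch providers/modules without touching remote state
terraform validate                # static validation of the configuration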
Deployment Strategies
Blue-Green Deployment
#!/bin/bash
# Blue-Green deployment script
CURRENT_ENV=$(kubectl get service app-service -o jsonpath='{.spec.selector.version}')
NEW_ENV="green"

if [ "$CURRENT_ENV" == "green" ]; then
  NEW_ENV="blue"
fi

echo "Current environment: $CURRENT_ENV"
echo "Deploying to: $NEW_ENV"

# Deploy new version
kubectl set image deployment/app-$NEW_ENV app=myapp:$VERSION -n production
kubectl rollout status deployment/app-$NEW_ENV -n production

# Run smoke tests
./scripts/smoke-tests.sh $NEW_ENV

if [ $? -eq 0 ]; then
  echo "Smoke tests passed, switching traffic"
  kubectl patch service app-service -p '{"spec":{"selector":{"version":"'$NEW_ENV'"}}}'
  echo "Traffic switched to $NEW_ENV"
else
  echo "Smoke tests failed, rolling back"
  exit 1
fi
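The script above calls ./scripts/smoke-tests.sh, which is not shown. A minimal sketch of what such a script might look like; the /health endpoint and port 8080 are assumptions, and the new color is reached via a temporary port-forward so it can be checked before any traffic is switched to it:

#!/bin/bash
# scripts/smoke-tests.sh (illustrative sketch) — health check for the freshly deployed color
# Usage: ./scripts/smoke-tests.sh <blue|green>
TARGET_ENV="$1"

# Reach the new deployment directly, before the service selector is switched.
kubectl port-forward "deployment/app-${TARGET_ENV}" 8080:8080 -n production &
PF_PID=$!
trap 'kill $PF_PID' EXIT
sleep 3   # give the port-forward a moment to establish

for i in $(seq 1 10); do
  STATUS=$(curl -s -o /dev/null -w '%{http_code}' "http://localhost:8080/health" || true)
  if [ "$STATUS" == "200" ]; then
    echo "Smoke tests passed for $TARGET_ENV"
    exit 0
  fi
  echo "Attempt $i: got HTTP $STATUS, retrying in 5s..."
  sleep 5
done

echo "Smoke tests failed for $TARGET_ENV"
exit 1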
Canary Deployment
# Flagger canary configuration
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: app
namespace: production
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: app
progressDeadlineSeconds: 60
service:
port: 80
targetPort: 8080
gateways:
- public-gateway.istio-system.svc.cluster.local
hosts:
- app.example.com
analysis:
interval: 30s
threshold: 5
maxWeight: 50
stepWeight: 10
metrics:
- name: request-success-rate
thresholdRange:
min: 99
interval: 1m
- name: request-duration
thresholdRange:
max: 500
interval: 30s
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://app.production/"
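Once a Canary resource like this is applied, rollout progress can be followed from the command line. A small sketch, assuming kubectl access to the production namespace used above; Flagger reports progress through the Canary resource's status (the phase values referenced below come from Flagger's Canary CRD):

#!/bin/bash
# watch-canary.sh (illustrative sketch) — follow a Flagger canary rollout
NAMESPACE=production
CANARY=app

# The summary columns (STATUS, WEIGHT) are visible with a plain `kubectl get`.
kubectl -n "$NAMESPACE" get canary "$CANARY"

# Poll the reported phase until the rollout succeeds or is rolled back.
while true; do
  PHASE=$(kubectl -n "$NAMESPACE" get canary "$CANARY" -o jsonpath='{.status.phase}')
  echo "$(date +%H:%M:%S) canary phase: $PHASE"
  case "$PHASE" in
    Succeeded) echo "Canary promoted"; exit 0 ;;
    Failed)    echo "Canary rolled back"; exit 1 ;;
  esac
  sleep 15
done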
Monitoring and Observability
Pipeline Metrics
- Lead Time: Time from commit to running in production
- Deployment Frequency: How often deployments reach production
- MTTR: Mean time to recovery after a failed change
- Change Failure Rate: Percentage of deployments that cause a production failure

Together these are the four DORA metrics commonly used to gauge delivery performance.
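As a rough illustration of how such numbers can be derived, the sketch below computes deployment frequency and change failure rate from a hypothetical deployments.log with one "timestamp,status" line per deployment; the log format, file name, and 30-day window are assumptions for the example, not something the pipelines above produce:

#!/bin/bash
# dora-metrics.sh (illustrative sketch)
# Assumes a log named deployments.log with lines like: 2024-05-01T12:30:00Z,success
LOG=deployments.log

TOTAL=$(wc -l < "$LOG" | tr -d ' ')
FAILED=$(grep -c ',failure$' "$LOG" || true)
DAYS=30   # reporting window assumed to match the log contents

echo "Deployments in window: $TOTAL"
awk -v total="$TOTAL" -v days="$DAYS" 'BEGIN { printf "Deployment frequency: %.2f per day\n", total / days }'
awk -v total="$TOTAL" -v failed="$FAILED" 'BEGIN { if (total > 0) printf "Change failure rate: %.1f%%\n", 100 * failed / total }'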
Deployment Monitoring
# Datadog deployment tracking
- name: Mark deployment in Datadog
  uses: DataDog/datadog-actions/mark-deployment@v1
  with:
    api-key: ${{ secrets.DATADOG_API_KEY }}
    app-key: ${{ secrets.DATADOG_APP_KEY }}
    service: myapp
    env: production
    version: ${{ github.sha }}

# NewRelic deployment marker
- name: New Relic Deployment
  env:
    NEW_RELIC_API_KEY: ${{ secrets.NEW_RELIC_API_KEY }}
    NEW_RELIC_APP_ID: ${{ secrets.NEW_RELIC_APP_ID }}
  run: |
    curl -X POST "https://api.newrelic.com/v2/applications/${NEW_RELIC_APP_ID}/deployments.json" \
      -H "X-Api-Key:${NEW_RELIC_API_KEY}" \
      -H "Content-Type: application/json" \
      -d '{
        "deployment": {
          "revision": "'${GITHUB_SHA}'",
          "user": "'${GITHUB_ACTOR}'",
          "description": "'${GITHUB_EVENT_NAME}'"
        }
      }'
Security in CI/CD
Secret Management
# HashiCorp Vault integration
- name: Import Secrets
  uses: hashicorp/vault-action@v2
  with:
    url: https://vault.example.com
    method: jwt
    role: myapp-role
    secrets: |
      secret/data/ci database_url | DATABASE_URL ;
      secret/data/ci api_key | API_KEY ;
      secret/data/ci aws_access_key | AWS_ACCESS_KEY_ID ;
      secret/data/ci aws_secret_key | AWS_SECRET_ACCESS_KEY
Supply Chain Security
# SLSA provenance generation
- name: Generate SLSA Provenance
  uses: slsa-framework/slsa-github-generator@v1.9.0
  with:
    subject-name: ${{ env.IMAGE_NAME }}
    subject-digest: ${{ steps.image.outputs.digest }}
    push-to-registry: true
Best Practices
- Version Everything: Code, configs, infrastructure
- Automate Everything: No manual deployments
- Test Everything: Comprehensive test coverage
- Monitor Everything: Full observability
- Secure Everything: Security at every stage
- Document Everything: Clear runbooks
Troubleshooting
Common Issues
- Build Failures: Check dependencies, environment variables
- Test Flakiness: Isolate tests, mock external services
- Deployment Failures: Verify credentials, check quotas
- Performance Issues: Optimize Docker layers, use caching
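One way to investigate the test flakiness mentioned above is to rerun the same suite several times in isolation and see whether results vary. A minimal sketch; the npm run test:unit script name matches the pipelines above, and the run count of five is arbitrary:

#!/bin/bash
# flaky-check.sh (illustrative sketch) — rerun the unit suite to surface flaky tests
RUNS=5
FAILURES=0

for i in $(seq 1 "$RUNS"); do
  echo "=== Run $i of $RUNS ==="
  if ! npm run test:unit; then
    FAILURES=$((FAILURES + 1))
  fi
done

echo "$FAILURES of $RUNS runs failed"
# Anything between 1 and RUNS-1 failures suggests flakiness rather than a hard break.
if [ "$FAILURES" -gt 0 ] && [ "$FAILURES" -lt "$RUNS" ]; then
  echo "Suite looks flaky: isolate tests and mock external services"
  exit 1
fi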