πŸš€ The Complete GitOps Stack: From EKS Provisioning to App Deployment

This guide builds a complete CI/CD infrastructure pipeline on AWS using Terraform, Kubernetes, and ArgoCD, with optional Ingress and DNS on top. The repository is laid out as follows:

```
eks-nginx-argocd/
β”œβ”€β”€ terraform/
β”‚   β”œβ”€β”€ main.tf
β”‚   β”œβ”€β”€ variables.tf
β”‚   β”œβ”€β”€ outputs.tf
β”‚   └── providers.tf
β”œβ”€β”€ kubernetes/
β”‚   β”œβ”€β”€ nginx/
β”‚   β”‚   β”œβ”€β”€ deployment.yaml
β”‚   β”‚   └── service.yaml
β”‚   └── argocd/
β”‚       └── application.yaml
└── README.md
```

1. Terraform Files for EKS Cluster

terraform/providers.tf

```hcl
terraform {
  required_version = ">= 1.0.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.23"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.11"
    }
  }
}

provider "aws" {
  region = var.aws_region
}

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args = [
      "eks",
      "get-token",
      "--cluster-name",
      module.eks.cluster_name
    ]
  }
}

provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      command     = "aws"
      args = [
        "eks",
        "get-token",
        "--cluster-name",
        module.eks.cluster_name
      ]
    }
  }
}
```

terraform/main.tf

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "5.0.0"

  name = "${var.cluster_name}-vpc"
  cidr = var.vpc_cidr

  azs             = var.azs
  private_subnets = var.private_subnets
  public_subnets  = var.public_subnets

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }
}

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "19.15.3"

  cluster_name    = var.cluster_name
  cluster_version = var.cluster_version

  vpc_id                         = module.vpc.vpc_id
  subnet_ids                     = module.vpc.private_subnets
  cluster_endpoint_public_access = true

  eks_managed_node_group_defaults = {
    ami_type = "AL2_x86_64"
  }

  eks_managed_node_groups = {
    default = {
      name           = "default-node-group"
      instance_types = ["t3.medium"]
      min_size       = 1
      max_size       = 3
      desired_size   = 2
    }
  }
}

resource "aws_iam_role_policy_attachment" "additional" {
  for_each = module.eks.eks_managed_node_groups

  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = each.value.iam_role_name
}
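
Note that the helm provider wired up in providers.tf isn't used by any of the resources above. If you'd prefer Terraform to install ArgoCD itself, rather than applying manifests by hand in README step 3, a minimal sketch using the upstream argo-helm chart could be added to main.tf:

```hcl
# Optional: install ArgoCD from the official argo-helm repository once the
# cluster exists, instead of applying the install manifests manually.
resource "helm_release" "argocd" {
  name             = "argocd"
  repository       = "https://argoproj.github.io/argo-helm"
  chart            = "argo-cd"
  namespace        = "argocd"
  create_namespace = true

  depends_on = [module.eks]
}
```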

terraform/variables.tf

variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "us-west-2"
}

variable "cluster_name" {
  description = "EKS cluster name"
  type        = string
  default     = "eks-nginx-argocd"
}

variable "cluster_version" {
  description = "Kubernetes version"
  type        = string
  default     = "1.28"
}

variable "vpc_cidr" {
  description = "VPC CIDR"
  type        = string
  default     = "10.0.0.0/16"
}

variable "azs" {
  description = "Availability zones"
  type        = list(string)
  default     = ["us-west-2a", "us-west-2b", "us-west-2c"]
}

variable "private_subnets" {
  description = "Private subnets CIDR"
  type        = list(string)
  default     = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
}

variable "public_subnets" {
  description = "Public subnets CIDR"
  type        = list(string)
  default     = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]
}
```

terraform/outputs.tf

output "cluster_name" {
  description = "Kubernetes Cluster Name"
  value       = module.eks.cluster_name
}

output "cluster_endpoint" {
  description = "Endpoint for EKS control plane"
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane"
  value       = module.eks.cluster_security_group_id
}

output "region" {
  description = "AWS region"
  value       = var.aws_region
}

output "kubeconfig" {
  description = "kubectl config file contents for this EKS cluster"
  value       = module.eks.kubeconfig
  sensitive   = true
}

2. Kubernetes Manifests for NGINX

kubernetes/nginx/deployment.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.25 # pin a tag; :latest makes GitOps rollouts non-reproducible
        ports:
        - containerPort: 80
```

kubernetes/nginx/service.yaml

```yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
```

3. ArgoCD Configuration

kubernetes/argocd/application.yaml

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nginx-app
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/your-username/eks-nginx-argocd.git
    targetRevision: HEAD
    path: kubernetes/nginx
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated:
      selfHeal: true
      prune: true
```
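
With `automated` sync enabled, ArgoCD prunes resources that are deleted from Git (`prune: true`) and reverts out-of-band cluster changes (`selfHeal: true`). Once the Application is applied (README step 5 below), you can confirm it has reconciled:

```bash
# STATUS should read Synced / Healthy once the manifests in
# kubernetes/nginx have been applied.
kubectl -n argocd get applications nginx-app
```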

4. README.md

# EKS with NGINX and ArgoCD Deployment

This project demonstrates how to provision an AWS EKS cluster using Terraform, deploy an NGINX application, and set up ArgoCD for GitOps continuous deployment.

## Prerequisites

- AWS account with appropriate permissions
- Terraform installed
- kubectl installed
- AWS CLI configured
- Helm installed (optional; only needed if you install ArgoCD via its Helm chart)
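
A quick sanity check that the tooling is on PATH before starting:

```bash
# Verify each prerequisite is installed.
aws --version
terraform version
kubectl version --client
helm version
```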

## Deployment Steps

### 1. Provision EKS Cluster

```bash
cd terraform
terraform init
terraform plan
terraform apply
```

### 2. Configure kubectl

After Terraform completes, configure kubectl to talk to the new cluster:

```bash
aws eks --region $(terraform output -raw region) update-kubeconfig --name $(terraform output -raw cluster_name)
```

### 3. Install ArgoCD

```bash
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
```
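
The install manifest creates several deployments, and they can take a minute or two to come up. One way to block until the API server component is ready (a convenience step, not part of the original instructions):

```bash
# Wait until the argocd-server deployment reports Available (5-minute timeout).
kubectl wait --for=condition=available deployment/argocd-server -n argocd --timeout=300s
```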

### 4. Access ArgoCD

Port-forward the ArgoCD server:

```bash
kubectl port-forward svc/argocd-server -n argocd 8080:443
```

Retrieve the initial admin password:

```bash
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
```

Access the UI at https://localhost:8080 and log in as `admin`.
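
If you also have the `argocd` CLI installed (optional, not listed in the prerequisites), you can log in from the terminal while the port-forward is running; `--insecure` skips verification of the self-signed certificate:

```bash
# Log in through the local port-forward as the admin user.
argocd login localhost:8080 --username admin --password <initial-password> --insecure
```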

### 5. Deploy NGINX Application via ArgoCD

Apply the ArgoCD Application manifest:

```bash
kubectl apply -f kubernetes/argocd/application.yaml
```

### 6. Access NGINX Application

After ArgoCD syncs, get the LoadBalancer URL:

```bash
kubectl get svc nginx-service
```
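
The EXTERNAL-IP column shows the load balancer hostname once AWS finishes provisioning it, which can take a few minutes. A quick scripted check (assuming the service reports a hostname rather than an IP, as ELBs on EKS do):

```bash
# Extract the load balancer hostname and hit NGINX over HTTP.
LB_HOST=$(kubectl get svc nginx-service -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
curl -I "http://${LB_HOST}"
```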

## Clean Up

```bash
cd terraform
terraform destroy
```

## Additional Notes

1. You'll need to create a GitHub repository for the ArgoCD Application to point to (update the `repoURL` in `application.yaml`); a publishing sketch follows this list.

2. The Terraform configuration uses the official community VPC and EKS modules, which handle VPC, EKS cluster, and node group creation.

3. The NGINX deployment is exposed via a LoadBalancer service for easy access.

4. ArgoCD is installed using its standard manifests and then configured to manage the NGINX application.

5. For production use, you should:
   - Secure the ArgoCD instance properly
   - Use HTTPS for the ArgoCD server
   - Implement proper IAM roles and policies
   - Consider using private EKS clusters
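
For note 1, a minimal sketch for publishing this project so the Application manifest can track it (`your-username` is the same placeholder used in `application.yaml`):

```bash
# Initialize the repo and push it to GitHub; the remote URL must match
# the repoURL in kubernetes/argocd/application.yaml.
git init
git add .
git commit -m "EKS + NGINX + ArgoCD stack"
git branch -M main
git remote add origin https://github.com/your-username/eks-nginx-argocd.git
git push -u origin main
```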