Terraform Complete Guide 2026: Infrastructure as Code for AWS, GCP, and Azure

Sanjeev Sharma
5 min read

Advertisement

Terraform 2026: Infrastructure as Code

"ClickOps" — manually creating resources in the AWS console — doesn't scale. Terraform lets you define infrastructure in code, version it, and reproduce it exactly.

Installation and Setup

# Install Terraform (Homebrew on macOS; other platforms use the
# official binaries or the HashiCorp apt/yum repositories)
brew install terraform  # macOS
# or download from terraform.io

# Verify the CLI is on PATH and new enough for this guide
terraform --version  # Terraform v1.7+

Basic AWS Setup

# main.tf — core Terraform settings and the AWS provider.
terraform {
  # Refuse to run under older CLIs that may not understand this config.
  required_version = ">= 1.7"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"  # any 5.x release, never 6.x
    }
  }

  # Remote state (critical for teams).
  # NOTE(review): the bucket and DynamoDB table must already exist —
  # the backend cannot create them; bootstrap them out of band.
  backend "s3" {
    bucket  = "my-terraform-state"
    key     = "production/terraform.tfstate"
    region  = "us-east-1"
    encrypt = true
    # State locking with DynamoDB prevents two concurrent applies
    # from corrupting the state file.
    dynamodb_table = "terraform-state-lock"
  }
}

provider "aws" {
  region = var.aws_region

  # default_tags are merged onto every taggable resource this provider
  # creates, so individual resources don't need to repeat them.
  default_tags {
    tags = {
      Project     = "webcoderspeed"
      Environment = var.environment
      ManagedBy   = "terraform"
    }
  }
}

Variables

# variables.tf — input variables shared by the whole configuration.
variable "aws_region" {
  description = "AWS region for all resources"
  type        = string
  default     = "us-east-1"
}

variable "environment" {
  description = "Environment name"
  type        = string
  # No default: the caller must choose an environment explicitly.
  validation {
    condition     = contains(["development", "staging", "production"], var.environment)
    error_message = "Environment must be development, staging, or production."
  }
}

variable "app_name" {
  description = "Application name"
  type        = string
  default     = "webcoderspeed"
}

variable "db_password" {
  description = "Database master password"
  type        = string
  sensitive   = true  # Never logged or displayed (but still stored in state)
}

variable "instance_count" {
  description = "Number of application instances to run"
  type        = number
  default     = 2
}
# terraform.tfvars (don't commit if it has secrets! add it to .gitignore)
aws_region   = "us-east-1"
environment  = "production"
app_name     = "webcoderspeed"

# terraform.tfvars.example (commit this — placeholders only, no real secrets)
aws_region   = "us-east-1"
environment  = "production"
app_name     = "myapp"
db_password  = "CHANGE_ME"

Complete AWS Infrastructure

# network.tf — VPC and subnets
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support   = true
}

# Two public subnets: 10.0.0.0/24 and 10.0.1.0/24, one per AZ.
# NOTE(review): relies on a data "aws_availability_zones" "available"
# block declared elsewhere in the configuration.
resource "aws_subnet" "public" {
  count             = 2
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.${count.index}.0/24"
  availability_zone = data.aws_availability_zones.available.names[count.index]
  map_public_ip_on_launch = true
}

# Two private subnets: 10.0.10.0/24 and 10.0.11.0/24 (offset by 10 to
# leave room between the public and private ranges).
resource "aws_subnet" "private" {
  count             = 2
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.${count.index + 10}.0/24"
  availability_zone = data.aws_availability_zones.available.names[count.index]
}

# S3 bucket for app assets.
# NOTE(review): bucket names are globally unique across all AWS accounts;
# this name may collide — consider a random or account-id suffix.
resource "aws_s3_bucket" "assets" {
  bucket = "${var.app_name}-assets-${var.environment}"
}

# Block every form of public access — the bucket is served only
# through CloudFront.
resource "aws_s3_bucket_public_access_block" "assets" {
  bucket                  = aws_s3_bucket.assets.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

# CloudFront distribution
resource "aws_cloudfront_distribution" "cdn" {
  enabled = true

  origin {
    domain_name              = aws_s3_bucket.assets.bucket_regional_domain_name
    origin_id                = "S3-${aws_s3_bucket.assets.id}"
    origin_access_control_id = aws_cloudfront_origin_access_control.assets.id
  }

  default_cache_behavior {
    target_origin_id       = "S3-${aws_s3_bucket.assets.id}"
    viewer_protocol_policy = "redirect-to-https"
    allowed_methods        = ["GET", "HEAD"]
    cached_methods         = ["GET", "HEAD"]
    compress               = true

    forwarded_values {
      query_string = false
      cookies { forward = "none" }
    }

    min_ttl     = 0
    default_ttl = 86400
    max_ttl     = 31536000
  }

  restrictions {
    geo_restriction { restriction_type = "none" }
  }

  viewer_certificate {
    cloudfront_default_certificate = true
  }
}

# RDS PostgreSQL
resource "aws_db_instance" "postgres" {
  identifier        = "${var.app_name}-${var.environment}"
  engine            = "postgres"
  engine_version    = "16.2"
  instance_class    = var.environment == "production" ? "db.t3.medium" : "db.t3.micro"
  allocated_storage = 20
  max_allocated_storage = 100  # Auto-scaling storage

  db_name  = replace(var.app_name, "-", "_")
  username = "dbadmin"
  password = var.db_password

  multi_az               = var.environment == "production"
  skip_final_snapshot    = var.environment != "production"
  deletion_protection    = var.environment == "production"
  storage_encrypted      = true

  vpc_security_group_ids = [aws_security_group.rds.id]
  db_subnet_group_name   = aws_db_subnet_group.main.name

  backup_retention_period = var.environment == "production" ? 7 : 1
}

Modules: Reusable Infrastructure

# modules/ecs-service/main.tf — reusable Fargate task definition.
# Inputs: service name, container image, port, and optional sizing.
variable "service_name" { type = string }
variable "image" { type = string }
variable "port" { type = number }

# HCL does not allow `;` to separate attributes — a one-line block may
# hold only a single attribute, so multi-attribute blocks go multi-line.
variable "cpu" {
  type    = number
  default = 256
}

variable "memory" {
  type    = number
  default = 512
}

# NOTE(review): declared but not referenced in this snippet — an
# aws_ecs_service resource (not shown) would consume it.
variable "desired_count" {
  type    = number
  default = 2
}

resource "aws_ecs_task_definition" "service" {
  family = var.service_name
  network_mode = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  cpu = var.cpu
  memory = var.memory

  container_definitions = jsonencode([{
    name  = var.service_name
    image = var.image
    portMappings = [{ containerPort = var.port }]
  }])
}

output "task_definition_arn" {
  value = aws_ecs_task_definition.service.arn
}

# Use the module — each instantiation produces its own task definition.
# NOTE(review): var.api_version must be declared in the root module's
# variables for this reference to resolve.
module "api_service" {
  source = "./modules/ecs-service"

  service_name  = "api"
  image         = "ghcr.io/myapp/api:${var.api_version}"
  port          = 3000
  desired_count = 3
}

Workspaces for Multiple Environments

# Create workspaces (each gets its own isolated state file)
terraform workspace new staging
terraform workspace new production

# Switch the active workspace before plan/apply
terraform workspace select production

# Different variables per workspace — one map keyed by workspace name.
variable "instance_count" {
  description = "Instances to run, keyed by workspace name"
  # Without an explicit type Terraform would only infer one; constrain
  # it so a non-numeric value fails at plan time.
  type = map(number)
  default = {
    staging    = 1
    production = 3
  }
}

# Usage — terraform.workspace is the name of the active workspace.
# Selecting a workspace missing from the map is a plan-time error;
# use lookup(var.instance_count, terraform.workspace, 1) for a fallback.
resource "aws_instance" "app" {
  count = var.instance_count[terraform.workspace]
  # ...
}

CI/CD with Terraform

# .github/workflows/terraform.yml
# Plan on pull requests, apply on pushes to main.
name: Terraform

on:
  pull_request:
    paths: ['terraform/**']
  push:
    branches: [main]
    paths: ['terraform/**']

jobs:
  plan:
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    steps:
      - uses: actions/checkout@v4
      - uses: hashicorp/setup-terraform@v3

      - name: Terraform Init
        run: terraform init
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

      # Plan also talks to AWS (refresh) and the S3 backend, so it
      # needs credentials too — not just init.
      - name: Terraform Plan
        run: terraform plan -no-color
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          TF_VAR_db_password: ${{ secrets.DB_PASSWORD }}

  apply:
    runs-on: ubuntu-latest
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    steps:
      - uses: actions/checkout@v4
      - uses: hashicorp/setup-terraform@v3

      # Apply must initialize the backend and providers first;
      # without this step `terraform apply` fails on a fresh runner.
      - name: Terraform Init
        run: terraform init
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

      - name: Terraform Apply
        run: terraform apply -auto-approve
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          TF_VAR_db_password: ${{ secrets.DB_PASSWORD }}

Terraform + version control = reproducible infrastructure. Every change is code-reviewed, tested, and audited. This is how senior engineers manage cloud infrastructure.

Advertisement

Sanjeev Sharma

Written by

Sanjeev Sharma

Full Stack Engineer · E-mopro