Terraform Cheatsheet
DevOps
terraform · iac · infrastructure-as-code · devops
Terraform Cheatsheet
Installation
# Install Terraform on Debian/Ubuntu from the official HashiCorp apt repository
wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt install terraform
# Verify installation and enable shell tab-completion
terraform --version
terraform -install-autocomplete
# Install terraform-docs (optional) - generates docs from module inputs/outputs
brew install terraform-docs # macOS
go install github.com/terraform-docs/terraform-docs@latest # Go
Basic Commands
# Initialize working directory (downloads providers, configures backend)
terraform init
# Validate configuration syntax and internal consistency
terraform validate
# Format code (-check reports diffs without rewriting; non-zero exit on diffs)
terraform fmt
terraform fmt -check
# Plan changes (-out saves the plan so apply runs exactly what was reviewed)
terraform plan
terraform plan -out=tfplan
# Apply changes (-auto-approve skips the interactive confirmation prompt)
terraform apply
terraform apply -auto-approve
terraform apply tfplan
# Destroy all resources managed by this configuration
terraform destroy
# State inspection
terraform show
terraform state list
terraform state show aws_instance.example
# Output values
terraform output
terraform output instance_ip
# Import existing resources (resource address first, then the cloud-side ID)
terraform import aws_instance.example i-0123456789abcdef0
Configuration Structure
# provider.tf
# Pin provider source/version and the minimum Terraform CLI version
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0" # any 5.x release, never 6.0
}
}
required_version = ">= 1.0"
}
provider "aws" {
region = "us-west-2"
}
# variables.tf
variable "instance_type" {
description = "EC2 instance type"
type = string
default = "t3.micro"
}
# A map(string) of tags combines well with merge() for per-resource overrides
variable "tags" {
description = "Resource tags"
type = map(string)
default = {
Name = "example"
Environment = "dev"
}
}
# main.tf
resource "aws_instance" "example" {
ami = data.aws_ami.ubuntu.id
instance_type = var.instance_type
# merge() lets the explicit Name here win over any Name key in var.tags
tags = merge(var.tags, {
Name = "terraform-example"
})
}
# outputs.tf
output "instance_ip" {
description = "Public IP of the instance"
value = aws_instance.example.public_ip
}
Resources
# AWS EC2 Instance
resource "aws_instance" "web" {
ami = data.aws_ami.ubuntu.id
instance_type = "t3.micro"
key_name = var.ssh_key
vpc_security_group_ids = [aws_security_group.web.id]
root_block_device {
volume_type = "gp3"
volume_size = 20 # GiB
delete_on_termination = true
}
# user_data must be base64-encoded; filebase64 reads and encodes in one step
user_data = filebase64("${path.module}/user-data.sh")
tags = var.tags
}
# AWS S3 Bucket
# Random suffix keeps the globally-unique bucket name collision-free
resource "aws_s3_bucket" "data" {
bucket = "my-unique-bucket-name-${random_id.bucket_suffix.hex}"
tags = var.tags
}
# Versioning is configured via a separate resource (AWS provider v4+)
resource "aws_s3_bucket_versioning" "data" {
bucket = aws_s3_bucket.data.id
versioning_configuration {
status = "Enabled"
}
}
# AWS RDS Database
resource "aws_db_instance" "postgres" {
  identifier        = "my-postgres"
  engine            = "postgres" # was " postgres" - the leading space is not a valid engine name
  engine_version    = "15.4"
  instance_class    = "db.t3.micro"
  allocated_storage = 20 # GiB
  storage_type      = "gp2"
  db_name           = "appdb"
  # Credentials come from variables; declare var.db_password with sensitive = true
  username = var.db_username
  password = var.db_password
  db_subnet_group_name   = aws_db_subnet_group.main.name
  vpc_security_group_ids = [aws_security_group.db.id]
  # Dev-only convenience: destroy without a final snapshot (data loss risk in prod)
  skip_final_snapshot = true
}
Data Sources
# AWS AMI data source - latest Canonical Ubuntu 22.04 image
data "aws_ami" "ubuntu" {
most_recent = true
owners = ["099720109477"] # Canonical
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
}
# AWS availability zones usable in the current region
data "aws_availability_zones" "available" {
state = "available"
}
# AWS current region
data "aws_region" "current" {}
# Use in resources
resource "aws_subnet" "example" {
vpc_id = aws_vpc.main.id
# cidrsubnet adds 4 prefix bits to the VPC CIDR and takes subnet index 0
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, 0)
availability_zone = data.aws_availability_zones.available.names[0]
}
Variables
# Define variable with validation (checked at plan time, before any API calls)
variable "instance_count" {
description = "Number of instances to create"
type = number
default = 1
validation {
condition = var.instance_count > 0 && var.instance_count <= 10
error_message = "Instance count must be between 1 and 10."
}
}
variable "availability_zones" {
type = list(string)
}
variable "instance_types" {
type = map(string)
}
# object() enforces a fixed shape on structured input
variable "app_config" {
type = object({
name = string
port = number
replicas = number
})
}
# Use variable in code
resource "aws_instance" "example" {
count = var.instance_count
instance_type = var.instance_types["web"]
tags = {
Name = "${var.app_config.name}-${count.index}"
}
}
Terraform State
# Pin provider checksums in .terraform.lock.hcl (this does NOT lock the state file)
terraform providers lock -platform=linux_amd64
# Move resource to new address (refactor without destroy/recreate)
terraform state mv 'aws_instance.example' 'aws_instance.example_new'
# Remove resource from state (keep cloud resource)
terraform state rm aws_instance.example
# Import resource
terraform import aws_instance.example i-0123456789abcdef0
# State backup (pull downloads the current remote state to stdout)
terraform state pull > backup.tfstate
# State drift detection (refresh-only never proposes infrastructure changes)
terraform plan -refresh-only
# Workspaces - isolated state files sharing a single configuration
terraform workspace new dev
terraform workspace list
terraform workspace select prod
# Remote state (S3) - shared, encrypted state with DynamoDB-based locking
terraform {
backend "s3" {
bucket = "my-terraform-state"
key = "prod/terraform.tfstate"
region = "us-west-2"
encrypt = true
dynamodb_table = "terraform-locks" # table must have a "LockID" (string) hash key
}
}
Provisioners
resource "aws_instance" "web" {
ami = data.aws_ami.ubuntu.id
instance_type = "t3.micro"
# Remote-exec provisioner - runs commands on the new instance over SSH
provisioner "remote-exec" {
inline = [
"sudo apt-get update",
"sudo apt-get install -y nginx"
]
connection {
type = "ssh"
user = "ubuntu"
private_key = file(var.ssh_key)
host = self.public_ip # self refers to the enclosing resource
}
}
# File provisioner - copies a local file to the instance
provisioner "file" {
source = "scripts/deploy.sh"
destination = "/tmp/deploy.sh"
connection {
type = "ssh"
user = "ubuntu"
private_key = file(var.ssh_key)
host = self.public_ip
}
}
# Local-exec provisioner - runs on the machine running terraform
provisioner "local-exec" {
command = "echo ${self.public_ip} >> hosts.txt"
when = create # create is the default; when = destroy runs it on teardown
}
}
Modules
# Use module from the public Terraform Registry
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "5.1.2" # exact pin - always version registry modules
name = "my-vpc"
cidr = "10.0.0.0/16"
azs = ["us-west-2a", "us-west-2b"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24"]
public_subnets = ["10.0.101.0/24", "10.0.102.0/24"]
enable_nat_gateway = true
single_nat_gateway = true # one NAT gateway for all AZs (cheaper, less resilient)
enable_dns_hostnames = true
}
# Module outputs - re-export a value produced by the module
output "vpc_id" {
description = "ID of VPC"
value = module.vpc.vpc_id
}
# Local module - relative path; no version argument for local paths
module "webserver" {
source = "./modules/webserver"
instance_type = var.instance_type
subnet_id = module.vpc.public_subnets[0]
}
Loops & Conditionals
# count - identical copies addressed by numeric index
resource "aws_instance" "app" {
count = 3
ami = data.aws_ami.ubuntu.id
instance_type = "t3.micro"
tags = {
Name = "app-${count.index}" # app-0, app-1, app-2
}
}
# for_each (map) - one instance per key, addressed by key instead of index
resource "aws_instance" "servers" {
  for_each = {
    "web" = "t3.micro"
    "app" = "t3.small"
    "db"  = "t3.medium"
  }
  ami           = data.aws_ami.ubuntu.id # was data.aws.ami.ubuntu.id - an invalid reference
  instance_type = each.value
  tags = {
    Name = "server-${each.key}"
  }
}
# dynamic blocks - generate repeated nested blocks from a collection
resource "aws_security_group" "web" {
name = "web-sg"
description = "Allow web traffic"
# one ingress block per element of var.allowed_ports
dynamic "ingress" {
for_each = var.allowed_ports
content {
from_port = ingress.value # iterator is named after the block label
to_port = ingress.value
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] # open to the world - tighten for production
}
}
}
Functions
# string functions - interpolation plus lower() for a normalized bucket name
resource "aws_s3_bucket" "bucket" {
bucket = "${var.project_name}-${lower(var.environment)}-${random_id.unique.hex}"
}
# numeric functions - max() enforces a floor (clearer than the equivalent ternary)
fixed_size = max(var.mongo_size, 20)
# collection functions
subnet_ids = [for subnet in aws_subnet.public : subnet.id]
# splat over a count-based resource already yields a list;
# values() only accepts a map, so wrapping the splat in it is an error
instance_ips = aws_instance.app[*].public_ip
# file functions
user_data = templatefile("${path.module}/user-data.tpl", {
message = "Hello World"
})
Templates
# templatefile function - renders a template file with the given variable map
user_data = templatefile("${path.module}/cloud-init.tpl", {
hostname = var.hostname
ssh_key_name = var.ssh_key
region = data.aws_region.current.name
})
# cloud-init.tpl - ${hostname} below is a template placeholder, not shell syntax
#!/bin/bash
hostnamectl set-hostname ${hostname}
apt-get update
apt-get install -y nginx
systemctl enable nginx
systemctl start nginx
Best Practices
- Use modules for reusable infrastructure components
- Version pinning - Pin provider and module versions
- State management - Use remote state (S3 + DynamoDB) with locking
- Idempotence - Ensure resources can be applied multiple times safely
- Separation of concerns - Split Terraform code into logical files (main.tf, variables.tf, outputs.tf)
- Validation - Use variable validation to catch errors early
- Documentation - Add descriptions to variables and outputs
- Workspaces - Use workspaces for different environments (dev/stage/prod)
- DRY principle - Use locals and loops to avoid repetition
- Testing - Use
`terraform validate` and `terraform plan` before apply
Infrastructure as Code Made Easy