Terraform-Day-4
# Creating Terraform Modules for EC2 Instance with VPC, Subnet, and Other Resources
In this lab, we will refactor a Terraform configuration to utilize modules for better organization and reusability across environments or projects.
## Module Breakdown
1. VPC Module
Manages the VPC and related networking resources (subnets, route tables, etc.).
2. EC2 Instance Module
Launches the EC2 instance with the necessary configuration (AMI lookup, security group, and public IP assignment).
## Folder Structure
terraform-ec2-vpc-lab/
├── main.tf
├── variables.tf
├── outputs.tf
├── modules/
│ ├── vpc/
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── outputs.tf
│ └── ec2/
│ ├── main.tf
│ ├── variables.tf
│ └── outputs.tf
└── terraform.tfvars
## Step 1: VPC Module
### File: `modules/vpc/main.tf`
# VPC with DNS support/hostnames enabled so instances get resolvable names.
resource "aws_vpc" "main" {
cidr_block = var.cidr_block
enable_dns_support = true
enable_dns_hostnames = true
tags = {
Name = var.vpc_name
}
}
# Public subnet: map_public_ip_on_launch auto-assigns public IPs to instances.
resource "aws_subnet" "public" {
vpc_id = aws_vpc.main.id
cidr_block = var.subnet_cidr_block
availability_zone = var.availability_zone
map_public_ip_on_launch = true
tags = {
Name = var.subnet_name
}
}
# Internet gateway providing the VPC's path to the public internet.
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.main.id
tags = {
Name = var.igw_name
}
}
# Route table sending all non-local traffic (0.0.0.0/0) out via the IGW.
resource "aws_route_table" "public_rt" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
tags = {
Name = var.route_table_name
}
}
# Attach the public route table to the public subnet, making it truly "public".
resource "aws_route_table_association" "public_assoc" {
subnet_id = aws_subnet.public.id
route_table_id = aws_route_table.public_rt.id
}
### File: `modules/vpc/variables.tf`
# Input variables for the VPC module. Explicit `type` constraints are added so
# Terraform rejects malformed values at plan time instead of failing mid-apply.
variable "cidr_block" {
  description = "CIDR block for the VPC"
  type        = string
  default     = "10.0.0.0/16"
}

variable "vpc_name" {
  description = "Name of the VPC"
  type        = string
  default     = "terraform-vpc"
}

variable "subnet_cidr_block" {
  description = "CIDR block for the public subnet"
  type        = string
  default     = "10.0.1.0/24"
}

variable "subnet_name" {
  description = "Name of the public subnet"
  type        = string
  default     = "public-subnet"
}

variable "availability_zone" {
  description = "Availability zone for the subnet"
  type        = string
  default     = "us-east-1a"
}

variable "igw_name" {
  description = "Name of the internet gateway"
  type        = string
  default     = "internet-gateway"
}

variable "route_table_name" {
  description = "Name of the route table"
  type        = string
  default     = "public-route-table"
}
### File: `modules/vpc/outputs.tf`
# Outputs exposed to the root module; descriptions make `terraform output`
# and module documentation self-explanatory.
output "vpc_id" {
  description = "ID of the created VPC"
  value       = aws_vpc.main.id
}

output "subnet_id" {
  description = "ID of the public subnet"
  value       = aws_subnet.public.id
}
## Step 2: EC2 Module
### File: `modules/ec2/main.tf`
# Look up the most recent Amazon Linux 2 AMI published by Amazon.
data "aws_ami" "latest_ami" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["amzn2-ami-hvm-*-x86_64-gp2"]
  }
}

# Security group allowing inbound SSH (22) and HTTP (80), and all egress.
resource "aws_security_group" "sg" {
  name        = var.security_group_name
  description = "Allow SSH and HTTP"
  vpc_id      = var.vpc_id

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"] # NOTE(review): SSH open to the world — restrict to a trusted CIDR in real deployments
  }

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1" # all protocols
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Web server instance placed in the public subnet with a public IP.
resource "aws_instance" "web" {
  ami           = data.aws_ami.latest_ami.id
  instance_type = var.instance_type
  subnet_id     = var.subnet_id

  # Bug fix: when launching into a VPC subnet (subnet_id is set), security
  # groups must be referenced by ID via vpc_security_group_ids. The
  # security_groups argument takes names and only works for default-VPC /
  # EC2-Classic launches; using it here errors out or forces instance
  # replacement on every apply.
  vpc_security_group_ids      = [aws_security_group.sg.id]
  associate_public_ip_address = true

  tags = {
    Name = var.instance_name
  }
}
### File: `modules/ec2/variables.tf`
# Input variables for the EC2 module. vpc_id and subnet_id have no defaults and
# must be supplied by the caller (typically from the vpc module's outputs).
variable "vpc_id" {
  description = "VPC ID"
  type        = string
}

variable "subnet_id" {
  description = "Public Subnet ID"
  type        = string
}

variable "instance_type" {
  description = "EC2 instance type"
  type        = string
  default     = "t2.micro"
}

variable "security_group_name" {
  description = "Name of the security group"
  type        = string
  default     = "web-sg"
}

variable "instance_name" {
  description = "Name of the EC2 instance"
  type        = string
  default     = "web-server"
}
### File: `modules/ec2/outputs.tf`
output "public_ip" {
  description = "Public IP address of the web server instance"
  value       = aws_instance.web.public_ip
}
## Step 3: Main Configuration
### File: `main.tf`
# AWS provider pinned to the region used by both modules below.
provider "aws" {
region = "us-east-1"
}
# Networking layer: VPC, public subnet, IGW, and route table.
module "vpc" {
source = "./modules/vpc"
cidr_block = "10.0.0.0/16"
vpc_name = "terraform-vpc"
subnet_cidr_block = "10.0.1.0/24"
subnet_name = "public-subnet"
availability_zone = "us-east-1a"
igw_name = "internet-gateway"
route_table_name = "public-route-table"
}
# Compute layer: wired to the VPC module via its vpc_id/subnet_id outputs,
# which also makes Terraform create the network before the instance.
module "ec2" {
source = "./modules/ec2"
vpc_id = module.vpc.vpc_id
subnet_id = module.vpc.subnet_id
instance_type = "t2.micro"
security_group_name = "web-sg"
instance_name = "web-server"
}
# Re-export the instance's public IP at the root so `terraform apply` prints it.
output "ec2_public_ip" {
value = module.ec2.public_ip
}
## Step 4: Run the Lab
1. Initialize Terraform
terraform init
2. Apply the Configuration
terraform apply
Confirm with `yes`.
## Step 5: Verify the Resources
- Terraform will output the public IP of the EC2 instance.
- Connect to the EC2 instance over SSH using its public IP. (A browser test on port 80 will only succeed after a web server is installed and running on the instance.)
# Hands-On Lab: Create Amazon EKS Cluster Using Terraform
This lab demonstrates how to create an Amazon EKS cluster using Terraform, leveraging separate `vpc.tf` and `eks.tf` files for modular configuration.
## Lab Objectives
1. Create a VPC using the `terraform-aws-modules/vpc` module.
2. Deploy an Amazon EKS cluster using the `terraform-aws-modules/eks` module.
3. Validate the EKS cluster and associated resources.
## Folder Structure
terraform-eks-lab/
├── main.tf
├── vpc.tf
├── eks.tf
├── variables.tf
├── outputs.tf
├── terraform.tfvars
## Step 1: Prepare the Files
### main.tf
# Pin the AWS provider major version so module/provider compatibility is stable.
# NOTE(review): "~> 4.3" and the module versions below are pinned to older
# releases — confirm they are still available/compatible before reuse.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.3"
}
}
}
provider "aws" {
region = "ap-south-1"
}
# Community VPC module: creates the VPC, public/private subnets across all
# available AZs, and a single shared NAT gateway for the private subnets.
module "myapp-vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.14.2"
name = "myapp-vpc"
cidr = var.vpc_cidr_block
azs = data.aws_availability_zones.available.names
private_subnets = var.private_subnet_cidr_blocks
public_subnets = var.public_subnet_cidr_blocks
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
# The kubernetes.io/* tags let the EKS control plane and AWS load balancer
# controller discover which subnets to use for load balancers.
tags = {
"kubernetes.io/cluster/myapp-eks-cluster" = "shared"
}
public_subnet_tags = {
"kubernetes.io/cluster/myapp-eks-cluster" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/myapp-eks-cluster" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
# Community EKS module: creates the cluster control plane and a managed node
# group in the private subnets. The cluster name must match the subnet tags above.
# NOTE(review): cluster_version "1.22" is end-of-support; t2.micro nodes allow
# very few pods per node — both are fine for a lab, not for real workloads.
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "18.29.0"
cluster_name = "myapp-eks-cluster"
cluster_version = "1.22"
subnet_ids = module.myapp-vpc.private_subnets
vpc_id = module.myapp-vpc.vpc_id
eks_managed_node_groups = {
dev = {
min_size = 1
max_size = 3
desired_size = 2
instance_types = ["t2.micro"]
}
}
tags = {
environment = "development"
application = "myapp"
}
}
### vpc.tf
# Availability zones in the configured region, consumed by the VPC module's "azs".
data "aws_availability_zones" "available" {}

# Bug fix: vpc_cidr_block, private_subnet_cidr_blocks and
# public_subnet_cidr_blocks were re-declared here with empty bodies while
# variables.tf also declares them — Terraform rejects duplicate variable
# declarations in the same module, so the duplicates are removed. Likewise
# the vpc_id output is kept only in outputs.tf.

output "public_subnets" {
  description = "IDs of the public subnets created by the VPC module"
  value       = module.myapp-vpc.public_subnets
}

output "private_subnets" {
  description = "IDs of the private subnets created by the VPC module"
  value       = module.myapp-vpc.private_subnets
}
### eks.tf
output "eks_cluster_id" {
  description = "ID (name) of the EKS cluster"
  value       = module.eks.cluster_id
}

output "eks_cluster_arn" {
  description = "ARN of the EKS cluster"
  value       = module.eks.cluster_arn
}

output "node_group_role_arns" {
  description = "IAM role ARNs of the EKS managed node groups"
  # Bug fix: eks_managed_node_groups is a map keyed by node-group name, and the
  # [*] splat operator only applies to lists/tuples, so the original expression
  # fails at plan time. Iterate over the map's values instead.
  value = [for ng in values(module.eks.eks_managed_node_groups) : ng.iam_role_arn]
}
### variables.tf
# Input variables for the EKS lab. Explicit types catch malformed tfvars
# values at plan time.
variable "vpc_cidr_block" {
  description = "CIDR block for the VPC"
  type        = string
  default     = "10.0.0.0/16"
}

variable "private_subnet_cidr_blocks" {
  description = "CIDR blocks for private subnets"
  type        = list(string)
  default     = ["10.0.1.0/24", "10.0.2.0/24"]
}

variable "public_subnet_cidr_blocks" {
  description = "CIDR blocks for public subnets"
  type        = list(string)
  default     = ["10.0.3.0/24", "10.0.4.0/24"]
}
### outputs.tf
output "vpc_id" {
  description = "ID of the VPC hosting the EKS cluster"
  value       = module.myapp-vpc.vpc_id
}

output "eks_cluster_name" {
  description = "Name of the EKS cluster"
  # Bug fix: the terraform-aws-modules/eks module v18.x does not expose a
  # "cluster_name" output (it was added in v19); in v18 cluster_id holds the
  # cluster name.
  value = module.eks.cluster_id
}

output "eks_cluster_endpoint" {
  description = "API server endpoint of the EKS cluster"
  value       = module.eks.cluster_endpoint
}

output "eks_cluster_security_group_id" {
  description = "Security group ID attached to the EKS cluster control plane"
  value       = module.eks.cluster_security_group_id
}
### terraform.tfvars
vpc_cidr_block = "10.0.0.0/16"
private_subnet_cidr_blocks = ["10.0.1.0/24", "10.0.2.0/24"]
public_subnet_cidr_blocks = ["10.0.3.0/24", "10.0.4.0/24"]
## Step 2: Run Terraform Commands
### Step 2.1: Initialize Terraform
terraform init
### Step 2.2: Validate the Configuration
terraform validate
### Step 2.3: Plan the Deployment
terraform plan
### Step 2.4: Apply the Configuration
terraform apply
When prompted, type `yes`.
## Step 3: Validate Resources
### Step 3.1: Verify the VPC
- Navigate to the AWS Management Console.
- Check the created VPC, subnets, internet gateway, and route tables.
### Step 3.2: Verify the EKS Cluster
- Navigate to the Amazon EKS console.
- Confirm the cluster, node group, and associated resources.
### Step 3.3: Connect to the EKS Cluster
Configure `kubectl`:
aws eks --region ap-south-1 update-kubeconfig --name myapp-eks-cluster
List the nodes:
kubectl get nodes
## Key Points
- VPC Module: Dynamically creates VPC, subnets, NAT gateway, and route tables.
- EKS Module: Automates EKS cluster and managed node group creation.
- Data Sources: Used for fetching availability zones.
- Outputs: Facilitate debugging and integration with other systems.