There are many use cases for configuring k8s cluster federation. The most important is high availability across AWS regions (or even within a single region), but the approach can also be applied to hybrid use cases. In this exercise, we set up two clusters, `earth` and `mars`, in two different AWS regions, `us-east-1` and `us-west-2`. Then we configure a federation between the two clusters, called `interstellar`. Lastly, we deploy a Service to the federated cluster.
- Create VPC for Earth (us-east-1)
----
EAST_VPCID=`aws ec2 create-vpc --cidr-block 10.20.0.0/16 --region us-east-1 --query 'Vpc.VpcId' --output text`

# enable DNS hostname resolution for the VPC
aws ec2 modify-vpc-attribute --vpc-id $EAST_VPCID --enable-dns-hostnames "{\"Value\":true}" --region us-east-1

# create an internet gateway and attach it to the VPC
EAST_IGW=`aws ec2 create-internet-gateway --region us-east-1 --query 'InternetGateway.InternetGatewayId' --output text`
aws ec2 attach-internet-gateway --internet-gateway-id $EAST_IGW --vpc-id $EAST_VPCID --region us-east-1
----
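As a quick sanity check (assuming the `EAST_VPCID` and `EAST_IGW` variables from above are still set in your shell), you can confirm the gateway really is attached to the new VPC:

----
# the attachment list should show $EAST_VPCID in the "available" state
aws ec2 describe-internet-gateways --internet-gateway-ids $EAST_IGW \
    --region us-east-1 --query 'InternetGateways[0].Attachments'
----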
- Create Route53 private hosted zone for Earth
----
ID=$(uuidgen) && aws route53 create-hosted-zone --name earth-k8s.internal \
    --vpc VPCRegion=us-east-1,VPCId=$EAST_VPCID --caller-reference $ID \
    | jq .DelegationSet.NameServers
----
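To double-check that the zone was created as a private zone attached to the right VPC, a lookup along these lines (reusing `jq` as above) should work:

----
# Config.PrivateZone should be true for earth-k8s.internal
aws route53 list-hosted-zones-by-name --dns-name earth-k8s.internal \
    | jq '.HostedZones[0].Config'
----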
- Create k8s cluster for Earth
----
kops create cluster --dns private --name earth-k8s.internal \
    --zones us-east-1a,us-east-1b --state s3://dalbhanj-kubernetes-aws-io \
    --vpc $EAST_VPCID --network-cidr 10.20.0.0/16 --ssh-public-key $mypubkey

# edit cluster info if needed (for example, subnet sizes)
kops edit cluster earth-k8s.internal

# update the cluster and commit the changes
kops update cluster earth-k8s.internal --yes

# The Earth cluster should be ready to use in a few minutes.
# Refer to https://github.com/dalbhanj/kubernetes-aws-workshop/tree/master/setup_cluster_hacks[setup cluster hacks]
# to see how you can use the master nodes for running the rest of the administrative commands.

# on the master node, set an alias for the context and verify you receive the correct info
kubectl config set-context earth --cluster=earth-k8s.internal --user=earth-k8s.internal
kubectl --context=earth get nodes
----
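Before moving on, it is worth confirming the cluster is actually healthy; a minimal check against the same state store (repeat with `mars-k8s.internal` once the Mars cluster is up):

----
# wait until all masters and nodes report Ready
kops validate cluster --name earth-k8s.internal --state s3://dalbhanj-kubernetes-aws-io
kubectl --context=earth get nodes
----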
- Create VPC for Mars (us-west-2). Note that the Mars VPC uses a different, non-overlapping CIDR block (`10.30.0.0/16`).
----
WEST_VPCID=`aws ec2 create-vpc --cidr-block 10.30.0.0/16 --region us-west-2 --query 'Vpc.VpcId' --output text`

# enable DNS hostname resolution for the VPC
aws ec2 modify-vpc-attribute --vpc-id $WEST_VPCID --enable-dns-hostnames "{\"Value\":true}" --region us-west-2

# create an internet gateway and attach it to the VPC
WEST_IGW=`aws ec2 create-internet-gateway --region us-west-2 --query 'InternetGateway.InternetGatewayId' --output text`
aws ec2 attach-internet-gateway --internet-gateway-id $WEST_IGW --vpc-id $WEST_VPCID --region us-west-2
----
- Create Route53 private hosted zone for Mars
----
ID=$(uuidgen) && aws route53 create-hosted-zone --name mars-k8s.internal \
    --vpc VPCRegion=us-west-2,VPCId=$WEST_VPCID --caller-reference $ID \
    | jq .DelegationSet.NameServers
----
- Create k8s cluster for Mars
----
kops create cluster --dns private --name mars-k8s.internal \
    --zones us-west-2a,us-west-2b --state s3://dalbhanj-kubernetes-aws-io \
    --vpc $WEST_VPCID --network-cidr 10.30.0.0/16 --ssh-public-key $mypubkey

# edit cluster info if needed (for example, subnet sizes)
kops edit cluster mars-k8s.internal

# update the cluster and commit the changes
kops update cluster mars-k8s.internal --yes

# The Mars cluster should be ready to use in a few minutes.
# Refer to https://github.com/dalbhanj/kubernetes-aws-workshop/tree/master/setup_cluster_hacks[setup cluster hacks]
# to see how you can use the master nodes for running the rest of the administrative commands.

# on the master node, set an alias for the context and verify you receive the correct info
kubectl config set-context mars --cluster=mars-k8s.internal --user=mars-k8s.internal
kubectl --context=mars get nodes
----
- IAM role configuration
In addition to the master node hacks, you need to grant extra permissions to both master node IAM roles so that each cluster can reach the other's S3 state store and Route53 zone:

* Edit the Earth master node IAM role and add the ARN of the Mars Route53 zone under the Route53 permissions; do the same on the Mars role with the Earth zone ARN.
* Edit the Earth master node IAM role and add the S3 ARN for the Mars state objects under the S3 permissions; do the same on the Mars role with the Earth objects.
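As a rough sketch of what the added statements could look like, here is a hypothetical inline policy for the Earth masters role. The role name follows the usual kops naming convention and `<MARS_ZONE_ID>` is a placeholder; both are assumptions, so adapt them to what your account actually shows:

----
# hypothetical example only: grant the Earth masters role access to the Mars
# Route53 zone and to the shared kops state bucket
aws iam put-role-policy --role-name masters.earth-k8s.internal \
    --policy-name federation-cross-cluster \
    --policy-document '{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": "arn:aws:route53:::hostedzone/<MARS_ZONE_ID>"
    },
    {
      "Effect": "Allow",
      "Action": ["s3:GetObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::dalbhanj-kubernetes-aws-io",
        "arn:aws:s3:::dalbhanj-kubernetes-aws-io/*"
      ]
    }
  ]
}'
----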
- Route53 VPC association
----
# get the Route53 zone IDs for earth-k8s.internal and mars-k8s.internal
export Zone1ID=ZQ7FF98RPJWSF
export Zone2ID=Z39IYJL1VINZWF

# authorize each zone to be associated with the other cluster's VPC
aws route53 create-vpc-association-authorization --hosted-zone-id $Zone1ID \
    --vpc VPCRegion=us-west-2,VPCId=$WEST_VPCID
aws route53 create-vpc-association-authorization --hosted-zone-id $Zone2ID \
    --vpc VPCRegion=us-east-1,VPCId=$EAST_VPCID

## I ran into an issue with the Route53 console not showing the correct VPC associations,
## so I added the VPC associations manually via the console.
## It took a few minutes for dig to resolve both domain names on both master nodes.
## Make sure both master nodes can run 'validate' successfully and have a context
## defined for both clusters before moving forward.
----
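For reference, the association done manually in the console can also be completed from the CLI once the authorizations above exist, and `dig` makes a handy check that the private zones resolve from both sides:

----
# complete the cross-region associations
aws route53 associate-vpc-with-hosted-zone --hosted-zone-id $Zone1ID \
    --vpc VPCRegion=us-west-2,VPCId=$WEST_VPCID
aws route53 associate-vpc-with-hosted-zone --hosted-zone-id $Zone2ID \
    --vpc VPCRegion=us-east-1,VPCId=$EAST_VPCID

# from each master node, both API records should now resolve
# (assumes kops created the usual api.<cluster-name> records in each zone)
dig +short api.earth-k8s.internal
dig +short api.mars-k8s.internal
----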
- Install the latest k8s client binaries
----
# log in to the earth master node,
# then download the latest k8s client binaries and copy them into place
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/kubernetes-client-linux-amd64.tar.gz
tar -xzvf kubernetes-client-linux-amd64.tar.gz
sudo cp kubernetes/client/bin/kubefed /usr/local/bin
sudo chmod +x /usr/local/bin/kubefed
sudo cp kubernetes/client/bin/kubectl /usr/local/bin
sudo chmod +x /usr/local/bin/kubectl

# check context settings
kubectl config get-contexts
CURRENT   NAME                 CLUSTER              AUTHINFO             NAMESPACE
          earth-k8s.internal   earth-k8s.internal   earth-k8s.internal
          mars                 mars-k8s.internal    mars-k8s.internal
*         mars-k8s.internal    mars-k8s.internal    mars-k8s.internal
          earth                earth-k8s.internal   earth-k8s.internal

# use earth as the host cluster
kubectl config use-context earth

# create the RBAC cluster role binding
kubectl create clusterrolebinding admin-to-cluster-admin-binding \
    --clusterrole=cluster-admin --user=admin
----
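A quick way to confirm that the freshly copied binaries are the ones on your PATH before initializing the federation:

----
# both should resolve to the /usr/local/bin copies installed above
which kubectl kubefed
kubectl version --client
----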
- Deploy the federation control plane in the host cluster
----
kubefed init interstellar --host-cluster-context=earth \
    --dns-provider=aws-route53 --dns-zone-name=earth-mars-k8s.internal
Creating a namespace federation-system for federation system components... done
Creating federation control plane service..... done
Creating federation control plane objects (credentials, persistent volume claim)... done
Creating federation component deployments... done
Updating kubeconfig... done
Waiting for federation control plane to come up............................................................................ done
Federation API server is running at: a3c168c58981911e7b3e90af787d6efe-674228795.us-east-1.elb.amazonaws.com

# you can check the newly created context
kubectl config get-contexts

# change the context to federation
kubectl config use-context interstellar

# create the default namespace if you don't see it (possible bug)
kubectl get namespace --context=interstellar
kubectl create namespace default --context=interstellar
----
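If the control plane takes a long time to come up, its components live in the `federation-system` namespace of the host cluster and can be inspected directly:

----
# the federation apiserver and controller-manager pods should reach Running,
# and the control plane service should get an ELB hostname
kubectl --context=earth get pods -n federation-system
kubectl --context=earth get svc -n federation-system
----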
- Join the Earth and Mars clusters to the federation
----
kubefed join earth --host-cluster-context=earth --cluster-context=earth
kubefed join mars --host-cluster-context=earth --cluster-context=mars

# check the status of the clusters
# it took several hours for both clusters to show as Ready
kubectl --context=interstellar get clusters
NAME      STATUS    AGE
earth     Ready     13h
mars      Ready     13h
----
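If a cluster sits in a not-Ready state for a long time (as happened here), describing it from the federation context can surface the cause, which is typically a credentials or DNS resolution problem:

----
# inspect conditions and events for a stuck member cluster
kubectl --context=interstellar describe cluster earth
kubectl --context=interstellar describe cluster mars
----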
- Deploy application
----
# deploy an application using replica sets
kubectl run nginx --image=nginx --replicas=2
kubectl get deployments -o wide
NAME      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE       CONTAINER(S)   IMAGE(S)   SELECTOR
nginx     2         0         0            0           14s       nginx          nginx      run=nginx

# the deployment never became available here:
# the same commands work against the independent clusters but not against the federation
----
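When a federated deployment stalls like this, a useful triage step is to check whether the object ever propagated to the member clusters:

----
# events on the federation control plane often explain propagation failures
kubectl --context=interstellar get events

# see whether the deployment reached either member cluster
kubectl --context=earth get deployments
kubectl --context=mars get deployments
----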
- Deploy Service
----
# expose the application using an ELB
kubectl expose deployment nginx --type=LoadBalancer --port=80
kubectl get services -o wide
kubectl get events

# clean up
kubectl delete services nginx
kubectl delete deployment nginx
----
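Before cleaning up, you can check whether the federated Service was mirrored to each member cluster, and whether each copy received its own ELB:

----
# each member cluster should carry a shadow of the federated service
kubectl --context=earth get services nginx -o wide
kubectl --context=mars get services nginx -o wide
----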