CKA Guide
STORAGE - 10%
WORKLOAD AND SCHEDULING - 15%
CLUSTER ARCHITECTURE, INSTALLATION AND CONFIGURATION - 25%
SERVICES AND NETWORKING - 20%
TROUBLESHOOTING - 30%
Step 1
Create 3 Ubuntu VMs, each running with a different IP address, and set the network mode to "bridge".
System names
Ubuntu-Master - 192.168.1.101
Ubuntu-Worker1 - 192.168.1.102
Ubuntu-Worker2 - 192.168.1.103
sudo hostnamectl set-hostname Ubuntu-Master - on the first system
sudo hostnamectl set-hostname Ubuntu-Worker1 - on the second system
sudo hostnamectl set-hostname Ubuntu-Worker2 - on the third system
Step 2
IN ALL MACHINES
sudo apt-get update
sudo apt-get install docker.io
docker --version
Enable the docker service:
sudo su
systemctl enable docker
systemctl start docker
systemctl status docker
Step 3
IN ALL MACHINES
Step 4
IN ALL MACHINES
Add Kubernetes repo
sudo apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
sudo apt-get install kubeadm kubelet kubectl
sudo apt-mark hold kubeadm kubelet kubectl
Wait for 10 mins
kubeadm version
sudo swapoff -a (IN ALL MACHINES)
Step 5
IN Ubuntu-Master Machine
sudo kubeadm init --pod-network-cidr=10.244.0.0/16
It will display a kubeadm join command (KEEP A BACKUP OF THE STATEMENT)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl get pods --all-namespaces
Step 6
IN ALL WORKER MACHINES
Run the backed-up kubeadm join command in full, with the parameters it was generated with.
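The exact join command comes from the kubeadm init output on Ubuntu-Master; it has roughly this shape (the token and hash below are placeholders, not real values):

sudo kubeadm join 192.168.1.101:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>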
40 cd CKA
41 ls
42 kubectl get nodes
43 git clone https://github.com/vishymails/YAML.git
44 kubectl get pods
45 kubectl get po
46 ls
47 kubectl create -f example1.yml
48 kubectl get pods
49 kubectl get pods -o wide
50 kubectl get pods -o yaml
51 kubectl describe pod tomcat-pod
52 kubectl get pods -o wide
53 ping 10.244.1.2
54 kubectl exec -it tomcat-pod -- /bin/sh
55 history
56 kubectl delete pod tomcat-pod
57 kubectl get pods -o wide
58 clear
59 alaias g=kubectl
60 alias g=kubectl
61 g get nodes
62 g get pods
66 g run admin-pod --image=busybox --dry-run=client -o yaml --command -- sleep 3200
67 g get pods
68 g run admin-pod --image=busybox --dry-run=client -o yaml --command -- sleep 3200
69 g get pods
70 g run admin-pod --image=busybox --dry-run=client -o yaml --command -- sleep 3200
71 g get pods
72 g run admin-pod --image=busybox --dry-run=client -o yaml --command -- sleep 3200 | tee example2.yaml
73 g run admin-pod --image=busybox --dry-run=client -o yaml --command -- sleep 3200 | tee example3.yaml
74 echo '    securityContext:
      capabilities:
        add: ["NET_ADMIN", "SYS_TIME"]
'
75 echo '    securityContext:
      capabilities:
        add: ["NET_ADMIN", "SYS_TIME"]
' | tee -a example3.yaml
76 g get pos
77 g get po
78 g delete -f example2.yaml
79 g delete pod admin-pods
80 g create -f example3.yaml
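example2.yaml/example3.yaml themselves are not reproduced in these notes; a plausible, trimmed reconstruction of example3.yaml after the securityContext append (structure taken from the kubectl run command above, so treat it as a sketch) is:

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: admin-pod
  name: admin-pod
spec:
  containers:
  - command: ["sleep", "3200"]
    image: busybox
    name: admin-pod
    securityContext:
      capabilities:
        add: ["NET_ADMIN", "SYS_TIME"]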
REPLICATION CONTROLLER
82 g get pods
83 g delete -f example3.yaml
84 g create -f example4.yml
85 g get po
86 g get po -o wide
87 g get po -l app=tomcat-app
88 g describe rc tomcat-rc
89 g scale rc tomcat-rc --replicas=9
90 g get po -l app=tomcat-app
91 g get po -o wide
92 g scale rc tomcat-rc --replicas=3
93 g get po -o wide
94 g delete rc tomcat-rc
95 g delete -f example4.yml
96 history
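example4.yml is not shown in these notes; a minimal ReplicationController sketch consistent with the commands above (name tomcat-rc, label app=tomcat-app; the image is assumed from the tomcat Deployment later in this guide) is:

apiVersion: v1
kind: ReplicationController
metadata:
  name: tomcat-rc
spec:
  replicas: 3
  selector:
    app: tomcat-app
  template:
    metadata:
      labels:
        app: tomcat-app
    spec:
      containers:
      - name: tomcat-container
        image: vishymails/tomcatimage:1.0
        ports:
        - containerPort: 8080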
REPLICA SETS
g create -f example5.yml
98 g get pods
99 g get pods -l tier=frontend
100 g get pods -l tier=backendend
101 g get pods -l tier=backend
102 g get rs tomcat-rd -o wide
103 g get rs tomcat-rs -o wide
104 g describe rs tomcat-rs
105 g get po -o wide
106 g scale rs tomcat-rs --replicas=9
107 g get po -o wide
108 g scale rs tomcat-rs --replicas=3
109 g get po -o wide
110 g delete rs tomcat-rs
111 g delete -f example5.yml
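example5.yml is likewise not shown; a minimal ReplicaSet sketch that matches the names and labels queried above (image again assumed) is:

apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: tomcat-rs
  labels:
    app: tomcat-app
spec:
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: tomcat-container
        image: vishymails/tomcatimage:1.0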
DEPLOYMENT
1 g create -f examle7.yml
2 g create -f example7.yml
3 g get deploy
4 g get deploy -l app=tomcat-app
5 g get rs -l app=tomcat-app
6 g get po
7 g get po -o wide
8 g describe deploy tomcat-deploy
9 g deploy
10 g get deploy
11 g scale deployment tomcat-deploy --replicas=9
12 g get po
13 g scale deployment tomcat-deploy --replicas=1
14 g get po
15 history
ROLLOUTS
ROLLBACK
IN ANOTHER TERMINAL
43 alias g=kubectl
44 g rollout history deployment/tomcat-deploy
45 g rollout undo deployment/tomcat-deploy
46 g rollout status deployment/tomcat-deploy
57 g get deploy
58 g delete deploy tomcat-deploy
59 g delete deploy web-proj-268
60 g create deployment web-proj-268 --image=nginx:1.16 -o yaml | tee example8.yaml
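rollout history/undo only show something once the Deployment has more than one revision; a minimal way to create a new revision (the container name is taken from the tomcat-deploy manifest later in this guide, and the 2.0 tag is hypothetical) is:

g set image deployment/tomcat-deploy tomcat-containers=vishymails/tomcatimage:2.0
g rollout status deployment/tomcat-deploy
g rollout history deployment/tomcat-deploy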
alias g=kubectl
105 g create deployment web-003 --image=nginx --replicas=3 -o yaml | tee example9.yaml
106 g get pods
112 g get po -l app=web-003
113 g delete pod web-003-75d568fccc-bjzq6
114 g get po -l app=web-003
CHECKING MASTER NODE DESCRIPTION AND LOGS
98 ls /etc
99 ls /etc/kubernetes
100 ls /etc/kubernetes/manifests
101 clear
102 sudo vi /etc/kubernetes/manifests/kube-controller-manager.yaml
1 clear
2 g get nodes
3 ps -ef | grep kubelet
4 sudo grep static /var/lib/kubelet/config.yaml
5 g run static-nginx --image=nginx --dry-run=client -o yaml
6 g run static-nginx --image=nginx --dry-run=client -o yaml > example10.yml
7 cat example10.yml | ssh kworker1 "tee static-pod.yaml"
8 g get pods
9 g get pods -o wide
10 history
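The static-pod.yaml copied to kworker1 only becomes a static pod once it sits in the kubelet's staticPodPath (shown by the grep above; /etc/kubernetes/manifests by default on kubeadm); a minimal sketch assuming that default path:

ssh kworker1 "sudo mv static-pod.yaml /etc/kubernetes/manifests/"
g get pods -o wide
(the static pod shows up with the node name appended, e.g. static-nginx-kworker1)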
MULTICONTAINER POD
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: pod-multi
  name: pod-multi
spec:
  containers:
  - image: nginx
    name: container1
  - image: busybox
    name: container2
    command: ["sleep","4800"]
g create -f multipod.yaml
g get pods -o wide
g describe pod pod-multi
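To open a shell in one specific container of the multi-container pod, pass -c/--container:

g exec -it pod-multi -c container2 -- sh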
SERVICES
1. NODE PORT
2. LOADBALANCER
3. CLUSTER IP
NODEPORT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-deploy
  labels:
    app: tomcat-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: tomcat-app
  template:
    metadata:
      labels:
        app: tomcat-app
    spec:
      containers:
      - name: tomcat-containers
        image: vishymails/tomcatimage:1.0
        ports:
        - containerPort: 8080
-------------------------
apiVersion: v1
kind: Service
metadata:
  name: my-service
  labels:
    app: tomcat-app
spec:
  selector:
    app: tomcat-app
  type: NodePort
  ports:
  - nodePort: 31000
    port: 80
    targetPort: 8080
1 g get po
2 g delete -f example11.yml
3 g apply -f example12-1.yml
4 g get po
5 g apply -f example12-2.yml
6 kubectl get service -l app=tomcatapp
7 kubectl get service -l app=tomcat-app
8 kubectl get service -l app=tomcat-app -o wide
9 g get po -o wide
10 g describe svc my-service
11 curl
12 curl http://10.244.1.44:8080
13 curl http://10.244.1.35:8080
14 g describe svc my-service
15 curl http://10.244.2.35:8080
16 curl http://10.244.2.36:8080
17 g get odes
18 g get nodes -o wide
19 curl http://172.31.26.246:31000
20 history
LOADBALANCER SERVICE
apiVersion: v1
kind: Service
metadata:
  name: my-service
  labels:
    app: tomcat-app
spec:
  selector:
    app: tomcat-app
  type: LoadBalancer
  ports:
  - nodePort: 31000
    port: 8080
    targetPort: 8080
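On a bare-metal kubeadm cluster there is no cloud controller to allocate an external IP, so EXTERNAL-IP normally stays <pending>; the service is still reachable through the nodePort. Check with:

g get svc my-service
g describe svc my-service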
g create ns defense
2 g run delta-pod --image=nginx:1.17 --labels env=dev,tier=front -n defense
3 g get pods
4 g get pods -n defense
5 g describe pods delta-pod -n defense
6 g get pods
7 g describe pods audit-web-app
8 history
9 g get namespace
12 g get all
13 g get all -n defense
14 g get all --namespace defense
16 g api-resources
17 g api-resources --namespaced=true
18 g api-resources --namespaced=false
Q10) Get the web-load-5461 pod details in JSON format and store them in a file at /opt/output/web-load-5461-j070822n.json
21 g get po
22 g apply -f example6.yaml
23 g get po
25 ls /opt/output
26 sudo ls /opt/output
27 g get pods web-load-5461 -o json | sudo tee /opt/output/web-load-5461-j070822n.json
28 sudo ls /opt/output
29 sudo mkdir /opt/output
30 g get pods web-load-5461 -o json | sudo tee /opt/output/web-load-5461-j070822n.json
31 sudo ls /opt/output
32 sudo cat /opt/output/web-load-5461-j070822n.json
Q13) Use a JSONPath query to retrieve the OS images of all K8s nodes and store the result in a file ~/allNodeOSImages8.txt
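No solution is recorded for this one; one JSONPath query that returns the OS image of every node (the standard .status.nodeInfo.osImage field) is:

g get nodes -o jsonpath='{.items[*].status.nodeInfo.osImage}' > ~/allNodeOSImages8.txt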
PERSISTENT VOLUMES
1. PV - PERSISTENT VOLUME
2. PVC - PERSISTENT VOLUME CLAIMS
1. PROVISIONING
2. BINDING
3. USING
4. RECLAIMING
PROVISIONING - 2 TYPES
1. STATIC
2. DYNAMIC
TEMPORARY SPACE
emptydir demo
1 g apply -f example15.yml
2 kubectl get po
3 kubectl exec -it tomcat-pod -- /bin/bash
4 g describe pod tomcat-pod
5 kubectl exec -it tomcat-pod -- /bin/bash
6 g delete pod tomcat-pod
7 g apply -f example15.yml
8 kubectl exec -it tomcat-pod -- /bin/bash
9 history
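example15.yml is not reproduced here; a minimal emptyDir pod sketch matching the tomcat-pod name used above (image and mount path assumed) is:

apiVersion: v1
kind: Pod
metadata:
  name: tomcat-pod
spec:
  containers:
  - name: tomcat-container
    image: vishymails/tomcatimage:1.0
    volumeMounts:
    - mountPath: /scratch
      name: temp-volume
  volumes:
  - name: temp-volume
    emptyDir: {}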
HOSTPATH
19 g get po
20 g apply -f example16.yml
21 g get po -o wide
22 kubectl exec -it tomcat-hostpath -- /bin/bash
23 g describe pod tomcat-hostpath
24 g get po
25 g delete pod tomcat-hostpath
26 g apply -f example16.yml
27 kubectl exec -it tomcat-hostpath -- /bin/bash
29 g create -f example18.yml
30 g get po
31 g exec -it multicontainer-pod1 --container producer -- /bin/bash
32 g exec -it multicontainer-pod1 --container consumer -- /bin/bash
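example18.yml (the producer/consumer pod) is also not shown; it most likely follows the standard pattern of two containers sharing one emptyDir volume. A sketch, with image and commands assumed:

apiVersion: v1
kind: Pod
metadata:
  name: multicontainer-pod1
spec:
  containers:
  - name: producer
    image: ubuntu
    command: ["/bin/bash","-c","while true; do date >> /shared/data.txt; sleep 5; done"]
    volumeMounts:
    - name: shared
      mountPath: /shared
  - name: consumer
    image: ubuntu
    command: ["/bin/bash","-c","sleep 10 && tail -f /shared/data.txt"]
    volumeMounts:
    - name: shared
      mountPath: /shared
  volumes:
  - name: shared
    emptyDir: {}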
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kube-pv
spec:
  storageClassName: standard
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /mnt/nginx
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kube-pv
spec:
  storageClassName: standard
  resources:
    requests:
      storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
  labels:
    name: pv-pod
spec:
  containers:
  - name: pv-pod
    image: nginx
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
    ports:
    - containerPort: 80
  volumes:
  - name: test-volume
    persistentVolumeClaim:
      claimName: kube-pv
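After applying the PV, PVC and pod above, the claim should show as Bound and the pod as Running:

g get pv,pvc
g describe pod pv-pod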
--------------------
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 500Gi
  nfs:
    path: /
    server: 10.218.47.252
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  volumeMode: Filesystem
---
PVC
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
  storageClassName: nfs
  volumeMode: Filesystem
  volumeName: nfs-pv
Demo 3:
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kube-pv
spec:
  storageClassName: standard
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /etc/pv-store
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kube-pvc
spec:
  storageClassName: standard
  resources:
    requests:
      storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
---
apiVersion: v1
kind: Pod
metadata:
  name: pv-podhostpath
  labels:
    name: pv-pod
spec:
  nodeName: kworker1
  containers:
  - name: pv-pod
    image: nginx
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
    ports:
    - containerPort: 80
  volumes:
  - name: test-volume
    persistentVolumeClaim:
      claimName: kube-pvc
DEMO 4
------
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kube-pv
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /mnt/datas
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kube-pvc
spec:
  storageClassName: manual
  resources:
    requests:
      storage: 1Gi
  accessModes:
  - ReadWriteOnce
---
apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
  labels:
    name: pv-pod
spec:
  containers:
  - name: pv-pod
    image: nginx
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
    ports:
    - containerPort: 80
  volumes:
  - name: test-volume
    persistentVolumeClaim:
      claimName: kube-pvc
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-name
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 40Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-name
spec:
  storageClassName: manual
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  volumeName: pv-name
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  storageClassName: ""
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    namespace: default
    name: myclaim
  nfs:
    path: /tmp
    server: 172.17.0.2
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim
spec:
  storageClassName: ""
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-rnd
spec:
  capacity:
    storage: 100Mi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  hostPath:
    path: /pv/host-data-rnd
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypvl
spec:
  capacity:
    storage: 100Mi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /pv/log
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-claim-l
spec:
  accessModes:
  - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 50Mi
---
apiVersion: v1
kind: Pod
metadata:
  name: my-nginx-pod
spec:
  containers:
  - name: mynginx
    image: nginx
    volumeMounts:
    - mountPath: "/log"
      name: mypd
  volumes:
  - name: mypd
    persistentVolumeClaim:
      claimName: pv-claim-l
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
reclaimPolicy: Retain
allowVolumeExpansion: true
mountOptions:
- debug
volumeBindingMode: Immediate
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  nodeSelector:
    kubernetes.io/hostname: kube-01
  volumes:
  - name: task-pv-storage
    persistentVolumeClaim:
      claimName: task-pv-claim
  containers:
  - name: task-pv-container
    image: nginx
    ports:
    - containerPort: 80
      name: "http-server"
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: task-pv-storage
STORAGE CLASSES
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-standard
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: failure-domain.beta.kubernetes.io/zone
    values:
    - us-central-1a
    - us-central-1b

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/aws-ebs
parameters:
  type: io1
  iopsPerGB: "10"
  fsType: ext4

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-standard
  fstype: ext4
  replication-type: none

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-nfs
provisioner: example.com/external-nfs
parameters:
  server: nfs-server.example.com
  path: /share
  readOnly: "false"

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gold
provisioner: kubernetes.io/cinder
parameters:
  availability: nova

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick
  datastore: VSANDatastore

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: eastus
  storageAccount: azure_storage_account_name

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/azure-disk
parameters:
  storageaccounttype: Standard_LRS
  kind: managed

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: azurefile
provisioner: kubernetes.io/azure-file
parameters:
  skuName: Standard_LRS
  location: eastus
  storageAccount: azure_storage_account_name

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: portworx-io-priority-high
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "1"
  snap_interval: "70"
  priority_io: "high"
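To consume any of these classes a PVC just names the class in storageClassName; a minimal sketch using the "standard" class above (the claim name is hypothetical):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dynamic-claim
spec:
  storageClassName: standard
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi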
DAEMON SETS
-----------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluent-ds
spec:
  selector:
    matchLabels:
      name: fluentd
  template:
    metadata:
      labels:
        name: fluentd
    spec:
      containers:
      - name: fluentd
        image: gcr.io/google-containers/fluentd-elasticsearch:1.20
SECRETS
-------
apiVersion: v1
kind: Secret
metadata:
  name: secret-ssh-authentication
type: kubernetes.io/ssh-auth
data:
  ssh-privatekey: |
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAA
BlwAAAAdzc2gtcn
NhAAAAAwEAAQAAAYEAykPOPaEGHsGwLfu1xlMDXXMeNA4y7MmDfaxr8
Ud8yyJF+l8f/eQJ
PU7irfCElg54ogEhaGKEgUthyhpN9//79YQQlzD8MqAtxbed/
Y7lLp12Z0Yp7u6WPeFPXW
UZU9T40oNcfwUWEd9lE69tmYUqTllQ5fX0zPFZYrrpIypZUe23gVy52Ojg3
23Bv67rKIpb
Ds5mxZTucNlYSjGrgjcBR+ZITgjMpVUUxVyaZjLeKftRKOLbNiosq/
bxijNZ4Rw/DFVgVX
P7dEJih/
3KJhri0IkuYUflmFhnFHhvdWsZUfcu6Iwk7wmYzbR8FEiSxYWC1ui0OKkz51
Ua
NRgJPp5LBtOYqCVJqV5nEJzzYvB1Fg2hOsjez9zjci2lMAnxBbj6FSl/rMmk/
UqvWXgJKv
8t+IH9uZxFDdadU1OBget12cN7zI92T2tkdmuZJC63RxBwr9HEHfqi0nQvB
Q+JkCEJoREB
TM/
Hma9QNFoTrs+gH2Xg1a4HlOnjvSeeJC4JUxwdAAAFiBLeZwYS3mcGAAAA
B3NzaC1yc2
EAAAGBAMpDzj2hBh7BsC37tcZTA11zHjQOMuzJg32sa/
FHfMsiRfpfH/3kCT1O4q3whJYO
eKIBIWhihIFLYcoaTff/+/WEEJcw/
DKgLcW3nf2O5S6ddmdGKe7ulj3hT11lGVPU+NKDXH
8FFhHfZROvbZmFKk5ZUOX19MzxWWK66SMqWVHtt4Fcudjo4N9twb+u6
yiKWw7OZsWU7nDZ
WEoxq4I3AUfmSE4IzKVVFMVcmmYy3in7USji2zYqLKv28YozWeEcPwxVYF
Vz+3RCYof9yi
Ya4tCJLmFH5ZhYZxR4b3VrGVH3LuiMJO8JmM20fBRIksWFgtbotDipM+dV
GjUYCT6eSwbT
mKglSaleZxCc82LwdRYNoTrI3s/
c43ItpTAJ8QW4+hUpf6zJpP1Kr1l4CSr/LfiB/bmcRQ
3WnVNTgYHrddnDe8yPdk9rZHZrmSQut0cQcK/
RxB36otJ0LwUPiZAhCaERAUzPx5mvUDRa
E67PoB9l4NWuB5Tp470nniQuCVMcHQAAAAMBAAEAAAGAHGvkuiI7Uu0S
jZOWGlUl3ubDMr
tgCofSc0FcNZ4++ehJ/wGI5Es7xSKllZ17c/
56kwEnqZxWVDi8eAK0PAn7ZKd3EVevyIlb
hTHlAHEo4SD9N0vTrqBV+kIDvfLr2SzO/
f25bJvRWxeSA28eLnlY1YOVa4rhKD8tuULcab
WUsX9+zq2x67nlh1/
L3tx8bMjsPKtiI5dm3fKchmhuWjDSfEDunhB8eSr4bTcy9vNk5m9L
GzytYvIQLuXXUP0gPxKRxXosUvvmtitgRDHZYpyK07ckUrwHcTmjj0zIKco4
axICN9QZiD
DkPkA42qCVRkEzWPAntddxV9UVvApFUvjvH5RFICgIKZd3K5EwkTEyQZaq
OYYGWuLpXNjQ
7IbwhKxjd7xG7RsEBDcKiULiiYEqBV+J3syzKhH3ydGIhtqkTijeiLh/
B92pXhDrTz7e4i
PZRL/
rEk3evOcPkjaqJVdJX56a9GQ+sleisLC1p428rRUnnF5AO19KB2gLbiscaJTd
AAAA
wQC/
HO2uvKz4WpS1tsp+WKw8it5frhwn+eKX9xaQTEQBGvWb1NLT7+2A53ZLG
R0AcmhzO7
0PnoNQM1JdIQCZwQLeWmIEHfd572rDEb3wweIpW4q3l9tWws6mC1bdGp
83r+5BWvHzsxzC
Rr8VvyC8rij8+REw/qJpuhuatR5Qy6lIWYdRczx7Xpot5/
cSqJrBlbl+KBQdAwe3NMVVcY
A9LP9eTazMkQ5gXOU5qtPgvF8efYvQ2BqHDwCw9xconHHJSlwAAADBAO
hD/611vqVzxHyv
dRg6mYxMFfsO0xcOKaMIeGlzosl9dpk70HzD8FmNWcoqnKWNPZxQ7qFaK
bH0IoTDdU6nHp
mn7l76NjvbA9/7+4jmqBEdCL1e3QaU5jeVNRHocFhllzZLkZnYMNvn9JznQi1
e9ycYgy3C
1yUVSmfTcOJTpzFvNSloM3Tw3uzLZlIvIiFMFhQH9zzS9Q2DMg6Ja1ndEVV
uHM0rVNJBod
Z03TF0w0jval+UYHzg0xSVb/
5UeesfXwAAAMEA3u7+8FHPOP8WJbYzWWFCyEGnO8OlFN8/
gH6vgRm7t2p6Z/
2I5RHbSz9tYOZk6HE06DX1ElenA7C08K9GidpfCLHGdkgPr3ZpeLqmjq
Bf/
35R0eNTINceZlFyKk+CMF9O2he0mmGJDnsWk2I4Qqimf8BJsbkvAbcDsv
LIB5ycrj7Z
+5vNDzi+MwY1wy0ybvDul90KyA3hV7KCx3tLVFT2hNjVkhhlJCgEJSWOxY
mrapR3XTlaxM
Nj0Dzs9BpCjQIDAAAAD2xhYnVzZXJAa21hc3RlcgECAw==
CRON JOB
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cron-demo
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            name: cron-demo
        spec:
          containers:
          - name: cron-demo
            image: centos:7
            args:
            - python
            - -c
            - from datetime import datetime; print('[{}] Cron job demo for CKA batch Oracle ...'.format(datetime.now()))
          restartPolicy: Never
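To watch the schedule fire every minute (the file name below is whatever the manifest above was saved as):

g apply -f cron-demo.yml
g get cronjob cron-demo
g get jobs --watch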
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-hook-pod
spec:
  containers:
  - name: lifecycle-container
    image: nginx
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh","-c", "echo Welcome Oracle > /usr/share/post-start-msg"]
      preStop:
        exec:
          command: ["/usr/sbin/nginx","-s","quit"]
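To confirm the postStart hook ran:

g exec lifecycle-hook-pod -- cat /usr/share/post-start-msg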
Kmaster
alias g=kubectl
g get nodes
sudo apt update
g drain kmaster --ignore-daemonsets
apt-cache madison kubeadm |head
sudo apt install kubeadm=1.26.3-00
sudo kubeadm upgrade apply v1.26.3-00
sudo kubeadm upgrade apply v1.26.3
sudo apt install kubelet=1.26.3
sudo apt install kubelet=1.26.3-00
sudo systemctl restart kubelet
g uncordon kmaster
g get nodes
g drain kworker1 --ignore-daemonsets
g drain kworker1 --ignore-daemonsets --force
g get nodes
kworker1
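The worker-side steps are not recorded above; a minimal sketch of the usual kubeadm worker upgrade to the same 1.26.3 target (run on kworker1) is:

sudo apt install kubeadm=1.26.3-00
sudo kubeadm upgrade node
sudo apt install kubelet=1.26.3-00
sudo systemctl restart kubelet
then, back on kmaster: g uncordon kworker1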
kmaster
Q11) Back up the ETCD database and save it under /root with the backup name "etcd-backup.db"
sudo apt install etcd-client
sudo -i
ETCDCTL_API=3 etcdctl snapshot save /root/etcd-backup070822 --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key
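To verify the snapshot with the same etcdctl binary:

ETCDCTL_API=3 etcdctl snapshot status /root/etcd-backup070822 --write-out=table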
Image=redis:alpine
252 cd CKA
253 clear
254 alias g=kubectl
255 g describe node kmaster
256 g describe node kmaster | grep -i taint
257 g taint node kmaster node-role.kubernetes.io/control-plane:NoSchedule-
258 g taint node kmaster node-role.kubernetes.io/control-plane:NoSchedule
259 g describe node kworker1 | grep -i taint
260 g describe node kworker2 | grep -i taint
261 g taint node kworker1 env_type=production:NoSchedule
262 g run dev-nginx --image=nginx
263 g get po
264 g get po -o wide
265 g run prod-nginx --image=nginx --dry-run=client -o yaml > example44.yml
266 g apply -f example44.yml
267 g get po
268 g get po -o wide
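example44.yml is the generated prod-nginx pod plus a toleration for the env_type=production:NoSchedule taint applied above; a plausible sketch:

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: prod-nginx
  name: prod-nginx
spec:
  containers:
  - image: nginx
    name: prod-nginx
  tolerations:
  - key: "env_type"
    operator: "Equal"
    value: "production"
    effect: "NoSchedule"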
step 1
1 g create ns nec
2 cd CKA
3 openssl genrsa -out nec-adm.key 2048
4 openssl req -new -key nec-adm.key -out nec-adm.csr
5 ls nec*
6 cat nec-adm.csr | base64 | tr -d "\n"
8 g apply -f example45.yml
9 g get csr
10 g get csr nec-adm
11 g certificate approve nec-adm
12 g get csr nec-adm
14 g apply -f example46.yml
15 g apply -f example47.yml
16 g get rolebindin.rbac.authorization.k8s.io -n nec
17 g get rolebinding.rbac.authorization.k8s.io -n nec
18 g get pods -n nec --as nec-adm
20 g auth can-i get pods -n nec --as nec-adm
21 g auth can-i list pods -n nec --as nec-adm
22 g auth can-i create pods -n nec --as nec-adm
19 history
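example46.yml and example47.yml are not reproduced; plausible sketches of the Role and RoleBinding that make the auth can-i checks above succeed (names and verbs assumed) are:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nec-adm-role
  namespace: nec
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nec-adm-binding
  namespace: nec
subjects:
- kind: User
  name: nec-adm
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: nec-adm-role
  apiGroup: rbac.authorization.k8s.io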
Q20) Worker node node7 is not responding; have a look and fix the issue.
node7 - kworker1
node6 - kmaster
1 g get nodes
2 g describe node kworker1
3 ps -ef | grep kubelet | grep lib
4 sudo cat /var/lib/kubelet/config.yaml | ssh kworker1
5 sudo cat /var/lib/kubelet/config.yaml | ssh kworker1 > mconfig.yaml
6 sudo cat /var/lib/kubelet/config.yaml | ssh kworker1 "tee mconfig.yaml"
7 g get nodes
kworker1
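The usual cause in this exercise is a stopped kubelet on the worker; assuming that is the case here, a minimal fix is:

ssh kworker1
sudo systemctl status kubelet
sudo systemctl start kubelet
sudo systemctl enable kubelet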
Q26) A pod "my-data-pod" in the data namespace is not running. Fix the issue and get it into a running state.
cat pv1.yaml
####
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 100Mi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /data
#############
cat pv1claim.yaml
#######
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv1claim
  namespace: data
spec:
  accessModes:
  - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 50Mi
#########
cat pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-data-pod
  namespace: data
spec:
  containers:
  - name: mydata
    image: nginx
    volumeMounts:
    - mountPath: "/maindata"
      name: mypd
  volumes:
  - name: mypd
    persistentVolumeClaim:
      claimName: pv1claim
#########
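A quick way to see why the pod is stuck (typically the claim or the volume binding) is:

g get pv,pvc -n data
g describe pvc pv1claim -n data
g describe pod my-data-pod -n data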
Q21) List the internal IPs of all nodes of the given cluster and save the result to a file /root/InternalIPList.
The answer should be in the format: Internal IP of 1st node (space) Internal IP of 2nd node (on a single line).
ANOTHER SOLUTION
----------------
13 kubectl get nodes -o wide | grep -v "INTERNAL-IP" | awk '{print " " $6}' | tr -d "\n"
14 sudo cat /root/internalIPList
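A JSONPath variant that produces the same single-line output:

g get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}' | sudo tee /root/InternalIPList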
Q24
We have 3 worker nodes in our cluster; create a DaemonSet (name prod-pod, image=nginx) on each node except worker node8.
alias g=kubectl
g get nodes
for i in 6 7 8 9; do g describe node node$i |grep -i taint; done
g taint node node8 env=uat:NoSchedule
for i in 6 7 8 9; do g describe node node$i |grep -i taint; done
g create deployment prod-pod --image=nginx --dry-run=client -o yaml
g create deployment prod-pod --image=nginx --dry-run=client -o yaml | tee prod-pod.yaml
vi prod-pod.yaml
#######
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: prod-pod
  name: prod-pod
spec:
  selector:
    matchLabels:
      app: prod-pod
  template:
    metadata:
      labels:
        app: prod-pod
    spec:
      containers:
      - image: nginx
        name: nginx
#######
g create -f prod-pod.yaml
g get pods -o wide
echo 'apiVersion: v1
kind: Pod
metadata:
  name: pod-sec-file
spec:
  containers:
  - image: redis
    name: redis
    volumeMounts:
    - name: sec1
      mountPath: "/secrets"
  volumes:
  - name: sec1
    secret:
      secretName: sec1' | tee pod-sec-file.yaml
##
echo 'apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: pod-sec-env
spec:
  containers:
  - image: redis
    name: redis
    env:
    - name: CONFIDENTIAL
      valueFrom:
        secretKeyRef:
          name: sec1
          key: password' | tee pod-sec-env.yaml
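Both pods reference a Secret named sec1 with a password key; one way to create it (the value here is only a placeholder) is:

g create secret generic sec1 --from-literal=password=Pa55w0rd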
CONFIG MAP
management.endpoints.enabled-by-default=true
management.endpoint.info.enabled=true
management.security.enabled=false
management.endpoints.web.exposure.include=*
server.port= 9000
server.servlet.context-path=/oracle
oracleprops.greeting= Thank you and visit again - altered
oracleprops.greeting1= New Data
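Assuming the properties above are saved as application.properties, the ConfigMap mounted by the pod below can be created with:

g create configmap config-map2 --from-file=application.properties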
apiVersion: v1
kind: Pod
metadata:
  name: myconfigpod1
  labels:
    name: myconfigpod1
spec:
  containers:
  - name: myconfigpod1
    image: redis
    volumeMounts:
    - name: foo
      mountPath: "/etc/config"
      readOnly: true
  volumes:
  - name: foo
    configMap:
      name: config-map2
16 g apply -f example52.yml
17 g get po
18 g exec -it myconfigpod1 -- /bin/bash
HPA
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-resource-metrics-memory
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: tomcat-deploy
  minReplicas: 3
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: AverageValue
        averageValue: 500Mi
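This HPA needs the metrics-server add-on to report memory usage; once applied, check it with:

g get hpa hpa-resource-metrics-memory
g describe hpa hpa-resource-metrics-memory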