Lesson 12.5: Practice Exam Set 2


Question 1

Create a new pod called admin-pod with the image busybox. Allow the pod to set the system time. The container should sleep for 3200 seconds. The pod should be in the production namespace.

[root@master practice-2]# kubectl run admin-pod --image=busybox:latest --namespace=production --dry-run=client -o yaml --command -- sleep 3200 > 1.yml 
 
[root@master practice-2]# vim 1.yml    # add the SYS_TIME capability under securityContext
[root@master practice-2]# cat 1.yml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: admin-pod
  name: admin-pod
  namespace: production 
spec:
  containers:
  - command:
    - sleep
    - "3200"
    image: busybox:latest
    name: admin-pod
    securityContext:
      capabilities:
        add: ["SYS_TIME"]
 
[root@master practice-2]# kubectl apply -f 1.yml 
pod/admin-pod created
 
[root@master practice-2]# kubectl get pods -n production 
NAME        READY   STATUS    RESTARTS   AGE
admin-pod   1/1     Running   0          12s
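
As an optional check (a sketch; the jsonpath query just reads back the field we set), confirm the capability landed on the container:

[root@master practice-2]# kubectl get pod admin-pod -n production -o jsonpath='{.spec.containers[0].securityContext.capabilities}'
{"add":["SYS_TIME"]}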

Question 2

A KubeConfig file called config has been created in ~/.kube/config. There is something wrong with the configuration. Troubleshoot and fix it.

View your existing configuration with the command kubectl config view

[root@master practice-2]# kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://127.0.0.1:46411
  name: kind-cka-cluster1
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://127.0.0.1:45421
  name: kind-dev
contexts:
- context:
    cluster: kind-cka-cluster1
    user: kind-cka-cluster1
  name: kind-cka-cluster1
- context:
    cluster: kind-dev
    user: kind-dev
  name: kind-dev
current-context: kind-dev
kind: Config
preferences: {}
users:
- name: kind-cka-cluster1
  user:
    client-certificate-data: DATA+OMITTED
    client-key-data: DATA+OMITTED
- name: kind-dev
  user:
    client-certificate-data: DATA+OMITTED
    client-key-data: DATA+OMITTED

The certificates are not the issue; the problem lies in the server URL/port. Compare the ports in the configuration above against the ones the cluster actually listens on, edit the config file with vim, save the changes, then view the config again with kubectl config view.

[root@master practice-2]# cat ~/.kube/config 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ...
    server: https://127.0.0.1:46411
  name: kind-cka-cluster1
- cluster:
    certificate-authority-data: ...
    server: https://127.0.0.1:45421
  name: kind-dev
contexts:
- context:
    cluster: kind-cka-cluster1
    user: kind-cka-cluster1
  name: kind-cka-cluster1
- context:
    cluster: kind-dev
    user: kind-dev
  name: kind-dev
current-context: kind-dev
kind: Config
preferences: {}
users:
- name: kind-cka-cluster1
  user:
    client-certificate-data: ...
    client-key-data: ...
- name: kind-dev
  user:
    client-certificate-data: ...
    client-key-data: ...
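
If the port is wrong, kubectl fails with a connection error. On a kind cluster you can cross-check against the port Docker actually publishes for the control-plane container (port numbers below are illustrative):

[root@master practice-2]# kubectl get nodes
The connection to the server 127.0.0.1:45421 was refused - did you specify the right host or port?
 
[root@master practice-2]# docker ps --format '{{.Names}}: {{.Ports}}' | grep control-plane
dev-control-plane: 127.0.0.1:33445->6443/tcp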

Question 3

Create a new deployment called web-proj-268, with image nginx:1.16 and 1 replica. Next, upgrade the deployment to version 1.17 using a rolling update. Make sure that the version upgrade is recorded in the resource annotation.

# version nginx:1.16 
[root@master practice-2]# vim q3.yml 
[root@master practice-2]# cat q3.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web-proj-268
  name: web-proj-268
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-proj-268
  template:
    metadata:
      labels:
        app: web-proj-268
      annotations:
        kubernetes.io/change-cause: "Deployment of nginx version 1.16"
    spec:
      containers:
      - image: nginx:1.16
        name: nginx
[root@master practice-2]# kubectl apply -f q3.yml 
deployment.apps/web-proj-268 created
 
[root@master practice-2]# kubectl rollout history deploy web-proj-268 
deployment.apps/web-proj-268 
REVISION  CHANGE-CAUSE
1         Deployment of nginx version 1.16
 
# Version nginx:1.17
 
[root@master practice-2]# vim q3.yml 
[root@master practice-2]# cat q3.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web-proj-268
  name: web-proj-268
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-proj-268
  template:
    metadata:
      labels:
        app: web-proj-268
      annotations:
        kubernetes.io/change-cause: "Deployment of nginx version 1.17"
    spec:
      containers:
      - image: nginx:1.17
        name: nginx
[root@master practice-2]# kubectl apply -f q3.yml 
deployment.apps/web-proj-268 configured
 
[root@master practice-2]# kubectl rollout history deploy web-proj-268 
deployment.apps/web-proj-268 
REVISION  CHANGE-CAUSE
1         Deployment of nginx version 1.16
2         Deployment of nginx version 1.17
 
# version nginx:latest
 
[root@master practice-2]# vim q3.yml 
[root@master practice-2]# cat q3.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web-proj-268
  name: web-proj-268
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-proj-268
  template:
    metadata:
      labels:
        app: web-proj-268
      annotations:
        kubernetes.io/change-cause: "Deployment of nginx version latest"
    spec:
      containers:
      - image: nginx:latest
        name: nginx
[root@master practice-2]# kubectl apply -f q3.yml 
deployment.apps/web-proj-268 configured
 
[root@master practice-2]# kubectl rollout history deploy web-proj-268 
deployment.apps/web-proj-268 
REVISION  CHANGE-CAUSE
1         Deployment of nginx version 1.16
2         Deployment of nginx version 1.17
3         Deployment of nginx version latest
 
# Undo to version 1.16 --to-revision=1
[root@master practice-2]# kubectl rollout undo deployment web-proj-268  --to-revision=1
deployment.apps/web-proj-268 rolled back
[root@master practice-2]# kubectl get pods 
NAME                            READY   STATUS    RESTARTS   AGE
web-proj-268-78c5764bc8-ldvwh   1/1     Running   0          7s
[root@master practice-2]# kubectl describe pod web-proj-268-78c5764bc8-ldvwh | grep -i image 
    Image:          nginx:1.16
 
# Undo to version 1.17 --to-revision=2
[root@master practice-2]# kubectl rollout undo deploy web-proj-268 --to-revision=2
deployment.apps/web-proj-268 rolled back
[root@master practice-2]# kubectl get pods 
NAME                            READY   STATUS    RESTARTS   AGE
web-proj-268-7b99bdd4f4-fdn5d   1/1     Running   0          5s
[root@master practice-2]# kubectl describe pod web-proj-268-7b99bdd4f4-fdn5d | grep -i image: 
    Image:          nginx:1.17
 
# undo to version latest --to-revision=3
[root@master practice-2]# kubectl rollout undo deploy web-proj-268 --to-revision=3
deployment.apps/web-proj-268 rolled back
[root@master practice-2]# kubectl get pods 
NAME                            READY   STATUS    RESTARTS   AGE
web-proj-268-6c9ff76bc8-zncv4   1/1     Running   0          3s
[root@master practice-2]# kubectl describe pod web-proj-268-6c9ff76bc8-zncv4 | grep -i image: 
    Image:          nginx:latest 
  
[root@master practice-2]# kubectl rollout history deploy web-proj-268 
deployment.apps/web-proj-268 
REVISION  CHANGE-CAUSE
4         Deployment of nginx version 1.16
5         Deployment of nginx version 1.17
6         Deployment of nginx version latest
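
An equivalent imperative flow (a sketch, using the same deployment and container name) sets the image and records the change cause via the deployment's own annotation, which is what rollout history reads:

[root@master practice-2]# kubectl set image deployment/web-proj-268 nginx=nginx:1.17
[root@master practice-2]# kubectl annotate deployment/web-proj-268 kubernetes.io/change-cause="Deployment of nginx version 1.17" --overwrite
[root@master practice-2]# kubectl rollout history deployment web-proj-268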

Question 4

Create a new deployment called web-003. Scale the deployment to 3 replicas. Make sure the desired number of pods is always running.

  • This question is based on troubleshooting.
[root@master practice-2]# kubectl create deployment web-003 --image=nginx:latest --replicas=3 --dry-run=client -o yaml > q4.yml 
[root@master practice-2]# vim q4.yml 
[root@master practice-2]# cat q4.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web-003
  name: web-003
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-003
  template:
    metadata:
      labels:
        app: web-003
    spec:
      containers:
      - image: nginx:latest
        name: nginx
[root@master practice-2]# kubectl apply -f q4.yml 
deployment.apps/web-003 created
[root@master practice-2]# kubectl get deploy 
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
web-003   0/3     0            0           6s
[root@master practice-2]# kubectl get deploy 
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
web-003   0/3     0            0           12s
  • You can see that the deployment has no pods available, so we need to check the pods in the kube-system namespace.
  • Here, we can see that kube-controller-manager-dev-control-plane is in CrashLoopBackOff status.
  • So we check the static pod manifest files for errors.
[root@master practice-2]# kubectl get pods -n kube-system 
NAME                                        READY   STATUS             RESTARTS      AGE
calico-kube-controllers-77969b7d87-gh6pf    1/1     Running            7 (15h ago)   4d17h
calico-node-5qxtf                           1/1     Running            7 (15h ago)   10d
calico-node-glgmq                           1/1     Running            6 (15h ago)   10d
calico-node-pjhtn                           1/1     Running            8 (15h ago)   10d
coredns-668d6bf9bc-hb87b                    1/1     Running            1 (15h ago)   4d17h
coredns-668d6bf9bc-xrljm                    1/1     Running            4 (15h ago)   10d
etcd-dev-control-plane                      1/1     Running            0             15h
kube-apiserver-dev-control-plane            1/1     Running            0             15h
kube-controller-manager-dev-control-plane   0/1     CrashLoopBackOff   5 (50s ago)   4m2s
kube-proxy-2dqnc                            1/1     Running            5 (15h ago)   10d
kube-proxy-c227m                            1/1     Running            5 (15h ago)   10d
kube-proxy-fpj9c                            1/1     Running            5 (15h ago)   10d
kube-scheduler-dev-control-plane            1/1     Running            1 (15h ago)   10d
metrics-server-55d46898f-8bvpr              1/1     Running            2 (15h ago)   3d23h
 
[root@master practice-2]# kubectl describe pod kube-controller-manager-dev-control-plane -n kube-system 
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       StartError
      Message:      failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: exec: "kube-controller-man": executable file not found in $PATH
 
# Troubleshoot the manifest file 
[root@master practice-2]# docker exec -it dev-control-plane bash
root@dev-control-plane:/# cd /etc/kubernetes/manifests/
root@dev-control-plane:/etc/kubernetes/manifests# ls
etcd.yaml  kube-apiserver.yaml	kube-controller-manager.yaml  kube-scheduler.yaml
root@dev-control-plane:/etc/kubernetes/manifests# vim kube-controller-manager.yaml 
 
# Change the command 
spec:
  containers:
  - command:
    - kube-controller-manager       # fixed: the command was mistyped as kube-controller-man
# Check the pod status 
[root@master practice-2]# kubectl get pods -n kube-system 
NAME                                        READY   STATUS    RESTARTS      AGE
calico-kube-controllers-77969b7d87-gh6pf    1/1     Running   7 (15h ago)   4d17h
calico-node-5qxtf                           1/1     Running   7 (15h ago)   10d
calico-node-glgmq                           1/1     Running   6 (15h ago)   10d
calico-node-pjhtn                           1/1     Running   8 (15h ago)   10d
coredns-668d6bf9bc-hb87b                    1/1     Running   1 (15h ago)   4d17h
coredns-668d6bf9bc-xrljm                    1/1     Running   4 (15h ago)   10d
etcd-dev-control-plane                      1/1     Running   0             15h
kube-apiserver-dev-control-plane            1/1     Running   0             15h
kube-controller-manager-dev-control-plane   1/1     Running   0             80s
kube-proxy-2dqnc                            1/1     Running   5 (15h ago)   10d
kube-proxy-c227m                            1/1     Running   5 (15h ago)   10d
kube-proxy-fpj9c                            1/1     Running   5 (15h ago)   10d
kube-scheduler-dev-control-plane            1/1     Running   1 (15h ago)   10d
metrics-server-55d46898f-8bvpr              1/1     Running   2 (15h ago)   3d23h
 
# The deployment and its pods are now running successfully 
[root@master practice-2]# kubectl get deploy 
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
web-003   3/3     3            3           11m
 
[root@master practice-2]# kubectl get pods 
NAME                      READY   STATUS    RESTARTS   AGE
web-003-9bc9fc76b-6445s   1/1     Running   0          99s
web-003-9bc9fc76b-j4gxj   1/1     Running   0          99s
web-003-9bc9fc76b-r5fvq   1/1     Running   0          99s
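
On a kubeadm host (rather than a kind cluster running in Docker), the same fix is done directly on the node; a sketch:

# Static pod manifests live in the same path on a kubeadm node:
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
# The kubelet watches this directory and recreates the static pod after the file is saved.
crictl ps | grep kube-controller-manager    # verify the container is running again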

Question 5

Upgrade the cluster (control plane and worker node) from v1.32.1 to v1.32.0. Make sure you drain both nodes and make them available again after the upgrade.

Set the node named controlplane as unavailable and reschedule all the pods running on it.

controlplane:~$ kubectl get nodes 
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   12d   v1.32.1
node01         Ready    <none>          12d   v1.32.1
 
controlplane:~$ kubectl drain controlplane --ignore-daemonsets 
 
controlplane:~$ kubectl get nodes
NAME           STATUS                     ROLES           AGE   VERSION
controlplane   Ready,SchedulingDisabled   control-plane   12d   v1.32.1
node01         Ready                      <none>          12d   v1.32.1
controlplane:~$ apt-get install kubeadm=1.32.0-1.1
 
controlplane:~$ kubeadm upgrade apply v1.32.0
[upgrade] SUCCESS! A control plane node of your cluster was upgraded to "v1.32.0".
 
controlplane:~$ apt-get install kubelet=1.32.0-1.1 kubectl=1.32.0-1.1
controlplane:~$ systemctl restart kubelet
controlplane:~$ kubectl get nodes
NAME           STATUS   ROLES                              AGE   VERSION
controlplane   Ready    control-plane,SchedulingDisabled   12d   v1.32.0
node01         Ready    <none>                             12d   v1.32.1
 
controlplane:~$ kubectl uncordon controlplane
 
controlplane:~$ kubectl get nodes 
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   12d   v1.32.0
node01         Ready    <none>          12d   v1.32.1
controlplane:~$ kubectl drain node01 --ignore-daemonsets 
controlplane:~$ kubectl get nodes
NAME           STATUS                     ROLES           AGE   VERSION
controlplane   Ready                      control-plane   12d   v1.32.0
node01         Ready,SchedulingDisabled   <none>          12d   v1.32.1
 
controlplane:~$ ssh node01 
 
node01:~$ apt-get install kubeadm=1.32.0-1.1
 
node01:~$ kubeadm upgrade node
 
node01:~$ apt-get install kubelet=1.32.0-1.1 kubectl=1.32.0-1.1
 
node01:~$ systemctl restart kubelet 
node01:~$ exit
logout
Connection to node01 closed.
 
controlplane:~$ kubectl get nodes
NAME           STATUS                     ROLES           AGE   VERSION
controlplane   Ready                      control-plane   12d   v1.32.0
node01         Ready,SchedulingDisabled   <none>          12d   v1.32.0
 
controlplane:~$ kubectl uncordon node01
node/node01 uncordoned
 
controlplane:~$ kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   12d   v1.32.0
node01         Ready    <none>          12d   v1.32.0
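
On a real exam cluster the kubeadm packages are usually version-held; a fuller control-plane sequence (a sketch, with the package repositories already configured) looks like this:

apt-get update
apt-mark unhold kubeadm && apt-get install -y kubeadm=1.32.0-1.1 && apt-mark hold kubeadm
kubeadm upgrade plan                 # review the target version before applying
kubeadm upgrade apply v1.32.0
apt-mark unhold kubelet kubectl && apt-get install -y kubelet=1.32.0-1.1 kubectl=1.32.0-1.1 && apt-mark hold kubelet kubectl
systemctl daemon-reload && systemctl restart kubelet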

Question 6

Deploy a web-load-5461 pod using the image nginx:1.17 with the labels set to tier=web

  • Weightage: 4% (simple)
[root@master practice-2]# kubectl run web-load-5461 --image=nginx:1.17 --labels tier=web --dry-run=client -o yaml > q6.yml 
[root@master practice-2]# vim q6.yml 
[root@master practice-2]# cat q6.yml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    tier: web
  name: web-load-5461
spec:
  containers:
  - image: nginx:1.17
    name: web-load-5461
 
[root@master practice-2]# kubectl apply -f q6.yml 
pod/web-load-5461 created
 
[root@master practice-2]# kubectl get pods --show-labels
NAME                      READY   STATUS    RESTARTS   AGE     LABELS
web-load-5461             1/1     Running   0          18s     tier=web
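
You can also select the pod by its label to confirm (output illustrative):

[root@master practice-2]# kubectl get pods -l tier=web
NAME            READY   STATUS    RESTARTS   AGE
web-load-5461   1/1     Running   0          30s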

Question 7

A pod "my-nginx-pod" (image=nginx) in custom namespace is not running. Find the problem and fix it and make it running. Note: All the supported files has been placed at root directory.

Initial Config. Understand the problem.

[root@master pv]# cat pv.yml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-pv-volume
  labels:
    type: local
  namespace: production
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
 
[root@master pv]# cat pvc.yml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@master pv]# cat pod.yml 
apiVersion: v1 
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx 
spec:
  volumes:
  - name: nginx-pv-storage 
    persistentVolumeClaim: 
      claimName: nginx-pv-claim
  containers:
  - name: nginx-container
    image: nginx:latest
    ports:
    - containerPort: 80 
    volumeMounts:
      - mountPath: /use/share/nginx/html
        name: nginx-pv-storage
 
[root@master pv]# kubectl get all 
NAME        READY   STATUS    RESTARTS   AGE
pod/nginx   0/1     Pending   0          42s
 
NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   11d
[root@master pv]# 

Remove the namespace field from pv.yml (a PersistentVolume is not namespaced), then apply the PV and PVC.

[root@master pv]# cat pv.yml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
 
[root@master pv]# kubectl apply -f pv.yml 
persistentvolume/nginx-pv-volume created
 
[root@master pv]# cat pvc.yml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@master pv]# kubectl apply -f pvc.yml 
persistentvolumeclaim/nginx-pv-claim created
 
[root@master pv]# kubectl get pvc
NAME             STATUS   VOLUME            CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
nginx-pv-claim   Bound    nginx-pv-volume   10Gi       RWO            manual         <unset>                 4s
 

The pod is still Pending. Describe the pod and look for the error: the scheduler reports that the PersistentVolumeClaim referenced by the pod cannot be found.

[root@master pv]# kubectl get pods 
NAME    READY   STATUS    RESTARTS   AGE
nginx   0/1     Pending   0          4s
 
[root@master pv]# kubectl describe pod
Name:             nginx
Namespace:        default
Priority:         0
Service Account:  default
Node:             <none>
Labels:           app=nginx
Annotations:      <none>
Status:           Pending
IP:               
IPs:              <none>
Containers:
  nginx-container:
    Image:        nginx:latest
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:
      /use/share/nginx/html from nginx-pv-storage (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pvr2s (ro)
Conditions:
  Type           Status
  PodScheduled   False 
Volumes:
  nginx-pv-storage:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  app-pv-claim
    ReadOnly:   false
  kube-api-access-pvr2s:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  9s    default-scheduler  0/3 nodes are available: persistentvolumeclaim "app-pv-claim" not found. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling.

A PVC and the pod using it must be in the same namespace. Here the PVC was created in the default namespace while the pod runs in production, so we modify the PVC file to deploy it into the production namespace. We delete and recreate the PV as well so it can bind to the new claim.

[root@master pv]# kubectl get pvc -n production 
No resources found in production namespace.
 
[root@master pv]# kubectl delete pvc nginx-pv-claim 
persistentvolumeclaim "nginx-pv-claim" deleted
[root@master pv]# kubectl delete pv nginx-pv-volume 
persistentvolume "nginx-pv-volume" deleted
 
# Modify the PVC file; the PV is not namespaced  
[root@master pv]# cat pvc.yml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-pv-claim
  namespace: production 
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
 
# We don't need to change the PV namespace, as it is not a namespaced resource
[root@master pv]# kubectl api-resources | grep persi
NAME                                SHORTNAMES   APIVERSION                          NAMESPACED   KIND
persistentvolumeclaims              pvc          v1                                  true         PersistentVolumeClaim
persistentvolumes                   pv           v1                                  false        PersistentVolume
 
# Apply and check whether the claim is bound
[root@master pv]# kubectl apply -f pv.yml 
persistentvolume/nginx-pv-volume created
[root@master pv]# kubectl apply -f pvc.yml 
persistentvolumeclaim/nginx-pv-claim created
[root@master pv]# kubectl get pv 
NAME              CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
nginx-pv-volume   10Gi       RWO            Retain           Bound    production/nginx-pv-claim   manual         <unset>                          7s
[root@master pv]# kubectl get pvc -n production 
NAME             STATUS   VOLUME            CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
nginx-pv-claim   Bound    nginx-pv-volume   10Gi       RWO            manual         <unset>                 8s
 
# Check the pod status
[root@master pv]# kubectl get pods -n production 
NAME    READY   STATUS    RESTARTS   AGE
nginx   1/1     Running   0          9s
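
As a final check (optional), confirm the pod references the claim in the same namespace:

[root@master pv]# kubectl get pod nginx -n production -o jsonpath='{.spec.volumes[*].persistentVolumeClaim.claimName}'
nginx-pv-claim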

Question 8

Create a multi-container pod, "multi-pod", in the development namespace using the images nginx and redis.

[root@master practice-2]# kubectl run multi-pod --image=nginx --dry-run=client -o yaml > multi-pod.yml 
[root@master practice-2]# vim multi-pod.yml 
[root@master practice-2]# cat multi-pod.yml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: multi-pod
  name: multi-pod
  namespace: development
spec:
  containers:
  - image: nginx
    name: nginx
  - image: redis
    name: redis
[root@master practice-2]# kubectl apply -f multi-pod.yml 
pod/multi-pod created
[root@master practice-2]# kubectl get pods -n development 
NAME        READY   STATUS    RESTARTS   AGE
multi-pod   2/2     Running   0          14s
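
To reach a specific container in a multi-container pod, pass -c; for example, pinging redis (output shown assuming the container is healthy):

[root@master practice-2]# kubectl exec -n development multi-pod -c redis -- redis-cli ping
PONG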

Question 9

A pod "nginx-pod" (image=nginx) in default namespace is not running. Find the problem and fix it to make it running. (Weightage 7%)

Describe the pod and check the reason it is stuck in Pending

[root@master practice-2]# cat pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec: 
  containers:
  - name: nginx
    image: nginx
 
[root@master practice-2]# kubectl describe pod nginx 
Name:             nginx
Namespace:        default
Priority:         0
Service Account:  default
Node:             <none>
Labels:           app=nginx
Annotations:      <none>
Status:           Pending
IP:               
IPs:              <none>
Containers:
  nginx:
    Image:        nginx
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9774j (ro)
Conditions:
  Type           Status
  PodScheduled   False 
Volumes:
  kube-api-access-9774j:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  19s   default-scheduler  0/3 nodes are available: 1 node(s) had untolerated taint {color: blue}, 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 1 node(s) were unschedulable. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling.

Fix the scheduling failure by adding a toleration for the untolerated taint {color: blue}

[root@master practice-2]# vim pod.yml 
[root@master practice-2]# cat pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec: 
  containers:
  - name: nginx
    image: nginx
  tolerations:
  - key: "color"
    operator: "Equal"
    value: "blue"
    effect: "NoSchedule"
 
[root@master practice-2]# kubectl apply -f pod.yml 
pod/nginx configured
 
[root@master practice-2]# kubectl get pods -owide 
NAME    READY   STATUS    RESTARTS   AGE     IP                NODE         NOMINATED NODE   READINESS GATES
nginx   1/1     Running   0          3m20s   192.168.171.135   dev-worker   <none>           <none>
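
If the question allowed modifying the node instead of the pod, removing the taint would also unblock scheduling; a sketch (the node name is whichever node carries the taint):

[root@master practice-2]# kubectl describe nodes | grep -i taint        # find the tainted node
[root@master practice-2]# kubectl taint node <node-name> color=blue:NoSchedule-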

Question 10

Create a new deployment called nginx-deploy, with nginx:1.16 and 8 replicas. There are 5 worker nodes in the cluster. Make sure no pods get deployed on 2 of the worker nodes (cka-cluster3-worker, cka-cluster3-worker2). (Weightage: 5%)

You don't need to perform this step in the exam; it just creates a kind cluster with 5 worker nodes.

[root@master kubernetes]# cat 5-wn-config.yaml 
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:  
  - containerPort: 30003
    hostPort: 30003
- role: worker
- role: worker
- role: worker
- role: worker
- role: worker
networking:
  disableDefaultCNI: true
  podSubnet: 192.168.0.0/16
 
[root@master kubernetes]# kind create cluster --image kindest/node:v1.29.14@sha256:8703bd94ee24e51b778d5556ae310c6c0fa67d761fae6379c8e0bb480e6fea29 --name cka-cluster3 --config 5-wn-config.yaml 
Creating cluster "cka-cluster3" ...
 Ensuring node image (kindest/node:v1.29.14) 🖼
 Preparing nodes 📦 📦 📦 📦 📦 📦  
 Writing configuration 📜 
 Starting control-plane 🕹️ 
 Installing StorageClass 💾 
 Joining worker nodes 🚜 
Set kubectl context to "kind-cka-cluster3"
You can now use your cluster with:
 
kubectl cluster-info --context kind-cka-cluster3
 
Have a nice day! 👋
 
[root@master kubernetes]# kubectl config use-context kind-cka-cluster3
Switched to context "kind-cka-cluster3".

Cordon the unwanted nodes

[root@master ~]# kubectl cordon cka-cluster3-worker 
node/cka-cluster3-worker cordoned
 
[root@master ~]# kubectl cordon cka-cluster3-worker2
node/cka-cluster3-worker2 cordoned
 
[root@master practice-2]# kubectl get nodes 
NAME                         STATUS                     ROLES           AGE    VERSION
cka-cluster3-control-plane   Ready                      control-plane   130m   v1.29.14
cka-cluster3-worker          Ready,SchedulingDisabled   <none>          129m   v1.29.14
cka-cluster3-worker2         Ready,SchedulingDisabled   <none>          129m   v1.29.14
cka-cluster3-worker3         Ready                      <none>          129m   v1.29.14
cka-cluster3-worker4         Ready                      <none>          129m   v1.29.14
cka-cluster3-worker5         Ready                      <none>          129m   v1.29.14

Apply the deployment and check which nodes the pods were scheduled on.

[root@master practice-2]# kubectl create deployment nginx-deploy --image=nginx:1.16 --replicas=8 --dry-run=client -o yaml > q10.yml 
 
[root@master practice-2]# cat q10.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy
  name: nginx-deploy
spec:
  replicas: 8
  selector:
    matchLabels:
      app: nginx-deploy
  template:
    metadata:
      labels:
        app: nginx-deploy
    spec:
      containers:
      - image: nginx:1.16
        name: nginx
 
[root@master practice-2]# kubectl apply -f q10.yml 
deployment.apps/nginx-deploy created
 
[root@master practice-2]# kubectl get deploy 
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deploy   8/8     8            8           5s
 
[root@master practice-2]# kubectl get pods -o wide 
NAME                            READY   STATUS    RESTARTS   AGE   IP           NODE                   NOMINATED NODE   READINESS GATES
nginx-deploy-68d7c886bf-2db8q   1/1     Running   0          14s   10.0.1.182   cka-cluster3-worker5   <none>           <none>
nginx-deploy-68d7c886bf-6zddq   1/1     Running   0          14s   10.0.5.150   cka-cluster3-worker3   <none>           <none>
nginx-deploy-68d7c886bf-8p82c   1/1     Running   0          14s   10.0.5.213   cka-cluster3-worker3   <none>           <none>
nginx-deploy-68d7c886bf-h4mcc   1/1     Running   0          14s   10.0.0.12    cka-cluster3-worker4   <none>           <none>
nginx-deploy-68d7c886bf-jnhf4   1/1     Running   0          14s   10.0.0.135   cka-cluster3-worker4   <none>           <none>
nginx-deploy-68d7c886bf-lr64c   1/1     Running   0          14s   10.0.0.42    cka-cluster3-worker4   <none>           <none>
nginx-deploy-68d7c886bf-th2gk   1/1     Running   0          14s   10.0.1.80    cka-cluster3-worker5   <none>           <none>
nginx-deploy-68d7c886bf-tslnv   1/1     Running   0          14s   10.0.1.124   cka-cluster3-worker5   <none>           <none>

Uncordon the nodes

[root@master practice-2]# kubectl uncordon cka-cluster3-worker 
node/cka-cluster3-worker uncordoned
 
[root@master practice-2]# kubectl uncordon cka-cluster3-worker2
node/cka-cluster3-worker2 uncordoned
 
[root@master practice-2]# kubectl get nodes 
NAME                         STATUS   ROLES           AGE    VERSION
cka-cluster3-control-plane   Ready    control-plane   130m   v1.29.14
cka-cluster3-worker          Ready    <none>          130m   v1.29.14
cka-cluster3-worker2         Ready    <none>          130m   v1.29.14
cka-cluster3-worker3         Ready    <none>          130m   v1.29.14
cka-cluster3-worker4         Ready    <none>          130m   v1.29.14
cka-cluster3-worker5         Ready    <none>          130m   v1.29.14
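
Cordoning affects all new scheduling on those nodes. An alternative that scopes the restriction to this deployment alone (a sketch) is node affinity in the pod template, excluding the two nodes by hostname:

    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values:
                - cka-cluster3-worker
                - cka-cluster3-worker2
      containers:
      - image: nginx:1.16
        name: nginx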

Question 11

Create a ReplicaSet (name: web-pod, image: nginx:1.16, replicas: 3). There is already a pod running in the cluster. Make sure that the total count of pods running in the cluster is not more than 3. (Weightage 5%)

View the labels of the existing pod:

[root@master practice-2]# kubectl get pods --show-labels
NAME       READY   STATUS    RESTARTS   AGE   LABELS
demo-pod   1/1     Running   0          15s   app=web

Create a ReplicaSet YAML file; there is no imperative command for this.

[root@master practice-2]# cat web-pod.yml 
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: web-pod
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web 
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: nginx
          image: nginx:1.16
 
[root@master practice-2]# kubectl apply -f web-pod.yml 
replicaset.apps/web-pod created
 
[root@master practice-2]# kubectl get rs 
NAME      DESIRED   CURRENT   READY   AGE
web-pod   3         3         2       5s
 
[root@master practice-2]# kubectl get pods --show-labels 
NAME            READY   STATUS    RESTARTS   AGE   LABELS
demo-pod        1/1     Running   0          12m   app=web
web-pod-d5lvn   1/1     Running   0          44s   app=web
web-pod-xknt8   1/1     Running   0          44s   app=web

The ReplicaSet creates only two new pods; the existing demo-pod is adopted by the ReplicaSet because it carries the matching label app=web, bringing the total to 3 replicas.
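
You can verify the adoption by checking the owner reference on demo-pod (output illustrative):

[root@master practice-2]# kubectl get pod demo-pod -o jsonpath='{.metadata.ownerReferences[0].name}'
web-pod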

Question 12

There are 3 nodes in the cluster. Create a DaemonSet (name: my-pod, image: nginx) on each node except one node, dev-worker2.

Taint the node to be excluded. (The question names dev-worker2; in this practice cluster we exclude cka-cluster3-worker instead.)

[root@master practice-2]# kubectl get nodes 
NAME                         STATUS   ROLES           AGE   VERSION
cka-cluster3-control-plane   Ready    control-plane   8h    v1.29.14
cka-cluster3-worker          Ready    <none>          8h    v1.29.14
cka-cluster3-worker2         Ready    <none>          8h    v1.29.14
cka-cluster3-worker3         Ready    <none>          8h    v1.29.14
cka-cluster3-worker4         Ready    <none>          8h    v1.29.14
cka-cluster3-worker5         Ready    <none>          8h    v1.29.14
 
[root@master practice-2]# kubectl taint node cka-cluster3-worker env=no:NoSchedule 
node/cka-cluster3-worker tainted
 
[root@master practice-2]# kubectl describe node cka-cluster3-worker | grep -i taint 
Taints:             env=no:NoSchedule
[root@master practice-2]# kubectl create deployment my-pod --image=nginx --dry-run=client -o yaml > ds.yml 
[root@master practice-2]# vim ds.yml 
[root@master practice-2]# cat ds.yml 
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: my-pod
  name: my-pod
spec:
  selector:
    matchLabels:
      app: my-pod
  template:
    metadata:
      labels:
        app: my-pod
    spec:
      containers:
      - image: nginx
        name: nginx
[root@master practice-2]# kubectl apply -f ds.yml 
daemonset.apps/my-pod created
[root@master practice-2]# kubectl get ds -owide 
NAME     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE    CONTAINERS   IMAGES   SELECTOR
my-pod   4         4         4       4            4           <none>          112s   nginx        nginx    app=my-pod
 
[root@master practice-2]# kubectl get pods -owide 
NAME            READY   STATUS    RESTARTS      AGE     IP           NODE                   NOMINATED NODE   READINESS GATES
demo-pod        1/1     Running   1 (30m ago)   6h43m   10.0.1.86    cka-cluster3-worker5   <none>           <none>
my-pod-454c2    1/1     Running   0             115s    10.0.0.198   cka-cluster3-worker4   <none>           <none>
my-pod-jt6h9    1/1     Running   0             115s    10.0.4.127   cka-cluster3-worker2   <none>           <none>
my-pod-lgzkd    1/1     Running   0             115s    10.0.1.141   cka-cluster3-worker5   <none>           <none>
my-pod-mqr4t    1/1     Running   0             115s    10.0.5.145   cka-cluster3-worker3   <none>           <none>
web-pod-d5lvn   1/1     Running   1 (30m ago)   6h31m   10.0.0.18    cka-cluster3-worker4   <none>           <none>
web-pod-xknt8   1/1     Running   1 (30m ago)   6h31m   10.0.3.162   cka-cluster3-worker    <none>           <none>
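
Once done, the taint can be removed the same way it was added, with a trailing minus:

[root@master practice-2]# kubectl taint node cka-cluster3-worker env=no:NoSchedule-
node/cka-cluster3-worker untainted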

Question 13

Generate a file CKA0007.txt with the available resources of all the nodes in the Kubernetes cluster, using a custom-column format as shown below:

NAME                   AVAILABLE_MEMORY   AVAILABLE_CPU
cka-cluster3-worker    ...                ...
cka-cluster3-worker2   ...                ...
cka-cluster3-worker3   ...                ...
cka-cluster3-worker4   ...                ...
cka-cluster3-worker5   ...                ...

View the node status as JSON to find the field paths for the allocatable resources

[root@master practice-2]# kubectl get nodes -o json 
{
    "apiVersion": "v1",
    ...
    "items": [
        {
            "metadata": {
                "name": "cka-cluster3-worker5"
            },
            "status": {
                "allocatable": {
                    "cpu": "6",
                    ...
                    "memory": "4179948Ki"
                }
            }
        }
    ]
    ...
}
[root@master practice-2]# kubectl get node -o custom-columns=NAME:.metadata.name,AVAILABLE_MEMORY:.status.allocatable.memory,AVAILABLE_CPU:.status.allocatable.cpu
NAME                         AVAILABLE_MEMORY   AVAILABLE_CPU
cka-cluster3-control-plane   4179948Ki          6
cka-cluster3-worker          4179948Ki          6
cka-cluster3-worker2         4179948Ki          6
cka-cluster3-worker3         4179948Ki          6
cka-cluster3-worker4         4179948Ki          6
cka-cluster3-worker5         4179948Ki          6

Write the output to the file

[root@master practice-2]# kubectl get node -o custom-columns=NAME:.metadata.name,AVAILABLE_MEMORY:.status.allocatable.memory,AVAILABLE_CPU:.status.allocatable.cpu > CKA0007.txt 
[root@master practice-2]# cat CKA0007.txt 
NAME                         AVAILABLE_MEMORY   AVAILABLE_CPU
cka-cluster3-control-plane   4179948Ki          6
cka-cluster3-worker          4179948Ki          6
cka-cluster3-worker2         4179948Ki          6
cka-cluster3-worker3         4179948Ki          6
cka-cluster3-worker4         4179948Ki          6
cka-cluster3-worker5         4179948Ki          6
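
The expected output lists only worker nodes; if the control plane should be excluded, a label selector does it (a sketch, assuming the standard control-plane role label):

kubectl get node -l '!node-role.kubernetes.io/control-plane' \
  -o custom-columns=NAME:.metadata.name,AVAILABLE_MEMORY:.status.allocatable.memory,AVAILABLE_CPU:.status.allocatable.cpu > CKA0007.txt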

Question 14

There are various pods running in all the namespaces of the Kubernetes cluster. Write a command to "/opt/pods_asc.sh" which lists all the pods sorted by their AGE in ascending order.

Get all the pods with -A and sort them by creation timestamp

[root@master practice-2]# kubectl get pods -A --sort-by=.metadata.creationTimestamp
NAMESPACE            NAME                                                 READY   STATUS    RESTARTS        AGE
kube-system          kube-controller-manager-cka-cluster3-control-plane   1/1     Running   18 (23m ago)    13h
kube-system          kube-scheduler-cka-cluster3-control-plane            1/1     Running   18 (23m ago)    13h
local-path-storage   local-path-provisioner-55ff44bb8-b4pqp               1/1     Running   3 (4h45m ago)   13h
kube-system          kube-proxy-rc767                                     1/1     Running   5 (4h45m ago)   13h
kube-system          coredns-76f75df574-qrmb7                             1/1     Running   4 (4h45m ago)   13h
kube-system          coredns-76f75df574-tz7bh                             1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-2x68g                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-mtv59                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-gnbgw                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-ffljb                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-fdn98                                     1/1     Running   4 (4h45m ago)   13h
kube-system          cilium-qvfzj                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-wvrhx                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-t8vtg                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-2l9wk                                         1/1     Running   5 (4h45m ago)   12h
kube-system          cilium-operator-db96c7c9c-n9cwx                      1/1     Running   12 (23m ago)    12h
kube-system          cilium-operator-db96c7c9c-6mmvz                      1/1     Running   10 (55m ago)    12h
kube-system          cilium-dpvdk                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-4d6mt                                         1/1     Running   4 (4h45m ago)   12h
kube-system          kube-apiserver-cka-cluster3-control-plane            1/1     Running   0               4h45m
kube-system          etcd-cka-cluster3-control-plane                      1/1     Running   0               4h45m

Reverse the output with tac so the newest pods (smallest AGE) come first; note that the header line ends up at the bottom

[root@master practice-2]# kubectl get pods -A --sort-by=.metadata.creationTimestamp | tac
kube-system          etcd-cka-cluster3-control-plane                      1/1     Running   0               4h45m
kube-system          kube-apiserver-cka-cluster3-control-plane            1/1     Running   0               4h45m
kube-system          cilium-4d6mt                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-dpvdk                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-operator-db96c7c9c-6mmvz                      1/1     Running   10 (55m ago)    12h
kube-system          cilium-operator-db96c7c9c-n9cwx                      1/1     Running   12 (23m ago)    12h
kube-system          cilium-2l9wk                                         1/1     Running   5 (4h45m ago)   12h
kube-system          cilium-t8vtg                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-wvrhx                                         1/1     Running   4 (4h45m ago)   12h
kube-system          cilium-qvfzj                                         1/1     Running   4 (4h45m ago)   12h
kube-system          kube-proxy-fdn98                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-ffljb                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-gnbgw                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-mtv59                                     1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-2x68g                                     1/1     Running   4 (4h45m ago)   13h
kube-system          coredns-76f75df574-tz7bh                             1/1     Running   4 (4h45m ago)   13h
kube-system          coredns-76f75df574-qrmb7                             1/1     Running   4 (4h45m ago)   13h
kube-system          kube-proxy-rc767                                     1/1     Running   5 (4h45m ago)   13h
local-path-storage   local-path-provisioner-55ff44bb8-b4pqp               1/1     Running   3 (4h45m ago)   13h
kube-system          kube-scheduler-cka-cluster3-control-plane            1/1     Running   18 (23m ago)    13h
kube-system          kube-controller-manager-cka-cluster3-control-plane   1/1     Running   18 (23m ago)    13h
NAMESPACE            NAME                                                 READY   STATUS    RESTARTS        AGE

Write the command to the shell script and make it executable

[root@master practice-2]# echo "kubectl get pods -A --sort-by=.metadata.creationTimestamp | tac" > /opt/pods_asc.sh
[root@master practice-2]# cat /opt/pods_asc.sh 
kubectl get pods -A --sort-by=.metadata.creationTimestamp | tac
 
[root@master practice-2]# chmod +x /opt/pods_asc.sh 
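
Optionally, give the script a shebang so it runs standalone, and execute it to verify:

[root@master practice-2]# sed -i '1i #!/bin/bash' /opt/pods_asc.sh
[root@master practice-2]# /opt/pods_asc.sh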

Question 15

Create a deployment called web-proj-268, with image nginx:1.16 and 1 replica. Next, upgrade the deployment to version 1.17 using a rolling update. Make sure that the version upgrade is recorded in the resource annotation.

Create a deployment with nginx version 1.16

[root@master practice-2]# kubectl create deployment web-proj-268 --image=nginx:1.16 --replicas=1 --dry-run=client -o yaml > web-proj-268.yml 
[root@master practice-2]# vim web-proj-268.yml 
[root@master practice-2]# cat web-proj-268.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web-proj-268
  name: web-proj-268
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-proj-268
  template:
    metadata:
      labels:
        app: web-proj-268
    spec:
      containers:
      - image: nginx:1.16
        name: nginx
 
[root@master practice-2]# kubectl apply -f web-proj-268.yml 
deployment.apps/web-proj-268 created
 
[root@master practice-2]# kubectl get pods 
 
NAME                            READY   STATUS    RESTARTS   AGE
web-proj-268-68b8987b5d-57mql   1/1     Running   0          56s
 
[root@master practice-2]# kubectl describe pod web-proj-268-68b8987b5d-57mql | grep -i image:
    Image:          nginx:1.16

Change the image to nginx:1.17 and apply the change with --record

[root@master practice-2]# cat web-proj-268.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web-proj-268
  name: web-proj-268
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-proj-268
  template:
    metadata:
      labels:
        app: web-proj-268
    spec:
      containers:
      - image: nginx:1.17
        name: nginx
 
[root@master practice-2]# kubectl apply -f web-proj-268.yml --record 
Flag --record has been deprecated, --record will be removed in the future
deployment.apps/web-proj-268 configured
 
[root@master practice-2]# kubectl get pods 
NAME                            READY   STATUS    RESTARTS   AGE
web-proj-268-5564fbcd55-g85mf   1/1     Running   0          118s
[root@master practice-2]# kubectl describe pod web-proj-268-5564fbcd55-g85mf | grep -i image:
    Image:          nginx:1.17

View the rollout record

[root@master practice-2]# kubectl get deploy 
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
web-proj-268   1/1     1            1           6m4s
 
[root@master practice-2]# kubectl rollout history deployment web-proj-268
deployment.apps/web-proj-268 
REVISION  CHANGE-CAUSE
1         <none>
2         kubectl apply --filename=web-proj-268.yml --record=true
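
To watch a rolling update while it progresses, kubectl rollout status blocks until the rollout completes:

[root@master practice-2]# kubectl rollout status deployment web-proj-268
deployment "web-proj-268" successfully rolled out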

Question 16

Create a pod nginx-k8s using the image nginx and an initContainer "git-k8s" with image alpine/git. The volume mount path of the main container is "/usr/share/nginx/html". The default nginx index.html should be overridden via the shared volume, using the index.html cloned from "https://github.com/jhawithu/k8s-nginx".

[root@master ~]# kubectl run nginx-k8s --image=nginx --dry-run=client -o yaml > nginx-k8s.yml 
[root@master ~]# vim nginx-k8s.yml    # add the initContainer and the shared volume
[root@master ~]# cat nginx-k8s.yml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-k8s
  name: nginx-k8s
spec:
  containers:
  - image: nginx
    name: nginx-k8s
    ports:
    - containerPort: 80
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: my-data
  initContainers:
  - name: git-k8s
    image: alpine/git
    command: ['git','clone','https://github.com/jhawithu/k8s-nginx','/data']
    volumeMounts:
    - mountPath: "/data"
      name: my-data 
  volumes:
  - name: my-data 
    emptyDir: {}
 
[root@master ~]# kubectl apply -f nginx-k8s.yml 
pod/nginx-k8s created
 
[root@master ~]# kubectl get pods 
NAME        READY   STATUS            RESTARTS   AGE
nginx-k8s   0/1     PodInitializing   0          8s
 
[root@master ~]# kubectl get pods 
NAME        READY   STATUS    RESTARTS   AGE
nginx-k8s   1/1     Running   0          13s
[root@master ~]# kubectl exec -it nginx-k8s -- bash 
Defaulted container "nginx-k8s" out of: nginx-k8s, git-k8s (init)
root@nginx-k8s:/# cd /usr/share/nginx/html/ 
root@nginx-k8s:/usr/share/nginx/html# ls
README.md  index.html
root@nginx-k8s:/usr/share/nginx/html# cat index.html 
<html>
  <head>
    <title>I Love Kubernetes</title>
    <meta charset="utf-8" />
  </head>
  <body>
    <h1>
     Hello world !!!
    </h1>
  </body>
</html>
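
To see the page served by nginx without entering the container, port-forward and curl from the host (a sketch):

[root@master ~]# kubectl port-forward pod/nginx-k8s 8080:80 &
[root@master ~]# curl -s http://localhost:8080    # returns the cloned index.html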

Question 17

Create a deployment with the information mentioned in the table below, using the provided file "deployment.yml" at location "/root". There are some typos in the file that need to be fixed before the deployment can be created successfully in the default namespace.

Deployment Name   nginx-deployment
Image             nginx:latest
ContainerPort     80
Replicas          3
Create it using --dry-run=client, specifying the image, name, replicas, and port, then compare with the provided file and correct any errors in the YAML.
[root@master ~]# kubectl create deployment nginx-deployment --image=nginx:latest --replicas=3 --port=80 --dry-run=client -o yaml > deployment.yml 
[root@master ~]# vim deployment.yml 
[root@master ~]# cat deployment.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deployment
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-deployment
  template:
    metadata:
      labels:
        app: nginx-deployment
    spec:
      containers:
      - image: nginx:latest
        name: nginx
        ports:
        - containerPort: 80
 
[root@master ~]# kubectl apply -f deployment.yml 
deployment.apps/nginx-deployment created
 
[root@master ~]# kubectl get deployment
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   3/3     3            3           11s
 
[root@master ~]# kubectl get pods
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-864c7757b9-8mw4p   1/1     Running   0          15s
nginx-deployment-864c7757b9-fjhjp   1/1     Running   0          15s
nginx-deployment-864c7757b9-lnlfs   1/1     Running   0          15s
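
A client-side dry run is a quick way to surface YAML typos in a provided file before creating anything:

[root@master ~]# kubectl apply -f deployment.yml --dry-run=client
deployment.apps/nginx-deployment created (dry run)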

© 2025 Sanjeeb KC. All rights reserved.