Lesson 12.1: Practice Exam
Question 1:
Create a new ClusterRole named deployment-clusterrole, which only allows the creation of the following resource types:
- Deployment
- StatefulSet
- DaemonSet
Create a new ServiceAccount named cicd-token in the existing namespace app-team1.
Bind the new ClusterRole deployment-clusterrole to the new ServiceAccount cicd-token, limited to the namespace app-team1.
Answer:
Imperative Way
Step 1: Create ClusterRole
[root@master exam]# kubectl create clusterrole deployment-clusterrole --verb=create --resource=Deployment,StatefulSet,DaemonSet
clusterrole.rbac.authorization.k8s.io/deployment-clusterrole created
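If you want to double-check what kubectl generates from those flags, you can preview the object without touching the cluster by adding a client-side dry run (same flags as above, with the resources written in their lowercase plural form, which kubectl also accepts):
kubectl create clusterrole deployment-clusterrole \
  --verb=create \
  --resource=deployments,statefulsets,daemonsets \
  --dry-run=client -o yaml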
Step 2: Create the namespace (if not already present) and the ServiceAccount
[root@master exam]# kubectl create ns app-team1
namespace/app-team1 created
[root@master exam]# kubectl create sa cicd-token -n app-team1
serviceaccount/cicd-token created
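A quick sanity check that both objects exist (plain kubectl get, using the names from the question):
kubectl get ns app-team1
kubectl get sa cicd-token -n app-team1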
Step 3: Create a RoleBinding
- https://kubernetes.io/docs/reference/access-authn-authz/rbac/#kubectl-create-rolebinding
- Since the question does not specify a RoleBinding name, reuse the name of the ClusterRole you created as the RoleBinding name.
[root@master exam]# kubectl create rolebinding deployment-clusterrole \
> --clusterrole=deployment-clusterrole \
> --serviceaccount=app-team1:cicd-token \
> --namespace=app-team1
rolebinding.rbac.authorization.k8s.io/deployment-clusterrole created
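To confirm the binding references the right ClusterRole and subject, you can describe it (standard kubectl; output not shown here):
kubectl describe rolebinding deployment-clusterrole -n app-team1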
Declarative Way
[root@master rbac]# kubectl create sa cicd-token -n=app-team1
serviceaccount/cicd-token created
[root@master rbac]# cat deployment-clusterrole.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: deployment-clusterrole
rules:
- apiGroups: ["apps"]
  resources: ["deployments", "statefulsets", "daemonsets"]
  verbs: ["create"]
[root@master rbac]# cat cluster-rolebinding.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: deployment-clusterrole-binding
  namespace: app-team1
subjects:
- kind: ServiceAccount
  name: cicd-token
  namespace: app-team1
roleRef:
  kind: ClusterRole
  name: deployment-clusterrole
  apiGroup: rbac.authorization.k8s.io
[root@master rbac]# kubectl apply -f deployment-clusterrole.yml
[root@master rbac]# kubectl apply -f cluster-rolebinding.yml
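Before moving on to the auth checks, it is worth confirming that both manifests were applied, using the names from the YAML above:
kubectl get clusterrole deployment-clusterrole
kubectl get rolebinding deployment-clusterrole-binding -n app-team1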
Verification
[root@master rbac]# kubectl auth can-i create deployment --as=system:serviceaccount:app-team1:cicd-token -n=app-team1
yes
[root@master rbac]# kubectl auth can-i create deployment --as=system:serviceaccount:app-team1:cicd-token -n=default
no
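Because the role only grants create on those apps resources, a couple of extra negative checks are also useful; both of the following are expected to return no:
kubectl auth can-i delete deployments --as=system:serviceaccount:app-team1:cicd-token -n app-team1
kubectl auth can-i create pods --as=system:serviceaccount:app-team1:cicd-token -n app-team1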
Question 2:
Set the node named cka-cluster1-control-plane as unavailable and reschedule all of the pods running on it.
Note: the practice environment in the transcript below uses a node named controlplane; on the exam, substitute the node name given in the question.
controlplane:~$ kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   12d   v1.32.1
node01         Ready    <none>          12d   v1.32.1
controlplane:~$ kubectl drain controlplane --ignore-daemonsets
node/controlplane cordoned
Warning: ignoring DaemonSet-managed Pods: kube-system/canal-vmgsc, kube-system/kube-proxy-nlw5b
evicting pod local-path-storage/local-path-provisioner-5c94487ccb-hbt6p
evicting pod kube-system/calico-kube-controllers-fdf5f5495-7lzbl
pod/local-path-provisioner-5c94487ccb-hbt6p evicted
pod/calico-kube-controllers-fdf5f5495-7lzbl evicted
node/controlplane drained
controlplane:~$ kubectl get nodes
NAME           STATUS                     ROLES           AGE   VERSION
controlplane   Ready,SchedulingDisabled   control-plane   12d   v1.32.1
node01         Ready                      <none>          12d   v1.32.1
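To confirm that the drain rescheduled everything except DaemonSet-managed pods, you can list what is still bound to the node (spec.nodeName is a standard pod field selector; expect only pods such as kube-proxy and the CNI agent):
kubectl get pods -A --field-selector spec.nodeName=controlplane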
The transcript then continues with the control-plane upgrade steps that commonly accompany a drain task on the exam (kubeadm first, then the upgrade itself, then kubelet and kubectl):
controlplane:~$ apt-get install kubeadm=1.32.0-1.1
controlplane:~$ kubeadm upgrade apply v1.32.0
[upgrade] SUCCESS! A control plane node of your cluster was upgraded to "v1.32.0".
controlplane:~$ apt-get install kubelet=1.32.0-1.1 kubectl=1.32.0-1.1
controlplane:~$ systemctl restart kubelet
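For reference, the official kubeadm upgrade docs wrap these last two commands in apt-mark unhold/hold (the packages are normally pinned) and reload systemd units before restarting kubelet; a sketch using the same versions as above:
apt-mark unhold kubelet kubectl
apt-get update && apt-get install -y kubelet=1.32.0-1.1 kubectl=1.32.0-1.1
apt-mark hold kubelet kubectl
systemctl daemon-reload
systemctl restart kubelet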
controlplane:~$ kubectl get nodes
NAME           STATUS                     ROLES           AGE   VERSION
controlplane   Ready,SchedulingDisabled   control-plane   12d   v1.32.0
node01         Ready                      <none>          12d   v1.32.1
controlplane:~$ kubectl uncordon controlplane
controlplane:~$ kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   12d   v1.32.0
node01         Ready    <none>          12d   v1.32.1