## To view the actual total of the nodes' resource requests, use the eks-node-viewer tool.
## It can be downloaded from https://github.com/awslabs/eks-node-viewer
## Start eks-node-viewer using the following syntax to display only the NAP / karpenter added nodes
./eks-node-viewer --node-selector karpenter.sh/nodepool --disable-pricing --resources cpu
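## (optional) eks-node-viewer can show several resources at once; a sketch assuming the
## standard --resources flag, which takes a comma separated list
# ./eks-node-viewer --node-selector karpenter.sh/nodepool --disable-pricing --resources cpu,memory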
## check initial nodes on the AKS cluster
kubectl get nodes
## check the CRDs related to NAP / karpenter
kubectl get crd | grep karpenter
## check the actual instances of the CRDs
kubectl get aksnodeclasses
kubectl get nodepools
kubectl get nodeclaims
## check the events related to NAP / karpenter
kubectl get events -A --field-selector source=karpenter --sort-by='.metadata.creationTimestamp'
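## (optional) keep watching the karpenter events as they arrive instead of re-running the command
# kubectl get events -A --field-selector source=karpenter --watch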
## create a deployment that requests 1000m CPU and scale it to 20 replicas
kubectl apply -f demo-NAP/inflate-default-1000mCPU.yaml
kubectl scale deployment.apps/inflate-default-1000mcpu --replicas 20
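## a minimal sketch of what demo-NAP/inflate-default-1000mCPU.yaml might contain; the file in the
## repo is authoritative, and the image and initial replica count below are assumptions
apiVersion: apps/v1
kind: Deployment
metadata:
  name: inflate-default-1000mcpu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: inflate-default-1000mcpu
  template:
    metadata:
      labels:
        app: inflate-default-1000mcpu
    spec:
      containers:
        - name: inflate
          image: registry.k8s.io/pause:3.9   # placeholder image, an assumption
          resources:
            requests:
              cpu: 1000m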
## check the events related to NAP / karpenter
kubectl get events -A --field-selector source=karpenter --sort-by='.metadata.creationTimestamp'
## create a deployment that requests 250m CPU and scale it to 10 replicas
## deployment configured to be able to run on Spot nodes
kubectl apply -f demo-NAP/inflate-spot-250mCPU.yaml
kubectl scale deployment.apps/inflate-spot-250mcpu --replicas 10
## create a deployment that requests 500m CPU and scale it to 12 replicas
## deployment configured to be able to run on Spot nodes
kubectl apply -f demo-NAP/inflate-spot-500mCPU.yaml
kubectl scale deployment.apps/inflate-spot-500mcpu --replicas 12
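## the two spot manifests likely differ from the default one mainly in the pod spec; a sketch of
## the relevant part, assuming they use the karpenter capacity-type label and the AKS spot
## toleration (the repo files are authoritative, only the CPU request differs between them)
    spec:
      nodeSelector:
        karpenter.sh/capacity-type: spot
      tolerations:
        - key: kubernetes.azure.com/scalesetpriority
          operator: Equal
          value: spot
          effect: NoSchedule
      containers:
        - name: inflate
          image: registry.k8s.io/pause:3.9   # placeholder image, an assumption
          resources:
            requests:
              cpu: 250m                      # 500m in the second manifest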
## check the events related to NAP / karpenter
kubectl get events -A --field-selector source=karpenter --sort-by='.metadata.creationTimestamp'
## check the created nodeclaims and look at the resources
kubectl get nodeclaims
kubectl get nodeclaims {node_claim} -o yaml | grep ^status -A10
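## (optional) list all nodeclaims with their capacity in one go instead of grepping the yaml;
## assumes the nodeclaim status exposes capacity fields as in recent karpenter versions
# kubectl get nodeclaims -o custom-columns=NAME:.metadata.name,CPU:.status.capacity.cpu,MEMORY:.status.capacity.memory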
## check the nodes and how the pods are scheduled
kubectl get nodes
kubectl get pods -o wide
## scale deployments to a small number of replicas
## check the nodes and observe that some of them might get removed
## check how some of the pods have been evicted and re-created on new nodes
kubectl scale deployment.apps/inflate-default-1000mcpu --replicas 1
kubectl scale deployment.apps/inflate-spot-250mcpu --replicas 1
kubectl scale deployment.apps/inflate-spot-500mcpu --replicas 1
## create a new nodepool
kubectl apply -f demo-NAP/nodepool-64G-spot-D-202309290.yaml
## check the events about the newly created nodepool
kubectl get events -A --field-selector source=karpenter --sort-by='.metadata.creationTimestamp'
## create the AKSNodeClass for the previous nodepool
## check the node image and disk space as soon as such a node is created
kubectl apply -f demo-NAP/nodeclass-64G-202309290.yaml
## create a new aksnodeclass and the nodepool that uses it
kubectl apply -f demo-NAP/nodeclass-256G-202401220.yaml
kubectl apply -f demo-NAP/nodepool-256G-spot-B-202401220.yaml
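## a rough sketch of what such an aksnodeclass / nodepool pair could look like; the API versions,
## field names and values below are assumptions inferred from the file names, and the shortened
## resource names are hypothetical - the manifests in demo-NAP/ are authoritative
apiVersion: karpenter.azure.com/v1alpha2
kind: AKSNodeClass
metadata:
  name: nodeclass-256g
spec:
  imageFamily: Ubuntu2204
  osDiskSizeGB: 256
---
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
  name: nodepool-256g-spot-b
spec:
  template:
    spec:
      nodeClassRef:
        name: nodeclass-256g
      requirements:
        - key: karpenter.sh/capacity-type
          operator: In
          values: ["spot"]
        - key: karpenter.azure.com/sku-family
          operator: In
          values: ["B"]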
## check the nodepools
kubectl get nodepools
## scale the deployments up and check that the new node names match the nodepools we created earlier
kubectl scale deployment.apps/inflate-default-1000mcpu --replicas 6
kubectl scale deployment.apps/inflate-spot-250mcpu --replicas 8
kubectl scale deployment.apps/inflate-spot-500mcpu --replicas 7
## check the newly created nodeclaims that use the new nodepools and look at their resources
kubectl get nodeclaims
kubectl get nodeclaims {node_claim} -o yaml | grep ^status -A10
## create a new deployment that cannot be disrupted by NAP / karpenter and scale it
## check the annotation
kubectl apply -f demo-NAP/inflate-spot-250mcpu-no-disrupt.yaml
kubectl scale deployment.apps/inflate-spot-250mcpu-no-disrupt --replicas 2
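## the no-disrupt manifest presumably sets the karpenter do-not-disrupt annotation on the pod
## template, roughly along these lines (the repo file is authoritative)
  template:
    metadata:
      annotations:
        karpenter.sh/do-not-disrupt: "true"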
## scale the other deployments down and observe that some nodes are not removed
kubectl scale deployment.apps/inflate-default-1000mcpu --replicas 2
kubectl scale deployment.apps/inflate-spot-250mcpu --replicas 3
kubectl scale deployment.apps/inflate-spot-500mcpu --replicas 6
## check the events about the nodes and why the nodeclaim cannot be disrupted
kubectl get events -A --field-selector source=karpenter --sort-by='.metadata.creationTimestamp'
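## (optional) narrow the event list down to disruption related messages
# kubectl get events -A --field-selector source=karpenter --sort-by='.metadata.creationTimestamp' | grep -i disrupt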
## clean up the workloads and NAP / karpenter will delete the nodes
# kubectl delete -f demo-NAP/inflate-spot-250mCPU.yaml
# kubectl delete -f demo-NAP/inflate-spot-500mCPU.yaml
# kubectl delete -f demo-NAP/inflate-default-1000mCPU.yaml
# kubectl delete -f demo-NAP/inflate-spot-250mcpu-no-disrupt.yaml
## remove the manually added nodepools and aksnodeclasses
# kubectl delete -f demo-NAP/nodepool-64G-spot-D-202309290.yaml
# kubectl delete -f demo-NAP/nodepool-256G-spot-B-202401220.yaml
# kubectl delete -f demo-NAP/nodeclass-256G-202401220.yaml
# kubectl delete -f demo-NAP/nodeclass-64G-202309290.yaml
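## (optional) verify that the demo resources are gone and only the original nodes remain
# kubectl get nodepools
# kubectl get nodeclaims
# kubectl get nodes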