-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathscript.txt
82 lines (62 loc) · 1.92 KB
/
script.txt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
# --- kube-proxy iptables mode demo ---
# Create the echo Deployment and its ClusterIP Service
kubectl apply -f deployment.yaml -f service.yaml
kubectl get all
# Inspect the Endpoints object backing the "echo" service (the pod IPs behind the VIP)
kubectl describe ep echo
# Create a throwaway client pod and curl the service VIP in a loop.
# NOTE: current kubectl requires "--" before the container command;
# the bare positional form ("... appropriate/curl ash") was deprecated and removed.
kubectl run -it test --image appropriate/curl -- ash
while true ; do curl <service ip> ; sleep 1 ; done
# Readiness probe
## Second window: exec into the curl pod and flip the echo pod's readiness.
# NOTE: current kubectl requires "--" before the command
# (legacy "kubectl exec <pod> sh" form is deprecated/removed).
kubectl exec -it <curl pod> -- sh
curl <podip>:5000/ready
curl <podip>:5000/toggleReady
curl <podip>:5000/ready
exit
# The toggled pod now reports not-ready...
kubectl get pods
# [Ready : 0/1]
# ...and its IP moves to NotReadyAddresses, so the service stops routing to it.
kubectl describe ep echo
kubectl describe pod <unready pod>
# [NotReadyAddresses]
## Set pod as ready (toggle it back), then inspect the kube-proxy iptables chains:
# KUBE-SERVICES -> per-service KUBE-SVC-* -> per-endpoint KUBE-SEP-* (DNAT to a pod IP)
sudo iptables -t nat -L KUBE-SERVICES
sudo iptables -t nat -L KUBE-SVC-
sudo iptables -t nat -L KUBE-SEP-
# hairpin: a pod hitting its own service VIP can be load-balanced back to itself
# pick pod from host
# NOTE: flags go before the pod name and "--" before the command
kubectl exec -it echodeploy-xxxx -- sh
hostname -i
while true ; do wget -q -O - 10.200.20.164 ; sleep 1 ; done
# affinity: a Service with sessionAffinity pins a given client to one backend,
# so repeated curls from the same source hit the same pod
kubectl apply -f service-persistent.yaml
curl 10.200.20.164 ; curl 10.200.20.164 ; curl 10.200.20.164
# kube-proxy implements affinity with the iptables "recent" match on the
# KUBE-SEP- chains; the tracked client source IPs show up under /proc/net/xt_recent/
sudo iptables -t nat -L KUBE-SVC-
cat /proc/net/xt_recent/KUBE-SEP-
### ipvs — same demo against a cluster whose kube-proxy runs in IPVS mode
kubectx lbipvs.eu1.staging.dog
kubectl apply -f deployment.yaml -f service.yaml
kubectl get all
# NOTE: current kubectl requires "--" before the container command
kubectl run -it test --image appropriate/curl -- ash
while true ; do curl <service ip> ; sleep 1 ; done
# On a node: list the IPVS virtual service and its real servers,
# and the kube-ipvs0 dummy interface that carries the service VIPs
ssh <node>
sudo ipvsadm --list --numeric --tcp-service 10.200.200.68:80
sudo ip -d addr show dev kube-ipvs0
## Persistency: IPVS has native session persistence (shown in the ipvsadm listing)
k apply -f service-persistent.yaml
sudo ipvsadm --list --numeric --tcp-service 10.200.200.68:80
## Hairpin: handled via an ipset matched in the KUBE-POSTROUTING chain
sudo iptables -t nat -L KUBE-POSTROUTING
sudo ipset -L KUBE-LOOP-BACK
## DNS: service discovery through cluster DNS
# NOTE: current kubectl requires "--" before the command
k exec -it test-7b944db754-g885v -- sh
# resolv.conf inside the pod points at the cluster DNS and lists search
# domains, which is why the short name "echo" resolves on the next line
cat /etc/resolv.conf
curl echo
## External access: from a client machine outside the cluster
ssh laurent.bernaille@datadog-kube-client
# Try a pod IP and the service VIP directly
# (presumably the pod IP is routable from here but the VIP is not,
# since no kube-proxy has programmed rules on this host — verify)
curl <podip>:5000
curl <serviceip>
# Pre-create the mark chain kube-proxy expects, then run kube-proxy by hand
# so this external host also gets the service NAT rules
sudo iptables -t nat -N KUBE-MARK-DROP
sudo iptables -t nat -A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
sudo ./kube-proxy --kubeconfig=kubeconfig
while true ; do curl 10.200.164.177 ; sleep 1 ; done
# dnsmasq config forwards cluster domains to the cluster DNS,
# so the full service name resolves from outside the cluster
cat /etc/dnsmasq.conf
while true ; do curl echo.default.svc.cluster.local ; sleep 1 ; done