diff --git a/config/yurtadm/samples/openyurt-v0.7.0/Clusterfile b/config/yurtadm/samples/openyurt-v0.7.0/Clusterfile new file mode 100644 index 00000000000..0a0e08d85e5 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/Clusterfile @@ -0,0 +1,28 @@ +apiVersion: sealer.cloud/v2 +kind: Cluster +metadata: + name: my-cluster +spec: + hosts: + - ips: [ 1.2.3.4 ] + roles: [ master ] + image: test:latest + ssh: + passwd: xxx + pk: /root/.ssh/id_rsa + user: root + env: + - PodCIDR=10.244.0.0/16 + cmd_args: + - BindAddress=0.0.0.0 + - ClusterCIDR=10.244.0.0/16 +--- + +## Custom configurations must specify a kind; they will be merged into the default kubeadm configs +kind: ClusterConfiguration +networking: + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 +controllerManager: + extraArgs: + controllers: -nodelifecycle,*,bootstrapsigner,tokencleaner \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/README.md b/config/yurtadm/samples/openyurt-v0.7.0/README.md new file mode 100644 index 00000000000..59198f656e1 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/README.md @@ -0,0 +1,38 @@ +# OpenYurt ClusterImage + +Currently, `yurtadm init` uses sealer v0.8.5 to create Kubernetes master nodes. + +## Install sealer + +```bash +wget https://github.com/sealerio/sealer/releases/download/v0.8.5/sealer-v0.8.5-linux-amd64.tar.gz +tar -zxvf sealer-v0.8.5-linux-amd64.tar.gz -C /usr/bin +``` + +## Build your own OpenYurt Cluster + +Modify the Kubefile to build your own OpenYurt cluster image. + +### 1. Build OpenYurt Cluster Image + +```bash +cd ./cluster-image + +# build openyurt ClusterImage +sealer build -t registry-1.docker.io/your_dockerhub_username/openyurt-cluster:latest-k8s-1.21.14 -f Kubefile . + +# push to dockerhub +sealer push registry-1.docker.io/your_dockerhub_username/openyurt-cluster:latest-k8s-1.21.14 +``` + +### 2. Make a Clusterfile + +A sample Clusterfile is provided at ./Clusterfile. + +### 3. Run OpenYurt Cluster + +```bash +sealer apply -f Clusterfile +``` + +Note: `yurtadm init` only creates master nodes; use `yurtadm join` to add worker nodes. \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/Kubefile b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/Kubefile new file mode 100644 index 00000000000..b50fe2dbf50 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/Kubefile @@ -0,0 +1,18 @@ +FROM kubernetes:v1.21.14-alpine + +# flannel: https://github.com/sealerio/applications/tree/main/flannel +# the cni binaries have been replaced by the contents of openyurt-cni-0.8.7-0.x86_64.rpm from https://github.com/openyurtio/openyurt/releases +# For the reason, see: https://openyurt.io/docs/user-manuals/network/edge-pod-network/#ipam-pod-ip-address-kept + +COPY cni . +COPY init-kube.sh /scripts/ +COPY kube-flannel.yaml.tmpl manifests/ + +COPY shell-plugin.yaml plugins + +# openyurt +COPY yamls/* manifests +COPY install.sh . 
+RUN chmod 777 install.sh + +CMD bind_address=${BindAddress} cluster_cidr=${ClusterCIDR} ./install.sh \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/bridge b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/bridge new file mode 100755 index 00000000000..4cc76e18680 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/bridge differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/dhcp b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/dhcp new file mode 100755 index 00000000000..1953aca6658 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/dhcp differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/flannel b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/flannel new file mode 100755 index 00000000000..2c99c8d52eb Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/flannel differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/host-device b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/host-device new file mode 100755 index 00000000000..005d75603a7 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/host-device differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/host-local b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/host-local new file mode 100755 index 00000000000..e506a5a5939 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/host-local differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/ipvlan b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/ipvlan new file mode 100755 index 00000000000..428b4864fb6 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/ipvlan differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/loopback b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/loopback new file mode 100755 index 00000000000..fcd73791cb1 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/loopback differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/macvlan b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/macvlan new file mode 100755 index 00000000000..e4287cc193d Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/macvlan differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/portmap b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/portmap new file mode 100755 index 00000000000..ed11e2866e5 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/portmap differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/ptp b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/ptp new file mode 100755 index 00000000000..50fb267153a Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/ptp differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/sample b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/sample new file mode 100755 index 00000000000..e813677c9d6 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/sample differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/tuning b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/tuning new file 
mode 100755 index 00000000000..822cd5c1c86 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/tuning differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/vlan b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/vlan new file mode 100755 index 00000000000..3f3af9b33f1 Binary files /dev/null and b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/cni/vlan differ diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/init-kube.sh b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/init-kube.sh new file mode 100644 index 00000000000..e9618b4cb57 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/init-kube.sh @@ -0,0 +1,175 @@ +#!/bin/bash + +# Copyright © 2022 Alibaba Group Holding Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Load the kernel modules required for ipvs +modprobe -- ip_vs +modprobe -- ip_vs_rr +modprobe -- ip_vs_wrr +modprobe -- ip_vs_sh +modprobe -- br_netfilter +## version_ge 4.19 4.19 true ; +## version_ge 5.4 4.19 true ; +## version_ge 3.10 4.19 false ; + +version_ge(){ + test "$(echo "$@" | tr ' ' '\n' | sort -rV | head -n 1)" == "$1" +} + +disable_selinux(){ + if [ -s /etc/selinux/config ] && grep 'SELINUX=enforcing' /etc/selinux/config; then + sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config + setenforce 0 + fi +} + +get_distribution() { + lsb_dist="" + # Every system that we officially support has /etc/os-release + if [ -r /etc/os-release ]; then + lsb_dist="$(. 
/etc/os-release && echo "$ID")" + fi + # Returning an empty string here should be alright since the + # case statements don't act unless you provide an actual value + echo "$lsb_dist" +} + +disable_firewalld() { + lsb_dist=$( get_distribution ) + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + case "$lsb_dist" in + ubuntu|deepin|debian|raspbian) + command -v ufw &> /dev/null && ufw disable + ;; + centos|rhel|ol|sles|kylin|neokylin) + systemctl stop firewalld && systemctl disable firewalld + ;; + *) + systemctl stop firewalld && systemctl disable firewalld + echo "unknown distribution, using systemctl to stop firewalld by default" + ;; + esac +} + +kernel_version=$(uname -r | cut -d- -f1) +if version_ge "${kernel_version}" 4.19; then + modprobe -- nf_conntrack +else + modprobe -- nf_conntrack_ipv4 +fi + +cat <<EOF > /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.conf.all.rp_filter=0 +EOF +sysctl --system +sysctl -w net.ipv4.ip_forward=1 +disable_firewalld +swapoff -a || true +disable_selinux + +chmod -R 755 ../bin/* +chmod 644 ../bin +cp ../bin/* /usr/bin +cp ../scripts/kubelet-pre-start.sh /usr/bin +# cni +mkdir /opt/cni/bin -p +chmod -R 755 ../cni/* +chmod 644 ../cni +cp ../cni/* /opt/cni/bin + +# Cgroup driver +mkdir -p /etc/systemd/system +cp ../etc/kubelet.service /etc/systemd/system/ +[ -d /etc/systemd/system/kubelet.service.d ] || mkdir /etc/systemd/system/kubelet.service.d +cp ../etc/10-kubeadm.conf /etc/systemd/system/kubelet.service.d/ + +[ -d /var/lib/kubelet ] || mkdir -p /var/lib/kubelet/ + +cat <<EOF > /var/lib/kubelet/config.yaml +address: 0.0.0.0 +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 2m0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt +authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 5m0s + cacheUnauthorizedTTL: 30s +cgroupDriver: ${criDriver} +cgroupsPerQOS: true +clusterDomain: cluster.local +configMapAndSecretChangeDetectionStrategy: Watch +containerLogMaxFiles: 5 +containerLogMaxSize: 10Mi +contentType: application/vnd.kubernetes.protobuf +cpuCFSQuota: true +cpuCFSQuotaPeriod: 100ms +cpuManagerPolicy: none +cpuManagerReconcilePeriod: 10s +enableControllerAttachDetach: true +enableDebuggingHandlers: true +enforceNodeAllocatable: +- pods +eventBurst: 10 +eventRecordQPS: 5 +evictionHard: + imagefs.available: 15% + memory.available: 100Mi + nodefs.available: 10% + nodefs.inodesFree: 5% +evictionPressureTransitionPeriod: 5m0s +failSwapOn: true +fileCheckFrequency: 20s +hairpinMode: promiscuous-bridge +healthzBindAddress: 127.0.0.1 +healthzPort: 10248 +httpCheckFrequency: 20s +imageGCHighThresholdPercent: 85 +imageGCLowThresholdPercent: 80 +imageMinimumGCAge: 2m0s +iptablesDropBit: 15 +iptablesMasqueradeBit: 14 +kind: KubeletConfiguration +kubeAPIBurst: 10 +kubeAPIQPS: 5 +makeIPTablesUtilChains: true +maxOpenFiles: 1000000 +maxPods: 110 +nodeLeaseDurationSeconds: 40 +nodeStatusUpdateFrequency: 10s +oomScoreAdj: -999 +podPidsLimit: -1 +port: 10250 +registryBurst: 10 +registryPullQPS: 5 +resolvConf: /etc/resolv.conf +rotateCertificates: true +runtimeRequestTimeout: 2m0s +serializeImagePulls: true +staticPodPath: /etc/kubernetes/manifests +streamingConnectionIdleTimeout: 4h0m0s +syncFrequency: 1m0s +volumeStatsAggPeriod: 1m0s +EOF + +systemctl enable kubelet \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/install.sh b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/install.sh 
new file mode 100644 index 00000000000..4d34e2f5886 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/install.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright 2022 The OpenYurt Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo "[INFO] Start installing OpenYurt." + +kubectl apply -f manifests/kube-flannel.yaml + +## install openyurt components +kubectl apply -f manifests/yurt-controller-manager.yaml +kubectl apply -f manifests/yurt-tunnel-agent.yaml +kubectl apply -f manifests/yurt-tunnel-server.yaml +kubectl apply -f manifests/yurt-app-manager.yaml +kubectl apply -f manifests/yurthub-cfg.yaml + +## configure coredns +kubectl scale --replicas=0 deployment/coredns -n kube-system +kubectl apply -f manifests/coredns.yaml +kubectl annotate svc kube-dns -n kube-system openyurt.io/topologyKeys='openyurt.io/nodepool' + +## configure kube-proxy +str_patch='{"data": {"config.conf": "apiVersion: kubeproxy.config.k8s.io/v1alpha1\nbindAddress: '${bind_address}'\nfeatureGates:\n EndpointSliceProxying: true\nbindAddressHardFail: false\nclusterCIDR: '${cluster_cidr}'\nconfigSyncPeriod: 0s\nenableProfiling: false\nipvs:\n minSyncPeriod: 0s\n strictARP: false\nkind: KubeProxyConfiguration\nmode: ipvs\nudpIdleTimeout: 0s\nwinkernel:\n enableDSR: false\nkubeconfig.conf:"}}' +kubectl patch cm -n kube-system kube-proxy --patch "${str_patch}" && kubectl delete pod --selector k8s-app=kube-proxy -n kube-system + +echo "[INFO] OpenYurt is successfully installed." 
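+ +# A quick, illustrative sanity check (an assumption, not part of the install flow above; +# it presumes kubectl is configured on a master node): the kube-proxy configuration merged +# by the patch above and the per-nodepool DNS topology annotation can be inspected with: +#   kubectl -n kube-system get cm kube-proxy -o jsonpath='{.data.config\.conf}' +#   kubectl -n kube-system get svc kube-dns -o jsonpath='{.metadata.annotations}'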
\ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/kube-flannel.yaml.tmpl b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/kube-flannel.yaml.tmpl new file mode 100644 index 00000000000..65dd71ee794 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/kube-flannel.yaml.tmpl @@ -0,0 +1,224 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ .PodCIDR }}", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: registry.cn-hangzhou.aliyuncs.com/openyurt/flannel-edge:v0.14.0-1 + 
command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: registry.cn-hangzhou.aliyuncs.com/openyurt/flannel-edge:v0.14.0-1 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/shell-plugin.yaml b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/shell-plugin.yaml new file mode 100644 index 00000000000..d32bbca570d --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/shell-plugin.yaml @@ -0,0 +1,10 @@ +apiVersion: sealer.aliyun.com/v1alpha1 +kind: Plugin +metadata: + name: MyShell +spec: + type: SHELL + action: PostInstall + 'on': master + data: | + kubectl label node $HOSTNAME openyurt.io/is-edge-worker=false --overwrite \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/coredns.yaml b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/coredns.yaml new file mode 100644 index 00000000000..6cc43c0379d --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/coredns.yaml @@ -0,0 +1,134 @@ +apiVersion: v1 +data: + Corefile: | + .:53 { + errors + log . { + class denial success + + } + health { + lameduck 5s + } + ready + hosts /etc/edge/tunnel-nodes { # add the hosts plugin + reload 300ms + fallthrough + } + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-dns + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.aliyuncs.com/google_containers/coredns:1.7.0 + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ready + port: 8181 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + - mountPath: /etc/edge + name: hosts + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccount: coredns + serviceAccountName: coredns + tolerations: + - operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - configMap: + defaultMode: 420 + items: + - key: Corefile + path: Corefile + name: coredns + name: config-volume + - configMap: + defaultMode: 420 + name: yurt-tunnel-nodes + name: hosts \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-app-manager.yaml b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-app-manager.yaml new file mode 100644 index 00000000000..af55aac74c8 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-app-manager.yaml @@ -0,0 +1,1278 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: nodepools.apps.openyurt.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.type + description: The type of nodepool + name: Type + type: string + - JSONPath: .status.readyNodeNum + description: The number of ready nodes in the pool + name: ReadyNodes + type: integer + - JSONPath: .status.unreadyNodeNum + name: NotReadyNodes + type: integer + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: apps.openyurt.io + names: + categories: + - all + kind: NodePool + listKind: NodePoolList + plural: nodepools + shortNames: + - np + singular: nodepool + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: NodePool is the Schema for the nodepools API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NodePoolSpec defines the desired state of NodePool + properties: + annotations: + additionalProperties: + type: string + description: 'If specified, the Annotations will be added to all nodes. NOTE: existing annotations with the same keys on the nodes will be overwritten.' + type: object + labels: + additionalProperties: + type: string + description: 'If specified, the Labels will be added to all nodes. NOTE: existing labels with the same keys on the nodes will be overwritten.' + type: object + selector: + description: A label query over nodes to consider for adding to the pool + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + taints: + description: If specified, the Taints will be added to all nodes. + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + type: array + type: + description: The type of the NodePool + type: string + type: object + status: + description: NodePoolStatus defines the observed state of NodePool + properties: + nodes: + description: The list of nodes' names in the pool + items: + type: string + type: array + readyNodeNum: + description: Total number of ready nodes in the pool. + format: int32 + type: integer + unreadyNodeNum: + description: Total number of unready nodes in the pool. 
+ format: int32 + type: integer + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: uniteddeployments.apps.openyurt.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyReplicas + description: The number of pods ready. + name: READY + type: integer + - JSONPath: .status.templateType + description: The WorkloadTemplate Type. + name: WorkloadTemplate + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: AGE + type: date + group: apps.openyurt.io + names: + kind: UnitedDeployment + listKind: UnitedDeploymentList + plural: uniteddeployments + shortNames: + - ud + singular: uniteddeployment + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: UnitedDeployment is the Schema for the uniteddeployments API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: UnitedDeploymentSpec defines the desired state of UnitedDeployment. + properties: + revisionHistoryLimit: + description: Indicates the number of histories to be conserved. If unspecified, defaults to 10. + format: int32 + type: integer + selector: + description: Selector is a label query over pods that should match the replica count. It must match the pod template's labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + topology: + description: Topology describes how pods are distributed among the pools. + properties: + pools: + description: Contains the details of each pool. Each element in this array represents one pool which will be provisioned and managed by UnitedDeployment. + items: + description: Pool defines the detail of a pool. + properties: + name: + description: Indicates the pool name as a DNS_LABEL, which will be used to generate the pool workload name prefix in the format '--'. The name should be unique among all of the pools under one UnitedDeployment, and is the name of the corresponding NodePool. + type: string + nodeSelectorTerm: + description: Indicates the node selector to form the pool. Depending on the node selector, pods provisioned could be distributed across multiple groups of nodes. A pool's nodeSelectorTerm is not allowed to be updated. + type: object + patch: + description: Indicates the patch for the templateSpec. Currently only strategic merge patch is supported; see https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/#notes-on-the-strategic-merge-patch. Patch takes precedence over the Replicas field; if the Patch also modifies Replicas, the Replicas value in the Patch is used. + type: object + replicas: + description: Indicates the number of pods to be created under this pool. + format: int32 + type: integer + tolerations: + description: Indicates the tolerations the pods under this pool have. A pool's tolerations is not allowed to be updated. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator. + type: object + type: array + required: + - name + type: object + type: array + type: object + workloadTemplate: + description: WorkloadTemplate describes the pool that will be created. + properties: + deploymentTemplate: + description: Deployment template + properties: + metadata: + type: object + spec: + description: DeploymentSpec is the specification of the desired behavior of the Deployment. + type: object + required: + - spec + type: object + statefulSetTemplate: + description: StatefulSet template + properties: + metadata: + type: object + spec: + description: A StatefulSetSpec is the specification of a StatefulSet. + type: object + required: + - spec + type: object + type: object + required: + - selector + type: object + status: + description: UnitedDeploymentStatus defines the observed state of UnitedDeployment. + properties: + collisionCount: + description: Count of hash collisions for the UnitedDeployment. The UnitedDeployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Represents the latest available observations of a UnitedDeployment's current state. + items: + description: UnitedDeploymentCondition describes current state of a UnitedDeployment. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of in place set condition. + type: string + type: object + type: array + currentRevision: + description: CurrentRevision, if not empty, indicates the current version of the UnitedDeployment. + type: string + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this UnitedDeployment. It corresponds to the UnitedDeployment's generation, which is updated on mutation by the API Server. + format: int64 + type: integer + poolReplicas: + additionalProperties: + format: int32 + type: integer + description: Records the topology detail information of the replicas of each pool. + type: object + readyReplicas: + description: The number of ready replicas. + format: int32 + type: integer + replicas: + description: Replicas is the most recently observed number of replicas. + format: int32 + type: integer + templateType: + description: TemplateType indicates the type of PoolTemplate + type: string + required: + - currentRevision + - replicas + - templateType + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: yurtappdaemons.apps.openyurt.io +spec: + additionalPrinterColumns: + - JSONPath: .status.templateType + description: The WorkloadTemplate Type. + name: WorkloadTemplate + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: AGE + type: date + group: apps.openyurt.io + names: + kind: YurtAppDaemon + listKind: YurtAppDaemonList + plural: yurtappdaemons + shortNames: + - yad + singular: yurtappdaemon + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: YurtAppDaemon is the Schema for the YurtAppDaemon API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: YurtAppDaemonSpec defines the desired state of YurtAppDaemon. + properties: + nodepoolSelector: + description: NodePoolSelector is a label query over nodepool that should match the replica count. It must match the nodepool's labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + revisionHistoryLimit: + description: Indicates the number of histories to be conserved. If unspecified, defaults to 10. + format: int32 + type: integer + selector: + description: Selector is a label query over pods that should match the replica count. It must match the pod template's labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + workloadTemplate: + description: WorkloadTemplate describes the pool that will be created. + properties: + deploymentTemplate: + description: Deployment template + properties: + metadata: + type: object + spec: + description: DeploymentSpec is the specification of the desired behavior of the Deployment. + type: object + required: + - spec + type: object + statefulSetTemplate: + description: StatefulSet template + properties: + metadata: + type: object + spec: + description: A StatefulSetSpec is the specification of a StatefulSet. + type: object + required: + - spec + type: object + type: object + required: + - nodepoolSelector + - selector + type: object + status: + description: YurtAppDaemonStatus defines the observed state of YurtAppDaemon. + properties: + collisionCount: + description: Count of hash collisions for the YurtAppDaemon. 
The YurtAppDaemon controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. + format: int32 + type: integer + conditions: + description: Represents the latest available observations of a YurtAppDaemon's current state. + items: + description: YurtAppDaemonCondition describes current state of a YurtAppDaemon. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of in place set condition. + type: string + type: object + type: array + currentRevision: + description: CurrentRevision, if not empty, indicates the current version of the YurtAppDaemon. + type: string + nodepools: + description: NodePools indicates the list of node pools selected by YurtAppDaemon + items: + type: string + type: array + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this YurtAppDaemon. It corresponds to the YurtAppDaemon's generation, which is updated on mutation by the API Server. + format: int64 + type: integer + templateType: + description: TemplateType indicates the type of PoolTemplate + type: string + required: + - currentRevision + - templateType + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: yurtingresses.apps.openyurt.io +spec: + additionalPrinterColumns: + - JSONPath: .status.nginx_ingress_controller_version + description: The nginx ingress controller version + name: Nginx-Ingress-Version + type: string + - JSONPath: .status.ingress_controller_replicas_per_pool + description: The nginx ingress controller replicas per pool + name: Replicas-Per-Pool + type: integer + - JSONPath: .status.readyNum + description: The number of pools on which ingress is enabled + name: ReadyNum + type: integer + - JSONPath: .status.unreadyNum + description: The number of pools on which ingress is enabling or enable failed + name: NotReadyNum + type: integer + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: apps.openyurt.io + names: + categories: + - all + kind: YurtIngress + listKind: YurtIngressList + plural: yurtingresses + shortNames: + - ying + singular: yurtingress + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: YurtIngress is the Schema for the yurtingresses API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: YurtIngressSpec defines the desired state of YurtIngress + properties: + ingress_controller_replicas_per_pool: + description: Indicates the number of the ingress controllers to be deployed under all the specified nodepools. + format: int32 + type: integer + pools: + description: Indicates all the nodepools on which to enable ingress. + items: + description: IngressPool defines the details of a Pool for ingress + properties: + name: + description: Indicates the pool name. + type: string + required: + - name + type: object + type: array + type: object + status: + description: YurtIngressStatus defines the observed state of YurtIngress + properties: + conditions: + description: Indicates all the nodepools on which to enable ingress. + properties: + ingressreadypools: + description: Indicates the pools that ingress controller is deployed successfully. + items: + type: string + type: array + ingressunreadypools: + description: Indicates the pools that ingress controller is being deployed or deployed failed. + items: + description: IngressNotReadyPool defines the condition details of an ingress not ready Pool + properties: + name: + description: Indicates the pool name. + type: string + poolinfo: + description: Info of ingress not ready condition. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + type: + description: Type of ingress not ready condition. + type: string + type: object + required: + - name + type: object + type: array + type: object + ingress_controller_replicas_per_pool: + description: Indicates the number of the ingress controllers deployed under all the specified nodepools. + format: int32 + type: integer + nginx_ingress_controller_version: + description: Indicates the nginx ingress controller version deployed under all the specified nodepools. + type: string + readyNum: + description: Total number of ready pools on which ingress is enabled. + format: int32 + type: integer + unreadyNum: + description: Total number of unready pools on which ingress is enabling or enable failed. 
+ format: int32 + type: integer + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: yurt-app-leader-election-role + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: yurt-app-manager-role +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments/status + verbs: + - get + - patch + - update +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - get + - patch + - update +- apiGroups: + - apps.openyurt.io + resources: + - nodepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.openyurt.io + resources: + - nodepools/status + verbs: + - get + - patch + - update +- apiGroups: + - apps.openyurt.io + resources: + - uniteddeployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.openyurt.io + resources: + - uniteddeployments/status + verbs: + - get + - patch + - update +- apiGroups: + - apps.openyurt.io + resources: + - yurtappdaemons + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.openyurt.io + resources: + - yurtappdaemons/status + verbs: + - get + - patch + - update +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses/status + verbs: + - get + - patch + - update +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + 
- persistentvolumeclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: yurt-app-leader-election-rolebinding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: yurt-app-leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: yurt-app-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: yurt-app-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: kube-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: yurt-app-webhook-certs + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: yurt-app-webhook-service + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 9876 + selector: + control-plane: yurt-app-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + control-plane: yurt-app-manager + name: yurt-app-manager + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + control-plane: yurt-app-manager + template: + metadata: + labels: + control-plane: yurt-app-manager + spec: + containers: + - args: + - --enable-leader-election + - --v=4 + command: + - /usr/local/bin/yurt-app-manager + image: openyurt/yurt-app-manager:v0.6.0-beta.1 + imagePullPolicy: Always + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + openyurt.io/is-edge-worker: "false" + priorityClassName: system-node-critical + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.alibabacloud.com/addon + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: yurt-app-webhook-certs +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + template: "" + name: yurt-app-mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /mutate-apps-openyurt-io-v1alpha1-nodepool + failurePolicy: Fail + name: mnodepool.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - 
nodepools +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /mutate-apps-openyurt-io-v1alpha1-uniteddeployment + failurePolicy: Fail + name: muniteddeployment.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - uniteddeployments +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /mutate-apps-openyurt-io-v1alpha1-yurtappdaemon + failurePolicy: Fail + name: myurtappdaemon.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - yurtappdaemons +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + annotations: + template: "" + name: yurt-app-validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /validate-apps-openyurt-io-v1alpha1-nodepool + failurePolicy: Fail + name: vnodepool.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - nodepools +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /validate-apps-openyurt-io-v1alpha1-uniteddeployment + failurePolicy: Fail + name: vuniteddeployment.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - uniteddeployments +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /validate-apps-openyurt-io-v1alpha1-yurtappdaemon + failurePolicy: Fail + name: vyurtappdaemon.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - yurtappdaemons +- clientConfig: + caBundle: Cg== + service: + name: yurt-app-webhook-service + namespace: kube-system + path: /validate-apps-openyurt-io-v1alpha1-yurtingress + failurePolicy: Fail + name: vyurtingress.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - yurtingresses diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-controller-manager.yaml b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-controller-manager.yaml new file mode 100644 index 00000000000..8dd9a50f8c2 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-controller-manager.yaml @@ -0,0 +1,148 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: yurt-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: yurt-controller-manager +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - update + - apiGroups: + - "" + resources: + - pods/status + verbs: + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - delete + - list + - watch + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - 
get + - patch + - update + - list + - watch + - apiGroups: + - "" + - apps + resources: + - daemonsets + verbs: + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update + - apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - kubernetes.io/kube-apiserver-client + - kubernetes.io/kubelet-serving + verbs: + - approve +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: yurt-controller-manager +subjects: + - kind: ServiceAccount + name: yurt-controller-manager + namespace: kube-system +roleRef: + kind: ClusterRole + name: yurt-controller-manager + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: yurt-controller-manager + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: yurt-controller-manager + template: + metadata: + labels: + app: yurt-controller-manager + spec: + serviceAccountName: yurt-controller-manager + hostNetwork: true + tolerations: + - operator: "Exists" + nodeSelector: + openyurt.io/is-edge-worker: "false" + affinity: + nodeAffinity: + # we prefer allocating ycm on cloud node + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: openyurt.io/is-edge-worker + operator: In + values: + - "false" + containers: + - name: yurt-controller-manager + image: openyurt/yurt-controller-manager:v0.7.0 + command: + - yurt-controller-manager \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-tunnel-agent.yaml b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-tunnel-agent.yaml new file mode 100644 index 00000000000..10c2945ce6c --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-tunnel-agent.yaml @@ -0,0 +1,66 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: yurt-tunnel-agent + name: yurt-tunnel-agent + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: yurt-tunnel-agent + template: + metadata: + labels: + k8s-app: yurt-tunnel-agent + spec: + nodeSelector: + beta.kubernetes.io/os: linux + openyurt.io/is-edge-worker: "true" + containers: + - command: + - yurt-tunnel-agent + args: + - --node-name=$(NODE_NAME) + - --node-ip=$(POD_IP) + - --v=2 + image: openyurt/yurt-tunnel-agent:v0.7.0 + imagePullPolicy: IfNotPresent + name: yurt-tunnel-agent + volumeMounts: + - name: k8s-dir + mountPath: /etc/kubernetes + - name: kubelet-pki + mountPath: /var/lib/kubelet/pki + - name: tunnel-agent-dir + mountPath: /var/lib/yurttunnel-agent + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + hostNetwork: true + restartPolicy: Always + tolerations: + - operator: Exists + volumes: + - name: k8s-dir + hostPath: + path: /etc/kubernetes + type: Directory + - name: kubelet-pki + hostPath: + path: /var/lib/kubelet/pki + type: Directory + - name: tunnel-agent-dir + hostPath: + path: /var/lib/yurttunnel-agent + type: DirectoryOrCreate diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-tunnel-server.yaml.tmpl b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-tunnel-server.yaml.tmpl new file mode 
100644 index 00000000000..0eb7629892d --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurt-tunnel-server.yaml.tmpl @@ -0,0 +1,228 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: tunnel-proxy-client +rules: + - apiGroups: + - "" + resources: + - nodes/stats + - nodes/metrics + - nodes/log + - nodes/spec + - nodes/proxy + verbs: + - create + - get + - list + - watch + - delete + - update + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tunnel-proxy-client +subjects: + - kind: User + name: tunnel-proxy-client + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: tunnel-proxy-client + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: yurt-tunnel-server +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - list + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - pods + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch + - get + - create + - update +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create + - get + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: yurt-tunnel-server +subjects: + - kind: ServiceAccount + name: yurt-tunnel-server + namespace: kube-system +roleRef: + kind: ClusterRole + name: yurt-tunnel-server + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: yurt-tunnel-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: x-tunnel-server-svc + namespace: kube-system + labels: + name: yurt-tunnel-server +spec: + type: NodePort + ports: + - port: 10263 + targetPort: 10263 + name: https + - port: 10262 + targetPort: 10262 + name: tcp + selector: + k8s-app: yurt-tunnel-server +--- +apiVersion: v1 +kind: Service +metadata: + name: x-tunnel-server-internal-svc + namespace: kube-system + labels: + name: yurt-tunnel-server +spec: + ports: + - port: 10250 + targetPort: 10263 + name: https + - port: 10255 + targetPort: 10264 + name: http + selector: + k8s-app: yurt-tunnel-server +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: yurt-tunnel-server-cfg + namespace: kube-system +data: + localhost-proxy-ports: "10266, 10267" + http-proxy-ports: "" + https-proxy-ports: "" + dnat-ports-pair: "" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: yurt-tunnel-server + namespace: kube-system + labels: + k8s-app: yurt-tunnel-server +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: yurt-tunnel-server + template: + metadata: + labels: + k8s-app: yurt-tunnel-server + spec: + hostNetwork: true + serviceAccountName: yurt-tunnel-server + restartPolicy: Always + volumes: + - name: tunnel-server-dir + hostPath: + path: /var/lib/yurttunnel-server + type: DirectoryOrCreate + tolerations: + - operator: "Exists" + nodeSelector: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + openyurt.io/is-edge-worker: "false" + containers: + - name: yurt-tunnel-server + image: 
openyurt/yurt-tunnel-server:v0.7.0 + imagePullPolicy: IfNotPresent + command: + - yurt-tunnel-server + args: + - --bind-address=$(NODE_IP) + - --insecure-bind-address=$(NODE_IP) + - --cert-ips={{ .YurttunnelServerAddress }} + - --proxy-strategy=destHost + - --v=2 + env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + volumeMounts: + - name: tunnel-server-dir + mountPath: /var/lib/yurttunnel-server + +--- + +apiVersion: v1 +data: + tunnel-nodes: "" +kind: ConfigMap +metadata: + name: yurt-tunnel-nodes + namespace: kube-system \ No newline at end of file diff --git a/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurthub-cfg.yaml b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurthub-cfg.yaml new file mode 100644 index 00000000000..679df092920 --- /dev/null +++ b/config/yurtadm/samples/openyurt-v0.7.0/cluster-image/yamls/yurthub-cfg.yaml @@ -0,0 +1,52 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: yurt-hub +rules: + - apiGroups: + - "" + resources: + - events + verbs: + - get + - apiGroups: + - apps.openyurt.io + resources: + - nodepools + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - yurt-hub-cfg + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: yurt-hub +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: yurt-hub +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: yurt-hub-cfg + namespace: kube-system +data: + cache_agents: "" + filter_endpoints: coredns/endpoints#list;watch + filter_servicetopology: coredns/endpointslices#list;watch + filter_discardcloudservice: "" + filter_masterservice: "" diff --git a/go.mod b/go.mod index 2bcccfa80da..70b36e5a3b3 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/Masterminds/semver/v3 v3.1.1 github.com/Microsoft/go-winio v0.4.15 github.com/aliyun/alibaba-cloud-sdk-go v1.61.579 + github.com/chai2010/gettext-go v1.0.2 github.com/daviddengcn/go-colortext v1.0.0 github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/emicklei/go-restful v2.12.0+incompatible // indirect diff --git a/go.sum b/go.sum index 3c0ae979222..e264d0487e4 100644 --- a/go.sum +++ b/go.sum @@ -115,6 +115,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= diff --git a/pkg/util/kubernetes/kubeadm/app/constants/constants.go b/pkg/util/kubernetes/kubeadm/app/constants/constants.go index 12c40d6a6e3..f2853a8eaaa 100644 --- a/pkg/util/kubernetes/kubeadm/app/constants/constants.go +++ 
b/pkg/util/kubernetes/kubeadm/app/constants/constants.go @@ -447,13 +447,13 @@ var ( ControlPlaneComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.21.0") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.18.0") // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports - MinimumKubeletVersion = version.MustParseSemantic("v1.21.0") + MinimumKubeletVersion = version.MustParseSemantic("v1.18.0") // CurrentKubernetesVersion specifies current Kubernetes version supported by kubeadm - CurrentKubernetesVersion = version.MustParseSemantic("v1.22.0") + CurrentKubernetesVersion = version.MustParseSemantic("v1.18.0") // SupportedEtcdVersion lists officially supported etcd versions with corresponding Kubernetes releases SupportedEtcdVersion = map[uint8]string{ diff --git a/pkg/util/kubernetes/kubeadm/app/discovery/token/token.go b/pkg/util/kubernetes/kubeadm/app/discovery/token/token.go index 8389d98b03d..ec0d105feee 100644 --- a/pkg/util/kubernetes/kubeadm/app/discovery/token/token.go +++ b/pkg/util/kubernetes/kubeadm/app/discovery/token/token.go @@ -21,6 +21,7 @@ import ( "bytes" "context" "fmt" + "strings" "time" "github.com/pkg/errors" @@ -57,7 +58,7 @@ func RetrieveBootstrapConfig(data joindata.YurtJoinData) (*clientcmdapi.Config, clusterinfo := kubeconfigutil.GetClusterFromKubeConfig(cfg) return kubeconfigutil.CreateWithToken( - fmt.Sprintf("https://%s", data.ServerAddr()), + fmt.Sprintf("https://%s", strings.Split(data.ServerAddr(), ",")[0]), "kubernetes", TokenUser, clusterinfo.CertificateAuthorityData, @@ -79,7 +80,7 @@ func retrieveValidatedConfigInfo(client clientset.Interface, data joindata.YurtJ return nil, err } - endpoint := data.ServerAddr() + endpoint := strings.Split(data.ServerAddr(), ",")[0] insecureBootstrapConfig := buildInsecureBootstrapKubeConfig(endpoint, "kubernetes") clusterName := insecureBootstrapConfig.Contexts[insecureBootstrapConfig.CurrentContext].Cluster diff --git a/pkg/util/kubernetes/kubectl/pkg/util/i18n/i18n.go b/pkg/util/kubernetes/kubectl/pkg/util/i18n/i18n.go new file mode 100644 index 00000000000..043f3178df3 --- /dev/null +++ b/pkg/util/kubernetes/kubectl/pkg/util/i18n/i18n.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package i18n + +import ( + "fmt" + + "github.com/chai2010/gettext-go" +) + +// T translates a string, possibly substituting arguments into it along +// the way. If len(args) is > 0, args[0] is assumed to be the plural value +// and plural translation is used. 
+func T(defaultValue string, args ...int) string { + if len(args) == 0 { + return gettext.PGettext("", defaultValue) + } + return fmt.Sprintf(gettext.PNGettext("", defaultValue, defaultValue+".plural", args[0]), + args[0]) +} diff --git a/pkg/util/kubernetes/kubectl/pkg/util/templates/normalizers.go b/pkg/util/kubernetes/kubectl/pkg/util/templates/normalizers.go new file mode 100644 index 00000000000..978e893045c --- /dev/null +++ b/pkg/util/kubernetes/kubectl/pkg/util/templates/normalizers.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import "strings" + +const Indentation = `  ` + +// Examples normalizes a command's examples to follow the conventions. +func Examples(s string) string { + if len(s) == 0 { + return s + } + return normalizer{s}.trim().indent().string +} + +type normalizer struct { + string +} + +func (s normalizer) trim() normalizer { + s.string = strings.TrimSpace(s.string) + return s +} + +func (s normalizer) indent() normalizer { + indentedLines := []string{} + for _, line := range strings.Split(s.string, "\n") { + trimmed := strings.TrimSpace(line) + indented := Indentation + trimmed + indentedLines = append(indentedLines, indented) + } + s.string = strings.Join(indentedLines, "\n") + return s +} diff --git a/pkg/yurtadm/README.md b/pkg/yurtadm/README.md new file mode 100644 index 00000000000..933dca00ea9 --- /dev/null +++ b/pkg/yurtadm/README.md @@ -0,0 +1,170 @@ +# Yurtadm + +## 1. Background + +In order to allow users to quickly obtain an OpenYurt test cluster, OpenYurt provides the command `yurtadm init` to initialize the cluster. Users only need to select the version of the OpenYurt cluster image to create the corresponding version of OpenYurt. The yurt-app-manager, yurt-controller-manager, yurt-tunnel-server, and yurt-tunnel-agent components will then be deployed automatically. +To expand the cluster later, users can use the `yurtadm join` command to add edge nodes or cloud nodes to the cluster. + +## 2. Capabilities + +With yurtadm, you can: +- Create a simple OpenYurt cluster with just one command. +- Create a high availability OpenYurt cluster. + +## 3. Supported Versions + +Currently, yurtadm supports: +- OpenYurt version: v0.7.0 +- k8s version: v1.19.8, v1.20.10, v1.21.14 (default) + +## 4. Process +### 4.1 Compile Yurtadm + +When initializing the cluster, you first need to obtain the yurtadm executable. If the build system has golang 1.13+ and bash installed, you can quickly build and install yurtadm by executing: + +```bash +git clone https://github.com/openyurtio/openyurt.git +cd openyurt +make build WHAT="yurtadm" ARCH="amd64" REGION=cn +``` + +The executable will be stored in the `_output/` directory. + +### 4.2 Initialize the OpenYurt Cluster + +Execute the following command as the root user. There is no need to install a container runtime such as docker in advance; docker will be installed automatically during the execution of `yurtadm init`. 
+ +```bash + # Initialize an OpenYurt cluster. + yurtadm init --apiserver-advertise-address 1.2.3.4 --openyurt-version v0.7.0 --passwd xxx + + # Initialize an OpenYurt high availability cluster. + yurtadm init --apiserver-advertise-address 1.2.3.4,1.2.3.5,1.2.3.6 --openyurt-version v0.7.0 --passwd xxx + ``` + +`yurtadm init` uses sealer to create a K8s cluster, and kubeadm, kubectl, docker, etc. will all be installed during this process. + +Note: The following components are installed during `yurtadm init`: +- kubeadm +- kubectl +- kubelet +- kube-proxy +- docker + +### 4.3 Joining Nodes to the Cluster + +Currently, you can use `kubeadm token create` to get a bootstrap token. Get the bootstrap token from the master: + +```bash +kubeadm token create +W0720 20:46:19.782354 31205 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io] +abcdef.0123456789abcdef +``` + +Before running `yurtadm join`, you need to: +- Install a container runtime (like docker) on the worker node +- Configure `/etc/docker/daemon.json` +- Copy the yurtadm binary to the node to be joined + +Edit `/etc/docker/daemon.json` to change the docker cgroup driver to systemd and to configure the sea.hub registry (when sealer creates the master, images are automatically saved in sea.hub): + +```json +{ + "exec-opts": ["native.cgroupdriver=systemd"], + "insecure-registries": [ + "sea.hub:5000" + ] +} +``` + +Restart docker: + +```bash +systemctl daemon-reload +systemctl restart docker +``` + +Then execute the `yurtadm join` command on the worker node: + +```bash +# Join the edge node to cluster. +yurtadm join 1.2.3.4:6443 --token=abcdef.0123456789abcdef --node-type=edge --discovery-token-unsafe-skip-ca-verification --v=5 + +# Join the edge node to a high availability cluster. +yurtadm join 1.2.3.4:6443,1.2.3.5:6443,1.2.3.6:6443 --token=abcdef.0123456789abcdef --node-type=edge --discovery-token-unsafe-skip-ca-verification --v=5 + +# Join the cloud node to cluster. +yurtadm join 1.2.3.4:6443 --token=abcdef.0123456789abcdef --node-type=cloud --discovery-token-unsafe-skip-ca-verification --v=5 + +# Join the cloud node to a high availability cluster. +yurtadm join 1.2.3.4:6443,1.2.3.5:6443,1.2.3.6:6443 --token=abcdef.0123456789abcdef --node-type=cloud --discovery-token-unsafe-skip-ca-verification --v=5 +``` + +Note: The following components are installed during `yurtadm join`: +- kubeadm +- kubectl +- kubelet +- kube-proxy + +### 4.4 Deleting a Joined Node + +When you need to delete a node joined using `yurtadm join`, the steps are as follows: + +On the master: + +```bash +kubectl drain {NodeName} --delete-local-data --force --ignore-daemonsets +kubectl delete node {NodeName} +``` + +On the joined node: + +1. Execute the reset process: + +```bash +yurtadm reset +``` + +2. Delete the `/etc/cni/net.d` directory: + +```bash +rm -rf /etc/cni/net.d +``` + +3. Clean up `/etc/hosts` by deleting the `sea.hub:5000` record. + +## 5. Reference + +### 5.1 yurtadm init flags + +| **Flag** | **Description** |
+| ------------------------------------------ | ------------------------------------------------------------ |
+| --apiserver-advertise-address string | The IP address the API Server will advertise it's listening on. |
+| --cluster-cidr string | Choose a CIDR range of the pods in the cluster (default "10.244.0.0/16") |
+| --image-repository string | Choose a registry to pull cluster images from (default "registry.cn-hangzhou.aliyuncs.com/openyurt") |
+| --k8s-version string | Choose a specific Kubernetes version for the control plane. (default "1.21.14") |
+| --kube-proxy-bind-address string | Choose an IP address for the proxy server to serve on (default "0.0.0.0") |
+| --openyurt-version string | Choose a specific OpenYurt version for the control plane. (default "v0.7.0") |
+| -p, --passwd string | Set the master server ssh password |
+| --pod-subnet string | PodSubnet is the subnet used by Pods. (default "10.244.0.0/16") |
+| --service-subnet string | ServiceSubnet is the subnet used by kubernetes Services. (default "10.96.0.0/12") |
+| --yurt-tunnel-server-address string | The yurt-tunnel-server address. |
+
+### 5.2 yurtadm join flags
+
+| **Flag** | **Description** |
+| --------------------------------------------- | ------------------------------------------------------------ |
+| --cri-socket string | Path to the CRI socket to connect (default "/var/run/dockershim.sock") |
+| --discovery-token-ca-cert-hash strings | For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). |
+| --discovery-token-unsafe-skip-ca-verification | For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. |
+| --ignore-preflight-errors strings | A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. |
+| --kubernetes-resource-server string | Sets the address for downloading k8s node resources (default "dl.k8s.io") |
+| --node-labels string | Sets the labels for the joining node |
+| --node-name string | Specify the node name. If not specified, the hostname will be used. |
+| --node-type string | Sets whether the node is an edge or cloud node (default "edge") |
+| --organizations string | Organizations that will be added into hub's client certificate |
+| --pause-image string | Sets the image version of the pause container (default "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2") |
+| --skip-phases strings | List of phases to be skipped |
+| --token string | Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. |
+| --yurthub-image string | Sets the image version of the yurthub component (default "registry.cn-hangzhou.aliyuncs.com/openyurt/yurthub:v0.7.0") |
\ No newline at end of file
diff --git a/pkg/yurtadm/cmd/join/join.go b/pkg/yurtadm/cmd/join/join.go index 1092f1ab71a..b0b1363785a 100644 --- a/pkg/yurtadm/cmd/join/join.go +++ b/pkg/yurtadm/cmd/join/join.go @@ -37,6 +37,8 @@ import ( "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/constants" "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/discovery/token" kubeconfigutil "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/util/kubeconfig" + "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubectl/pkg/util/i18n" + "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubectl/pkg/util/templates" "github.com/openyurtio/openyurt/pkg/yurtadm/cmd/join/joindata" yurtphase "github.com/openyurtio/openyurt/pkg/yurtadm/cmd/join/phases" yurtconstants "github.com/openyurtio/openyurt/pkg/yurtadm/constants" @@ -44,6 +46,20 @@ ) var ( + joinExample = templates.Examples(i18n.T(` + # Join the edge node to cluster. + yurtadm join 1.2.3.4:6443 --token=abcdef.0123456789abcdef --node-type=edge --discovery-token-unsafe-skip-ca-verification --v=5 + + # Join the edge node to a high availability cluster. + yurtadm join 1.2.3.4:6443,1.2.3.5:6443,1.2.3.6:6443 --token=abcdef.0123456789abcdef --node-type=edge --discovery-token-unsafe-skip-ca-verification --v=5 + + # Join the cloud node to cluster. + yurtadm join 1.2.3.4:6443 --token=abcdef.0123456789abcdef --node-type=cloud --discovery-token-unsafe-skip-ca-verification --v=5 + + # Join the cloud node to a high availability cluster. + yurtadm join 1.2.3.4:6443,1.2.3.5:6443,1.2.3.6:6443 --token=abcdef.0123456789abcdef --node-type=cloud --discovery-token-unsafe-skip-ca-verification --v=5 + `)) + joinWorkerNodeDoneMsg = dedent.Dedent(` This node has joined the cluster: * Certificate signing request was sent to apiserver and a response was received. 
@@ -91,8 +107,9 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { joinRunner := workflow.NewRunner() cmd := &cobra.Command{ - Use: "join [api-server-endpoint]", - Short: "Run this on any machine you wish to join an existing cluster", + Use: "join [api-server-endpoint]", + Short: "Run this on any machine you wish to join an existing cluster", + Example: joinExample, RunE: func(cmd *cobra.Command, args []string) error { if err := joinRunner.Run(args); err != nil { return err diff --git a/pkg/yurtadm/cmd/join/phases/joinnode.go b/pkg/yurtadm/cmd/join/phases/joinnode.go index e9702cfe17e..0b6ad36843d 100644 --- a/pkg/yurtadm/cmd/join/phases/joinnode.go +++ b/pkg/yurtadm/cmd/join/phases/joinnode.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -70,6 +71,7 @@ func runJoinNode(c workflow.RunData) error { if err != nil { return err } + if err := addYurthubStaticYaml(data, filepath.Join(constants.KubernetesDir, constants.ManifestsSubDirName)); err != nil { return err } @@ -168,8 +170,18 @@ func addYurthubStaticYaml(data joindata.YurtJoinData, podManifestPath string) er } } + // convert + // 1.2.3.4:6443,1.2.3.5:6443,1.2.3.6:6443 + // to + // https://1.2.3.4:6443,https://1.2.3.5:6443,https://1.2.3.6:6443 + serverAddrs := strings.Split(data.ServerAddr(), ",") + for i := 0; i < len(serverAddrs); i++ { + serverAddrs[i] = fmt.Sprintf("https://%s", serverAddrs[i]) + } + kubernetesServerAddrs := strings.Join(serverAddrs, ",") + ctx := map[string]string{ - "kubernetesServerAddr": fmt.Sprintf("https://%s", data.ServerAddr()), + "kubernetesServerAddr": kubernetesServerAddrs, "image": data.YurtHubImage(), "joinToken": data.JoinToken(), "workingMode": data.NodeRegistration().WorkingMode, diff --git a/pkg/yurtadm/cmd/join/phases/preflight.go b/pkg/yurtadm/cmd/join/phases/preflight.go index ee6bacff026..9ba6ab821bd 100644 --- a/pkg/yurtadm/cmd/join/phases/preflight.go +++ b/pkg/yurtadm/cmd/join/phases/preflight.go @@ -71,7 +71,7 @@ func runPreflight(c workflow.RunData) error { Discovery: kubeadmapi.Discovery{ TLSBootstrapToken: data.JoinToken(), BootstrapToken: &kubeadmapi.BootstrapTokenDiscovery{ - APIServerEndpoint: data.ServerAddr(), + APIServerEndpoint: strings.Split(data.ServerAddr(), ",")[0], Token: data.JoinToken()}, }, ControlPlane: nil, diff --git a/pkg/yurtadm/cmd/join/phases/prepare.go b/pkg/yurtadm/cmd/join/phases/prepare.go index f17386b94a2..7e31b4fa8cb 100644 --- a/pkg/yurtadm/cmd/join/phases/prepare.go +++ b/pkg/yurtadm/cmd/join/phases/prepare.go @@ -65,6 +65,10 @@ func runPrepare(c workflow.RunData) error { if err := system.SetSELinux(); err != nil { return err } + if err := system.AddSeaHubHosts(data); err != nil { + return err + } + if err := kubernetes.CheckAndInstallKubelet(data.KubernetesResourceServer(), data.KubernetesVersion()); err != nil { return err } diff --git a/pkg/yurtadm/cmd/yurtinit/init.go b/pkg/yurtadm/cmd/yurtinit/init.go index b69a6d9f2c0..a572e7e5edf 100644 --- a/pkg/yurtadm/cmd/yurtinit/init.go +++ b/pkg/yurtadm/cmd/yurtinit/init.go @@ -29,6 +29,8 @@ import ( flag "github.com/spf13/pflag" "k8s.io/klog/v2" + "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubectl/pkg/util/i18n" + "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubectl/pkg/util/templates" strutil "github.com/openyurtio/openyurt/pkg/util/strings" tmplutil "github.com/openyurtio/openyurt/pkg/util/templates" "github.com/openyurtio/openyurt/pkg/yurtadm/constants" @@ -41,23 +43,29 @@ 
const ( APIServerAdvertiseAddress = "apiserver-advertise-address" //YurttunnelServerAddress flag sets the IP address of Yurttunnel Server. YurttunnelServerAddress = "yurt-tunnel-server-address" - // NetworkingServiceSubnet flag sets the range of IP address for service VIPs. - NetworkingServiceSubnet = "service-cidr" - // NetworkingPodSubnet flag sets the range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. - NetworkingPodSubnet = "pod-network-cidr" + // NetworkingServiceSubnet flag sets the subnet used by kubernetes Services. + NetworkingServiceSubnet = "service-subnet" + // NetworkingPodSubnet flag sets the subnet used by Pods. + NetworkingPodSubnet = "pod-subnet" + // ClusterCIDR flag sets the CIDR range of the pods in the cluster. It is used to bridge traffic coming from outside of the cluster. + ClusterCIDR = "cluster-cidr" + // KubeProxyBindAddress flag sets the IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces) + KubeProxyBindAddress = "kube-proxy-bind-address" // OpenYurtVersion flag sets the OpenYurt version for the control plane. OpenYurtVersion = "openyurt-version" + // K8sVersion flag sets the Kubernetes version for the control plane. + K8sVersion = "k8s-version" // ImageRepository flag sets the container registry to pull control plane images from. ImageRepository = "image-repository" - // PassWd flag is the password of master server. + // PassWd flag sets the password of master server. PassWd = "passwd" TmpDownloadDir = "/tmp" SealerUrlFormat = "https://github.com/alibaba/sealer/releases/download/%s/sealer-%s-linux-%s.tar.gz" - DefaultSealerVersion = "v0.6.1" + DefaultSealerVersion = "v0.8.5" - InitClusterImage = "%s/openyurt-cluster:%s" + InitClusterImage = "%s/openyurt-cluster:%s-k8s-%s" SealerRunCmd = "sealer apply -f %s/Clusterfile" OpenYurtClusterfile = ` @@ -67,42 +75,67 @@ metadata: name: my-cluster spec: hosts: - - ips: - - {{.apiserver_address}} - roles: - - master + - ips: [ {{.apiserver_address}} ] + roles: [ master ] image: {{.cluster_image}} ssh: passwd: {{.passwd}} pk: /root/.ssh/id_rsa user: root env: + - PodCIDR={{.pod_subnet}} - YurttunnelServerAddress={{.yurttunnel_server_address}} + cmd_args: + - BindAddress={{.bind_address}} + - ClusterCIDR={{.cluster_cidr}} --- -apiVersion: sealer.cloud/v2 -kind: KubeadmConfig -metadata: - name: default-kubernetes-config -spec: - networking: - {{if .pod_subnet }} - podSubnet: {{.pod_subnet}} - {{end}} - {{if .service_subnet}} - serviceSubnet: {{.service_subnet}} - {{end}} - controllerManager: - extraArgs: - controllers: -nodelifecycle,*,bootstrapsigner,tokencleaner + +## Custom configurations must specify kind, will be merged to default kubeadm configs +kind: ClusterConfiguration +networking: + podSubnet: {{.pod_subnet}} + serviceSubnet: {{.service_subnet}} +controllerManager: + extraArgs: + controllers: -nodelifecycle,*,bootstrapsigner,tokencleaner ` ) var ( + initExample = templates.Examples(i18n.T(` + # Initialize an OpenYurt cluster. + yurtadm init --apiserver-advertise-address 1.2.3.4 --openyurt-version v0.7.0 --passwd xxx + + # Initialize an OpenYurt high availability cluster. 
+ yurtadm init --apiserver-advertise-address 1.2.3.4,1.2.3.5,1.2.3.6 --openyurt-version v0.7.0 --passwd xxx + `)) + ValidSealerVersions = []string{ - "v0.6.1", + //"v0.6.1", + "v0.8.5", + } + + ValidOpenYurtAndK8sVersions = []version{ + { + OpenYurtVersion: "v0.7.0", + K8sVersion: "v1.19.8", + }, + { + OpenYurtVersion: "v0.7.0", + K8sVersion: "v1.20.10", + }, + { + OpenYurtVersion: "v0.7.0", + K8sVersion: "v1.21.14", + }, } ) +type version struct { + OpenYurtVersion string + K8sVersion string +} + // clusterInitializer init a node to master of openyurt cluster type clusterInitializer struct { InitOptions @@ -114,8 +147,9 @@ func NewCmdInit() *cobra.Command { o := NewInitOptions() cmd := &cobra.Command{ - Use: "init", - Short: "Run this command in order to set up the OpenYurt control plane", + Use: "init", + Short: "Run this command in order to set up the OpenYurt control plane", + Example: initExample, RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err @@ -143,22 +177,32 @@ func addFlags(flagset *flag.FlagSet, o *InitOptions) { "The yurt-tunnel-server address.") flagset.StringVarP( &o.ServiceSubnet, NetworkingServiceSubnet, "", o.ServiceSubnet, - "Use alternative range of IP address for service VIPs.", + "ServiceSubnet is the subnet used by kubernetes Services.", ) flagset.StringVarP( &o.PodSubnet, NetworkingPodSubnet, "", o.PodSubnet, - "Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.", + "PodSubnet is the subnet used by Pods.", ) flagset.StringVarP(&o.Password, PassWd, "p", o.Password, - "set master server ssh password", + "Set master server ssh password", ) flagset.StringVarP( &o.OpenYurtVersion, OpenYurtVersion, "", o.OpenYurtVersion, `Choose a specific OpenYurt version for the control plane.`, ) + flagset.StringVarP( + &o.K8sVersion, K8sVersion, "", o.K8sVersion, + `Choose a specific Kubernetes version for the control plane.`, + ) flagset.StringVarP(&o.ImageRepository, ImageRepository, "", o.ImageRepository, "Choose a registry to pull cluster images from", ) + flagset.StringVarP(&o.ClusterCIDR, ClusterCIDR, "", o.ClusterCIDR, + "Choose a CIDR range of the pods in the cluster", + ) + flagset.StringVarP(&o.KubeProxyBindAddress, KubeProxyBindAddress, "", o.KubeProxyBindAddress, + "Choose an IP address for the proxy server to serve on", + ) } func NewInitializerWithOptions(o *InitOptions) *clusterInitializer { @@ -241,11 +285,13 @@ func (ci *clusterInitializer) PrepareClusterfile() error { clusterfile, err := tmplutil.SubsituteTemplate(OpenYurtClusterfile, map[string]string{ "apiserver_address": ci.AdvertiseAddress, - "cluster_image": fmt.Sprintf(InitClusterImage, ci.ImageRepository, ci.OpenYurtVersion), + "cluster_image": fmt.Sprintf(InitClusterImage, ci.ImageRepository, ci.OpenYurtVersion, ci.K8sVersion), "passwd": ci.Password, "pod_subnet": ci.PodSubnet, "service_subnet": ci.ServiceSubnet, "yurttunnel_server_address": ci.YurttunnelServerAddress, + "cluster_cidr": ci.ClusterCIDR, + "bind_address": ci.KubeProxyBindAddress, }) if err != nil { return err diff --git a/pkg/yurtadm/cmd/yurtinit/options.go b/pkg/yurtadm/cmd/yurtinit/options.go index 44bf1b4bfd4..697847df60a 100644 --- a/pkg/yurtadm/cmd/yurtinit/options.go +++ b/pkg/yurtadm/cmd/yurtinit/options.go @@ -19,6 +19,7 @@ package yurtinit import ( "fmt" "net" + "strings" "github.com/pkg/errors" @@ -31,27 +32,55 @@ type InitOptions struct { YurttunnelServerAddress string ServiceSubnet string PodSubnet string 
+ ClusterCIDR string Password string ImageRepository string OpenYurtVersion string + K8sVersion string + KubeProxyBindAddress string } func NewInitOptions() *InitOptions { return &InitOptions{ - ImageRepository: constants.DefaultOpenYurtImageRegistry, - OpenYurtVersion: constants.DefaultOpenYurtVersion, + ImageRepository: constants.DefaultOpenYurtImageRegistry, + OpenYurtVersion: constants.DefaultOpenYurtVersion, + K8sVersion: constants.DefaultK8sVersion, + PodSubnet: constants.DefaultPodSubnet, + ServiceSubnet: constants.DefaultServiceSubnet, + ClusterCIDR: constants.DefaultClusterCIDR, + KubeProxyBindAddress: constants.DefaultKubeProxyBindAddress, } } func (o *InitOptions) Validate() error { - if err := validateServerAddress(o.AdvertiseAddress); err != nil { + // There may be multiple ip addresses, separated by commas. + if o.AdvertiseAddress != "" { + ipArray := strings.Split(o.AdvertiseAddress, ",") + for _, ip := range ipArray { + if err := validateServerAddress(ip); err != nil { + return err + } + } + } + + if o.OpenYurtVersion == "" { + return errors.New("OpenYurtVersion cannot be empty") + } + + if o.K8sVersion == "" { + return errors.New("K8sVersion cannot be empty") + } + + if err := validateOpenYurtAndK8sVersions(o.OpenYurtVersion, o.K8sVersion); err != nil { return err } + if o.YurttunnelServerAddress != "" { if err := validateServerAddress(o.YurttunnelServerAddress); err != nil { return err } } + if o.Password == "" { return fmt.Errorf("password can't be empty.") } @@ -61,14 +90,26 @@ func (o *InitOptions) Validate() error { return err } } + if o.ServiceSubnet != "" { if err := validateCidrString(o.ServiceSubnet); err != nil { return err } } + return nil } +func validateOpenYurtAndK8sVersions(openyurtVersion string, k8sVersion string) error { + for _, version := range ValidOpenYurtAndK8sVersions { + if openyurtVersion == version.OpenYurtVersion && strings.TrimPrefix(k8sVersion, "v") == strings.TrimPrefix(version.K8sVersion, "v") { // compare k8s versions, tolerating a leading "v" + return nil + } + } + + return errors.Errorf("cannot use openyurtVersion %s with k8sVersion %s", openyurtVersion, k8sVersion) +} + func validateServerAddress(address string) error { ip := net.ParseIP(address) if ip == nil { diff --git a/pkg/yurtadm/constants/constants.go b/pkg/yurtadm/constants/constants.go index d7874cb4021..e101c75cc0f 100644 --- a/pkg/yurtadm/constants/constants.go +++ b/pkg/yurtadm/constants/constants.go @@ -24,6 +24,9 @@ const ( YurttunnelAgentComponentName = "yurt-tunnel-agent" YurttunnelNamespace = "kube-system" + EtcHostsFile = "/etc/hosts" + SealerRegistry = "sea.hub" + SysctlK8sConfig = "/etc/sysctl.d/k8s.conf" KubeletConfigureDir = "/etc/kubernetes" KubeletWorkdir = "/var/lib/kubelet" @@ -37,6 +40,7 @@ const ( YurthubStaticPodFileName = "yurthub.yaml" PauseImagePath = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2" + OpenYurtCniUrl = "https://github.com/openyurtio/openyurt/releases/download/v0.7.0/openyurt-cni-0.8.7-0.x86_64.rpm" CniUrlFormat = "https://aliacs-edge-k8s-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/public/pkg/openyurt/cni/%s/cni-plugins-linux-%s-%s.tgz" DefaultKubernetesResourceServer = "dl.k8s.io" KubeUrlFormat = "https://%s/%s/kubernetes-node-linux-%s.tar.gz" @@ -48,15 +52,21 @@ const ( DefaultOpenYurtImageRegistry = "registry.cn-hangzhou.aliyuncs.com/openyurt" DefaultOpenYurtVersion = "latest" - YurtControllerManager = "yurt-controller-manager" - YurtTunnelServer = "yurt-tunnel-server" - YurtTunnelAgent = "yurt-tunnel-agent" - Yurthub = "yurthub" - YurtAppManager = "yurt-app-manager" - YurtAppManagerNamespace = "kube-system" - DirMode = 
0755 - FileMode = 0666 - KubeletServiceContent = ` + DefaultK8sVersion = "1.21.14" + DefaultPodSubnet = "10.244.0.0/16" + DefaultServiceSubnet = "10.96.0.0/12" + DefaultClusterCIDR = "10.244.0.0/16" + DefaultKubeProxyBindAddress = "0.0.0.0" + + YurtControllerManager = "yurt-controller-manager" + YurtTunnelServer = "yurt-tunnel-server" + YurtTunnelAgent = "yurt-tunnel-agent" + Yurthub = "yurthub" + YurtAppManager = "yurt-app-manager" + YurtAppManagerNamespace = "kube-system" + DirMode = 0755 + FileMode = 0666 + KubeletServiceContent = ` [Unit] Description=kubelet: The Kubernetes Node Agent Documentation=http://kubernetes.io/docs/ diff --git a/pkg/yurtadm/util/kubernetes/util.go b/pkg/yurtadm/util/kubernetes/util.go index 3c6bcd6499c..3d69b5a6051 100644 --- a/pkg/yurtadm/util/kubernetes/util.go +++ b/pkg/yurtadm/util/kubernetes/util.go @@ -127,7 +127,7 @@ func CheckAndInstallKubelet(kubernetesResourceServer, clusterVersion string) err savePath := fmt.Sprintf("%s/kubernetes-node-linux-%s.tar.gz", constants.TmpDownloadDir, runtime.GOARCH) klog.V(1).Infof("Download kubelet from: %s", packageUrl) if err := util.DownloadFile(packageUrl, savePath, 3); err != nil { - return fmt.Errorf("Download kuelet fail: %w", err) + return fmt.Errorf("Download kubelet fail: %w", err) } if err := util.Untar(savePath, constants.TmpDownloadDir); err != nil { return err @@ -144,20 +144,20 @@ func CheckAndInstallKubelet(kubernetesResourceServer, clusterVersion string) err klog.Infof("Cni dir %s already exist, skip install.", constants.KubeCniDir) return nil } - //download and install kubernetes-cni - cniUrl := fmt.Sprintf(constants.CniUrlFormat, constants.KubeCniVersion, runtime.GOARCH, constants.KubeCniVersion) - savePath := fmt.Sprintf("%s/cni-plugins-linux-%s-%s.tgz", constants.TmpDownloadDir, runtime.GOARCH, constants.KubeCniVersion) + + // download and install openyurt-cni + cniUrl := constants.OpenYurtCniUrl + savePath := fmt.Sprintf("%s/openyurt-cni-0.8.7-0.x86_64.rpm", constants.TmpDownloadDir) klog.V(1).Infof("Download cni from: %s", cniUrl) if err := util.DownloadFile(cniUrl, savePath, 3); err != nil { return err } - - if err := os.MkdirAll(constants.KubeCniDir, 0600); err != nil { return err } - if err := util.Untar(savePath, constants.KubeCniDir); err != nil { + // rpm -ivh --nodeps --force /tmp/openyurt-cni-0.8.7-0.x86_64.rpm + // will install cni binaries to /opt/cni/bin/ + if _, err := exec.Command("rpm", "-ivh", "--nodeps", "--force", savePath).CombinedOutput(); err != nil { return err } + return nil } diff --git a/pkg/yurtadm/util/system/util.go b/pkg/yurtadm/util/system/util.go index dc00a24abb8..72410a22d70 100644 --- a/pkg/yurtadm/util/system/util.go +++ b/pkg/yurtadm/util/system/util.go @@ -20,10 +20,12 @@ import ( "fmt" "os" "os/exec" + "strings" "github.com/opencontainers/selinux/go-selinux" "k8s.io/klog/v2" + "github.com/openyurtio/openyurt/pkg/yurtadm/cmd/join/joindata" "github.com/openyurtio/openyurt/pkg/yurtadm/constants" "github.com/openyurtio/openyurt/pkg/yurtadm/util/edgenode" ) @@ -38,7 +40,7 @@ net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1` ) -//setIpv4Forward turn on the node ipv4 forward. +// SetIpv4Forward turns on ipv4 forwarding on the node. func SetIpv4Forward() error { klog.Infof("Setting ipv4 forward") if err := os.WriteFile(ip_forward, []byte("1"), 0644); err != nil { @@ -47,9 +49,9 @@ func SetIpv4Forward() error { return nil } -//setBridgeSetting turn on the node bridge-nf-call-iptables. 
+// SetBridgeSetting turns on the node bridge-nf-call-iptables setting. func SetBridgeSetting() error { - klog.Info("Setting bridge settings for kubernetes.") + klog.Info("Setting bridge settings for kubernetes") if err := os.WriteFile(constants.SysctlK8sConfig, []byte(kubernetsBridgeSetting), 0644); err != nil { return fmt.Errorf("Write file %s fail: %w ", constants.SysctlK8sConfig, err) } @@ -69,9 +71,28 @@ func SetBridgeSetting() error { return nil } -// setSELinux turn off the node selinux. +// SetSELinux turns off selinux on the node. func SetSELinux() error { - klog.Info("Disabling SELinux.") + klog.Info("Disabling SELinux") selinux.SetDisabled() return nil } + +// AddSeaHubHosts adds a record for the sea.hub registry to /etc/hosts. +// When sealer creates the master, images are automatically saved in the sea.hub registry. +func AddSeaHubHosts(data joindata.YurtJoinData) error { + klog.Info("Adding sea.hub:5000 to /etc/hosts") + f, err := os.OpenFile(constants.EtcHostsFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0660) + if err != nil { + return fmt.Errorf("Open file %s fail: %w ", constants.EtcHostsFile, err) + } + defer f.Close() + + // x.x.x.x:6443 + serverAddr := strings.Split(data.ServerAddr(), ",")[0] + // x.x.x.x + masterIpAddr := strings.Split(serverAddr, ":")[0] + + if _, err := f.WriteString(fmt.Sprintf("%s %s\n", masterIpAddr, constants.SealerRegistry)); err != nil { return fmt.Errorf("Write file %s fail: %w ", constants.EtcHostsFile, err) } + return nil +}
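The comma-separated server-address convention introduced by this patch is applied the same way at every touched call site: token-based bootstrap (token.go, preflight.go) and the sea.hub `/etc/hosts` record use only the first endpoint, while the yurthub static pod (joinnode.go) receives the full list with an `https://` scheme prepended to each endpoint. A minimal, self-contained sketch of that convention (the helper names are illustrative, not part of this patch):

```go
package main

import (
	"fmt"
	"strings"
)

// bootstrapEndpoint picks the single endpoint used for token-based
// bootstrap: the first entry of the comma-separated list, mirroring
// strings.Split(data.ServerAddr(), ",")[0] in token.go and preflight.go.
func bootstrapEndpoint(serverAddr string) string {
	return strings.Split(serverAddr, ",")[0]
}

// yurthubServerAddrs prefixes every endpoint with https:// and rejoins
// the list, mirroring the conversion done for the yurthub static pod
// manifest in addYurthubStaticYaml (joinnode.go).
func yurthubServerAddrs(serverAddr string) string {
	addrs := strings.Split(serverAddr, ",")
	for i := range addrs {
		addrs[i] = fmt.Sprintf("https://%s", addrs[i])
	}
	return strings.Join(addrs, ",")
}

func main() {
	serverAddr := "1.2.3.4:6443,1.2.3.5:6443,1.2.3.6:6443"
	fmt.Println(bootstrapEndpoint(serverAddr))
	// 1.2.3.4:6443
	fmt.Println(yurthubServerAddrs(serverAddr))
	// https://1.2.3.4:6443,https://1.2.3.5:6443,https://1.2.3.6:6443
}
```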