Timetombs

泛义的工具是文明的基础,而确指的工具却是愚人的器物

66h / 116a
更新于 2024-01-29T05:49:24+08:00 by b0bb5af

[K8S] Install

版权声明 - CC BY-NC-SA 4.0

kubeadm是k8s的command-line工具,用来创建和维护k8s集群。

1. 前提要求

| role   | hostname     | fixed ip      | os                    | cpu | memory |
| ------ | ------------ | ------------- | --------------------- | --- | ------ |
| master | k8s-master-1 | 192.168.2.211 | ubuntu server 18.04.4 | 2   | 4G     |
| worker | k8s-worker-1 | 192.168.2.212 | ubuntu server 18.04.4 | 2   | 4G     |
| worker | k8s-worker-2 | 192.168.2.213 | ubuntu server 18.04.4 | 2   | 4G     |

1.1 硬件要求

  1. 至少2核CPU。
  2. 至少2G的内存。
  3. 每个node都有固定的IP,并且可以直联(无NAT)。
# 查看CPU核心数 (kubeadm requires at least 2)
# grep -c counts matching lines directly; no need for `cat | grep | wc -l`.
grep -c '^processor' /proc/cpuinfo

# 查看内存大小 (at least 2G)
free -h

# 查看ip
ip a

1.2 软件要求

  1. hostname唯一,并且不包含._和大写字母。
  2. 为k8s相关的服务配置防火墙,这里图省事,直接关闭防火墙。
  3. 关闭Swap内存(k8s为了性能考虑,不允许开启Swap)。
# https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd
# Node preparation — run as root on EVERY node (master and workers).
# Uses `set -x` (trace only, keep going): some steps are OS-specific
# best-effort (ufw on Ubuntu, firewalld/SELinux on CentOS).

set -x

# Resolve the control-plane endpoint name to the master's fixed IP.
cat <<-EOF >> /etc/hosts
192.168.2.211 api-server.k8s.test
EOF

# 禁用防火墙 (lab shortcut; in production open only the required k8s ports)
# ubuntu
ufw disable
# centos
systemctl stop firewalld
systemctl disable firewalld


# 禁用swap now (kubelet refuses to start with swap enabled)
sudo swapoff -a
# 永久禁用swap by dropping swap entries from fstab.
# BUG FIX: `cat /etc/fstab | grep -v swap > /etc/fstab` truncates the file
# before grep reads it, wiping fstab. Edit in place instead (keeps a .bak).
cat /etc/fstab
sed -i.bak '/swap/d' /etc/fstab
cat /etc/fstab


# 永久禁用SELINUX (CentOS; note: this replaces the whole config file)
tee <<-EOF /etc/selinux/config
SELINUX=disabled
EOF

# 开启overlay和br_netfilter模块 on every boot
# overlay: containerd storage driver; br_netfilter: lets iptables see
# bridged traffic.
cat <<-EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter

# 开启ipv4 forward and bridge traffic filtering
cat <<-EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
EOF
sysctl --system

2. 安装Docker

安装Docker

3. 安装Kubeadm

# Install kubeadm/kubelet/kubectl from the Aliyun mirror — run on EVERY node.
set -eux


apt update -y
apt install -y apt-transport-https


# Trust the repository signing key.
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -


tee <<-EOF /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF


apt update -y
# 安装kubeadm kubelet kubectl at a pinned version.
# BUG FIX: apt pins versions as `pkg=version`; the previous `kubeadm-1.18.3`
# is yum syntax and asks apt for a package literally named "kubeadm-1.18.3".
apt install -y kubeadm=1.18.3-00 kubelet=1.18.3-00 kubectl=1.18.3-00
# Keep unattended upgrades from drifting the cluster version.
apt-mark hold kubeadm kubelet kubectl

4. 初始化Master节点

上述步骤需要在每个node上都执行。本步骤只需在master上执行即可。集群配置文件如下:

# Docs : https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file
# Print defaults : kubeadm config print init-defaults
# Install : kubeadm init --config kubeadm.init-config.yml --upload-certs -v=5
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
# Kubernetes version (must match the installed kubeadm/kubelet packages).
kubernetesVersion: v1.18.3
# Cluster name.
clusterName: k8s
# Registry for control-plane component images (Aliyun mirror).
imageRepository: registry.aliyuncs.com/google_containers
# Use CoreDNS as the cluster DNS.
dns:
  type: CoreDNS
# Address workers use to reach the API server (mapped in /etc/hosts).
controlPlaneEndpoint: api-server.k8s.test:6443
# Layout: nodes 192.168.0.0/16; Services 172.16.0.0/12; Pods 10.0.0.0/8.
networking:
  # Pod CIDR; a /16 allows up to 65536 pod IPs.
  # FIX: 10.1.0.1/16 had host bits set; use the network address so it
  # matches flannel's net-conf.json ("Network": "10.1.0.0/16").
  podSubnet: 10.1.0.0/16
  # Service CIDR; a /24 allows up to 256 service IPs.
  serviceSubnet: 172.16.1.0/24
  # Default cluster DNS domain.
  dnsDomain: cluster.local

初始化命令:

# Download the cluster config, then bootstrap the control plane (master only).
wget https://linianhui.github.io/k8s/install/kubeadm.init-config.yml

kubeadm init --config kubeadm.init-config.yml --upload-certs -v=5

成功后可以查看node:

kubectl get nodes

# 输出
NAME           STATUS     ROLES    AGE   VERSION
k8s-master-1   Ready      master   2m    v1.18.3

# 获取kubeadm join命令
kubeadm token create --print-join-command

5. 初始化Worker节点

本步骤只需分别在worker上执行即可。

# 示例kubeadm join命令
kubeadm join api-server.k8s.test:6443 --token opomfo.nd0dkto8ye006hda --discovery-token-ca-cert-hash sha256:da3764c85a4727de39d674f93a976c617f15f49ca11b2a68bc850c5789

成功后可以查看node:

kubectl get nodes

# 输出
NAME           STATUS   ROLES    AGE     VERSION
k8s-master-1   Ready    master   3m21s   v1.18.3
k8s-worker-1   Ready    <none>   112s    v1.18.3
k8s-worker-2   Ready    <none>   108s    v1.18.3

6. 部署网络插件

部署flannel网络插件。

kubectl apply -f https://linianhui.github.io/k8s/install/flannel.yml

参考 :

  1. https://github.com/coreos/flannel

7. 部署dashboard

部署metrics-server和dashboard。

# 检查flannel是否部署完成,部署完成后再部署dashboard。 
kubectl get pods -A

# 部署监控服务
kubectl apply -f https://linianhui.github.io/k8s/install/metrics-server.yml
# 部署dashboard
kubectl apply -f https://linianhui.github.io/k8s/install/dashboard.yml

部署完成后dashboard的端口号为30080。笔者的地址为 : http://192.168.2.211:30080

# 获取访问dashboard的token
kubectl -n kube-dashboard describe secret kube-dashboard-admin-token

参考 :

  1. https://github.com/kubernetes-sigs/metrics-server
  2. https://github.com/kubernetes/dashboard

8. Debug

kubectl run -it --image=lnhcode/tool --restart=Never --command --rm -- sh

9. Reference

sh文件列表
# https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd
# Node preparation — run as root on EVERY node (master and workers).
# Uses `set -x` (trace only, keep going): some steps are OS-specific
# best-effort (ufw on Ubuntu, firewalld/SELinux on CentOS).

set -x

# Resolve the control-plane endpoint name to the master's fixed IP.
cat <<-EOF >> /etc/hosts
192.168.2.211 api-server.k8s.test
EOF

# 禁用防火墙 (lab shortcut; in production open only the required k8s ports)
# ubuntu
ufw disable
# centos
systemctl stop firewalld
systemctl disable firewalld


# 禁用swap now (kubelet refuses to start with swap enabled)
sudo swapoff -a
# 永久禁用swap by dropping swap entries from fstab.
# BUG FIX: `cat /etc/fstab | grep -v swap > /etc/fstab` truncates the file
# before grep reads it, wiping fstab. Edit in place instead (keeps a .bak).
cat /etc/fstab
sed -i.bak '/swap/d' /etc/fstab
cat /etc/fstab


# 永久禁用SELINUX (CentOS; note: this replaces the whole config file)
tee <<-EOF /etc/selinux/config
SELINUX=disabled
EOF

# 开启overlay和br_netfilter模块 on every boot
# overlay: containerd storage driver; br_netfilter: lets iptables see
# bridged traffic.
cat <<-EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter

# 开启ipv4 forward and bridge traffic filtering
cat <<-EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
EOF
sysctl --system
# Install kubeadm/kubelet/kubectl from the Aliyun mirror — run on EVERY node.
set -eux


apt update -y
apt install -y apt-transport-https


# Trust the repository signing key.
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -


tee <<-EOF /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF


apt update -y
# 安装kubeadm kubelet kubectl at a pinned version.
# BUG FIX: apt pins versions as `pkg=version`; the previous `kubeadm-1.18.3`
# is yum syntax and asks apt for a package literally named "kubeadm-1.18.3".
apt install -y kubeadm=1.18.3-00 kubelet=1.18.3-00 kubectl=1.18.3-00
# Keep unattended upgrades from drifting the cluster version.
apt-mark hold kubeadm kubelet kubectl
yml文件列表
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# kubectl apply -f metrics-server.yml
# kubectl apply -f dashboard.yml
# kubectl -n kube-dashboard describe secret kube-dashboard-admin-token

# Dedicated namespace for all dashboard resources.
apiVersion: v1
kind: Namespace
metadata:
  name: kube-dashboard

---
# Identity the dashboard pods run under (bound to cluster-admin below).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dashboard-admin
  namespace: kube-dashboard

---
# Binds the dashboard service account to the built-in cluster-admin role.
# NOTE(review): cluster-admin grants full cluster access — acceptable for a
# lab, too broad for production.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kube-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kube-dashboard-admin
    namespace: kube-dashboard

---
# TLS certificate secret; the name is hard-coded in the dashboard binary.
apiVersion: v1
kind: Secret
metadata:
  labels:
    app: kube-dashboard
  # hard code : https://github.com/kubernetes/dashboard/blob/master/src/app/backend/auth/api/types.go#L28
  name: kubernetes-dashboard-certs
  namespace: kube-dashboard
type: Opaque

---
# CSRF token secret (starts empty); name hard-coded in the dashboard binary.
apiVersion: v1
kind: Secret
metadata:
  labels:
    app: kube-dashboard
  # hard code : https://github.com/kubernetes/dashboard/blob/master/src/app/backend/client/api/types.go#L33
  name: kubernetes-dashboard-csrf
  namespace: kube-dashboard
type: Opaque
data:
  csrf: ''

---
# Key-holder secret; name hard-coded in the dashboard binary.
apiVersion: v1
kind: Secret
metadata:
  labels:
    app: kube-dashboard
  # hard code : https://github.com/kubernetes/dashboard/blob/master/src/app/backend/auth/api/types.go#L25
  name: kubernetes-dashboard-key-holder
  namespace: kube-dashboard
type: Opaque

---
# Dashboard web UI. Serves HTTPS on container port 8443 with
# auto-generated certificates (stored in the certs secret above).
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: dashboard-ui
  name: dashboard-ui
  namespace: kube-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: dashboard-ui
  template:
    metadata:
      labels:
        app: dashboard-ui
    spec:
      containers:
        - name: dashboard-ui
          # https://hub.docker.com/r/kubernetesui/dashboard
          image: kubernetesui/dashboard:v2.3.1
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          # https://github.com/kubernetes/dashboard/blob/master/docs/common/dashboard-arguments.md
          args:
            - --auto-generate-certificates
            - --namespace=kube-dashboard
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            - name: tmp
              mountPath: /tmp
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp
          emptyDir: {}
      serviceAccountName: kube-dashboard-admin
      # Allow scheduling on the master node as well.
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---
# Exposes the UI as a NodePort: reachable on every node at port 30080.
kind: Service
apiVersion: v1
metadata:
  labels:
    app: dashboard-ui
  name: dashboard-ui
  namespace: kube-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30080
  selector:
    app: dashboard-ui

---
# Metrics scraper used by the dashboard for its usage graphs.
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          # Locked-down container: non-root, read-only root filesystem.
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kube-dashboard-admin
      nodeSelector:
        'beta.kubernetes.io/os': linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

---
# ClusterIP service the dashboard uses to reach the scraper on port 8000.
kind: Service
apiVersion: v1
metadata:
  labels:
    app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    app: dashboard-metrics-scraper
# https://github.com/coreos/flannel

---
# PodSecurityPolicy restricting what flannel pods may do: host network is
# allowed, but privileged mode and privilege escalation are denied; only the
# NET_ADMIN capability and a fixed set of host paths are permitted.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: '/etc/cni/net.d'
    - pathPrefix: '/etc/kube-flannel'
    - pathPrefix: '/run/flannel'
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
# RBAC for flannel: use its own PSP, read pods, watch nodes, and patch
# node status (for subnet allocation bookkeeping).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ''
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ''
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - nodes/status
    verbs:
      - patch
---
# Grants the role above to flannel's service account.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system
---
# Identity the flannel DaemonSet pods run under.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
# Flannel configuration consumed by the DaemonSet below.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  # CNI plugin chain installed into /etc/cni/net.d by the init container.
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }    
  # Pod network config. "Network" must equal the kubeadm podSubnet,
  # and "vxlan" is the overlay backend.
  net-conf.json: |
    {
      "Network": "10.1.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }    
---
# Flannel agent: one pod per linux/amd64 node, running on the host network.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      # Schedule only on linux/amd64 nodes.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      # Tolerate all NoSchedule taints so flannel also runs on the master.
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      # Copies the CNI config into the host's CNI directory before the
      # flannel container starts.
      initContainers:
        - name: install-cni
          image: quay-mirror.qiniu.com/coreos/flannel:v0.12.0-amd64
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay-mirror.qiniu.com/coreos/flannel:v0.12.0-amd64
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: '100m'
              memory: '50Mi'
            limits:
              cpu: '100m'
              memory: '50Mi'
          # Unprivileged, but needs NET_ADMIN to manage routes/interfaces.
          securityContext:
            privileged: false
            capabilities:
              add: ['NET_ADMIN']
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
# Docs : https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file
# Print defaults : kubeadm config print init-defaults
# Install : kubeadm init --config kubeadm.init-config.yml --upload-certs -v=5
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
# Kubernetes version (must match the installed kubeadm/kubelet packages).
kubernetesVersion: v1.18.3
# Cluster name.
clusterName: k8s
# Registry for control-plane component images (Aliyun mirror).
imageRepository: registry.aliyuncs.com/google_containers
# Use CoreDNS as the cluster DNS.
dns:
  type: CoreDNS
# Address workers use to reach the API server (mapped in /etc/hosts).
controlPlaneEndpoint: api-server.k8s.test:6443
# Layout: nodes 192.168.0.0/16; Services 172.16.0.0/12; Pods 10.0.0.0/8.
networking:
  # Pod CIDR; a /16 allows up to 65536 pod IPs.
  # FIX: 10.1.0.1/16 had host bits set; use the network address so it
  # matches flannel's net-conf.json ("Network": "10.1.0.0/16").
  podSubnet: 10.1.0.0/16
  # Service CIDR; a /24 allows up to 256 service IPs.
  serviceSubnet: 172.16.1.0/24
  # Default cluster DNS domain.
  dnsDomain: cluster.local
# https://github.com/kubernetes-sigs/metrics-server/tree/master/deploy/1.8%2B
# kubectl apply -f metrics-server.yml
---
# Identity the metrics-server pod runs under.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server-admin
  namespace: kube-system

---
# Binds the metrics-server service account to cluster-admin.
# NOTE(review): broader than upstream's dedicated role — fine for a lab.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server-admin-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: metrics-server-admin
    namespace: kube-system

---
# metrics-server: aggregates node/pod resource usage for the metrics API.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server-admin
      volumes:
        # mount in tmp so we can safely use from-scratch images and/or read-only containers
        - name: tmp-dir
          emptyDir: {}
      containers:
        - name: metrics-server
          image: registry.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
          # NOTE(review): --kubelet-insecure-tls skips kubelet certificate
          # verification — convenient in a lab, not for production.
          args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP
            - --kubelet-insecure-tls
          ports:
            - name: main-port
              containerPort: 4443
              protocol: TCP
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          imagePullPolicy: Always
          volumeMounts:
            - name: tmp-dir
              mountPath: /tmp
      nodeSelector:
        beta.kubernetes.io/os: linux

---
# ClusterIP service in front of the metrics-server pod.
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: 'Metrics-server'
    kubernetes.io/cluster-service: 'true'
spec:
  selector:
    k8s-app: metrics-server
  ports:
    - port: 443
      protocol: TCP
      targetPort: main-port

---
# Registers metrics.k8s.io/v1beta1 with the API aggregation layer, routing
# those API calls to the metrics-server service above.
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  # NOTE(review): TLS verification of the backend service is disabled.
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100

https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init

https://kuboard.cn/install/install-k8s.html

下一篇 : [K8S] Networking - Flannel