Environment

Role                      Hostname   Internal IP  Cluster IP    OS      Services                                                Runtime dir
deploy host / k8s-master  master120  10.0.4.120   -             CentOS  kube-apiserver kube-scheduler kube-controller-manager   /opt/kubernetes/
etcd-node                 etcd121    10.0.4.121   10.10.10.121  CentOS  etcd                                                    /opt/etcd/
etcd-node                 etcd122    10.0.4.122   10.10.10.122  CentOS  etcd                                                    /opt/etcd/
etcd-node                 etcd123    10.0.4.123   10.10.10.123  CentOS  etcd                                                    /opt/etcd/
k8s-node                  node124    10.0.4.124   -             CentOS  docker flannel kubelet kube-proxy                       /opt/kubernetes/
k8s-node                  node125    10.0.4.125   -             CentOS  docker flannel kubelet kube-proxy                       /opt/kubernetes/
k8s-node                  node126    10.0.4.126   -             CentOS  docker flannel kubelet kube-proxy                       /opt/kubernetes/

Preparation

  • On all servers, disable firewalld and selinux and turn off swap; the deploy host (master120) must be able to ssh into every other server without a password (a command sketch follows this list)
  • Software versions
    • CentOS: 7.7
    • etcd: 3.3.18
    • docker: ce-19.03.5
    • flannel: 0.11.0
    • kubernetes: 1.17.2
  • Kubernetes involves several network segments; to lay them out:
    • 10.0.4.0/24 — the address range of the servers' physical NICs; internet access goes through these addresses
    • 10.10.10.0/24 — a made-up range, also configured on the physical NICs; used only for communication between etcd cluster members and unrelated to the k8s cluster
    • 10.10.9.0/24 — a made-up range from which k8s service clusterIPs are allocated
    • 172.17.0.0/16 — a made-up range; it is the docker bridge's native segment and the one flannel hands out, giving containers on different nodes connectivity
    • 172.16.0.0/16 — a made-up range used as the k8s pod CIDR (kube-proxy's --cluster-cidr below), which distinguishes the source of traffic
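
  • A sketch of these preparation commands, assuming CentOS 7 defaults; run them on every server, and run the ssh loop on master120 only (adjust the "10.0.4.*" addresses first):

    # Disable firewalld and selinux (the selinux change takes full effect after a reboot)
    systemctl stop firewalld
    systemctl disable firewalld
    setenforce 0
    sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
    # Disable swap, now and on boot
    swapoff -a
    sed -i '/ swap / s/^/#/' /etc/fstab
    # On master120 only: distribute a key for passwordless ssh
    ssh-keygen -t rsa
    for ip in $(seq -f'10.0.4.%g' 121 126); do ssh-copy-id root@$ip; done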

Deploy the etcd cluster

  • Perform the steps below on the deploy host (master120)

  • Create the etcd deployment directories

    mkdir /home/deploy/etcd/{bin,cfg,ssl} -p
    mkdir /home/deploy/ssl/etcd -p
    
  • Install the cfssl tools

    curl -o /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    curl -o /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    curl -o /usr/local/bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    chmod 0755 /usr/local/bin/cfssl*
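
  • Optionally verify the tools are on the PATH (the version printed depends on the build you downloaded):

    cfssl version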
    
  • Create ca-config.json

    cat > /home/deploy/ssl/etcd/ca-config.json <<< '
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "etcd": {
            "expiry": "87600h",
            "usages": [
              "signing",
              "key encipherment",
              "server auth",
              "client auth"
            ]
          }
        }
      }
    }
    '
    
  • Create ca-csr.json

    cat > /home/deploy/ssl/etcd/ca-csr.json <<< '
    {
      "CN": "etcd",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing"
        }
      ]
    }
    '
    
  • Create server-csr.json

    # Adjust the "10.0.4.*" and "10.10.10.*" addresses to match your environment
    cat > /home/deploy/ssl/etcd/server-csr.json <<< '
    {
      "CN": "etcd",
      "hosts": [
        "10.0.4.121",
        "10.0.4.122",
        "10.0.4.123",
        "10.10.10.121",
        "10.10.10.122",
        "10.10.10.123",
        "127.0.0.1"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing"
        }
      ]
    }
    '
    
  • Generate the certificates

    cd /home/deploy/ssl/etcd/
    # Generate ca.pem and ca-key.pem
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    # Generate server.pem and server-key.pem
    cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
        -profile=etcd \
        server-csr.json | cfssljson -bare server
    # Copy the certificates into the deployment directory
    scp *.pem /home/deploy/etcd/ssl/
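
  • Optionally inspect the issued server certificate, for instance to confirm its SAN list covers every address configured above:

    cfssl-certinfo -cert server.pem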
    
  • Download the etcd binary release

    cd /home/deploy/
    curl -L -O https://github.com/etcd-io/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
    tar zxf etcd-v3.3.18-linux-amd64.tar.gz
    scp etcd-v3.3.18-linux-amd64/{etcd,etcdctl} etcd/bin/
    
  • Create the etcd configuration file

    # The etcd VMs here each have two NICs: one serves client traffic, the other carries cluster traffic
    # Adjust the "10.0.4.*" and "10.10.10.*" addresses to match your environment
    cat > /home/deploy/etcd/cfg/etcd <<<'
    # [Member]
    ETCD_NAME="etcdXXX"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://10.10.10.XXX:2380"
    ETCD_LISTEN_CLIENT_URLS="https://10.0.4.XXX:2379"
    
    # [Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.10.10.XXX:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://10.0.4.XXX:2379"
    ETCD_INITIAL_CLUSTER="etcd121=https://10.10.10.121:2380,etcd122=https://10.10.10.122:2380,etcd123=https://10.10.10.123:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="my-etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    '
    
  • Create etcd.service

    cat > /home/deploy/etcd.service <<<'
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    EnvironmentFile=/opt/etcd/cfg/etcd
    ExecStart=/opt/etcd/bin/etcd \
        --name=${ETCD_NAME} \
        --data-dir=${ETCD_DATA_DIR} \
        --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
        --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
        --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
        --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
        --initial-cluster=${ETCD_INITIAL_CLUSTER} \
        --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
        --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} \
        --cert-file=/opt/etcd/ssl/server.pem \
        --key-file=/opt/etcd/ssl/server-key.pem \
        --peer-cert-file=/opt/etcd/ssl/server.pem \
        --peer-key-file=/opt/etcd/ssl/server-key.pem \
        --trusted-ca-file=/opt/etcd/ssl/ca.pem \
        --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Deploy to the three remote etcd nodes (etcd121, etcd122, etcd123)

    # Adjust the "10.0.4.*" addresses to match your environment
    cd /home/deploy
    for id in $(seq 121 123); do
        ip="10.0.4.$id"
        scp -r etcd $ip:/opt/
        ssh $ip "sed -i 's/XXX/$id/g' /opt/etcd/cfg/etcd"
        scp etcd.service $ip:/usr/lib/systemd/system/
        systemctl -H $ip daemon-reload
        systemctl -H $ip enable etcd
    done
    
  • Start the etcd service on the three etcd nodes

    # Adjust the "10.0.4.*" addresses to match your environment
    for ip in $(seq -f'10.0.4.%g' 121 123); do
        systemctl -H $ip start etcd
    done
    
  • Check the etcd cluster status

    # Adjust the "10.0.4.*" addresses to match your environment
    cd /home/deploy/etcd/ssl
    ../bin/etcdctl \
        --ca-file=ca.pem \
        --cert-file=server.pem \
        --key-file=server-key.pem \
        --endpoints="https://10.0.4.121:2379" \
        cluster-health
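
  • With all three members up, each is reported healthy and the output ends roughly as follows (member IDs will differ in your environment):

    member 6e3bd23ae5f1eae0 is healthy: got healthy result from https://10.0.4.121:2379
    ...
    cluster is healthy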
    

Install Docker

  • Perform the steps below on every k8s node server (node124, node125, node126)
  • Install docker-ce; a minimal sketch follows
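
  • An install sketch, assuming each node can reach the upstream docker-ce repository (the repo URL and package names below are docker's published defaults, not taken from this guide):

    yum install -y yum-utils
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    yum install -y docker-ce-19.03.5 docker-ce-cli-19.03.5 containerd.io
    systemctl enable docker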

Deploy the Flannel network

  • Perform the steps below on the deploy host (master120)

  • Create the flannel deployment directories

    cd /home/deploy
    mkdir flannel/{bin,cfg,ssl} -p
    # Copy the etcd certificates into the flannel certificate directory
    rm -rf flannel/ssl/etcd && scp -r etcd/ssl flannel/ssl/etcd
    
  • Connect to etcd and write in the predefined subnet

    # The predefined network here is "172.17.0.0/16"; it is the recommended choice since it matches docker's native bridge segment
    # Adjust the "10.0.4.*" addresses to match your environment
    cd /home/deploy/etcd/ssl
    ../bin/etcdctl \
        --ca-file=ca.pem \
        --cert-file=server.pem \
        --key-file=server-key.pem \
        --endpoints="https://10.0.4.122:2379" \
        set /coreos.com/network/config '{"Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
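
  • Read the key back to confirm it was written, using the same certificates:

    ../bin/etcdctl \
        --ca-file=ca.pem \
        --cert-file=server.pem \
        --key-file=server-key.pem \
        --endpoints="https://10.0.4.122:2379" \
        get /coreos.com/network/config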
    
  • Download the flannel binary release

    cd /home/deploy
    curl -L -O https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
    tar zxf flannel-v0.11.0-linux-amd64.tar.gz
    scp flanneld mk-docker-opts.sh flannel/bin/
    
  • Create the flannel configuration file

    # Adjust the "10.0.4.*" addresses to match your environment
    cat > /home/deploy/flannel/cfg/flanneld <<< '
    FLANNEL_OPTIONS=" \
        --etcd-endpoints=https://10.0.4.121:2379,https://10.0.4.122:2379,https://10.0.4.123:2379 \
        --etcd-cafile=/opt/kubernetes/ssl/etcd/ca.pem \
        --etcd-certfile=/opt/kubernetes/ssl/etcd/server.pem \
        --etcd-keyfile=/opt/kubernetes/ssl/etcd/server-key.pem \
    "
    '
    
  • Create flanneld.service

    cat > /home/deploy/flanneld.service <<< '
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service
    
    [Service]
    Type=notify
    EnvironmentFile=/opt/kubernetes/cfg/flanneld
    ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
    ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Modify docker.service so docker starts on the flannel-assigned subnet

    # The key line is "EnvironmentFile=/run/flannel/subnet.env"
    # That file contains the docker startup options generated by the flanneld service
    # Recorded here for reference only; the actual edit is scripted in the deployment step below, with the resulting unit sketched underneath
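
  • For reference, a sketch of the relevant part of docker.service after the sed in the next step runs; the ExecStart line shown is docker-ce 19.03's stock one and may differ on your system:

    [Service]
    Type=notify
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock $DOCKER_NETWORK_OPTIONS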
    
  • Deploy to the three remote k8s nodes (node124, node125, node126)

    # Adjust the "10.0.4.*" addresses to match your environment
    cd /home/deploy
    for ip in $(seq -f'10.0.4.%g' 124 126); do
        systemctl -H $ip stop docker
        ssh $ip "mkdir /opt/kubernetes"
        scp -r flannel/* $ip:/opt/kubernetes/
        scp flanneld.service $ip:/usr/lib/systemd/system/
        ssh $ip 'sed -i \
            -e "/^Type/aEnvironmentFile=/run/flannel/subnet.env" \
            -e "/^ExecStart/s/$/\$DOCKER_NETWORK_OPTIONS/" \
            /usr/lib/systemd/system/docker.service \
        '
        systemctl -H $ip daemon-reload
        systemctl -H $ip enable flanneld
    done
    
  • Start the flanneld and docker services on the three k8s nodes

    # Adjust the "10.0.4.*" addresses to match your environment
    for ip in $(seq -f'10.0.4.%g' 124 126); do
        systemctl -H $ip start flanneld
        systemctl -H $ip start docker
    done
    
  • Once started, the docker0 addresses on different nodes can reach each other; a quick check is sketched below
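
  • A verification sketch; the 172.17.x.x address is an example, since flannel assigns each node's /24 dynamically:

    # On any node, see which subnet flannel handed to docker
    cat /run/flannel/subnet.env
    ip addr show docker0
    # Then ping the docker0 address of a different node, for example:
    ping -c 3 172.17.63.1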

Deploy the k8s master

  • Before deploying, make sure the etcd cluster, the flannel network, and docker are all healthy

  • Perform the steps below on the deploy host (master120, i.e. the current node)

  • Create the master deployment directories

    cd /home/deploy
    mkdir master/{bin,cfg,ssl} -p
    mkdir ssl/master -p
    # Copy the etcd certificates into the master certificate directory
    rm -rf master/ssl/etcd && scp -r etcd/ssl master/ssl/etcd
    
  • Create ca-config.json

    cat > /home/deploy/ssl/master/ca-config.json <<< '
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ]
          }
        }
      }
    }
    '
    
  • Create ca-csr.json

    cat > /home/deploy/ssl/master/ca-csr.json <<< '
    {
        "CN": "kubernetes",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    '
    
  • Create kube-apiserver-csr.json

    # 10.10.9.1 here is the cluster address of the kubernetes service
    # By default it is the first IP of the service-cluster-ip-range segment configured further below
    # The DNS add-on relies on it
    # Adjust the "10.0.4.*" addresses to match your environment
    cat > /home/deploy/ssl/master/kube-apiserver-csr.json <<< '
    {
      "CN": "kubernetes",
      "hosts": [
        "127.0.0.1",
        "10.0.4.120",
        "10.10.9.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
      ],
      "key": {
          "algo": "rsa",
          "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    '
    
  • Create kube-proxy-csr.json

    cat > /home/deploy/ssl/master/kube-proxy-csr.json <<< '
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    '
    
  • Generate the certificates

    cd /home/deploy/ssl/master
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    # Copy the certificates into the deployment directory
    scp *.pem /home/deploy/master/ssl/
    
  • Download the kubernetes binary release

    cd /home/deploy
    curl -L -O https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz
    tar zxf kubernetes-server-linux-amd64.tar.gz
    cd kubernetes/server/bin
    scp kube-apiserver kube-scheduler kube-controller-manager kubectl /home/deploy/master/bin/
    
  • Create the token file

    # The first field is an arbitrary random token; a way to generate one is sketched below
    echo '1111222233334444aaaabbbbccccdddd,kubelet-bootstrap,10001,"system:kubelet-bootstrap"' > /home/deploy/master/cfg/token.csv
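
  • The token can be generated rather than invented; a minimal sketch that produces a 32-character hex string:

    head -c 16 /dev/urandom | od -An -t x | tr -d ' '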
    
  • Create the kube-apiserver configuration file

    # service-cluster-ip-range is the k8s service address range; use a segment unreachable from the existing network
    # Adjust the "10.0.4.*" addresses to match your environment
    cat > /home/deploy/master/cfg/kube-apiserver <<< '
    KUBE_APISERVER_OPTS=" \
        --logtostderr=true \
        --v=4 \
        --etcd-servers=https://10.0.4.121:2379,https://10.0.4.122:2379,https://10.0.4.123:2379 \
        --bind-address=10.0.4.120 \
        --secure-port=6443 \
        --advertise-address=10.0.4.120 \
        --allow-privileged=true \
        --service-cluster-ip-range=10.10.9.0/24 \
        --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
        --authorization-mode=RBAC,Node \
        --enable-bootstrap-token-auth \
        --token-auth-file=/opt/kubernetes/cfg/token.csv \
        --service-node-port-range=30000-50000 \
        --tls-cert-file=/opt/kubernetes/ssl/kube-apiserver.pem \
        --tls-private-key-file=/opt/kubernetes/ssl/kube-apiserver-key.pem \
        --client-ca-file=/opt/kubernetes/ssl/ca.pem \
        --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
        --etcd-cafile=/opt/kubernetes/ssl/etcd/ca.pem \
        --etcd-certfile=/opt/kubernetes/ssl/etcd/server.pem \
        --etcd-keyfile=/opt/kubernetes/ssl/etcd/server-key.pem \
    "
    '
    
  • Create kube-apiserver.service

    cat > /home/deploy/kube-apiserver.service <<< '
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
    ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Create the kube-scheduler configuration file

    cat > /home/deploy/master/cfg/kube-scheduler <<< '
    KUBE_SCHEDULER_OPTS=" \
        --logtostderr=true \
        --v=4 \
        --master=127.0.0.1:8080 \
        --leader-elect \
    "
    '
    
  • Create kube-scheduler.service

    cat > /home/deploy/kube-scheduler.service <<< '
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
    ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Create the kube-controller-manager configuration file

    # Note that cluster-name is set to "my_k8s_cluster"; this name is used again later
    # service-cluster-ip-range is the k8s service address range, the same segment configured earlier
    cat > /home/deploy/master/cfg/kube-controller-manager <<< '
    KUBE_CONTROLLER_MANAGER_OPTS=" \
        --logtostderr=true \
        --v=4 \
        --master=127.0.0.1:8080 \
        --leader-elect=true \
        --address=127.0.0.1 \
        --service-cluster-ip-range=10.10.9.0/24 \
        --cluster-name=my_k8s_cluster \
        --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
        --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \
        --root-ca-file=/opt/kubernetes/ssl/ca.pem \
        --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
    "
    '
    
  • Create kube-controller-manager.service

    cat > /home/deploy/kube-controller-manager.service <<< '
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
    ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Deploy into the runtime directory

    cd /home/deploy
    mkdir -p /opt/kubernetes
    scp -r master/* /opt/kubernetes/
    scp kube-apiserver.service kube-scheduler.service kube-controller-manager.service /usr/lib/systemd/system/
    systemctl daemon-reload
    systemctl enable kube-apiserver kube-scheduler kube-controller-manager
    ln -sf /opt/kubernetes/bin/kubectl /usr/local/bin/
    
  • Start the k8s master components

    systemctl start kube-apiserver
    systemctl start kube-scheduler
    systemctl start kube-controller-manager
    
  • Check the status of the cluster components

    kubectl get cs
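
  • On a healthy master the output looks roughly like this (a sketch for v1.17 with a three-member etcd cluster; exact formatting may vary):

    NAME                 STATUS    MESSAGE             ERROR
    scheduler            Healthy   ok
    controller-manager   Healthy   ok
    etcd-0               Healthy   {"health":"true"}
    etcd-1               Healthy   {"health":"true"}
    etcd-2               Healthy   {"health":"true"}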
    

Deploy the k8s nodes

  • Perform the steps below on the deploy host (master120)

  • Create the node deployment directories

    cd /home/deploy
    mkdir node/{bin,cfg,ssl} -p
    scp kubernetes/server/bin/{kubelet,kube-proxy} node/bin/
    
  • Bind the kubelet-bootstrap user to the system:node-bootstrapper cluster role

    kubectl create clusterrolebinding kubelet-bootstrap \
        --clusterrole=system:node-bootstrapper \
        --user=kubelet-bootstrap
    
  • Generate bootstrap.kubeconfig

    export BOOTSTRAP_TOKEN=1111222233334444aaaabbbbccccdddd
    export KUBE_APISERVER="https://10.0.4.120:6443"
    cd /home/deploy/master/ssl
    # Set the cluster parameters; the cluster named here is the "my_k8s_cluster" configured earlier
    kubectl config set-cluster my_k8s_cluster \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=/home/deploy/node/cfg/bootstrap.kubeconfig
    # Set the client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
        --token=${BOOTSTRAP_TOKEN} \
        --kubeconfig=/home/deploy/node/cfg/bootstrap.kubeconfig
    # Set the context parameters
    kubectl config set-context default \
        --cluster=my_k8s_cluster \
        --user=kubelet-bootstrap \
        --kubeconfig=/home/deploy/node/cfg/bootstrap.kubeconfig
    # Set the default context
    kubectl config use-context default --kubeconfig=/home/deploy/node/cfg/bootstrap.kubeconfig
    
  • Generate kube-proxy.kubeconfig

    export KUBE_APISERVER="https://10.0.4.120:6443"
    cd /home/deploy/master/ssl
    # Set the cluster parameters; the cluster named here is the "my_k8s_cluster" configured earlier
    kubectl config set-cluster my_k8s_cluster \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=/home/deploy/node/cfg/kube-proxy.kubeconfig
    kubectl config set-credentials kube-proxy \
        --client-certificate=kube-proxy.pem \
        --client-key=kube-proxy-key.pem \
        --embed-certs=true \
        --kubeconfig=/home/deploy/node/cfg/kube-proxy.kubeconfig
    kubectl config set-context default \
        --cluster=my_k8s_cluster \
        --user=kube-proxy \
        --kubeconfig=/home/deploy/node/cfg/kube-proxy.kubeconfig
    kubectl config use-context default --kubeconfig=/home/deploy/node/cfg/kube-proxy.kubeconfig
    
  • Create the kubelet configuration file

    # --kubeconfig points at the kubeconfig file location; the file is generated automatically
    # --cert-dir is where issued certificates are stored
    # Adjust the "10.0.4.*" addresses to match your environment
    # "XXX" is kept as a placeholder; the deployment loop below substitutes it
    cat > /home/deploy/node/cfg/kubelet <<< '
    KUBELET_OPTS=" \
        --logtostderr=true \
        --v=4 \
        --hostname-override=10.0.4.XXX \
        --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
        --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
        --config=/opt/kubernetes/cfg/kubelet.config \
        --cert-dir=/opt/kubernetes/ssl \
        --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
    "
    '
    # The pause image above can be pre-loaded into a private docker registry on the local network
    
  • Create the kubelet.config file

    # clusterDNS is the cluster address of the DNS service; 10.10.9.2 is allocated here
    # Adjust the "10.0.4.*" addresses to match your environment
    # "XXX" is kept as a placeholder; the deployment loop below substitutes it
    cat > /home/deploy/node/cfg/kubelet.config <<< '
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 10.0.4.XXX
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.10.9.2"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    '
    
  • Create kubelet.service

    cat > /home/deploy/kubelet.service <<< '
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service
    
    [Service]
    EnvironmentFile=/opt/kubernetes/cfg/kubelet
    ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Create the kube-proxy configuration file

    # cluster-cidr is the pod IP address range
    # Adjust the "10.0.4.*" addresses to match your environment
    # "XXX" is kept as a placeholder; the deployment loop below substitutes it
    cat > /home/deploy/node/cfg/kube-proxy <<< '
    KUBE_PROXY_OPTS=" \
        --logtostderr=true \
        --v=4 \
        --hostname-override=10.0.4.XXX \
        --cluster-cidr=172.16.0.0/16 \
        --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
    "
    '
    
  • Create kube-proxy.service

    cat > /home/deploy/kube-proxy.service <<< '
    [Unit]
    Description=Kubernetes Proxy
    After=network.target
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
    ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    '
    
  • Deploy to the three remote k8s nodes (node124, node125, node126)

    # Adjust the "10.0.4.*" addresses to match your environment
    cd /home/deploy
    for id in $(seq 124 126); do
        ip="10.0.4.$id"
        scp -r node/* $ip:/opt/kubernetes/
        ssh $ip "sed -i 's/XXX/$id/g' /opt/kubernetes/cfg/kubelet"
        ssh $ip "sed -i 's/XXX/$id/g' /opt/kubernetes/cfg/kubelet.config"
        ssh $ip "sed -i 's/XXX/$id/g' /opt/kubernetes/cfg/kube-proxy"
        scp kubelet.service kube-proxy.service $ip:/usr/lib/systemd/system/
        systemctl -H $ip daemon-reload
        systemctl -H $ip enable kubelet kube-proxy
    done
    
  • Start the kubelet and kube-proxy services on the three k8s nodes

    # Adjust the "10.0.4.*" addresses to match your environment
    for ip in $(seq -f'10.0.4.%g' 124 126); do
        systemctl -H $ip start kubelet
        systemctl -H $ip start kube-proxy
    done
    
  • Approve nodes joining the cluster

    kubectl get csr
    # "XXXX" 上一命令输出的 NAME 列
    kubectl certificate approve XXXX
    kubectl get node
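
  • To approve every pending request at once, a one-liner sketch (it assumes all pending CSRs are trusted):

    kubectl get csr -o name | xargs -r kubectl certificate approve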
    
  • Check the cluster status

    kubectl get node
    kubectl get cs
    

Deploy other add-ons

Run a test example

  • None yet
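
  • Until a proper example is written up, a minimal smoke test sketch; the deployment name and image are illustrative, not part of this guide:

    # Run nginx and expose it as a NodePort service
    kubectl create deployment nginx-test --image=nginx:alpine
    kubectl expose deployment nginx-test --port=80 --type=NodePort
    kubectl get pods -o wide
    # Note the assigned NodePort, then curl any node's IP on that port
    kubectl get svc nginx-test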