
Deploying a Kubernetes Cluster on 天翼云 (CTYun) from Binaries

1 Environment Preparation

1.1 Node Information
Hostname     IP Address    OS                        Hardware
k8s-master   192.168.0.5   CentOS 7.9 (kernel 3.10)  2 CPU cores, 4 GB RAM, 40 GB disk
k8s-worker1  192.168.0.6   CentOS 7.9 (kernel 3.10)  2 CPU cores, 4 GB RAM, 40 GB disk
k8s-worker2  192.168.0.7   CentOS 7.9 (kernel 3.10)  2 CPU cores, 4 GB RAM, 40 GB disk
1.2 Software Versions
Software       Version
Docker         docker-20.10.9-ce
Kubernetes     v1.22.17
Etcd           v3.5.1
Flannel        v0.16.1
CoreDNS        1.9.4 (per the manifest in 10.2)
Dashboard      v2.3.1
Ingress-nginx  v1.1.1 (per the manifest in 10.3)
cfssl          R1.2
1.3 Component Placement
Node         IP Address    Components
k8s-master   192.168.0.5   kube-apiserver, kube-controller-manager, kube-scheduler, docker, kubelet, kubectl, kube-proxy, etcd
k8s-worker1  192.168.0.6   docker, kubelet, kube-proxy, etcd
k8s-worker2  192.168.0.7   docker, kubelet, kube-proxy, etcd

2 Basic Environment Configuration
2.1 Set Hostnames
# Run the matching command on its own node
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-worker1
hostnamectl set-hostname k8s-worker2
2.2 Create Working Directories
# Run on both master and workers
mkdir -p /opt/etcd/{bin,cfg,ssl}
mkdir -p /opt/k8s/{bin,cfg,ssl,logs,yaml}
mkdir -p /data/TLS/{etcd,k8s}
2.3 Local Name Resolution
# Run on both master and workers
cat >> /etc/hosts << EOF
192.168.0.2 host
192.168.0.5 k8s-master
192.168.0.6 k8s-worker1
192.168.0.7 k8s-worker2
192.168.0.5 etcd-1
192.168.0.6 etcd-2
192.168.0.7 etcd-3
EOF
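
A quick optional check that the new names resolve (getent reads /etc/hosts directly):
getent hosts k8s-master etcd-1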

3 System Configuration
3.1 Kernel Parameters for IPVS Mode
# Run on both master and workers
# Kernel parameters required for IPVS mode
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# Apply the settings
sysctl --system   # or: sysctl -p /etc/sysctl.d/k8s.conf

# Install the userspace tools required for IPVS mode
yum -y install ipvsadm ipset conntrack-tools
3.2 Disable Swap
# Run on both master and workers
# Turn off swap for the running system
swapoff -a

# Comment out the swap entry in fstab to disable it permanently (edit /etc/fstab with vim, or use sed)
sed -i '/swap/ s/^/#/' /etc/fstab

# Confirm swap is disabled
free -h
3.3 Disable SELinux
# Run on both master and workers
# Set SELinux to permissive for the running system
setenforce 0

# Change enforcing to disabled permanently (edit /etc/selinux/config with vim, or use sed)
sed -i 's,SELINUX=enforcing,SELINUX=disabled,g' /etc/selinux/config

# Check the current SELinux state
getenforce
3.4 Disable firewalld
# Run on both master and workers
# Stop firewalld
systemctl stop firewalld

# Prevent firewalld from starting at boot
systemctl disable firewalld

# Check the current firewalld status
systemctl status firewalld

# Alternatively, remove firewalld entirely
yum remove -y firewalld
3.5 Passwordless SSH
# Run on the master only
# Generate a key pair (press Enter through all prompts)
ssh-keygen

# Copy the public key to every node
for i in {5..7}
do
ssh-copy-id 192.168.0.$i
done

# Answer "yes" and enter each node's password when prompted
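
Optionally verify key-based login before moving on; each command should print the remote hostname without a password prompt (BatchMode makes ssh fail rather than prompt):
for i in {5..7}; do ssh -o BatchMode=yes 192.168.0.$i hostname; done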
3.6 Time Synchronization
# Run on both master and workers, with the jump host (192.168.0.2) as the NTP server
# On the jump host
yum -y install ntp ntpdate
vim /etc/ntp.conf                # add "server 127.127.1.0" and "fudge 127.127.1.0 stratum 10" to serve the local clock
systemctl restart ntpd.service   # restart the service
systemctl status ntpd            # check the status
systemctl enable ntpd            # enable at boot

# On the nodes being synchronized
yum -y install ntp ntpdate
vim /etc/ntp.conf                # optional: add "server 192.168.0.2" if you prefer running ntpd instead of the cron job
systemctl stop ntpd.service      # ntpdate cannot bind port 123 while ntpd is running
ntpdate 192.168.0.2              # sync the clock once
crontab -e                       # add: 0 */5 * * * ntpdate 192.168.0.2   (every 5 hours; "* */5 * * *" would run every minute)
3.7 Kernel Module Loading
# Run on both master and workers (on kernels >= 4.19 the conntrack module is nf_conntrack, not nf_conntrack_ipv4)
cat > /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
ipip
EOF

# Load the modules now and enable loading at boot
systemctl restart systemd-modules-load
systemctl enable systemd-modules-load

# Verify the modules are loaded
lsmod | grep ip_vs
3.8 Raise Open-File Limits
# Run on both master and workers
# Append the following to the limits file
vim /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655650
* hard nproc 655650
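
These limits are applied by pam_limits at the next login; a quick check from a fresh shell:
ulimit -n    # expect 655360
ulimit -u    # expect 655650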

4 Install the cfssl Certificate Tools
4.1 Install the Tools
# Run on both master and workers
# Install wget
yum -y install wget

# Download the cfssl tools with wget
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
# On "failed: Connection refused", wait a moment and retry; the redirect sometimes times out
# (the same binaries are also published at https://github.com/cloudflare/cfssl/releases)

# Make the binaries executable
chmod +x cfssl*

# Move them into a directory on $PATH
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

# Generate template configs (for reference)
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json

5 Deploy the etcd Cluster
5.1 etcd Cluster Nodes
Node     IP Address
etcd-1   192.168.0.5
etcd-2   192.168.0.6
etcd-3   192.168.0.7
5.2 Self-Signed CA Certificate
# Run on the master only; distribute and adjust afterwards
# Enter the certificate working directory
cd /data/TLS/etcd

# CA signing policy
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "etcd": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

# Write the CSR request file
cat > ca-csr.json << EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Guangzhou",
            "ST": "Guangzhou"
        }
    ]
}
EOF

# Generate the CA certificate; produces ca.pem, ca-key.pem and ca.csr
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
5.3 Issue the HTTPS Server Certificate
# Run on the master only; distribute afterwards
# Write the CSR request file; hosts can include a few extra reserved IPs
cat > server-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "192.168.0.5",
    "192.168.0.6",
    "192.168.0.7",
    "192.168.0.8",
    "192.168.0.9"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Guangzhou",
            "ST": "Guangzhou"
        }
    ]
}
EOF

# Generate the server certificate; produces server.pem, server-key.pem and server.csr
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd server-csr.json | cfssljson -bare server

# Copy the certificates into the etcd certificate directory
rsync -avX /data/TLS/etcd/*.pem /opt/etcd/ssl/
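
Optionally inspect the issued certificate and confirm its SAN list covers the etcd node IPs (cfssl-certinfo was installed in section 4):
cfssl-certinfo -cert /opt/etcd/ssl/server.pem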
5.4 Download etcd
# Run on the master only; distribute afterwards
# Download the etcd binary tarball with wget
wget https://github.com/etcd-io/etcd/releases/download/v3.5.1/etcd-v3.5.1-linux-amd64.tar.gz

# Unpack the tarball
tar xf etcd-v3.5.1-linux-amd64.tar.gz

# Move the binaries into the etcd working directory
mv etcd-v3.5.1-linux-amd64/etcd* /opt/etcd/bin
5.5 Create the Configuration File
# Run on the master only; distribute and adjust afterwards
# Change ETCD_NAME and the IP addresses to match each node
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.0.5:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.5:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.5:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.5:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.0.5:2380,etcd-2=https://192.168.0.6:2380,etcd-3=https://192.168.0.7:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
5.6 Create the systemd Unit
# Run on the master only; distribute afterwards
# The quoted delimiter keeps the trailing backslashes intact in the written file
cat > /usr/lib/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
5.7 Distribute the etcd Files
# Sync the etcd directory
rsync -avX /opt/etcd  etcd-2:/opt/
^2^3	 # bash quick substitution: re-run the previous command with 2 replaced by 3
# After syncing, edit ETCD_NAME and the IP addresses in /opt/etcd/cfg/etcd.conf on etcd-2 and etcd-3

# Sync the unit file
rsync -av /usr/lib/systemd/system/etcd.service etcd-2:/usr/lib/systemd/system/etcd.service
^2^3     # re-run the previous command with 2 replaced by 3
5.8 Start etcd
# Run on all three nodes at roughly the same time; etcd blocks at startup until enough peers join
# Reload systemd
systemctl daemon-reload

# Start etcd
systemctl start etcd

# Enable at boot
systemctl enable etcd

# Check the etcd service status
systemctl status etcd

# Verify cluster health (each endpoint should report "is healthy") and list the members
/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.0.5:2379,https://192.168.0.6:2379,https://192.168.0.7:2379" endpoint health

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.0.5:2379,https://192.168.0.6:2379,https://192.168.0.7:2379" member list
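
For a richer view (leader, Raft term, DB size), endpoint status supports table output; the alias below is only a local convenience to keep the TLS flags readable:
alias ectl='/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.0.5:2379,https://192.168.0.6:2379,https://192.168.0.7:2379"'
ectl endpoint status --write-out=table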

6 Deploy Docker
6.1 Download Docker
# Run on both master and workers
# Download the static Docker binary tarball
wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.9.tgz

# Unpack the tarball
tar zxvf docker-20.10.9.tgz

# Copy the binaries into a directory on $PATH
rsync -avX docker/* /usr/bin/
6.2 Create the Configuration File
# Run on both master and workers
# Create the parent directory for daemon.json
mkdir -p /etc/docker

# Configure the registry mirrors and the local insecure registries
cat > /etc/docker/daemon.json <<EOF
{
    "registry-mirrors": ["https://cy3j0usn.mirror.aliyuncs.com","https://k8s.gcr.io"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "insecure-registries": ["192.168.0.2:5000","host:5000"]
}
EOF
6.3 Create the systemd Unit
# The quoted delimiter keeps $MAINPID from being expanded by the shell
cat > /usr/lib/systemd/system/docker.service << 'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF

# Sync the unit file to the other nodes
rsync -avX /usr/lib/systemd/system/docker.service 192.168.0.6:/usr/lib/systemd/system/
^.6^.7   # re-run the previous command with .6 replaced by .7
6.4 Start Docker
# Run on both master and workers
# Reload systemd
systemctl daemon-reload

# Start Docker and enable it at boot
systemctl enable docker --now

# Check the Docker service status
systemctl status docker

# Confirm the registry mirror and insecure-registry settings took effect
docker info | tail -9 | head -6
--------------------------- 
  host:5000
  192.168.0.2:5000
  127.0.0.0/8
 Registry Mirrors:
  https://cy3j0usn.mirror.aliyuncs.com/
  https://k8s.gcr.io/

7 Kubernetes Deployment Preparation
7.1 Download the Kubernetes Components
# Download the server binary tarball with wget; all components are inside
wget https://dl.k8s.io/v1.22.17/kubernetes-server-linux-amd64.tar.gz

# Unpack the tarball
tar xf kubernetes-server-linux-amd64.tar.gz
7.2 Distribute the Binaries
# Copy the binaries to the relevant nodes and locations
cd kubernetes/server/bin
rsync -avX kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /opt/k8s/bin
rsync -avX kubectl  /usr/bin
rsync -avX kubelet kube-proxy root@192.168.0.6:/opt/k8s/bin/
^.6^.7

8 Deploy the Components
8.1 Deploy kube-apiserver
8.1.1 Certificate Authority
# Enter the Kubernetes certificate working directory
cd /data/TLS/k8s

# CA signing policy
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Guangzhou",
            "ST": "Guangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

# Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
8.1.2 Issue the Server Certificate
# Write the certificate request file; hosts must cover every address used to reach the apiserver (the first service IP 10.0.0.1, the node IPs, plus reserved spares)
cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "192.168.0.4",
      "192.168.0.5",
      "192.168.0.6",
      "192.168.0.7",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Guangzhou",
            "ST": "Guangzhou",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
8.1.3 Distribute the Certificates
# On the master node
rsync -av /data/TLS/k8s/ca*pem /opt/k8s/ssl/
^ca^server   # re-run the previous command with ca replaced by server

# Sync the CA certificate to the worker nodes
rsync -avX /data/TLS/k8s/ca.pem 192.168.0.6:/opt/k8s/ssl
^.6^.7

8.1.4 Create the Configuration File

# Run on the master only
cat > /opt/k8s/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \
--feature-gates=RemoveSelfLink=false \
--v=2 \
--log-dir=/opt/k8s/logs \
--bind-address=192.168.0.5 \
--secure-port=6443 \
--advertise-address=192.168.0.5 \
--anonymous-auth=false \
--allow-privileged=true \
--runtime-config=api/all=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction,DefaultStorageClass \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/k8s/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/k8s/ssl/server.pem \
--kubelet-client-key=/opt/k8s/ssl/server-key.pem \
--tls-cert-file=/opt/k8s/ssl/server.pem  \
--tls-private-key-file=/opt/k8s/ssl/server-key.pem \
--client-ca-file=/opt/k8s/ssl/ca.pem \
--apiserver-count=1 \
--service-account-issuer=api \
--service-account-key-file=/opt/k8s/ssl/ca-key.pem \
--service-account-signing-key-file=/opt/k8s/ssl/ca-key.pem \
--etcd-servers=https://192.168.0.5:2379,https://192.168.0.6:2379,https://192.168.0.7:2379 \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--requestheader-client-ca-file=/opt/k8s/ssl/ca.pem \
--proxy-client-cert-file=/opt/k8s/ssl/server.pem \
--proxy-client-key-file=/opt/k8s/ssl/server-key.pem \
--requestheader-allowed-names=kubernetes \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--enable-aggregator-routing=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--event-ttl=1h \
--audit-log-path=/opt/k8s/logs/k8s-audit.log"
EOF
8.1.5 TLS Bootstrapping
# Run on the master only
# Enable the TLS Bootstrapping mechanism
# Generate a random token
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
ea8bfdd5de0e8671757d1e999d9ebdec

# Create the token file
cat > /opt/k8s/cfg/token.csv << EOF
ea8bfdd5de0e8671757d1e999d9ebdec,kubelet-bootstrap,10001,"system:bootstrappers"
EOF
# Format: token,username,UID,group
# Note: the group must match the Group subject bound in 8.4.7; otherwise kubelet fails with "forbidden" (see 8.4.11)
# TLS Bootstrapping: once the apiserver enables TLS authentication, the kubelet and kube-proxy on every node must present a valid CA-signed client certificate to communicate with it. Hand-issuing those client certificates is a lot of work on large clusters and complicates scaling, so Kubernetes introduced TLS bootstrapping to issue them automatically: kubelet starts as a low-privilege bootstrap user, requests a certificate from the apiserver, and the apiserver signs it dynamically. This is the strongly recommended approach on nodes; it is currently used for kubelet, while kube-proxy still gets a certificate we issue centrally.
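
To watch the mechanism work once kubelet starts with the bootstrap kubeconfig (8.4.6), the node's request shows up as a CSR object on the master; <csr-name> below is a placeholder for the NAME column printed by the first command.
kubectl get csr -w                       # a new CSR appears when a node's kubelet first connects
kubectl certificate approve <csr-name>   # approve it (done in bulk in 8.4.10)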
8.1.6 Create the systemd Unit
# Run on the master only
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/opt/k8s/cfg/kube-apiserver.conf
ExecStart=/opt/k8s/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
8.1.7 Start kube-apiserver
# Reload systemd
systemctl daemon-reload

# Start the kube-apiserver component
systemctl start kube-apiserver

# Enable at boot
systemctl enable kube-apiserver

# Check the kube-apiserver status
systemctl status kube-apiserver.service
8.2 Deploy kube-controller-manager
8.2.1 Create the Configuration File
# Run on the master only
cat > /opt/k8s/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--leader-elect=true \\
--kubeconfig=/opt/k8s/cfg/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/k8s/ssl/ca.pem \\
--cluster-signing-key-file=/opt/k8s/ssl/ca-key.pem  \\
--root-ca-file=/opt/k8s/ssl/ca.pem \\
--service-account-private-key-file=/opt/k8s/ssl/ca-key.pem \\
--cluster-signing-duration=87600h0m0s"
EOF
8.2.2 Generate the Certificate
# Run on the master only
# Enter the certificate working directory
cd /data/TLS/k8s

# Write the certificate request file
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Guangzhou", 
      "ST": "Guangzhou",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
8.2.3 Generate the kubeconfig File
# Run on the master only
# Work from this directory
cd /data/TLS/k8s

# Run the following directly in the shell
KUBE_CONFIG="/opt/k8s/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://192.168.0.5:6443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-controller-manager \
  --client-certificate=./kube-controller-manager.pem \
  --client-key=./kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-controller-manager \
  --kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
8.2.4 Create the systemd Unit
# Run on the master only
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=kube-apiserver.service

[Service]
EnvironmentFile=/opt/k8s/cfg/kube-controller-manager.conf
ExecStart=/opt/k8s/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
8.2.5 Start kube-controller-manager
# Reload systemd
systemctl daemon-reload

# Start kube-controller-manager
systemctl start kube-controller-manager

# Enable at boot
systemctl enable kube-controller-manager

# Check kube-controller-manager
systemctl status kube-controller-manager
8.3 Deploy kube-scheduler
8.3.1 Create the Configuration File
# Run on the master only
cat > /opt/k8s/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--leader-elect \\
--kubeconfig=/opt/k8s/cfg/kube-scheduler.kubeconfig \\
--bind-address=127.0.0.1"
EOF
8.3.2 Generate the Certificate
# Run on the master only
# Enter the certificate working directory
cd /data/TLS/k8s

# Write the certificate request file
cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Guangzhou",
      "ST": "Guangzhou",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
8.3.3 Generate the kubeconfig File
# Run on the master only
# Work from this directory
cd /data/TLS/k8s

# Run the following directly in the shell
KUBE_CONFIG="/opt/k8s/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://192.168.0.5:6443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-scheduler \
  --client-certificate=./kube-scheduler.pem \
  --client-key=./kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-scheduler \
  --kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
8.3.4 Create the systemd Unit
# Run on the master only
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=kube-apiserver.service

[Service]
EnvironmentFile=/opt/k8s/cfg/kube-scheduler.conf
ExecStart=/opt/k8s/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
8.3.5 Start kube-scheduler
# Reload systemd
systemctl daemon-reload

# Start kube-scheduler
systemctl start kube-scheduler

# Enable at boot
systemctl enable kube-scheduler

# Check kube-scheduler
systemctl status kube-scheduler
8.4 Deploy kubelet
8.4.1 Create the Configuration File
# Run on both master and workers; set --hostname-override to each node's own hostname
cat > /opt/k8s/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--hostname-override=k8s-master \\
--network-plugin=cni \\
--kubeconfig=/opt/k8s/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/k8s/cfg/bootstrap.kubeconfig \\
--config=/opt/k8s/cfg/kubelet-config.yml \\
--cert-dir=/opt/k8s/ssl \\
--pod-infra-container-image=host:5000/k8s/pause:3.6"
EOF
8.4.2 Generate the Certificate
# Run on the master only
# Enter the certificate working directory
cd /data/TLS/k8s

# Request a client certificate for kubectl cluster administration
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Guangzhou",
      "ST": "Guangzhou",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
8.4.3 Generate the kubeconfig File
# Run on the master only
# Create the (hidden) kubeconfig directory
mkdir -p /root/.kube

# Run the following directly in the shell
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://192.168.0.5:6443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials cluster-admin \
  --client-certificate=./admin.pem \
  --client-key=./admin-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
  --cluster=kubernetes \
  --user=cluster-admin \
  --kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

# Check the control-plane component status
kubectl get cs
8.4.4 Create the systemd Unit
# Run on both master and workers
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/k8s/cfg/kubelet.conf
ExecStart=/opt/k8s/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
8.4.5 Create the YAML Configuration File
# Run on both master and workers
cat > /opt/k8s/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/k8s/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
8.4.6 Create the Bootstrap File
# Run on the master only
# Create the bootstrap.kubeconfig file
# Work from this directory
cd /data/TLS/k8s

# Run the following directly in the shell
KUBE_CONFIG="/opt/k8s/cfg/bootstrap.kubeconfig"
KUBE_APISERVER="https://192.168.0.5:6443"
TOKEN=$(awk -F',' '{print $1}' /opt/k8s/cfg/token.csv)

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials "kubelet-bootstrap" \
  --token=${TOKEN} \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
8.4.7 Authorize the Bootstrap User to Request Certificates
# Run on the master only
cat > /opt/k8s/yaml/kubelet-bootstrap-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: create-csrs-for-bootstrapping
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:node-bootstrapper
  apiGroup: rbac.authorization.k8s.io  
EOF

# Apply the yaml
kubectl apply -f /opt/k8s/yaml/kubelet-bootstrap-rbac.yaml
8.4.8 Distribute the kubelet Files
# Sync the kubelet configuration (then change --hostname-override in kubelet.conf on each node)
rsync -av /opt/k8s/cfg/{kubelet.conf,kubelet-config.yml,bootstrap.kubeconfig} 192.168.0.6:/opt/k8s/cfg/
^.6^.7

# Sync the unit file
rsync -av /usr/lib/systemd/system/kubelet.service 192.168.0.6:/usr/lib/systemd/system/kubelet.service
^.6^.7
8.4.9 Start kubelet
# Run on both master and workers
# Reload systemd
systemctl daemon-reload

# Start kubelet
systemctl start kubelet

# Enable at boot
systemctl enable kubelet

# Check kubelet
systemctl status kubelet
8.4.10 Approve the Certificates
# List the pending CSRs
kubectl get csr

# Approve all pending CSRs in one go so the nodes can join the cluster
for csr in $(kubectl get csr | awk 'NR>1 {print $1}'); do kubectl certificate approve $csr; done

# Check that the nodes have joined
kubectl get nodes
# Until the CNI network plugin is deployed, nodes show as NotReady
8.4.11 kubelet Startup Errors
# If kubelet fails to start and /var/log/messages contains the following error
error: failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelet-bootstrap" cannot create certificatesigningrequests.certificates.k8s.io at the cluster scope

# Cause: the kubelet-bootstrap user has not been authorized to create certificate signing requests
# Grant the permission (run on the master)
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

# Remove the binding again (useful for testing whether this was the cause)
kubectl delete clusterrolebinding kubelet-bootstrap

# If that is not the cause, re-check the configuration files: IPs, token, hostname, and so on
8.5 Deploy kube-proxy
8.5.1 Create the Configuration File
# Run on both master and workers
cat > /opt/k8s/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--config=/opt/k8s/cfg/kube-proxy-config.yml"
EOF
8.5.2 The IPVS-Mode YAML File
# Run on both master and workers
cat > /opt/k8s/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
iptables:
  masqueradeAll: true
  masqueradeBit: null
  minSyncPeriod: 0s
  syncPeriod: 0s
ipvs:
  masqueradeAll: true
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: "rr"
  strictARP: false
  syncPeriod: 0s
  tcpFinTimeout: 0s
  tcpTimeout: 0s
  udpTimeout: 0s
mode: "ipvs"
clientConnection:
  kubeconfig: /opt/k8s/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master
clusterCIDR: 10.244.0.0/16
EOF

# Change hostnameOverride to each node's hostname
# clusterCIDR: kube-proxy uses it to tell cluster-internal traffic from external traffic; requests to Service IPs are only SNATed when clusterCIDR or masqueradeAll is set
# clusterCIDR is the pod network CIDR and should match --cluster-cidr=10.244.0.0/16 in kube-controller-manager.conf (not the service range --service-cluster-ip-range=10.0.0.0/24 from kube-apiserver.conf)
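
A quick way to confirm kube-proxy really came up in IPVS mode: besides ipvsadm (used in 8.5.7), the metrics endpoint configured above (metricsBindAddress 0.0.0.0:10249) also serves a /proxyMode path in kube-proxy of this era; treat the exact path as an assumption to verify.
curl -s 127.0.0.1:10249/proxyMode   # expected output: ipvs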
8.5.3 Generate the Certificate
# Run on the master only
# Enter the certificate working directory
cd /data/TLS/k8s

# Write the certificate request file
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Guangzhou",
      "ST": "Guangzhou",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
8.5.4 Generate the kubeconfig File
# Run on the master; the resulting file is synced to the workers in 8.5.6
KUBE_CONFIG="/opt/k8s/cfg/kube-proxy.kubeconfig"
KUBE_APISERVER="https://192.168.0.5:6443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-proxy \
  --client-certificate=./kube-proxy.pem \
  --client-key=./kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
8.5.5 Create the systemd Unit
# Run on both master and workers
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=docker.service

[Service]
EnvironmentFile=/opt/k8s/cfg/kube-proxy.conf
ExecStart=/opt/k8s/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
8.5.6 Distribute the kube-proxy Files
# Sync the kube-proxy configuration
rsync -av /opt/k8s/cfg/{kube-proxy.conf,kube-proxy-config.yml,kube-proxy.kubeconfig} 192.168.0.6:/opt/k8s/cfg/
^.6^.7

# Sync the unit file
rsync -av /usr/lib/systemd/system/kube-proxy.service 192.168.0.6:/usr/lib/systemd/system/kube-proxy.service
^.6^.7

# After syncing kube-proxy.conf, kube-proxy-config.yml, kube-proxy.kubeconfig and kube-proxy.service to all nodes, change hostnameOverride in kube-proxy-config.yml to each node's hostname
8.5.7 Start kube-proxy
# Run on both master and workers
# Reload systemd
systemctl daemon-reload

# Start kube-proxy
systemctl start kube-proxy

# Enable at boot
systemctl enable kube-proxy

# Check kube-proxy
systemctl status kube-proxy

# Verify IPVS mode
ipvsadm -l
----------------------
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  k8s-master:https rr
  -> k8s-master:sun-sr-https      Masq    1      0          0     

9 apiserver Authorization
9.1 Grant the apiserver Access to kubelet
# Run on the master only
# Enter the yaml working directory
cd /opt/k8s/yaml

# Without this authorization the apiserver cannot reach kubelet, so containers cannot be managed (e.g. kubectl exec/logs)
cat > /opt/k8s/yaml/apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

# Apply it
kubectl apply -f apiserver-to-kubelet-rbac.yaml

10 Deploy Add-ons
10.1 Deploy the CNI Network
10.1.1 Install the CNI Plugins
# Run on both master and workers
# Download the amd64 build of the matching version from:
https://github.com/containernetworking/plugins/releases

# Create the CNI plugin directory and the network config directory
mkdir -p /opt/cni/bin  /etc/cni/net.d

# Unpack the plugins
tar zvxf cni-plugins-linux-amd64-v0.9.1.tgz -C /opt/cni/bin

# Distribute to the other nodes (target /opt/, so the directory lands as /opt/cni)
scp -r /opt/cni 192.168.0.6:/opt/
^.6^.7
10.1.2 Deploy flannel
# Pull the image
docker pull quay.io/coreos/flannel:v0.16.1-amd64

# Get kube-flannel.yaml (published by the flannel project, https://github.com/flannel-io/flannel) and change image to your own registry path; its default Network 10.244.0.0/16 matches --cluster-cidr above
vim kube-flannel.yaml
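
The image swap can be scripted too; the local-registry path below is illustrative and should match wherever you actually pushed the image:
sed -i 's,quay.io/coreos/flannel:v0.16.1-amd64,host:5000/k8s/flannel:v0.16.1-amd64,' kube-flannel.yaml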

# Create the pods
kubectl apply -f kube-flannel.yaml
10.2 Deploy CoreDNS
10.2.1 Prepare the YAML File
# Copy and adapt the template from:
https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed
# Download coredns.yaml.sed, apply the edits below, and save the result as coredns.yaml
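
The edits can also be scripted; the uppercase placeholder names below are the ones used by the upstream coredns.yaml.sed template (a rough sketch of what its companion deploy.sh does):
sed -e 's/CLUSTER_DOMAIN/cluster.local/' \
    -e 's/ REVERSE_CIDRS//' \
    -e 's@UPSTREAMNAMESERVER@/etc/resolv.conf@' \
    -e '/STUBDOMAINS/d' \
    -e 's/CLUSTER_DNS_IP/10.0.0.2/' \
    coredns.yaml.sed > coredns.yaml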

# The inline comments below mark the required changes
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local {          # replace the uppercase CLUSTER_DOMAIN and REVERSE_CIDRS placeholders with cluster.local
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {         # replace the uppercase UPSTREAMNAMESERVER placeholder with /etc/resolv.conf
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }                        # remove the uppercase STUBDOMAINS placeholder
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
      app.kubernetes.io/name: coredns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        app.kubernetes.io/name: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: k8s-app
                 operator: In
                 values: ["kube-dns"]
             topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.9.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
spec:
  selector:
    k8s-app: kube-dns
    app.kubernetes.io/name: coredns
  clusterIP: 10.0.0.2   # must match clusterDNS in kubelet-config.yml
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
    
# Deploy CoreDNS
kubectl apply -f coredns.yaml
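
Before testing from a pod, confirm the CoreDNS pods and the kube-dns service are up:
kubectl get pods -n kube-system -l k8s-app=kube-dns
kubectl get svc -n kube-system kube-dns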

# Test resolution from a pod (myos:nginx is an image from the local registry; any image with a shell works)
kubectl run dns-test --image=myos:nginx
kubectl exec -it dns-test -- /bin/bash
ping baidu.com    # run inside the pod; "nslookup kubernetes.default" (if available) checks cluster DNS too
10.3 Deploy ingress-nginx
10.3.1 Download the YAML File
# Download the yaml from:
https://github.com/kubernetes/ingress-nginx/blob/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml

# Replace the images
# Mirror in mainland China: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.1.1
# Mirror in mainland China: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1

# I pushed the images to my local registry, so rewrite the image paths in the yaml (saved locally as nginx-ingress-controller.yaml)
sed -i 's,\(image: \)k8s.gcr.io/ingress-nginx/controller.*,\1host:5000/ingress/nginx-ingress-controller:v1.1.1,' nginx-ingress-controller.yaml

sed -i 's,\(image: \)k8s.gcr.io/ingress-nginx/kube-webhook-certgen.*,\1host:5000/ingress/kube-webhook-certgen:v1.1.1,' nginx-ingress-controller.yaml
10.3.2 Adjust the YAML
# Set the replica count (optional)
# Add replicas: 2 under the Deployment controller's spec
.......
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.1
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  replicas: 2
  selector:
..........

# Pin ingress to specific nodes (optional)
kubectl label nodes k8s-worker1 type=ingress
kubectl label nodes k8s-worker2 type=ingress

# Add the matching nodeSelector label under the Deployment pod spec
............
      nodeSelector:
        kubernetes.io/os: linux
        type: "ingress"
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
............

# Use hostNetwork mode (optional)
# Set hostNetwork: true in the Ingress Controller yaml, under Deployment.spec.template.spec
.......
template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirst
............
# With hostNetwork there is no need to configure a Service; without it, access by domain requires appending the ingress Service port to the domain name
# The controller can also be run as a DaemonSet so the domain can be resolved to any node (this is what I used: change the Deployment into a DaemonSet; a sketch follows below)
# Ports 80 and 443 must be free on every node that runs the ingress controller
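
A rough sketch of the Deployment-to-DaemonSet change (review the resulting manifest afterwards; a DaemonSet has no replicas or strategy fields, so drop those if you added them):
sed -i 's/^kind: Deployment$/kind: DaemonSet/' nginx-ingress-controller.yaml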


# Deploy ingress-nginx
kubectl apply -f nginx-ingress-controller.yaml

# Check the deployment status
kubectl get all -n ingress-nginx
# Completed status on the ingress-nginx-admission... pods is normal; they exit once their one-off jobs finish
10.3.3 Verify ingress-nginx
# Create a Deployment and a Service
cat > tomcat-deployment.yaml << EOF
apiVersion: apps/v1 
kind: Deployment   
metadata:             
  name: tomcat-app
  labels:       
    app: tomcat
spec:          
  replicas: 2 
  selector:
    matchLabels:
      app: tomcat
  template:        
    metadata:  
      labels:  
        app: tomcat
    spec:
      containers:     
      - name: tomcat-container
        image: tomcat:jre8-openjdk
        imagePullPolicy: Always          
        ports:
        - containerPort: 8080
        resources:
          requests:
            memory: "1Gi"
            cpu: "500m"
          limits: 
            memory: "2Gi" 
            cpu: "1000m"
---
apiVersion: v1
kind: Service
metadata:
  name: tomcat-service
  labels:
    app: tomcat
spec:
  selector:
    app: tomcat
  ports:
  - name: tomcat-port
    protocol: TCP
    port: 8080
    targetPort: 8080
  type: ClusterIP
EOF

# Create the resources
kubectl apply -f tomcat-deployment.yaml

# Configure the nginx ingress
cat > tomcat-ingress.yaml << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tomcat
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: tomcat.ing.cn
    http:
      paths:
      - path: "/"
        pathType: Prefix
        backend:
          service:
            name: tomcat-service
            port:
              number: 8080
EOF

# Create the ingress
kubectl apply -f tomcat-ingress.yaml

# Check that the ingress is active
kubectl get ing

# Map the domain in this machine's /etc/hosts (IP first, then hostname; use any node running the ingress controller), then access it by domain
echo "192.168.0.5 tomcat.ing.cn" >> /etc/hosts
curl tomcat.ing.cn
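
Alternatively, set the Host header directly and skip editing /etc/hosts (any node running the ingress controller works):
curl -H 'Host: tomcat.ing.cn' http://192.168.0.5/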