The apiserver is installed and deployed in a high-availability configuration. This article follows the previous one, "Kubernetes 1.9 Production High-Availability in Practice – 001 – ETCD HA Cluster Deployment".
In this article we focus on how to deploy the apiserver and how to configure it for high availability.
The files used in this configuration can be downloaded from: https://pan.baidu.com/s/1wyhV_kBpIqZ_MdS2Ghb8sg
Prepare the servers:

192.168.3.53 yds-dev-svc01-master01
192.168.3.54 yds-dev-svc01-master02
192.168.3.46 yds-dev-svc01-master03

(Only the two masters, master01 and master02, are configured in this article.)
Configure the following on yds-dev-svc01-master01:
[root@localhost ~]# hostnamectl set-hostname yds-dev-svc01-master01
[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens32
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens32
UUID=7d6fb2ed-364c-415f-9b02-0e54436ff1ec
DEVICE=ens32
ONBOOT=yes
IPADDR=192.168.3.53
NETMASK=255.255.255.0
GATEWAY=192.168.3.1
DNS1=192.168.3.10
DNS2=61.139.2.69

Set the kernel parameters:
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf

If running sysctl -p produces:

sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory

fix it with:

modprobe br_netfilter
ls /proc/sys/net/bridge

Once this is configured, log out and log back in.
Configure the following on yds-dev-svc01-master02:
[root@yds-dev-svc01-master02 ~]# hostnamectl set-hostname yds-dev-svc01-master02
[root@yds-dev-svc01-master02 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens32
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens32
UUID=7d6fb2ed-364c-415f-9b02-0e54436ff1ec
DEVICE=ens32
ONBOOT=yes
IPADDR=192.168.3.54
NETMASK=255.255.255.0
GATEWAY=192.168.3.1
DNS1=192.168.3.10
DNS2=61.139.2.69

Set the kernel parameters:
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf

If running sysctl -p produces:

sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory

fix it with:

modprobe br_netfilter
ls /proc/sys/net/bridge

Once this is configured, log out and log back in.
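Note that modprobe br_netfilter does not survive a reboot. An optional sketch, assuming a systemd-based host such as CentOS 7, to load the module automatically at boot on both masters:

# Load br_netfilter at every boot so the bridge sysctls stay available
cat <<EOF > /etc/modules-load.d/br_netfilter.conf
br_netfilter
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf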
Update the servers to the latest packages. When the update completes, reboot.
yum update -y; yum install -y epel-release
reboot

Check the kube-apiserver version:
[root@yds-dev-svc01-master01 ~]# /usr/local/bin/kube-apiserver --version
Kubernetes v1.9.0

(Note that the systemd unit files created later call the binaries from /usr/bin, so make sure the kube-* binaries from the download package are installed there as well.)

Remember the server on which we created the ETCD certificates? Its hostname is yds-dev-svc01-etcd01. For convenience, we continue to use that server to create the certificates.
On yds-dev-svc01-etcd01, change to the directory where the certificates were created:
[root@yds-dev-svc01-etcd01 key]# pwd
/tmp/key
[root@yds-dev-svc01-etcd01 key]# ls
ca-config.json  ca-csr.json  ca.pem    etcd-csr.json  etcd.pem
ca.csr          ca-key.pem   etcd.csr  etcd-key.pem

When inspecting the generated kubernetes certificate, confirm that the Issuer field matches ca-csr.json; that the Subject field matches kubernetes-csr.json; that the X509v3 Subject Alternative Name field matches kubernetes-csr.json; and that the X509v3 Key Usage and Extended Key Usage fields match the kubernetes profile in ca-config.json.
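The kubernetes-csr.json mentioned above, and the kubernetes.pem / kubernetes-key.pem copied in the next step, are not generated in this article (they are presumably part of the download package). A hedged sketch of how they could be created with cfssl in /tmp/key, assuming the master IPs 192.168.3.53/54, the VIP 192.168.3.55 and the first service cluster IP 10.254.0.1:

# The names block below is illustrative; keep it consistent with your ca-csr.json.
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.3.53",
    "192.168.3.54",
    "192.168.3.55",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "SiChuan", "L": "ChengDu", "O": "k8s", "OU": "System" }
  ]
}
EOF
# Sign with the existing CA, using the kubernetes profile from ca-config.json
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes
# Inspect the result to perform the checks listed above
openssl x509 -noout -text -in kubernetes.pem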
Copy the generated certificates to the Kubernetes configuration directory /etc/kubernetes/ssl/.
Copy the certificates to yds-dev-svc01-master01.
Create the directory /etc/kubernetes/ssl/ on the target server first.
scp etcd.pem etcd-key.pem root@192.168.3.53:/etc/kubernetes/ssl
scp ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem root@192.168.3.53:/etc/kubernetes/ssl/

Copy the certificates to yds-dev-svc01-master02.
Create the directory /etc/kubernetes/ssl/ on the target server first.
scp etcd.pem etcd-key.pem root@192.168.3.54:/etc/kubernetes/ssl
scp ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem root@192.168.3.54:/etc/kubernetes/ssl/

Create the audit policy file:
cat > audit-policy.yaml <<EOF
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF

Put the resulting audit-policy.yaml in the /etc/kubernetes directory:
cp audit-policy.yaml /etc/kubernetes/

Run the following on both yds-dev-svc01-master01 and yds-dev-svc01-master02:
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

We configure yds-dev-svc01-master01 first; once it is configured and tested, we configure yds-dev-svc01-master02.
--service-node-port-range=30000-32767 specifies the port range available to NodePort services.
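This flag, together with the other $KUBE_* variables expanded by the unit file in the next step, lives in /etc/kubernetes/config and /etc/kubernetes/apiserver. Neither file is reproduced in this article (they are part of the download package), so the following is only a hedged sketch of plausible contents, assembled from values that do appear elsewhere in this article; the etcd endpoints are placeholders to be replaced with the addresses from the ETCD article:

# /etc/kubernetes/config -- shared by apiserver, controller-manager and scheduler (sketch)
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://127.0.0.1:8080"

# /etc/kubernetes/apiserver -- values for master01 (sketch)
KUBE_API_ADDRESS="--advertise-address=192.168.3.53 --bind-address=192.168.3.53 --insecure-bind-address=127.0.0.1"
KUBE_ETCD_SERVERS="--etcd-servers=https://<etcd01-ip>:2379,https://<etcd02-ip>:2379,https://<etcd03-ip>:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction"
KUBE_AUDIT="--audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-log-path=/var/log/kube-audit.log"
KUBE_API_ARGS="--authorization-mode=Node,RBAC --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca.pem --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/etcd.pem --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem"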
Create the kube-apiserver systemd unit file kube-apiserver.service:
[root@yds-dev-svc01-master01 ~]# cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver \
        $KUBE_LOGTOSTDERR \
        $KUBE_AUDIT \
        $KUBE_LOG_LEVEL \
        $KUBE_ETCD_SERVERS \
        $KUBE_API_ADDRESS \
        $KUBE_API_PORT \
        $KUBELET_PORT \
        $KUBE_ALLOW_PRIV \
        $KUBE_SERVICE_ADDRESSES \
        $KUBE_ADMISSION_CONTROL \
        $KUBE_API_ARGS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

The settings in the config file are shared: they are referenced by kube-apiserver.service, kube-controller-manager.service, kube-scheduler.service, kubelet.service and kube-proxy.service. KUBE_MASTER is configured here as plain HTTP access, because it is mainly used by the controller-manager, scheduler and proxy running on the same server.
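The curl test that follows assumes kube-apiserver is already running on yds-dev-svc01-master01. The article only shows the start commands later, for master02; the equivalent steps on master01, mirroring those commands, are:

chmod +x /usr/bin/kube*
firewall-cmd --add-port=6443/tcp --permanent
firewall-cmd --reload
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver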
Test the API with curl:
[root@yds-dev-svc01-master01 ~]# curl -L --cacert /etc/kubernetes/ssl/ca.pem https://192.168.3.53:6443/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.3.53:6443"
    }
  ]
}
[root@yds-dev-svc01-master01 kubernetes]# curl -L http://127.0.0.1:8080/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.3.53:6443"
    }
  ]
}

Create the configuration file /etc/kubernetes/controller-manager:
[root@yds-dev-svc01-master01 ~]# cat /etc/kubernetes/controller-manager
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.254.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"

--leader-elect=true must be set to true so that only one kube-controller-manager in the cluster is active at a time.
Create the systemd unit file /usr/lib/systemd/system/kube-controller-manager.service:
[root@yds-dev-svc01-master01 ~]# cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Create the configuration file /etc/kubernetes/scheduler:
[root@yds-dev-svc01-master01 ~]# cat /etc/kubernetes/scheduler
###
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"

--leader-elect=true must be set to true so that only one kube-scheduler in the cluster is active at a time.
Create the systemd unit file /usr/lib/systemd/system/kube-scheduler.service:
[root@yds-dev-svc01-master01 ~]# cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

The kubectl tool is used to manage the k8s cluster; it is best not to install it on the apiserver servers.
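Before moving on to kubectl, the controller-manager and scheduler on yds-dev-svc01-master01 also need to be enabled and started, again mirroring the commands shown later for master02:

systemctl daemon-reload
systemctl enable kube-controller-manager kube-scheduler
systemctl start kube-controller-manager kube-scheduler
systemctl status kube-controller-manager kube-scheduler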
Here we return to the yds-dev-svc01-etcd01 server. Recall that the certificates were created in the following directory:
cd /tmp/key/

Set the KUBE_APISERVER variable:
export KUBE_APISERVER="https://192.168.3.53:6443"

Set the cluster parameters:
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER}

Set the client authentication parameters:
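The admin.pem and admin-key.pem files used by the next command are not created anywhere in this article; they are presumably part of the download package. A hedged sketch of how such an admin client certificate could be generated with cfssl in /tmp/key and put in place:

# The names block is illustrative; keep it consistent with your ca-csr.json.
# O=system:masters is what RBAC maps to the cluster-admin group.
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "SiChuan", "L": "ChengDu", "O": "system:masters", "OU": "System" }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  admin-csr.json | cfssljson -bare admin
mkdir -p /etc/kubernetes/ssl
cp admin.pem admin-key.pem /etc/kubernetes/ssl/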
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem

Set the context parameters:
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin

Set the default context:
kubectl config use-context kubernetes

With the kubeconfig in place, we can check the component status (see below); the controller-manager and scheduler report a healthy state, which shows that our installation is working.
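A typical way to perform that check, assuming kubectl on yds-dev-svc01-etcd01 uses the kubeconfig created above:

kubectl get componentstatuses
# A healthy result lists scheduler, controller-manager and the etcd members
# with STATUS "Healthy"; anything else means the corresponding service (or its
# connection to the apiserver) needs attention.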
We have now configured yds-dev-svc01-master01; next we configure yds-dev-svc01-master02. Since both servers are apiservers, we only need to copy the configuration from yds-dev-svc01-master01 to yds-dev-svc01-master02 and make the corresponding changes (a copy sketch follows).
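The article does not list the exact files to copy; a sketch run from yds-dev-svc01-master01, assuming the same paths are used on master02:

# Binaries
scp /usr/bin/kube-apiserver /usr/bin/kube-controller-manager /usr/bin/kube-scheduler root@192.168.3.54:/usr/bin/
# Configuration files (the certificates were already copied earlier)
scp /etc/kubernetes/config /etc/kubernetes/apiserver /etc/kubernetes/controller-manager /etc/kubernetes/scheduler /etc/kubernetes/token.csv /etc/kubernetes/audit-policy.yaml root@192.168.3.54:/etc/kubernetes/
# systemd unit files
scp /usr/lib/systemd/system/kube-apiserver.service /usr/lib/systemd/system/kube-controller-manager.service /usr/lib/systemd/system/kube-scheduler.service root@192.168.3.54:/usr/lib/systemd/system/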
After copying, change 192.168.3.53 to 192.168.3.54 in the following line:
KUBE_API_ADDRESS="--advertise-address=192.168.3.53 --bind-address=192.168.3.53 --insecure-bind-address=127.0.0.1"TO
KUBE_API_ADDRESS="--advertise-address=192.168.3.54 --bind-address=192.168.3.54 --insecure-bind-address=127.0.0.1"将所有的服务复制完成后,我们在yds-dev-svc01-master02中进行一些配置。并启动服务。
chmod +x /usr/bin/kube*
firewall-cmd --add-port=6443/tcp --permanent
firewall-cmd --reload
systemctl daemon-reload
systemctl enable kube-apiserver kube-controller-manager kube-scheduler
systemctl start kube-apiserver kube-controller-manager kube-scheduler
systemctl status kube-apiserver kube-controller-manager kube-scheduler

Install keepalived on both yds-dev-svc01-master01 and yds-dev-svc01-master02. The version installed here is keepalived-1.4.2.
yum install -y gcc openssl-devel wget
cd /tmp
wget http://www.keepalived.org/software/keepalived-1.4.2.tar.gz
tar -xvzf keepalived-1.4.2.tar.gz
cd keepalived-1.4.2
./configure --prefix=/usr/local/keepalived
make && make install
ln -s /usr/local/keepalived/sbin/keepalived /usr/sbin/keepalived

Edit the systemd unit file:
[root@yds-dev-svc01-master01 ~]# cat /usr/lib/systemd/system/keepalived.service
[Unit]
Description=LVS and VRRP High Availability Monitor
After=network-online.target syslog.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
KillMode=process
EnvironmentFile=-/usr/local/keepalived/etc/sysconfig/keepalived
ExecStart=/usr/local/keepalived/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID

[Install]
WantedBy=multi-user.target

Start keepalived on yds-dev-svc01-master01 and yds-dev-svc01-master02 (a configuration sketch is shown below first).
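The keepalived configuration itself is not included in this article (it is presumably part of the download package). A minimal sketch for yds-dev-svc01-master01, assuming keepalived reads its default configuration under the --prefix chosen above (/usr/local/keepalived/etc/keepalived/keepalived.conf), VRRP on ens32 with the VIP 192.168.3.55, and a health check on the local kube-apiserver so the VIP fails over when the apiserver stops, which is the behaviour tested later:

! /usr/local/keepalived/etc/keepalived/keepalived.conf on yds-dev-svc01-master01
! On yds-dev-svc01-master02 use state BACKUP and a lower priority (e.g. 90).
global_defs {
    router_id yds-dev-svc01-master01
}

vrrp_script check_apiserver {
    ! curl exits non-zero when the local apiserver is unreachable
    script "curl -sk https://127.0.0.1:6443/ -o /dev/null"
    interval 3
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens32
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-ha
    }
    virtual_ipaddress {
        192.168.3.55/24
    }
    track_script {
        check_apiserver
    }
}

With the track_script in place, stopping kube-apiserver on the MASTER node makes the check fail and the VIP moves to the BACKUP, which matches the failover test at the end of this article.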
systemctl daemon-reload; systemctl enable keepalived; systemctl restart keepalived; systemctl status keepalived

1. Check the IP addresses.

yds-dev-svc01-master01:
[root@yds-dev-svc01-master01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:48:d8:a8 brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.53/24 brd 192.168.3.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 192.168.3.55/24 scope global secondary ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::9cd:60a3:99e2:48ff/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::fbd2:5239:fe68:ea3d/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::2a36:8b76:9a1d:7d50/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever

yds-dev-svc01-master02:
[root@yds-dev-svc01-master02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:fc:62:1d brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.54/24 brd 192.168.3.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::9cd:60a3:99e2:48ff/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::fbd2:5239:fe68:ea3d/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::2a36:8b76:9a1d:7d50/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever

We can see that 192.168.3.55 is currently held by yds-dev-svc01-master01. Now access 192.168.3.55:
[root@yds-dev-svc01-master02 ~]# curl -k https://192.168.3.55:6443
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {

  },
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
  "reason": "Forbidden",
  "details": {

  },
  "code": 403
}

The service responds normally (the 403 comes from RBAC rejecting the anonymous user, which means the request did reach the apiserver).
Now we stop kube-apiserver on yds-dev-svc01-master01.
On yds-dev-svc01-master01:
[root@yds-dev-svc01-master01 ~]# systemctl stop kube-apiserver
[root@yds-dev-svc01-master01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:48:d8:a8 brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.53/24 brd 192.168.3.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::9cd:60a3:99e2:48ff/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::fbd2:5239:fe68:ea3d/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::2a36:8b76:9a1d:7d50/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever

On yds-dev-svc01-master02:
[root@yds-dev-svc01-master02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:fc:62:1d brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.54/24 brd 192.168.3.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 192.168.3.55/24 scope global secondary ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::9cd:60a3:99e2:48ff/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::fbd2:5239:fe68:ea3d/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::2a36:8b76:9a1d:7d50/64 scope link tentative dadfailed
       valid_lft forever preferred_lft forever

We can see that the IP 192.168.3.55 has failed over to yds-dev-svc01-master02. Test access again:
[root@yds-dev-svc01-master02 ~]# curl -k https://192.168.3.55:6443
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {

  },
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
  "reason": "Forbidden",
  "details": {

  },
  "code": 403
}

The service responds normally.
With that, the apiserver high-availability configuration is complete.