K8S Setup Steps


# Start etcd (one member per node). Shown below: the etcd2 member on 192.168.56.103, then the etcd0 member.
sudo docker run -d --net=host --name=etcd2 \
  -v /etc/kubernetes/ssl:/etc/kubernetes/ssl \
  -v /var/etcd/data/:/var/etcd/data/ \
  gcr.io/google_containers/etcd:3.0.17 \
  /usr/local/bin/etcd --data-dir=/var/etcd/data \
  --name=etcd2 \
  --listen-client-urls=http://0.0.0.0:2379 \
  --advertise-client-urls=http://192.168.56.103:2379 \
  --initial-advertise-peer-urls=http://192.168.56.103:2380 \
  --listen-peer-urls=http://192.168.56.103:2380 \
  --initial-cluster=etcd0=http://192.168.56.101:2380,etcd1=http://192.168.56.102:2380,etcd2=http://192.168.56.103:2380 \
  --initial-cluster-state=new \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-cluster-token etcd-cluster-0

sudo docker run -d --net=host --name=etcd0 \
  -v /etc/kubernetes/ssl:/etc/kubernetes/ssl \
  -v /var/etcd/data/:/var/etcd/data/ \
  gcr.io/google_containers/etcd:3.0.17 \
  /usr/local/bin/etcd --data-dir=/var/etcd/data --name=etcd0 \
  --listen-client-urls=http://0.0.0.0:2379 \
  --advertise-client-urls=http://0.0.0.0:2379 \
  --initial-cluster-state=new \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem

# Verify: list the members at http://10.101.85.159:2379/v2/members, or check cluster health:
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  cluster-health

# Start kube-apiserver
sudo docker run -d --net=host --name=kube-apiserver \
  -v /etc/kubernetes:/etc/kubernetes \
  gcr.io/google_containers/hyperkube:v1.6.1 \
  /hyperkube apiserver \
  --service-cluster-ip-range=10.254.0.0/16 \
  --advertise-address=192.168.56.101 \
  --bind-address=192.168.56.101 \
  --insecure-bind-address=192.168.56.101 \
  --kubelet-port=10250 \
  --port=8080 \
  --allow-privileged=true \
  --admission-control=ServiceAccount,NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota \
  --authorization-mode=RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1beta1 \
  --kubelet-https=true \
  --experimental-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --v=0 \
  --logtostderr=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=http://192.168.56.101:2379,http://192.168.56.102:2379,http://192.168.56.103:2379 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --event-ttl=1h
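The apiserver points --token-auth-file at /etc/kubernetes/token.csv, which this walkthrough never shows. A minimal sketch of creating it, assuming the standard static-token-file layout of token,user,uid,"group"; the uid 10001 and the group name are example values, and the generated token must later match the bootstrap kubeconfig used by the kubelet:

# Sketch: create the static token file consumed by --token-auth-file
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF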
# Start kube-controller-manager
sudo docker run -d --net=host --name=kube-controller-manager \
  -v /etc/kubernetes:/etc/kubernetes \
  gcr.io/google_containers/hyperkube:v1.6.1 \
  /hyperkube controller-manager \
  --master=http://192.168.56.101:8080 \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --v=2 \
  --leader-elect=true

# Start kube-scheduler
sudo docker run -d --net=host --name=kube-scheduler \
  -v /etc/kubernetes:/etc/kubernetes \
  gcr.io/google_containers/hyperkube:v1.6.1 \
  /hyperkube scheduler \
  --master=http://192.168.56.101:8080 \
  --v=2 \
  --leader-elect=true

# Sample /run/flannel/subnet.env values:
#   FLANNEL_NETWORK=192.168.0.0/16
#   FLANNEL_SUBNET=192.168.40.1/24
#   FLANNEL_MTU=1450
#   FLANNEL_IPMASQ=false

# Write the flannel network configuration into etcd, then read it back to confirm
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  set /coreos.com/network/config "{\"Network\":\"10.254.0.0/16\",\"Backend\":{\"Type\":\"vxlan\"}}"
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  get /coreos.com/network/config

# Network configuration: start flanneld, then point docker0 at the assigned subnet
./flanneld --etcd-endpoints=http://192.168.56.101:2379,http://192.168.56.102:2379,http://192.168.56.103:2379 \
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem > /var/log/flanneld 2>&1 &
./mk-docker-opts.sh -i
cat /run/flannel/subnet.env
source /run/flannel/subnet.env
ifconfig docker0 ${FLANNEL_SUBNET}

# Route 10.254.0.0/16 through the flannel.1 interface instead of the default gateway
SUBNET=10.254.0.0/16
defaultgw=`route -n | grep '^0.0.0.0' | awk '{print $2}'`
ip route del $SUBNET via $defaultgw dev eth0
ip route add $SUBNET via 0.0.0.0 dev flannel.1
# (example per-node flannel subnet: 10.254.51.0/24)

# Edit /etc/systemd/system/multi-user.target.wants/docker.service and add these options to the Docker daemon:
#   --disable-legacy-registry --bip=192.168.35.1/24 --ip-masq=false --mtu=1450
sudo systemctl daemon-reload
sudo systemctl restart docker

# Bind the kubelet bootstrap user to the system:node-bootstrapper cluster role
kubectl create clusterrolebinding kubelet-bootstrap \
  --server=http://192.168.56.101:8080 \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap

# Start the kubelet
hyperkube kubelet --api-servers=http://192.168.56.101:8080 \
  --cluster-dns=10.254.0.2 \
  --cluster-domain=cluster.local. \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --require-kubeconfig \
  --cert-dir=/etc/kubernetes/ssl > /var/log/kubelet 2>&1 &

# Approve the node's certificate signing request so the apiserver accepts it (use the CSR name shown by kubectl get csr)
kubectl get csr
kubectl certificate approve csr-2b308
kubectl get nodes

# Start kube-proxy
hyperkube proxy --master=http://192.168.56.101:8080 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --cluster-cidr=10.254.0.0/16 > /var/log/proxy 2>&1 &
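The kubelet and kube-proxy above read /etc/kubernetes/bootstrap.kubeconfig and /etc/kubernetes/kube-proxy.kubeconfig, which are also not shown here. A rough sketch of building them with kubectl config, assuming the apiserver's secure port is the default 6443, that BOOTSTRAP_TOKEN is the value written into token.csv, and that kube-proxy client certificates named kube-proxy.pem / kube-proxy-key.pem were issued under /etc/kubernetes/ssl:

# bootstrap.kubeconfig for kubelet TLS bootstrap (token must match token.csv)
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.56.101:6443 \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

# kube-proxy.kubeconfig (assumes kube-proxy client certs exist alongside the others)
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.56.101:6443 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig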
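Once the node shows up in kubectl get nodes, a short smoke test exercises scheduling, the flannel overlay, and kube-proxy end to end. This is a sketch, not part of the original walkthrough; the nginx image and NodePort service type are arbitrary choices:

kubectl --server=http://192.168.56.101:8080 get componentstatuses
kubectl --server=http://192.168.56.101:8080 run nginx --image=nginx --replicas=2 --port=80
kubectl --server=http://192.168.56.101:8080 expose deployment nginx --type=NodePort --port=80
kubectl --server=http://192.168.56.101:8080 get pods -o wide
kubectl --server=http://192.168.56.101:8080 get svc nginx
# Then curl any node IP on the NodePort reported for the nginx service (within the 30000-32767 range).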
