Kubernetes Cluster Setup


Basic environment

System release:

# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)

Host names:

centos-master 192.168.59.135

centos-minion1 192.168.59.132

centos-minion2 192.168.59.133
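
The configuration files later in this article refer to the nodes by these host names (for example --master=http://centos-master:8080), so they must resolve on every node. If they are not in DNS, one option (an assumption, not shown in the original) is to add them to /etc/hosts on each machine:

# cat >> /etc/hosts <<EOF
192.168.59.135 centos-master
192.168.59.132 centos-minion1
192.168.59.133 centos-minion2
EOF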

Disable SELinux and firewalld, then reboot the servers

# systemctl stop firewalld
# systemctl disable firewalld

# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

Install and deploy the etcd cluster on all three nodes

# yum install etcd -y

Installed version:

# rpm -qa | grep etcd

etcd-3.2.7-1.el7.x86_64

Configure etcd (/etc/etcd/etcd.conf)

Master etcd configuration

# cat /etc/etcd/etcd.conf | grep -Ev "^#|^$"

ETCD_NAME=centos-master
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.135:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.135:2379"

Minion2 etcd configuration

# grep -Ev "^#|^$" /etc/etcd/etcd.conf

ETCD_NAME=centos-minion2
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.133:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.133:2379"

Minion1 etcd configuration

# grep -Ev "^#|^$" /etc/etcd/etcd.conf

ETCD_NAME=centos-minion1
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.132:2379"

Start the etcd cluster on all three nodes, then check its status (from any one of them)

# Start etcd

# systemctl start etcd
# systemctl enable etcd

# Check cluster status

# etcdctl member list

10a23ff41e3abcb8: name=centos-minion1 peerURLs=http://192.168.59.132:2380 clientURLs=http://192.168.59.132:2379 isLeader=false
168ea6ce7632b2e4: name=centos-minion2 peerURLs=http://192.168.59.133:2380 clientURLs=http://192.168.59.133:2379 isLeader=true
587d83f824bf96c6: name=centos-master peerURLs=http://192.168.59.135:2380 clientURLs=http://192.168.59.135:2379 isLeader=false

# etcdctl cluster-health

member 10a23ff41e3abcb8 is healthy: got healthy result from http://192.168.59.132:2379
member 168ea6ce7632b2e4 is healthy: got healthy result from http://192.168.59.133:2379
member 587d83f824bf96c6 is healthy: got healthy result from http://192.168.59.135:2379
cluster is healthy
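
Beyond cluster-health, a quick way to confirm the members really replicate data is to write a throwaway key on one node and read it back on another (the /test key below is just an example, not part of the original setup):

# On centos-master
# etcdctl set /test "hello"

# On centos-minion1
# etcdctl get /test

# Clean up
# etcdctl rm /test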

Kubernetes master node installation and deployment


# yum install kubernetes -y

Installed versions:

# rpm -qa | grep kubernetes

kubernetes-client-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-master-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64

Configure the Kubernetes API Server (/etc/kubernetes/apiserver)

# cat /etc/kubernetes/apiserver | grep -Ev "^#|^$"

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""

Configure the Kubernetes common config (/etc/kubernetes/config)

# cat /etc/kubernetes/config | grep -Ev "^#|^$"

KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://centos-master:8080"
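
The KUBE_MASTER value in this file matters on the minions too: the RPM-packaged kubelet and kube-proxy units read the same /etc/kubernetes/config. Assuming root SSH access between the nodes (not shown in the original), the file can simply be copied over:

# scp /etc/kubernetes/config centos-minion1:/etc/kubernetes/config
# scp /etc/kubernetes/config centos-minion2:/etc/kubernetes/config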

Kubernetes minion node installation (minion1 and minion2)

# yum install flannel docker kubernetes -y

Configure flannel (/etc/sysconfig/flanneld)

# grep -Ev "^#|^$" /etc/sysconfig/flanneld

FLANNEL_ETCD_ENDPOINTS="http://192.168.59.133:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

Configure kubelet (/etc/kubernetes/kubelet). The example below is taken from centos-minion2; on minion1, set --hostname-override=centos-minion1 accordingly.

# grep -Ev "^#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-minion2"
KUBELET_API_SERVER="--api-servers=http://centos-master:8080"

# Fill in your own registry address below; if your nodes have unrestricted
# Internet access you can keep the default Red Hat image instead
# KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=192.168.59.133:5000/pod-infrastructure:latest"

# Fill in your DNS and network settings below
KUBELET_ARGS="--cluster-dns=192.168.51.198 --cluster-domain=atomic.io/network"
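
The kubelet setting above, and the YAML files later in this article, assume a private registry at 192.168.59.133:5000 that already serves the pod-infrastructure and centos6-http images. The original article does not show how that registry was populated; a rough sketch (the registry:2 container and the mirroring steps are assumptions, not part of the original setup) could look like this on 192.168.59.133, with each node's Docker daemon also listing the registry as insecure (e.g. INSECURE_REGISTRY='--insecure-registry 192.168.59.133:5000' in /etc/sysconfig/docker), since it is served over plain HTTP:

# Run a simple local registry on 192.168.59.133
# docker run -d -p 5000:5000 --restart=always --name registry registry:2

# Mirror the pod-infrastructure (pause) image into it
# docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
# docker tag registry.access.redhat.com/rhel7/pod-infrastructure:latest 192.168.59.133:5000/pod-infrastructure:latest
# docker push 192.168.59.133:5000/pod-infrastructure:latest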

Start the services

On the kubernetes master:

for SERVICES in kube-apiserver kube-controller-manager kube-scheduler; do
systemctl restart $SERVICES
systemctl enable $SERVICES
systemctl status $SERVICES -l
done
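
Once these three services are up, a quick sanity check (not part of the original walkthrough) is to confirm the API server answers on the insecure port configured earlier:

# curl -s http://centos-master:8080/version
# kubectl get componentstatuses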

etcd network configuration for flannel

# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'

On the kubernetes minions:

for SERVICES in kube-proxy kubelet docker flanneld; do
systemctl restart $SERVICES
systemctl enable $SERVICES
systemctl status $SERVICES
done
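
After flanneld starts on the minions, you can optionally confirm that each node registered a subnet lease under the etcd prefix configured above and wrote its local environment file (both paths are flannel defaults):

# etcdctl ls /atomic.io/network/subnets
# cat /run/flannel/subnet.env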

Check node status (on the master)

# kubectl get nodes

NAME STATUS AGE
centos-minion1 Ready 1h
centos-minion2 Ready 1h
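
If a node shows NotReady here, a usual first step (a general troubleshooting hint, not from the original article) is to look at the kubelet on that node:

# systemctl status kubelet -l
# journalctl -u kubelet --no-pager | tail -n 50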

Check the flannel interface on the minions

[root@centos-minion1 ~]# ifconfig flannel0

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST> mtu 1472
inet 172.17.34.0 netmask 255.255.0.0 destination 172.17.34.0
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

[root@centos-minion2 ~]# ifconfig flannel0

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST> mtu 1472
inet 172.17.59.0 netmask 255.255.0.0 destination 172.17.59.0
inet6 fe80::2d54:2169:1a0:d364 prefixlen 64 scopeid 0x20<link>
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3 bytes 144 (144.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
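
With both flannel0 interfaces up, an optional cross-host connectivity check (an addition, not part of the original article) is to ping one minion's flannel address from the other, using the addresses shown above:

[root@centos-minion1 ~]# ping -c 3 172.17.59.0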

Simple test (create a Pod, a Service, and an RC on the master)

# ls
http-pod.yaml http-rc.yaml http-service.yaml

# cat http-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: http-pod
  labels:
    name: http-pod
spec:
  containers:
  - name: http
    image: 192.168.59.133:5000/centos6-http
    ports:
    - containerPort: 80

# cat http-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: http-service
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30001
  selector:
    name: http-pod

# cat http-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: http-rc
spec:
  replicas: 2
  selector:
    name: http-pod
  template:
    metadata:
      labels:
        name: http-pod
    spec:
      containers:
      - name: http-pod
        image: 192.168.59.133:5000/centos6-http
        ports:
        - containerPort: 80

Create the Pod

# kubectl create -f http-pod.yaml
pod "http-pod" created

# kubectl get pods
NAME READY STATUS RESTARTS AGE
http-pod 1/1 Running 0 4s

Create the Service

# kubectl create -f http-service.yaml
service "http-service" created

# kubectl get service
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
http-service 10.254.235.49 <nodes> 80:30001/TCP 5s
kubernetes 10.254.0.1 <none> 443/TCP 1d

Check which node the pod was scheduled on

# kubectl describe service

Name: http-service
Namespace: default
Labels: <none>
Selector: name=http-pod
Type: NodePort
IP: 10.254.235.49
Port: <unset> 80/TCP
NodePort: <unset> 30001/TCP
Endpoints: 172.17.59.3:80 # this is the flannel-assigned pod address on minion2
Session Affinity: None
No events.

Name: kubernetes
Namespace: default
Labels: component=apiserver
provider=kubernetes
Selector: <none>
Type: ClusterIP
IP: 10.254.0.1
Port: https 443/TCP
Endpoints: 192.168.59.135:6443
Session Affinity: ClientIP
No events.
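
Besides reading the Endpoints address off the Service, a more direct way (not shown in the original) to see which node each pod landed on is the wide pod listing:

# kubectl get pods -o wide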

Accessing the service through the NodePort returns the default HTTP welcome page

# curl http://192.168.59.133:30001/

Create the RC

# kubectl create -f http-rc.yaml
replicationcontroller "http-rc" created

# kubectl get rc
NAME DESIRED CURRENT READY AGE
http-rc 2 2 2 8s

# kubectl get pods    # originally one pod, now there are two
NAME READY STATUS RESTARTS AGE
http-pod 1/1 Running 0 9m
http-rc-b24kx 1/1 Running 0 13s

Now delete a pod and see whether the RC recreates it

# kubectl delete pod http-pod
pod "http-pod" deleted

# kubectl get pods
NAME READY STATUS RESTARTS AGE
http-rc-8cl5p 1/1 Running 0 2s
http-rc-b24kx 1/1 Running 0 2m

# kubectl delete pod http-rc-8cl5p http-rc-b24kx
pod "http-rc-8cl5p" deleted
pod "http-rc-b24kx" deleted

# kubectl get pods
NAME READY STATUS RESTARTS AGE
http-rc-xxtrw 1/1 Running 0 3s
http-rc-z8t9n 1/1 Running 0 3s

The pods were deleted twice, and each time the RC brought the count back to two; both replacement pods tested as reachable.

Inspect the pod details

# kubectl describe pod
Name: http-rc-xxtrw
Namespace: default
Node: centos-minion2/192.168.59.133
Start Time: Tue, 31 Oct 2017 16:05:51 +0800
Labels: name=http-pod
Status: Running
IP: 172.17.59.4
Controllers: ReplicationController/http-rc
Containers:
http-pod:
Container ID: docker://a3338c455a27540c8f7b7b3f01fa3862b1082f7ae47e9b3761610b4a6043245b
Image: 192.168.59.133:5000/centos6-http
Image ID: docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
Port: 80/TCP
State: Running
Started: Tue, 31 Oct 2017 16:05:53 +0800
Ready: True
Restart Count: 0
Volume Mounts: <none>
Environment Variables: <none>
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations: <none>
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
2m 2m 1 {default-scheduler } Normal Scheduled Successfully assigned http-rc-xxtrw to centos-minion2
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Pulling pulling image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Pulled Successfully pulled image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Created Created container with docker id a3338c455a27; Security:[seccomp=unconfined]
2m 2m 1 {kubelet centos-minion2} spec.containers{http-pod} Normal Started Started container with docker id a3338c455a27

Name: http-rc-z8t9n
Namespace: default
Node: centos-minion1/192.168.59.132
Start Time: Tue, 31 Oct 2017 16:05:52 +0800
Labels: name=http-pod
Status: Running
IP: 172.17.34.3
Controllers: ReplicationController/http-rc
Containers:
http-pod:
Container ID: docker://6b4fbca3f6a8690f24fe749556323a6be85f5122f378a076a8bf9d0556a89b6e
Image: 192.168.59.133:5000/centos6-http
Image ID: docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
Port: 80/TCP
State: Running
Started: Tue, 31 Oct 2017 16:05:54 +0800
Ready: True
Restart Count: 0
Volume Mounts: <none>
Environment Variables: <none>
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations: <none>
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
2m 2m 1 {default-scheduler } Normal Scheduled Successfully assigned http-rc-z8t9n to centos-minion1
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Pulling pulling image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Pulled Successfully pulled image "192.168.59.133:5000/centos6-http"
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Created Created container with docker id 6b4fbca3f6a8; Security:[seccomp=unconfined]
2m 2m 1 {kubelet centos-minion1} spec.containers{http-pod} Normal Started Started container with docker id 6b4fbca3f6a8
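
As an optional final step beyond the original test, the ReplicationController's replica count can be changed on the fly and Kubernetes will converge the running pods to the new target:

# kubectl scale rc http-rc --replicas=3
# kubectl get pods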