环境介绍及准备
物理机操作系统
[root@master tmp]# cat /etc/redhat-release
CentOS Linux release 7.2.1511 (Core)
[root@master tmp]# uname -r
3.10.0-327.el7.x86_64
服务器信息
hostname | ip |
---|---|
master,etcd,registry | 10.0.0.161 |
node01 | 10.0.0.159 |
node02 | 10.0.0.160 |
修改主机名
master:
[root@master ~]# hostnamectl --static set-hostname master
node01:
[root@node01 ~]# hostnamectl --static set-hostname node01
node02:
[root@node02 ~]# hostnamectl --static set-hostname node02
集群全部服务器修改hosts配置文件,关闭防火墙
cat >>/etc/hosts<<EOF
10.0.0.161 etcd
10.0.0.161 registry
10.0.0.159 node01
10.0.0.160 node02
10.0.0.161 master
EOF
systemctl disable firewalld.service
systemctl stop firewalld.service
部署master
安装etcd
安装etcd
[root@master tmp]# yum install etcd -y
修改/etc/etcd/etcd.conf
# [member]
ETCD_NAME=master
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
ETCD_ADVERTISE_CLIENT_URLS="http://etcd:2379,http://etcd:4001"
验证etcd
[root@master ~]# systemctl start etcd
[root@master ~]# etcdctl set testdir/testkey0 0
0
[root@master ~]# etcdctl get testdir/testkey0
0
[root@master ~]# etcdctl -C http://etcd:4001 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://etcd:2379
cluster is healthy
[root@master ~]# etcdctl -C http://etcd:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://0.0.0.0:2379
cluster is healthy
安装Docker
[root@master ~]# yum install docker
配置registry
配置Docker配置文件,使其允许从registry中拉取镜像。
[root@master ~]# vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false'
if [ -z "${DOCKER_CERT_PATH}" ]; then
DOCKER_CERT_PATH=/etc/docker
fi
# 注意:sysconfig文件中后一个OPTIONS赋值会覆盖前一个,应把--insecure-registry合并进原有的OPTIONS行:
OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false --insecure-registry registry:5000'
配置docker服务
[root@master ~]# chkconfig docker on
[root@master ~]# service docker start
安装kubernetes
[root@master ~]# yum install kubernetes
配置kubernetes
[root@master ~]# vi /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://etcd:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""
[root@master ~]# vi /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://master:8080"
启动kubernetes
[root@master ~]# systemctl enable kube-apiserver.service
[root@master ~]# systemctl start kube-apiserver.service
[root@master ~]# systemctl enable kube-controller-manager.service
[root@master ~]# systemctl start kube-controller-manager.service
[root@master ~]# systemctl enable kube-scheduler.service
[root@master ~]# systemctl start kube-scheduler.service
部署node
安装docker
参见2.2
安装kubernetes
参见2.3
配置并启动kubernetes
在kubernetes node上需要运行以下组件:
Kubelet
Kubernetes Proxy
相应的要更改以下几个配置信息:
[root@node01 ~]# vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://master:8080"
[root@node01 ~]# vim /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=node01"
KUBELET_API_SERVER="--api-servers=http://master:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
启动服务并设置开机自启动
[root@master ~]# systemctl enable kubelet.service
[root@master ~]# systemctl start kubelet.service
[root@master ~]# systemctl enable kube-proxy.service
[root@master ~]# systemctl start kube-proxy.service
查看状态
在master上查看集群中节点及节点状态
[root@master ~]# kubectl -s http://master:8080 get node
NAME STATUS AGE
node01 Ready 3m
node02 Ready 16s
[root@master ~]# kubectl get nodes
NAME STATUS AGE
node01 Ready 3m
node02 Ready 43s
至此,已经搭建了一个kubernetes集群,但目前该集群还不能很好的工作,请继续后续的步骤。
创建覆盖网络——Flannel
安装Flannel
在master、node上均执行如下命令,进行安装
[root@master ~]# yum install flannel
配置Flannel
master、node上均修改/etc/sysconfig/flanneld
[root@master ~]# vi /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://etcd:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
配置etcd中关于flannel的key
Flannel使用Etcd进行配置,来保证多个Flannel实例之间的配置一致性,所以需要在etcd上进行如下配置:('/atomic.io/network/config'这个key与上文/etc/sysconfig/flanneld中的配置项FLANNEL_ETCD_PREFIX是相对应的,错误的话启动就会出错)。注意:此处Network指定的覆盖网络网段不应与宿主机物理网段重叠;本例中10.0.0.0/16与宿主机的10.0.0.x网段重叠,实际部署时建议改用不冲突的网段(如172.17.0.0/16)。
[root@master ~]# etcdctl mk /atomic.io/network/config '{ "Network": "10.0.0.0/16" }'
{ "Network": "10.0.0.0/16" }
启动
启动Flannel之后,需要依次重启docker、kubernetes。
在master执行:
systemctl enable flanneld.service
systemctl start flanneld.service
service docker restart
systemctl restart kube-apiserver.service
systemctl restart kube-controller-manager.service
systemctl restart kube-scheduler.service
在node上执行:
systemctl enable flanneld.service
systemctl start flanneld.service
service docker restart
systemctl restart kubelet.service
systemctl restart kube-proxy.service
安装dashboard
拉取docker镜像
[root@master tmp]# docker pull registry.cn-hangzhou.aliyuncs.com/google-containers/kubernetes-dashboard-amd64:v1.6.0
[root@master tmp]# docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
修改docker镜像的tag
[root@master tmp]# docker tag registry.cn-hangzhou.aliyuncs.com/google-containers/kubernetes-dashboard-amd64:v1.6.0 gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0
查看docker镜像
[root@master tmp]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.access.redhat.com/rhel7/pod-infrastructure latest f66f4bd9b894 8 weeks ago 205.8 MB
gcr.io/google_containers/kubernetes-dashboard-amd64 v1.6.0 416701f962f2 4 months ago 108.6 MB
registry.cn-hangzhou.aliyuncs.com/google-containers/kubernetes-dashboard-amd64 v1.6.0 416701f962f2 4 months ago 108.6 MB
[root@master tmp]# docker save registry.cn-hangzhou.aliyuncs.com/google-containers/kubernetes-dashboard-amd64 > dashboard.tar
[root@master tmp]# docker save registry.access.redhat.com/rhel7/pod-infrastructure > podinfrastructure.tar
复制docker到node01/node02
[root@master tmp]# scp podinfrastructure.tar node01:~
[root@master tmp]# scp podinfrastructure.tar node02:~
[root@master tmp]# scp dashboard.tar node01:~
[root@master tmp]# scp dashboard.tar node02:~
node01/node02生成docker镜像
docker load < dashboard.tar
docker load < podinfrastructure.tar
配置dashboard
cat >dashboard.yaml<<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  # Keep the name in sync with image version and
  # gce/coreos/kube-manifests/addons/dashboard counterparts
  name: kubernetes-dashboard-latest
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
        version: latest
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
        - --apiserver-host=http://10.0.0.161:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
EOF
cat >dashboardsvc.yaml<<EOF
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090
EOF
kubectl create -f dashboard.yaml
kubectl create -f dashboardsvc.yaml
验证dashboard
[root@master tmp]# kubectl get deployment --all-namespaces
NAMESPACE NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
kube-system kubernetes-dashboard-latest 1 1 1 1 1h
[root@master tmp]# kubectl get svc --all-namespaces
NAMESPACE NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes 10.0.0.1 <none> 443/TCP 3h
kube-system kubernetes-dashboard 10.254.161.116 <none> 80/TCP 1h
[root@master tmp]# kubectl get pod -o wide --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE
kube-system kubernetes-dashboard-latest-2587589530-czkq5 1/1 Running 0 1h 10.0.7.2 node02