Kubernetes 1.2 Deployment

Deployment environment
CentOS 7 operating system
3 hosts
10.10.125.68 master
10.10.125.69 node
10.10.125.71 node
(The installation walkthrough below uses a local test setup on 192.168.25.0/24: master 192.168.25.10, nodes 192.168.25.11 and 192.168.25.12.)


Check the versions
On the master:
[root@localhost home]# kubectl version
Client Version: version.Info{Major:"1", Minor:"2", GitVersion:"v1.2.0", GitCommit:"ec7364b6e3b155e78086018aa644057edbe196e5", GitTreeState:"clean"}
Server Version: version.Info{Major:"1", Minor:"2", GitVersion:"v1.2.0", GitCommit:"ec7364b6e3b155e78086018aa644057edbe196e5", GitTreeState:"clean"}

On a node:
[root@localhost ~]# flanneld -version
0.5.3
[root@localhost ~]# docker version 
Client:
 Version:         1.10.3
 API version:     1.22
 Package version: docker-common-1.10.3-59.el7.centos.x86_64
 Go version:      go1.6.3
 Git commit:      3999ccb-unsupported
 Built:           Thu Dec 15 17:24:43 2016
 OS/Arch:         linux/amd64

Server:
 Version:         1.10.3
 API version:     1.22
 Package version: docker-common-1.10.3-59.el7.centos.x86_64
 Go version:      go1.6.3
 Git commit:      3999ccb-unsupported
 Built:           Thu Dec 15 17:24:43 2016
 OS/Arch:         linux/amd64


Installation steps

1. Install the CentOS-7-x86_64-DVD-1511.iso operating system and set the correct time zone.
2. Configure the IPs: master 192.168.25.10, node1 192.168.25.11, node2 192.168.25.12.
3. Configure /etc/hosts.

4. Configure /etc/hostname.

5. Configure the HTTP proxy:


vi /etc/profile
export http_proxy="http://xiao.yang:Zxy*1109@192.168.2.49:8080"
export https_proxy="http://xiao.yang:Zxy*1109@192.168.2.49:8080"
export no_proxy="localhost,127.0.0.1,10.10.*.*,.neusoft.com"

Apply and verify:
source /etc/profile
export    # lists the exported variables; check the proxy entries are present
6. Configure the yum source; keep only this one file under /etc/yum.repos.d:
    CentOS7-Base-163.repo
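A minimal way to fetch that file (assuming the usual 163 mirror location) and rebuild the cache:

cd /etc/yum.repos.d
wget http://mirrors.163.com/.help/CentOS7-Base-163.repo
yum clean all && yum makecache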
 
7. Install the NTP service so all hosts keep the same time
#yum -y install ntp ntpdate
#mv /etc/ntp.conf /etc/ntp.conf.bak    (back up the stock config)
#vi /etc/ntp.conf                      (create a minimal one)
driftfile /var/lib/ntp/drift
server time.neusoft.com

---- enable the service at boot
 chkconfig --level 345 ntpd on
---- start it
 service ntpd start
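Once ntpd is running, confirm it is actually talking to the configured server:

 ntpq -p    # the time.neusoft.com peer should appear with a nonzero reach value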

8. Stop the local firewall and disable it at boot
# systemctl stop firewalld.service
# systemctl disable firewalld.service
9. Disable SELinux
# vi /etc/sysconfig/selinux
SELINUX=disabled
# setenforce 0    (effective immediately; the file change takes over after reboot)
10. Install the ifconfig tool (the minimal install lacks it)
        -bash: ifconfig: command not found
# yum install net-tools -y
11. Configure the Docker HTTP proxy and registry mirror; see the sketch below.
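The Docker daemon ignores the shell proxy variables from /etc/profile, so the proxy has to go into its systemd environment. A minimal sketch, assuming the same proxy as above (apply it after Docker is installed in the steps below):

mkdir -p /etc/systemd/system/docker.service.d
cat << EOF > /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://xiao.yang:Zxy*1109@192.168.2.49:8080"
Environment="NO_PROXY=localhost,127.0.0.1,192.168.25.10"
EOF
systemctl daemon-reload
systemctl restart docker

For a registry mirror (accelerator), append --registry-mirror=<mirror-url> to OPTIONS in /etc/sysconfig/docker; <mirror-url> is a placeholder for whatever mirror you use.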
12. Install the master
yum -y install etcd kubernetes-master        (installs the repo's default version)
yum -y install etcd kubernetes-master-1.2*   (pins version 1.2)

[root@localhost ~]# vi /etc/kubernetes/controller-manager

###
# The following values are used to configure the kubernetes controller-manager

# defaults from config and apiserver should be adequate

# Add your own!
#KUBE_CONTROLLER_MANAGER_ARGS=""
# shortened so a failed node is noticed and its pods are evicted and rescheduled faster
KUBE_CONTROLLER_MANAGER_ARGS="--node-monitor-grace-period=10s --pod-eviction-timeout=10s"



[root@localhost ~]#  vi /etc/kubernetes/apiserver 

###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#

# The address on the local server to listen to.
#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# The port on the local server to listen on.
# KUBE_API_PORT="--port=8080"

# Port minions listen on
# KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
#KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.25.10:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""


[root@localhost ~]#  vi /etc/kubernetes/config 

###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
#KUBE_MASTER="--master=http://127.0.0.1:8080"
KUBE_MASTER="--master=http://192.168.25.10:8080"





[root@localhost ~]#  vi /etc/etcd/etcd.conf

# [member]
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_LISTEN_PEER_URLS="http://localhost:2380"
#ETCD_LISTEN_CLIENT_URLS="http://localhost:2379"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.25.10:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#
#[proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#
#[logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
#
#[profiling]
#ETCD_ENABLE_PPROF="false"

systemctl enable etcd kube-apiserver kube-scheduler kube-controller-manager
systemctl start etcd kube-apiserver kube-scheduler kube-controller-manager
systemctl restart etcd kube-apiserver kube-scheduler kube-controller-manager
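With the master services running, a quick health check from the master itself:

kubectl get componentstatuses    # scheduler, controller-manager and etcd-0 should all report Healthy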

yum -y install docker

etcdctl set /coreos.com/network/config '{ "Network" : "10.1.0.0/16" }'
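This stores the overlay network range that flanneld on each node will read (via FLANNEL_ETCD_KEY below) to carve out a per-host subnet. Confirm it was written:

etcdctl get /coreos.com/network/config
{ "Network" : "10.1.0.0/16" }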

13. Install the nodes

yum install kubernetes-node-1.2* flannel-0.5.3  docker -y


systemctl enable docker
systemctl start docker


[root@localhost yum.repos.d]# vi /etc/sysconfig/flanneld 

# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
#FLANNEL_ETCD="http://127.0.0.1:2379"
FLANNEL_ETCD="http://192.168.25.10:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
#FLANNEL_ETCD_KEY="/atomic.io/network"
FLANNEL_ETCD_KEY="/coreos.com/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""


Restart flanneld first, then Docker, so that docker0 is re-created inside the flannel-assigned subnet:

systemctl enable flanneld.service
systemctl restart flanneld.service
systemctl restart docker



[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:cf:89:3a brd ff:ff:ff:ff:ff:ff
    inet 192.168.25.11/24 brd 192.168.25.255 scope global eno16777736
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fecf:893a/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:6b:3e:bd:c8 brd ff:ff:ff:ff:ff:ff
    inet 10.1.18.1/24 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN qlen 500
    link/none 
    inet 10.1.18.0/16 scope global flannel0
       valid_lft forever preferred_lft forever


[root@localhost ~]# vi /etc/kubernetes/config

###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
#KUBE_MASTER="--master=http://127.0.0.1:8080"
KUBE_MASTER="--master=http://192.168.25.10:8080"



[root@localhost ~]# vi /etc/kubernetes/kubelet 

###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
#KUBELET_ADDRESS="--address=127.0.0.1"
KUBELET_ADDRESS="--address=0.0.0.0"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
#KUBELET_HOSTNAME="--hostname-override=127.0.0.1"
KUBELET_HOSTNAME="--hostname-override=192.168.25.11"

# location of the api-server
#KUBELET_API_SERVER="--api-servers=http://127.0.0.1:8080"
KUBELET_API_SERVER="--api-servers=http://192.168.25.10:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""

systemctl enable kubelet kube-proxy
systemctl start kubelet kube-proxy
systemctl restart kubelet kube-proxy


Verification
Run the following on the master node:
[root@localhost ~]# kubectl get nodes
NAME            STATUS    AGE
192.168.25.11   Ready     23s


Check the flannel network config stored in etcd:
etcdctl get /coreos.com/network/config


nginx.yaml

cat << EOF >nginx.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
        resources:
          requests:
            cpu: 400m
EOF

kubectl create -f nginx.yaml --record

  
nginx-svc.yaml
cat << EOF >nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30088
EOF

kubectl create -f nginx-svc.yaml

[root@localhost home]# kubectl get deployment,nodes,pod,service -o wide
NAME                                DESIRED        CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment                    2              2         2            2           4m
NAME                                STATUS         AGE
192.168.25.11                       Ready          1h
192.168.25.12                       Ready          1h
NAME                                READY          STATUS        RESTARTS   AGE       NODE
nginx-deployment-1863429836-6oz0k   1/1            Running       0          4m        192.168.25.12
nginx-deployment-1863429836-mu3te   1/1            Running       0          4m        192.168.25.11
NAME                                CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE       SELECTOR
kubernetes                          10.254.0.1     <none>        443/TCP    2h        <none>
nginx                               10.254.48.18   nodes         80/TCP     4m        app=nginx

From any machine that can reach the 192.168.25.x segment, open either node's NodePort in a browser:
http://192.168.25.12:30088/
http://192.168.25.11:30088/
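Or check from a shell (either node IP works):

curl -I http://192.168.25.11:30088/    # expect HTTP/1.1 200 OK from nginx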
Clean up when finished:
kubectl delete -f nginx-svc.yaml
kubectl delete -f nginx.yaml

API access paths (Swagger UI):
   http://192.168.25.10:8080/swagger-ui
   http://10.10.125.68:8080/swagger-ui



Entering a Docker container

wget -P ~ https://github.com/yeasy/docker_practice/raw/master/_local/.bashrc_docker;
If wget fails, place the following file directly in root's home directory:
.bashrc_docker


echo "[ -f ~/.bashrc_docker ] && . ~/.bashrc_docker" >> ~/.bashrc; source ~/.bashrc




Modifying resources
1. Edit a live object directly:
kubectl edit deployment nginx-deployment
kubectl edit deployment/nginx-deployment
kubectl edit pod nginx
kubectl edit pod/nginx
kubectl edit rc frontend
kubectl edit rc/frontend

2. Edit the manifest and re-apply (bumps the nginx image, which triggers a rolling update):
sed -i "s/nginx:1.7.9/nginx:1.9.1/g" nginx.yaml
kubectl apply -f nginx.yaml
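To confirm the rolling update went through (the rollout subcommands shipped with Deployments in kubectl 1.2):

kubectl rollout history deployment/nginx-deployment    # revisions recorded thanks to --record
kubectl describe deployment nginx-deployment           # the image should now be nginx:1.9.1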

Reference:
http://blog.csdn.net/yang7551735/article/details/51172179




