手动搭建Kubernetes1.8高可用集群(7)dnsmasq

来源:互联网 发布:空气净化器评测 知乎 编辑:程序博客网 时间:2024/05/16 10:51

接着上一篇

一、准备

1、镜像 阿里云镜像下载

andyshinn/dnsmasq:2.72
gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1

2、创建文件夹

/etc/dnsmasq.d
/etc/dnsmasq.d-available

二、配置文件

1、/etc/dnsmasq.d-available/01-kube-dns.conf

      /etc/dnsmasq.d/01-kube-dns.conf          （这两个文件内容完全一样，创建一个符号链接即可）

#Listen on localhostbind-interfaceslisten-address=0.0.0.0addn-hosts=/etc/hostsstrict-order# Forward k8s domain to kube-dnsserver=/cluster.local/10.233.0.3# Reply NXDOMAIN to bogus domains requests like com.cluster.local.cluster.locallocal=/cluster.local.default.svc.cluster.local./default.svc.cluster.local.default.svc.cluster.local./com.default.svc.cluster.local./cluster.local.svc.cluster.local./svc.cluster.local.svc.cluster.local./com.svc.cluster.local./#Set upstream dns serversserver=192.168.1.1no-resolvbogus-privno-negcachecache-size=1000dns-forward-max=150max-cache-ttl=10max-ttl=20log-facility=-

2、/etc/kubernetes/dnsmasq-clusterrolebinding.yml

---kind: ClusterRoleBindingapiVersion: rbac.authorization.k8s.io/v1beta1metadata:  name: dnsmasq  namespace: "kube-system"subjects:  - kind: ServiceAccount    name: dnsmasq    namespace: "kube-system"roleRef:  kind: ClusterRole  name: cluster-admin  apiGroup: rbac.authorization.k8s.io

3、/etc/kubernetes/dnsmasq-serviceaccount.yml

---apiVersion: v1kind: ServiceAccountmetadata:  name: dnsmasq  namespace: "kube-system"  labels:    kubernetes.io/cluster-service: "true"

4、/etc/kubernetes/dnsmasq-deploy.yml

---apiVersion: extensions/v1beta1kind: Deploymentmetadata:  name: dnsmasq  namespace: "kube-system"  labels:    k8s-app: dnsmasq    kubernetes.io/cluster-service: "true"spec:  replicas: 1  selector:    matchLabels:      k8s-app: dnsmasq  strategy:    type: "Recreate"  template:    metadata:      labels:        k8s-app: dnsmasq        kubernetes.io/cluster-service: "true"        kubespray/dnsmasq-checksum: "37a3d39ad780e599d0ba2405abfb43cd8d6139a3"    spec:      tolerations:        - effect: NoSchedule          operator: Exists      containers:        - name: dnsmasq          image: "andyshinn/dnsmasq:2.72"          imagePullPolicy: IfNotPresent          command:            - dnsmasq          args:            - -k            - -C            - /etc/dnsmasq.d/01-kube-dns.conf          securityContext:            capabilities:              add:                - NET_ADMIN          resources:            limits:              cpu: 100m              memory: 170Mi            requests:              cpu: 40m              memory: 50Mi          ports:            - name: dns              containerPort: 53              protocol: UDP            - name: dns-tcp              containerPort: 53              protocol: TCP          volumeMounts:            - name: etcdnsmasqd              mountPath: /etc/dnsmasq.d            - name: etcdnsmasqdavailable              mountPath: /etc/dnsmasq.d-available      volumes:        - name: etcdnsmasqd          hostPath:            path: /etc/dnsmasq.d        - name: etcdnsmasqdavailable          hostPath:            path: /etc/dnsmasq.d-available      dnsPolicy: Default  # Don't use cluster DNS.

5、/etc/kubernetes/dnsmasq-svc.yml

---apiVersion: v1kind: Servicemetadata:  labels:    kubernetes.io/cluster-service: 'true'    k8s-app: dnsmasq  name: dnsmasq  namespace: kube-systemspec:  ports:    - port: 53      name: dns-tcp      targetPort: 53      protocol: TCP    - port: 53      name: dns      targetPort: 53      protocol: UDP  type: ClusterIP  clusterIP: 10.233.0.2  selector:    k8s-app: dnsmasq

6、/etc/kubernetes/dnsmasq-autoscaler.yml

---apiVersion: extensions/v1beta1kind: Deploymentmetadata:  name: dnsmasq-autoscaler  namespace: kube-system  labels:    k8s-app: dnsmasq-autoscaler    kubernetes.io/cluster-service: "true"    addonmanager.kubernetes.io/mode: Reconcilespec:  template:    metadata:      labels:        k8s-app: dnsmasq-autoscaler      annotations:        scheduler.alpha.kubernetes.io/critical-pod: ''        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'    spec:      serviceAccountName: dnsmasq      tolerations:        - effect: NoSchedule          operator: Exists      containers:        - name: autoscaler          image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1          resources:            requests:              cpu: "20m"              memory: "10Mi"          command:            - /cluster-proportional-autoscaler            - --namespace=kube-system            - --configmap=dnsmasq-autoscaler            - --target=Deployment/dnsmasq            # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.            # If using small nodes, "nodesPerReplica" should dominate.            - --default-params={"linear":{"nodesPerReplica":10,"preventSinglePointFailure":true}}            - --logtostderr=true            - --v=2

三、创建

1、apply

kubectl apply -f /etc/kubernetes/dnsmasq-clusterrolebinding.ymlkubectl apply -f /etc/kubernetes/dnsmasq-serviceaccount.yml

2、create

kubectl create -f /etc/kubernetes/dnsmasq-deploy.ymlkubectl create -f /etc/kubernetes/dnsmasq-svc.ymlkubectl create -f /etc/kubernetes/dnsmasq-autoscaler.yml

四、验证

1、kubectl get po -o wide -n kube-system

[root@node1 ~]# kubectl get po -o wide -n kube-system
NAME                                  READY     STATUS    RESTARTS   AGE       IP              NODE
calico-node-5d56t                     1/1       Running   0          2h        192.168.1.122   node2
calico-node-t8z9l                     1/1       Running   0          2h        192.168.1.126   node3
calico-node-z7nr5                     1/1       Running   0          2h        192.168.1.121   node1
dnsmasq-775767cfd7-654vs              1/1       Running   0          43m       10.233.75.3     node2
dnsmasq-775767cfd7-m5hfl              1/1       Running   0          38m       10.233.71.6     node3
dnsmasq-autoscaler-856b5c899b-tvzkl   1/1       Running   0          43m       10.233.71.2     node3
kube-apiserver-node1                  1/1       Running   0          5h        192.168.1.121   node1
kube-apiserver-node2                  1/1       Running   0          5h        192.168.1.122   node2
kube-controller-manager-node1         1/1       Running   0          5h        192.168.1.121   node1
kube-controller-manager-node2         1/1       Running   0          5h        192.168.1.122   node2
kube-proxy-node1                      1/1       Running   0          5h        192.168.1.121   node1
kube-proxy-node2                      1/1       Running   0          5h        192.168.1.122   node2
kube-proxy-node3                      1/1       Running   0          5h        192.168.1.126   node3
kube-scheduler-node1                  1/1       Running   0          5h        192.168.1.121   node1
kube-scheduler-node2                  1/1       Running   0          5h        192.168.1.122   node2
nginx-proxy-node3                     1/1       Running   0          5h        192.168.1.126   node3


原创粉丝点击