#cloud-config

# CoreOS worker node (192.168.1.11, etcd member "slave1") for a 3-node
# Kubernetes cluster: static etcd2 peers, static networkd config, flannel
# overlay wired into docker, and kubelet + kube-proxy against the master
# at 192.168.1.10.
# NOTE(review): hostname says ".2" while /etc/hosts maps .11 to
# kube-worker-2 and etcd calls it slave1 — presumably the same machine
# under three naming schemes; confirm intended.

hostname: "coreos.local.2"

ssh_authorized_keys:
  # Placeholder — replace with the real public key before use.
  - "..."

coreos:
  etcd2:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
    # specify the initial size of your cluster with ?size=X
    # discovery: https://discovery.etcd.io/64cdc949e85e49cbabe1f019b6c95408
    # multi-region and multi-cloud deployments need to use $public_ipv4
    advertise-client-urls: http://192.168.1.11:2379,http://192.168.1.11:4001
    initial-advertise-peer-urls: http://192.168.1.11:2380
    # listen on both the official ports and the legacy ports
    # legacy ports can be omitted if your application doesn't depend on them
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://192.168.1.11:2380
    name: slave1
    # Static bootstrap (no discovery service): all three members listed.
    initial-cluster: master1=http://192.168.1.10:2380,slave1=http://192.168.1.11:2380,slave2=http://192.168.1.12:2380
    initial-cluster-state: new
    initial-cluster-token: syn-etcd-cluster1
  units:
    - name: etcd2.service
      command: start
    - name: fleet.service
      command: start
    # Static IP for the primary NIC via systemd-networkd.
    - name: 00-enp0s3.network
      runtime: true
      content: |
        [Match]
        Name=enp0s3

        [Network]
        Address=192.168.1.11/24
        Gateway=192.168.1.1
        DNS=8.8.8.8
        DNS=8.8.4.4
    # Point flanneld at the options file written under write_files below.
    - name: flanneld.service
      drop-ins:
        - name: 40-ExecStartPre-symlink.conf
          content: |
            [Service]
            ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
    # Docker must wait for the flannel overlay network.
    - name: docker.service
      drop-ins:
        - name: 40-flannel.conf
          content: |
            [Unit]
            Requires=flanneld.service
            After=flanneld.service

write_files:
  # kubeconfig shared by kubelet and kube-proxy (TLS client certs).
  - path: /etc/kubernetes/worker-kubeconfig.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Config
      clusters:
        - name: local
          cluster:
            certificate-authority: /etc/kubernetes/ssl/ca.pem
      users:
        - name: kubelet
          user:
            client-certificate: /etc/kubernetes/ssl/worker.pem
            client-key: /etc/kubernetes/ssl/worker-key.pem
      contexts:
        - context:
            cluster: local
            user: kubelet
          name: kubelet-context
      current-context: kubelet-context
  # Static pod manifest: kubelet launches kube-proxy from this directory.
  - path: /etc/kubernetes/manifests/kube-proxy.yaml
    permissions: "0644"
    owner: "root"
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: kube-proxy
        namespace: kube-system
      spec:
        hostNetwork: true
        containers:
          - name: kube-proxy
            image: quay.io/coreos/hyperkube:v1.1.8_coreos.0
            command:
              - /hyperkube
              - proxy
              - --master=https://192.168.1.10
              - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
              - --proxy-mode=iptables
            securityContext:
              privileged: true
            volumeMounts:
              - mountPath: /etc/ssl/certs
                name: "ssl-certs"
              - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
                name: "kubeconfig"
                readOnly: true
              - mountPath: /etc/kubernetes/ssl
                name: "etc-kube-ssl"
                readOnly: true
        volumes:
          - name: "ssl-certs"
            hostPath:
              path: "/usr/share/ca-certificates"
          - name: "kubeconfig"
            hostPath:
              path: "/etc/kubernetes/worker-kubeconfig.yaml"
          - name: "etc-kube-ssl"
            hostPath:
              path: "/etc/kubernetes/ssl"
  # kubelet unit using the CoreOS kubelet-wrapper (rkt-based).
  - path: /etc/systemd/system/kubelet.service
    permissions: "0644"
    owner: "root"
    content: |
      [Service]
      ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
      Environment=KUBELET_VERSION=v1.1.8_coreos.0
      ExecStart=/usr/lib/coreos/kubelet-wrapper \
        --api_servers=https://192.168.1.10 \
        --register-node=true \
        --allow-privileged=true \
        --config=/etc/kubernetes/manifests \
        --hostname-override=192.168.1.11 \
        --cluster-dns=10.3.0.10 \
        --cluster-domain=cluster.local \
        --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
        --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
        --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem
      Restart=always
      RestartSec=10

      [Install]
      WantedBy=multi-user.target
  - path: /etc/hosts
    content: |
      127.0.0.1 localhost
      192.168.1.10 kube-worker-1
      192.168.1.11 kube-worker-2
      192.168.1.12 kube-worker-3
  # Consumed by flanneld via the symlink created in its drop-in above.
  - path: /etc/flannel/options.env
    permissions: "0644"
    owner: "root"
    content: |
      FLANNELD_IFACE=192.168.1.11
      FLANNELD_ETCD_ENDPOINTS=http://192.168.1.10:2379,http://192.168.1.11:2379,http://192.168.1.12:2379