Coreos
cloud-config 解析時出現「unexpected end of stream」(串流意外結束)錯誤
所以這是我的雲配置
#cloud-config coreos: etcd2: discovery: "https://discovery.etcd.io/tocken" advertise-client-urls: "http://$private_ipv4:2379" initial-advertise-peer-urls: "http://$private_ipv4:2380" listen-client-urls: "http://0.0.0.0:2379,http://0.0.0.0:4001" listen-peer-urls: "http://$private_ipv4:2380,http://$private_ipv4:7001" flannel: interface: $private_ipv4 units: - name: etcd2.service command: start - name: flanneld.service drop-ins: - name: 50-network-config.conf content: | [Service] ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16" }' command: start - name: sshd.socket command: restart runtime: true content: | [Unit] Description=OpenSSH server daemon Conflicts=sshd.service [Socket] ListenStream=65321 FreeBind=true Accept=yes [Install] WantedBy=sockets.target - name: kubelet.service command: restart runtime: true content: | [Service] Environment=KUBELET_VERSION=v1.6.1_coreos.0 ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests ExecStart=/usr/lib/coreos/kubelet-wrapper \ --api-servers=http://127.0.0.1:8080 \ --allow-privileged=true \ --config=/etc/kubernetes/manifests \ --hostname-override=$private_ipv4 \ --cluster-dns=10.13.0.10 \ --cluster-domain=cluster.local Restart=always RestartSec=10 [Install] WantedBy=multi-user.target users: - name: admin ssh-authorized-keys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuCXgeT7kQfSikcU1BWRyMFi8izN+1WHPNopaaXQV2xune6nKOHN8yhGVRKaE9iQHY+6jSjxWd5SY9CEyWlIST5dxfffRkWZiuJISHAxl6+E+fI0kNsUG2AXTXuJnXBQllqkgsggfBJ+5BxNf35IyfILTqkDu99ZNBNbeTNSPJmbYgMs71fWB2TiGx8ugsZrIOzqbcEfu9KNTD+RszrLaCRAZNl1sANEk7N7ZIUaIIlBBxmaPWW1voXor4AP/SAnHMEouX25ZlruL7nCEH9BybVYT8xFVEBl0fJIoj/c1TYkk/80P7JLJg0pIAxMCWqy0NzBwEcXbef1yIlO6meDuZ Kirill@NOUTKIR groups: - "sudo" shell: /bin/bash write_files: - path: "/etc/ssh/sshd_config" permissions: 0600 owner: root:root content: | HostKey /etc/ssh/ssh_host_rsa_key HostKey /etc/ssh/ssh_host_dsa_key HostKey /etc/ssh/ssh_host_ecdsa_key HostKey /etc/ssh/ssh_host_ed25519_key 
UsePrivilegeSeparation yes KeyRegenerationInterval 3600 ServerKeyBits 1024 SyslogFacility AUTH LogLevel INFO LoginGraceTime 120 PermitRootLogin no StrictModes yes RSAAuthentication yes PubkeyAuthentication yes IgnoreRhosts yes RhostsRSAAuthentication no HostbasedAuthentication no PermitEmptyPasswords no ChallengeResponseAuthentication no X11Forwarding yes X11DisplayOffset 10 PrintMotd no PrintLastLog yes TCPKeepAlive yes AcceptEnv LANG LC_* Subsystem sftp /usr/lib/openssh/sftp-server UsePAM yes AllowUsers admin PasswordAuthentication no - path: "/etc/kubernetes/manifests/kube-apiserver.yaml # permissions: ?? # owner: ?? content: | apiVersion: v1 kind: Pod metadata: name: kube-apiserver namespace: kube-system spec: hostNetwork: true containers: - name: kube-apiserver image: quay.io/coreos/hyperkube:v1.6.1_coreos.0 command: - /hyperkube - apiserver - --bind-address=0.0.0.0 - --etcd-servers=http://<master private IP>:2379,http://<node1 private IP>:2379,http://<node2 private IP>:2379 - --allow-privileged=true - --service-cluster-ip-range=10.13.0.0/24 - --secure-port=443 - --advertise-address=<master private IP> - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota # - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem # - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem # - --client-ca-file=/etc/kubernetes/ssl/ca.pem - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem - --runtime-config=extensions/v1beta1=true,extensions/v1beta1/networkpolicies=true ports: - containerPort: 443 hostPort: 443 name: https - containerPort: 8080 hostPort: 8080 name: local # volumeMounts: # - mountPath: /etc/kubernetes/ssl # name: ssl-certs-kubernetes # readOnly: true # - mountPath: /etc/ssl/certs # name: ssl-certs-host # readOnly: true # volumes: # - hostPath: # path: /etc/kubernetes/ssl # name: ssl-certs-kubernetes # - hostPath: # path: /usr/share/ca-certificates # name: ssl-certs-host - path: /etc/kubernetes/manifests/kube-proxy.yaml # 
permissions: ?? # owner: ?? content: | apiVersion: v1 kind: Pod metadata: name: kube-proxy namespace: kube-system spec: hostNetwork: true containers: - name: kube-proxy image: quay.io/coreos/hyperkube:v1.6.1_coreos.0 command: - /hyperkube - proxy - --master=http://127.0.0.1:8080 - --proxy-mode=iptables securityContext: privileged: true # volumeMounts: # - mountPath: /etc/ssl/certs # name: ssl-certs-host # readOnly: true # volumes: # - hostPath: # path: /usr/share/ca-certificates # name: ssl-certs-host - path: /etc/kubernetes/manifests/kube-controller-manager.yaml # permissions: ?? # owner: ?? content: | apiVersion: v1 kind: Pod metadata: name: kube-controller-manager namespace: kube-system spec: hostNetwork: true containers: - name: kube-controller-manager image: quay.io/coreos/hyperkube:v1.6.1_coreos.0 command: - /hyperkube - controller-manager - --master=http://127.0.0.1:8080 - --leader-elect=true # - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem # - --root-ca-file=/etc/kubernetes/ssl/ca.pem livenessProbe: httpGet: host: 127.0.0.1 path: /healthz port: 10252 initialDelaySeconds: 15 timeoutSeconds: 1 # volumeMounts: # - mountPath: /etc/kubernetes/ssl # name: ssl-certs-kubernetes # readOnly: true # - mountPath: /etc/ssl/certs # name: ssl-certs-host # readOnly: true # volumes: # - hostPath: # path: /etc/kubernetes/ssl # name: ssl-certs-kubernetes # - hostPath: # path: /usr/share/ca-certificates # name: ssl-certs-host - path: /etc/kubernetes/manifests/kube-scheduler.yaml # permissions: ?? # owner: ?? content: | apiVersion: v1 kind: Pod metadata: name: kube-scheduler namespace: kube-system spec: hostNetwork: true containers: - name: kube-scheduler image: quay.io/coreos/hyperkube:v1.6.1_coreos.0 command: - /hyperkube - scheduler - --master=http://127.0.0.1:8080 - --leader-elect=true livenessProbe: httpGet: host: 127.0.0.1 path: /healthz port: 10251 initialDelaySeconds: 15 timeoutSeconds: 1
有人遇到過這種情況嗎?我已經花了 4 個小時用 Google 搜尋和各種嘗試,仍然沒有結果。
附註:錯誤出在下面引用的那一行。
第 99 行:
- path: "/etc/kubernetes/manifests/kube-apiserver.yaml
…缺少結尾的引號。
僅供參考,我在YAMLlint的幫助下在大約 5 秒內找到了這個。