Kubernetes
Arquitectura Kubernetes (II)
Continuamos con los siguientes pasos en la creación de un cluster Kubernetes. Esta vez toca kubelet y flanneld. Instalación de KUBECTL: Nos bajamos los binarios, descomprimimos y copiamos a /usr/kubernetes en todos los master:
1 2 3 |
wget https://dl.k8s.io/v1.11.0/kubernetes-client-linux-amd64.tar.gz tar xzvf kubernetes-client-linux-amd64.tar.gz cp kubectl /usr/kubernetes |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
{ "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "ES", "ST": "MADRID", "L": "MADRID", "O": "system:masters", "OU": "System" } ] } |
1 2 |
cfssl_linux-amd64 gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca.json -profile=kubernetes admin-csr.json | cfssljson_linux-amd64 -bare admin cp admin*pem /etc/kubernetes/ssl |
1 2 |
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://172.20.20.10:6443 |
1 2 3 4 5 6 7 8 9 10 |
head -c 16 /dev/urandom | od -An -t x | tr -d ' ' kubectl config set-credentials admin --client-certificate=/etc/kubernetes/ssl/admin.pem --embed-certs=true --client-key=/etc/kubernetes/ssl/admin-key.pem --token=804fa617f748dfef4ed29b30798ecaff kubectl config set-context kubernetes \ --cluster=kubernetes \ --user=admin kubectl config use-context kubernetes |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
kubectl config view apiVersion: v1 clusters: - cluster: certificate-authority-data: REDACTED server: https://172.20.20.10:6443 name: kubernetes contexts: - context: cluster: kubernetes user: admin name: kubernetes current-context: kubernetes kind: Config preferences: {} users: - name: admin user: client-certificate-data: REDACTED client-key-data: REDACTED token: 804fa617f748dfef4ed29b30798ecaff |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 |
{ "CN": "flanneld", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "ES", "ST": "MADRID", "L": "MADRID", "O": "k8s", "OU": "System" } ] } cfssl_linux-amd64 gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca.json -profile=kubernetes flanneld-csr.json | cfssljson_linux-amd64 -bare flanneld mkdir -p /etc/flanneld/ssl cp flann*pem /etc/flanneld/ssl |
1 |
etcdctl --endpoints=https://172.20.20.10:2379,https://172.20.20.11:2379,https://172.20.20.12:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/flanneld/ssl/flanneld.pem --key-file=/etc/flanneld/ssl/flanneld-key.pem --debug="true" set "/kubernetes/network/config" '{"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}' |
1 |
/usr/kubernetes/etcdctl --endpoints=https://172.20.20.12:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem get /kubernetes/network/config |
1 2 3 |
wget https://github.com/coreos/flannel/releases/download/v0.9.1/flannel-v0.9.1-linux-amd64.tar.gz tar xzvf flannel-v0.9.1-linux-amd64.tar.gz cp flanneld mk-docker-opts.sh /usr/kubernetes |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
[Unit] Description=Flanneld overlay address etcd agent After=network.target After=network-online.target Wants=network-online.target After=etcd.service Before=docker.service [Service] Type=notify ExecStart=/usr/kubernetes/flanneld -iface-regex eth1 -etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/flanneld/ssl/flanneld.pem -etcd-keyfile=/etc/flanneld/ssl/flanneld-key.pem -etcd-endpoints=https://172.20.20.10:2379,https://172.20.20.11:2379,https://172.20.20.12:2379 -etcd-prefix=/kubernetes/network ExecStartPost=/usr/kubernetes/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker Restart=on-failure [Install] WantedBy=multi-user.target RequiredBy=docker.service |
1 2 3 4 |
systemctl daemon-reload systemctl enable flanneld systemctl start flanneld systemctl status flanneld |
1 2 3 4 5 6 7 8 9 10 |
/usr/kubernetes/etcdctl --endpoints=https://172.20.20.12:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem ls /kubernetes/network/subnets /kubernetes/network/subnets/172.30.43.0-24 /kubernetes/network/subnets/172.30.49.0-24 /kubernetes/network/subnets/172.30.87.0-24 /usr/kubernetes/etcdctl --endpoints=https://172.20.20.12:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem get /kubernetes/network/subnets/172.30.43.0-24 {"PublicIP":"172.20.20.11","BackendType":"vxlan","BackendData":{"VtepMAC":"ea:f3:c2:9b:18:f4"}} |
1 2 3 4 5 |
wget https://dl.k8s.io/v1.11.0/kubernetes-server-linux-amd64.tar.gz tar xzvf kubernetes-server-linux-amd64.tar.gz cp kube-apiserver kube-controller-manager kube-scheduler /usr/kubernetes |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
{ "CN": "kubernetes", "hosts": [ "127.0.0.1", "172.20.20.10", "172.20.20.11", "172.20.20.12", "k8s-api.virtual.local", "10.254.0.2", "10.254.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "ES", "ST": "MADRID", "L": "MADRID", "O": "SCM", "OU": "ClusterK8s" } ] } |
1 |
804fa617f748dfef4ed29b30798ecaff,kubelet-bootstrap,10001,"system:kubelet-bootstrap" |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 |
[Unit] Description=Kubernetes API Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] ExecStart=/usr/kubernetes/kube-apiserver --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota --advertise-address=172.20.20.10 --bind-address=0.0.0.0 --insecure-bind-address=172.20.20.10 --authorization-mode=Node,RBAC --runtime-config=rbac.authorization.k8s.io/v1alpha1 --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/etc/kubernetes/token.csv --service-cluster-ip-range=10.254.0.0/16 --service-node-port-range=30000-32766 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem --etcd-servers=https://172.20.20.10:2379,https://172.20.20.11:2379,https://172.20.20.12:2379 --enable-swagger-ui=true --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/lib/audit.log --audit-policy-file=/etc/kubernetes/audit-policy.yaml --event-ttl=1h --logtostderr=true Restart=on-failure RestartSec=5 Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
apiVersion: audit.k8s.io/v1beta1 # This is required. kind: Policy # Don't generate audit events for all requests in RequestReceived stage. omitStages: - "RequestReceived" rules: # Log pod changes at RequestResponse level - level: RequestResponse resources: - group: "" # Resource "pods" doesn't match requests to any subresource of pods, # which is consistent with the RBAC policy. resources: ["pods"] # Log "pods/log", "pods/status" at Metadata level - level: Metadata resources: - group: "" resources: ["pods/log", "pods/status"] # Don't log requests to a configmap called "controller-leader" - level: None resources: - group: "" resources: ["configmaps"] resourceNames: ["controller-leader"] # Don't log watch requests by the "system:kube-proxy" on endpoints or services - level: None users: ["system:kube-proxy"] verbs: ["watch"] resources: - group: "" # core API group resources: ["endpoints", "services"] # Don't log authenticated requests to certain non-resource URL paths. - level: None userGroups: ["system:authenticated"] nonResourceURLs: - "/api*" # Wildcard matching. - "/version" # Log the request body of configmap changes in kube-system. - level: Request resources: - group: "" # core API group resources: ["configmaps"] # This rule only applies to resources in the "kube-system" namespace. # The empty string "" can be used to select non-namespaced resources. namespaces: ["kube-system"] # Log configmap and secret changes in all other namespaces at the Metadata level. - level: Metadata resources: - group: "" # core API group resources: ["secrets", "configmaps"] # Log all other resources in core and extensions at the Request level. - level: Request resources: - group: "" # core API group - group: "extensions" # Version of group should NOT be included. # A catch-all rule to log all other requests at the Metadata level. - level: Metadata # Long-running requests like watches that fall under this rule will not # generate an audit event in RequestReceived. 
omitStages: - "RequestReceived" |
1 2 3 4 |
systemctl daemon-reload systemctl enable kube-apiserver systemctl start kube-apiserver systemctl status kube-apiserver |
1 2 3 4 5 6 7 8 9 10 11 |
[Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/usr/kubernetes/kube-controller-manager --address=127.0.0.1 --master=http://172.20.20.10:8080 --allocate-node-cidrs=true --service-cluster-ip-range=10.254.0.0/16 --cluster-cidr=172.30.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target |
1 2 3 4 |
systemctl daemon-reload systemctl enable kube-controller-manager systemctl start kube-controller-manager systemctl status kube-controller-manager |
1 2 3 4 5 6 7 8 9 10 11 |
[Unit] Description=Kubernetes Scheduler Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/usr/kubernetes/kube-scheduler --address=127.0.0.1 --master=http://172.20.20.10:8080 --leader-elect=true Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target |
1 2 3 4 |
systemctl daemon-reload systemctl enable kube-scheduler systemctl start kube-scheduler systemctl status kube-scheduler |
1 |
kubectl create clusterrolebinding root-cluster-admin-binding --clusterrole=cluster-admin --user=kubelet-bootstrap --server=http://172.20.20.10:8080 |
1 2 3 4 5 6 7 |
kubectl get componentstatuses NAME STATUS MESSAGE ERROR controller-manager Healthy ok etcd-0 Healthy {"health": "true"} etcd-1 Healthy {"health": "true"} etcd-2 Healthy {"health": "true"} scheduler Healthy ok |