$ kubectl create -f k8smm-worker.yaml
$ kubectl get events -w # SuccessfulSetNodeRef
$ kubectl get machinedeployment
$ kubectl get ms
$ kubectl get machines
NAME                                PROVIDERID                    PHASE
k8smm-aws-controlplane-0            aws:////i-0acaac28fc08113f3   running
k8smm-aws-worker-85d474df85-hcwwv   aws:////i-0b6c621da978cb4d2   running
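Optionally, if a machine hangs in the provisioning phase, describing it surfaces the controller's events and status conditions; for example, for the control plane machine above:
$ kubectl describe machine k8smm-aws-controlplane-0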
$ kubectl --kubeconfig=./k8smm-aws.kubeconfig get nodes
NAME                                       STATUS   ROLES    AGE     VERSION
ip-10-0-0-218.us-east-2.compute.internal   Ready    <none>   2m30s   v1.15.3
ip-10-0-0-242.us-east-2.compute.internal   Ready    master   31m     v1.15.3
$ kubectl get clusters
NAME        PHASE
k8smm-aws   provisioned
By changing the names with vim or sed, the same manifests can easily be reused for a second cluster. In vim:
:%s/k8smm-aws/awsugmm-aws/g
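The same substitution can be scripted with sed; a minimal sketch, assuming the first cluster's manifests follow the k8smm-*.yaml naming used above:
$ for f in cluster control-plane worker; do
    sed 's/k8smm-aws/awsugmm-aws/g' "k8smm-${f}.yaml" > "awsugmm-${f}.yaml"
  done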
$ kubectl create -f awsugmm-cluster.yaml
$ kubectl get clusters
NAME          PHASE
awsugmm-aws   provisioning
k8smm-aws     provisioned
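While the new cluster reports provisioning, the AWS-specific state (VPC, subnets, and so on) lives in the paired AWSCluster object; an optional check:
$ kubectl get awsclusters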
awsugmm-control-plane.yaml
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Machine
metadata:
  name: awsugmm-aws-controlplane-0
  labels:
    cluster.x-k8s.io/control-plane: "true"
    cluster.x-k8s.io/cluster-name: "awsugmm-aws"
spec:
  version: v1.15.3
  bootstrap:
    configRef:
      apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
      kind: KubeadmConfig
      name: awsugmm-aws-controlplane-0
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: AWSMachine
    name: awsugmm-aws-controlplane-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AWSMachine
metadata:
  name: awsugmm-aws-controlplane-0
spec:
  instanceType: t3.large
  # This IAM profile is part of the pre-requisites.
  iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
  # Change this value to a valid SSH Key Pair present in your AWS Account.
  sshKeyName: k8smm
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
metadata:
  name: awsugmm-aws-controlplane-0
spec:
  # For more information about these values,
  # refer to the Kubeadm Bootstrap Provider documentation.
  initConfiguration:
    nodeRegistration:
      name: '{{ ds.meta_data.hostname }}'
      kubeletExtraArgs:
        cloud-provider: aws
  clusterConfiguration:
    apiServer:
      extraArgs:
        cloud-provider: aws
    controllerManager:
      extraArgs:
        cloud-provider: aws
$ kubectl create -f awsugmm-control-plane.yaml
$ kubectl get events -w # SuccessfulSetNodeRef
$ kubectl get machines
NAME                                PROVIDERID                    PHASE
awsugmm-aws-controlplane-0          aws:////i-0663a439bce811cb9   running
k8smm-aws-controlplane-0            aws:////i-0acaac28fc08113f3   running
k8smm-aws-worker-85d474df85-hcwwv   aws:////i-0b6c621da978cb4d2   running
$ kubectl get secret
$ kubectl --namespace=default get secret/awsugmm-aws-kubeconfig -o json \
| jq -r .data.value \
| base64 --decode \
> ./awsugmm-aws.kubeconfig
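If jq is not installed, kubectl's built-in jsonpath output works just as well; an equivalent sketch:
$ kubectl --namespace=default get secret/awsugmm-aws-kubeconfig \
    -o jsonpath='{.data.value}' | base64 --decode > ./awsugmm-aws.kubeconfig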
$ kubectl --kubeconfig=./awsugmm-aws.kubeconfig get nodes
NAME                                      STATUS     ROLES    AGE     VERSION
ip-10-0-0-97.us-east-2.compute.internal   NotReady   master   6m57s   v1.15.3
$ kubectl --kubeconfig=./awsugmm-aws.kubeconfig create -f calico.yaml
$ kubectl --kubeconfig=./awsugmm-aws.kubeconfig get nodes
NAME                                      STATUS   ROLES    AGE     VERSION
ip-10-0-0-97.us-east-2.compute.internal   Ready    master   8m14s   v1.15.3
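The calico.yaml applied above is the same CNI manifest used for the first cluster. If it is no longer available locally, re-download it first; the URL below is the one the Cluster API quick start referenced around this release, so treat it as an assumption and check the Calico docs for the current path:
$ curl -LO https://docs.projectcalico.org/v3.9/manifests/calico.yaml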
awsugmm-worker.yaml
apiVersion: cluster.x-k8s.io/v1alpha2
kind: MachineDeployment
metadata:
  name: awsugmm-aws-worker
  labels:
    cluster.x-k8s.io/cluster-name: awsugmm-aws
    nodepool: nodepool-0
spec:
  replicas: 1
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: awsugmm-aws
      nodepool: nodepool-0
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: awsugmm-aws
        nodepool: nodepool-0
    spec:
      version: v1.15.3
      bootstrap:
        configRef:
          name: awsugmm-aws-worker
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
          kind: KubeadmConfigTemplate
      infrastructureRef:
        name: awsugmm-aws-worker
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
        kind: AWSMachineTemplate
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AWSMachineTemplate
metadata:
  name: awsugmm-aws-worker
spec:
  template:
    spec:
      instanceType: t3.large
      # This IAM profile is part of the pre-requisites.
      iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
      # Change this value to a valid SSH Key Pair present in your AWS Account.
      sshKeyName: k8smm
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfigTemplate
metadata:
  name: awsugmm-aws-worker
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          name: '{{ ds.meta_data.hostname }}'
          kubeletExtraArgs:
            cloud-provider: aws
$ kubectl create -f awsugmm-worker.yaml
$ kubectl get events -w # SuccessfulSetNodeRef
$ kubectl get machines
NAME                                  PROVIDERID                    PHASE
awsugmm-aws-controlplane-0            aws:////i-0663a439bce811cb9   running
awsugmm-aws-worker-644cb8787b-nmt9r   aws:////i-02f55f2b38c4d422a   running
k8smm-aws-controlplane-0              aws:////i-0acaac28fc08113f3   running
k8smm-aws-worker-85d474df85-hcwwv     aws:////i-0b6c621da978cb4d2   running
$ kubectl --kubeconfig=./awsugmm-aws.kubeconfig get nodes
NAME                                       STATUS   ROLES    AGE   VERSION
ip-10-0-0-188.us-east-2.compute.internal   Ready    <none>   99s   v1.15.3
ip-10-0-0-97.us-east-2.compute.internal    Ready    master   12m   v1.15.3
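With both clusters up, either worker pool can be resized by editing the replica count on its MachineDeployment; a minimal sketch using kubectl patch:
$ kubectl patch machinedeployment awsugmm-aws-worker \
    --type merge -p '{"spec":{"replicas":3}}'
The MachineSet controller then creates (or deletes) Machines to match, and the new nodes join the workload cluster the same way the first worker did.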