vm_name=  # set the name of the VM to create
gcloud compute instances create \
"${vm_name}" --zone asia-northeast1-c \
--image-project=ubuntu-os-cloud --image-family=ubuntu-2004-lts \
--machine-type "n1-custom-12-30720" --boot-disk-size 50GB --boot-disk-type pd-standard \
--enable-nested-virtualization --metadata=enable-oslogin=TRUE
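Once the instance is up, it may be worth confirming (after SSHing in with the next command) that nested virtualization is actually exposed to the guest. A minimal check, assuming an Intel CPU, which n1 machine types use:

grep -cw vmx /proc/cpuinfo   # non-zero means VT-x is visible inside the VM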
gcloud compute ssh "${vm_name}" --zone asia-northeast1-c

If you are not using OS Login:
ssh-keygen -t ed25519 -N "" -f "id_rsa_crc"
sed "s/ssh-ed25519/$(whoami):ssh-ed25519/" "id_rsa_crc.pub" > ssh-metadata
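The resulting ssh-metadata file should hold one entry in GCE's USERNAME:KEY format, e.g. (hypothetical user, key material abbreviated):

alice:ssh-ed25519 AAAAC3Nza... alice@workstation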
gcloud compute instances add-metadata "${vm_name}" \
--zone asia-northeast1-c --metadata-from-file ssh-keys=ssh-metadata
gcloud compute ssh "${vm_name}" --ssh-key-file="id_rsa_crc" --tunnel-through-iap

Inside the VM, install the packages CRC needs and fetch the CRC binary:

sudo apt-get update
sudo apt install -y qemu-kvm libvirt-daemon libvirt-daemon-system network-manager
wget https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz
tar -xvf crc-linux-amd64.tar.xz
sudo mv crc-linux-*/crc /usr/local/bin/
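A quick sanity check that the binary is installed and on PATH (the output varies by release):

crc version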
crc config set network-mode user
crc cleanup
crc setup
exit

Reset the VM, then reconnect:

gcloud compute instances reset "${vm_name}" --zone asia-northeast1-c
gcloud compute ssh "${vm_name}" --zone asia-northeast1-c

Pass the pull secret obtained in advance from https://cloud.redhat.com/openshift/create/local and start CRC.
crc setup
crc daemon &
crc start
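crc start prompts for the pull secret path interactively; it can also be supplied up front with the --pull-secret-file flag, assuming the secret was copied to the VM as pull-secret.txt (a hypothetical path):

crc start -p pull-secret.txt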
Add the following to /etc/hosts (replace 34.146.76.136 with your VM's external IP)
(@see https://code-ready.github.io/crc/#dns-configuration-linux_gsg)
34.146.76.136 api.crc.testing
34.146.76.136 oauth-openshift.apps-crc.testing
34.146.76.136 console-openshift-console.apps-crc.testing
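34.146.76.136 is this example VM's external address; a sketch that appends the entries using whatever address your VM actually got (run on the client machine):

vm_ip=$(gcloud compute instances describe "${vm_name}" --zone asia-northeast1-c \
  --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
for h in api.crc.testing oauth-openshift.apps-crc.testing console-openshift-console.apps-crc.testing; do
  echo "${vm_ip} ${h}" | sudo tee -a /etc/hosts
done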
Log in with administrator privileges and view the cluster resources.
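The kubeadmin password is printed at the end of crc start; if it has scrolled away, it can be shown again with:

crc console --credentials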
eval $(crc oc-env)
oc login -u kubeadmin https://api.crc.testing:6443
oc get co
oc get po --all-namespaces | wc -l

Verify the connection with kubectl as well:
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
export KUBECONFIG=$HOME/.crc/machines/crc/kubeconfig
kubectl get po --all-namespaces | wc -l

Set up a dynamic provisioner for local PVs
(@see https://github.com/code-ready/crc/wiki/Dynamic-volume-provisioning)
oc new-project local-path-storage
oc create serviceaccount local-path-provisioner -n local-path-storage
oc adm policy add-scc-to-user hostaccess -z local-path-provisioner -n local-path-storage
cat <<EOF | oc apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["nodes", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["endpoints", "persistentvolumes", "pods"]
    verbs: ["*"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner
    namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner
      containers:
        - name: local-path-provisioner
          image: rancher/local-path-provisioner:v0.0.12
          imagePullPolicy: IfNotPresent
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap": [
        {
          "node": "DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths": ["/mnt/pv-data"]
        }
      ]
    }
EOF
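To confirm dynamic provisioning actually works, a minimal sketch that claims a volume and mounts it (the test-local-path names are arbitrary; because the StorageClass uses WaitForFirstConsumer, the PVC stays Pending until the pod is scheduled):

cat <<EOF | oc apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-local-path
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: local-path
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test-local-path
spec:
  containers:
    - name: test
      image: registry.access.redhat.com/ubi8/ubi-minimal
      command: ["sleep", "3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: test-local-path
EOF
oc get pvc test-local-path   # should report Bound once the pod starts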