Skip to content

Instantly share code, notes, and snippets.

@mgazza
Created February 26, 2019 16:10
Show Gist options
  • Save mgazza/c6ea70d54375f3350f8c8d8519c0794b to your computer and use it in GitHub Desktop.

Revisions

  1. mgazza created this gist Feb 26, 2019.
    167 changes: 167 additions & 0 deletions script.sh
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,167 @@
    #!/usr/bin/env bash

    # Fail fast: -e aborts on any unhandled error; -o pipefail makes a
    # pipeline fail when any stage fails (this script pipes kubectl output
    # through awk and into `kubectl create -f -`, where a silent upstream
    # failure would otherwise be masked by the last stage's exit status).
    set -e -o pipefail

    # Succeed (exit 0) iff pod $1 reports a Ready condition whose status is True.
    # Arguments: $1 - pod name in the current namespace
    function __is_pod_ready() {
      local ready_status
      ready_status="$(kubectl get po "$1" -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}')"
      [[ "$ready_status" == 'True' ]]
    }

    # Succeed (return 0) iff every pod named in the arguments is Ready.
    # Arguments: zero or more pod names; no arguments is vacuously ready.
    # Returns:   0 when all named pods are Ready, 1 otherwise.
    # Bug fix: the original checked "$#" but then iterated the *caller's*
    # $pods variable (reachable only through bash dynamic scoping) instead
    # of its own arguments; iterate "$@" so the function actually tests
    # what it was given.
    function __pods_ready() {
    local pod

    [[ "$#" == 0 ]] && return 0

    for pod in "$@"; do
    __is_pod_ready "$pod" || return 1
    done

    return 0
    }

    # Poll until every pod in the current namespace is Ready, or give up.
    # Arguments: $1 - PERIOD, total seconds to keep trying
    #            $2 - INTERVAL, seconds to sleep between polls
    # Returns:   0 once all pods are Ready; 1 on usage error or timeout.
    # Fix: the timeout diagnostic now goes to stderr like the usage text,
    # so stdout stays clean for callers that capture it.
    function __wait-until-pods-ready() {
    local period interval i pods

    if [[ $# != 2 ]]; then
    echo "Usage: wait-until-pods-ready PERIOD INTERVAL" >&2
    echo "" >&2
    echo "This script waits for all pods to be ready in the current namespace." >&2

    return 1
    fi

    period="$1"
    interval="$2"

    for ((i = 0; i < period; i += interval)); do
    pods="$(kubectl get po -o 'jsonpath={.items[*].metadata.name}')"
    # $pods is intentionally unquoted: the jsonpath output is a
    # space-separated list that must word-split into one arg per pod.
    if __pods_ready $pods; then
    return 0
    fi

    echo "Waiting for pods to be ready..."
    sleep "$interval"
    done

    echo "Waited for $period seconds, but all pods are not ready yet." >&2
    return 1
    }

    # Print a throwaway busybox pod manifest that mounts PVC $1 at /data,
    # suitable for piping into `kubectl create -f -`.  The pod sleeps for
    # an hour so files can be copied in/out with `kubectl cp`.
    # Arguments: $1 - persistentVolumeClaim name to mount
    # Outputs:   YAML manifest on stdout
    # Fix: restored the YAML nesting (the manifest was flattened, which is
    # invalid YAML) and moved the EOP terminator to column 0 — an indented
    # terminator under an unquoted `<< EOP` never terminates the heredoc.
    function __getVolumeDebuggerPod(){
    cat << EOP
kind: Pod
apiVersion: v1
metadata:
  name: volume-debugger
spec:
  volumes:
    - name: volume-to-debug
      persistentVolumeClaim:
        claimName: $1
  containers:
    - name: debugger
      image: busybox
      command: ['sleep', '3600']
      volumeMounts:
        - mountPath: "/data"
          name: volume-to-debug
EOP
    }

    # Copy a PVC's contents from a pod on one GKE cluster to a pod on another,
    # staging through a local ./data directory.
    # Globals read: sourcePod, sourceNamespace, sourceClusterName, sourceZone,
    #   sourceProject, destPod, destNamespace, destClusterName, destZone,
    #   destProject (set by the __copyFirstDataFrom* wrappers).
    # Globals written: currentNs, sourceClaimName, sourceControllerName,
    #   destClaimName, destControllerName (all leak to global scope — no
    #   `local` declarations).
    # Side effects: switches kubectl credentials/context twice, scales both
    #   statefulsets to 0, creates/deletes a volume-debugger pod on each
    #   cluster, and creates then removes ./data locally.
    # Returns: 1 if ./data already exists; otherwise relies on the caller's
    #   `set -e` to abort on any failing step.
    # NOTE(review): the source statefulset is scaled to 0 but never scaled
    #   back to 1 — only the destination is restored below; confirm that is
    #   intentional.
    function __copyFirstData(){

    # Refuse to run if a previous staging directory is still around, so a
    # stale copy is never mistaken for fresh data.
    if [ -d "data" ]; then
    echo "data directory already exists exiting"
    return 1
    fi

    # switch cluster to $sourceClusterName
    gcloud container clusters get-credentials $sourceClusterName --zone $sourceZone --project $sourceProject

    # Remember the namespace currently active in the context (column 5 of
    # `kubectl config get-contexts`) so it can be restored afterwards.
    currentNs=$(kubectl config get-contexts $(kubectl config current-context) --no-headers | awk '{ print $5 }')
    kubectl config set-context $(kubectl config current-context) --namespace=$sourceNamespace

    # get the volume claim name from the pod
    sourceClaimName=$(kubectl get pods $sourcePod -o 'jsonpath={.spec.volumes[].persistentVolumeClaim.claimName}')
    # get the statefulset name
    sourceControllerName=$(kubectl get pods $sourcePod -o 'jsonpath={.metadata.ownerReferences[].name}')

    # scale down $sourceClusterName to zero pods and copy data
    # (releases the PVC so the debugger pod can mount it)
    kubectl scale statefulset $sourceControllerName --replicas 0

    # create the pod to copy data from
    __getVolumeDebuggerPod $sourceClaimName | kubectl create -f -
    # NOTE: waits for *all* pods in the namespace (up to 300s, poll every
    # 2s), not just the debugger pod.
    __wait-until-pods-ready 300 2
    kubectl cp $sourceNamespace/volume-debugger:data data
    # --grace-period=0 skips the termination grace wait for speed.
    kubectl delete pod volume-debugger --grace-period=0

    kubectl config set-context $(kubectl config current-context) --namespace=$currentNs

    # switch to production-us-central1 copy data
    gcloud container clusters get-credentials $destClusterName --zone $destZone --project $destProject

    currentNs=$(kubectl config get-contexts $(kubectl config current-context) --no-headers | awk '{ print $5 }')
    kubectl config set-context $(kubectl config current-context) --namespace=$destNamespace

    # get the volume claim name from the pod
    destClaimName=$(kubectl get pods $destPod -o 'jsonpath={.spec.volumes[].persistentVolumeClaim.claimName}')
    # get the statefulset name
    destControllerName=$(kubectl get pods $destPod -o 'jsonpath={.metadata.ownerReferences[].name}')

    # scale down production-us-central1 to zero pods
    kubectl scale statefulset $destControllerName --replicas 0

    # Same debugger-pod dance on the destination, pushing ./data into the
    # volume root this time.
    __getVolumeDebuggerPod $destClaimName | kubectl create -f -
    __wait-until-pods-ready 300 2
    kubectl cp data $destNamespace/volume-debugger:/
    kubectl delete pod volume-debugger --grace-period=0

    # Drop the local staging copy.
    rm -rf data

    # Restore the destination workload (source is left scaled to 0 — see
    # NOTE above).
    kubectl scale statefulset $destControllerName --replicas 1

    kubectl config set-context $(kubectl config current-context) --namespace=$currentNs
    }


    # Copy the FirstData volume from the legacy cluster
    # (gke-production-us-central1) to the new cluster (production-us-central1).
    # Sets the source*/dest* globals consumed by __copyFirstData, then runs it.
    function __copyFirstDataFromOldToNew(){
      # Source side: legacy cluster.
      sourceProject=prj-shared-services
      sourceZone=us-central1-a
      sourceClusterName=gke-production-us-central1
      sourceNamespace=firstdata-test
      sourcePod=test-cc-fdc-nashville-0

      # Destination side: new cluster.
      destProject=prj-shared-services
      destZone=us-central1
      destClusterName=production-us-central1
      destNamespace=firstdata-test
      destPod=firstdata-authorizations-test-0

      # Perform the pod-to-pod copy using the globals above.
      __copyFirstData
    }

    # Copy the FirstData volume back from the new cluster
    # (production-us-central1) to the legacy cluster (gke-production-us-central1).
    # Mirror image of __copyFirstDataFromOldToNew: same globals, swapped roles.
    function __copyFirstDataFromNewToOld(){
      # Source side: new cluster.
      sourceProject=prj-shared-services
      sourceZone=us-central1
      sourceClusterName=production-us-central1
      sourceNamespace=firstdata-test
      sourcePod=firstdata-authorizations-test-0

      # Destination side: legacy cluster.
      destProject=prj-shared-services
      destZone=us-central1-a
      destClusterName=gke-production-us-central1
      destNamespace=firstdata-test
      destPod=test-cc-fdc-nashville-0

      # Perform the pod-to-pod copy using the globals above.
      __copyFirstData
    }


    __copyFirstDataFromNewToOld()