Creating the NFS volume:
$ docker volume create --driver local \
  --opt type=nfs \
  --opt o=addr=192.168.1.115,uid=1000,gid=1000,rw \
  --opt device=:/mnt/volumes/mysql-test \
  mysql-test-1

source 'https://rubygems.org'
# Gemfile for a Fluentd log-forwarding setup: the core daemon plus the
# Treasure Data and Elasticsearch output plugins.
gem 'fluentd'
gem 'fluent-plugin-td'
gem 'fluent-plugin-elasticsearch'
#!/bin/sh
# Backup your data
# Use at your own risk
# Usage: ./extended-cleanup-rancher2.sh
# Include clearing all iptables: ./extended-cleanup-rancher2.sh flush

# Remove every container, image, and volume. xargs -r skips the command
# entirely when the list is empty (the original "docker rm -f $(docker ps -qa)"
# errors out with "requires at least 1 argument" on a clean host).
docker ps -qa | xargs -r docker rm -f
docker images -q | xargs -r docker rmi -f
docker volume ls -q | xargs -r docker volume rm

# Unmount kubelet tmpfs mounts, then the kubelet and rancher state dirs.
for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do
  umount "$mount"
done

# Directories to wipe.
# NOTE(review): the loop that actually removes these directories is truncated
# in this snippet — restore it from the upstream extended-cleanup-rancher2.sh
# before relying on this script.
cleanupdirs="/etc/ceph /etc/cni /etc/kubernetes /opt/cni /opt/rke /run/secrets/kubernetes.io /run/calico /run/flannel /var/lib/calico /var/lib/etcd /var/lib/cni /var/lib/kubelet /var/lib/rancher/rke/log /var/log/containers /var/log/pods /var/run/calico"
#!/bin/bash
########
# Purpose :- To take a backup of MongoDB collections and upload it to AWS S3
# Requirement :- Make sure the Collection.config file is present in /data/Backup/mongodb
# Format for Collection.config is db|collection
# For example:
# db1|collections1
# Manually remove finalizers that block deletion of Rancher resources.
kubectl edit -n cattle-system secret tls-rancher
kubectl patch secret tls-rancher -p '{"metadata":{"finalizers":[]}}' --type='merge' -n cattle-system
# Namespaces are cluster-scoped, so no -n flag applies to them (the original
# passed "-n cattle-system" here, which kubectl simply ignores).
kubectl patch namespace cattle-system -p '{"metadata":{"finalizers":[]}}' --type='merge'
kubectl delete namespace cattle-system --grace-period=0 --force
kubectl patch namespace cattle-global-data -p '{"metadata":{"finalizers":[]}}' --type='merge'
kubectl delete namespace cattle-global-data --grace-period=0 --force
#!/bin/bash
########
# Purpose :- To restore a backup of MongoDB collections from AWS S3
# Requirement :- Make sure you took the backup via backupMongo_bucket.sh
# Bug Report to :- [email protected]
########

# Script name for log/usage messages; quote "$0" in case the path has spaces.
PROGNAME=$(basename "$0")
#!/bin/sh
# Make sure to:
# 1) Name this file `backup.sh` and place it in /home/ubuntu
# 2) Run `sudo apt-get install awscli` to install the AWS CLI
# 3) Run `aws configure` (enter an S3-authorized IAM user and specify the region)
# 4) Fill in the DB host + name
# 5) Create an S3 bucket for the backups and fill it in below (set a lifecycle rule to expire files older than X days in the bucket)
# 6) Run `chmod +x backup.sh`
# 7) Test it out via ./backup.sh
#!/bin/bash
#######################################
# Parse a simple two-space-indented YAML file into shell variable
# assignments, e.g. "global:\n  name: app" -> prefix_global_name="app".
# (The snippet was truncated mid-awk; completed here from the canonical
# implementation, with "$1" and -F"$fs" quoted.)
# Arguments: $1 - path to the YAML file
#            $2 - prefix prepended to every generated variable name
# Outputs:   one NAME="value" line per scalar key, on stdout
#######################################
function parse_yaml {
  local prefix=$2
  local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
  # sed normalizes each "key: value" line into "indent<FS>key<FS>value";
  # the first substitution handles quoted values, the second unquoted ones.
  sed -ne "s|^\($s\):|\1|" \
      -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
      -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
  awk -F"$fs" '{
    # Track the current key path by indent depth (2 spaces per level)
    # and emit prefix_path_key="value" for every scalar.
    indent = length($1)/2;
    vname[indent] = $2;
    for (i in vname) { if (i > indent) { delete vname[i] } }
    if (length($3) > 0) {
      vn = ""
      for (i = 0; i < indent; i++) { vn = (vn)(vname[i])("_") }
      printf("%s%s%s=\"%s\"\n", "'"$prefix"'", vn, $2, $3);
    }
  }'
}
import socket


def find_open_port():
    """
    Use socket's built-in ability to find an open port.

    Binding to port 0 asks the OS to assign any free ephemeral port; we
    read it back with getsockname(), close the socket, and return the
    port number. (The original snippet was truncated before the return.)

    NOTE(review): another process may grab the port between close() and
    the caller's own bind() — inherent TOCTOU race of this technique.

    Returns:
        int: an OS-assigned port number that was free at call time.
    """
    sock = socket.socket()
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    return port
// Wraps a failure summary in begin/end markers for the build log.
// (Original end marker read "Finish Report Report" — duplicated word fixed.)
def failureReport = { text -> "\r\n------------ Finish Report ------------\r\n${text}------------ Finish Report ------------\r\n" }
// Telegram Bot API endpoint; BOT_TOKEN is supplied via the Jenkins environment.
def botUrl = "https://api.telegram.org/bot${env.BOT_TOKEN}/sendMessage"
// Sends a message to the configured chat, tagged with the build id.
// NOTE(review): ${text} is interpolated inside a single-quoted shell arg —
// a quote in the message breaks the curl command; sanitize callers' input.
def bot = { text -> sh "curl -s -X POST ${botUrl} -d chat_id=${env.BOT_CHAT_ID} -d text='#${env.BUILD_ID} ${text}'" }
| node { | |
| bot("build starting") | |
| def mvnHome | |
| stage('Preparation') { | |
| if (env.USE_GIT_PULL == "true") { |