This is not official documentation; make backups and use at your own risk.
Applies to v2.6.3 and up only.
When the etcd database size exceeds its quota, etcd raises a NOSPACE alarm and rejects further writes with the error mvcc: database space exceeded.
To manually trigger this situation:
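A minimal sketch of one way to reproduce the alarm, assuming a throwaway etcd started with a deliberately small backend quota (the quota size, key names, and value sizes are illustrative; against k3s's embedded etcd you would add the same --cacert/--cert/--key flags used in the curl command further below):

#!/usr/bin/env bash
# Start a scratch etcd with a ~16 MB backend quota (assumed test setup).
etcd --quota-backend-bytes=$((16 * 1024 * 1024)) &

# Write ~100 KB values until the quota is exhausted; etcd then raises the
# NOSPACE alarm and every further write fails with
# "mvcc: database space exceeded".
while etcdctl put "key-$RANDOM" "$(head -c 102400 /dev/urandom | base64)"; do :; done

# Confirm the alarm is active.
etcdctl alarm list   # expected output includes alarm:NOSPACE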
-- Skeleton of an APISIX plugin: required modules plus an (empty) config schema.
local upstream  = require("apisix.upstream")
local core      = require("apisix.core")
local ipmatcher = require("resty.ipmatcher")
local ngx       = ngx

-- JSON schema describing the plugin's configuration; no options defined yet.
local schema = {
    type = "object",
    properties = {},
}
apiVersion: v1
kind: Secret
metadata:
  name: index-template
stringData:
  template: '{"index_patterns":["logstash-*"],"template":{"aliases":{"logstash":{}},"mappings":{"dynamic":"true","dynamic_date_formats":["strict_date_optional_time","yyyy/MM/dd HH:mm:ss Z||yyyy/MM/dd Z"],"dynamic_templates":[]},"settings":{"index":{"lifecycle":{"name":"logstash_policy","rollover_alias":"logstash"},"number_of_shards":"1","number_of_replicas":"0"}}}}'
#!/usr/bin/env bash
set -ex

export TEST_CLUSTER_NAME=quick-test
export CERT_MANAGER_VERSION=v1.3.1
export KIND_IMAGE=kindest/node:v1.20.2

# Create test cluster
echo "Creating test cluster..."
kind create cluster --name="$TEST_CLUSTER_NAME" --image="$KIND_IMAGE"
until kubectl --timeout=120s wait --for=condition=Ready pods --all --namespace kube-system; do sleep 1; done
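The exported CERT_MANAGER_VERSION suggests the script goes on to install cert-manager; a sketch of the usual static-manifest install for that version (the manifest URL follows cert-manager's standard release layout and is an assumption, not part of the original script):

# Install cert-manager from its release manifest and wait for it to come up.
kubectl apply -f "https://github.com/jetstack/cert-manager/releases/download/${CERT_MANAGER_VERSION}/cert-manager.yaml"
until kubectl --timeout=120s wait --for=condition=Ready pods --all --namespace cert-manager; do sleep 1; done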
Set up etcdctl using the instructions at https://github.com/etcd-io/etcd/releases/tag/v3.4.13 (with the install path changed to /usr/local/bin).
Note: if you want to match the etcdctl binary to the embedded k3s etcd version, run the following curl command first to get that version, then adjust ETCD_VER below accordingly:
curl -L --cacert /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --cert /var/lib/rancher/k3s/server/tls/etcd/server-client.crt --key /var/lib/rancher/k3s/server/tls/etcd/server-client.key https://127.0.0.1:2379/version
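A sketch of the install itself, following the pattern from the etcd release page but targeting /usr/local/bin (set ETCD_VER to the version reported by the curl command above; the download URL layout is the standard one for etcd GitHub releases):

ETCD_VER=v3.4.13  # adjust to the embedded etcd version reported above
DOWNLOAD_URL=https://github.com/etcd-io/etcd/releases/download
curl -L "${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz" -o "/tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz"
# Extract only etcdctl, dropping the versioned top-level directory.
tar xzvf "/tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz" -C /usr/local/bin --strip-components=1 "etcd-${ETCD_VER}-linux-amd64/etcdctl"
etcdctl version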
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: archive
spec:
  match:
    - select: {}
  outputRefs:
    - s3
#!/bin/bash
# https://gist.github.com/davidedg/c29c478ee9c15a804a99cbd1de364647#file-userdata
# Intended to run together with AMI amzn-ami-vpc-nat-hvm-*, with ASG min=max=desired=1
# Tested with amzn-ami-vpc-nat-hvm-2018.03.0.20180811-x86_64-ebs (ami-0ea87e2bfa81ca08a)
# Expected TAGS:
# - Backend Subnets: Name=InternetNAT Values=AvailabilityZone (eg: eu-west-1) where the frontend NAT instance is (this allows multiple NAT instances to serve different AZ subnets)
# - Backend Subnets + ASG: Name=Environment Values=EnvironmentLabel (eg: "production", "staging" ...) (this allows multiple environments in the same VPC, served by different NAT instances)
# - ASG: Name=EIP Values=EIP-allocation-id (eg: eipalloc-abcdef12)
PATH="/usr/sbin:/sbin:/usr/bin:/bin"
function log { logger -t "NAT-instance" -- "$1"; }
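The fragment stops at the log helper; in a userdata script like this the next steps are typically to attach the tagged EIP and disable the EC2 source/destination check so the instance can forward NAT traffic. A sketch under those assumptions (metadata lookups, tag placement, and variable names are illustrative, not taken from the gist):

# Discover this instance's ID and region from the instance metadata service.
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//')

# Read the EIP allocation id from the instance's EIP tag (assumes the ASG
# propagates its EIP tag to the instance, per the header above).
EIP_ALLOC=$(aws ec2 describe-tags --region "$REGION" \
    --filters "Name=resource-id,Values=$INSTANCE_ID" "Name=key,Values=EIP" \
    --query 'Tags[0].Value' --output text)

# Attach the EIP and disable the source/dest check so NAT forwarding works.
log "associating $EIP_ALLOC and disabling source/dest check"
aws ec2 associate-address --region "$REGION" --instance-id "$INSTANCE_ID" \
    --allocation-id "$EIP_ALLOC" --allow-reassociation
aws ec2 modify-instance-attribute --region "$REGION" --instance-id "$INSTANCE_ID" \
    --no-source-dest-check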