@Gurpartap
Last active March 24, 2022 03:07

Revisions

  1. Gurpartap revised this gist Apr 17, 2020. 2 changed files with 5 additions and 1 deletion.
    2 changes: 1 addition & 1 deletion 0_readme.md
    @@ -4,7 +4,7 @@
    # submit the parameterized batch job to cluster:
    nomad job run ./cadence-setup-schema.nomad

    # dispatch parameterized job (without any param):
    # run this job (but without any param):
    nomad job dispatch cadence-setup-schema
    ```

    4 changes: 4 additions & 0 deletions 1_cadence-setup-schema.nomad.hcl
    @@ -9,6 +9,10 @@ job cadence-setup-schema {
    # value = "cadence-services"
    # }

    parameterized {
    payload = "forbidden"
    }

    meta {
    keyspace = "cadence"
    visibility_keyspace = "cadence_visibility"
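    payload = "forbidden" makes nomad reject any dispatched payload outright, so the job is driven entirely by its meta defaults. a minimal sketch of dispatch-time overrides, assuming you also whitelist the keys with meta_optional (the gist itself only sets payload):

    ```hcl
    # sketch, not from the gist: allow -meta overrides at dispatch time
    parameterized {
      payload       = "forbidden"
      meta_optional = ["keyspace", "visibility_keyspace"]
    }

    meta {
      # defaults, used when dispatch passes no -meta flags
      keyspace            = "cadence"
      visibility_keyspace = "cadence_visibility"
    }
    ```

    with that in place, `nomad job dispatch -meta keyspace=cadence_staging cadence-setup-schema` would run the schema setup against a different keyspace via `${NOMAD_META_keyspace}`.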
  2. Gurpartap revised this gist Apr 17, 2020. 1 changed file with 10 additions and 10 deletions.
    20 changes: 10 additions & 10 deletions 0_readme.md
    @@ -1,23 +1,23 @@
    ### for a simple setup, cadence-server.nomad does it all.
    ### use cadence-setup-schema.nomad for automatic schema setup and update

    ```sh
    nomad job run ./cadence-server.nomad
    # submit the parameterized batch job to cluster:
    nomad job run ./cadence-setup-schema.nomad

    # dispatch parameterized job (without any param):
    nomad job dispatch cadence-setup-schema
    ```

    ### use cadence-web.nomad for web ui service.
    ### for a simple setup, cadence-server.nomad does it all.

    ```sh
    nomad job run ./cadence-web.nomad
    nomad job run ./cadence-server.nomad
    ```

    ### use cadence-setup-schema.nomad for automatic schema setup and update
    ### use cadence-web.nomad for web ui service.

    ```sh
    # submit the parameterized batch job to cluster:
    nomad job run ./cadence-setup-schema.nomad

    # dispatch parameterized job (without any param):
    nomad job dispatch cadence-setup-schema
    nomad job run ./cadence-web.nomad
    ```

    ### run services individually
  3. Gurpartap revised this gist Apr 17, 2020. 8 changed files with 137 additions and 19 deletions.
    30 changes: 30 additions & 0 deletions 0_readme.md
    @@ -0,0 +1,30 @@
    ### for a simple setup, cadence-server.nomad does it all.

    ```sh
    nomad job run ./cadence-server.nomad
    ```

    ### use cadence-web.nomad for web ui service.

    ```sh
    nomad job run ./cadence-web.nomad
    ```

    ### use cadence-setup-schema.nomad for automatic schema setup and update

    ```sh
    # submit the parameterized batch job to cluster:
    nomad job run ./cadence-setup-schema.nomad

    # dispatch parameterized job (without any param):
    nomad job dispatch cadence-setup-schema
    ```

    ### run services individually

    see files 4 to 7 to run each server service component individually.
comes in handy when you must scale, allocate resources, and/or measure
    metrics more precisely.

    individual service jobs can be mixed with cadence-server.nomad
    on the same cluster without conflict.
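
    the per-service job files (4 to 7) differ from cadence-server.nomad mainly in which component they run and which port they publish; cassandra settings, ringpop seeds, and dynamicconfig stay identical, which is presumably what lets both styles share a cluster. a distilled sketch, using values from the history job below:

    ```hcl
    # sketch, distilled from 5_cadence-history.nomad.hcl: the per-component
    # differences are the SERVICES selector, the bind address, and the port
    env {
      SERVICES   = "history"                 # run only this component
      BIND_ON_IP = "${NOMAD_IP_history}"
    }

    service {
      name = "cadence-history"
      port = "history"                       # static 7934, matching RINGPOP_SEEDS
    }
    ```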
    101 changes: 101 additions & 0 deletions 1_cadence-setup-schema.nomad.hcl
    @@ -0,0 +1,101 @@
    job cadence-setup-schema {
    datacenters = ["dc1"]
    type = "batch"
    priority = 100

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-services"
    # }

    meta {
    keyspace = "cadence"
    visibility_keyspace = "cadence_visibility"
    }

    group cadence-setup-schema {
    task cadence-setup-schema {
    driver = "docker"
    kill_timeout = "45s"

    config {
    image = "ubercadence/server:0.11.0-auto-setup"
    command = "bash"
    args = ["/opt/cadence/bin/setup-schema.sh"]
    volumes = [
    "local/setup-schema.sh:/opt/cadence/bin/setup-schema.sh"
    ]
    network_mode = "host"
    }

    env {
    SKIP_SCHEMA_SETUP = false

    # change requires db reset
    NUM_HISTORY_SHARDS = 4

    LOG_LEVEL = "info"
    CASSANDRA_SEEDS = "cassandra-cluster1-node1.node.consul,cassandra-cluster1-node2.node.consul,cassandra-cluster1-node3.node.consul"
    DB = "cassandra"
    RF = 3
    KEYSPACE = "${NOMAD_META_keyspace}"
    VISIBILITY_KEYSPACE = "${NOMAD_META_visibility_keyspace}"
    }

    template {
    change_mode = "noop"
    destination = "local/setup-schema.sh"
    // language=sh
    data = <<EOH
    #!/bin/bash
    set -x
    DB="${DB:-cassandra}"
    RF=${RF:-3}
    # cassandra env
    export KEYSPACE="${KEYSPACE:-cadence}"
    export VISIBILITY_KEYSPACE="${VISIBILITY_KEYSPACE:-cadence_visibility}"
    setup_cassandra_schema() {
    SCHEMA_DIR=$CADENCE_HOME/schema/cassandra/cadence/versioned
    cadence-cassandra-tool --ep $CASSANDRA_SEEDS create -k $KEYSPACE --rf $RF
    cadence-cassandra-tool --ep $CASSANDRA_SEEDS -k $KEYSPACE setup-schema -v 0.0
    cadence-cassandra-tool --ep $CASSANDRA_SEEDS -k $KEYSPACE update-schema -d $SCHEMA_DIR
    VISIBILITY_SCHEMA_DIR=$CADENCE_HOME/schema/cassandra/visibility/versioned
    cadence-cassandra-tool --ep $CASSANDRA_SEEDS create -k $VISIBILITY_KEYSPACE --rf $RF
    cadence-cassandra-tool --ep $CASSANDRA_SEEDS -k $VISIBILITY_KEYSPACE setup-schema -v 0.0
    cadence-cassandra-tool --ep $CASSANDRA_SEEDS -k $VISIBILITY_KEYSPACE update-schema -d $VISIBILITY_SCHEMA_DIR
    }
    setup_schema() {
    if [ "$DB" == "cassandra" ]; then
    echo 'setup cassandra schema'
    setup_cassandra_schema
    fi
    }
    wait_for_cassandra() {
    server=`echo $CASSANDRA_SEEDS | awk -F ',' '{print $1}'`
    until cqlsh --cqlversion=3.4.4 $server < /dev/null; do
    echo 'waiting for cassandra to start up'
    sleep 1
    done
    echo 'cassandra started'
    }
    wait_for_db() {
    if [ "$DB" == "cassandra" ]; then
    wait_for_cassandra
    fi
    }
    wait_for_db
    setup_schema
    EOH
    }
    }
    }
    }
    15 changes: 1 addition & 14 deletions 1_cadence-server.nomad.hcl → 2_cadence-server.nomad.hcl
    @@ -1,16 +1,3 @@
    # for a simple setup, cadence-server.nomad.hcl does it all.
    # use cadence-web.nomad.hcl for web ui service.

    # see files 3 to 6 to run each server service component individually.
# comes in handy when you must scale, allocate resources, and/or measure
    # metrics more precisely.
    #
    # individual service jobs can be mixed with cadence-server.nomad.hcl
    # on the same cluster without conflict.

    # currently requires you to run cassandra schema migrations manually
    # (run 1 job with 0.11.0-auto-setup image)

    job cadence-server {
    datacenters = ["dc1"]
    type = "service"
    @@ -31,7 +18,7 @@ job cadence-server {

    task cadence-server {
    driver = "docker"
    kill_timeout = "30s"
    kill_timeout = "45s"

    config {
    image = "ubercadence/server:0.11.0"
    2 changes: 1 addition & 1 deletion 2_cadence-web.nomad.hcl → 3_cadence-web.nomad.hcl
    @@ -14,7 +14,7 @@ job cadence-web {

    task cadence-web {
    driver = "docker"
    kill_timeout = "30s"
    kill_timeout = "45s"

    config {
    image = "ubercadence/web:latest"
2 changes: 1 addition & 1 deletion 3_cadence-frontend.nomad.hcl → 4_cadence-frontend.nomad.hcl
    @@ -18,7 +18,7 @@ job cadence-frontend {

    task cadence-frontend {
    driver = "docker"
    kill_timeout = "30s"
    kill_timeout = "45s"

    config {
    image = "ubercadence/server:0.11.0"
2 changes: 1 addition & 1 deletion 4_cadence-history.nomad.hcl → 5_cadence-history.nomad.hcl
    @@ -18,7 +18,7 @@ job cadence-history {

    task cadence-history {
    driver = "docker"
    kill_timeout = "30s"
    kill_timeout = "45s"

    config {
    image = "ubercadence/server:0.11.0"
2 changes: 1 addition & 1 deletion 5_cadence-matching.nomad.hcl → 6_cadence-matching.nomad.hcl
    @@ -18,7 +18,7 @@ job cadence-matching {

    task cadence-matching {
    driver = "docker"
    kill_timeout = "30s"
    kill_timeout = "45s"

    config {
    image = "ubercadence/server:0.11.0"
    2 changes: 1 addition & 1 deletion 6_cadence-worker.nomad.hcl → 7_cadence-worker.nomad.hcl
    @@ -18,7 +18,7 @@ job cadence-worker {

    task cadence-worker {
    driver = "docker"
    kill_timeout = "30s"
    kill_timeout = "45s"

    config {
    image = "ubercadence/server:0.11.0"
  4. Gurpartap revised this gist Apr 14, 2020. 1 changed file with 2 additions and 2 deletions.
    4 changes: 2 additions & 2 deletions 1_cadence-server.nomad.hcl
    @@ -2,8 +2,8 @@
    # use cadence-web.nomad.hcl for web ui service.

    # see files 3 to 6 to run each server service component individually.
# comes in handy when you must allocate resources and/or measure metrics
# more precisely.
# comes in handy when you must scale, allocate resources, and/or measure
# metrics more precisely.
    #
    # individual service jobs can be mixed with cadence-server.nomad.hcl
    # on the same cluster without conflict.
  5. Gurpartap revised this gist Apr 14, 2020. 6 changed files with 33 additions and 6 deletions.
    5 changes: 4 additions & 1 deletion 1_cadence-server.nomad.hcl
    @@ -1,3 +1,6 @@
    # for a simple setup, cadence-server.nomad.hcl does it all.
    # use cadence-web.nomad.hcl for web ui service.

    # see files 3 to 6 to run each server service component individually.
    # comes handy when you must allocate resources and/or measure metrics
    # more precisely.
    @@ -11,7 +14,7 @@
    job cadence-server {
    datacenters = ["dc1"]
    type = "service"
    priority = 60
    priority = 75

    # constraint {
    # attribute = "${meta.tags}"
    2 changes: 1 addition & 1 deletion 2_cadence-web.nomad.hcl
    @@ -1,7 +1,7 @@
    job cadence-web {
    datacenters = ["dc1"]
    type = "service"
    priority = 20
    priority = 25

    # constraint {
    # attribute = "${meta.tags}"
    8 changes: 7 additions & 1 deletion 3_cadence-frontend.nomad.hcl
    @@ -1,7 +1,13 @@
    job cadence-frontend {
    datacenters = ["dc1"]
    type = "service"
    priority = 60
    priority = 75

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-services"
    # }

    group cadence-frontend {
    count = 3
    8 changes: 7 additions & 1 deletion 4_cadence-history.nomad.hcl
    @@ -1,7 +1,13 @@
    job cadence-history {
    datacenters = ["dc1"]
    type = "service"
    priority = 60
    priority = 75

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-services"
    # }

    group cadence-history {
    count = 3
    8 changes: 7 additions & 1 deletion 5_cadence-matching.nomad.hcl
    @@ -1,7 +1,13 @@
    job cadence-matching {
    datacenters = ["dc1"]
    type = "service"
    priority = 60
    priority = 75

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-services"
    # }

    group cadence-matching {
    count = 3
    8 changes: 7 additions & 1 deletion 6_cadence-worker.nomad.hcl
    @@ -1,7 +1,13 @@
    job cadence-worker {
    datacenters = ["dc1"]
    type = "service"
    priority = 60
    priority = 75

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-services"
    # }

    group cadence-worker {
    count = 3
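
    the commented-out constraint added to each job targets nodes whose client config advertises a matching meta tag; set_contains treats the attribute as a comma-separated list. a sketch of the client-side config that would satisfy it (assumed, not part of the gist):

    ```hcl
    # nomad client config sketch: ${meta.tags} resolves to this value, and
    # set_contains matches when the list includes "cadence-services"
    client {
      enabled = true

      meta {
        tags = "cadence-services,cassandra-clients"
      }
    }
    ```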
  6. Gurpartap revised this gist Apr 14, 2020. 5 changed files with 374 additions and 9 deletions.
    14 changes: 9 additions & 5 deletions 1_cadence-server.nomad.hcl
    @@ -1,8 +1,12 @@
    # to run each server service component individually, clone the job file for each of the service,
    # cherry pick the service{} and port{} definitions, and set the SERVICES env var appropriately.
    # for an example, see cadence-history.nomad.hcl file below.

    # currently requires you to run cassandra schema migrations manually (run 1 job with 0.11.0-auto-setup image)
    # see files 3 to 6 to run each server service component individually.
# comes in handy when you must allocate resources and/or measure metrics
    # more precisely.
    #
    # individual service jobs can be mixed with cadence-server.nomad.hcl
    # on the same cluster without conflict.

    # currently requires you to run cassandra schema migrations manually
    # (run 1 job with 0.11.0-auto-setup image)

    job cadence-server {
    datacenters = ["dc1"]
    124 changes: 124 additions & 0 deletions 3_cadence-frontend.nomad.hcl
    @@ -0,0 +1,124 @@
    job cadence-frontend {
    datacenters = ["dc1"]
    type = "service"
    priority = 60

    group cadence-frontend {
    count = 3

    constraint {
    distinct_hosts = true
    }

    task cadence-frontend {
    driver = "docker"
    kill_timeout = "30s"

    config {
    image = "ubercadence/server:0.11.0"
    network_mode = "host"
    volumes = [
    "local/dynamicconfig.yml:/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    ]
    }

    service {
    name = "cadence-frontend"
    port = "frontend"
    tags = ["metrics", "metrics-port=${NOMAD_PORT_prometheus}"]

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    env {
    # change requires db reset
    NUM_HISTORY_SHARDS = 4

    LOG_LEVEL = "info"
    SERVICES = "frontend"
    BIND_ON_IP = "${NOMAD_IP_frontend}"
    CASSANDRA_SEEDS = "cassandra-cluster1-node1.node.consul,cassandra-cluster1-node2.node.consul,cassandra-cluster1-node3.node.consul"
    DB = "cassandra"
    RF = 3
    KEYSPACE = "cadence"
    VISIBILITY_KEYSPACE = "cadence_visibility"
    SKIP_SCHEMA_SETUP = true
    DYNAMIC_CONFIG_FILE_PATH = "/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    RINGPOP_BOOTSTRAP_MODE = "dns"
    RINGPOP_SEEDS = "cadence-frontend.service.consul:7933,cadence-history.service.consul:7934,cadence-matching.service.consul:7935,cadence-worker.service.consul:7939"
    PROMETHEUS_ENDPOINT = "${NOMAD_ADDR_prometheus}"
    }

    template {
    change_mode = "noop"
    destination = "local/dynamicconfig.yml"
    data = <<EOH
    ---
    system.minRetentionDays:
    - value: 0
    constraints: {}
    system.historyArchivalStatus:
    - value: "disabled"
    constraints: {}
    system.visibilityArchivalStatus:
    - value: "disabled"
    constraints: {}
    frontend.enableClientVersionCheck:
    - value: true
    constraints: {}
    frontend.visibilityListMaxQPS:
    - value: 100
    constraints: {}
    EOH
    }

    resources {
    cpu = 1500
    memory = 256

    network {
    mbits = 100

    port frontend {
    static = 7933
    }
    port prometheus {}
    }
    }

    meta {
    last_run_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    restart {
    attempts = 5
    delay = "5s"
    mode = "delay"
    interval = "1m"
    }
    }

    migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "15s"
    healthy_deadline = "60s"
    }

    update {
    max_parallel = 1
    min_healthy_time = "15s"
    healthy_deadline = "1m"
    progress_deadline = "2m"
    auto_revert = true
    auto_promote = true
    canary = 1
    stagger = "5s"
    }
    }
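
    each component publishes a static port under a fixed consul service name, and every job lists the same RINGPOP_SEEDS resolved over consul dns; presumably this is what lets the combined and per-service jobs join the same ring without conflict. the pairing that has to line up, restated as a sketch:

    ```hcl
    # sketch: the consul service name + static port in each job must match
    # the corresponding entry in RINGPOP_SEEDS
    env {
      RINGPOP_BOOTSTRAP_MODE = "dns"
      RINGPOP_SEEDS          = "cadence-frontend.service.consul:7933,cadence-history.service.consul:7934,cadence-matching.service.consul:7935,cadence-worker.service.consul:7939"
    }

    network {
      port frontend {
        static = 7933   # must match the cadence-frontend seed above
      }
    }
    ```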
    9 changes: 5 additions & 4 deletions 3_cadence-history.nomad.hcl → 4_cadence-history.nomad.hcl
    @@ -27,10 +27,11 @@ job cadence-history {
    port = "history"
    tags = ["metrics", "metrics-port=${NOMAD_PORT_prometheus}"]

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    118 changes: 118 additions & 0 deletions 5_cadence-matching.nomad.hcl
    @@ -0,0 +1,118 @@
    job cadence-matching {
    datacenters = ["dc1"]
    type = "service"
    priority = 60

    group cadence-matching {
    count = 3

    constraint {
    distinct_hosts = true
    }

    task cadence-matching {
    driver = "docker"
    kill_timeout = "30s"

    config {
    image = "ubercadence/server:0.11.0"
    network_mode = "host"
    volumes = [
    "local/dynamicconfig.yml:/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    ]
    }

    service {
    name = "cadence-matching"
    port = "matching"
    tags = ["metrics", "metrics-port=${NOMAD_PORT_prometheus}"]

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    env {
    # change requires db reset
    NUM_HISTORY_SHARDS = 4

    LOG_LEVEL = "info"
    SERVICES = "matching"
    BIND_ON_IP = "${NOMAD_IP_matching}"
    CASSANDRA_SEEDS = "cassandra-cluster1-node1.node.consul,cassandra-cluster1-node2.node.consul,cassandra-cluster1-node3.node.consul"
    DB = "cassandra"
    RF = 3
    KEYSPACE = "cadence"
    VISIBILITY_KEYSPACE = "cadence_visibility"
    SKIP_SCHEMA_SETUP = true
    DYNAMIC_CONFIG_FILE_PATH = "/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    RINGPOP_BOOTSTRAP_MODE = "dns"
    RINGPOP_SEEDS = "cadence-frontend.service.consul:7933,cadence-history.service.consul:7934,cadence-matching.service.consul:7935,cadence-worker.service.consul:7939"
    PROMETHEUS_ENDPOINT = "${NOMAD_ADDR_prometheus}"
    }

    template {
    change_mode = "noop"
    destination = "local/dynamicconfig.yml"
    data = <<EOH
    ---
    system.minRetentionDays:
    - value: 0
    constraints: {}
    system.historyArchivalStatus:
    - value: "disabled"
    constraints: {}
    system.visibilityArchivalStatus:
    - value: "disabled"
    constraints: {}
    EOH
    }

    resources {
    cpu = 500
    memory = 256

    network {
    mbits = 100

    port matching {
    static = 7935
    }
    port prometheus {}
    }
    }

    meta {
    last_run_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    restart {
    attempts = 5
    delay = "5s"
    mode = "delay"
    interval = "1m"
    }
    }

    migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "15s"
    healthy_deadline = "60s"
    }

    update {
    max_parallel = 1
    min_healthy_time = "15s"
    healthy_deadline = "1m"
    progress_deadline = "2m"
    auto_revert = true
    auto_promote = true
    canary = 1
    stagger = "5s"
    }
    }
    118 changes: 118 additions & 0 deletions 6_cadence-worker.nomad.hcl
    @@ -0,0 +1,118 @@
    job cadence-worker {
    datacenters = ["dc1"]
    type = "service"
    priority = 60

    group cadence-worker {
    count = 3

    constraint {
    distinct_hosts = true
    }

    task cadence-worker {
    driver = "docker"
    kill_timeout = "30s"

    config {
    image = "ubercadence/server:0.11.0"
    network_mode = "host"
    volumes = [
    "local/dynamicconfig.yml:/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    ]
    }

    service {
    name = "cadence-worker"
    port = "worker"
    tags = ["metrics", "metrics-port=${NOMAD_PORT_prometheus}"]

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    env {
    # change requires db reset
    NUM_HISTORY_SHARDS = 4

    LOG_LEVEL = "info"
    SERVICES = "worker"
    BIND_ON_IP = "${NOMAD_IP_worker}"
    CASSANDRA_SEEDS = "cassandra-cluster1-node1.node.consul,cassandra-cluster1-node2.node.consul,cassandra-cluster1-node3.node.consul"
    DB = "cassandra"
    RF = 3
    KEYSPACE = "cadence"
    VISIBILITY_KEYSPACE = "cadence_visibility"
    SKIP_SCHEMA_SETUP = true
    DYNAMIC_CONFIG_FILE_PATH = "/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    RINGPOP_BOOTSTRAP_MODE = "dns"
    RINGPOP_SEEDS = "cadence-frontend.service.consul:7933,cadence-history.service.consul:7934,cadence-matching.service.consul:7935,cadence-worker.service.consul:7939"
    PROMETHEUS_ENDPOINT = "${NOMAD_ADDR_prometheus}"
    }

    template {
    change_mode = "noop"
    destination = "local/dynamicconfig.yml"
    data = <<EOH
    ---
    system.minRetentionDays:
    - value: 0
    constraints: {}
    system.historyArchivalStatus:
    - value: "disabled"
    constraints: {}
    system.visibilityArchivalStatus:
    - value: "disabled"
    constraints: {}
    EOH
    }

    resources {
    cpu = 500
    memory = 256

    network {
    mbits = 100

    port worker {
    static = 7939
    }
    port prometheus {}
    }
    }

    meta {
    last_run_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    restart {
    attempts = 5
    delay = "5s"
    mode = "delay"
    interval = "1m"
    }
    }

    migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "15s"
    healthy_deadline = "60s"
    }

    update {
    max_parallel = 1
    min_healthy_time = "15s"
    healthy_deadline = "1m"
    progress_deadline = "2m"
    auto_revert = true
    auto_promote = true
    canary = 1
    stagger = "5s"
    }
    }
  7. Gurpartap revised this gist Apr 14, 2020. 3 changed files with 215 additions and 4 deletions.
    9 changes: 5 additions & 4 deletions cadence-server.nomad.hcl → 1_cadence-server.nomad.hcl
    @@ -1,5 +1,6 @@
    # to run each server component run individually, clone the job file for each,
    # to run each server service component individually, clone the job file for each of the service,
    # cherry pick the service{} and port{} definitions, and set the SERVICES env var appropriately.
    # for an example, see cadence-history.nomad.hcl file below.

    # currently requires you to run cassandra schema migrations manually (run 1 job with 0.11.0-auto-setup image)

    @@ -11,7 +12,7 @@ job cadence-server {
    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-server"
    # value = "cadence-services"
    # }

    group cadence-server {
    @@ -133,7 +134,7 @@ EOH

    resources {
    cpu = 2999
    memory = 2000
    memory = 2048

    network {
    mbits = 100
    @@ -155,7 +156,7 @@ EOH
    }

    meta {
    restarted_at = "Tue Apr 14 23:16:50 IST 2020"
    last_run_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    90 changes: 90 additions & 0 deletions 2_cadence-web.nomad.hcl
    @@ -0,0 +1,90 @@
    job cadence-web {
    datacenters = ["dc1"]
    type = "service"
    priority = 20

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-services"
    # }

    group cadence-web {
    count = 1

    task cadence-web {
    driver = "docker"
    kill_timeout = "30s"

    config {
    image = "ubercadence/web:latest"
    port_map = {
    http = 8088
    }
    }

    # restarts job when cadence-frontend service changes
    template {
    env = true
    destination = "${NOMAD_SECRETS_DIR}/env"
    data = <<EOF
    CADENCE_TCHANNEL_PEERS={{range $index, $service := service "cadence-frontend" }}{{if ne $index 0}},{{end}}{{$service.Address}}:{{$service.Port}}{{end}}
    EOF
    }

    service {
    name = "cadence-web"
    port = "http"

    check {
    type = "http"
    path = "/"
    interval = "5s"
    timeout = "3s"
    }
    }

    resources {
    cpu = 1000
    memory = 768

    network {
    mbits = 100

    port http {
    static = 8088
    }
    }
    }

    meta {
    last_run_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    restart {
    attempts = 3
    delay = "10s"
    interval = "1m"
    mode = "delay"
    }
    }

    migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "10s"
    healthy_deadline = "60s"
    }

    update {
    max_parallel = 1
    min_healthy_time = "15s"
    healthy_deadline = "2m"
    progress_deadline = "3m"
    auto_revert = true
    auto_promote = true
    canary = 1
    stagger = "5s"
    }
    }
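
    the CADENCE_TCHANNEL_PEERS template above is how cadence-web follows frontend instances: with env = true, nomad renders the file into KEY=value pairs and injects them as environment variables, and the default change_mode of "restart" bounces the task whenever the consul service list changes. a sketch with the implicit default spelled out:

    ```hcl
    # sketch: same template as above, with the default change_mode made explicit
    template {
      env         = true        # inject rendered KEY=value pairs as env vars
      change_mode = "restart"   # default: a re-render restarts the task
      destination = "${NOMAD_SECRETS_DIR}/env"
      data        = <<EOF
    CADENCE_TCHANNEL_PEERS={{range $i, $s := service "cadence-frontend"}}{{if ne $i 0}},{{end}}{{$s.Address}}:{{$s.Port}}{{end}}
    EOF
    }
    ```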
    120 changes: 120 additions & 0 deletions 3_cadence-history.nomad.hcl
    @@ -0,0 +1,120 @@
    job cadence-history {
    datacenters = ["dc1"]
    type = "service"
    priority = 60

    group cadence-history {
    count = 3

    constraint {
    distinct_hosts = true
    }

    task cadence-history {
    driver = "docker"
    kill_timeout = "30s"

    config {
    image = "ubercadence/server:0.11.0"
    network_mode = "host"
    volumes = [
    "local/dynamicconfig.yml:/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    ]
    }

    service {
    name = "cadence-history"
    port = "history"
    tags = ["metrics", "metrics-port=${NOMAD_PORT_prometheus}"]

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    }
    }

    env {
    # change requires db reset
    NUM_HISTORY_SHARDS = 4

    LOG_LEVEL = "info"
    SERVICES = "history"
    BIND_ON_IP = "${NOMAD_IP_history}"
    CASSANDRA_SEEDS = "cassandra-cluster1-node3.node.consul,cassandra-cluster1-node2.node.consul,cassandra-cluster1-node1.node.consul"
    DB = "cassandra"
    RF = 3
    KEYSPACE = "cadence"
    VISIBILITY_KEYSPACE = "cadence_visibility"
    SKIP_SCHEMA_SETUP = true
    DYNAMIC_CONFIG_FILE_PATH = "/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    RINGPOP_BOOTSTRAP_MODE = "dns"
    RINGPOP_SEEDS = "cadence-frontend.service.consul:7933,cadence-history.service.consul:7934,cadence-matching.service.consul:7935,cadence-worker.service.consul:7939"
    PROMETHEUS_ENDPOINT = "${NOMAD_ADDR_prometheus}"
    }

    template {
    change_mode = "noop"
    destination = "local/dynamicconfig.yml"
    data = <<EOH
    ---
    system.minRetentionDays:
    - value: 0
    constraints: {}
    system.historyArchivalStatus:
    - value: "disabled"
    constraints: {}
    system.visibilityArchivalStatus:
    - value: "disabled"
    constraints: {}
    history.EnableConsistentQueryByDomain:
    - value: true
    constraints: {}
    EOH
    }

    resources {
    cpu = 1999
    memory = 1536

    network {
    mbits = 100

    port history {
    static = 7934
    }
    port prometheus {}
    }
    }

    meta {
    last_run_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    restart {
    attempts = 5
    delay = "5s"
    mode = "delay"
    interval = "1m"
    }
    }

    migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "15s"
    healthy_deadline = "60s"
    }

    update {
    max_parallel = 1
    min_healthy_time = "15s"
    healthy_deadline = "1m"
    progress_deadline = "2m"
    auto_revert = true
    auto_promote = true
    canary = 1
    stagger = "5s"
    }
    }
  8. Gurpartap created this gist Apr 14, 2020.
    187 changes: 187 additions & 0 deletions cadence-server.nomad.hcl
    @@ -0,0 +1,187 @@
    # to run each server component run individually, clone the job file for each,
    # cherry pick the service{} and port{} definitions, and set the SERVICES env var appropriately.

    # currently requires you to run cassandra schema migrations manually (run 1 job with 0.11.0-auto-setup image)

    job cadence-server {
    datacenters = ["dc1"]
    type = "service"
    priority = 60

    # constraint {
    # attribute = "${meta.tags}"
    # operator = "set_contains"
    # value = "cadence-server"
    # }

    group cadence-server {
    count = 3

    constraint {
    distinct_hosts = true
    }

    task cadence-server {
    driver = "docker"
    kill_timeout = "30s"

    config {
    image = "ubercadence/server:0.11.0"
    network_mode = "host"
    volumes = [
    "local/dynamicconfig.yml:/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    ]
    }

    service {
    name = "cadence-frontend"
    port = "frontend"

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    service {
    name = "cadence-history"
    port = "history"

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    service {
    name = "cadence-matching"
    port = "matching"

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    service {
    name = "cadence-worker"
    port = "worker"

    check {
    type = "tcp"
    interval = "5s"
    timeout = "15s"
    initial_status = "passing"
    }
    }

    service {
    name = "cadence-server"
    tags = ["metrics", "metrics-port=${NOMAD_PORT_prometheus}"]
    }

    env {
    # change requires db reset
    NUM_HISTORY_SHARDS = 4

    LOG_LEVEL = "info"
    SERVICES = "frontend,history,matching,worker"
    BIND_ON_IP = "${NOMAD_IP_frontend}"
    CASSANDRA_SEEDS = "cassandra-cluster1-node1.node.consul,cassandra-cluster1-node2.node.consul,cassandra-cluster1-node3.node.consul"
    DB = "cassandra"
    RF = 3
    KEYSPACE = "cadence"
    VISIBILITY_KEYSPACE = "cadence_visibility"
    SKIP_SCHEMA_SETUP = true
    DYNAMIC_CONFIG_FILE_PATH = "/etc/cadence/config/dynamicconfig/dynamicconfig.yml"
    RINGPOP_BOOTSTRAP_MODE = "dns"
    RINGPOP_SEEDS = "cadence-frontend.service.consul:7933,cadence-history.service.consul:7934,cadence-matching.service.consul:7935,cadence-worker.service.consul:7939"
    PROMETHEUS_ENDPOINT = "${NOMAD_ADDR_prometheus}"
    }

    template {
    change_mode = "noop"
    destination = "local/dynamicconfig.yml"
    data = <<EOH
    ---
    system.minRetentionDays:
    - value: 0
    constraints: {}
    system.historyArchivalStatus:
    - value: "disabled"
    constraints: {}
    system.visibilityArchivalStatus:
    - value: "disabled"
    constraints: {}
    frontend.enableClientVersionCheck:
    - value: true
    constraints: {}
    frontend.visibilityListMaxQPS:
    - value: 100
    constraints: {}
    history.EnableConsistentQueryByDomain:
    - value: true
    constraints: {}
    EOH
    }

    resources {
    cpu = 2999
    memory = 2000

    network {
    mbits = 100

    port frontend {
    static = 7933
    }
    port history {
    static = 7934
    }
    port matching {
    static = 7935
    }
    port worker {
    static = 7939
    }
    port prometheus {}
    }
    }

    meta {
    restarted_at = "Tue Apr 14 23:16:50 IST 2020"
    }
    }

    restart {
    attempts = 5
    delay = "5s"
    mode = "delay"
    interval = "1m"
    }
    }

    migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "15s"
    healthy_deadline = "60s"
    }

    update {
    max_parallel = 1
    min_healthy_time = "15s"
    healthy_deadline = "1m"
    progress_deadline = "2m"
    auto_revert = true
    auto_promote = true
    canary = 1
    stagger = "5s"
    }
    }
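
    the restart/migrate/update stanzas repeated across these jobs encode a cautious rollout: one canary per deploy, auto-promoted once it stays healthy, then one-at-a-time replacement of old allocations, with automatic revert on failure. condensed as a sketch:

    ```hcl
    # sketch of the rollout the update stanza produces
    update {
      canary           = 1      # start one allocation on the new version first
      min_healthy_time = "15s"  # canary must stay healthy this long...
      auto_promote     = true   # ...before it is promoted automatically
      max_parallel     = 1      # then replace old allocations one at a time
      auto_revert      = true   # roll back on a failed deployment
    }
    ```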