-
Notifications
You must be signed in to change notification settings - Fork 3
/
storm-cluster-example.conf
116 lines (98 loc) · 4.71 KB
/
storm-cluster-example.conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
# The datacenter where this cluster should be created.
# This can be any datacenter, but cluster creation may be faster
# if it's created in the same datacenter as the image.
# Time taken for transferring files from local system is another factor.
DATACENTER_FOR_CLUSTER="newark"
# or alternatively, specify the LOCATION or ABBR returned by avail.datacenters (case does not matter)
# DATACENTER_FOR_CLUSTER="Newark, NJ, USA"
# DATACENTER_FOR_CLUSTER="newark"
# Sizing of cluster:
# A storm cluster consists of 1 nimbus node and n supervisor nodes.
#
# Nimbus node's role is to distribute jobs to supervisors.
#
# Supervisor nodes start and supervise the worker processes in which
# bolts and spouts are executed.
# Select a linode plan for the nimbus node.
# Nimbus node runs nimbus daemon and UI daemon.
# NOTE(review): the client node description further below also says it runs the
# storm UI daemon — confirm which node actually hosts the UI.
# Plans are from https://www.linode.com/pricing : "2GB | 4GB | 8GB ...."
NIMBUS_NODE="2GB"
# Select count and linode plans for supervisor nodes.
# Supervisor nodes are the workhorses. They host supervisor daemon,
# logviewer daemon, and the worker processes that execute bolts and spouts.
#
# Syntax is: "<plan>:<count> <plan>:<count> ...."
# where <plan> is a plan from https://www.linode.com/pricing : "2GB | 4GB | 8GB ...."
# and count is a numeric count.
# Example: Create 3 supervisor nodes - a 2 GB, a 4 GB and an 8 GB
# SUPERVISOR_NODES="2GB:1 4GB:1 8GB:1"
SUPERVISOR_NODES="2GB:2"
# Select plan for client node.
# Client node is where developers can submit topologies to the cluster.
# Client node also runs the storm UI daemon, which hosts the storm web UI for
# monitoring the cluster.
#
# Syntax is: "<plan>"
# where <plan> is a plan from https://www.linode.com/pricing : "2GB | 4GB | 8GB ...."
CLIENT_NODE="2GB"
# ** REQUIRED **
#
# The image conf file to use as a template for creating nodes of this cluster.
# Every node's disk will be a replica of this image.
# Path should be relative to this cluster directory, or an absolute path.
# Examples:
# STORM_IMAGE_CONF=../storm-image1
# STORM_IMAGE_CONF=../storm-image1/storm-image1.conf
# STORM_IMAGE_CONF=/home/clustermgr/storm-linode/storm-image1
STORM_IMAGE_CONF=
# ** REQUIRED **
#
# The zookeeper conf file path *should* include a directory (even if it's just ./), because it gets included as a source file
# in the script.
# Path should be relative to this cluster directory, or an absolute path.
# Examples:
# ZOOKEEPER_CLUSTER=../zk-cluster1
# ZOOKEEPER_CLUSTER=../zk-cluster1/zk-cluster1.conf
# ZOOKEEPER_CLUSTER=/home/clustermgr/storm-linode/zk-cluster1
ZOOKEEPER_CLUSTER=
# Disk size for each node's disk (presumably in MB, so 5000 = ~5 GB —
# verify against the Linode API usage in the cluster scripts).
NODE_DISK_SIZE=5000
# A root password for all nodes of this cluster.
# If none is specified, the image's root password is retained.
# If specified, it should contain at least two of these four character classes:
# lower case letters - upper case letters - numbers - punctuation.
#
# Some special characters may require escape prefixing and the password to be enclosed in
# single or double quotes.
# Some examples:
# - for password with spaces, enclose in double quotes
# NODE_ROOT_PASSWORD="a PassworD with spaces"
#
# - for password with double quotes, enclose in double quotes and prefix every double quote in the password with a backslash \ :
# NODE_ROOT_PASSWORD="pswd_WITH_\"dbl\"_quotes"
#
# - for password with $, enclose in double quotes and prefix every $ in the password with a backslash \ :
# NODE_ROOT_PASSWORD="pswd_with_\$a_"
NODE_ROOT_PASSWORD=""
# Keypair for ssh root authentication into nodes of this cluster.
# If none is specified, the image's keypair is used.
NODE_ROOT_SSH_PUBLIC_KEY=
NODE_ROOT_SSH_PRIVATE_KEY=
# Public and private hostnames for nodes.
# The private hostnames become the actual hostnames of the nodes, while
# the public hostnames are just entries in each node's /etc/hosts.
NIMBUS_NODE_PUBLIC_HOSTNAME=$CLUSTER_NAME-public-nimbus
NIMBUS_NODE_PRIVATE_HOSTNAME=$CLUSTER_NAME-private-nimbus
SUPERVISOR_NODES_PUBLIC_HOSTNAME_PREFIX=$CLUSTER_NAME-public-supr
SUPERVISOR_NODES_PRIVATE_HOSTNAME_PREFIX=$CLUSTER_NAME-private-supr
CLIENT_NODES_PUBLIC_HOSTNAME_PREFIX=$CLUSTER_NAME-public-client
CLIENT_NODES_PRIVATE_HOSTNAME_PREFIX=$CLUSTER_NAME-private-client
# If this is true, then the cluster manager machine is probably outside
# linode cloud and its private network. So it should use public IPs of linodes for
# all ssh communication.
# If cluster manager machine is also a linode in same datacenter as cluster nodes, comment
# this out or set it to false so that only private IPs are used.
CLUSTER_MANAGER_USES_PUBLIC_IP=false
# Use these template iptables rules to create rules for the cluster.
IPTABLES_V4_RULES_TEMPLATE=../template-storm-iptables-rules.v4
IPTABLES_CLIENT_V4_RULES_TEMPLATE=../template-storm-client-iptables-rules.v4
IPTABLES_V6_RULES_TEMPLATE=../template-storm-iptables-rules.v6