Commit 30aba7af by Chavee Issariyapat

restructure

parent 693db537
......@@ -17,15 +17,18 @@ RUN apk --no-cache --update --available upgrade && \
install -d -o vernemq -g vernemq /vernemq
# Defaults
ENV DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR="app=vernemq" \
DOCKER_VERNEMQ_LOG__CONSOLE=console \
PATH="/vernemq/bin:$PATH" \
VERNEMQ_VERSION="1.11.1"
ENV PATH="/vernemq/bin:$PATH" \
VERNEMQ_VERSION="1.12.5"
WORKDIR /vernemq
COPY --chown=10000:10000 files/vm.args /etc/vernemq/vm.args
COPY --chown=10000:10000 files/_vernemq.conf /etc/vernemq/vernemq.conf
COPY --chown=10000:10000 bin/vernemq.sh /usr/sbin/start_vernemq
COPY --chown=10000:10000 files/vm.args /vernemq/etc/vm.args
COPY --chown=10000:10000 bin/setapikey.sh /usr/sbin/setapikey.sh
COPY --chown=10000:10000 bin/rand_cluster_node.escript /var/lib/vernemq/rand_cluster_node.escript
RUN chown -R 10000:10000 /vernemq && \
ln -s /vernemq/etc /etc/vernemq && \
......@@ -39,15 +42,16 @@ RUN chown -R 10000:10000 /vernemq && \
# 44053 VerneMQ Message Distribution
# 4369 EPMD - Erlang Port Mapper Daemon
# 8888 Prometheus Metrics
# 8008 HTTP API
# 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 Specific Distributed Erlang Port Range
EXPOSE 1883 8883 8080 44053 4369 8888 \
EXPOSE 1883 8883 8008 44053 4369 8888 \
9100 9101 9102 9103 9104 9105 9106 9107 9108 9109
VOLUME ["/vernemq/log", "/vernemq/data", "/vernemq/etc"]
HEALTHCHECK CMD curl -s -f http://localhost:8888/health || false
# HEALTHCHECK CMD curl -s -f http://localhost:8888/health || false
USER vernemq
CMD ["start_vernemq"]
# VerneMQ
Original VerneMQ customized with a NEXPIE opinionated webhook and configuration, to be used as a base for IoT microservices.
%% -*- erlang -*-
%% Escript used by the start script for cluster-node discovery: prints, one
%% per line, every node recorded in the local peer_service cluster state,
%% excluding the node whose name is given as the single command-line argument.
%% Prints nothing when no cluster state file exists yet (fresh node).
main([ThisNode]) ->
    %% Make the VerneMQ libraries (riak_dt) loadable from this escript.
    code:add_paths(filelib:wildcard("/usr/lib/vernemq/lib/*/ebin")),
    FileName = "/var/lib/vernemq/meta/peer_service/cluster_state",
    case filelib:is_regular(FileName) of
        true ->
            {ok, Bin} = file:read_file(FileName),
            {ok, State} = riak_dt_orswot:from_binary(Bin),
            %% ThisNode comes from the start script's command line (trusted
            %% input), so list_to_atom/1 is acceptable here.
            AThisNode = list_to_atom(ThisNode),
            TargetNodes = riak_dt_orswot:value(State) -- [AThisNode],
            %% Build the output with a comprehension instead of the original
            %% O(n^2) `Acc ++ ...` foldl, and print it as data via "~s"
            %% rather than as the format string itself (a '~' in a node name
            %% would have crashed io:format/1).
            io:format("~s", [[atom_to_list(N) ++ "\n" || N <- TargetNodes]]);
        false ->
            %% No cluster state yet: empty output means "no peers".
            io:format("")
    end.
#!/bin/sh
# Wait for the local VerneMQ broker to answer admin commands, then register
# the device-broker API key. Intended to be run in the background by the
# container start script (start_vernemq).

# Retry until `vmq-admin cluster show` prints its result table (recognised
# by the "+----" border), which means the broker is up and responding.
while true ; do
  if vmq-admin cluster show | grep -q "+----"; then
    break
  fi
  echo "waiting ............"
  sleep 3
done
echo "vernemq is ready"

# Retry adding the API key until the command reports 'Done'.
# The key value comes from the DEVICEBROKER_VMQ_APIKEY environment variable.
while true ; do
  # Quote the variable so an empty or space-containing key cannot split the
  # argument, and use "%s" so awk does not treat the field as a printf
  # format string (the original `printf $1` did).
  status=$(vmq-admin api-key add key="$DEVICEBROKER_VMQ_APIKEY" | grep "Done" | awk '{printf "%s", $1}')
  echo "$status"
  if [ "$status" = "Done" ]; then
    break
  fi
  sleep 3
done

# Webhook registration was moved to vernemq.conf (vmq_webhooks.webhookN.*);
# the old `vmq-admin webhooks register ...` commands are no longer needed here.
echo "complete"
\ No newline at end of file
#!/usr/bin/env bash
# Container start script: determines this node's IP address, fixes volume
# ownership/permissions, then (below) rewrites vm.args / vernemq.conf from
# DOCKER_VERNEMQ_* environment variables and starts VerneMQ.
#IP_ADDRESS=$(ip -4 addr show eth0 | grep -oP "(?<=inet).*(?=/)"| sed -e "s/^[[:space:]]*//" | tail -n 1)
# Interface of the default route; DOCKER_NET_INTERFACE overrides it.
NET_INTERFACE=$(route | grep '^default' | grep -o '[^ ]*$')
NET_INTERFACE=${DOCKER_NET_INTERFACE:-${NET_INTERFACE}}
# First IPv4 address on that interface; DOCKER_IP_ADDRESS overrides it.
IP_ADDRESS=$(ip -4 addr show ${NET_INTERFACE} | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | sed -e "s/^[[:space:]]*//" | head -n 1)
IP_ADDRESS=${DOCKER_IP_ADDRESS:-${IP_ADDRESS}}
# Ensure correct ownership and permissions on volumes
chown vernemq:vernemq /var/lib/vernemq /var/log/vernemq
chmod 755 /var/lib/vernemq /var/log/vernemq
# Ensure the Erlang node name is set correctly
if env | grep "DOCKER_VERNEMQ_NODENAME" -q; then
sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" /vernemq/etc/vm.args
if env | grep -q "DOCKER_VERNEMQ_NODENAME"; then
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" /etc/vernemq/vm.args
else
if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
NODENAME=$(hostname -i)
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${NODENAME}/" /etc/vernemq/vm.args
else
sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${IP_ADDRESS}/" /vernemq/etc/vm.args
fi
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${IP_ADDRESS}/" /etc/vernemq/vm.args
fi
if env | grep "DOCKER_VERNEMQ_DISCOVERY_NODE" -q; then
discovery_node=$DOCKER_VERNEMQ_DISCOVERY_NODE
if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
tmp=''
while [[ -z "$tmp" ]]; do
tmp=$(getent hosts tasks.$discovery_node | awk '{print $1}' | head -n 1)
sleep 1
done
discovery_node=$tmp
fi
if [ -n "$DOCKER_VERNEMQ_COMPOSE" ]; then
tmp=''
while [[ -z "$tmp" ]]; do
tmp=$(getent hosts $discovery_node | awk '{print $1}' | head -n 1)
sleep 1
done
discovery_node=$tmp
fi
if env | grep -q "DOCKER_VERNEMQ_DISCOVERY_NODE"; then
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${DOCKER_VERNEMQ_DISCOVERY_NODE}')\"" >> /etc/vernemq/vm.args
fi
sed -i.bak -r "/-eval.+/d" /vernemq/etc/vm.args
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@$discovery_node')\"" >>/vernemq/etc/vm.args
# Cluster discovery implementation based on https://github.com/thesandlord/kubernetes-pod-ip-finder
if env | grep -q "KUBE_VERNEMQ_DISCOVERY_URL"; then
response=$(curl ${KUBE_VERNEMQ_DISCOVERY_URL})
IFS=','
nodes=($(echo "$response" | tr -d '[]"'))
length=$(echo ${#nodes[@]})
for i in "${nodes[@]}"
do
if [ "$i" != "null" ] && [ "$i" != "$IP_ADDRESS" ] && (($length > 1)); then
echo "Start Joining to VerneMQ@${i}."
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${i}')\"" >> /etc/vernemq/vm.args
fi
done
IFS=''
fi
# If you encounter "SSL certification error (subject name does not match the host name)", you may try to set DOCKER_VERNEMQ_KUBERNETES_INSECURE to "1".
insecure=""
if env | grep "DOCKER_VERNEMQ_KUBERNETES_INSECURE" -q; then
insecure="--insecure"
#sed "s@VERNEMQ_WEBHOOK_HOST@$VERNEMQ_WEBHOOK_HOST@g" /etc/vernemq/_vernemq.conf > /etc/vernemq/vernemq.conf
if env | grep -q "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on"; then
echo "allow_anonymous = on" >> /etc/vernemq/vernemq.conf
fi
if env | grep "DOCKER_VERNEMQ_DISCOVERY_KUBERNETES" -q; then
DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME=${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME:-cluster.local}
# Let's get the namespace if it isn't set
DOCKER_VERNEMQ_KUBERNETES_NAMESPACE=${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE:-$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)}
# Let's set our nodename correctly
VERNEMQ_KUBERNETES_SUBDOMAIN=${DOCKER_VERNEMQ_KUBERNETES_SUBDOMAIN:-$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[0].spec.subdomain' | sed 's/"//g' | tr '\n' '\0')}
if [ $VERNEMQ_KUBERNETES_SUBDOMAIN == "null" ]; then
VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}
else
VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}
fi
if env | grep -q "DOCKER_VERNEMQ_TRADE_CONSISTENCY=on"; then
echo "trade_consistency = on" >> /etc/vernemq/vernemq.conf
fi
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${VERNEMQ_KUBERNETES_HOSTNAME}/" /vernemq/etc/vm.args
# Hack into K8S DNS resolution (temporarily)
kube_pod_names=$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ')
for kube_pod_name in $kube_pod_names; do
if [ $kube_pod_name == "null" ]; then
echo "Kubernetes discovery selected, but no pods found. Maybe we're the first?"
echo "Anyway, we won't attempt to join any cluster."
break
fi
if [ $kube_pod_name != $MY_POD_NAME ]; then
echo "Will join an existing Kubernetes cluster with discovery node at ${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}"
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}')\"" >>/vernemq/etc/vm.args
echo "Did I previously leave the cluster? If so, purging old state."
curl -fsSL http://${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}:8888/status.json >/dev/null 2>&1 ||
(echo "Can't download status.json, better to exit now" && exit 1)
curl -fsSL http://${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}:8888/status.json | grep -q ${VERNEMQ_KUBERNETES_HOSTNAME} ||
(echo "Cluster doesn't know about me, this means I've left previously. Purging old state..." && rm -rf /vernemq/data/*)
break
fi
done
if env | grep -q "DOCKER_VERNEMQ_ALLOW_MULTIPLE_SESSIONS=on"; then
echo "allow_multiple_sessions = on" >> /etc/vernemq/vernemq.conf
fi
if env | grep -q "DOCKER_VERNEMQ_MAX_CLIENT_ID_SIZE"; then
echo "max_client_id_size = ${DOCKER_VERNEMQ_MAX_CLIENT_ID_SIZE}" >> /etc/vernemq/vernemq.conf
fi
if [ -f /vernemq/etc/vernemq.conf.local ]; then
cp /vernemq/etc/vernemq.conf.local /vernemq/etc/vernemq.conf
sed -i -r "s/###IPADDRESS###/${IP_ADDRESS}/" /vernemq/etc/vernemq.conf
if [ -f /etc/vernemq/vernemq.conf.local ]; then
cp /etc/vernemq/vernemq.conf.local /etc/vernemq/vernemq.conf
else
sed -i '/########## Start ##########/,/########## End ##########/d' /vernemq/etc/vernemq.conf
sed -i '/########## Start ##########/,/########## End ##########/d' /etc/vernemq/vernemq.conf
echo "########## Start ##########" >>/vernemq/etc/vernemq.conf
echo "########## Start ##########" >> /etc/vernemq/vernemq.conf
env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|SWARM\|COMPOSE\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >>/vernemq/etc/vernemq.conf
env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >> /etc/vernemq/vernemq.conf
users_are_set=$(env | grep DOCKER_VERNEMQ_USER)
if [ ! -z "$users_are_set" ]; then
echo "vmq_passwd.password_file = /vernemq/etc/vmq.passwd" >>/vernemq/etc/vernemq.conf
touch /vernemq/etc/vmq.passwd
echo "vmq_passwd.password_file = /etc/vernemq/vmq.passwd" >> /etc/vernemq/vernemq.conf
touch /etc/vernemq/vmq.passwd
fi
for vernemq_user in $(env | grep DOCKER_VERNEMQ_USER); do
username=$(echo $vernemq_user | awk -F '=' '{ print $1 }' | sed 's/DOCKER_VERNEMQ_USER_//g' | tr '[:upper:]' '[:lower:]')
password=$(echo $vernemq_user | awk -F '=' '{ print $2 }')
/vernemq/bin/vmq-passwd /vernemq/etc/vmq.passwd $username <<EOF
vmq-passwd /etc/vernemq/vmq.passwd $username <<EOF
$password
$password
EOF
done
if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MINIMUM" ]; then
echo "erlang.distribution.port_range.minimum = 9100" >>/vernemq/etc/vernemq.conf
echo "erlang.distribution.port_range.minimum = 9100" >> /etc/vernemq/vernemq.conf
echo "erlang.distribution.port_range.maximum = 9109" >> /etc/vernemq/vernemq.conf
#echo "listener.tcp.default = ${IP_ADDRESS}:1883" >> /etc/vernemq/vernemq.conf
#echo "listener.ws.default = ${IP_ADDRESS}:8083" >> /etc/vernemq/vernemq.conf
echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >> /etc/vernemq/vernemq.conf
echo "listener.http.metrics = ${IP_ADDRESS}:8008" >> /etc/vernemq/vernemq.conf
# override webhook endpoint is VERNEMQ_WEBHOOK_HOST defined
if env | grep -q "VERNEMQ_WEBHOOK_HOST"; then
echo "vmq_webhooks.webhook1.endpoint = ${VERNEMQ_WEBHOOK_HOST}/authreg" >> /etc/vernemq/vernemq.conf
echo "vmq_webhooks.webhook2.endpoint = ${VERNEMQ_WEBHOOK_HOST}/authpub" >> /etc/vernemq/vernemq.conf
echo "vmq_webhooks.webhook3.endpoint = ${VERNEMQ_WEBHOOK_HOST}/authsub" >> /etc/vernemq/vernemq.conf
echo "vmq_webhooks.webhook4.endpoint = ${VERNEMQ_WEBHOOK_HOST}/ondeliver" >> /etc/vernemq/vernemq.conf
echo "vmq_webhooks.webhook5.endpoint = ${VERNEMQ_WEBHOOK_HOST}/onunsub" >> /etc/vernemq/vernemq.conf
echo "vmq_webhooks.webhook6.endpoint = ${VERNEMQ_WEBHOOK_HOST}/onoff" >> /etc/vernemq/vernemq.conf
echo "vmq_webhooks.webhook7.endpoint = ${VERNEMQ_WEBHOOK_HOST}/ongone" >> /etc/vernemq/vernemq.conf
#echo "vmq_webhooks.webhook8.endpoint = ${VERNEMQ_WEBHOOK_HOST}/onpub" >> /etc/vernemq/vernemq.conf
fi
if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MAXIMUM" ]; then
echo "erlang.distribution.port_range.maximum = 9109" >>/vernemq/etc/vernemq.conf
fi
if [ -z "$DOCKER_VERNEMQ_LISTENER__TCP__DEFAULT" ]; then
echo "listener.tcp.default = ${IP_ADDRESS}:1883" >>/vernemq/etc/vernemq.conf
fi
if [ -z "$DOCKER_VERNEMQ_LISTENER__WS__DEFAULT" ]; then
echo "listener.ws.default = ${IP_ADDRESS}:8080" >>/vernemq/etc/vernemq.conf
fi
if [ -z "$DOCKER_VERNEMQ_LISTENER__VMQ__CLUSTERING" ]; then
echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >>/vernemq/etc/vernemq.conf
fi
if [ -z "$DOCKER_VERNEMQ_LISTENER__HTTP__METRICS" ]; then
echo "listener.http.metrics = ${IP_ADDRESS}:8888" >>/vernemq/etc/vernemq.conf
fi
echo "########## End ##########" >>/vernemq/etc/vernemq.conf
echo "########## End ##########" >> /etc/vernemq/vernemq.conf
fi
# Check configuration file
/vernemq/bin/vernemq config generate 2>&1 >/dev/null | tee /tmp/config.out | grep error
su - vernemq -c "/vernemq/bin/vernemq config generate 2>&1 > /dev/null" | tee /tmp/config.out | grep error
if [ $? -ne 1 ]; then
echo "configuration error, exit"
......@@ -151,33 +121,28 @@ siguser1_handler() {
# SIGTERM-handler
# Gracefully shuts down the local VerneMQ node on container SIGTERM:
# drains client sessions (-k), leaves the cluster unless this is the last
# Kubernetes pod, then stops the node and terminates PID $pid.
sigterm_handler() {
if [ $pid -ne 0 ]; then
# this will stop the VerneMQ process, but first drain the node from all existing client sessions (-k)
# Build the node name to leave with, depending on the deployment flavour.
if [ -n "$VERNEMQ_KUBERNETES_HOSTNAME" ]; then
terminating_node_name=VerneMQ@$VERNEMQ_KUBERNETES_HOSTNAME
elif [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
terminating_node_name=VerneMQ@$(hostname -i)
else
terminating_node_name=VerneMQ@$IP_ADDRESS
fi
# Query the Kubernetes API for sibling pod hostnames matching the label
# selector, to decide whether this is the only remaining pod.
# NOTE(review): outside Kubernetes the service-account files do not exist,
# so this curl fails and $kube_pod_names is empty — confirm the else-branch
# below is the intended path in that case.
kube_pod_names=$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ')
# NOTE(review): unquoted multi-word expansion compared with `==` only works
# when exactly one pod name is returned; quoting both sides would be safer.
if [ $kube_pod_names == $MY_POD_NAME ]; then
echo "I'm the only pod remaining, not performing leave and state purge."
/vernemq/bin/vmq-admin node stop >/dev/null
else
# Drain sessions, purge local metadata only if the leave succeeded, then stop.
/vernemq/bin/vmq-admin cluster leave node=$terminating_node_name -k && rm -rf /vernemq/data/*
/vernemq/bin/vmq-admin node stop >/dev/null
fi
sleep 5
kill -s TERM ${pid}
exit 0
# NOTE(review): everything below is unreachable after `exit 0` above — it
# looks like an older version of this handler left behind by a merge/diff;
# confirm and remove.
# this will stop the VerneMQ process
vmq-admin cluster leave node=VerneMQ@$IP_ADDRESS -k > /dev/null
wait "$pid"
fi
exit 143; # 128 + 15 -- SIGTERM
}
# Setup OS signal handlers
# NOTE(review): the two trap pairs below look like an old and a new version
# merged together — the second pair overrides the first; confirm which one
# is intended and delete the other.
trap 'siguser1_handler' SIGUSR1
trap 'sigterm_handler' SIGTERM
# setup handlers
# on callback, kill the last background process, which is `tail -f /dev/null`
# and execute the specified handler
trap 'kill ${!}; siguser1_handler' SIGUSR1
trap 'kill ${!}; sigterm_handler' SIGTERM
# Register the device-broker API key in the background once the broker is up.
/usr/sbin/setapikey.sh &
# Start VerneMQ detached, then recover the beam PID from the process list
# (`vernemq start` does not report it).
/vernemq/bin/vernemq start
pid=$(ps aux | grep '[b]eam.smp' | awk '{print $2}')
touch /var/log/vernemq/console.log
# Keep PID 1 alive tailing the console log; `wait ${!}` keeps the shell
# interruptible so the traps above can fire.
while true
do
tail -f /var/log/vernemq/console.log & wait ${!}
done
# NOTE(review): unreachable — the loop above never exits. These lines appear
# to be an alternative foreground-console start path left in by a merge;
# confirm and keep only one of the two start mechanisms.
# Start VerneMQ
/vernemq/bin/vernemq console -noshell -noinput $@ &
pid=$!
wait $pid
## Allow anonymous users to connect, default is 'off'
##
## Default: off
##
## Acceptable values:
## - on or off
allow_anonymous = off
## Allow new client connections even when a VerneMQ cluster is inconsistent.
##
## Default: off
##
## Acceptable values:
## - on or off
allow_register_during_netsplit = on
## Allow message publishes even when a VerneMQ cluster is inconsistent.
##
## Default: off
##
## Acceptable values:
## - on or off
allow_publish_during_netsplit = on
## Allow new subscriptions even when a VerneMQ cluster is inconsistent.
##
## Default: off
##
## Acceptable values:
## - on or off
allow_subscribe_during_netsplit = on
## Allow clients to unsubscribe when a VerneMQ cluster is inconsistent.
##
## Default: off
##
## Acceptable values:
## - on or off
allow_unsubscribe_during_netsplit = on
## Allows a client to logon multiple times using the same client id
## (non-standard behaviour!).
##
## Default: off
##
## Acceptable values:
## - on or off
allow_multiple_sessions = off
## Set the time in seconds VerneMQ waits before a retry, in case a (QoS=1 or QoS=2) message
## delivery gets no answer.
##
## Default: 20
##
## Acceptable values:
## - an integer
## retry_interval = 20
## Set the maximum size for client IDs. MQTT v3.1 specifies a
## limit of 23 characters
##
## Default: 100
##
## Acceptable values:
## - an integer
## max_client_id_size = 100
max_client_id_size = 64
## This option allows persistent clients ( = clean session set to
## false) to be removed if they do not reconnect within 'persistent_client_expiration'.
## This is a non-standard option. As far as the MQTT specification is concerned,
## persistent clients persist forever.
## The expiration period should be an integer followed by one of 'd', 'w', 'm', 'y' for
## day, week, month, and year.
##
## Default: never
##
## Acceptable values:
## - text
## persistent_client_expiration = 1w
## The maximum number of QoS 1 or 2 messages that can be in the process of being
## transmitted simultaneously. This includes messages currently going through handshakes
## and messages that are being retried. Defaults to 20. Set to 0 for no maximum. If set
## to 1, this will guarantee in-order delivery of messages.
##
## Default: 20
##
## Acceptable values:
## - an integer
max_inflight_messages = 5
## The maximum number of messages to hold in the queue above
## those messages that are currently in flight. Defaults to 1000. This affects
## messages of any QoS. Set to -1 for no maximum (not recommended).
## This option allows to control how a specific client session can deal
## with message bursts. As a general rule of thumb set
## this number a bit higher than the expected message rate a single consumer is
## required to process. Note that setting this value to 0 will totally block
## delivery from any queue.
##
## Default: 1000
##
## Acceptable values:
## - an integer
max_online_messages = 1000
## The maximum number of QoS 1 or 2 messages to hold in the offline queue.
## Defaults to 1000. Set to -1 for no maximum (not recommended). Set to 0
## if no messages should be stored offline.
##
## Default: 1000
##
## Acceptable values:
## - an integer
max_offline_messages = 40
## This option sets the maximum MQTT size that VerneMQ will
## allow. Messages that exceed this size will not be accepted by
## VerneMQ. The default value is 0, which means that all valid MQTT
## messages are accepted. MQTT imposes a maximum payload size of
## 268435455 bytes.
##
## Default: 0
##
## Acceptable values:
## - an integer
max_message_size = 65536
## If a message is published with a QoS lower than the QoS of the subscription it is
## delivered to, VerneMQ can upgrade the outgoing QoS. This is a non-standard option.
##
## Default: off
##
## Acceptable values:
## - on or off
upgrade_outgoing_qos = off
## listener.max_connections is an integer or 'infinity' defining
## the maximum number of concurrent connections. This option can be overridden
## on the protocol level by:
## - listener.tcp.max_connections
## - listener.ssl.max_connections
## - listener.ws.max_connections
## - listener.wss.max_connections
## or on the listener level by:
## - listener.tcp.my_tcp_listener.max_connections
## - listener.ssl.my_ssl_listener.max_connections
## - listener.ws.my_ws_listener.max_connections
## - listener.wss.my_wss_listener.max_connections
##
## Default: 10000
##
## Acceptable values:
## - an integer
## - the text "infinity"
listener.max_connections = 10000
## Set the nr of acceptors waiting to concurrently accept new connections.
## This can be specified either on the protocol level:
## - listener.tcp.nr_of_acceptors
## - listener.ssl.nr_of_acceptors
## - listener.ws.nr_of_acceptors
## - listener.wss.nr_of_acceptors
## or on the listener level:
## - listener.tcp.my_tcp_listener.nr_of_acceptors
## - listener.ssl.my_ssl_listener.nr_of_acceptors
## - listener.ws.my_ws_listener.nr_of_acceptors
## - listener.wss.my_wss_listener.nr_of_acceptors
##
## Default: 10
##
## Acceptable values:
## - an integer
listener.nr_of_acceptors = 10
## listener.tcp.<name> is an IP address and TCP port that
## the broker will bind to. You can define multiple listeners e.g:
## - listener.tcp.default = 127.0.0.1:1883
## - listener.tcp.internal = 127.0.0.1:10883
## - listener.tcp.my_other_listener = 127.0.0.1:10884
## This also works for SSL listeners and WebSocket handlers:
## - listener.ssl.default = 127.0.0.1:8883
## - listener.ws.default = 127.0.0.1:800
## - listener.wss.default = 127.0.0.1:880
##
## Default: 127.0.0.1:1883
##
## Acceptable values:
## - an IP/port pair, e.g. 127.0.0.1:10011
#listener.tcp.default = 127.0.0.1:1883
listener.tcp.default = 0.0.0.0:1883
listener.ws.default = 0.0.0.0:8083
## listener.vmq.clustering is the IP address and TCP port that
## the broker will bind to accept connections from other cluster
## nodes e.g:
## - listener.vmq.clustering = 0.0.0.0:18883
## This also works for SSL listeners:
## - listener.vmqs.clustering = 0.0.0.0:18884
##
## Default: 0.0.0.0:44053
##
## Acceptable values:
## - an IP/port pair, e.g. 127.0.0.1:10011
listener.vmq.clustering = 0.0.0.0:44053
## listener.http.default is the IP address and TCP port that
## the broker will bind to accept HTTP connections
## - listener.http.default = 0.0.0.0:8888
## This also works for SSL listeners:
## - listener.https.default= 0.0.0.0:8889
##
## Default: 127.0.0.1:8888
##
## Acceptable values:
## - an IP/port pair, e.g. 127.0.0.1:10011
listener.http.default = 127.0.0.1:8008
## Set the mountpoint on the protocol level or on the listener level
## - listener.tcp.mountpoint
## - listener.ssl.mountpoint
## - listener.ws.mountpoint
## - listener.wss.mountpoint
## listener level:
## - listener.tcp.my_tcp_listener.mountpoint
## - listener.ssl.my_ssl_listener.mountpoint
## - listener.ws.my_ws_listener.mountpoint
## - listener.wss.my_wss_listener.mountpoint
##
## Default: off
##
## Acceptable values:
## - text
listener.mountpoint = off
## The cafile is used to define the path to a file containing
## the PEM encoded CA certificates that are trusted. Set the cafile
## on the protocol level or on the listener level:
## - listener.ssl.cafile
## - listener.wss.cafile
## or on the listener level:
## - listener.ssl.my_ssl_listener.cafile
## - listener.wss.my_wss_listener.cafile
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.ssl.cafile = /etc/vernemq/cacerts.pem
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.https.cafile = /etc/vernemq/cacerts.pem
## Set the path to the PEM encoded server certificate
## on the protocol level or on the listener level:
## - listener.ssl.certfile
## - listener.wss.certfile
## or on the listener level:
## - listener.ssl.my_ssl_listener.certfile
## - listener.wss.my_wss_listener.certfile
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.ssl.certfile = /etc/vernemq/cert.pem
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.https.certfile = /etc/vernemq/cert.pem
## Set the path to the PEM encoded key file on the protocol
## level or on the listener level:
## - listener.ssl.keyfile
## - listener.wss.keyfile
## or on the listener level:
## - listener.ssl.my_ssl_listener.keyfile
## - listener.wss.my_wss_listener.keyfile
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.ssl.keyfile = /etc/vernemq/key.pem
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.vmqs.keyfile = /etc/vernemq/key.pem
##
## Default:
##
## Acceptable values:
## - the path to a file
## listener.https.keyfile = /etc/vernemq/key.pem
## Set the list of allowed ciphers (each separated with a colon),
## on the protocol level or on the listener level. Reasonable defaults
## are used if nothing is specified:
## - listener.ssl.ciphers
## - listener.wss.ciphers
## or on the listener level:
## - listener.ssl.my_ssl_listener.ciphers
## - listener.wss.my_wss_listener.ciphers
##
## Default:
##
## Acceptable values:
## - text
## listener.ssl.ciphers =
##
## Default:
##
## Acceptable values:
## - text
## listener.vmqs.ciphers =
##
## Default:
##
## Acceptable values:
## - text
## listener.https.ciphers =
## If you have 'listener.ssl.require_certificate' set to true,
## you can create a certificate revocation list file to revoke access
## to particular client certificates. If you have done this, use crlfile
## to point to the PEM encoded revocation file. This can be done on the
## protocol level or on the listener level.
## - listener.ssl.crlfile
## - listener.wss.crlfile
## or on the listener level:
## - listener.ssl.my_ssl_listener.crlfile
## - listener.wss.my_wss_listener.crlfile
##
## Default:
##
## Acceptable values:
## - text
## listener.ssl.crlfile =
## Enable this option if you want to use SSL client certificates
## to authenticate your clients. This can be done on the protocol level
## or on the listener level.
## - listener.ssl.require_certificate
## - listener.wss.require_certificate
## or on the listener level:
## - listener.ssl.my_ssl_listener.require_certificate
## - listener.wss.my_wss_listener.require_certificate
##
## Default: off
##
## Acceptable values:
## - on or off
## listener.ssl.require_certificate = off
##
## Default: off
##
## Acceptable values:
## - on or off
## listener.vmqs.require_certificate = off
##
## Default: off
##
## Acceptable values:
## - on or off
## listener.https.require_certificate = off
## Configure the TLS protocol version (tlsv1, tlsv1.1, or tlsv1.2) to be
##
## Default: tlsv1.2
##
## Acceptable values:
## - text
## listener.ssl.tls_version = tlsv1.2
##
## Default: tlsv1.2
##
## Acceptable values:
## - text
## listener.vmqs.tls_version = tlsv1.2
##
## Default: tlsv1.2
##
## Acceptable values:
## - text
## listener.https.tls_version = tlsv1.2
## If 'listener.ssl.require_certificate' is enabled, you may enable
## 'listener.ssl.use_identity_as_username' to use the CN value from the client
## certificate as a username. If enabled other authentication plugins are not
## considered. The option can be specified either for all SSL listeners or for
## a specific listener:
## - listener.ssl.use_identity_as_username
## - listener.wss.use_identity_as_username
## or on the listener level:
## - listener.ssl.my_ssl_listener.use_identity_as_username
## - listener.wss.my_wss_listener.use_identity_as_username
##
## Default: off
##
## Acceptable values:
## - on or off
## listener.ssl.use_identity_as_username = off
## Enable the $SYSTree Reporter.
##
## Default: on
##
## Acceptable values:
## - on or off
systree_enabled = on
## The integer number of milliseconds between updates of the $SYS subscription hierarchy,
## which provides status information about the broker. If unset, defaults to 20 seconds.
## Set to 0 to disable publishing the $SYS hierarchy completely.
##
## Default: 20000
##
## Acceptable values:
## - an integer
systree_interval = 20000
## Enable the Graphite Reporter. Ensure to also configure a
## proper graphite.host
##
## Default: off
##
## Acceptable values:
## - on or off
graphite_enabled = off
## the graphite server host name
##
## Default: localhost
##
## Acceptable values:
## - text
graphite_host = localhost
## the tcp port of the graphite server
##
## Default: 2003
##
## Acceptable values:
## - an integer
graphite_port = 2003
## the interval we push metrics to the graphite server in ms
##
## Default: 20000
##
## Acceptable values:
## - an integer
graphite_interval = 20000
## set the prefix that is applied to all metrics reported to graphite
##
## Default:
##
## Acceptable values:
## - text
## graphite_prefix = my-prefix
## the graphite server api key, e.g. used by hostedgraphite.com
##
## Default:
##
## Acceptable values:
## - text
## graphite_api_key = My-Api-Key
## Distribution policy for shared subscriptions. Default is
## 'prefer_local' which will ensure that local subscribers will be
## used if any are available. 'local_only' will select a random local
## subscriber if any are available. 'random' will randomly choose
## between all available subscribers.
##
## Default: prefer_local
##
## Acceptable values:
## - text
shared_subscription_policy = prefer_local
## plugins.<plugin> enables/disables a plugin.
## Plugin specific settings are set via the plugin itself, i.e., to
## set the 'file' setting for the myplugin plugin, add a line like:
## myplugin.file = /path/to/file
##
## Acceptable values:
## - on or off
## plugins.name = on
## plugins.<name>.path defines the location of the plugin
## associated with <name>. This is needed for plugins that are not
## shipped with VerneMQ.
##
## Acceptable values:
## - the path to a directory
## plugins.mypluginname.path = /path/to/myplugin
## plugins.<name>.priority defines the load order of the
## plugins. Plugins are loaded by priority. If no priority is given
## the load order is undefined. Prioritized plugins will always be
## loaded before plugins with no defined priority.
##
## Acceptable values:
## - an integer
## plugins.mypluginname.priority = 5
## File based authentication plugin.
##
## Default: on
##
## Acceptable values:
## - on or off
plugins.vmq_passwd = on
## File based authorization plugin.
##
## Default: on
##
## Acceptable values:
## - on or off
plugins.vmq_acl = off
## Lua based plugins.
##
## Default: off
##
## Acceptable values:
## - on or off
plugins.vmq_diversity = off
## Webhook based plugins.
##
## Default: off
##
## Acceptable values:
## - on or off
plugins.vmq_webhooks = on
## The VerneMQ bridge plugin.
##
## Default: off
##
## Acceptable values:
## - on or off
plugins.vmq_bridge = off
## Set the path to an access control list file.
##
## Default: /etc/vernemq/vmq.acl
##
## Acceptable values:
## - the path to a file
vmq_acl.acl_file = /etc/vernemq/vmq.acl
## set the acl reload interval in seconds, the value 0 disables
## the automatic reloading of the acl file.
##
## Default: 10
##
## Acceptable values:
## - an integer
vmq_acl.acl_reload_interval = 0
## Set the path to a password file.
##
## Default: /etc/vernemq/vmq.passwd
##
## Acceptable values:
## - the path to a file
vmq_passwd.password_file = /etc/vernemq/vmq.passwd
## set the password reload interval in seconds, the value 0
## disables the automatic reloading of the password file.
##
## Default: 10
##
## Acceptable values:
## - an integer
vmq_passwd.password_reload_interval = 0
## Configure the vmq_diversity plugin script dir. The script dir
## is searched for Lua scripts which are automatically loaded when the
## plugin is enabled.
##
## Default: /usr/share/vernemq/lua
##
## Acceptable values:
## - the path to a directory
vmq_diversity.script_dir = /usr/share/vernemq/lua
##
## Default: off
##
## Acceptable values:
## - on or off
vmq_diversity.auth_postgres.enabled = off
##
## Default: localhost
##
## Acceptable values:
## - text
## vmq_diversity.postgres.host = localhost
##
## Default: 5432
##
## Acceptable values:
## - an integer
## vmq_diversity.postgres.port = 5432
##
## Default: root
##
## Acceptable values:
## - text
## vmq_diversity.postgres.user = root
##
## Default: password
##
## Acceptable values:
## - text
## vmq_diversity.postgres.password = password
##
## Default: vernemq_db
##
## Acceptable values:
## - text
## vmq_diversity.postgres.database = vernemq_db
##
## Default: off
##
## Acceptable values:
## - on or off
vmq_diversity.auth_mysql.enabled = off
##
## Default: localhost
##
## Acceptable values:
## - text
## vmq_diversity.mysql.host = localhost
##
## Default: 3306
##
## Acceptable values:
## - an integer
## vmq_diversity.mysql.port = 3306
##
## Default: root
##
## Acceptable values:
## - text
## vmq_diversity.mysql.user = root
##
## Default: password
##
## Acceptable values:
## - text
## vmq_diversity.mysql.password = password
##
## Default: vernemq_db
##
## Acceptable values:
## - text
## vmq_diversity.mysql.database = vernemq_db
##
## Default: off
##
## Acceptable values:
## - on or off
vmq_diversity.auth_mongodb.enabled = off
##
## Default: localhost
##
## Acceptable values:
## - text
## vmq_diversity.mongodb.host = localhost
##
## Default: 27017
##
## Acceptable values:
## - an integer
## vmq_diversity.mongodb.port = 27017
##
## Acceptable values:
## - text
## vmq_diversity.mongodb.login =
##
## Acceptable values:
## - text
## vmq_diversity.mongodb.password =
##
## Acceptable values:
## - text
## vmq_diversity.mongodb.database =
##
## Default: off
##
## Acceptable values:
## - on or off
vmq_diversity.auth_redis.enabled = off
##
## Default: localhost
##
## Acceptable values:
## - text
## vmq_diversity.redis.host = localhost
##
## Default: 6379
##
## Acceptable values:
## - an integer
## vmq_diversity.redis.port = 6379
##
## Default:
##
## Acceptable values:
## - text
## vmq_diversity.redis.password =
##
## Default: 0
##
## Acceptable values:
## - an integer
## vmq_diversity.redis.database = 0
##
## Default: localhost
##
## Acceptable values:
## - text
## vmq_diversity.memcache.host = localhost
##
## Default: 11211
##
## Acceptable values:
## - an integer
## vmq_diversity.memcache.port = 11211
## vmq_diversity.<name>.plugin = <file> loads a specific lua
## script when `vmq_diversity` starts. The scripts are loaded in the
## order defined by the names given, i.e., the script with <name>
## 'script1' is started before the plugin with <name> 'script2'.
##
## Acceptable values:
## - the path to a file
## vmq_diversity.script1.file = path/to/my/script.lua
## To configure and register a webhook a hook and an endpoint
## need to be configured and this is achieved by associating both with
## a name. vmq_webhooks.<name>.hook = <hook> associates the hook
## <hook> with the name <name>. Webhooks are registered in the order
## of the name given to it. Therefore a webhook with name 'webhook1'
## is registered before a webhook with the name 'webhook2'.
##
## Acceptable values:
## - one of: auth_on_register, auth_on_publish, auth_on_subscribe, on_register, on_publish, on_subscribe, on_unsubscribe, on_deliver, on_offline_message, on_client_wakeup, on_client_offline, on_client_gone
## Associate an endpoint with a name.
##
## Acceptable values:
## - text
## vmq_webhooks.webhook1.endpoint = http://localhost/myendpoints
vmq_webhooks.webhook1.hook = auth_on_register
vmq_webhooks.webhook1.endpoint = VERNEMQ_WEBHOOK_HOST/authreg
vmq_webhooks.webhook2.hook = auth_on_publish
vmq_webhooks.webhook2.endpoint = VERNEMQ_WEBHOOK_HOST/authpub
vmq_webhooks.webhook3.hook = auth_on_subscribe
vmq_webhooks.webhook3.endpoint = VERNEMQ_WEBHOOK_HOST/authsub
vmq_webhooks.webhook4.hook = on_deliver
vmq_webhooks.webhook4.endpoint = VERNEMQ_WEBHOOK_HOST/ondeliver
vmq_webhooks.webhook5.hook = on_unsubscribe
vmq_webhooks.webhook5.endpoint = VERNEMQ_WEBHOOK_HOST/onunsub
vmq_webhooks.webhook6.hook = on_client_offline
vmq_webhooks.webhook6.endpoint = VERNEMQ_WEBHOOK_HOST/onoff
vmq_webhooks.webhook7.hook = on_client_gone
vmq_webhooks.webhook7.endpoint = VERNEMQ_WEBHOOK_HOST/ongone
## vmq_webhooks.webhook8.hook = on_publish
## vmq_webhooks.webhook8.endpoint = VERNEMQ_WEBHOOK_HOST/onpub
## Specify the address and port of the bridge to connect to. Several
## bridges can be configured by using different bridge names (e.g. br0). If the
## connection supports SSL encryption bridge.ssl.<name> can be used.
##
## Acceptable values:
## - text
## vmq_bridge.tcp.br0 = 127.0.0.1:1889
## Set the clean session option for the bridge. By default this is disabled,
## which means that all subscriptions on the remote broker are kept in case of
## the network connection dropping. If enabled, all subscriptions and messages
## on the remote broker will be cleaned up if the connection drops.
##
## Default: off
##
## Acceptable values:
## - on or off
## vmq_bridge.tcp.br0.cleansession = off
## Set the client id for this bridge connection. If not defined, this
## defaults to 'name.hostname', where name is the connection name and hostname
## is the hostname of this computer.
##
## Default: auto
##
## Acceptable values:
## - text
## vmq_bridge.tcp.br0.client_id = auto
## Set the number of seconds after which the bridge should send a ping if
## no other traffic has occurred.
##
## Default: 60
##
## Acceptable values:
## - an integer
## vmq_bridge.tcp.br0.keepalive_interval = 60
## Configure a username for the bridge. This is used for authentication
## purposes when connecting to a broker that supports MQTT v3.1 and requires a
## username and/or password to connect. See also the password option.
##
## Acceptable values:
## - text
## vmq_bridge.tcp.br0.username = my_remote_user
## Configure a password for the bridge. This is used for authentication
## purposes when connecting to a broker that supports MQTT v3.1 and requires a
## username and/or password to connect. This option is only valid if a username
## is also supplied.
##
## Acceptable values:
## - text
## vmq_bridge.tcp.br0.password = my_remote_password
## Define one or more topic pattern to be shared between the two brokers.
## Any topics matching the pattern (including wildcards) are shared.
## The following format is used:
## pattern [[[ out | in | both ] qos-level] local-prefix remote-prefix]
## [ out | in | both ]: specifies that this bridge exports messages (out), imports
## messages (in) or shared in both directions (both). If undefined we default to
## export (out).
## qos-level: specifies the publish/subscribe QoS level used for this
## topic. If undefined we default to QoS 0.
## local-prefix and remote-prefix: For incoming topics, the bridge
## will prepend the pattern with the remote prefix and subscribe to
## the resulting topic on the remote broker. When a matching
## incoming message is received, the remote prefix will be removed
## from the topic and then the local prefix added.
## For outgoing topics, the bridge will prepend the pattern with the
## local prefix and subscribe to the resulting topic on the local
## broker. When an outgoing message is processed, the local prefix
## will be removed from the topic then the remote prefix added.
##
## Acceptable values:
## - text
## vmq_bridge.tcp.br0.topic.1 = topic
## Set the amount of time a bridge using the automatic start type will wait
## until attempting to reconnect. Defaults to 30 seconds.
##
## Default: 10
##
## Acceptable values:
## - an integer
## vmq_bridge.tcp.br0.restart_timeout = 10
## If try_private is enabled, the bridge will attempt to indicate to the
## remote broker that it is a bridge not an ordinary client.
## Note that loop detection for bridges is not yet implemented.
##
## Default: on
##
## Acceptable values:
## - on or off
## vmq_bridge.tcp.br0.try_private = on
## The cafile is used to define the path to a file containing
## the PEM encoded CA certificates that are trusted.
##
## Default:
##
## Acceptable values:
## - the path to a file
## vmq_bridge.ssl.sbr0.cafile = /etc/vernemq/cacerts.pem
## Define the path to a folder containing
## the PEM encoded CA certificates that are trusted.
##
## Default:
##
## Acceptable values:
## - the path to a file
## vmq_bridge.ssl.sbr0.capath = /etc/vernemq/cacerts
## Set the path to the PEM encoded server certificate.
##
## Default:
##
## Acceptable values:
## - the path to a file
## vmq_bridge.ssl.sbr0.certfile = /etc/vernemq/cert.pem
## Set the path to the PEM encoded key file.
##
## Default:
##
## Acceptable values:
## - the path to a file
## vmq_bridge.ssl.sbr0.keyfile = /etc/vernemq/key.pem
## When using certificate based TLS, the bridge will attempt to verify the
## hostname provided in the remote certificate matches the host/address being
## connected to. This may cause problems in testing scenarios, so this option
## may be enabled to disable the hostname verification.
## Setting this option to true means that a malicious third party could
## potentially impersonate your server, so it should always be disabled in
## production environments.
##
## Default: off
##
## Acceptable values:
## - on or off
## vmq_bridge.ssl.sbr0.insecure = off
## Configure the TLS protocol version (tlsv1, tlsv1.1, or tlsv1.2) to be
## used for this bridge.
##
## Default: tlsv1.2
##
## Acceptable values:
## - text
## vmq_bridge.ssl.sbr0.tls_version = tlsv1.2
## Pre-shared-key encryption provides an alternative to certificate based
## encryption. This option specifies the identity used.
##
## Default:
##
## Acceptable values:
## - text
## vmq_bridge.ssl.sbr0.identity =
## Pre-shared-key encryption provides an alternative to certificate based
## encryption. This option specifies the shared secret used in hexadecimal
## format without leading '0x'.
##
## Default:
##
## Acceptable values:
## - text
## vmq_bridge.ssl.sbr0.psk =
## Where to emit the default log messages (typically at 'info'
## severity):
## off: disabled
## file: the file specified by log.console.file
## console: to standard output (seen when using `vmq attach-direct`)
## both: log.console.file and standard out.
##
## Default: file
##
## Acceptable values:
## - one of: off, file, console, both
log.console = file
## The severity level of the console log, default is 'info'.
##
## Default: info
##
## Acceptable values:
## - one of: debug, info, warning, error
log.console.level = info
## When 'log.console' is set to 'file' or 'both', the file where
## console messages will be logged.
##
## Default: /var/log/vernemq/console.log
##
## Acceptable values:
## - the path to a file
log.console.file = /var/log/vernemq/console.log
## The file where error messages will be logged.
##
## Default: /var/log/vernemq/error.log
##
## Acceptable values:
## - the path to a file
log.error.file = /var/log/vernemq/error.log
## When set to 'on', enables log output to syslog.
##
## Default: off
##
## Acceptable values:
## - on or off
log.syslog = off
## Whether to enable the crash log.
##
## Default: on
##
## Acceptable values:
## - on or off
log.crash = on
## If the crash log is enabled, the file where its messages will
## be written.
##
## Default: /var/log/vernemq/crash.log
##
## Acceptable values:
## - the path to a file
log.crash.file = /var/log/vernemq/crash.log
## Maximum size in bytes of individual messages in the crash log
##
## Default: 64KB
##
## Acceptable values:
## - a byte size with units, e.g. 10GB
log.crash.maximum_message_size = 64KB
## Maximum size of the crash log in bytes, before it is rotated
##
## Default: 10MB
##
## Acceptable values:
## - a byte size with units, e.g. 10GB
log.crash.size = 10MB
## The schedule on which to rotate the crash log. For more
## information see:
## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
##
## Default: $D0
##
## Acceptable values:
## - text
log.crash.rotation = $D0
## The number of rotated crash logs to keep. When set to
## 'current', only the current open log file is kept.
##
## Default: 5
##
## Acceptable values:
## - an integer
## - the text "current"
log.crash.rotation.keep = 5
## Name of the Erlang node
##
## Default: VerneMQ@127.0.0.1
##
## Acceptable values:
## - text
nodename = VerneMQ@127.0.0.1
## Cookie for distributed node communication. All nodes in the
## same cluster should use the same cookie or they will not be able to
## communicate.
## IMPORTANT!!! SET the cookie to a private value! DO NOT LEAVE AT DEFAULT!
##
## Default: vmq
##
## Acceptable values:
## - text
distributed_cookie = vmq
## Sets the number of threads in async thread pool, valid range
## is 0-1024. If thread support is available, the default is 64.
## More information at: http://erlang.org/doc/man/erl.html
##
## Default: 64
##
## Acceptable values:
## - an integer
erlang.async_threads = 64
## The number of concurrent ports/sockets
## Valid range is 1024-134217727
##
## Default: 262144
##
## Acceptable values:
## - an integer
erlang.max_ports = 262144
## Set scheduler forced wakeup interval. All run queues will be
## scanned each Interval milliseconds. While there are sleeping
## schedulers in the system, one scheduler will be woken for each
## non-empty run queue found. An Interval of zero disables this
## feature, which also is the default.
## This feature is a workaround for lengthy executing native code, and
## native code that do not bump reductions properly.
## More information: http://www.erlang.org/doc/man/erl.html#+sfwi
##
## Acceptable values:
## - an integer
## erlang.schedulers.force_wakeup_interval = 500
## Enable or disable scheduler compaction of load. By default
## scheduler compaction of load is enabled. When enabled, load
## balancing will strive for a load distribution which causes as many
## scheduler threads as possible to be fully loaded (i.e., not run out
## of work). This is accomplished by migrating load (e.g. runnable
## processes) into a smaller set of schedulers when schedulers
## frequently run out of work. When disabled, the frequency with which
## schedulers run out of work will not be taken into account by the
## load balancing logic.
## More information: http://www.erlang.org/doc/man/erl.html#+scl
##
## Acceptable values:
## - one of: true, false
## erlang.schedulers.compaction_of_load = false
## Enable or disable scheduler utilization balancing of load. By
## default scheduler utilization balancing is disabled and instead
## scheduler compaction of load is enabled which will strive for a
## load distribution which causes as many scheduler threads as
## possible to be fully loaded (i.e., not run out of work). When
## scheduler utilization balancing is enabled the system will instead
## try to balance scheduler utilization between schedulers. That is,
## strive for equal scheduler utilization on all schedulers.
## More information: http://www.erlang.org/doc/man/erl.html#+sub
##
## Acceptable values:
## - one of: true, false
## erlang.schedulers.utilization_balancing = true
## This parameter defines the percentage of total server memory
## to assign to LevelDB. LevelDB will dynamically adjust its internal
## cache sizes to stay within this size. The memory size can
## alternately be assigned as a byte count via leveldb.maximum_memory
## instead.
##
## Default: 70
##
## Acceptable values:
## - an integer
leveldb.maximum_memory.percent = 70
## Special Options
## https://docs.vernemq.com/configuration/advanced_options
##
max_message_rate = 50
# EULA for 1.10.0 and over
# accept_eula = yes