Commit 693db537 by root

update

parent e4afcfc4
# Builder
FROM erlang:23.3.2-alpine AS builder
RUN apk add --update git build-base bsd-compat-headers openssl-dev snappy-dev curl \
    && git clone -b 1.12.5 https://github.com/vernemq/vernemq \
    && cd vernemq \
    && make -j 16 rel
# Executor
FROM alpine:3.9
COPY --from=builder /vernemq/_build/default/rel /
RUN apk --no-cache --update --available upgrade && \
    apk add --no-cache ncurses-libs openssl libstdc++ jq curl bash snappy-dev && \
    addgroup --gid 10000 vernemq && \
    adduser --uid 10000 -H -D -G vernemq -h /vernemq vernemq && \
    install -d -o vernemq -g vernemq /vernemq
# Defaults
ENV DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR="app=vernemq" \
    DOCKER_VERNEMQ_LOG__CONSOLE=console \
    PATH="/vernemq/bin:$PATH" \
    VERNEMQ_VERSION="1.12.5"
WORKDIR /vernemq
COPY --chown=10000:10000 bin/vernemq.sh /usr/sbin/start_vernemq
COPY --chown=10000:10000 files/vm.args /vernemq/etc/vm.args
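# Map the conventional FHS paths onto the self-contained /vernemq tree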
RUN chown -R 10000:10000 /vernemq && \
    ln -s /vernemq/etc /etc/vernemq && \
    ln -s /vernemq/data /var/lib/vernemq && \
    ln -s /vernemq/log /var/log/vernemq
# Ports
# 1883  MQTT
# 8883  MQTT/SSL
# 8080  MQTT WebSockets
# 44053 VerneMQ Message Distribution
# 4369  EPMD - Erlang Port Mapper Daemon
# 8888  Prometheus Metrics
# 9100-9109 Specific Distributed Erlang Port Range
EXPOSE 1883 8883 8080 44053 4369 8888 \
       9100 9101 9102 9103 9104 9105 9106 9107 9108 9109
VOLUME ["/vernemq/log", "/vernemq/data", "/vernemq/etc"]
VOLUME ["/var/log/vernemq", "/var/lib/vernemq", "/etc/vernemq"]
HEALTHCHECK CMD curl -s -f http://localhost:8888/health || false
CMD ["start_vernemq"]
USER vernemq
CMD ["start_vernemq"]
# docker-vernemq
## What is VerneMQ?
VerneMQ is a high-performance, distributed MQTT message broker. It scales
horizontally and vertically on commodity hardware to support a high number of
concurrent publishers and consumers while maintaining low latency and fault
tolerance. VerneMQ is the reliable message hub for your IoT platform or smart
products.
VerneMQ is an Apache2 licensed distributed MQTT broker, developed in Erlang.
## How to use this image
### Start a VerneMQ cluster node
```
docker run --name vernemq1 -d erlio/docker-vernemq
```
Sometimes you need to configure port forwarding (on a Mac, for example):
```
docker run -p 1883:1883 --name vernemq1 -d erlio/docker-vernemq
```
This starts a new node that listens on 1883 for MQTT connections and on 8080 for MQTT over WebSocket connections. However, at this point the broker is not able to authenticate connecting clients. To allow anonymous clients, use the ```DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on``` environment variable:
docker run -e "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on" --name vernemq1 -d erlio/docker-vernemq
### Autojoining a VerneMQ cluster
This allows a newly started container to automatically join a VerneMQ cluster. Assuming you started your first node as in the example above, you can autojoin the cluster (which currently consists of the single container 'vernemq1') as follows:
docker run -e "DOCKER_VERNEMQ_DISCOVERY_NODE=<IP-OF-VERNEMQ1>" --name vernemq2 -d erlio/docker-vernemq
(Note, you can find the IP of a docker container using `docker inspect <containername/cid> | grep \"IPAddress\"`).
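On a single host you can avoid looking up IPs altogether by putting the containers on a user-defined Docker network and letting the start script resolve the discovery node by name. A sketch, assuming a hypothetical network name `vmq-net` and using the ```DOCKER_VERNEMQ_COMPOSE=1``` name-resolution path from bin/vernemq.sh below:

```
docker network create vmq-net
docker run --net vmq-net --name vernemq1 -d erlio/docker-vernemq
docker run --net vmq-net \
    -e "DOCKER_VERNEMQ_COMPOSE=1" \
    -e "DOCKER_VERNEMQ_DISCOVERY_NODE=vernemq1" \
    --name vernemq2 -d erlio/docker-vernemq
```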
### Automated clustering on Kubernetes
When running VerneMQ inside Kubernetes, it is possible to have all pods matching a specific label automatically cluster together.
This feature uses Kubernetes' API to discover other peers, and relies on the [default pod service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which has to be enabled.
Simply set ```DOCKER_VERNEMQ_DISCOVERY_KUBERNETES=1``` in your pod's environment, and expose your own pod name through ```MY_POD_NAME```. By default, this setting will cause all pods in the ```default``` namespace with the ```app=vernemq``` label to join the same cluster. The namespace and the label selector can be overridden with ```DOCKER_VERNEMQ_KUBERNETES_NAMESPACE``` and ```DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR```.
An example configuration of your pod's environment looks like this:
```yaml
env:
  - name: DOCKER_VERNEMQ_DISCOVERY_KUBERNETES
    value: "1"
  - name: MY_POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: DOCKER_VERNEMQ_KUBERNETES_NAMESPACE
    value: "mynamespace"
  - name: DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR
    value: "app=myverneinstance"
```
When enabling Kubernetes autoclustering, don't set ```DOCKER_VERNEMQ_DISCOVERY_NODE```.
> If you encounter an "SSL certification error (subject name does not match the host name)" like the one below, you may try setting ```DOCKER_VERNEMQ_KUBERNETES_INSECURE``` to "1".
>
> ```text
> kubectl logs vernemq-0
> % Total % Received % Xferd Average Speed Time Time Time Current
> Dload Upload Total Spent Left Speed
> 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0curl: (51) SSL: certificate subject name 'client' does not match target host name 'kubernetes.default.svc.cluster.local'
> % Total % Received % Xferd Average Speed Time Time Time Current
> Dload Upload Total Spent Left Speed
> 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0curl: (51) SSL: certificate subject name 'client' does not match target host name 'kubernetes.default.svc.cluster.local'
> vernemq failed to start within 15 seconds,
> see the output of 'vernemq console' for more information.
> If you want to wait longer, set the environment variable
> WAIT_FOR_ERLANG to the number of seconds to wait.
> ...
> ```
### Checking cluster status
To check whether the above containers have successfully clustered you can issue the ```vmq-admin``` command:
```
docker exec vernemq1 vmq-admin cluster show
+--------------------+-------+
|        Node        |Running|
+--------------------+-------+
|VerneMQ@172.17.0.151| true  |
|VerneMQ@172.17.0.152| true  |
+--------------------+-------+
```
If you started VerneMQ cluster inside Kubernetes using ```DOCKER_VERNEMQ_DISCOVERY_KUBERNETES=1```, you can execute ```vmq-admin``` through ```kubectl```:
```
kubectl exec vernemq-0 -- vmq-admin cluster show
+---------------------------------------------------+-------+
|                        Node                       |Running|
+---------------------------------------------------+-------+
|VerneMQ@vernemq-0.vernemq.default.svc.cluster.local| true  |
|VerneMQ@vernemq-1.vernemq.default.svc.cluster.local| true  |
+---------------------------------------------------+-------+
```
All ```vmq-admin``` commands are available. See https://vernemq.com/docs/administration/ for more information.
### VerneMQ Configuration
All configuration parameters that are available in `vernemq.conf` can be defined
using the `DOCKER_VERNEMQ` prefix followed by the configuration parameter name.
E.g., `allow_anonymous=on` becomes `-e "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on"` and
`allow_register_during_netsplit=on` becomes
`-e "DOCKER_VERNEMQ_ALLOW_REGISTER_DURING_NETSPLIT=on"`. All available configuration
parameters can be found at https://vernemq.com/docs/configuration/.
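Multiple settings can be combined in a single run; a sketch (the parameter values are illustrative only):

```
docker run -e "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=off" \
    -e "DOCKER_VERNEMQ_MAX_CLIENT_ID_SIZE=100" \
    -e "DOCKER_VERNEMQ_ALLOW_REGISTER_DURING_NETSPLIT=on" \
    --name vernemq1 -d erlio/docker-vernemq
```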
#### Remarks
Some of our configuration variables contain dots `.`. For example if you want to
adjust the log level of VerneMQ you'd use `-e
"DOCKER_VERNEMQ_LOG.CONSOLE.LEVEL=debug"`. However, some container platforms
such as Kubernetes don't support dots and other special characters in
environment variables. If you are on such a platform you could substitute the
dots with two underscores `__`. The example above would look like `-e
"DOCKER_VERNEMQ_LOG__CONSOLE__LEVEL=debug"`.
#### File Based Authentication
You can set up [File Based Authentication](https://vernemq.com/docs/configuration/authentication.html)
by adding users and passwords as environment variables as follows:
`DOCKER_VERNEMQ_USER_<USERNAME>='password'`
where `<USERNAME>` is the username you want to use. This can be done as many times as necessary
to create the users you want. The usernames will always be created in lowercase.

*CAVEAT* - You cannot have a `=` character in your password.
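Putting this together, a sketch that starts a broker with two file-based users and anonymous access disabled (the credentials are illustrative only):

```
docker run -e "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=off" \
    -e "DOCKER_VERNEMQ_USER_alice=secret1" \
    -e "DOCKER_VERNEMQ_USER_bob=secret2" \
    --name vernemq1 -d erlio/docker-vernemq
```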
#!/usr/bin/env bash
NET_INTERFACE=$(route | grep '^default' | grep -o '[^ ]*$')
NET_INTERFACE=${DOCKER_NET_INTERFACE:-${NET_INTERFACE}}
IP_ADDRESS=$(ip -4 addr show ${NET_INTERFACE} | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | sed -e "s/^[[:space:]]*//" | head -n 1)
IP_ADDRESS=${DOCKER_IP_ADDRESS:-${IP_ADDRESS}}
# Ensure the Erlang node name is set correctly
if env | grep "DOCKER_VERNEMQ_NODENAME" -q; then
sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" /vernemq/etc/vm.args
else
if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
NODENAME=$(hostname -i)
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${NODENAME}/" /etc/vernemq/vm.args
else
sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${IP_ADDRESS}/" /vernemq/etc/vm.args
fi
fi
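# If a discovery node is given, append an -eval to vm.args that joins it at
# boot, first resolving the name to an IP for Swarm/Compose deployments.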
if env | grep "DOCKER_VERNEMQ_DISCOVERY_NODE" -q; then
discovery_node=$DOCKER_VERNEMQ_DISCOVERY_NODE
if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
tmp=''
while [[ -z "$tmp" ]]; do
tmp=$(getent hosts tasks.$discovery_node | awk '{print $1}' | head -n 1)
sleep 1
done
discovery_node=$tmp
fi
if [ -n "$DOCKER_VERNEMQ_COMPOSE" ]; then
tmp=''
while [[ -z "$tmp" ]]; do
tmp=$(getent hosts $discovery_node | awk '{print $1}' | head -n 1)
sleep 1
done
discovery_node=$tmp
fi
sed -i.bak -r "/-eval.+/d" /vernemq/etc/vm.args
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@$discovery_node')\"" >>/vernemq/etc/vm.args
fi
# If you encounter "SSL certification error (subject name does not match the host name)", you may try to set DOCKER_VERNEMQ_KUBERNETES_INSECURE to "1".
insecure=""
if env | grep "DOCKER_VERNEMQ_KUBERNETES_INSECURE" -q; then
insecure="--insecure"
fi
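# Kubernetes discovery: query the API for pods matching the label selector,
# derive this node's name from the pod name and subdomain, and join the first peer found.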
if env | grep "DOCKER_VERNEMQ_DISCOVERY_KUBERNETES" -q; then
DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME=${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME:-cluster.local}
# Let's get the namespace if it isn't set
DOCKER_VERNEMQ_KUBERNETES_NAMESPACE=${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE:-$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)}
# Let's set our nodename correctly
VERNEMQ_KUBERNETES_SUBDOMAIN=${DOCKER_VERNEMQ_KUBERNETES_SUBDOMAIN:-$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[0].spec.subdomain' | sed 's/"//g' | tr '\n' '\0')}
if [ $VERNEMQ_KUBERNETES_SUBDOMAIN == "null" ]; then
VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}
else
VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}
fi
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${VERNEMQ_KUBERNETES_HOSTNAME}/" /vernemq/etc/vm.args
# Hack into K8S DNS resolution (temporarily)
kube_pod_names=$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ')
for kube_pod_name in $kube_pod_names; do
if [ $kube_pod_name == "null" ]; then
echo "Kubernetes discovery selected, but no pods found. Maybe we're the first?"
echo "Anyway, we won't attempt to join any cluster."
break
fi
if [ $kube_pod_name != $MY_POD_NAME ]; then
echo "Will join an existing Kubernetes cluster with discovery node at ${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}"
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}')\"" >>/vernemq/etc/vm.args
echo "Did I previously leave the cluster? If so, purging old state."
curl -fsSL http://${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}:8888/status.json >/dev/null 2>&1 ||
(echo "Can't download status.json, better to exit now" && exit 1)
curl -fsSL http://${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}:8888/status.json | grep -q ${VERNEMQ_KUBERNETES_HOSTNAME} ||
(echo "Cluster doesn't know about me, this means I've left previously. Purging old state..." && rm -rf /vernemq/data/*)
break
fi
done
fi
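# Use a mounted vernemq.conf.local verbatim if present; otherwise regenerate
# the managed section of vernemq.conf from the DOCKER_VERNEMQ_* environment.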
if [ -f /vernemq/etc/vernemq.conf.local ]; then
    cp /vernemq/etc/vernemq.conf.local /vernemq/etc/vernemq.conf
    sed -i -r "s/###IPADDRESS###/${IP_ADDRESS}/" /vernemq/etc/vernemq.conf
else
    sed -i '/########## Start ##########/,/########## End ##########/d' /vernemq/etc/vernemq.conf
    echo "########## Start ##########" >>/vernemq/etc/vernemq.conf
    env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|SWARM\|COMPOSE\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >>/vernemq/etc/vernemq.conf
    users_are_set=$(env | grep DOCKER_VERNEMQ_USER)
    if [ ! -z "$users_are_set" ]; then
        echo "vmq_passwd.password_file = /vernemq/etc/vmq.passwd" >>/vernemq/etc/vernemq.conf
        touch /vernemq/etc/vmq.passwd
    fi
    for vernemq_user in $(env | grep DOCKER_VERNEMQ_USER); do
        username=$(echo $vernemq_user | awk -F '=' '{ print $1 }' | sed 's/DOCKER_VERNEMQ_USER_//g' | tr '[:upper:]' '[:lower:]')
        password=$(echo $vernemq_user | awk -F '=' '{ print $2 }')
        /vernemq/bin/vmq-passwd /vernemq/etc/vmq.passwd $username <<EOF
$password
$password
EOF
    done
    if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MINIMUM" ]; then
        echo "erlang.distribution.port_range.minimum = 9100" >>/vernemq/etc/vernemq.conf
    fi
    if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MAXIMUM" ]; then
        echo "erlang.distribution.port_range.maximum = 9109" >>/vernemq/etc/vernemq.conf
    fi
    if [ -z "$DOCKER_VERNEMQ_LISTENER__TCP__DEFAULT" ]; then
        echo "listener.tcp.default = ${IP_ADDRESS}:1883" >>/vernemq/etc/vernemq.conf
    fi
    if [ -z "$DOCKER_VERNEMQ_LISTENER__WS__DEFAULT" ]; then
        echo "listener.ws.default = ${IP_ADDRESS}:8080" >>/vernemq/etc/vernemq.conf
    fi
    if [ -z "$DOCKER_VERNEMQ_LISTENER__VMQ__CLUSTERING" ]; then
        echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >>/vernemq/etc/vernemq.conf
    fi
    if [ -z "$DOCKER_VERNEMQ_LISTENER__HTTP__METRICS" ]; then
        echo "listener.http.metrics = ${IP_ADDRESS}:8888" >>/vernemq/etc/vernemq.conf
    fi
    echo "########## End ##########" >>/vernemq/etc/vernemq.conf
fi
# Check configuration file
/vernemq/bin/vernemq config generate 2>&1 >/dev/null | tee /tmp/config.out | grep error
if [ $? -ne 1 ]; then
    echo "configuration error, exit"
    cat /tmp/config.out
    exit 1
fi
pid=0
# SIGUSR1-handler
siguser1_handler() {
    echo "stopped"
}
# SIGTERM-handler
sigterm_handler() {
    if [ $pid -ne 0 ]; then
        # this will stop the VerneMQ process, but first drain the node from all existing client sessions (-k)
        if [ -n "$VERNEMQ_KUBERNETES_HOSTNAME" ]; then
            terminating_node_name=VerneMQ@$VERNEMQ_KUBERNETES_HOSTNAME
        elif [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
            terminating_node_name=VerneMQ@$(hostname -i)
        else
            terminating_node_name=VerneMQ@$IP_ADDRESS
        fi
        kube_pod_names=$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ')
        # echo without quotes collapses whitespace, so this matches only when our pod is the single remaining name
        if [ "$(echo $kube_pod_names)" == "$MY_POD_NAME" ]; then
            echo "I'm the only pod remaining, not performing leave and state purge."
            /vernemq/bin/vmq-admin node stop >/dev/null
        else
            /vernemq/bin/vmq-admin cluster leave node=$terminating_node_name -k && rm -rf /vernemq/data/*
            /vernemq/bin/vmq-admin node stop >/dev/null
        fi
        sleep 5
        kill -s TERM ${pid}
        exit 0
    fi
}
# Setup OS signal handlers
trap 'siguser1_handler' SIGUSR1
trap 'sigterm_handler' SIGTERM
# Start VerneMQ
/vernemq/bin/vernemq console -noshell -noinput "$@" &
pid=$!
wait $pid
+P 256000
-env ERL_MAX_ETS_TABLES 256000
-env ERL_CRASH_DUMP /erl_crash.dump
-env ERL_FULLSWEEP_AFTER 0
-env ERL_MAX_PORTS 256000
+A 64
-setcookie vmq
-name VerneMQ@127.0.0.1
+K true
+W w
-smp enable
+zdbbl 32768