Commit e4afcfc4 by root

rearrange branch

parent c31221c3
node_modules
.DS_Store
.vscode
*.rdb
xtest*
.env
.make-function
temp
# a helper shell script
MAKE_VERSION=1.2.5
define MAKEFUNCTIONCODE
function hasChanges() {
@@ -10,7 +10,18 @@ define MAKEFUNCTIONCODE
}
function getRelease() {
# old : use tag in release.conf
# awk -F= '/^RELEASE=/{print $$2}' release.conf
# new use tag from git of the form -> 1.0.1-2-3b10c420
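# e.g. (illustrative walk-through): if `git describe` prints "1.0.1-2-g3b10c420",
# then TAGN="1.0.1-2", TV="1.0.1", TM="-2" and the function echoes "1.0.1-2";
# on an exact tag ("1.0.1") TM is empty, so "-0" is substituted -> "1.0.1-0"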
TAGN=$$(git describe | sed -e "s/-g.*//")
TV=$$(echo $$TAGN | sed -e "s/-.*//")
TM=$$(echo $$TAGN | sed -e "s/^[^\-]*//")
if [ -z "$$TM" ]
then
TM="-0"
fi
echo $$TV$$TM
}
function getBaseTag() {
@@ -62,8 +73,21 @@ define MAKEFUNCTIONCODE
function getVersion() {
result=$$(getRelease)
if hasChanges ; then
result="$$result-dirty"
fi
echo $$result
}
function getVersionWithHash() {
result=$$(getRelease)
if differsFromRelease; then
result="$$result-$$(git log -n 1 --format=%h .)"
# output only first 7 characters
# result="$$result-$$(git log -n 1 --format=%h .)"
# output first 8 characters
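# e.g. (illustrative): %H = 3b10c420f00d... -> 3b10c420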
result="$$result-$$(git log -n 1 --format=%H . | head -c 8)"
fi
if hasChanges ; then
@@ -101,19 +125,66 @@ SHELL=/bin/bash
include release.conf
IMAGE=$(REGISTRY_HOST)/$(GROUPNAME)/$(PROJECTNAME)
VERSION=$(shell . $(RELEASE_SUPPORT) ; getVersion)
VERSIONHASH=$(shell . $(RELEASE_SUPPORT) ; getVersionWithHash)
TAG=$(shell . $(RELEASE_SUPPORT); getTag)
PROJECTPATH?=.
BUILDARG?=
RELEASE_SUPPORT := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))/.make-function
export MAKEFUNCTIONCODE
# If the first goal is one of the image/deploy management targets below...
doit=
ifeq (rmi,$(firstword $(MAKECMDGOALS)))
doit=true
endif
ifeq (images,$(firstword $(MAKECMDGOALS)))
doit=true
endif
ifeq (deploy,$(firstword $(MAKECMDGOALS)))
doit=true
endif
ifeq (show-deploy,$(firstword $(MAKECMDGOALS)))
doit=true
endif
ifeq (annotate,$(firstword $(MAKECMDGOALS)))
doit=true
endif
ifeq (rollback,$(firstword $(MAKECMDGOALS)))
doit=true
endif
ifdef doit
# use the rest as arguments for "make xxx"
#CMD_ARGS := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS))
# ...and turn them into do-nothing targets
#$(eval $(CMD_ARGS):;@:)
ifdef n
DEPLOYARG= -n $(n) --context=$(c)
else
DEPLOYARG= -n nexpieio --context=$(c)
endif
endif
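# e.g. (illustrative): `make deploy c=prod-cluster n=staging` sets
# DEPLOYARG to "-n staging --context=prod-cluster"; without n= the
# namespace defaults to nexpieio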
# ifeq ($(CMD_ARGS),)
# else
# EMPTY=
# SPACE=$(EMPTY) $(EMPTY)
# TDARG = $(subst $(SPACE), --context=,$(CMD_ARGS))
# DARG = -n $(TDARG)
# endif
.PHONY: pre-build docker-build post-build build cleanbuild release patch-release minor-release major-release tag check-status check-release showtag show-tag \
push do-push post-push post-init update-make rmi images deploy show-deploy annotate rollback
init:
@echo "$$MAKEFUNCTIONCODE" > ./.make-function
build: init pre-build docker-build post-build
cleanbuild: init pre-build docker-cleanbuild post-build
pre-build:
post-build:
@@ -126,65 +197,60 @@ post-init:
@rm .make-function
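# Note: Docker 1.x before 1.10 needs `docker tag -f` to overwrite an existing
# tag; the recipes below pick the right form from the `docker -v` major/minor.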
docker-build:
docker build $(BUILDARG) -t $(IMAGE):$(VERSIONHASH) $(PROJECTPATH)
@DOCKER_MAJOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f1) ; \
DOCKER_MINOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f2) ; \
if [ $$DOCKER_MAJOR -eq 1 ] && [ $$DOCKER_MINOR -lt 10 ] ; then \
echo docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ;\
docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ;\
echo docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
else \
echo docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ;\
docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ; \
echo docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
fi
docker-cleanbuild:
docker build --no-cache -t $(IMAGE):$(VERSIONHASH) $(PROJECTPATH)
@DOCKER_MAJOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f1) ; \
DOCKER_MINOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f2) ; \
if [ $$DOCKER_MAJOR -eq 1 ] && [ $$DOCKER_MINOR -lt 10 ] ; then \
echo docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ;\
docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ;\
echo docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
docker tag -f $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
else \
echo docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ;\
docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):latest ; \
echo docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
docker tag $(IMAGE):$(VERSIONHASH) $(IMAGE):$(VERSION) ;\
fi
release: check-status check-release build push
push: init do-push post-push
rmi: delete-docker-image
images: list-docker-image
do-push:
docker push $(IMAGE):$(VERSIONHASH)
docker push $(IMAGE):$(VERSION)
docker push $(IMAGE):latest
snapshot: build push
build-push: build push
show-tag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@. $(RELEASE_SUPPORT); getVersionWithHash
@rm .make-function
showtag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@. $(RELEASE_SUPPORT); getVersionWithHash
@rm .make-function
tag-patch-release: init
tag-patch-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextPatchLevel)
tag-patch-release: release.conf tag
tag-minor-release: init
tag-minor-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMinorLevel)
tag-minor-release: release.conf tag
tag-major-release: init
tag-major-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMajorLevel)
tag-major-release: release.conf tag
patch-release: tag-patch-release release
@echo $(VERSION)
minor-release: tag-minor-release release
@echo $(VERSION)
major-release: tag-major-release release
@echo $(VERSION)
tag: TAG=$(shell . $(RELEASE_SUPPORT); getTag $(VERSION))
tag: check-status
@. $(RELEASE_SUPPORT) ; ! tagExists $(TAG) || (echo "ERROR: tag $(TAG) for version $(VERSION) already tagged in git" >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; setRelease $(VERSION)
git add .
git commit -m "bumped to version $(VERSION)" ;
git tag $(TAG) ;
@ if [ -n "$(shell git remote -v)" ] ; then git push --tags ; else echo 'no remote to push tags to' ; fi
check-status: init
@. $(RELEASE_SUPPORT) ; ! hasChanges || (echo "ERROR: there are still outstanding changes" >&2) ;
@rm .make-function
@@ -200,3 +266,60 @@ update-make: init
make-version:
@echo $(MAKE_VERSION)
# awk cuts column 3 (the image ID); xargs runs the command with arguments from the pipe
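# e.g. (illustrative): a `docker images` line such as
#   dock.nexpie.com/stack/foo   1.0.0   3b10c420f00d   2 days ago   120MB
# yields "3b10c420f00d", which is handed to `docker rmi`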
delete-docker-image:
@docker images | grep $(CMD_ARGS) | awk '{print $$3}' | xargs docker rmi || true
# || true forces the exit code to 0; otherwise make reports an error when no line matches
list-docker-image:
@docker images | grep $(CMD_ARGS) || true
env-snippet:
@jq -r '..|select(type=="string") + "="' config/custom-environment-variables.json > _temp_file_.env
@keg -s nexpieconfig _temp_file_.env > /dev/null
@cat env-snippet-nexpieconfig.yaml
@rm _temp_file_.env
@rm nexpieconfig.yaml
@rm env-snippet-nexpieconfig.yaml
show-deploy: init release.conf
ifndef c
@echo Please run make show-deploy c={context}
else
kubectl rollout history deployment/$(DEPLOYMENT) $(DEPLOYARG)
endif
@rm .make-function
annotate: init release.conf
ifndef c
@echo Please run make annotate c={context} a="{annotation text}"
else ifndef a
@echo Please run make annotate c={context} a="{annotation text}"
else
kubectl annotate deployment/$(DEPLOYMENT) kubernetes.io/change-cause="$(a)" $(DEPLOYARG)
endif
@rm .make-function
rollback: init release.conf
ifndef r
@echo Please run make rollback r={revision} c={context}
else
kubectl rollout undo deployment/$(DEPLOYMENT) --to-revision=$(r) $(DEPLOYARG)
endif
@rm .make-function
deploy: init release.conf
ifndef c
@echo Deploy failed: Please run make deploy c={context}
else
@cat k8s/deployment.yaml | sed "s/{{IMAGE_TAG}}/$(VERSIONHASH)/g" | kubectl apply -f - $(DEPLOYARG)
@rm .make-function
ifdef a
@kubectl annotate deployment/$(DEPLOYMENT) kubernetes.io/change-cause="$(a)" $(DEPLOYARG)
endif
endif
.DS_Store
node_modules/
config/*.json
*.rdb
FROM node:8.10.0-alpine
WORKDIR /usr/src/app
ENV TERM=xterm
RUN apk add --update --no-cache python \
python-dev \
py-pip \
yarn \
openssh-client \
git \
&& rm -rf /var/cache/apk/*
COPY . .
RUN npm install
RUN apk del git \
&& rm -rf /var/cache/apk/*
EXPOSE 40000
CMD npm start
# a helper shell script
MAKE_VERSION=1.1.2
define MAKEFUNCTIONCODE
function hasChanges() {
# old version: just track uncommitted + untracked files
# test -n "$$(git status -s .)"
# filter out the untracked .make-function helper, then count any remaining change as dirty
test 0 -ne "$$(git status -s . | sed -e '/.make-function/d' | wc -l)"
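# e.g. (illustrative): if .make-function is the only untracked file, the
# filtered list is empty (0 lines) and the tree counts as clean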
}
function getRelease() {
awk -F= '/^RELEASE=/{print $$2}' release.conf
}
function getBaseTag() {
sed -n -e "s/^tag=\(.*\)$$(getRelease)\$$/\1/p" release.conf
}
function getTag() {
if [ -z "$$1" ] ; then
awk -F= '/^tag/{print $$2}' release.conf
else
echo "$$(getBaseTag)$$1"
fi
}
function setRelease() {
if [ -n "$$1" ] ; then
sed -i.x -e "s/^tag=.*/tag=$$(getTag $$1)/" release.conf
sed -i.x -e "s/^RELEASE=.*/RELEASE=$$1/g" release.conf
rm -f release.conf.x
runPreTagCommand "$$1"
else
echo "ERROR: missing release version parameter " >&2
return 1
fi
}
function runPreTagCommand() {
if [ -n "$$1" ] ; then
COMMAND=$$(sed -n -e "s/@@RELEASE@@/$$1/g" -e 's/^pre_tag_command=\(.*\)/\1/p' release.conf)
if [ -n "$$COMMAND" ] ; then
if ! OUTPUT=$$(bash -c "$$COMMAND" 2>&1) ; then echo $$OUTPUT >&2 && exit 1 ; fi
fi
else
echo "ERROR: missing release version parameter " >&2
return 1
fi
}
function tagExists() {
tag=$${1:-$$(getTag)}
test -n "$$tag" && test -n "$$(git tag | grep "^$$tag\$$")"
}
function differsFromRelease() {
tag=$$(getTag)
! tagExists $$tag || test -n "$$(git diff --shortstat -r $$tag .)"
}
function getVersion() {
result=$$(getRelease)
if differsFromRelease; then
result="$$result-$$(git log -n 1 --format=%h .)"
fi
if hasChanges ; then
result="$$result-dirty"
fi
echo $$result
}
function nextPatchLevel() {
version=$${1:-$$(getRelease)}
major_and_minor=$$(echo $$version | cut -d. -f1,2)
patch=$$(echo $$version | cut -d. -f3)
version=$$(printf "%s.%d" $$major_and_minor $$(($$patch + 1)))
echo $$version
}
function nextMinorLevel() {
version=$${1:-$$(getRelease)}
major=$$(echo $$version | cut -d. -f1);
minor=$$(echo $$version | cut -d. -f2);
version=$$(printf "%d.%d.0" $$major $$(($$minor + 1))) ;
echo $$version
}
function nextMajorLevel() {
version=$${1:-$$(getRelease)}
major=$$(echo $$version | cut -d. -f1);
version=$$(printf "%d.0.0" $$(($$major + 1)))
echo $$version
}
endef
SHELL=/bin/bash
include release.conf
IMAGE=$(REGISTRY_HOST)/$(GROUPNAME)/$(PROJECTNAME)
VERSION=$(shell . $(RELEASE_SUPPORT) ; getVersion)
TAG=$(shell . $(RELEASE_SUPPORT); getTag)
RELEASE_SUPPORT := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))/.make-function
export MAKEFUNCTIONCODE
.PHONY: pre-build docker-build post-build build release patch-release minor-release major-release tag check-status check-release showtag show-tag \
push do-push post-push post-init update-make
init:
@echo "$$MAKEFUNCTIONCODE" > ./.make-function
build: init pre-build docker-build post-build
pre-build:
post-build:
@rm .make-function
post-push:
@rm .make-function
post-init:
@rm .make-function
docker-build:
docker build -t $(IMAGE):$(VERSION) .
@DOCKER_MAJOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f1) ; \
DOCKER_MINOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f2) ; \
if [ $$DOCKER_MAJOR -eq 1 ] && [ $$DOCKER_MINOR -lt 10 ] ; then \
echo docker tag -f $(IMAGE):$(VERSION) $(IMAGE):latest ;\
docker tag -f $(IMAGE):$(VERSION) $(IMAGE):latest ;\
else \
echo docker tag $(IMAGE):$(VERSION) $(IMAGE):latest ;\
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest ; \
fi
release: check-status check-release build push
push: init do-push post-push
do-push:
docker push $(IMAGE):$(VERSION)
docker push $(IMAGE):latest
snapshot: build push
show-tag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@rm .make-function
showtag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@rm .make-function
tag-patch-release: init
tag-patch-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextPatchLevel)
tag-patch-release: release.conf tag
tag-minor-release: init
tag-minor-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMinorLevel)
tag-minor-release: release.conf tag
tag-major-release: init
tag-major-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMajorLevel)
tag-major-release: release.conf tag
patch-release: tag-patch-release release
@echo $(VERSION)
minor-release: tag-minor-release release
@echo $(VERSION)
major-release: tag-major-release release
@echo $(VERSION)
tag: TAG=$(shell . $(RELEASE_SUPPORT); getTag $(VERSION))
tag: check-status
@. $(RELEASE_SUPPORT) ; ! tagExists $(TAG) || (echo "ERROR: tag $(TAG) for version $(VERSION) already tagged in git" >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; setRelease $(VERSION)
git add .
git commit -m "bumped to version $(VERSION)" ;
git tag $(TAG) ;
@ if [ -n "$(shell git remote -v)" ] ; then git push --tags ; else echo 'no remote to push tags to' ; fi
check-status: init
@. $(RELEASE_SUPPORT) ; ! hasChanges || (echo "ERROR: there are still outstanding changes" >&2) ;
@rm .make-function
check-release: init release.conf post-init
@. $(RELEASE_SUPPORT) ; tagExists $(TAG) || (echo "ERROR: version not yet tagged in git. make [minor,major,patch]-release." >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; ! differsFromRelease $(TAG) || (echo "ERROR: current directory differs from tagged $(TAG). make [minor,major,patch]-release." ; exit 1)
update-make: init
@rm Makefile
wget https://dev.nexpie.com/sample/docker-makefile-sample/raw/master/Makefile
@rm .make-function
make-version:
@echo $(MAKE_VERSION)
var config = require('config');
// https://github.com/isaacs/node-lru-cache
var on_message_redis = require('./redis/on_message').on_message_redis
var LRU = require("lru-cache"),
cache = LRU({
max: 500,
maxAge: 1000 * 60 * 5
});
module.exports = function (options = {}) {
return function (req, res, next) {
if (req.header('vernemq-hook') == 'auth_on_publish' && req && req.body) {
if (options.debug) {
console.log('auth_on_publish-------------');
console.log(req.body);
}
var topic = req.body.topic;
var cachekey = 'pub:' + req.body.client_id + ':' + req.body.username + ':' + topic;
var authstatus = cache.get(cachekey);
if (typeof (authstatus) === 'undefined') {
cache.set(cachekey, true); // cache missed
}
var response
var decoded = require('jwt-verify').verify(req.body.username).res
if (decoded) {
if (decoded.role === 'realtimedb') response = { 'result': 'ok' }
else response = { result: { error: 'not allowed' } }
res.send(response);
next();
}
else {
var GGID = require('./utils/getGroupID');
var output = {};
GGID.getGroupID(req.body.username, req.body.client_id, function (group) {
if (group) {
var _ftopic = require('./utils/router').rewriteTopic(topic, 'pub', group, req.body.client_id, output); // get topic where concat with groupID
response = {
'result': 'ok',
'modifiers': {
'topic': _ftopic,
'qos': 0,
'retain': false
}
}
if (options.debug) {
console.log(response);
}
if (output.verb == 'get' || output.verb == 'read') {
response.modifiers.payload = Buffer.from(req.body.client_id).toString('base64');
}
// save on redis before send response
try {
on_message_redis(req.body.client_id, req.body.payload, topic)
res.send(response);
}
catch (e) {
res.send({ result: { error: 'not allowed' } });
}
next();
}
else {
res.send({ result: { error: 'not allowed' } });
next();
}
});
}
}
else {
res.send({ result: { error: 'not allowed' } });
next();
}
}
}
var validator = require('./validator');
var config = require('config');
// var seneca = require('seneca')({log: 'silent'}).client({ port: config.get('device_registry_port'), host: config.get('device_registry_host') });
var seneca = require('seneca')({ log: 'silent' }).client({ port: config.get('token_registry_port'), host: config.get('token_registry_host') });
var on_register_redis = require('./redis/on_register').on_register_redis
// https://github.com/isaacs/node-lru-cache
var LRU = require("lru-cache"),
cache = LRU({
max: 500,
maxAge: 1000 * 60 * 5
});
var debug = false;
// var authclient = require('seneca')({ log: 'silent' })
// .client({ port: config.get('authserv_port'), host: config.get('authserv_host') });
function authCheck(client_id, token, password, callback) {
const util = require('util')
// console.log(util.inspect(myObject, false, null, true));
// console.log('jwt-result:' + util.inspect(require('jwt-verify').verify(token), false, null, true))
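// Two auth paths (summary, see below): a username that verifies as a JWT is
// accepted directly; anything else is looked up in the token registry and
// checked against the token profile by validator.auth_connect().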
if (require('jwt-verify').verify(token).res) { // auth realtimedb by token
callback(true);
}
else {
console.log('token :' + token)
seneca.act('ms:tokenregistry, cmd:getAttributes, type:device, tokencode:' + token, function (err, res) {
console.log('result : ' + util.inspect(res.result, false, null, true))
if (res.result) {
var token_profile = (res && res.result) ? res.result : {};
var mqttauth = {
clientid: client_id,
token: token,
password: password
};
console.log('validator:', util.inspect(validator.auth_connect(mqttauth, token_profile), false, null, true))
callback(validator.auth_connect(mqttauth, token_profile).status);
}
else {
callback(false);
}
});
}
}
module.exports = function (options = {}) {
debug = options.debug || false;
return function (req, res, next) {
if (req.header('vernemq-hook') == 'auth_on_register' && req && req.body) {
if (debug) {
console.log('auth_on_register-------------');
console.log(req.body);
}
var cachekey = 'auth:' + req.body.client_id + ':' + req.body.username + ':' + req.body.password;
var authstatus = cache.get(cachekey);
if (typeof (authstatus) == 'undefined') {
authCheck(req.body.client_id, req.body.username, req.body.password, function (result) {
cache.set(cachekey, result);
if (result) {
try {
on_register_redis(req.body.client_id)
res.send({ result: 'ok' });
}
catch (e) {
res.send({ result: { error: 'not allowed' } });
}
} else {
res.send({ result: { error: 'not allowed' } });
}
});
}
else {
if (authstatus) {
on_register_redis(req.body.client_id)
res.send({ result: 'ok' });
} else {
res.send({ result: { error: 'not allowed' } });
}
}
next();
}
else {
res.send({ result: { error: 'not allowed' } });
next();
}
}
}
var config = require('config');
// https://github.com/isaacs/node-lru-cache
var LRU = require("lru-cache"),
cache = LRU({
max: 500,
maxAge: 1000 * 60 * 5
});
module.exports = function (options = {}) {
return function (req, res, next) {
if (options.debug) {
console.log('auth_on_subscribe-------------');
console.log(req.body);
}
if (req.header('vernemq-hook') == 'auth_on_subscribe' && req && req.body) {
var topic = req.body.topics[0].topic;
var cachekey = 'sub:' + req.body.client_id + ':' + req.body.username + ':' + topic;
var authstatus = cache.get(cachekey);
if (typeof (authstatus) == 'undefined') {
cache.set(cachekey, true); // cache missed
}
var response
var decoded = require('jwt-verify').verify(req.body.username).res
if (decoded) {
if (decoded.role === 'realtimedb') response = { 'result': 'ok' }
else response = { result: {error: 'not allowed'} }
if (options.debug) {
console.log('response:');
console.log(req.body);
}
res.send(response);
next();
}
else {
var GGID = require('./utils/getGroupID');
GGID.getGroupID(req.body.username, req.body.client_id, function (group) {
if (group) {
var _ftopic = require('./utils/router').rewriteTopic(topic, 'sub', group, req.body.client_id) // get topic where concat with groupID
var _topic = [{ // setTopic for response
'topic': _ftopic,
'qos': 0
}]
response = {
'result': 'ok',
'topics': _topic
}
if (options.debug) {
console.log(response);
}
res.send(response);
next();
}
else {
res.send({ result: {error: 'not allowed'} });
next();
}
});
}
}
else {
res.send({ result: {error: 'not allowed'} });
next();
}
}
}
{
"device_registry_host" : "DEVICEREGISTRY_HOST",
"device_registry_port" : "DEVICEREGISTRY_PORT",
"token_registry_host" : "TOKENREGISTRY_HOST",
"token_registry_port" : "TOKENREGISTRY_PORT",
"auth_on_register_debug" : "AUTH_ON_REGISTER_DEBUG",
"auth_on_publish_debug" : "AUTH_ON_PUBLISH_DEBUG",
"auth_on_subscribe_debug" : "AUTH_ON_SUBSCRIBE_DEBUG",
"on_publish_debug" : "ON_PUBLISH_DEBUG",
"on_deliver_debug" : "ON_DELIVER_DEBUG",
"on_unsubscribe_debug" : "ON_UNSUBSCRIBE_DEBUG",
"on_offline_debug" : "ON_OFFLINE_DEBUG"
}
{
"device_registry_host" : "127.0.0.1",
"device_registry_port" : 8990,
"auth_on_register_debug" : false,
"auth_on_publish_debug" : false,
"auth_on_subscribe_debug" : false,
"on_publish_debug" : false,
"on_deliver_debug" : false,
"on_unsubscribe_debug" : false,
"on_offline_debug" : false
}
{
"device_registry_host" : "alpha.nexpie.io",
"device_registry_port" : 8990,
"token_registry_host" : "localhost",
"token_registry_port" : 8790,
"auth_on_register_debug" : true,
"auth_on_publish_debug" : true,
"auth_on_subscribe_debug" : true,
"on_publish_debug" : true,
"on_deliver_debug" : true,
"on_unsubscribe_debug" : true,
"on_offline_debug" : true
}
{
"device_registry_host" : "alpha.nexpie.io",
"device_registry_port" : 8990,
"token_registry_host" : "alpha.nexpie.io",
"token_registry_port" : 8790,
"auth_on_register_debug" : true,
"auth_on_publish_debug" : true,
"auth_on_subscribe_debug" : true,
"on_publish_debug" : true,
"on_deliver_debug" : true,
"on_unsubscribe_debug" : true,
"on_offline_debug" : true
}
process.env["NODE_CONFIG_DIR"] = __dirname + "/config/";
var config = require('config');
var auth_on_register_debug = config.get('auth_on_register_debug');
var auth_on_publish_debug = config.get('auth_on_publish_debug');
var auth_on_subscribe_debug = config.get('auth_on_subscribe_debug');
var on_publish_debug = config.get('on_publish_debug');
var on_deliver_debug = config.get('on_deliver_debug');
var on_unsubscribe_debug = config.get('on_unsubscribe_debug');
var on_offline_debug = config.get('on_offline_debug');
var restify = require('restify');
var auth_on_register = require('./auth_on_register')({ debug: auth_on_register_debug });
var auth_on_publish = require('./auth_on_publish')({ debug: auth_on_publish_debug });
var auth_on_subscribe = require('./auth_on_subscribe')({ debug: auth_on_subscribe_debug });
var on_publish = require('./on_publish')({ debug: on_publish_debug });
var on_deliver = require('./on_deliver')({ debug: on_deliver_debug });
var on_unsubscribe = require('./on_unsubscribe')({ debug: on_unsubscribe_debug });
var on_offline = require('./on_offline')({ debug: on_offline_debug });
const server = restify.createServer({
name: 'authhook',
version: '1.0.1',
});
const port = 40000;
server.use(restify.plugins.bodyParser());
// for testing
function authByUserPasswd(client_id, username, password) {
if (!client_id || !username || !password) return false;
else return true;
}
server.get('/about', function (req, res, next) {
res.send(server.name);
});
server.post('/authreg', auth_on_register);
server.post('/authpub', auth_on_publish);
server.post('/authsub', auth_on_subscribe);
server.post('/onsub', (req, res, next) => {
console.log(req.body)
res.send('')
})
server.post('/onunsub', on_unsubscribe)
server.post('/ondeliver', on_deliver)
server.post('/onpub', on_publish)
server.post('/onoff', on_offline)
server.post('/ongone', on_offline)
server.listen(port, function () {
console.log('%s listening at %s', server.name, server.url);
});
var modify = require('./utils/modify');
module.exports = function(options = {}) {
return function (req, res, next) {
console.log('----on_deliver-----')
var topics = modify.removeTempWordInTopic(req.body.topic)
console.log(req.body)
// console.log(topics)
var response = {
'result': 'ok',
'modifiers': {
"topic": topics
}
}
console.log(response);
res.send(response)
}
// module.exports.on_deliver = on_deliver
// function joinTopic(topics) {
// var topic = topics.split('/!')[1]
// return topics.split('/!')[0] + topic.substring(topic.indexOf('/'), topic.length)
// }
}
var on_offline_redis = require('./redis/on_offline').on_offline_redis
module.exports = function (options = {}) {
debug = options.debug || false;
return function (req, res, next) {
console.log('on_offline')
on_offline_redis(req.body.client_id)
res.status(200);
res.send('');
}
}
var config = require('config');
// https://github.com/isaacs/node-lru-cache
var LRU = require("lru-cache") ,
cache = LRU({
max: 500,
maxAge: 1000 * 60 * 5
});
module.exports = function(options={}) {
return function(req, res, next) {
if (req.header('vernemq-hook') == 'on_publish' && req && req.body) {
if (options.debug) {
console.log('on_publish-------------');
console.log(req.body);
}
console.log(req.body)
var topic = req.body.topic;
var cachekey = 'pub:'+req.body.client_id +':'+ req.body.username+':'+topic;
var authstatus = cache.get(cachekey);
res.status(200);
res.send('');
next();
}
else {
res.status(200);
res.send('');
next();
}
}
}
module.exports = function(options = {}) {
return function (req, res, next) {
console.log('------on unsubscribe-------')
console.log(req.body)
var topic = req.body.topics[0];
var token = req.body.username
var client_id = req.body.client_id
var response
var decoded = require('jwt-verify').verify(token).res
if (decoded) {
if (decoded.role === 'realtimedb') {
response = { result: 'ok' }
}
else response = { result: {error: 'not allowed'} }
res.send(response);
next()
}
else {
var GGID = require('./utils/getGroupID');
GGID.getGroupID(token, client_id, function(group) {
if (group) {
var _ftopic = require('./utils/router').rewriteTopic(topic, 'unsub', group, client_id)
response = {
"result": "ok",
"topics": [_ftopic]
}
}
else response = { result: {error: 'not allowed'} }
if (options.debug) {
console.log('on_unsubscribe-------------');
console.log(response);
}
res.send(response);
next()
});
}
}
}
{
"name": "authhook",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"start": "node index.js",
"test": "mocha"
},
"author": "",
"license": "ISC",
"dependencies": {
"config": "^1.30.0",
"ioredis": "^4.2.0",
"jsonwebtoken": "^8.3.0",
"jwt-verify": "git+https://nexpienpm:CCqLQjg4ytvzN7QbssUV@dev.nexpie.com/npm/jwt-verify.git",
"lru-cache": "^4.1.3",
"restify": "^7.1.1",
"seneca": "^3.6.0",
"util": "^0.11.0"
},
"devDependencies": {
"mocha": "^5.2.0"
}
}
var Redis = require('ioredis')
var redis = new Redis()
function on_message_redis(deviceid, payload, topic) { // called for every published message (from auth_on_publish)
if (deviceid.indexOf('mqtt') === 0) {
console.log(deviceid)
}
else {
var information = setValue(deviceid, payload, topic)
console.log(information.total_message_size)
redis.hincrby(information.keys, 'actual_message_count', 1)
redis.hincrby(information.keys, 'charged_message_count', information.charged_message_count)
redis.hincrby(information.keys, 'total_message_size', Math.floor(information.total_message_size))
redis.hincrby(setBrokerValue().keys, 'actual_message_count', 1)
redis.hincrby(setBrokerValue().keys, 'charged_message_count', information.charged_message_count)
redis.hincrby(setBrokerValue().keys, 'total_message_size', Math.floor(information.total_message_size))
redis.hset(information.keys, 'last_publish', information.last_publish)
}
}
module.exports.on_message_redis = on_message_redis
redis.on("error", (error) => {
console.log("Redis connection error", error);
});
redis.on('reconnecting', function reconnecting() {
console.log('Connection reestablished');
});
redis.on('connect', function connect() {
console.log('connecting');
});
function setValue(deviceid, payload, topic) {
var dateNow = Math.floor(Date.now() / 1000)
var keys = 'deviceid:' + deviceid
var last_publish = dateNow
var total_message_size = (payload.length + topic.length) * (3 / 4)
console.log('total_message_size: ' + total_message_size)
var msg_count_size = total_message_size
var charged_message_count = 1
while (msg_count_size > 5000) {
charged_message_count += 1
msg_count_size = msg_count_size - 5000
}
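// Worked example (illustrative): a 6000-char base64 payload on a 20-char topic
// gives total_message_size = (6000 + 20) * 3/4 = 4515 bytes -> charged_message_count = 1;
// 8000 chars -> 6015 bytes -> 2 charged messages (one per started 5000-byte block).
// The 3/4 factor presumably approximates the decoded size of the base64 payload.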
return {
keys: keys,
last_publish: last_publish,
charged_message_count: charged_message_count,
total_message_size: total_message_size
}
}
function setBrokerValue() {
var keys = '_broker:1'
return {
keys: keys
}
}
\ No newline at end of file
var Redis = require('ioredis')
var redis = new Redis()
function on_offline_redis(deviceid) { // called when a client goes offline
if (deviceid.indexOf('mqtt') === 0) {
}
else {
var information = setValue(deviceid)
// ioredis hget is async (returns a promise) and stores values as strings
redis.hget(information.keys, 'status').then(function (status) {
if (status !== 'false') { // skip if already marked offline
redis.hset(information.keys, 'status', information.status, 'offline_on', information.offline_on)
deviceUsageCount()
}
})
}
}
module.exports.on_offline_redis = on_offline_redis
redis.on("error", (error) => {
console.log("Redis connection error", error);
});
redis.on('reconnecting', function reconnecting() {
console.log('Connection reestablished');
});
redis.on('connect', function connect() {
console.log('connecting');
});
function setValue(deviceid) {
var dateNow = Math.floor(Date.now() / 1000)
var keys = 'deviceid:' + deviceid
var status = false
var offline_on = dateNow
return {
keys: keys,
status: status,
offline_on: offline_on
}
}
function deviceUsageCount() {
console.log('device_usage_offline')
redis.hincrby('_broker:1', 'device_online', -1)
}
\ No newline at end of file
var Redis = require('ioredis')
var redis = new Redis()
function on_register_redis(deviceid) { // first time to access on authhook auth_on_register
if (deviceid.indexOf('mqtt') === 0) {
}
else {
var information = setValue(deviceid)
redis.hdel(information.keys, 'offline_on')
// ioredis hget is async (returns a promise) and stores values as strings
redis.hget(information.keys, 'status').then(function (status) {
if (status !== 'true') { // skip if already marked online
redis.hset(information.keys, 'status', information.status, 'register_on', information.register_on, 'last_check', information.last_check)
deviceUsageCount()
}
})
}
}
module.exports.on_register_redis = on_register_redis
redis.on("error", (error) => {
console.log("Redis connection error", error);
});
redis.on('reconnecting', function reconnecting() {
console.log('Connection reestablished');
});
redis.on('connect', function connect() {
console.log('connecting');
});
function setValue(deviceid) {
var dateNow = Math.floor(Date.now() / 1000)
var keys = 'deviceid:' + deviceid
var status = true
var register_on = dateNow
var last_check = dateNow
return {
keys: keys,
status: status,
register_on: register_on,
last_check: last_check
}
}
function deviceUsageCount() {
console.log('device_usage_online')
redis.hincrby('_broker:1', 'device_online', 1)
}
\ No newline at end of file
REGISTRY_HOST=dock.nexpie.com
GROUPNAME=stack
PROJECTNAME=vernemq/authhook
RELEASE=1.0.0
var assert = require('assert')
var getTopic = require('../utils/router').rewriteTopic
describe('GetTopic', () => {
describe('realtime msg', () => {
it('should return topic that id_group have insert between @msg and topic', () => {
var topic = '@msg/any/light'
var id_group = 'bulb'
var tgConcat = '@msg/!bulb/any/light'
var _topic = getTopic(topic, id_group)
assert.equal(_topic, tgConcat)
})
})
describe('shadow', () => {
describe('update pub', () => {
it('should return topic that id have insert between @shadow/update and topic', () => {
var topic = '@shadow/update/any/light'
var id = 'd4484cdb-a47a-4936-bcfd-33c1af39b857'
var tgConcat = '@shadow/update/!d4484cdb-a47a-4936-bcfd-33c1af39b857/any/light'
var _topic = getTopic(topic, id)
assert.equal(_topic, tgConcat)
})
})
describe('update sub', () => {
it('should return topic that id have insert behind @shadow/updated', () => {
var topic = '@shadow/updated'
var id = 'd4484cdb-a47a-4936-bcfd-33c1af39b857'
var tgConcat = '@shadow/updated/!d4484cdb-a47a-4936-bcfd-33c1af39b857'
var _topic = getTopic(topic, id)
assert.equal(_topic, tgConcat)
})
})
describe('get pub', () => {
it('should return topic that id have insert between @shadow/get and topic', () => {
var topic = '@shadow/get/any/light'
var id = 'd4484cdb-a47a-4936-bcfd-33c1af39b857'
var tgConcat = '@shadow/get/!d4484cdb-a47a-4936-bcfd-33c1af39b857/any/light'
var _topic = getTopic(topic, id)
assert.equal(_topic, tgConcat)
})
})
describe('get sub', () => {
it('should return topic that id have insert between @client and #', () => {
var topic = '@client/#'
var id = 'd4484cdb-a47a-4936-bcfd-33c1af39b857'
var tgConcat = '@client/!d4484cdb-a47a-4936-bcfd-33c1af39b857/#'
var _topic = getTopic(topic, id)
assert.equal(_topic, tgConcat)
})
})
})
})
module.exports.getGroupID = getGroupID
var config = require('config');
var seneca = require('seneca')({log: 'silent'}).client({ port: config.get('device_registry_port'), host: config.get('device_registry_host') });
// var getRole = require('../checkClientRole').getRole
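// Resolution order: a groupId claim in a verified JWT wins; otherwise the
// device registry is consulted, and failing that the client_id itself serves
// as a private group to isolate the device.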
function getGroupID(token, client_id, callback) {
var role = require('jwt-verify').verify(token).res
if (role && role.hasOwnProperty('groupId')) {
callback(role.groupId);
}
else {
doDB(client_id, function(groupid) {
if (groupid) callback(groupid);
else callback(client_id); // fake group to isolate device
});
}
}
function doDB(client_id, callback) {
seneca.act('cmd:getGroupByClientid, clientid:' + client_id, function (err, res) {
if (err) callback(null);
else callback((res && res.result && res.result[0]) ? res.result[0] : null);
});
}
module.exports.insertWordIntoTopic = insertWordIntoTopic
module.exports.removeTempWordInTopic = removeTempWordInTopic
module.exports.replaceWords = replaceWords
module.exports.cleanUpTopic = cleanUpTopic
/*
Ex: insertWordIntoTopic('@shadow/home/temp', '!GROUP', 1)
expected return : "@shadow/!GROUP/home/temp"
*/
function insertWordIntoTopic(topic, word, pos) {
var a = topic.split('/');
a.splice(pos, 0, word);
// console.log('>>> insertWordIntoTopic('+topic +','+ word +','+ pos +');');
// console.log(a.join('/'));
return a.join('/');
}
/*
Ex : removeTempWordInTopic('@shadow/!GROUP/home/temp')
expected return : "@shadowhome/temp"
*/
function removeTempWordInTopic(topic) {
var a = topic.split('/');
var k=0, l=a.length;
while (k<l) {
if (a[k].substr(0,1)=='!') {
a.splice(k,1);
l--;
}
else k++;
}
return a.join('/');
}
function replaceWords(topic, word, from, to) {
var a = topic.split('/');
a.splice(from,to-from+1);
if (word != null) a.splice(from, 0, word);
return a.join('/');
}
// Sample
// var str = insertWordIntoTopic('@shadow/home/temp', '!GROUP', 0);
// console.log(str);
// console.log(removeTempWordInTopic(str));
//console.log(replaceWords('@shadow/set/home/bob/bedroom/temp', 'CLIENTID-XXXXX', 2, 3));
function cleanUpTopic(topic) {
topic = topic.trim();
if (topic.substr(-1)=='/') return topic.slice(0, -1); // strip the trailing slash
return topic;
}
var modify = require('./modify');
function rewriteTopic(topic, op, groupid, clientid, output) {
var chunk = topic.split('/');
switch (chunk[0]) {
case '@msg' :
return modify.insertWordIntoTopic(topic, '!'+groupid, 1);
case '@feed' :
if (chunk[1] == 'senml') {
if (chunk[2]=='update' || chunk[2].startsWith('update:')) {
return modify.insertWordIntoTopic(topic, clientid, 3);
}
else return topic;
}
else return topic;
case '@shadow' :
if (chunk[1] == 'data') {
if (chunk[2]=='get' || chunk[2]=='update' || chunk[2].startsWith('get:') || chunk[2].startsWith('update:')) {
return modify.insertWordIntoTopic(topic, clientid, 3);
}
else if (chunk[2]=='updated' ) {
return modify.insertWordIntoTopic(topic, '!'+clientid, 3);
}
else if ( chunk[2].startsWith('updated:')) {
return modify.insertWordIntoTopic(topic, '!'+groupid, 3);
}
else {
return topic;
}
}
else if (chunk[1] == 'state') {
if (chunk[2]=='get' || chunk[2]=='update' || chunk[2].startsWith('get:') || chunk[2].startsWith('update:')) {
return modify.insertWordIntoTopic(topic, clientid, 3);
}
else if (chunk[2]=='updated' || chunk[2]=='mismatched' ) {
return modify.insertWordIntoTopic(topic, '!'+clientid, 3);
}
else if ( chunk[2].startsWith('updated:') || chunk[2].startsWith('mismatched:')) {
return modify.insertWordIntoTopic(topic, '!'+groupid, 3);
}
else {
return topic;
}
}
break;
case '@private' :
//console.log(modify.insertWordIntoTopic(topic, '!'+clientid, 1));
return modify.insertWordIntoTopic(topic, '!'+clientid, 1);
}
}
module.exports.rewriteTopic = rewriteTopic
module.exports.auth_connect = auth_connect
/*
{ _key: '1196651',
_id: 'access_token/1196651',
_rev: '_XdtTzQK--_',
"type": "v1",
"code": "f2a05d9a420e141d2d268b0c41f2af8e",
"device": "bd2w9fkc-bobclient",
"scope": [
"w:@shadow/read",
"w:@shadow/write"
],
"iat": 1537502874269,
"nbf": 1537502874269,
"exp": 1537609874269,
"for": [
{
"clientid": "bd2w9fkc-bobclient",
"verify": false
}
],
"user": "nexpie"
}
*/
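// Given a profile like the sample above (illustrative): auth_connect() accepts
// the connection only if mqttauth.clientid appears in token_profile.for[] (when
// present) and the current time lies within [nbf, exp]; otherwise it returns
// status:false with a reason ('uneligible' or 'expired').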
function auth_connect(mqttauth, token_profile) {
console.log('auth_connect')
var dateNow = Math.floor(Date.now() / 1000)
var res = {
status : true
}
if (token_profile) {
if (token_profile.for) {
var found = false;
for (var i=0; i<token_profile.for.length; i++) {
if (token_profile.for[i].clientid == mqttauth.clientid) {
found = true;
break;
}
}
if (!found) return {status: false, reason: 'uneligible'};
}
if (dateNow > token_profile.exp) return {status: false, reason: 'expired'};
if (token_profile.nbf && (dateNow < token_profile.nbf)) return {status: false, reason: 'uneligible'};
return {status: true};
}
else return false;
}
\ No newline at end of file
%% -*- erlang -*-
main([ThisNode]) ->
code:add_paths(filelib:wildcard("/usr/lib/vernemq/lib/*/ebin")),
FileName = "/var/lib/vernemq/meta/peer_service/cluster_state",
case filelib:is_regular(FileName) of
true ->
{ok, Bin} = file:read_file(FileName),
{ok, State} = riak_dt_orswot:from_binary(Bin),
AThisNode = list_to_atom(ThisNode),
TargetNodes = riak_dt_orswot:value(State) -- [AThisNode],
L = lists:foldl(
fun(N, Acc) ->
Acc ++ atom_to_list(N) ++ "\n"
end, "", TargetNodes),
io:format(L);
false ->
io:format("")
end.
#!/usr/bin/env bash
IP_ADDRESS=$(ip -4 addr show eth0 | grep -oP "(?<=inet).*(?=/)"| sed -e "s/^[[:space:]]*//" | tail -n 1)
# Ensure correct ownership and permissions on volumes
chown vernemq:vernemq /var/lib/vernemq /var/log/vernemq
chmod 755 /var/lib/vernemq /var/log/vernemq
# Ensure the Erlang node name is set correctly
if env | grep -q "DOCKER_VERNEMQ_NODENAME"; then
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" /etc/vernemq/vm.args
else
sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${IP_ADDRESS}/" /etc/vernemq/vm.args
fi
if env | grep -q "DOCKER_VERNEMQ_DISCOVERY_NODE"; then
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${DOCKER_VERNEMQ_DISCOVERY_NODE}')\"" >> /etc/vernemq/vm.args
fi
# Cluster discovery implementation based on https://github.com/thesandlord/kubernetes-pod-ip-finder
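# The finder is expected to return a JSON array of pod IPs, e.g. (illustrative)
# ["172.17.0.6","172.17.0.7","null"]; entries that are "null" or this node's
# own IP are skipped below.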
if env | grep -q "KUBE_VERNEMQ_DISCOVERY_URL"; then
response=$(curl ${KUBE_VERNEMQ_DISCOVERY_URL})
IFS=','
nodes=($(echo "$response" | tr -d '[]"'))
length=$(echo ${#nodes[@]})
for i in "${nodes[@]}"
do
if [ "$i" != "null" ] && [ "$i" != "$IP_ADDRESS" ] && (($length > 1)); then
echo "Start Joining to VerneMQ@${i}."
echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${i}')\"" >> /etc/vernemq/vm.args
fi
done
IFS=''
fi
if env | grep -q "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=on"; then
echo "allow_anonymous = on" >> /etc/vernemq/vernemq.conf
fi
if env | grep -q "DOCKER_VERNEMQ_TRADE_CONSISTENCY=on"; then
echo "trade_consistency = on" >> /etc/vernemq/vernemq.conf
fi
if env | grep -q "DOCKER_VERNEMQ_ALLOW_MULTIPLE_SESSIONS=on"; then
echo "allow_multiple_sessions = on" >> /etc/vernemq/vernemq.conf
fi
if env | grep -q "DOCKER_VERNEMQ_MAX_CLIENT_ID_SIZE"; then
echo "max_client_id_size = ${DOCKER_VERNEMQ_MAX_CLIENT_ID_SIZE}" >> /etc/vernemq/vernemq.conf
fi
if [ -f /etc/vernemq/vernemq.conf.local ]; then
cp /etc/vernemq/vernemq.conf.local /etc/vernemq/vernemq.conf
else
sed -i '/########## Start ##########/,/########## End ##########/d' /etc/vernemq/vernemq.conf
echo "########## Start ##########" >> /etc/vernemq/vernemq.conf
env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >> /etc/vernemq/vernemq.conf
users_are_set=$(env | grep DOCKER_VERNEMQ_USER)
if [ ! -z "$users_are_set" ]; then
echo "vmq_passwd.password_file = /etc/vernemq/vmq.passwd" >> /etc/vernemq/vernemq.conf
touch /etc/vernemq/vmq.passwd
fi
for vernemq_user in $(env | grep DOCKER_VERNEMQ_USER); do
username=$(echo $vernemq_user | awk -F '=' '{ print $1 }' | sed 's/DOCKER_VERNEMQ_USER_//g' | tr '[:upper:]' '[:lower:]')
password=$(echo $vernemq_user | awk -F '=' '{ print $2 }')
vmq-passwd /etc/vernemq/vmq.passwd $username <<EOF
$password
$password
EOF
done
echo "erlang.distribution.port_range.minimum = 9100" >> /etc/vernemq/vernemq.conf
echo "erlang.distribution.port_range.maximum = 9109" >> /etc/vernemq/vernemq.conf
echo "listener.tcp.default = ${IP_ADDRESS}:1883" >> /etc/vernemq/vernemq.conf
echo "listener.ws.default = ${IP_ADDRESS}:8083" >> /etc/vernemq/vernemq.conf
echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >> /etc/vernemq/vernemq.conf
echo "listener.http.metrics = ${IP_ADDRESS}:8888" >> /etc/vernemq/vernemq.conf
echo "########## End ##########" >> /etc/vernemq/vernemq.conf
fi
# Check configuration file
su - vernemq -c "/usr/sbin/vernemq config generate 2>&1 > /dev/null" | tee /tmp/config.out | grep error
if [ $? -ne 1 ]; then
echo "configuration error, exit"
cat /tmp/config.out
exit 1
fi
pid=0
# SIGUSR1-handler
siguser1_handler() {
echo "stopped"
}
# SIGTERM-handler
sigterm_handler() {
if [ $pid -ne 0 ]; then
# this will stop the VerneMQ process
vmq-admin cluster leave node=VerneMQ@$IP_ADDRESS -k > /dev/null
wait "$pid"
fi
exit 143; # 128 + 15 -- SIGTERM
}
# setup handlers
# on callback, kill the last background process, which is `tail -f /dev/null`
# and execute the specified handler
trap 'kill ${!}; siguser1_handler' SIGUSR1
trap 'kill ${!}; sigterm_handler' SIGTERM
/usr/sbin/vernemq start
pid=$(ps aux | grep '[b]eam.smp' | awk '{print $2}')
while true
do
tail -f /var/log/vernemq/console.log & wait ${!}
done
+P 256000
-env ERL_MAX_ETS_TABLES 256000
-env ERL_CRASH_DUMP /erl_crash.dump
-env ERL_FULLSWEEP_AFTER 0
-env ERL_MAX_PORTS 65536
+A 64
-setcookie vmq
-name VerneMQ@127.0.0.1
+K true
+W w
-smp enable
+zdbbl 32768
# VerneMQube
## VerneMQ on Kubernetes - Auto Discovery Cluster
### Prerequisites
- First you need a [Kubernetes Cluster](http://kubernetes.io/gettingstarted/)
- Then you need to run [Kubernetes Pod IP finder](https://github.com/thesandlord/kubernetes-pod-ip-finder) for nodes discovery
NOTE: I didn't manage to make node discovery work with Services because of the Erlang Port Mapper daemon logic and the Service ClusterIP limitation. If you make it work, let me know what you did.
### How to Use
1. Launch Service
- ```kubectl create -f vernemq-service.yaml```
1. Launch Deployment
- ```kubectl create -f vernemq-deployment.yaml```
### Checking cluster status
```kubectl get pods```
```
vernemq-3099899313-7gkrr 2/2 Running 0 17s
vernemq-3099899313-qbi91 2/2 Running 0 17s
vernemq-3099899313-u5rrq 2/2 Running 0 17s
kubernetes-pod-ip-finder-6le9j 1/1 Running 0 4h
```
```kubectl exec vernemq-3099899313-7gkrr vmq-admin cluster status```
```
+------------------+-------+
| Node |Running|
+------------------+-------+
|VerneMQ@172.17.0.6| true |
|VerneMQ@172.17.0.7| true |
|VerneMQ@172.17.0.8| true |
+------------------+-------+
```
apiVersion: v1
kind: Service
metadata:
name: redis-service
spec:
ports:
- port: 6379
targetPort: 6379
selector:
"app": "nexpieio-vernemq-broker"
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: vernemq
namespace: default
labels:
app: broker
tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 1
# selector can be applied automatically
# from the labels in the pod template if not set
selector:
matchLabels:
app: broker
tier: backend
template:
metadata:
labels:
app: broker
tier: backend
spec:
containers:
- name: vernemq-authhook
image: dock.nexpie.com/stack/broker/authhook
ports:
- containerPort: 40000
name: authhook
- name: vernemq
image: dock.nexpie.com/stack/broker
env:
- name: KUBE_VERNEMQ_DISCOVERY_URL
# Assumes namespace = default; change according to your namespace, e.g. kubernetes-pod-ip-finder.dev.svc.cluster.local/?app=broker
value: http://kubernetes-pod-ip-finder.default.svc.cluster.local/?app=broker
ports:
- containerPort: 1883
name: mqtt
- containerPort: 8080
name: websock
- containerPort: 44053
name: epmd
- containerPort: 4369
name: data
- containerPort: 9100
- containerPort: 9101
- containerPort: 9102
- containerPort: 9103
- containerPort: 9104
- containerPort: 9105
- containerPort: 9106
- containerPort: 9107
- containerPort: 9108
- containerPort: 9109
apiVersion: v1
kind: Service
metadata:
name: vernemq
labels:
app: broker
tier: backend
spec:
selector:
app: broker
tier: backend
ports:
- port: 8080
targetPort: 80
name: websock
- port: 1883
targetPort: 1883
name: mqtt
{
"lockfileVersion": 1
}
node_modules
.DS_Store
FROM node:8.10.0-alpine
WORKDIR /usr/src/app
ENV TERM=xterm
RUN apk add --update --no-cache python \
python-dev \
py-pip \
yarn \
openssh-client \
&& rm -rf /var/cache/apk/*
COPY . .
RUN npm install
EXPOSE 40000
CMD npm start
# a helper shell script
MAKE_VERSION=1.1.2
define MAKEFUNCTIONCODE
function hasChanges() {
# old version: just track uncommitted + untracked files
# test -n "$$(git status -s .)"
# expect 1 line to be normal, so the untracked .make-function file is ignored
test 1 -ne "$$(git status -s . | wc -l )"
}
function getRelease() {
awk -F= '/^RELEASE=/{print $$2}' release.conf
}
function getBaseTag() {
sed -n -e "s/^tag=\(.*\)$$(getRelease)\$$/\1/p" release.conf
}
function getTag() {
if [ -z "$$1" ] ; then
awk -F= '/^tag/{print $$2}' release.conf
else
echo "$$(getBaseTag)$$1"
fi
}
function setRelease() {
if [ -n "$$1" ] ; then
sed -i.x -e "s/^tag=.*/tag=$$(getTag $$1)/" release.conf
sed -i.x -e "s/^RELEASE=.*/RELEASE=$$1/g" release.conf
rm -f release.conf.x
runPreTagCommand "$$1"
else
echo "ERROR: missing release version parameter " >&2
return 1
fi
}
function runPreTagCommand() {
if [ -n "$$1" ] ; then
COMMAND=$$(sed -n -e "s/@@RELEASE@@/$$1/g" -e 's/^pre_tag_command=\(.*\)/\1/p' release.conf)
if [ -n "$$COMMAND" ] ; then
if ! OUTPUT=$$(bash -c "$$COMMAND" 2>&1) ; then echo $$OUTPUT >&2 && exit 1 ; fi
fi
else
echo "ERROR: missing release version parameter " >&2
return 1
fi
}
function tagExists() {
tag=$${1:-$$(getTag)}
test -n "$$tag" && test -n "$$(git tag | grep "^$$tag\$$")"
}
function differsFromRelease() {
tag=$$(getTag)
! tagExists $$tag || test -n "$$(git diff --shortstat -r $$tag .)"
}
function getVersion() {
result=$$(getRelease)
if differsFromRelease; then
result="$$result-$$(git log -n 1 --format=%h .)"
fi
if hasChanges ; then
result="$$result-dirty"
fi
echo $$result
}
function nextPatchLevel() {
version=$${1:-$$(getRelease)}
major_and_minor=$$(echo $$version | cut -d. -f1,2)
patch=$$(echo $$version | cut -d. -f3)
version=$$(printf "%s.%d" $$major_and_minor $$(($$patch + 1)))
echo $$version
}
function nextMinorLevel() {
version=$${1:-$$(getRelease)}
major=$$(echo $$version | cut -d. -f1);
minor=$$(echo $$version | cut -d. -f2);
version=$$(printf "%d.%d.0" $$major $$(($$minor + 1))) ;
echo $$version
}
function nextMajorLevel() {
version=$${1:-$$(getRelease)}
major=$$(echo $$version | cut -d. -f1);
version=$$(printf "%d.0.0" $$(($$major + 1)))
echo $$version
}
endef
SHELL=/bin/bash
include release.conf
IMAGE=$(REGISTRY_HOST)/$(GROUPNAME)/$(PROJECTNAME)
VERSION=$(shell . $(RELEASE_SUPPORT) ; getVersion)
TAG=$(shell . $(RELEASE_SUPPORT); getTag)
RELEASE_SUPPORT := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))/.make-function
export MAKEFUNCTIONCODE
.PHONY: pre-build docker-build post-build build release patch-release minor-release major-release tag check-status check-release showtag show-tag \
push do-push post-push post-init update-make
init:
@echo "$$MAKEFUNCTIONCODE" > ./.make-function
build: init pre-build docker-build post-build
pre-build:
post-build:
@rm .make-function
post-push:
@rm .make-function
post-init:
@rm .make-function
docker-build:
docker build -t $(IMAGE):$(VERSION) .
@DOCKER_MAJOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f1) ; \
DOCKER_MINOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f2) ; \
if [ $$DOCKER_MAJOR -eq 1 ] && [ $$DOCKER_MINOR -lt 10 ] ; then \
echo docker tag -f $(IMAGE):$(VERSION) $(IMAGE):latest ;\
docker tag -f $(IMAGE):$(VERSION) $(IMAGE):latest ;\
else \
echo docker tag $(IMAGE):$(VERSION) $(IMAGE):latest ;\
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest ; \
fi
release: check-status check-release build push
push: init do-push post-push
do-push:
docker push $(IMAGE):$(VERSION)
docker push $(IMAGE):latest
snapshot: build push
show-tag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@rm .make-function
showtag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@rm .make-function
tag-patch-release: init
tag-patch-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextPatchLevel)
tag-patch-release: release.conf tag
tag-minor-release: init
tag-minor-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMinorLevel)
tag-minor-release: release.conf tag
tag-major-release: init
tag-major-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMajorLevel)
tag-major-release: release.conf tag
patch-release: tag-patch-release release
@echo $(VERSION)
minor-release: tag-minor-release release
@echo $(VERSION)
major-release: tag-major-release release
@echo $(VERSION)
tag: TAG=$(shell . $(RELEASE_SUPPORT); getTag $(VERSION))
tag: check-status
@. $(RELEASE_SUPPORT) ; ! tagExists $(TAG) || (echo "ERROR: tag $(TAG) for version $(VERSION) already tagged in git" >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; setRelease $(VERSION)
git add .
git commit -m "bumped to version $(VERSION)" ;
git tag $(TAG) ;
@ if [ -n "$(shell git remote -v)" ] ; then git push --tags ; else echo 'no remote to push tags to' ; fi
check-status: init
@. $(RELEASE_SUPPORT) ; ! hasChanges || (echo "ERROR: there are still outstanding changes" >&2) ;
@rm .make-function
check-release: init release.conf post-init
@. $(RELEASE_SUPPORT) ; tagExists $(TAG) || (echo "ERROR: version not yet tagged in git. make [minor,major,patch]-release." >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; ! differsFromRelease $(TAG) || (echo "ERROR: current directory differs from tagged $(TAG). make [minor,major,patch]-release." ; exit 1)
update-make: init
@rm Makefile
wget https://dev.nexpie.com/sample/docker-makefile-sample/raw/master/Makefile
@rm .make-function
make-version:
@echo $(MAKE_VERSION)
{
"rabbitmq_host" : "npn1.kube.nexpie.com",
"rabbitmq_port" : "32672",
"rabbitmq_username" : "b1ffb9e39865f3cb70cc2ef14920e023",
"rabbitmq_password" : "c3efbc0c4bdb1747a31cebac20d9b9b8"
}
{
"rabbitmq_host" : "RABBITMQ_HOST",
"rabbitmq_port" : "RABBITMQ_PORT",
"rabbitmq_username" : "RABBITMQ_USERNAME",
"rabbitmq_password" : "RABBITMQ_PASSWORD"
}
{
"authserv_host" : "203.154.135.231",
"authserv_port" : 31088
}
// process.env["NODE_ENV"] = "development";
// process.env["NODE_CONFIG_DIR"] = __dirname + "/config/";
var restify = require('restify');
var on_publish = require('./on_publish')({debug:true});
const server = restify.createServer({
name: 'pubhook',
version: '1.0.0',
});
const port = 41000;
server.use(restify.plugins.bodyParser());
server.get('/about', function (req, res, next) {
res.send(server.name);
});
server.post('/onpub', on_publish);
server.listen(port, function () {
console.log('%s listening at %s', server.name, server.url);
});
var config = require('config');
// https://github.com/isaacs/node-lru-cache
var LRU = require("lru-cache") ,
cache = LRU({
max: 500,
maxAge: 1000 * 60 * 5
});
var client_wf = require('seneca')()
.use('seneca-amqp-transport')
.client({
type: 'amqp',
pin: 'cmd:wf',
url: 'amqp://'+config.get('rabbitmq_username')+':'+config.get('rabbitmq_password')+'@'+config.get('rabbitmq_host')+':'+config.get('rabbitmq_port')
});
var client_push = require('seneca')()
.use('seneca-amqp-transport')
.client({
type: 'amqp',
pin: 'cmd:push',
url: 'amqp://'+config.get('rabbitmq_username')+':'+config.get('rabbitmq_password')+'@'+config.get('rabbitmq_host')+':'+config.get('rabbitmq_port')
});
module.exports = function(options={}) {
return function(req, res, next) {
if (req.header('vernemq-hook') == 'on_publish' && req && req.body) {
if (options.debug) {
console.log('on_publish-------------');
console.log(req.body);
}
var data = req.body;
if (data.topic.substring(0,5) == '$exec') {
var stopic = data.topic.substr(6);
var a = stopic.split('/'); // split the part after "$exec/" so a[0] is the verb ('wf', 'writefeed', 'push')
var ptext = Buffer.from(data.payload, 'base64').toString();
if (stopic && ptext) {
switch (a[0]) {
case 'wf' :
case 'writefeed' :
let wfmsg = [{
"topic": stopic,
"payload":ptext,
"ts": Date.now(),
"cid": data.client_id,
}];
client_wf.act('cmd:wf', {
message: JSON.stringify(wfmsg)
}, (err, res) => {
if (err) {
throw err;
}
});
break;
case 'push' :
let pushmsg = [{
"topic": stopic,
"payload":ptext,
"ts": Date.now(),
"cid": data.client_id,
}];
client_push.act('cmd:push', {
message: JSON.stringify(pushmsg)
}, (err, res) => {
if (err) {
throw err;
}
});
break;
}
}
}
var topic = req.body.topic;
var cachekey = 'pub:'+req.body.client_id +':'+ req.body.username+':'+topic;
var authstatus = cache.get(cachekey);
res.status(200);
res.send('');
next();
}
else {
res.status(200);
res.send('');
next();
}
}
}
{
"name": "authhook",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"start": "node index.js"
},
"author": "",
"license": "ISC",
"dependencies": {
"config": "^1.30.0",
"lru-cache": "^4.1.3",
"restify": "^7.1.1",
"seneca": "^3.6.0",
"seneca-amqp-transport": "^2.2.0"
}
}
REGISTRY_HOST=dock.nexpie.com
GROUPNAME=stack
PROJECTNAME=vernemq/pubhook
RELEASE=1.0.0
FROM alpine:3.8
# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added
RUN addgroup -S redis && adduser -S -G redis redis
RUN apk add --no-cache \
# grab su-exec for easy step-down from root
'su-exec>=0.2' \
# add tzdata for https://github.com/docker-library/redis/issues/138
tzdata
ENV REDIS_VERSION 5.0.0
ENV REDIS_DOWNLOAD_URL http://download.redis.io/releases/redis-5.0.0.tar.gz
ENV REDIS_DOWNLOAD_SHA 70c98b2d0640b2b73c9d8adb4df63bcb62bad34b788fe46d1634b6cf87dc99a4
# for redis-sentinel see: http://redis.io/topics/sentinel
RUN set -ex; \
\
apk add --no-cache --virtual .build-deps \
coreutils \
gcc \
jemalloc-dev \
linux-headers \
make \
musl-dev \
; \
\
wget -O redis.tar.gz "$REDIS_DOWNLOAD_URL"; \
echo "$REDIS_DOWNLOAD_SHA *redis.tar.gz" | sha256sum -c -; \
mkdir -p /usr/src/redis; \
tar -xzf redis.tar.gz -C /usr/src/redis --strip-components=1; \
rm redis.tar.gz; \
\
# disable Redis protected mode [1] as it is unnecessary in context of Docker
# (ports are not automatically exposed when running inside Docker, but rather explicitly by specifying -p / -P)
# [1]: https://github.com/antirez/redis/commit/edd4d555df57dc84265fdfb4ef59a4678832f6da
grep -q '^#define CONFIG_DEFAULT_PROTECTED_MODE 1$' /usr/src/redis/src/server.h; \
sed -ri 's!^(#define CONFIG_DEFAULT_PROTECTED_MODE) 1$!\1 0!' /usr/src/redis/src/server.h; \
grep -q '^#define CONFIG_DEFAULT_PROTECTED_MODE 0$' /usr/src/redis/src/server.h; \
# for future reference, we modify this directly in the source instead of just supplying a default configuration flag because apparently "if you specify any argument to redis-server, [it assumes] you are going to specify everything"
# see also https://github.com/docker-library/redis/issues/4#issuecomment-50780840
# (more exactly, this makes sure the default behavior of "save on SIGTERM" stays functional by default)
\
make -C /usr/src/redis -j "$(nproc)"; \
make -C /usr/src/redis install; \
\
rm -r /usr/src/redis; \
\
runDeps="$( \
scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \
| tr ',' '\n' \
| sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \
apk add --virtual .redis-rundeps $runDeps; \
apk del .build-deps; \
\
redis-server --version
RUN mkdir /data && chown redis:redis /data
VOLUME /data
WORKDIR /data
COPY docker-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["docker-entrypoint.sh"]
EXPOSE 6379
CMD ["redis-server"]
\ No newline at end of file
# a helper shell script
MAKE_VERSION=1.1.2
define MAKEFUNCTIONCODE
function hasChanges() {
# old version: just track uncommitted + untracked files
# test -n "$$(git status -s .)"
# expect 1 line to be normal, so the untracked .make-function file is ignored
test 1 -ne "$$(git status -s . | wc -l )"
}
function getRelease() {
awk -F= '/^RELEASE=/{print $$2}' release.conf
}
function getBaseTag() {
sed -n -e "s/^tag=\(.*\)$$(getRelease)\$$/\1/p" release.conf
}
function getTag() {
if [ -z "$$1" ] ; then
awk -F= '/^tag/{print $$2}' release.conf
else
echo "$$(getBaseTag)$$1"
fi
}
function setRelease() {
if [ -n "$$1" ] ; then
sed -i.x -e "s/^tag=.*/tag=$$(getTag $$1)/" release.conf
sed -i.x -e "s/^RELEASE=.*/RELEASE=$$1/g" release.conf
rm -f release.conf.x
runPreTagCommand "$$1"
else
echo "ERROR: missing release version parameter " >&2
return 1
fi
}
function runPreTagCommand() {
if [ -n "$$1" ] ; then
COMMAND=$$(sed -n -e "s/@@RELEASE@@/$$1/g" -e 's/^pre_tag_command=\(.*\)/\1/p' release.conf)
if [ -n "$$COMMAND" ] ; then
if ! OUTPUT=$$(bash -c "$$COMMAND" 2>&1) ; then echo $$OUTPUT >&2 && exit 1 ; fi
fi
else
echo "ERROR: missing release version parameter " >&2
return 1
fi
}
function tagExists() {
tag=$${1:-$$(getTag)}
test -n "$$tag" && test -n "$$(git tag | grep "^$$tag\$$")"
}
function differsFromRelease() {
tag=$$(getTag)
! tagExists $$tag || test -n "$$(git diff --shortstat -r $$tag .)"
}
function getVersion() {
result=$$(getRelease)
if differsFromRelease; then
result="$$result-$$(git log -n 1 --format=%h .)"
fi
if hasChanges ; then
result="$$result-dirty"
fi
echo $$result
}
function nextPatchLevel() {
version=$${1:-$$(getRelease)}
major_and_minor=$$(echo $$version | cut -d. -f1,2)
patch=$$(echo $$version | cut -d. -f3)
version=$$(printf "%s.%d" $$major_and_minor $$(($$patch + 1)))
echo $$version
}
function nextMinorLevel() {
version=$${1:-$$(getRelease)}
major=$$(echo $$version | cut -d. -f1);
minor=$$(echo $$version | cut -d. -f2);
version=$$(printf "%d.%d.0" $$major $$(($$minor + 1))) ;
echo $$version
}
function nextMajorLevel() {
version=$${1:-$$(getRelease)}
major=$$(echo $$version | cut -d. -f1);
version=$$(printf "%d.0.0" $$(($$major + 1)))
echo $$version
}
endef
SHELL=/bin/bash
include release.conf
IMAGE=$(REGISTRY_HOST)/$(GROUPNAME)/$(PROJECTNAME)
VERSION=$(shell . $(RELEASE_SUPPORT) ; getVersion)
TAG=$(shell . $(RELEASE_SUPPORT); getTag)
RELEASE_SUPPORT := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))/.make-function
export MAKEFUNCTIONCODE
.PHONY: pre-build docker-build post-build build release patch-release minor-release major-release tag check-status check-release showtag show-tag \
push do-push post-push post-init update-make
init:
@echo "$$MAKEFUNCTIONCODE" > ./.make-function
build: init pre-build docker-build post-build
pre-build:
post-build:
@rm .make-function
post-push:
@rm .make-function
post-init:
@rm .make-function
docker-build:
docker build -t $(IMAGE):$(VERSION) .
@DOCKER_MAJOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f1) ; \
DOCKER_MINOR=$(shell docker -v | sed -e 's/.*version //' -e 's/,.*//' | cut -d\. -f2) ; \
if [ $$DOCKER_MAJOR -eq 1 ] && [ $$DOCKER_MINOR -lt 10 ] ; then \
echo docker tag -f $(IMAGE):$(VERSION) $(IMAGE):latest ;\
docker tag -f $(IMAGE):$(VERSION) $(IMAGE):latest ;\
else \
echo docker tag $(IMAGE):$(VERSION) $(IMAGE):latest ;\
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest ; \
fi
release: check-status check-release build push
push: init do-push post-push
do-push:
docker push $(IMAGE):$(VERSION)
docker push $(IMAGE):latest
snapshot: build push
show-tag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@rm .make-function
showtag: init release.conf
@. $(RELEASE_SUPPORT); getVersion
@rm .make-function
tag-patch-release: init
tag-patch-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextPatchLevel)
tag-patch-release: release.conf tag
tag-minor-release: init
tag-minor-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMinorLevel)
tag-minor-release: release.conf tag
tag-major-release: init
tag-major-release: VERSION := $(shell . $(RELEASE_SUPPORT); nextMajorLevel)
tag-major-release: release.conf tag
patch-release: tag-patch-release release
@echo $(VERSION)
minor-release: tag-minor-release release
@echo $(VERSION)
major-release: tag-major-release release
@echo $(VERSION)
tag: TAG=$(shell . $(RELEASE_SUPPORT); getTag $(VERSION))
tag: check-status
@. $(RELEASE_SUPPORT) ; ! tagExists $(TAG) || (echo "ERROR: tag $(TAG) for version $(VERSION) already tagged in git" >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; setRelease $(VERSION)
git add .
git commit -m "bumped to version $(VERSION)" ;
git tag $(TAG) ;
@ if [ -n "$(shell git remote -v)" ] ; then git push --tags ; else echo 'no remote to push tags to' ; fi
check-status: init
@. $(RELEASE_SUPPORT) ; ! hasChanges || (echo "ERROR: there are still outstanding changes" >&2) ;
@rm .make-function
check-release: init release.conf post-init
@. $(RELEASE_SUPPORT) ; tagExists $(TAG) || (echo "ERROR: version not yet tagged in git. make [minor,major,patch]-release." >&2 && exit 1) ;
@. $(RELEASE_SUPPORT) ; ! differsFromRelease $(TAG) || (echo "ERROR: current directory differs from tagged $(TAG). make [minor,major,patch]-release." ; exit 1)
update-make: init
@rm Makefile
wget https://dev.nexpie.com/sample/docker-makefile-sample/raw/master/Makefile
@rm .make-function
make-version:
@echo $(MAKE_VERSION)
#!/bin/sh
set -e
# first arg is `-f` or `--some-option`
# or first arg is `something.conf`
if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
set -- redis-server "$@"
fi
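# e.g. (illustrative): `docker run <image> --appendonly yes` is rewritten to
# `redis-server --appendonly yes`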
# allow the container to be started with `--user`
if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
find . \! -user redis -exec chown redis '{}' +
exec su-exec redis "$0" "$@"
fi
exec "$@"
\ No newline at end of file
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis
namespace: nexpieio
labels:
app: redis
tier: backend
spec:
# this replicas value is default
# modify it according to your case
replicas: 1
# selector can be applied automatically
# from the labels in the pod template if not set
selector:
matchLabels:
app: redis
tier: backend
template:
metadata:
labels:
app: redis
tier: backend
spec:
imagePullSecrets:
- name: docknexpie
containers:
- name: redis
image: dock.nexpie.com/stack/vernemq/redis:1.0.0-d640c8a
ports:
- containerPort: 6379
name: redis
REGISTRY_HOST=dock.nexpie.com
GROUPNAME=stack
PROJECTNAME=vernemq/redis
RELEASE=1.0.0
REGISTRY_HOST=dock.nexpie.com
GROUPNAME=stack
PROJECTNAME=vernemq
RELEASE=1.12.5