Commit 0e2b809f by Torkel Ödegaard

docker: added graphite v1.x docker block

parent 4fe79edd
@@ -32,6 +32,7 @@ add ./files/my_htpasswd /etc/nginx/.htpasswd
# Add system service config
add ./files/nginx.conf /etc/nginx/nginx.conf
add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
# Nginx
#
# graphite
@@ -39,6 +40,7 @@ expose 80
# Carbon line receiver port
expose 2003
# Carbon cache query port
expose 7002
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>
RUN apt-get -y update \
&& apt-get -y upgrade \
&& apt-get -y --force-yes install vim \
nginx \
python-dev \
python-flup \
python-pip \
python-ldap \
expect \
git \
memcached \
sqlite3 \
libffi-dev \
libcairo2 \
libcairo2-dev \
python-cairo \
python-rrdtool \
pkg-config \
nodejs \
&& rm -rf /var/lib/apt/lists/*
# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
python-memcached==1.53 \
txAMQP==0.6.2 \
&& pip install --upgrade pip
# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install
# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
&& python ./setup.py install
# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
&& python ./setup.py install
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
&& PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings
# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js
# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf
# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
&& /usr/local/bin/django_admin_init.exp
# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd
# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run
# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh
# cleanup
RUN apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
CMD ["/sbin/my_init"]
/var/log/*.log /var/log/*/*.log {
weekly
size 50M
missingok
rotate 10
compress
delaycompress
notifempty
copytruncate
su root syslog
}
#!/bin/bash
conf_dir=/etc/graphite-statsd/conf
# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/graphite
# - /opt/graphite/conf
# - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
/usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi
# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
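The checks above exist so that empty host volumes get repopulated with defaults on first start. A sketch of the intended use, with an arbitrary host path and the illustrative image tag from the build example above:
# /srv/graphite/storage starts out empty, so 01_conf_init.sh re-runs django_admin_init.exp
docker run -d --name graphite \
  -v /srv/graphite/storage:/opt/graphite/storage \
  -p 80:80 graphite:1.0.2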
user www-data;
worker_processes 4;
pid /run/nginx.pid;
daemon off;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
##
# nginx-naxsi config
##
# Uncomment it if you installed nginx-naxsi
##
#include /etc/nginx/naxsi_core.rules;
##
# nginx-passenger config
##
# Uncomment it if you installed nginx-passenger
##
#passenger_root /usr;
#passenger_ruby /usr/bin/ruby;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
#mail {
# # See sample authentication script at:
# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
# # auth_http localhost/auth.php;
# # pop3_capabilities "TOP" "USER";
# # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
# server {
# listen localhost:110;
# protocol pop3;
# proxy on;
# }
#
# server {
# listen localhost:143;
# protocol imap;
# proxy on;
# }
#}
server {
listen 80;
root /opt/graphite/static;
index index.html;
location /media {
# django admin static files
alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
}
location /admin/auth/admin {
alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
}
location /admin/auth/user/admin {
alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
}
location / {
proxy_pass http://localhost:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
add_header 'Access-Control-Allow-Credentials' 'true';
}
}
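With this site config proxying / to gunicorn on port 8080, the Graphite render API is reachable through nginx. A quick check, assuming port 80 is published to the docker host:
# fetch an internal carbon metric as JSON through the nginx proxy
curl "http://localhost/render?target=carbon.agents.*.metricsReceived&from=-1h&format=json"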
#!/bin/bash
rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug >> /var/log/carbon-aggregator.log 2>&1
#!/bin/bash
rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug >> /var/log/carbon.log 2>&1
#!/bin/bash
export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite
#!/bin/bash
mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf
#!/bin/bash
exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
# The form of each line in this file should be as follows:
#
# output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
# <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
# <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
# <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
# prod.applications.apache.www01.requests
# prod.applications.apache.www01.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match multiple metric components, including the dots, use <<metric>> in the
# input template:
#
# <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
# This is a configuration file with AMQP enabled
[cache]
LOCAL_DATA_DIR =
# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf
# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000
# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
# Enable AMQP if you want to receive metrics using your AMQP broker
ENABLE_AMQP = True
# Verbose means a line will be logged for every metric received
# useful for testing
AMQP_VERBOSE = True
# your credentials for the amqp server
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# the network settings for the amqp server
# AMQP_HOST = localhost
# AMQP_PORT = 5672
# if you want to include the metric name as part of the message body
# instead of as the routing key, set this to True
# AMQP_METRIC_NAME_IN_BODY = False
# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.
[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
# This configuration file controls the behavior of the Dashboard UI, available
# at http://my-graphite-server/dashboard/.
#
# This file must contain a [ui] section that defines values for all of the
# following settings.
[ui]
default_graph_width = 400
default_graph_height = 250
automatic_variants = true
refresh_interval = 60
autocomplete_delay = 375
merge_hover_delay = 750
# You can set this to 'default', 'white', or a custom theme name.
# To create a custom theme, copy the dashboard-default.css file
# to dashboard-myThemeName.css in the content/css directory and
# modify it to your liking.
theme = default
[keyboard-shortcuts]
toggle_toolbar = ctrl-z
toggle_metrics_panel = ctrl-space
erase_all_graphs = alt-x
save_dashboard = alt-s
completer_add_metrics = alt-enter
completer_del_metrics = alt-backspace
give_completer_focus = shift-space
# These settings apply to the UI as a whole, all other sections in this file
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targeted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]
#scheme = basis.path.<field1>.<field2>.<fieldN>
#field1.label = Foo
#field2.label = Bar
#
#
# Where each <field> will be displayed as a dropdown box
# in the UI and the remaining portion of the namespace
# shown in the Metric Selector panel. The .label options set the labels
# displayed for each dropdown.
#
# For example:
#
#[Sales]
#scheme = sales.<channel>.<type>.<brand>
#channel.label = Channel
#type.label = Product Type
#brand.label = Brand
#
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
# will be available in the Metric Selector (upper-right panel).
[default]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False
[noc]
background = black
foreground = white
majorLine = white
minorLine = grey
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
fontName = Sans
fontSize = 10
fontBold = False
fontItalic = False
[plain]
background = white
foreground = black
minorLine = grey
majorLine = rose
[summary]
background = black
lineColors = #6666ff, #66ff66, #ff6666
[alphas]
background = white
foreground = black
majorLine = grey
minorLine = rose
lineColors = 00ff00aa,ff000077,00337799
# Relay destination rules for carbon-relay. Entries are scanned in order,
# and the first pattern a metric matches will cause processing to cease after sending
# unless `continue` is set to true
#
# [name]
# pattern = <regex>
# destinations = <list of destination addresses>
# continue = <boolean> # default: False
#
# name: Arbitrary unique name to identify the rule
# pattern: Regex pattern to match against the metric name
# destinations: Comma-separated list of destinations.
# ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
# continue: Continue processing rules if this rule matches (default: False)
# You must have exactly one section with 'default = true'
# Note that all destinations listed must also exist in carbon.conf
# in the DESTINATIONS setting in the [relay] section
[default]
default = true
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
# This file defines regular expression patterns that can be used to
# rewrite metric names in a search & replace fashion. It consists of two
# sections, [pre] and [post]. The rules in the pre section are applied to
# metric names as soon as they are received. The post rules are applied
# after aggregation has taken place.
#
# The general form of each rule is as follows:
#
# regex-pattern = replacement-text
#
# For example:
#
# [post]
# _sum$ =
# _avg$ =
#
# These rules would strip off a suffix of _sum or _avg from any metric names
# after aggregation.
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
[min]
pattern = \.lower$
xFilesFactor = 0.1
aggregationMethod = min
[max]
pattern = \.upper(_\d+)?$
xFilesFactor = 0.1
aggregationMethod = max
[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum
[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum
[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum
[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
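These rules only apply when a whisper file is created. One way to verify which method an existing series picked up is whisper-info.py inside the running container; the metric path below is hypothetical:
docker exec graphite whisper-info.py \
  /opt/graphite/storage/whisper/stats/timers/render/upper_90.wsp aggregationMethod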
# Schema definitions for Whisper files. Entries are scanned in order, and the first match wins.
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y
[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d
[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y
[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
# match one of these expressions will be persisted. If this file is empty or
# missing, all metrics will pass through.
# This file is reloaded automatically when changes are made
.*
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath
#Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(dirname( abspath(__file__) ), 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# Absolute path to the directory that holds media.
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
MIDDLEWARE_CLASSES = (
'graphite.middleware.LogExceptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'graphite.urls'
INSTALLED_APPS = (
'graphite.metrics',
'graphite.render',
'graphite.browser',
'graphite.composer',
'graphite.account',
'graphite.dashboard',
'graphite.whitelist',
'graphite.events',
'graphite.url_shortener',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'tagging',
)
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
GRAPHITE_WEB_APP_SETTINGS_LOADED = True
STATIC_URL = '/static/'
STATIC_ROOT = '/opt/graphite/static/'
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well
#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'
# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]
# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'
# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
# Logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
#LOG_METRIC_ACCESS = True
# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True
# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
#GRAPHITE_ROOT = '/opt/graphite'
# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
#CONF_DIR = '/opt/graphite/conf'
#STORAGE_DIR = '/opt/graphite/storage'
#CONTENT_DIR = '/opt/graphite/webapp/content'
# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
#WHISPER_DIR = '/opt/graphite/storage/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
#LOG_DIR = '/opt/graphite/storage/log/webapp'
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True
# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'
##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/opt/graphite/storage/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
#
#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0
#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *
import os
LOG_DIR = '/var/log/graphite'
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'
if (os.getenv("MEMCACHE_HOST") is not None):
MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")
if (os.getenv("DEFAULT_CACHE_DURATION") is not None):
DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))
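These last lines read the cache settings from the environment, so they can be overridden when the container is started; a sketch, with example values and the illustrative image tag used earlier:
docker run -d -p 80:80 \
  -e MEMCACHE_HOST=10.0.0.5:11211 \
  -e DEFAULT_CACHE_DURATION=120 \
  graphite:1.0.2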
{
"graphiteHost": "127.0.0.1",
"graphitePort": 2003,
"port": 8125,
"flushInterval": 10000
}
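With this config statsd listens for plaintext metrics on UDP 8125 and flushes to carbon every 10 seconds. A quick smoke test, assuming 8125/udp is published to the host:
# increment a test counter; it should show up under stats.* after the next flush
echo "deploys.test:1|c" | nc -u -w1 localhost 8125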
#!/usr/bin/env expect
set timeout -1
spawn /usr/local/bin/manage.sh
expect "Would you like to create one now" {
send "yes\r"
}
expect "Username" {
send "root\r"
}
expect "Email address:" {
send "root.graphite@mailinator.com\r"
}
expect "Password:" {
send "root\r"
}
expect "Password *:" {
send "root\r"
}
expect "Superuser created successfully"
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
graphite:
build: blocks/graphite1
ports:
- "8080:80"
- "2003:2003"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
fake-graphite-data:
image: grafana/fake-data-gen
net: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003
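Assuming this fragment ends up in the dev-environment docker-compose file, the block and the fake data generator can be started together with:
docker-compose up -d graphite fake-graphite-data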
[cache]
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf
# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000
# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
LOG_UPDATES = False
# Enable AMQP if you want to receive metrics using an AMQP broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False
# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #
# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.
[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
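The plaintext line receiver configured above accepts "metric value timestamp" lines on port 2003, which the compose fragment earlier publishes to the host; a quick test:
echo "test.smoke 42 $(date +%s)" | nc -w1 localhost 2003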
import datetime
import time
from django.utils.timezone import get_current_timezone
from django.core.urlresolvers import get_script_prefix
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from pytz import timezone
from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime
def to_timestamp(dt):
return time.mktime(dt.timetuple())
class EventEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return to_timestamp(obj)
return json.JSONEncoder.default(self, obj)
def view_events(request):
if request.method == "GET":
context = { 'events' : fetch(request),
'slash' : get_script_prefix()
}
return render_to_response("events.html", context)
else:
return post_event(request)
def detail(request, event_id):
e = get_object_or_404(models.Event, pk=event_id)
context = { 'event' : e,
'slash' : get_script_prefix()
}
return render_to_response("event.html", context)
def post_event(request):
if request.method == 'POST':
event = json.loads(request.body)
assert isinstance(event, dict)
values = {}
values["what"] = event["what"]
values["tags"] = event.get("tags", None)
values["when"] = datetime.datetime.fromtimestamp(
event.get("when", time.time()))
if "data" in event:
values["data"] = event["data"]
e = models.Event(**values)
e.save()
return HttpResponse(status=200)
else:
return HttpResponse(status=405)
def get_data(request):
if 'jsonp' in request.REQUEST:
response = HttpResponse(
"%s(%s)" % (request.REQUEST.get('jsonp'),
json.dumps(fetch(request), cls=EventEncoder)),
mimetype='text/javascript')
else:
response = HttpResponse(
json.dumps(fetch(request), cls=EventEncoder),
mimetype="application/json")
return response
def fetch(request):
#XXX we need to move to USE_TZ=True to get rid of naive-time conversions
def make_naive(dt):
if 'tz' in request.GET:
tz = timezone(request.GET['tz'])
else:
tz = get_current_timezone()
local_dt = dt.astimezone(tz)
if hasattr(local_dt, 'normalize'):
local_dt = local_dt.normalize()
return local_dt.replace(tzinfo=None)
if request.GET.get("from", None) is not None:
time_from = make_naive(parseATTime(request.GET["from"]))
else:
time_from = datetime.datetime.fromtimestamp(0)
if request.GET.get("until", None) is not None:
time_until = make_naive(parseATTime(request.GET["until"]))
else:
time_until = datetime.datetime.now()
tags = request.GET.get("tags", None)
if tags is not None:
tags = request.GET.get("tags").split(" ")
return [x.as_dict() for x in
models.Event.find_events(time_from, time_until, tags=tags)]
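post_event above accepts a JSON body with at least a "what" field ("tags", "when" and "data" are optional); a hedged example, assuming the default /events/ URL routing and port 80 published to the host:
curl -X POST http://localhost/events/ \
  -d '{"what": "deploy", "tags": "release", "data": "build 1234"}'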
[
{
"pk": 1,
"model": "auth.user",
"fields": {
"username": "admin",
"first_name": "",
"last_name": "",
"is_active": true,
"is_superuser": true,
"is_staff": true,
"last_login": "2011-09-20 17:02:14",
"groups": [],
"user_permissions": [],
"password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
"email": "root@example.com",
"date_joined": "2011-09-20 17:02:14"
}
}
]
# Edit this file to override the default graphite settings, do not edit settings.py
# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
#DEBUG = True
# Set your local timezone (django will try to figure this out automatically)
TIME_ZONE = 'UTC'
# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
#MEMCACHE_HOSTS = ['127.0.0.1:11211']
# Sometimes you need to do a lot of rendering work but cannot share your storage mount
#REMOTE_RENDERING = True
#RENDERING_HOSTS = ['fastserver01','fastserver02']
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
# If you've got more than one backend server they should all be listed here
#CLUSTER_SERVERS = []
# Override this if you need to provide documentation specific to your graphite deployment
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"
# Enable email-related features
#SMTP_SERVER = "mail.mycompany.com"
# LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
#DATABASE_ENGINE = 'mysql' # or 'postgres'
#DATABASE_NAME = 'graphite'
#DATABASE_USER = 'graphite'
#DATABASE_PASSWORD = 'graphite-is-awesome'
#DATABASE_HOST = 'mysql.mycompany.com'
#DATABASE_PORT = '3306'
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
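The entry above is a standard Apache MD5 htpasswd line for the user grafana; an equivalent line can be generated with apache2-utils (the password is a placeholder):
htpasswd -nbm grafana some-password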
daemon off;
user www-data;
worker_processes 1;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
server_names_hash_bucket_size 32;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
gzip on;
gzip_disable "msie6";
server {
listen 80 default_server;
server_name _;
open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;
location / {
proxy_pass http://127.0.0.1:8000;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header Host $host;
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
}
add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, OPTIONS";
add_header Access-Control-Allow-Headers "origin, authorization, accept";
location /content {
alias /opt/graphite/webapp/content;
}
location /media {
alias /usr/share/pyshared/django/contrib/admin/media;
}
}
}
{
graphitePort: 2003,
graphiteHost: "127.0.0.1",
port: 8125,
mgmt_port: 8126,
backends: ['./backends/graphite'],
debug: true
}
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min
[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max
[sum]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum
[default_average]
pattern = .*
xFilesFactor = 0.5
aggregationMethod = average
[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y
[highres]
pattern = ^highres.*
retentions = 1s:1d,1m:7d
[statsd]
pattern = ^statsd.*
retentions = 1m:7d,10m:1y
[default]
pattern = .*
retentions = 10s:1d,1m:7d,10m:1y
[supervisord]
nodaemon = true
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'
[program:nginx]
command = /usr/sbin/nginx
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true
[program:carbon-cache]
;user = www-data
command = /opt/graphite/bin/carbon-cache.py --debug start
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true
[program:graphite-webapp]
;user = www-data
directory = /opt/graphite/webapp
environment = PYTHONPATH='/opt/graphite/webapp'
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
stdout_logfile = /var/log/supervisor/%(program_name)s.log
stderr_logfile = /var/log/supervisor/%(program_name)s.log
autorestart = true