Commit 34e17f72 by Torkel Ödegaard

feat(alerting): requests looks to be working again

parent d1acfb44
......@@ -22,7 +22,7 @@ func ValidateOrgAlert(c *middleware.Context) {
}
}
// GET /api/alerts/changes
// GET /api/alerting/changes
func GetAlertChanges(c *middleware.Context) Response {
query := models.GetAlertChangesQuery{
OrgId: c.OrgId,
......@@ -69,7 +69,7 @@ func GetAlerts(c *middleware.Context) Response {
WarnLevel: alert.WarnLevel,
CritLevel: alert.CritLevel,
Frequency: alert.Frequency,
Title: alert.Title,
Name: alert.Name,
Description: alert.Description,
QueryRange: alert.QueryRange,
Aggregator: alert.Aggregator,
......
......@@ -244,9 +244,7 @@ func Register(r *macaron.Macaron) {
r.Group("/alerts", func() {
r.Group("/rules", func() {
r.Get("/:alertId/states", wrap(GetAlertStates))
r.Put("/:alertId/state", bind(m.UpdateAlertStateCommand{}), wrap(PutAlertState))
r.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
//r.Delete("/:alertId", ValidateOrgAlert, wrap(DelAlert)) disabled until we know how to handle it dashboard updates
r.Get("/", wrap(GetAlerts))
......
......@@ -77,7 +77,7 @@ func getDatasource(id int64, orgId int64) (*m.DataSource, error) {
return nil, err
}
return &query.Result, nil
return query.Result, nil
}
func ProxyDataSourceRequest(c *middleware.Context) {
......
......@@ -123,9 +123,7 @@ func GetDataSourceByName(c *middleware.Context) Response {
return ApiError(500, "Failed to query datasources", err)
}
ds := query.Result
dtos := convertModelToDtos(ds)
dtos := convertModelToDtos(query.Result)
return Json(200, &dtos)
}
......@@ -148,7 +146,7 @@ func GetDataSourceIdByName(c *middleware.Context) Response {
return Json(200, &dtos)
}
func convertModelToDtos(ds m.DataSource) dtos.DataSource {
func convertModelToDtos(ds *m.DataSource) dtos.DataSource {
return dtos.DataSource{
Id: ds.Id,
OrgId: ds.OrgId,
......
......@@ -11,7 +11,7 @@ type AlertRuleDTO struct {
WarnOperator string `json:"warnOperator"`
CritOperator string `json:"critOperator"`
Frequency int64 `json:"frequency"`
Title string `json:"title"`
Name string `json:"name"`
Description string `json:"description"`
QueryRange int `json:"queryRange"`
Aggregator string `json:"aggregator"`
......
......@@ -17,7 +17,7 @@ type AlertRule struct {
WarnOperator string `json:"warnOperator"`
CritOperator string `json:"critOperator"`
Frequency int64 `json:"frequency"`
Title string `json:"title"`
Name string `json:"name"`
Description string `json:"description"`
QueryRange int `json:"queryRange"`
Aggregator string `json:"aggregator"`
......@@ -38,7 +38,7 @@ func (this *AlertRule) Equals(other *AlertRule) bool {
result = result || this.Query != other.Query
result = result || this.QueryRefId != other.QueryRefId
result = result || this.Frequency != other.Frequency
result = result || this.Title != other.Title
result = result || this.Name != other.Name
result = result || this.Description != other.Description
result = result || this.QueryRange != other.QueryRange
//don't compare .State! That would be insane.
......
......@@ -131,13 +131,13 @@ type GetDataSourcesQuery struct {
type GetDataSourceByIdQuery struct {
Id int64
OrgId int64
Result DataSource
Result *DataSource
}
type GetDataSourceByNameQuery struct {
Name string
OrgId int64
Result DataSource
Result *DataSource
}
// ---------------------
......
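Note: with `Result` changed to a pointer on both query structs, callers no longer take the address of the result. A minimal sketch of a caller under that assumption (hypothetical helper name, mirroring `getDatasource` above):

```go
package example

import (
	"github.com/grafana/grafana/pkg/bus"
	m "github.com/grafana/grafana/pkg/models"
)

// lookupDatasource is a hypothetical helper: with Result now a *m.DataSource,
// the pointer from the query can be returned directly.
func lookupDatasource(id int64, orgId int64) (*m.DataSource, error) {
	query := &m.GetDataSourceByIdQuery{Id: id, OrgId: orgId}
	if err := bus.Dispatch(query); err != nil {
		return nil, err
	}
	return query.Result, nil // no &query.Result needed anymore
}
```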
......@@ -5,6 +5,7 @@ import (
"github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"
_ "github.com/grafana/grafana/pkg/tsdb/graphite"
)
var (
......@@ -31,11 +32,11 @@ func Init() {
// go scheduler.handleResponses()
}
func saveState(response *AlertResult) {
func saveState(result *AlertResult) {
cmd := &m.UpdateAlertStateCommand{
AlertId: response.Id,
NewState: response.State,
Info: response.Description,
AlertId: result.AlertJob.Rule.Id,
NewState: result.State,
Info: result.Description,
}
if err := bus.Dispatch(cmd); err != nil {
......
......@@ -3,6 +3,7 @@ package alerting
import (
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models"
)
......@@ -27,12 +28,13 @@ func ParseAlertsFromDashboard(cmd *m.SaveDashboardCommand) []*m.AlertRule {
WarnOperator: alerting.Get("warnOperator").MustString(),
CritOperator: alerting.Get("critOperator").MustString(),
Frequency: alerting.Get("frequency").MustInt64(),
Title: alerting.Get("title").MustString(),
Name: alerting.Get("name").MustString(),
Description: alerting.Get("description").MustString(),
QueryRange: alerting.Get("queryRange").MustInt(),
Aggregator: alerting.Get("aggregator").MustString(),
}
log.Info("Alertrule: %v", alert.Name)
for _, targetsObj := range panel.Get("targets").MustArray() {
target := simplejson.NewFromAny(targetsObj)
......
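For context, the parser above reads the panel's `alerting` object with the renamed `name` key via simplejson. A small illustrative sketch (hypothetical JSON values):

```go
package example

import (
	"fmt"

	"github.com/grafana/grafana/pkg/components/simplejson"
)

func main() {
	// Hypothetical panel "alerting" block, using the renamed "name" key.
	alertingJSON := []byte(`{
		"name": "active desktop users",
		"description": "restart webservers",
		"frequency": 10,
		"queryRange": 3600,
		"aggregator": "avg"
	}`)

	alerting, err := simplejson.NewJson(alertingJSON)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(alerting.Get("name").MustString())     // "active desktop users"
	fmt.Println(alerting.Get("frequency").MustInt64()) // 10
}
```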
......@@ -33,7 +33,7 @@ func NewEngine() *Engine {
}
func (e *Engine) Start() {
log.Info("Alerting: Engine.Start()")
log.Info("Alerting: engine.Start()")
go e.alertingTicker()
go e.execDispatch()
......@@ -51,13 +51,12 @@ func (e *Engine) alertingTicker() {
for {
select {
case tick := <-e.ticker.C:
// update rules ever tenth tick
// TEMP SOLUTION update rules ever tenth tick
if tickIndex%10 == 0 {
e.scheduler.Update(e.ruleReader.Fetch())
}
e.scheduler.Tick(tick, e.execQueue)
tickIndex++
}
}
......@@ -65,7 +64,7 @@ func (e *Engine) alertingTicker() {
func (e *Engine) execDispatch() {
for job := range e.execQueue {
log.Trace("Alerting: Engine:execDispatch() starting job %s", job.Rule.Title)
log.Trace("Alerting: engine:execDispatch() starting job %s", job.Rule.Name)
job.Running = true
e.executeJob(job)
}
......@@ -80,33 +79,39 @@ func (e *Engine) executeJob(job *AlertJob) {
select {
case <-time.After(time.Second * 5):
e.resultQueue <- &AlertResult{
Id: job.Rule.Id,
State: alertstates.Pending,
Duration: float64(time.Since(now).Nanoseconds()) / float64(1000000),
Error: fmt.Errorf("Timeout"),
AlertJob: job,
}
log.Trace("Alerting: engine.executeJob(): timeout")
case result := <-resultChan:
result.Duration = float64(time.Since(now).Nanoseconds()) / float64(1000000)
log.Trace("Alerting: engine.executeJob(): exeuction took %vms", result.Duration)
log.Trace("Alerting: engine.executeJob(): done %vms", result.Duration)
e.resultQueue <- result
}
}
func (e *Engine) resultHandler() {
for result := range e.resultQueue {
log.Debug("Alerting: engine.resultHandler(): alert(%d) status(%s) actual(%v) retry(%d)", result.Id, result.State, result.ActualValue, result.AlertJob.RetryCount)
log.Debug("Alerting: engine.resultHandler(): alert(%d) status(%s) actual(%v) retry(%d)", result.AlertJob.Rule.Id, result.State, result.ActualValue, result.AlertJob.RetryCount)
result.AlertJob.Running = false
if result.IsResultIncomplete() {
// handle result error
if result.Error != nil {
result.AlertJob.RetryCount++
if result.AlertJob.RetryCount < maxRetries {
log.Error(3, "Alerting: Rule('%s') Result Error: %v, Retrying..", result.AlertJob.Rule.Name, result.Error)
e.execQueue <- result.AlertJob
} else {
saveState(&AlertResult{
Id: result.Id,
State: alertstates.Critical,
Description: fmt.Sprintf("Failed to run check after %d retires", maxRetries),
})
log.Error(3, "Alerting: Rule('%s') Result Error: %v, Max retries reached", result.AlertJob.Rule.Name, result.Error)
result.State = alertstates.Critical
result.Description = fmt.Sprintf("Failed to run check after %d retires, Error: %v", maxRetries, result.Error)
saveState(result)
}
} else {
result.AlertJob.RetryCount = 0
......
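The timeout guard in `executeJob` is the standard select-on-result-or-timer pattern. A generic, self-contained sketch of that pattern (not the actual engine code):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout races a unit of work against a timer: whichever channel
// delivers first wins, mirroring the select in executeJob.
func runWithTimeout(work func() string, timeout time.Duration) (string, error) {
	resultChan := make(chan string, 1)
	go func() { resultChan <- work() }()

	select {
	case <-time.After(timeout):
		return "", errors.New("Timeout")
	case res := <-resultChan:
		return res, nil
	}
}

func main() {
	res, err := runWithTimeout(func() string {
		time.Sleep(10 * time.Millisecond)
		return "done"
	}, 5*time.Second)
	fmt.Println(res, err)
}
```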
......@@ -2,6 +2,7 @@ package alerting
import (
"fmt"
"strconv"
"math"
......@@ -78,38 +79,79 @@ var aggregator = map[string]aggregationFn{
}
func (e *ExecutorImpl) Execute(job *AlertJob, resultQueue chan *AlertResult) {
response, err := e.GetSeries(job)
timeSeries, err := e.executeQuery(job)
if err != nil {
resultQueue <- &AlertResult{State: alertstates.Pending, Id: job.Rule.Id, AlertJob: job}
resultQueue <- &AlertResult{
Error: err,
State: alertstates.Pending,
AlertJob: job,
}
}
result := e.validateRule(job.Rule, response)
result := e.evaluateRule(job.Rule, timeSeries)
result.AlertJob = job
resultQueue <- result
}
func (e *ExecutorImpl) GetSeries(job *AlertJob) (tsdb.TimeSeriesSlice, error) {
query := &m.GetDataSourceByIdQuery{
func (e *ExecutorImpl) executeQuery(job *AlertJob) (tsdb.TimeSeriesSlice, error) {
getDsInfo := &m.GetDataSourceByIdQuery{
Id: job.Rule.DatasourceId,
OrgId: job.Rule.OrgId,
}
err := bus.Dispatch(query)
if err := bus.Dispatch(getDsInfo); err != nil {
return nil, fmt.Errorf("Could not find datasource for %d", job.Rule.DatasourceId)
}
req := e.GetRequestForAlertRule(job.Rule, getDsInfo.Result)
result := make(tsdb.TimeSeriesSlice, 0)
resp, err := tsdb.HandleRequest(req)
if err != nil {
return nil, fmt.Errorf("Could not find datasource for %d", job.Rule.DatasourceId)
return nil, fmt.Errorf("Alerting: GetSeries() tsdb.HandleRequest() error %v", err)
}
// if query.Result.Type == m.DS_GRAPHITE {
// return GraphiteClient{}.GetSeries(*job, query.Result)
// }
for _, v := range resp.Results {
if v.Error != nil {
return nil, fmt.Errorf("Alerting: GetSeries() tsdb.HandleRequest() response error %v", v)
}
return nil, fmt.Errorf("Grafana does not support alerts for %s", query.Result.Type)
result = append(result, v.Series...)
}
return result, nil
}
func (e *ExecutorImpl) validateRule(rule *AlertRule, series tsdb.TimeSeriesSlice) *AlertResult {
func (e *ExecutorImpl) GetRequestForAlertRule(rule *AlertRule, datasource *m.DataSource) *tsdb.Request {
req := &tsdb.Request{
TimeRange: tsdb.TimeRange{
From: "-" + strconv.Itoa(rule.QueryRange) + "s",
To: "now",
},
Queries: tsdb.QuerySlice{
&tsdb.Query{
RefId: rule.QueryRefId,
Query: rule.Query,
DataSource: &tsdb.DataSourceInfo{
Id: datasource.Id,
Name: datasource.Name,
PluginId: datasource.Type,
Url: datasource.Url,
},
},
},
}
return req
}
func (e *ExecutorImpl) evaluateRule(rule *AlertRule, series tsdb.TimeSeriesSlice) *AlertResult {
log.Trace("Alerting: executor.evaluateRule: %v, query result: series: %v", rule.Name, len(series))
for _, serie := range series {
log.Info("Alerting: executor.validate: %v", serie.Name)
if aggregator[rule.Aggregator] == nil {
continue
}
......@@ -122,7 +164,6 @@ func (e *ExecutorImpl) validateRule(rule *AlertRule, series tsdb.TimeSeriesSlice
if critResult {
return &AlertResult{
State: alertstates.Critical,
Id: rule.Id,
ActualValue: aggValue,
Description: fmt.Sprintf(descriptionFmt, aggValue, serie.Name),
}
......@@ -134,12 +175,11 @@ func (e *ExecutorImpl) validateRule(rule *AlertRule, series tsdb.TimeSeriesSlice
if warnResult {
return &AlertResult{
State: alertstates.Warn,
Id: rule.Id,
Description: fmt.Sprintf(descriptionFmt, aggValue, serie.Name),
ActualValue: aggValue,
}
}
}
return &AlertResult{State: alertstates.Ok, Id: rule.Id, Description: "Alert is OK!"}
return &AlertResult{State: alertstates.Ok, Description: "Alert is OK!"}
}
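`evaluateRule` looks up an aggregation function by the rule's `Aggregator` name before comparing the aggregated value against the warn/crit levels. A simplified sketch of that lookup-table pattern (not the actual table or signatures):

```go
package main

import "fmt"

type aggregationFn func(values []float64) float64

// aggregators is an illustrative table keyed by aggregator name,
// analogous to the map consulted via aggregator[rule.Aggregator].
var aggregators = map[string]aggregationFn{
	"sum": func(values []float64) float64 {
		total := 0.0
		for _, v := range values {
			total += v
		}
		return total
	},
	"avg": func(values []float64) float64 {
		if len(values) == 0 {
			return 0
		}
		total := 0.0
		for _, v := range values {
			total += v
		}
		return total / float64(len(values))
	},
}

func main() {
	values := []float64{2, 4, 6}
	fmt.Println(aggregators["avg"](values)) // 4
	fmt.Println(aggregators["sum"](values)) // 12
}
```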
......@@ -20,7 +20,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{2, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Ok)
})
......@@ -31,7 +31,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{2, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Critical)
})
......@@ -42,7 +42,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{9, 0}, {9, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Critical)
})
......@@ -53,7 +53,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{9, 0}, {9, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Ok)
})
......@@ -64,7 +64,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{11, 0}, {9, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Ok)
})
......@@ -75,7 +75,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{1, 0}, {11, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Critical)
})
})
......@@ -89,7 +89,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{2, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Ok)
})
......@@ -101,7 +101,7 @@ func TestAlertingExecutor(t *testing.T) {
tsdb.NewTimeSeries("test1", [][2]float64{{11, 0}}),
}
result := executor.validateRule(rule, timeSeries)
result := executor.evaluateRule(rule, timeSeries)
So(result.State, ShouldEqual, alertstates.Critical)
})
})
......
package alerting
import "github.com/grafana/grafana/pkg/services/alerting/alertstates"
type AlertJob struct {
Offset int64
Delay bool
......@@ -11,18 +9,14 @@ type AlertJob struct {
}
type AlertResult struct {
Id int64
State string
ActualValue float64
Duration float64
Description string
Error error
AlertJob *AlertJob
}
func (ar *AlertResult) IsResultIncomplete() bool {
return ar.State == alertstates.Pending
}
type AlertRule struct {
Id int64
OrgId int64
......@@ -36,7 +30,7 @@ type AlertRule struct {
WarnOperator string
CritOperator string
Frequency int64
Title string
Name string
Description string
QueryRange int
Aggregator string
......
......@@ -60,7 +60,7 @@ func (arr *AlertRuleReader) Fetch() []*AlertRule {
model.CritLevel = ruleDef.CritLevel
model.CritOperator = ruleDef.CritOperator
model.Frequency = ruleDef.Frequency
model.Title = ruleDef.Title
model.Name = ruleDef.Name
model.Description = ruleDef.Description
model.Aggregator = ruleDef.Aggregator
model.State = ruleDef.State
......
......@@ -47,7 +47,7 @@ func (s *SchedulerImpl) Tick(tickTime time.Time, execQueue chan *AlertJob) {
for _, job := range s.jobs {
if now%job.Rule.Frequency == 0 && job.Running == false {
log.Trace("Scheduler: Putting job on to exec queue: %s", job.Rule.Title)
log.Trace("Scheduler: Putting job on to exec queue: %s", job.Rule.Name)
execQueue <- job
}
}
......
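The scheduler gates each job on a modulo check against its frequency. A minimal sketch, assuming `now` is the tick time expressed in epoch seconds:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A job fires when the current epoch second is an exact multiple
	// of its frequency (here: every 10 seconds), as in Scheduler.Tick.
	frequency := int64(10)
	now := time.Now().Unix()
	if now%frequency == 0 {
		fmt.Println("Putting job on to exec queue")
	} else {
		fmt.Printf("next run in %d seconds\n", frequency-now%frequency)
	}
}
```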
......@@ -31,7 +31,7 @@ func TestAlertRuleChangesDataAccess(t *testing.T) {
WarnOperator: ">",
CritOperator: ">",
Frequency: 10,
Title: "Alerting title",
Name: "Alerting title",
Description: "Alerting description",
QueryRange: 3600,
Aggregator: "avg",
......
......@@ -26,7 +26,7 @@ func TestAlertingDataAccess(t *testing.T) {
WarnOperator: ">",
CritOperator: ">",
Frequency: 10,
Title: "Alerting title",
Name: "Alerting title",
Description: "Alerting description",
QueryRange: 3600,
Aggregator: "avg",
......@@ -65,7 +65,7 @@ func TestAlertingDataAccess(t *testing.T) {
So(alert.CritOperator, ShouldEqual, ">")
So(alert.Query, ShouldEqual, "Query")
So(alert.QueryRefId, ShouldEqual, "A")
So(alert.Title, ShouldEqual, "Alerting title")
So(alert.Name, ShouldEqual, "Alerting title")
So(alert.Description, ShouldEqual, "Alerting description")
So(alert.QueryRange, ShouldEqual, 3600)
So(alert.Aggregator, ShouldEqual, "avg")
......@@ -189,7 +189,7 @@ func TestAlertingDataAccess(t *testing.T) {
WarnOperator: ">",
CritOperator: ">",
Frequency: 10,
Title: "Alerting title",
Name: "Alerting title",
Description: "Alerting description",
QueryRange: 3600,
Aggregator: "avg",
......
......@@ -25,7 +25,7 @@ func TestAlertingStateAccess(t *testing.T) {
WarnOperator: ">",
CritOperator: ">",
Frequency: 10,
Title: "Alerting title",
Name: "Alerting title",
Description: "Alerting description",
QueryRange: 3600,
Aggregator: "avg",
......
......@@ -110,7 +110,7 @@ func TestAlertModel(t *testing.T) {
"aggregator": "sum",
"queryRange": "10m",
"frequency": 10,
"title": "active desktop users",
"name": "active desktop users",
"description": "restart webservers"
},
"links": []
......@@ -386,7 +386,7 @@ func TestAlertModel(t *testing.T) {
So(v.Query, ShouldNotBeEmpty)
So(v.QueryRefId, ShouldNotBeEmpty)
So(v.QueryRange, ShouldNotBeEmpty)
So(v.Title, ShouldNotBeEmpty)
So(v.Name, ShouldNotBeEmpty)
So(v.Description, ShouldNotBeEmpty)
}
......
......@@ -19,22 +19,26 @@ func init() {
}
func GetDataSourceById(query *m.GetDataSourceByIdQuery) error {
sess := x.Limit(100, 0).Where("org_id=? AND id=?", query.OrgId, query.Id)
has, err := sess.Get(&query.Result)
datasource := m.DataSource{OrgId: query.OrgId, Id: query.Id}
has, err := x.Get(&datasource)
if !has {
return m.ErrDataSourceNotFound
}
query.Result = &datasource
return err
}
func GetDataSourceByName(query *m.GetDataSourceByNameQuery) error {
sess := x.Limit(100, 0).Where("org_id=? AND name=?", query.OrgId, query.Name)
has, err := sess.Get(&query.Result)
datasource := m.DataSource{OrgId: query.OrgId, Name: query.Name}
has, err := x.Get(&datasource)
if !has {
return m.ErrDataSourceNotFound
}
query.Result = &datasource
return err
}
......
......@@ -21,7 +21,7 @@ func addAlertMigrations(mg *Migrator) {
{Name: "crit_level", Type: DB_Float, Nullable: false},
{Name: "crit_operator", Type: DB_NVarchar, Length: 10, Nullable: false},
{Name: "frequency", Type: DB_BigInt, Nullable: false},
{Name: "title", Type: DB_NVarchar, Length: 255, Nullable: false},
{Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false},
{Name: "description", Type: DB_NVarchar, Length: 255, Nullable: false},
{Name: "query_range", Type: DB_Int, Nullable: false},
{Name: "aggregator", Type: DB_NVarchar, Length: 255, Nullable: false},
......@@ -32,7 +32,7 @@ func addAlertMigrations(mg *Migrator) {
}
// create table
mg.AddMigration("create alert_rule table v1", NewAddTableMigration(alertV1))
mg.AddMigration("create alert_rule table v2", NewAddTableMigration(alertV1))
alert_changes := Table{
Name: "alert_rule_change",
......
......@@ -26,7 +26,7 @@ func (bg *Batch) process(context *QueryContext) {
if executor == nil {
bg.Done = true
result := &BatchResult{
Error: errors.New("Could not find executor for data source type " + bg.Queries[0].DataSource.Type),
Error: errors.New("Could not find executor for data source type " + bg.Queries[0].DataSource.PluginId),
QueryResults: make(map[string]*QueryResult),
}
for _, query := range bg.Queries {
......
......@@ -13,12 +13,12 @@ func init() {
}
func getExecutorFor(dsInfo *DataSourceInfo) Executor {
if fn, exists := registry[dsInfo.Type]; exists {
if fn, exists := registry[dsInfo.PluginId]; exists {
return fn(dsInfo)
}
return nil
}
func RegisterExecutor(dsType string, fn GetExecutorFn) {
registry[dsType] = fn
func RegisterExecutor(pluginId string, fn GetExecutorFn) {
registry[pluginId] = fn
}
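Executors register themselves by plugin id from an `init()` function, which is why the alerting service pulls in the graphite package with a blank import (`_ "github.com/grafana/grafana/pkg/tsdb/graphite"`). A sketch of the pattern for a hypothetical plugin:

```go
package exampleplugin

import "github.com/grafana/grafana/pkg/tsdb"

// ExampleExecutor is a hypothetical executor, embedding the datasource
// info the same way the graphite executor does.
type ExampleExecutor struct {
	*tsdb.DataSourceInfo
}

func NewExampleExecutor(dsInfo *tsdb.DataSourceInfo) tsdb.Executor {
	return &ExampleExecutor{dsInfo}
}

// init runs when the package is imported (even via a blank import),
// making the executor discoverable under its plugin id.
func init() {
	tsdb.RegisterExecutor("example", NewExampleExecutor)
}

// Execute satisfies the executor contract; a real implementation would
// query the backend and fill in QueryResults per RefId.
func (e *ExampleExecutor) Execute(queries tsdb.QuerySlice, context *tsdb.QueryContext) *tsdb.BatchResult {
	return &tsdb.BatchResult{QueryResults: make(map[string]*tsdb.QueryResult)}
}
```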
package tsdb
type FakeExecutor struct {
results map[string]*QueryResult
resultsFn map[string]ResultsFn
}
type ResultsFn func(context *QueryContext) *QueryResult
func NewFakeExecutor(dsInfo *DataSourceInfo) *FakeExecutor {
return &FakeExecutor{
results: make(map[string]*QueryResult),
resultsFn: make(map[string]ResultsFn),
}
}
func (e *FakeExecutor) Execute(queries QuerySlice, context *QueryContext) *BatchResult {
result := &BatchResult{QueryResults: make(map[string]*QueryResult)}
for _, query := range queries {
if results, has := e.results[query.RefId]; has {
result.QueryResults[query.RefId] = results
}
if testFunc, has := e.resultsFn[query.RefId]; has {
result.QueryResults[query.RefId] = testFunc(context)
}
}
return result
}
func (e *FakeExecutor) Return(refId string, series TimeSeriesSlice) {
e.results[refId] = &QueryResult{
RefId: refId, Series: series,
}
}
func (e *FakeExecutor) HandleQuery(refId string, fn ResultsFn) {
e.resultsFn[refId] = fn
}
package graphite
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/Unknwon/log"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
)
type GraphiteExecutor struct {
*tsdb.DataSourceInfo
}
func NewGraphiteExecutor(dsInfo *tsdb.DataSourceInfo) tsdb.Executor {
return &GraphiteExecutor{dsInfo}
}
func init() {
tsdb.RegisterExecutor("graphite", NewGraphiteExecutor)
}
func (e *GraphiteExecutor) Execute(queries tsdb.QuerySlice, context *tsdb.QueryContext) *tsdb.BatchResult {
result := &tsdb.BatchResult{}
params := url.Values{
"from": []string{context.TimeRange.From},
"until": []string{context.TimeRange.To},
"format": []string{"json"},
"maxDataPoints": []string{"500"},
}
for _, query := range queries {
params["target"] = []string{
getTargetFromQuery(query.Query),
}
}
client := http.Client{Timeout: time.Duration(10 * time.Second)}
res, err := client.PostForm(e.Url+"/render?", params)
if err != nil {
result.Error = err
return result
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
result.Error = err
return result
}
var data []TargetResponseDTO
err = json.Unmarshal(body, &data)
if err != nil {
log.Info("Error: %v", string(body))
result.Error = err
return result
}
result.QueryResults = make(map[string]*tsdb.QueryResult)
queryRes := &tsdb.QueryResult{}
for _, series := range data {
queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
Name: series.Target,
Points: series.DataPoints,
})
}
result.QueryResults["A"] = queryRes
return result
}
func getTargetFromQuery(query string) string {
json, _ := simplejson.NewJson([]byte(query))
return json.Get("target").MustString()
}
package graphite
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
"github.com/grafana/grafana/pkg/tsdb"
)
func TestGraphite(t *testing.T) {
Convey("When executing graphite query", t, func() {
executor := NewGraphiteExecutor(&tsdb.DataSourceInfo{
Url: "http://localhost:8080",
})
queries := tsdb.QuerySlice{
&tsdb.Query{Query: "apps.backend.*.counters.requests.count"},
}
context := tsdb.NewQueryContext(queries, tsdb.TimeRange{})
result := executor.Execute(queries, context)
So(result.Error, ShouldBeNil)
Convey("Should return series", func() {
So(result.QueryResults, ShouldNotBeEmpty)
})
})
}
package graphite
type TargetResponseDTO struct {
Target string `json:"target"`
DataPoints [][2]float64 `json:"datapoints"`
}
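The DTO above mirrors graphite's /render JSON, where each series carries a target name and [value, timestamp] datapoints. A hypothetical decode example (struct copied locally to stay self-contained):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type TargetResponseDTO struct {
	Target     string       `json:"target"`
	DataPoints [][2]float64 `json:"datapoints"`
}

func main() {
	// Hypothetical graphite /render response body.
	body := []byte(`[
		{"target": "apps.backend.backend_01.counters.requests.count",
		 "datapoints": [[5.0, 1466000000], [7.0, 1466000060]]}
	]`)

	var data []TargetResponseDTO
	if err := json.Unmarshal(body, &data); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(data[0].Target, len(data[0].DataPoints)) // series name and point count
}
```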
package tsdb
import "time"
type TimeRange struct {
From time.Time
To time.Time
From string
To string
}
type Request struct {
......@@ -21,7 +19,7 @@ type Response struct {
type DataSourceInfo struct {
Id int64
Name string
Type string
PluginId string
Url string
Password string
User string
......
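`TimeRange` now carries raw string expressions (e.g. the `-<seconds>s` / `now` pair built by the alerting executor) rather than parsed `time.Time` values, and executors forward them to the datasource as-is. A minimal construction sketch (hypothetical helper):

```go
package example

import (
	"fmt"

	"github.com/grafana/grafana/pkg/tsdb"
)

// buildRange shows the string-based TimeRange: the expressions are passed
// through unparsed (graphite receives them as the from/until parameters).
func buildRange(queryRangeSeconds int) tsdb.TimeRange {
	return tsdb.TimeRange{
		From: fmt.Sprintf("-%ds", queryRangeSeconds), // e.g. "-3600s"
		To:   "now",
	}
}
```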
......@@ -27,6 +27,10 @@ func HandleRequest(req *Request) (*Response, error) {
response.BatchTimings = append(response.BatchTimings, batchResult.Timings)
if batchResult.Error != nil {
return nil, batchResult.Error
}
for refId, result := range batchResult.QueryResults {
context.Results[refId] = result
}
......
package tsdb
import (
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestMetricQuery(t *testing.T) {
Convey("When batches groups for query", t, func() {
Convey("Given 3 queries for 2 data sources", func() {
request := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1}},
{RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 1}},
{RefId: "C", Query: "asd", DataSource: &DataSourceInfo{Id: 2}},
},
}
batches, err := getBatches(request)
So(err, ShouldBeNil)
Convey("Should group into two batches", func() {
So(len(batches), ShouldEqual, 2)
})
})
Convey("Given query 2 depends on query 1", func() {
request := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1}},
{RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 2}},
{RefId: "C", Query: "#A / #B", DataSource: &DataSourceInfo{Id: 3}, Depends: []string{"A", "B"}},
},
}
batches, err := getBatches(request)
So(err, ShouldBeNil)
Convey("Should return three batch groups", func() {
So(len(batches), ShouldEqual, 3)
})
Convey("Group 3 should have group 1 and 2 as dependencies", func() {
So(batches[2].Depends["A"], ShouldEqual, true)
So(batches[2].Depends["B"], ShouldEqual, true)
})
})
})
Convey("When executing request with one query", t, func() {
req := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}},
},
}
fakeExecutor := registerFakeExecutor()
fakeExecutor.Return("A", TimeSeriesSlice{&TimeSeries{Name: "argh"}})
res, err := HandleRequest(req)
So(err, ShouldBeNil)
Convey("Should return query results", func() {
So(res.Results["A"].Series, ShouldNotBeEmpty)
So(res.Results["A"].Series[0].Name, ShouldEqual, "argh")
})
})
Convey("When executing one request with two queries from same data source", t, func() {
req := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}},
{RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}},
},
}
fakeExecutor := registerFakeExecutor()
fakeExecutor.Return("A", TimeSeriesSlice{&TimeSeries{Name: "argh"}})
fakeExecutor.Return("B", TimeSeriesSlice{&TimeSeries{Name: "barg"}})
res, err := HandleRequest(req)
So(err, ShouldBeNil)
Convey("Should return query results", func() {
So(len(res.Results), ShouldEqual, 2)
So(res.Results["B"].Series[0].Name, ShouldEqual, "barg")
})
Convey("Should have been batched in one request", func() {
So(len(res.BatchTimings), ShouldEqual, 1)
})
})
Convey("When executing one request with three queries from different datasources", t, func() {
req := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}},
{RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}},
{RefId: "C", Query: "asd", DataSource: &DataSourceInfo{Id: 2, Type: "test"}},
},
}
res, err := HandleRequest(req)
So(err, ShouldBeNil)
Convey("Should have been batched in two requests", func() {
So(len(res.BatchTimings), ShouldEqual, 2)
})
})
Convey("When query uses data source of unknown type", t, func() {
req := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "asdasdas"}},
},
}
res, err := HandleRequest(req)
So(err, ShouldBeNil)
Convey("Should return error", func() {
So(res.Results["A"].Error.Error(), ShouldContainSubstring, "not find")
})
})
Convey("When executing request that depend on other query", t, func() {
req := &Request{
Queries: QuerySlice{
{RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}},
{RefId: "B", Query: "#A / 2", DataSource: &DataSourceInfo{Id: 2, Type: "test"},
Depends: []string{"A"},
},
},
}
fakeExecutor := registerFakeExecutor()
fakeExecutor.HandleQuery("A", func(c *QueryContext) *QueryResult {
time.Sleep(10 * time.Millisecond)
return &QueryResult{
Series: TimeSeriesSlice{
&TimeSeries{Name: "Ares"},
}}
})
fakeExecutor.HandleQuery("B", func(c *QueryContext) *QueryResult {
return &QueryResult{
Series: TimeSeriesSlice{
&TimeSeries{Name: "Bres+" + c.Results["A"].Series[0].Name},
}}
})
res, err := HandleRequest(req)
So(err, ShouldBeNil)
Convey("Should have been batched in two requests", func() {
So(len(res.BatchTimings), ShouldEqual, 2)
})
Convey("Query B should have access to Query A results", func() {
So(res.Results["B"].Series[0].Name, ShouldEqual, "Bres+Ares")
})
})
}
func registerFakeExecutor() *FakeExecutor {
executor := NewFakeExecutor(nil)
RegisterExecutor("test", func(dsInfo *DataSourceInfo) Executor {
return executor
})
return executor
}
......@@ -21,7 +21,7 @@
<tr ng-repeat="alert in ctrl.alerts">
<td>
<a href="alerting/{{alert.id}}/states">
{{alert.title}}
{{alert.name}}
</a>
</td>
<td class="text-center">
......
......@@ -29,8 +29,8 @@ export class AlertTabCtrl {
_.defaults(this.panel.alerting, this.defaultValues);
var defaultTitle = (this.panelCtrl.dashboard.title + ' ' + this.panel.title + ' alert');
this.panel.alerting.title = this.panel.alerting.title || defaultTitle;
var defaultName = (this.panelCtrl.dashboard.title + ' ' + this.panel.title + ' alert');
this.panel.alerting.name = this.panel.alerting.name || defaultName;
this.panel.targets.map(target => {
this.metricTargets.push(target);
......
......@@ -60,7 +60,7 @@
<h5 class="section-heading">Alert info</h5>
<div class="gf-form">
<span class="gf-form-label width-10">Alert name</span>
<input type="text" class="gf-form-input width-22" ng-model="ctrl.panel.alerting.title">
<input type="text" class="gf-form-input width-22" ng-model="ctrl.panel.alerting.name">
</div>
<div class="gf-form-inline">
<div class="gf-form">
......