Commit cc6df0ac by Arve Knudsen, committed by GitHub

CloudWatch: Clean up code (#24666)

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
parent 7b6deef4
......@@ -9,6 +9,7 @@ import (
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/util/errutil"
)
func (e *CloudWatchExecutor) executeAnnotationQuery(ctx context.Context, queryContext *tsdb.TsdbQuery) (*tsdb.Response, error) {
......@@ -49,12 +50,12 @@ func (e *CloudWatchExecutor) executeAnnotationQuery(ctx context.Context, queryCo
}
resp, err := svc.DescribeAlarms(params)
if err != nil {
return nil, errors.New("Failed to call cloudwatch:DescribeAlarms")
return nil, errutil.Wrap("failed to call cloudwatch:DescribeAlarms", err)
}
alarmNames = filterAlarms(resp, namespace, metricName, dimensions, statistics, period)
} else {
if region == "" || namespace == "" || metricName == "" || len(statistics) == 0 {
return result, errors.New("Invalid annotations query")
return result, errors.New("invalid annotations query")
}
var qd []*cloudwatch.Dimension
......@@ -80,7 +81,7 @@ func (e *CloudWatchExecutor) executeAnnotationQuery(ctx context.Context, queryCo
}
resp, err := svc.DescribeAlarmsForMetric(params)
if err != nil {
return nil, errors.New("Failed to call cloudwatch:DescribeAlarmsForMetric")
return nil, errutil.Wrap("failed to call cloudwatch:DescribeAlarmsForMetric", err)
}
for _, alarm := range resp.MetricAlarms {
alarmNames = append(alarmNames, alarm.AlarmName)
......@@ -107,7 +108,7 @@ func (e *CloudWatchExecutor) executeAnnotationQuery(ctx context.Context, queryCo
}
resp, err := svc.DescribeAlarmHistory(params)
if err != nil {
return nil, errors.New("Failed to call cloudwatch:DescribeAlarmHistory")
return nil, errutil.Wrap("failed to call cloudwatch:DescribeAlarmHistory", err)
}
for _, history := range resp.AlarmHistoryItems {
annotation := make(map[string]string)
......@@ -178,6 +179,7 @@ func filterAlarms(alarms *cloudwatch.DescribeAlarmsOutput, namespace string, met
for _, s := range statistics {
if *alarm.Statistic == s {
found = true
break
}
}
if !found {
......
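Note on the errutil.Wrap change above: unlike errors.New, a wrap helper keeps the underlying AWS error in the chain so callers can still inspect the cause. A minimal, illustrative sketch of that behaviour using standard Go error wrapping (the wrap helper below is a stand-in, not Grafana's actual errutil implementation):

package main

import (
	"errors"
	"fmt"
)

// wrap is an illustrative stand-in for a helper like errutil.Wrap:
// it prefixes a message while keeping the original error in the chain.
func wrap(msg string, err error) error {
	return fmt.Errorf("%s: %w", msg, err)
}

func main() {
	cause := errors.New("AccessDenied: not authorized")
	err := wrap("failed to call cloudwatch:DescribeAlarms", cause)

	fmt.Println(err)                   // failed to call cloudwatch:DescribeAlarms: AccessDenied: not authorized
	fmt.Println(errors.Is(err, cause)) // true – the cause survives, unlike with errors.New
}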
......@@ -98,7 +98,6 @@ func (e *CloudWatchExecutor) alertQuery(ctx context.Context, logsClient *cloudwa
queryParams := queryContext.Queries[0].Model
startQueryOutput, err := e.executeStartQuery(ctx, logsClient, queryParams, queryContext.TimeRange)
if err != nil {
return nil, err
}
......@@ -128,7 +127,6 @@ func (e *CloudWatchExecutor) alertQuery(ctx context.Context, logsClient *cloudwa
}
func (e *CloudWatchExecutor) Query(ctx context.Context, dsInfo *models.DataSource, queryContext *tsdb.TsdbQuery) (*tsdb.Response, error) {
var result *tsdb.Response
e.DataSource = dsInfo
/*
......@@ -146,8 +144,9 @@ func (e *CloudWatchExecutor) Query(ctx context.Context, dsInfo *models.DataSourc
}
queryType := queryParams.Get("type").MustString("")
var err error
var err error
var result *tsdb.Response
switch queryType {
case "metricFindQuery":
result, err = e.executeMetricFindQuery(ctx, queryContext)
......@@ -187,7 +186,7 @@ func (e *CloudWatchExecutor) executeLogAlertQuery(ctx context.Context, queryCont
queryParams.Set("queryId", *result.QueryId)
// Get Query Results
// Get query results
getQueryResultsOutput, err := e.alertQuery(ctx, logsClient, queryContext)
if err != nil {
return nil, err
......@@ -204,14 +203,13 @@ func (e *CloudWatchExecutor) executeLogAlertQuery(ctx context.Context, queryCont
}
response := &tsdb.Response{
Results: make(map[string]*tsdb.QueryResult),
}
response.Results["A"] = &tsdb.QueryResult{
RefId: "A",
Dataframes: [][]byte{dataframeEnc},
Results: map[string]*tsdb.QueryResult{
"A": {
RefId: "A",
Dataframes: [][]byte{dataframeEnc},
},
},
}
return response, nil
}
......
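The response construction above moves from make-then-assign to a single composite literal. A small standalone sketch of the same pattern, with simplified stand-in types rather than the real tsdb structs:

package main

import "fmt"

// queryResult is a simplified stand-in for tsdb.QueryResult.
type queryResult struct {
	RefID      string
	Dataframes [][]byte
}

// response is a simplified stand-in for tsdb.Response.
type response struct {
	Results map[string]*queryResult
}

func main() {
	enc := []byte("frame-bytes")

	// One composite literal instead of make() followed by a separate map assignment.
	resp := &response{
		Results: map[string]*queryResult{
			"A": {RefID: "A", Dataframes: [][]byte{enc}},
		},
	}
	fmt.Println(resp.Results["A"].RefID)
}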
......@@ -37,7 +37,6 @@ func (q *cloudWatchQuery) isInferredSearchExpression() bool {
if len(q.Dimensions) == 0 {
return !q.MatchExact
}
if !q.MatchExact {
return true
}
......
......@@ -7,7 +7,8 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/cloudwatch"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var counter = 1
......@@ -34,17 +35,12 @@ func (client *cloudWatchFakeClient) GetMetricDataWithContext(ctx aws.Context, in
}
func TestGetMetricDataExecutorTest(t *testing.T) {
Convey("TestGetMetricDataExecutorTest", t, func() {
Convey("pagination works", func() {
executor := &CloudWatchExecutor{}
inputs := &cloudwatch.GetMetricDataInput{MetricDataQueries: []*cloudwatch.MetricDataQuery{}}
res, err := executor.executeRequest(context.Background(), &cloudWatchFakeClient{}, inputs)
So(err, ShouldBeNil)
So(len(res), ShouldEqual, 2)
So(len(res[0].MetricDataResults[0].Values), ShouldEqual, 2)
So(*res[0].MetricDataResults[0].Values[1], ShouldEqual, 23.5)
So(*res[1].MetricDataResults[0].Values[0], ShouldEqual, 100)
})
})
executor := &CloudWatchExecutor{}
inputs := &cloudwatch.GetMetricDataInput{MetricDataQueries: []*cloudwatch.MetricDataQuery{}}
res, err := executor.executeRequest(context.Background(), &cloudWatchFakeClient{}, inputs)
require.NoError(t, err)
require.Len(t, res, 2)
require.Len(t, res[0].MetricDataResults[0].Values, 2)
assert.Equal(t, 23.5, *res[0].MetricDataResults[0].Values[1])
assert.Equal(t, 100.0, *res[1].MetricDataResults[0].Values[0])
}
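The GoConvey assertions above are replaced with testify. A small self-contained example of the split used in the rewritten test: require stops the test immediately on failure (safe before dereferencing results), while assert records the failure and lets the test continue:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRequireVersusAssert(t *testing.T) {
	values := []float64{23.5, 100.0}
	var err error

	require.NoError(t, err)   // aborts the test immediately if err != nil
	require.Len(t, values, 2) // guards the index accesses below

	assert.Equal(t, 23.5, values[0]) // failures here are reported but don't stop the test
	assert.Equal(t, 100.0, values[1])
}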
......@@ -22,7 +22,6 @@ func (e *CloudWatchExecutor) executeLogActions(ctx context.Context, queryContext
for _, query := range queryContext.Queries {
query := query
eg.Go(func() error {
dataframe, err := e.executeLogAction(ectx, queryContext, query)
if err != nil {
......@@ -66,7 +65,6 @@ func (e *CloudWatchExecutor) executeLogActions(ctx context.Context, queryContext
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
......@@ -76,7 +74,6 @@ func (e *CloudWatchExecutor) executeLogActions(ctx context.Context, queryContext
response := &tsdb.Response{
Results: make(map[string]*tsdb.QueryResult),
}
for result := range resultChan {
response.Results[result.RefId] = result
}
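The query := query line kept in the loop above is the re-declaration that gives each goroutine launched by eg.Go its own copy of the loop variable (needed before Go 1.22 changed loop-variable scoping). A minimal sketch of the pattern with placeholder work:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	queries := []string{"A", "B", "C"}
	results := make(chan string, len(queries))

	var eg errgroup.Group
	for _, query := range queries {
		query := query // shadow the loop variable so each goroutine captures its own value
		eg.Go(func() error {
			results <- query
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	close(results)
	for r := range results {
		fmt.Println(r)
	}
}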
......@@ -189,9 +186,9 @@ func (e *CloudWatchExecutor) handleGetLogEvents(ctx context.Context, logsClient
func (e *CloudWatchExecutor) handleDescribeLogGroups(ctx context.Context, logsClient cloudwatchlogsiface.CloudWatchLogsAPI, parameters *simplejson.Json) (*data.Frame, error) {
logGroupNamePrefix := parameters.Get("logGroupNamePrefix").MustString("")
var response *cloudwatchlogs.DescribeLogGroupsOutput = nil
var err error
if len(logGroupNamePrefix) < 1 {
response, err = logsClient.DescribeLogGroupsWithContext(ctx, &cloudwatchlogs.DescribeLogGroupsInput{
Limit: aws.Int64(parameters.Get("limit").MustInt64(50)),
......@@ -202,7 +199,6 @@ func (e *CloudWatchExecutor) handleDescribeLogGroups(ctx context.Context, logsCl
LogGroupNamePrefix: aws.String(logGroupNamePrefix),
})
}
if err != nil || response == nil {
return nil, err
}
......@@ -230,7 +226,7 @@ func (e *CloudWatchExecutor) executeStartQuery(ctx context.Context, logsClient c
}
if !startTime.Before(endTime) {
return nil, fmt.Errorf("invalid time range: Start time must be before end time")
return nil, fmt.Errorf("invalid time range: start time must be before end time")
}
// The fields @log and @logStream are always included in the results of a user's query
......@@ -312,7 +308,6 @@ func (e *CloudWatchExecutor) handleGetQueryResults(ctx context.Context, logsClie
}
dataFrame, err := logsResultsToDataframes(getQueryResultsOutput)
if err != nil {
return nil, err
}
......
......@@ -13,6 +13,7 @@ import (
"github.com/grafana/grafana/pkg/tsdb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
//***
......@@ -112,7 +113,7 @@ func TestExecuteStartQuery(t *testing.T) {
var expectedResponse *cloudwatchlogs.StartQueryOutput = nil
assert.Equal(t, expectedResponse, response)
assert.Equal(t, fmt.Errorf("invalid time range: Start time must be before end time"), err)
assert.Equal(t, fmt.Errorf("invalid time range: start time must be before end time"), err)
}
......@@ -187,8 +188,11 @@ func TestHandleGetQueryResults(t *testing.T) {
})
frame, err := executor.handleGetQueryResults(context.Background(), logsClient, params, "A")
timeA, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-20 10:37:23.000")
timeB, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-20 10:40:43.000")
require.NoError(t, err)
timeA, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-20 10:37:23.000")
require.NoError(t, err)
timeB, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-20 10:40:43.000")
require.NoError(t, err)
expectedTimeField := data.NewField("@timestamp", nil, []*time.Time{
aws.Time(timeA), aws.Time(timeB),
})
......
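The test changes above stop discarding the error from time.Parse and check it with require.NoError instead. A tiny example of the reference-layout parsing used there, with the error handled:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Go layouts are written in terms of the reference time Mon Jan 2 15:04:05 2006.
	const layout = "2006-01-02 15:04:05.000"

	ts, err := time.Parse(layout, "2020-03-20 10:37:23.000")
	if err != nil {
		// A malformed value or layout surfaces here instead of silently yielding a zero time.
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(ts.UTC())
}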
......@@ -32,7 +32,7 @@ func logsResultsToDataframes(response *cloudwatchlogs.GetQueryResultsOutput) (*d
// Maintaining a list of field names in the order returned from CloudWatch
// as just iterating over fieldValues would not give a consistent order
fieldNames := make([]*string, 0)
fieldNames := make([]string, 0)
for i, row := range nonEmptyRows {
for _, resultField := range row {
......@@ -42,7 +42,7 @@ func logsResultsToDataframes(response *cloudwatchlogs.GetQueryResultsOutput) (*d
}
if _, exists := fieldValues[*resultField.Field]; !exists {
fieldNames = append(fieldNames, resultField.Field)
fieldNames = append(fieldNames, *resultField.Field)
// Check if field is time field
if _, err := time.Parse(cloudWatchTSFormat, *resultField.Value); err == nil {
......@@ -67,11 +67,11 @@ func logsResultsToDataframes(response *cloudwatchlogs.GetQueryResultsOutput) (*d
newFields := make([]*data.Field, 0)
for _, fieldName := range fieldNames {
newFields = append(newFields, data.NewField(*fieldName, nil, fieldValues[*fieldName]))
newFields = append(newFields, data.NewField(fieldName, nil, fieldValues[fieldName]))
if *fieldName == "@timestamp" {
if fieldName == "@timestamp" {
newFields[len(newFields)-1].SetConfig(&data.FieldConfig{Title: "Time"})
} else if *fieldName == logStreamIdentifierInternal || *fieldName == logIdentifierInternal {
} else if fieldName == logStreamIdentifierInternal || fieldName == logIdentifierInternal {
newFields[len(newFields)-1].SetConfig(
&data.FieldConfig{
Custom: map[string]interface{}{
......
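The parser change above swaps []*string field names for plain []string by dereferencing once at append time, so downstream comparisons and map lookups work on values. A small sketch of the same dereference-early idea with AWS-SDK-style *string members (the resultField type is simplified, not the real cloudwatchlogs one):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// resultField mimics the shape of a CloudWatch Logs result field: pointer members.
type resultField struct {
	Field *string
	Value *string
}

func main() {
	rows := []resultField{
		{Field: aws.String("@timestamp"), Value: aws.String("2020-03-02 15:04:05.000")},
		{Field: aws.String("@message"), Value: aws.String("hello")},
	}

	// Dereference once here so later code compares and indexes plain strings.
	fieldNames := make([]string, 0, len(rows))
	for _, rf := range rows {
		fieldNames = append(fieldNames, *rf.Field)
	}

	for _, name := range fieldNames {
		if name == "@timestamp" { // no *name needed downstream
			fmt.Println("found time field")
		}
	}
}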
......@@ -9,6 +9,7 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
//***
......@@ -126,10 +127,14 @@ func TestLogsResultsToDataframes(t *testing.T) {
},
}
dataframes, _ := logsResultsToDataframes(fakeCloudwatchResponse)
timeA, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 15:04:05.000")
timeB, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 16:04:05.000")
timeC, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 17:04:05.000")
dataframes, err := logsResultsToDataframes(fakeCloudwatchResponse)
require.NoError(t, err)
timeA, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 15:04:05.000")
require.NoError(t, err)
timeB, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 16:04:05.000")
require.NoError(t, err)
timeC, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 17:04:05.000")
require.NoError(t, err)
timeVals := []*time.Time{
&timeA, &timeB, &timeC,
}
......@@ -232,9 +237,12 @@ func TestGroupKeyGeneration(t *testing.T) {
}
func TestGroupingResults(t *testing.T) {
timeA, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 15:04:05.000")
timeB, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 16:04:05.000")
timeC, _ := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 17:04:05.000")
timeA, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 15:04:05.000")
require.NoError(t, err)
timeB, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 16:04:05.000")
require.NoError(t, err)
timeC, err := time.Parse("2006-01-02 15:04:05.000", "2020-03-02 17:04:05.000")
require.NoError(t, err)
timeVals := []*time.Time{
&timeA, &timeA, &timeA, &timeB, &timeB, &timeB, &timeC, &timeC, &timeC,
}
......@@ -344,6 +352,7 @@ func TestGroupingResults(t *testing.T) {
},
}
groupedResults, _ := groupResults(fakeDataFrame, []string{"@log"})
groupedResults, err := groupResults(fakeDataFrame, []string{"@log"})
require.NoError(t, err)
assert.ElementsMatch(t, expectedGroupedFrames, groupedResults)
}
......@@ -14,8 +14,8 @@ import (
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mockedEc2 struct {
......@@ -44,7 +44,7 @@ func (m mockedRGTA) GetResourcesPages(in *resourcegroupstaggingapi.GetResourcesI
func TestCloudWatchMetrics(t *testing.T) {
Convey("When calling getMetricsForCustomMetrics", t, func() {
t.Run("When calling getMetricsForCustomMetrics", func(t *testing.T) {
dsInfo := &DatasourceInfo{
Region: "us-east-1",
Namespace: "Foo",
......@@ -65,14 +65,13 @@ func TestCloudWatchMetrics(t *testing.T) {
},
}, nil
}
metrics, _ := getMetricsForCustomMetrics(dsInfo, f)
metrics, err := getMetricsForCustomMetrics(dsInfo, f)
require.NoError(t, err)
Convey("Should contain Test_MetricName", func() {
So(metrics, ShouldContain, "Test_MetricName")
})
assert.Contains(t, metrics, "Test_MetricName")
})
Convey("When calling getDimensionsForCustomMetrics", t, func() {
t.Run("When calling getDimensionsForCustomMetrics", func(t *testing.T) {
dsInfo := &DatasourceInfo{
Region: "us-east-1",
Namespace: "Foo",
......@@ -93,14 +92,13 @@ func TestCloudWatchMetrics(t *testing.T) {
},
}, nil
}
dimensionKeys, _ := getDimensionsForCustomMetrics(dsInfo, f)
dimensionKeys, err := getDimensionsForCustomMetrics(dsInfo, f)
require.NoError(t, err)
Convey("Should contain Test_DimensionName", func() {
So(dimensionKeys, ShouldContain, "Test_DimensionName")
})
assert.Contains(t, dimensionKeys, "Test_DimensionName")
})
Convey("When calling handleGetRegions", t, func() {
t.Run("When calling handleGetRegions", func(t *testing.T) {
executor := &CloudWatchExecutor{
ec2Svc: mockedEc2{RespRegions: ec2.DescribeRegionsOutput{
Regions: []*ec2.Region{
......@@ -117,16 +115,15 @@ func TestCloudWatchMetrics(t *testing.T) {
SecureJsonData: securejsondata.SecureJsonData{},
}
result, _ := executor.handleGetRegions(context.Background(), simplejson.New(), &tsdb.TsdbQuery{})
result, err := executor.handleGetRegions(context.Background(), simplejson.New(), &tsdb.TsdbQuery{})
require.NoError(t, err)
Convey("Should return regions", func() {
So(result[0].Text, ShouldEqual, "ap-east-1")
So(result[1].Text, ShouldEqual, "ap-northeast-1")
So(result[2].Text, ShouldEqual, "ap-northeast-2")
})
assert.Equal(t, "ap-east-1", result[0].Text)
assert.Equal(t, "ap-northeast-1", result[1].Text)
assert.Equal(t, "ap-northeast-2", result[2].Text)
})
Convey("When calling handleGetEc2InstanceAttribute", t, func() {
t.Run("When calling handleGetEc2InstanceAttribute", func(t *testing.T) {
executor := &CloudWatchExecutor{
ec2Svc: mockedEc2{Resp: ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
......@@ -153,15 +150,13 @@ func TestCloudWatchMetrics(t *testing.T) {
filters := make(map[string]interface{})
filters["tag:Environment"] = []string{"production"}
json.Set("filters", filters)
result, _ := executor.handleGetEc2InstanceAttribute(context.Background(), json, &tsdb.TsdbQuery{})
result, err := executor.handleGetEc2InstanceAttribute(context.Background(), json, &tsdb.TsdbQuery{})
require.NoError(t, err)
Convey("Should equal production InstanceId", func() {
So(result[0].Text, ShouldEqual, "i-12345678")
})
assert.Equal(t, "i-12345678", result[0].Text)
})
Convey("When calling handleGetEbsVolumeIds", t, func() {
t.Run("When calling handleGetEbsVolumeIds", func(t *testing.T) {
executor := &CloudWatchExecutor{
ec2Svc: mockedEc2{Resp: ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
......@@ -208,22 +203,21 @@ func TestCloudWatchMetrics(t *testing.T) {
json := simplejson.New()
json.Set("region", "us-east-1")
json.Set("instanceId", "{i-1, i-2, i-3, i-4}")
result, _ := executor.handleGetEbsVolumeIds(context.Background(), json, &tsdb.TsdbQuery{})
result, err := executor.handleGetEbsVolumeIds(context.Background(), json, &tsdb.TsdbQuery{})
require.NoError(t, err)
Convey("Should return all 8 VolumeIds", func() {
So(len(result), ShouldEqual, 8)
So(result[0].Text, ShouldEqual, "vol-1-1")
So(result[1].Text, ShouldEqual, "vol-1-2")
So(result[2].Text, ShouldEqual, "vol-2-1")
So(result[3].Text, ShouldEqual, "vol-2-2")
So(result[4].Text, ShouldEqual, "vol-3-1")
So(result[5].Text, ShouldEqual, "vol-3-2")
So(result[6].Text, ShouldEqual, "vol-4-1")
So(result[7].Text, ShouldEqual, "vol-4-2")
})
require.Len(t, result, 8)
assert.Equal(t, "vol-1-1", result[0].Text)
assert.Equal(t, "vol-1-2", result[1].Text)
assert.Equal(t, "vol-2-1", result[2].Text)
assert.Equal(t, "vol-2-2", result[3].Text)
assert.Equal(t, "vol-3-1", result[4].Text)
assert.Equal(t, "vol-3-2", result[5].Text)
assert.Equal(t, "vol-4-1", result[6].Text)
assert.Equal(t, "vol-4-2", result[7].Text)
})
Convey("When calling handleGetResourceArns", t, func() {
t.Run("When calling handleGetResourceArns", func(t *testing.T) {
executor := &CloudWatchExecutor{
rgtaSvc: mockedRGTA{
Resp: resourcegroupstaggingapi.GetResourcesOutput{
......@@ -257,15 +251,13 @@ func TestCloudWatchMetrics(t *testing.T) {
tags := make(map[string]interface{})
tags["Environment"] = []string{"production"}
json.Set("tags", tags)
result, _ := executor.handleGetResourceArns(context.Background(), json, &tsdb.TsdbQuery{})
Convey("Should return all two instances", func() {
So(result[0].Text, ShouldEqual, "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678901234567")
So(result[0].Value, ShouldEqual, "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678901234567")
So(result[1].Text, ShouldEqual, "arn:aws:ec2:us-east-1:123456789012:instance/i-76543210987654321")
So(result[1].Value, ShouldEqual, "arn:aws:ec2:us-east-1:123456789012:instance/i-76543210987654321")
result, err := executor.handleGetResourceArns(context.Background(), json, &tsdb.TsdbQuery{})
require.NoError(t, err)
})
assert.Equal(t, "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678901234567", result[0].Text)
assert.Equal(t, "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678901234567", result[0].Value)
assert.Equal(t, "arn:aws:ec2:us-east-1:123456789012:instance/i-76543210987654321", result[1].Text)
assert.Equal(t, "arn:aws:ec2:us-east-1:123456789012:instance/i-76543210987654321", result[1].Value)
})
}
......@@ -281,5 +273,4 @@ func TestParseMultiSelectValue(t *testing.T) {
values = parseMultiSelectValue("i-{01}")
assert.Equal(t, []string{"i-{01}"}, values)
}
......@@ -25,6 +25,10 @@ func (e *CloudWatchExecutor) transformRequestQueriesToCloudWatchQueries(requestQ
id = fmt.Sprintf("%s_%v", id, strings.ReplaceAll(*stat, ".", "_"))
}
if _, ok := cloudwatchQueries[id]; ok {
return nil, fmt.Errorf("error in query %q - query ID %q is not unique", requestQuery.RefId, id)
}
query := &cloudWatchQuery{
Id: id,
RefId: requestQuery.RefId,
......@@ -39,11 +43,6 @@ func (e *CloudWatchExecutor) transformRequestQueriesToCloudWatchQueries(requestQ
ReturnData: requestQuery.ReturnData,
MatchExact: requestQuery.MatchExact,
}
if _, ok := cloudwatchQueries[id]; ok {
return nil, fmt.Errorf("Error in query %s. Query id %s is not unique", query.RefId, query.Id)
}
cloudwatchQueries[id] = query
}
}
......@@ -52,13 +51,12 @@ func (e *CloudWatchExecutor) transformRequestQueriesToCloudWatchQueries(requestQ
}
func (e *CloudWatchExecutor) transformQueryResponseToQueryResult(cloudwatchResponses []*cloudwatchResponse) map[string]*tsdb.QueryResult {
results := make(map[string]*tsdb.QueryResult)
responsesByRefID := make(map[string][]*cloudwatchResponse)
for _, res := range cloudwatchResponses {
responsesByRefID[res.RefId] = append(responsesByRefID[res.RefId], res)
}
results := make(map[string]*tsdb.QueryResult)
for refID, responses := range responsesByRefID {
queryResult := tsdb.NewQueryResult()
queryResult.RefId = refID
......@@ -94,7 +92,6 @@ func (e *CloudWatchExecutor) transformQueryResponseToQueryResult(cloudwatchRespo
if requestExceededMaxLimit {
queryResult.ErrorString = "Cloudwatch GetMetricData error: Maximum number of allowed metrics exceeded. Your search may have been limited."
}
if partialData {
queryResult.ErrorString = "Cloudwatch GetMetricData error: Too many datapoints requested - your search has been limited. Please try to reduce the time range"
}
......
......@@ -17,18 +17,18 @@ import (
// Parses the json queries and returns a requestQuery. The requestQuery has a 1 to 1 mapping to a query editor row
func (e *CloudWatchExecutor) parseQueries(queryContext *tsdb.TsdbQuery, startTime time.Time, endTime time.Time) (map[string][]*requestQuery, error) {
requestQueries := make(map[string][]*requestQuery)
for i, model := range queryContext.Queries {
queryType := model.Model.Get("type").MustString()
if queryType != "timeSeriesQuery" && queryType != "" {
continue
}
RefID := queryContext.Queries[i].RefId
query, err := parseRequestQuery(queryContext.Queries[i].Model, RefID, startTime, endTime)
refID := queryContext.Queries[i].RefId
query, err := parseRequestQuery(queryContext.Queries[i].Model, refID, startTime, endTime)
if err != nil {
return nil, &queryError{err, RefID}
return nil, &queryError{err: err, RefID: refID}
}
if _, exist := requestQueries[query.Region]; !exist {
requestQueries[query.Region] = make([]*requestQuery, 0)
}
......@@ -39,26 +39,23 @@ func (e *CloudWatchExecutor) parseQueries(queryContext *tsdb.TsdbQuery, startTim
}
func parseRequestQuery(model *simplejson.Json, refId string, startTime time.Time, endTime time.Time) (*requestQuery, error) {
reNumber := regexp.MustCompile(`^\d+$`)
region, err := model.Get("region").String()
if err != nil {
return nil, err
}
namespace, err := model.Get("namespace").String()
if err != nil {
return nil, err
}
metricName, err := model.Get("metricName").String()
if err != nil {
return nil, err
}
dimensions, err := parseDimensions(model)
if err != nil {
return nil, err
}
statistics, err := parseStatistics(model)
if err != nil {
return nil, err
......@@ -78,7 +75,7 @@ func parseRequestQuery(model *simplejson.Json, refId string, startTime time.Time
}
}
} else {
if regexp.MustCompile(`^\d+$`).Match([]byte(p)) {
if reNumber.Match([]byte(p)) {
period, err = strconv.Atoi(p)
if err != nil {
return nil, err
......@@ -124,7 +121,6 @@ func parseRequestQuery(model *simplejson.Json, refId string, startTime time.Time
func parseStatistics(model *simplejson.Json) ([]string, error) {
var statistics []string
for _, s := range model.Get("statistics").MustArray() {
statistics = append(statistics, s.(string))
}
......@@ -148,7 +144,6 @@ func parseDimensions(model *simplejson.Json) (map[string][]string, error) {
}
sortedDimensions := sortDimensions(parsedDimensions)
return sortedDimensions, nil
}
......
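parseRequestQuery above hoists regexp.MustCompile out of the per-period branch into a single reNumber value. A minimal sketch of why compiling once and reusing the *regexp.Regexp is preferable when the same pattern is matched repeatedly (the parsePeriod helper is illustrative):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Compile the pattern once; *regexp.Regexp is safe for concurrent use.
var reNumber = regexp.MustCompile(`^\d+$`)

func parsePeriod(p string) (int, error) {
	if reNumber.MatchString(p) { // no re-compilation on every call
		return strconv.Atoi(p)
	}
	return 0, fmt.Errorf("period %q is not a plain number", p)
}

func main() {
	for _, p := range []string{"900", "5m"} {
		period, err := parsePeriod(p)
		fmt.Println(period, err)
	}
}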
......@@ -12,8 +12,10 @@ import (
func TestRequestParser(t *testing.T) {
Convey("TestRequestParser", t, func() {
timeRange := tsdb.NewTimeRange("now-1h", "now-2h")
from, _ := timeRange.ParseFrom()
to, _ := timeRange.ParseTo()
from, err := timeRange.ParseFrom()
So(err, ShouldBeNil)
to, err := timeRange.ParseTo()
So(err, ShouldBeNil)
Convey("when parsing query editor row json", func() {
Convey("using new dimensions structure", func() {
query := simplejson.NewFromAny(map[string]interface{}{
......@@ -102,8 +104,10 @@ func TestRequestParser(t *testing.T) {
Convey("when time range is short", func() {
query.Set("period", "900")
timeRange := tsdb.NewTimeRange("now-1h", "now-2h")
from, _ := timeRange.ParseFrom()
to, _ := timeRange.ParseTo()
from, err := timeRange.ParseFrom()
So(err, ShouldBeNil)
to, err := timeRange.ParseTo()
So(err, ShouldBeNil)
res, err := parseRequestQuery(query, "ref1", from, to)
So(err, ShouldBeNil)
......
......@@ -14,7 +14,6 @@ import (
func (e *CloudWatchExecutor) parseResponse(metricDataOutputs []*cloudwatch.GetMetricDataOutput, queries map[string]*cloudWatchQuery) ([]*cloudwatchResponse, error) {
mdr := make(map[string]map[string]*cloudwatch.MetricDataResult)
for _, mdo := range metricDataOutputs {
requestExceededMaxLimit := false
for _, message := range mdo.Messages {
......@@ -42,20 +41,20 @@ func (e *CloudWatchExecutor) parseResponse(metricDataOutputs []*cloudwatch.GetMe
cloudWatchResponses := make([]*cloudwatchResponse, 0)
for id, lr := range mdr {
response := &cloudwatchResponse{}
series, partialData, err := parseGetMetricDataTimeSeries(lr, queries[id])
if err != nil {
return cloudWatchResponses, err
return nil, err
}
response.series = series
response.Period = queries[id].Period
response.Expression = queries[id].UsedExpression
response.RefId = queries[id].RefId
response.Id = queries[id].Id
response.RequestExceededMaxLimit = queries[id].RequestExceededMaxLimit
response.PartialData = partialData
response := &cloudwatchResponse{
series: series,
Period: queries[id].Period,
Expression: queries[id].UsedExpression,
RefId: queries[id].RefId,
Id: queries[id].Id,
RequestExceededMaxLimit: queries[id].RequestExceededMaxLimit,
PartialData: partialData,
}
cloudWatchResponses = append(cloudWatchResponses, response)
}
......@@ -63,14 +62,14 @@ func (e *CloudWatchExecutor) parseResponse(metricDataOutputs []*cloudwatch.GetMe
}
func parseGetMetricDataTimeSeries(metricDataResults map[string]*cloudwatch.MetricDataResult, query *cloudWatchQuery) (*tsdb.TimeSeriesSlice, bool, error) {
result := tsdb.TimeSeriesSlice{}
partialData := false
metricDataResultLabels := make([]string, 0)
for k := range metricDataResults {
metricDataResultLabels = append(metricDataResultLabels, k)
}
sort.Strings(metricDataResultLabels)
partialData := false
result := tsdb.TimeSeriesSlice{}
for _, label := range metricDataResultLabels {
metricDataResult := metricDataResults[label]
if *metricDataResult.StatusCode != "Complete" {
......@@ -79,7 +78,7 @@ func parseGetMetricDataTimeSeries(metricDataResults map[string]*cloudwatch.Metri
for _, message := range metricDataResult.Messages {
if *message.Code == "ArithmeticError" {
return nil, false, fmt.Errorf("ArithmeticError in query %s: %s", query.RefId, *message.Value)
return nil, false, fmt.Errorf("ArithmeticError in query %q: %s", query.RefId, *message.Value)
}
}
......@@ -169,17 +168,17 @@ func formatAlias(query *cloudWatchQuery, stat string, dimensions map[string]stri
if len(query.Alias) == 0 && query.isMathExpression() {
return query.Id
}
if len(query.Alias) == 0 && query.isInferredSearchExpression() && !query.isMultiValuedDimensionExpression() {
return label
}
data := map[string]string{}
data["region"] = region
data["namespace"] = namespace
data["metric"] = metricName
data["stat"] = stat
data["period"] = period
data := map[string]string{
"region": region,
"namespace": namespace,
"metric": metricName,
"stat": stat,
"period": period,
}
if len(label) != 0 {
data["label"] = label
}
......
......@@ -10,107 +10,108 @@ import (
)
func (e *CloudWatchExecutor) executeTimeSeriesQuery(ctx context.Context, queryContext *tsdb.TsdbQuery) (*tsdb.Response, error) {
results := &tsdb.Response{
Results: make(map[string]*tsdb.QueryResult),
}
startTime, err := queryContext.TimeRange.ParseFrom()
if err != nil {
return nil, err
}
endTime, err := queryContext.TimeRange.ParseTo()
if err != nil {
return nil, err
}
if !startTime.Before(endTime) {
return nil, fmt.Errorf("Invalid time range: Start time must be before end time")
return nil, fmt.Errorf("invalid time range: start time must be before end time")
}
requestQueriesByRegion, err := e.parseQueries(queryContext, startTime, endTime)
if err != nil {
return results, err
return nil, err
}
if len(requestQueriesByRegion) == 0 {
return &tsdb.Response{
Results: make(map[string]*tsdb.QueryResult),
}, nil
}
resultChan := make(chan *tsdb.QueryResult, len(queryContext.Queries))
eg, ectx := errgroup.WithContext(ctx)
if len(requestQueriesByRegion) > 0 {
for r, q := range requestQueriesByRegion {
requestQueries := q
region := r
eg.Go(func() error {
defer func() {
if err := recover(); err != nil {
plog.Error("Execute Get Metric Data Query Panic", "error", err, "stack", log.Stack(1))
if theErr, ok := err.(error); ok {
resultChan <- &tsdb.QueryResult{
Error: theErr,
}
for r, q := range requestQueriesByRegion {
requestQueries := q
region := r
eg.Go(func() error {
defer func() {
if err := recover(); err != nil {
plog.Error("Execute Get Metric Data Query Panic", "error", err, "stack", log.Stack(1))
if theErr, ok := err.(error); ok {
resultChan <- &tsdb.QueryResult{
Error: theErr,
}
}
}()
client, err := e.getClient(region)
if err != nil {
return err
}
}()
queries, err := e.transformRequestQueriesToCloudWatchQueries(requestQueries)
if err != nil {
for _, query := range requestQueries {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: err,
}
client, err := e.getClient(region)
if err != nil {
return err
}
queries, err := e.transformRequestQueriesToCloudWatchQueries(requestQueries)
if err != nil {
for _, query := range requestQueries {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: err,
}
return nil
}
return nil
}
metricDataInput, err := e.buildMetricDataInput(startTime, endTime, queries)
if err != nil {
return err
}
metricDataInput, err := e.buildMetricDataInput(startTime, endTime, queries)
if err != nil {
return err
}
cloudwatchResponses := make([]*cloudwatchResponse, 0)
mdo, err := e.executeRequest(ectx, client, metricDataInput)
if err != nil {
for _, query := range requestQueries {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: err,
}
cloudwatchResponses := make([]*cloudwatchResponse, 0)
mdo, err := e.executeRequest(ectx, client, metricDataInput)
if err != nil {
for _, query := range requestQueries {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: err,
}
return nil
}
return nil
}
responses, err := e.parseResponse(mdo, queries)
if err != nil {
for _, query := range requestQueries {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: err,
}
responses, err := e.parseResponse(mdo, queries)
if err != nil {
for _, query := range requestQueries {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: err,
}
return nil
}
cloudwatchResponses = append(cloudwatchResponses, responses...)
res := e.transformQueryResponseToQueryResult(cloudwatchResponses)
for _, queryRes := range res {
resultChan <- queryRes
}
return nil
})
}
}
}
cloudwatchResponses = append(cloudwatchResponses, responses...)
res := e.transformQueryResponseToQueryResult(cloudwatchResponses)
for _, queryRes := range res {
resultChan <- queryRes
}
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
close(resultChan)
results := &tsdb.Response{
Results: make(map[string]*tsdb.QueryResult),
}
for result := range resultChan {
results.Results[result.RefId] = result
}
return results, nil
}
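The restructured executeTimeSeriesQuery above fans out one goroutine per region, funnels every per-query result or error into a buffered channel, and only drains the channel after eg.Wait and close. A condensed sketch of that shape with placeholder work instead of the CloudWatch calls and simplified result types:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type queryResult struct {
	RefID string
	Err   error
}

func run(ctx context.Context, queriesByRegion map[string][]string) (map[string]queryResult, error) {
	// Buffer for the worst case so workers never block on send.
	total := 0
	for _, qs := range queriesByRegion {
		total += len(qs)
	}
	resultChan := make(chan queryResult, total)

	eg, _ := errgroup.WithContext(ctx)
	for r, q := range queriesByRegion {
		region, queries := r, q // capture per-iteration copies for the goroutine
		eg.Go(func() error {
			for _, refID := range queries {
				// Placeholder for the real per-region CloudWatch request.
				resultChan <- queryResult{RefID: fmt.Sprintf("%s/%s", region, refID)}
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	close(resultChan) // safe: all senders have returned

	results := make(map[string]queryResult)
	for res := range resultChan {
		results[res.RefID] = res
	}
	return results, nil
}

func main() {
	out, err := run(context.Background(), map[string][]string{"us-east-1": {"A"}, "eu-west-1": {"B"}})
	fmt.Println(out, err)
}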
......@@ -5,24 +5,19 @@ import (
"testing"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
)
func TestTimeSeriesQuery(t *testing.T) {
Convey("TestTimeSeriesQuery", t, func() {
executor := &CloudWatchExecutor{}
executor := &CloudWatchExecutor{}
Convey("Time range is valid", func() {
Convey("End time before start time should result in error", func() {
_, err := executor.executeTimeSeriesQuery(context.TODO(), &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-2h")})
So(err.Error(), ShouldEqual, "Invalid time range: Start time must be before end time")
})
t.Run("End time before start time should result in error", func(t *testing.T) {
_, err := executor.executeTimeSeriesQuery(context.TODO(), &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-2h")})
assert.EqualError(t, err, "invalid time range: start time must be before end time")
})
Convey("End time equals start time should result in error", func() {
_, err := executor.executeTimeSeriesQuery(context.TODO(), &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-1h")})
So(err.Error(), ShouldEqual, "Invalid time range: Start time must be before end time")
})
})
t.Run("End time equals start time should result in error", func(t *testing.T) {
_, err := executor.executeTimeSeriesQuery(context.TODO(), &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-1h")})
assert.EqualError(t, err, "invalid time range: start time must be before end time")
})
}
......@@ -46,5 +46,5 @@ type queryError struct {
}
func (e *queryError) Error() string {
return fmt.Sprintf("Error parsing query %s, %s", e.RefID, e.err)
return fmt.Sprintf("error parsing query %q, %s", e.RefID, e.err)
}