Commit 7896836d by Arve Knudsen, committed by GitHub

Chore: Reduce TSDB Go code complexity (#26401)

* tsdb: Make code less complex
parent 6145bf77
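Both changes below follow the same extract-function pattern: the body of a long loop moves into a named helper that reports failures through an error return, so each function stays under a gocyclo-style complexity limit. A minimal sketch of the pattern; the names `process` and `handleItem` are illustrative, not from this commit:

```go
package main

import "fmt"

// Before the refactor, one function owns the loop and the per-item logic,
// so every branch inside the loop counts toward its cyclomatic complexity.
// After, the per-item logic lives in a helper and the caller's loop
// shrinks to a call plus an error check.
func process(items []int) error {
	for _, it := range items {
		if err := handleItem(it); err != nil {
			return err
		}
	}
	return nil
}

func handleItem(it int) error {
	if it < 0 {
		return fmt.Errorf("negative item: %d", it)
	}
	fmt.Println(it)
	return nil
}

func main() {
	if err := process([]int{1, 2, 3}); err != nil {
		fmt.Println("error:", err)
	}
}
```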
@@ -502,11 +502,43 @@ func (e *CloudMonitoringExecutor) unmarshalResponse(res *http.Response) (cloudMo
 	return data, nil
 }
+func handleDistributionSeries(series timeSeries, defaultMetricName string, seriesLabels map[string]string,
+	query *cloudMonitoringQuery, queryRes *tsdb.QueryResult) {
+	points := make([]tsdb.TimePoint, 0)
+	for i := len(series.Points) - 1; i >= 0; i-- {
+		point := series.Points[i]
+		value := point.Value.DoubleValue
+		if series.ValueType == "INT64" {
+			parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
+			if err == nil {
+				value = parsedValue
+			}
+		}
+		if series.ValueType == "BOOL" {
+			if point.Value.BoolValue {
+				value = 1
+			} else {
+				value = 0
+			}
+		}
+		points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+	}
+	metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
+	queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
+		Name:   metricName,
+		Points: points,
+	})
+}
 func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data cloudMonitoringResponse, query *cloudMonitoringQuery) error {
 	labels := make(map[string]map[string]bool)
 	for _, series := range data.TimeSeries {
-		points := make([]tsdb.TimePoint, 0)
 		seriesLabels := make(map[string]string)
 		defaultMetricName := series.Metric.Type
 		labels["resource.type"] = map[string]bool{series.Resource.Type: true}
@@ -566,34 +598,7 @@ func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data
 		// reverse the order to be ascending
 		if series.ValueType != "DISTRIBUTION" {
-			for i := len(series.Points) - 1; i >= 0; i-- {
-				point := series.Points[i]
-				value := point.Value.DoubleValue
-				if series.ValueType == "INT64" {
-					parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
-					if err == nil {
-						value = parsedValue
-					}
-				}
-				if series.ValueType == "BOOL" {
-					if point.Value.BoolValue {
-						value = 1
-					} else {
-						value = 0
-					}
-				}
-				points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
-			}
-			metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
-			queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
-				Name:   metricName,
-				Points: points,
-			})
+			handleDistributionSeries(series, defaultMetricName, seriesLabels, query, queryRes)
 		} else {
 			buckets := make(map[int]*tsdb.TimeSeries)
...
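The point-value coercion that moved into `handleDistributionSeries` is easy to exercise in isolation: `int64Value` arrives as a string and is parsed, and BOOL points map to 0 or 1. A standalone sketch, where `pointValue` and `coerce` are hypothetical stand-ins for the nested types in this file:

```go
package main

import (
	"fmt"
	"strconv"
)

// pointValue is a cut-down stand-in for the point value struct above.
type pointValue struct {
	DoubleValue float64
	IntValue    string
	BoolValue   bool
}

// coerce converts a typed point value to float64 the same way the parser
// does: INT64 values arrive as strings, BOOL values become 0 or 1, and
// anything else falls back to the double value.
func coerce(valueType string, v pointValue) float64 {
	value := v.DoubleValue
	if valueType == "INT64" {
		if parsed, err := strconv.ParseFloat(v.IntValue, 64); err == nil {
			value = parsed
		}
	}
	if valueType == "BOOL" {
		if v.BoolValue {
			value = 1
		} else {
			value = 0
		}
	}
	return value
}

func main() {
	fmt.Println(coerce("INT64", pointValue{IntValue: "42"}))    // 42
	fmt.Println(coerce("BOOL", pointValue{BoolValue: true}))    // 1
	fmt.Println(coerce("DOUBLE", pointValue{DoubleValue: 0.5})) // 0.5
}
```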
@@ -65,46 +65,48 @@ type (
 	}
 	cloudMonitoringResponse struct {
-		TimeSeries []struct {
-			Metric struct {
-				Labels map[string]string `json:"labels"`
-				Type   string            `json:"type"`
-			} `json:"metric"`
-			Resource struct {
-				Type   string            `json:"type"`
-				Labels map[string]string `json:"labels"`
-			} `json:"resource"`
-			MetaData   map[string]map[string]interface{} `json:"metadata"`
-			MetricKind string                            `json:"metricKind"`
-			ValueType  string                            `json:"valueType"`
-			Points     []struct {
-				Interval struct {
-					StartTime time.Time `json:"startTime"`
-					EndTime   time.Time `json:"endTime"`
-				} `json:"interval"`
-				Value struct {
-					DoubleValue       float64 `json:"doubleValue"`
-					StringValue       string  `json:"stringValue"`
-					BoolValue         bool    `json:"boolValue"`
-					IntValue          string  `json:"int64Value"`
-					DistributionValue struct {
-						Count                 string  `json:"count"`
-						Mean                  float64 `json:"mean"`
-						SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
-						Range                 struct {
-							Min int `json:"min"`
-							Max int `json:"max"`
-						} `json:"range"`
-						BucketOptions cloudMonitoringBucketOptions `json:"bucketOptions"`
-						BucketCounts  []string                     `json:"bucketCounts"`
-						Examplars     []struct {
-							Value     float64 `json:"value"`
-							Timestamp string  `json:"timestamp"`
-							// attachments
-						} `json:"examplars"`
-					} `json:"distributionValue"`
-				} `json:"value"`
-			} `json:"points"`
-		} `json:"timeSeries"`
+		TimeSeries []timeSeries `json:"timeSeries"`
 	}
 )
+
+type timeSeries struct {
+	Metric struct {
+		Labels map[string]string `json:"labels"`
+		Type   string            `json:"type"`
+	} `json:"metric"`
+	Resource struct {
+		Type   string            `json:"type"`
+		Labels map[string]string `json:"labels"`
+	} `json:"resource"`
+	MetaData   map[string]map[string]interface{} `json:"metadata"`
+	MetricKind string                            `json:"metricKind"`
+	ValueType  string                            `json:"valueType"`
+	Points     []struct {
+		Interval struct {
+			StartTime time.Time `json:"startTime"`
+			EndTime   time.Time `json:"endTime"`
+		} `json:"interval"`
+		Value struct {
+			DoubleValue       float64 `json:"doubleValue"`
+			StringValue       string  `json:"stringValue"`
+			BoolValue         bool    `json:"boolValue"`
+			IntValue          string  `json:"int64Value"`
+			DistributionValue struct {
+				Count                 string  `json:"count"`
+				Mean                  float64 `json:"mean"`
+				SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
+				Range                 struct {
+					Min int `json:"min"`
+					Max int `json:"max"`
+				} `json:"range"`
+				BucketOptions cloudMonitoringBucketOptions `json:"bucketOptions"`
+				BucketCounts  []string                     `json:"bucketCounts"`
+				Examplars     []struct {
+					Value     float64 `json:"value"`
+					Timestamp string  `json:"timestamp"`
+					// attachments
+				} `json:"examplars"`
+			} `json:"distributionValue"`
+		} `json:"value"`
+	} `json:"points"`
+}
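The extracted `timeSeries` type mirrors the JSON shape of the Cloud Monitoring API response; note that 64-bit integers (`int64Value`, distribution `count`, `bucketCounts`) are encoded as JSON strings. A trimmed decoding sketch; `sampleSeries` is a hypothetical cut-down stand-in, since `cloudMonitoringBucketOptions` is not shown in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// sampleSeries keeps only the fields needed to demonstrate decoding;
// the JSON tags match the timeSeries type above.
type sampleSeries struct {
	Metric struct {
		Type string `json:"type"`
	} `json:"metric"`
	ValueType string `json:"valueType"`
	Points    []struct {
		Value struct {
			DoubleValue float64 `json:"doubleValue"`
			IntValue    string  `json:"int64Value"`
		} `json:"value"`
	} `json:"points"`
}

func main() {
	// The API encodes 64-bit integers as JSON strings ("int64Value"),
	// which is why the Go field is a string and the parser uses ParseFloat.
	payload := []byte(`{
		"metric": {"type": "compute.googleapis.com/instance/cpu/usage_time"},
		"valueType": "INT64",
		"points": [{"value": {"int64Value": "42"}}]
	}`)

	var s sampleSeries
	if err := json.Unmarshal(payload, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Metric.Type, s.ValueType, s.Points[0].Value.IntValue)
}
```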
@@ -24,9 +24,6 @@ var newTimeSeriesQuery = func(client es.Client, tsdbQuery *tsdb.TsdbQuery, inter
 }

 func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
-	result := &tsdb.Response{}
-	result.Results = make(map[string]*tsdb.QueryResult)
-
 	tsQueryParser := newTimeSeriesQueryParser()
 	queries, err := tsQueryParser.parse(e.tsdbQuery)
 	if err != nil {
@@ -37,135 +34,146 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
 	from := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetFromAsMsEpoch())
 	to := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetToAsMsEpoch())
+	result := &tsdb.Response{
+		Results: make(map[string]*tsdb.QueryResult),
+	}
 	for _, q := range queries {
-		minInterval, err := e.client.GetMinInterval(q.Interval)
-		if err != nil {
-			return nil, err
-		}
-		interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)
-		b := ms.Search(interval)
-		b.Size(0)
-		filters := b.Query().Bool().Filter()
-		filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)
-		if q.RawQuery != "" {
-			filters.AddQueryStringFilter(q.RawQuery, true)
-		}
-		if len(q.BucketAggs) == 0 {
-			if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
-				result.Results[q.RefID] = &tsdb.QueryResult{
-					RefId:       q.RefID,
-					Error:       fmt.Errorf("invalid query, missing metrics and aggregations"),
-					ErrorString: "invalid query, missing metrics and aggregations",
-				}
-				continue
-			}
-			metric := q.Metrics[0]
-			b.Size(metric.Settings.Get("size").MustInt(500))
-			b.SortDesc("@timestamp", "boolean")
-			b.AddDocValueField("@timestamp")
-			continue
-		}
+		if err := e.processQuery(q, ms, from, to, result); err != nil {
+			return nil, err
+		}
+	}
+
+	req, err := ms.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := e.client.ExecuteMultisearch(req)
+	if err != nil {
+		return nil, err
+	}
+
+	rp := newResponseParser(res.Responses, queries, res.DebugInfo)
+	return rp.getTimeSeries()
+}
+
+func (e *timeSeriesQuery) processQuery(q *Query, ms *es.MultiSearchRequestBuilder, from, to string,
+	result *tsdb.Response) error {
+	minInterval, err := e.client.GetMinInterval(q.Interval)
+	if err != nil {
+		return err
+	}
+	interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)
+	b := ms.Search(interval)
+	b.Size(0)
+	filters := b.Query().Bool().Filter()
+	filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)
+	if q.RawQuery != "" {
+		filters.AddQueryStringFilter(q.RawQuery, true)
+	}
+	if len(q.BucketAggs) == 0 {
+		if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
+			result.Results[q.RefID] = &tsdb.QueryResult{
+				RefId:       q.RefID,
+				Error:       fmt.Errorf("invalid query, missing metrics and aggregations"),
+				ErrorString: "invalid query, missing metrics and aggregations",
+			}
+			return nil
+		}
+		metric := q.Metrics[0]
+		b.Size(metric.Settings.Get("size").MustInt(500))
+		b.SortDesc("@timestamp", "boolean")
+		b.AddDocValueField("@timestamp")
+		return nil
+	}

 	aggBuilder := b.Agg()

 	// iterate backwards to create aggregations bottom-down
 	for _, bucketAgg := range q.BucketAggs {
 		switch bucketAgg.Type {
 		case dateHistType:
 			aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
 		case histogramType:
 			aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
 		case filtersType:
 			aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
 		case termsType:
 			aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
 		case geohashGridType:
 			aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
 		}
 	}

 	for _, m := range q.Metrics {
 		m := m
 		if m.Type == countType {
 			continue
 		}
 		if isPipelineAgg(m.Type) {
 			if isPipelineAggWithMultipleBucketPaths(m.Type) {
 				if len(m.PipelineVariables) > 0 {
 					bucketPaths := map[string]interface{}{}
 					for name, pipelineAgg := range m.PipelineVariables {
 						if _, err := strconv.Atoi(pipelineAgg); err == nil {
 							var appliedAgg *MetricAgg
 							for _, pipelineMetric := range q.Metrics {
 								if pipelineMetric.ID == pipelineAgg {
 									appliedAgg = pipelineMetric
 									break
 								}
 							}
 							if appliedAgg != nil {
 								if appliedAgg.Type == countType {
 									bucketPaths[name] = "_count"
 								} else {
 									bucketPaths[name] = pipelineAgg
 								}
 							}
 						}
 					}
 					aggBuilder.Pipeline(m.ID, m.Type, bucketPaths, func(a *es.PipelineAggregation) {
 						a.Settings = m.Settings.MustMap()
 					})
 				} else {
 					continue
 				}
 			} else {
 				if _, err := strconv.Atoi(m.PipelineAggregate); err == nil {
 					var appliedAgg *MetricAgg
 					for _, pipelineMetric := range q.Metrics {
 						if pipelineMetric.ID == m.PipelineAggregate {
 							appliedAgg = pipelineMetric
 							break
 						}
 					}
 					if appliedAgg != nil {
 						bucketPath := m.PipelineAggregate
 						if appliedAgg.Type == countType {
 							bucketPath = "_count"
 						}
 						aggBuilder.Pipeline(m.ID, m.Type, bucketPath, func(a *es.PipelineAggregation) {
 							a.Settings = m.Settings.MustMap()
 						})
 					}
 				} else {
 					continue
 				}
 			}
 		} else {
 			aggBuilder.Metric(m.ID, m.Type, m.Field, func(a *es.MetricAggregation) {
 				a.Settings = m.Settings.MustMap()
 			})
 		}
 	}
-
-	req, err := ms.Build()
-	if err != nil {
-		return nil, err
-	}
-	res, err := e.client.ExecuteMultisearch(req)
-	if err != nil {
-		return nil, err
-	}
-	rp := newResponseParser(res.Responses, queries, res.DebugInfo)
-	return rp.getTimeSeries()
+	return nil
 }
 func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
...
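For context on the pipeline-aggregation branch kept inside `processQuery`: a pipeline metric references another metric by numeric ID, and the reference is rewritten to the special `_count` bucket path when the referenced metric is a count aggregation. A self-contained sketch of that resolution rule; `metric` and `resolveBucketPath` are illustrative names, not part of the commit:

```go
package main

import (
	"fmt"
	"strconv"
)

// metric is a cut-down stand-in for the MetricAgg type used above.
type metric struct {
	ID   string
	Type string
}

// resolveBucketPath mirrors the rule in processQuery: a pipeline
// aggregate reference must be a numeric metric ID, and if the
// referenced metric is a count aggregation, Elasticsearch expects
// the "_count" bucket path instead of the metric ID.
func resolveBucketPath(ref string, metrics []metric) (string, bool) {
	if _, err := strconv.Atoi(ref); err != nil {
		return "", false // not a numeric metric reference
	}
	for _, m := range metrics {
		if m.ID == ref {
			if m.Type == "count" {
				return "_count", true
			}
			return ref, true
		}
	}
	return "", false // referenced metric not found
}

func main() {
	metrics := []metric{{ID: "1", Type: "count"}, {ID: "2", Type: "avg"}}
	fmt.Println(resolveBucketPath("1", metrics)) // _count true
	fmt.Println(resolveBucketPath("2", metrics)) // 2 true
}
```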