Commit 7896836d authored by Arve Knudsen, committed by GitHub

Chore: Reduce TSDB Go code complexity (#26401)

* tsdb: Make code less complex
parent 6145bf77
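The first hunks below extract the Cloud Monitoring point-conversion loop into a named helper, handleDistributionSeries, instead of keeping it inline in parseResponse. As a rough illustration of the same extract-function move, here is a minimal, self-contained sketch; toFloat and pointValue are hypothetical names, not part of the commit, and only mirror the INT64/BOOL handling visible in the diff:

```go
package main

import (
	"fmt"
	"strconv"
)

// pointValue is a hypothetical, simplified stand-in for the per-point
// value struct seen in the diff (DoubleValue / IntValue / BoolValue).
type pointValue struct {
	DoubleValue float64
	IntValue    string
	BoolValue   bool
}

// toFloat pulls the per-point conversion out of the calling loop, the
// same way the commit pulls the loop into handleDistributionSeries:
// INT64 values are parsed from their string form, BOOL becomes 0/1,
// and everything else falls back to the double value.
func toFloat(valueType string, v pointValue) float64 {
	switch valueType {
	case "INT64":
		if parsed, err := strconv.ParseFloat(v.IntValue, 64); err == nil {
			return parsed
		}
		return v.DoubleValue
	case "BOOL":
		if v.BoolValue {
			return 1
		}
		return 0
	default:
		return v.DoubleValue
	}
}

func main() {
	fmt.Println(toFloat("INT64", pointValue{IntValue: "42"})) // 42
	fmt.Println(toFloat("BOOL", pointValue{BoolValue: true})) // 1
}
```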
@@ -502,11 +502,43 @@ func (e *CloudMonitoringExecutor) unmarshalResponse(res *http.Response) (cloudMo
return data, nil
}
func handleDistributionSeries(series timeSeries, defaultMetricName string, seriesLabels map[string]string,
query *cloudMonitoringQuery, queryRes *tsdb.QueryResult) {
points := make([]tsdb.TimePoint, 0)
for i := len(series.Points) - 1; i >= 0; i-- {
point := series.Points[i]
value := point.Value.DoubleValue
if series.ValueType == "INT64" {
parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
if err == nil {
value = parsedValue
}
}
if series.ValueType == "BOOL" {
if point.Value.BoolValue {
value = 1
} else {
value = 0
}
}
points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
}
metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
Name: metricName,
Points: points,
})
}
func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data cloudMonitoringResponse, query *cloudMonitoringQuery) error {
labels := make(map[string]map[string]bool)
for _, series := range data.TimeSeries {
points := make([]tsdb.TimePoint, 0)
seriesLabels := make(map[string]string)
defaultMetricName := series.Metric.Type
labels["resource.type"] = map[string]bool{series.Resource.Type: true}
@@ -566,34 +598,7 @@ func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data
// reverse the order to be ascending
if series.ValueType != "DISTRIBUTION" {
for i := len(series.Points) - 1; i >= 0; i-- {
point := series.Points[i]
value := point.Value.DoubleValue
if series.ValueType == "INT64" {
parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
if err == nil {
value = parsedValue
}
}
if series.ValueType == "BOOL" {
if point.Value.BoolValue {
value = 1
} else {
value = 0
}
}
points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
}
metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
Name: metricName,
Points: points,
})
handleDistributionSeries(series, defaultMetricName, seriesLabels, query, queryRes)
} else {
buckets := make(map[int]*tsdb.TimeSeries)
...
@@ -65,7 +65,11 @@ type (
}
cloudMonitoringResponse struct {
TimeSeries []struct {
TimeSeries []timeSeries `json:"timeSeries"`
}
)
type timeSeries struct {
Metric struct {
Labels map[string]string `json:"labels"`
Type string `json:"type"`
@@ -105,6 +109,4 @@ type (
} `json:"distributionValue"`
} `json:"value"`
} `json:"points"`
} `json:"timeSeries"`
}
)
}
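In the types file, the anonymous element struct behind TimeSeries is promoted to a named timeSeries type, which is what lets the new handleDistributionSeries helper take it as a parameter. A small sketch of that promotion pattern; responseBefore, responseAfter, item, and describe are illustrative names only, not from the commit:

```go
package main

import "fmt"

// Before: the element type only exists anonymously inside the response
// struct, so no helper function can name it in a signature.
type responseBefore struct {
	Items []struct {
		Name string `json:"name"`
	} `json:"items"`
}

// After: the element gets its own named type, mirroring how the diff
// introduces timeSeries so handleDistributionSeries can accept it.
type item struct {
	Name string `json:"name"`
}

type responseAfter struct {
	Items []item `json:"items"`
}

// describe can now refer to the element type directly.
func describe(it item) string { return "item: " + it.Name }

func main() {
	r := responseAfter{Items: []item{{Name: "cpu"}}}
	fmt.Println(describe(r.Items[0]))
}
```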
@@ -24,9 +24,6 @@ var newTimeSeriesQuery = func(client es.Client, tsdbQuery *tsdb.TsdbQuery, inter
}
func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
result := &tsdb.Response{}
result.Results = make(map[string]*tsdb.QueryResult)
tsQueryParser := newTimeSeriesQueryParser()
queries, err := tsQueryParser.parse(e.tsdbQuery)
if err != nil {
@@ -37,12 +34,35 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
from := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetFromAsMsEpoch())
to := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetToAsMsEpoch())
result := &tsdb.Response{
Results: make(map[string]*tsdb.QueryResult),
}
for _, q := range queries {
minInterval, err := e.client.GetMinInterval(q.Interval)
if err := e.processQuery(q, ms, from, to, result); err != nil {
return nil, err
}
}
req, err := ms.Build()
if err != nil {
return nil, err
}
res, err := e.client.ExecuteMultisearch(req)
if err != nil {
return nil, err
}
rp := newResponseParser(res.Responses, queries, res.DebugInfo)
return rp.getTimeSeries()
}
func (e *timeSeriesQuery) processQuery(q *Query, ms *es.MultiSearchRequestBuilder, from, to string,
result *tsdb.Response) error {
minInterval, err := e.client.GetMinInterval(q.Interval)
if err != nil {
return err
}
interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)
b := ms.Search(interval)
@@ -61,13 +81,13 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
Error: fmt.Errorf("invalid query, missing metrics and aggregations"),
ErrorString: "invalid query, missing metrics and aggregations",
}
continue
return nil
}
metric := q.Metrics[0]
b.Size(metric.Settings.Get("size").MustInt(500))
b.SortDesc("@timestamp", "boolean")
b.AddDocValueField("@timestamp")
continue
return nil
}
aggBuilder := b.Agg()
@@ -152,20 +172,8 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
})
}
}
}
req, err := ms.Build()
if err != nil {
return nil, err
}
res, err := e.client.ExecuteMultisearch(req)
if err != nil {
return nil, err
}
rp := newResponseParser(res.Responses, queries, res.DebugInfo)
return rp.getTimeSeries()
return nil
}
func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
...
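On the Elasticsearch side, the per-query body of execute is pulled out into processQuery, so the early continue statements become return nil and the multisearch build/execute now happens after the loop. A minimal sketch of that continue-to-return translation, under hypothetical names (query, processOne, results are not from the commit):

```go
package main

import "fmt"

type query struct {
	name    string
	metrics []string
}

// processOne is a hypothetical stand-in for processQuery in the diff:
// once the loop body is its own function, each early `continue`
// becomes `return nil`, while real failures propagate as errors.
func processOne(q query, results map[string]string) error {
	if len(q.metrics) == 0 {
		results[q.name] = "invalid query, missing metrics and aggregations"
		return nil // was `continue` when this code lived inside the loop
	}
	results[q.name] = fmt.Sprintf("%d metrics", len(q.metrics))
	return nil
}

func main() {
	results := map[string]string{}
	for _, q := range []query{{name: "A"}, {name: "B", metrics: []string{"avg"}}} {
		if err := processOne(q, results); err != nil {
			fmt.Println("error:", err)
			return
		}
	}
	fmt.Println(results)
}
```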