Commit 7896836d by Arve Knudsen, committed by GitHub

Chore: Reduce TSDB Go code complexity (#26401)

* tsdb: Make code less complex
parent 6145bf77
@@ -502,11 +502,43 @@ func (e *CloudMonitoringExecutor) unmarshalResponse(res *http.Response) (cloudMo
 	return data, nil
 }
 
+func handleDistributionSeries(series timeSeries, defaultMetricName string, seriesLabels map[string]string,
+	query *cloudMonitoringQuery, queryRes *tsdb.QueryResult) {
+	points := make([]tsdb.TimePoint, 0)
+	for i := len(series.Points) - 1; i >= 0; i-- {
+		point := series.Points[i]
+		value := point.Value.DoubleValue
+
+		if series.ValueType == "INT64" {
+			parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
+			if err == nil {
+				value = parsedValue
+			}
+		}
+
+		if series.ValueType == "BOOL" {
+			if point.Value.BoolValue {
+				value = 1
+			} else {
+				value = 0
+			}
+		}
+
+		points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+	}
+
+	metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
+
+	queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
+		Name:   metricName,
+		Points: points,
+	})
+}
+
 func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data cloudMonitoringResponse, query *cloudMonitoringQuery) error {
 	labels := make(map[string]map[string]bool)
 
 	for _, series := range data.TimeSeries {
-		points := make([]tsdb.TimePoint, 0)
 		seriesLabels := make(map[string]string)
 		defaultMetricName := series.Metric.Type
 		labels["resource.type"] = map[string]bool{series.Resource.Type: true}
@@ -566,34 +598,7 @@ func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data
 		// reverse the order to be ascending
 		if series.ValueType != "DISTRIBUTION" {
-			for i := len(series.Points) - 1; i >= 0; i-- {
-				point := series.Points[i]
-				value := point.Value.DoubleValue
-
-				if series.ValueType == "INT64" {
-					parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
-					if err == nil {
-						value = parsedValue
-					}
-				}
-
-				if series.ValueType == "BOOL" {
-					if point.Value.BoolValue {
-						value = 1
-					} else {
-						value = 0
-					}
-				}
-
-				points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
-			}
-
-			metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
-
-			queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
-				Name:   metricName,
-				Points: points,
-			})
+			handleDistributionSeries(series, defaultMetricName, seriesLabels, query, queryRes)
 		} else {
 			buckets := make(map[int]*tsdb.TimeSeries)
...
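Note: the cloudmonitoring change extracts the per-point value conversion into handleDistributionSeries. A minimal standalone sketch of that conversion logic in isolation (the toFloat helper is hypothetical, written here only to show the rule: DoubleValue is used as-is, INT64 points arrive as JSON strings and are parsed, and BOOL points map to 0 or 1):

package main

import (
	"fmt"
	"strconv"
)

// toFloat mirrors the value conversion inside handleDistributionSeries.
func toFloat(valueType, intValue string, doubleValue float64, boolValue bool) float64 {
	value := doubleValue
	if valueType == "INT64" {
		// Cloud Monitoring encodes int64 values as strings in JSON.
		if parsed, err := strconv.ParseFloat(intValue, 64); err == nil {
			value = parsed
		}
	}
	if valueType == "BOOL" {
		if boolValue {
			value = 1
		} else {
			value = 0
		}
	}
	return value
}

func main() {
	fmt.Println(toFloat("INT64", "42", 0, false))  // 42
	fmt.Println(toFloat("BOOL", "", 0, true))      // 1
	fmt.Println(toFloat("DOUBLE", "", 3.5, false)) // 3.5
}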
@@ -65,46 +65,48 @@ type (
 	}
 
 	cloudMonitoringResponse struct {
-		TimeSeries []struct {
-			Metric struct {
-				Labels map[string]string `json:"labels"`
-				Type   string            `json:"type"`
-			} `json:"metric"`
-			Resource struct {
-				Type   string            `json:"type"`
-				Labels map[string]string `json:"labels"`
-			} `json:"resource"`
-			MetaData   map[string]map[string]interface{} `json:"metadata"`
-			MetricKind string                            `json:"metricKind"`
-			ValueType  string                            `json:"valueType"`
-			Points     []struct {
-				Interval struct {
-					StartTime time.Time `json:"startTime"`
-					EndTime   time.Time `json:"endTime"`
-				} `json:"interval"`
-				Value struct {
-					DoubleValue       float64 `json:"doubleValue"`
-					StringValue       string  `json:"stringValue"`
-					BoolValue         bool    `json:"boolValue"`
-					IntValue          string  `json:"int64Value"`
-					DistributionValue struct {
-						Count                 string  `json:"count"`
-						Mean                  float64 `json:"mean"`
-						SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
-						Range                 struct {
-							Min int `json:"min"`
-							Max int `json:"max"`
-						} `json:"range"`
-						BucketOptions cloudMonitoringBucketOptions `json:"bucketOptions"`
-						BucketCounts  []string                     `json:"bucketCounts"`
-						Examplars     []struct {
-							Value     float64 `json:"value"`
-							Timestamp string  `json:"timestamp"`
-							// attachments
-						} `json:"examplars"`
-					} `json:"distributionValue"`
-				} `json:"value"`
-			} `json:"points"`
-		} `json:"timeSeries"`
+		TimeSeries []timeSeries `json:"timeSeries"`
 	}
 )
+
+type timeSeries struct {
+	Metric struct {
+		Labels map[string]string `json:"labels"`
+		Type   string            `json:"type"`
+	} `json:"metric"`
+	Resource struct {
+		Type   string            `json:"type"`
+		Labels map[string]string `json:"labels"`
+	} `json:"resource"`
+	MetaData   map[string]map[string]interface{} `json:"metadata"`
+	MetricKind string                            `json:"metricKind"`
+	ValueType  string                            `json:"valueType"`
+	Points     []struct {
+		Interval struct {
+			StartTime time.Time `json:"startTime"`
+			EndTime   time.Time `json:"endTime"`
+		} `json:"interval"`
+		Value struct {
+			DoubleValue       float64 `json:"doubleValue"`
+			StringValue       string  `json:"stringValue"`
+			BoolValue         bool    `json:"boolValue"`
+			IntValue          string  `json:"int64Value"`
+			DistributionValue struct {
+				Count                 string  `json:"count"`
+				Mean                  float64 `json:"mean"`
+				SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
+				Range                 struct {
+					Min int `json:"min"`
+					Max int `json:"max"`
+				} `json:"range"`
+				BucketOptions cloudMonitoringBucketOptions `json:"bucketOptions"`
+				BucketCounts  []string                     `json:"bucketCounts"`
+				Examplars     []struct {
+					Value     float64 `json:"value"`
+					Timestamp string  `json:"timestamp"`
+					// attachments
+				} `json:"examplars"`
+			} `json:"distributionValue"`
+		} `json:"value"`
+	} `json:"points"`
+}
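Note: with the anonymous TimeSeries struct promoted to the named timeSeries type, a response element can be decoded and passed to helpers such as handleDistributionSeries. A minimal sketch of decoding such a payload, assuming a trimmed-down local copy of the type (trimmedTimeSeries is hypothetical and keeps only the fields exercised here):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// trimmedTimeSeries is a reduced, illustrative copy of the timeSeries type above.
type trimmedTimeSeries struct {
	Metric struct {
		Labels map[string]string `json:"labels"`
		Type   string            `json:"type"`
	} `json:"metric"`
	ValueType string `json:"valueType"`
	Points    []struct {
		Interval struct {
			EndTime time.Time `json:"endTime"`
		} `json:"interval"`
		Value struct {
			DoubleValue float64 `json:"doubleValue"`
			IntValue    string  `json:"int64Value"` // int64 values arrive as JSON strings
		} `json:"value"`
	} `json:"points"`
}

func main() {
	payload := `{
		"metric": {"type": "compute.googleapis.com/instance/cpu/utilization", "labels": {}},
		"valueType": "DOUBLE",
		"points": [{"interval": {"endTime": "2020-07-16T10:00:00Z"}, "value": {"doubleValue": 0.42}}]
	}`
	var ts trimmedTimeSeries
	if err := json.Unmarshal([]byte(payload), &ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Metric.Type, ts.Points[0].Value.DoubleValue)
}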
@@ -24,9 +24,6 @@ var newTimeSeriesQuery = func(client es.Client, tsdbQuery *tsdb.TsdbQuery, inter
 }
 
 func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
-	result := &tsdb.Response{}
-	result.Results = make(map[string]*tsdb.QueryResult)
-
 	tsQueryParser := newTimeSeriesQueryParser()
 	queries, err := tsQueryParser.parse(e.tsdbQuery)
 	if err != nil {
@@ -37,135 +34,146 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
 	from := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetFromAsMsEpoch())
 	to := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetToAsMsEpoch())
 
+	result := &tsdb.Response{
+		Results: make(map[string]*tsdb.QueryResult),
+	}
 	for _, q := range queries {
-		minInterval, err := e.client.GetMinInterval(q.Interval)
-		if err != nil {
+		if err := e.processQuery(q, ms, from, to, result); err != nil {
 			return nil, err
 		}
-		interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)
-
-		b := ms.Search(interval)
-		b.Size(0)
-		filters := b.Query().Bool().Filter()
-		filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)
-
-		if q.RawQuery != "" {
-			filters.AddQueryStringFilter(q.RawQuery, true)
-		}
-
-		if len(q.BucketAggs) == 0 {
-			if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
-				result.Results[q.RefID] = &tsdb.QueryResult{
-					RefId:       q.RefID,
-					Error:       fmt.Errorf("invalid query, missing metrics and aggregations"),
-					ErrorString: "invalid query, missing metrics and aggregations",
-				}
-				continue
-			}
-			metric := q.Metrics[0]
-			b.Size(metric.Settings.Get("size").MustInt(500))
-			b.SortDesc("@timestamp", "boolean")
-			b.AddDocValueField("@timestamp")
-			continue
-		}
+	}
+
+	req, err := ms.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := e.client.ExecuteMultisearch(req)
+	if err != nil {
+		return nil, err
+	}
+
+	rp := newResponseParser(res.Responses, queries, res.DebugInfo)
+	return rp.getTimeSeries()
+}
+
+func (e *timeSeriesQuery) processQuery(q *Query, ms *es.MultiSearchRequestBuilder, from, to string,
+	result *tsdb.Response) error {
+	minInterval, err := e.client.GetMinInterval(q.Interval)
+	if err != nil {
+		return err
+	}
+	interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)
+
+	b := ms.Search(interval)
+	b.Size(0)
+	filters := b.Query().Bool().Filter()
+	filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)
+
+	if q.RawQuery != "" {
+		filters.AddQueryStringFilter(q.RawQuery, true)
+	}
+
+	if len(q.BucketAggs) == 0 {
+		if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
+			result.Results[q.RefID] = &tsdb.QueryResult{
+				RefId:       q.RefID,
+				Error:       fmt.Errorf("invalid query, missing metrics and aggregations"),
+				ErrorString: "invalid query, missing metrics and aggregations",
+			}
+			return nil
+		}
+		metric := q.Metrics[0]
+		b.Size(metric.Settings.Get("size").MustInt(500))
+		b.SortDesc("@timestamp", "boolean")
+		b.AddDocValueField("@timestamp")
+		return nil
+	}
 
 	aggBuilder := b.Agg()
 
 	// iterate backwards to create aggregations bottom-down
 	for _, bucketAgg := range q.BucketAggs {
 		switch bucketAgg.Type {
 		case dateHistType:
 			aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
 		case histogramType:
 			aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
 		case filtersType:
 			aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
 		case termsType:
 			aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
 		case geohashGridType:
 			aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
 		}
 	}
 
 	for _, m := range q.Metrics {
 		m := m
 		if m.Type == countType {
 			continue
 		}
 		if isPipelineAgg(m.Type) {
 			if isPipelineAggWithMultipleBucketPaths(m.Type) {
 				if len(m.PipelineVariables) > 0 {
 					bucketPaths := map[string]interface{}{}
 					for name, pipelineAgg := range m.PipelineVariables {
 						if _, err := strconv.Atoi(pipelineAgg); err == nil {
 							var appliedAgg *MetricAgg
 							for _, pipelineMetric := range q.Metrics {
 								if pipelineMetric.ID == pipelineAgg {
 									appliedAgg = pipelineMetric
 									break
 								}
 							}
 							if appliedAgg != nil {
 								if appliedAgg.Type == countType {
 									bucketPaths[name] = "_count"
 								} else {
 									bucketPaths[name] = pipelineAgg
 								}
 							}
 						}
 					}
 
 					aggBuilder.Pipeline(m.ID, m.Type, bucketPaths, func(a *es.PipelineAggregation) {
 						a.Settings = m.Settings.MustMap()
 					})
 				} else {
 					continue
 				}
 			} else {
 				if _, err := strconv.Atoi(m.PipelineAggregate); err == nil {
 					var appliedAgg *MetricAgg
 					for _, pipelineMetric := range q.Metrics {
 						if pipelineMetric.ID == m.PipelineAggregate {
 							appliedAgg = pipelineMetric
 							break
 						}
 					}
 					if appliedAgg != nil {
 						bucketPath := m.PipelineAggregate
 						if appliedAgg.Type == countType {
 							bucketPath = "_count"
 						}
 
 						aggBuilder.Pipeline(m.ID, m.Type, bucketPath, func(a *es.PipelineAggregation) {
 							a.Settings = m.Settings.MustMap()
 						})
 					}
 				} else {
 					continue
 				}
 			}
 		} else {
 			aggBuilder.Metric(m.ID, m.Type, m.Field, func(a *es.MetricAggregation) {
 				a.Settings = m.Settings.MustMap()
 			})
 		}
 	}
 
-	req, err := ms.Build()
-	if err != nil {
-		return nil, err
-	}
-
-	res, err := e.client.ExecuteMultisearch(req)
-	if err != nil {
-		return nil, err
-	}
-
-	rp := newResponseParser(res.Responses, queries, res.DebugInfo)
-	return rp.getTimeSeries()
+	return nil
 }
 
 func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
...
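Note: the elasticsearch change is the extract-method pattern: the body of execute's per-query loop moves into processQuery, so each continue becomes return nil and each return nil, err becomes return err, and execute's cyclomatic complexity drops accordingly. A minimal sketch of that control-flow translation, with generic hypothetical names:

package main

import "fmt"

// processItem plays the role of processQuery: what was one long loop body
// becomes a helper, so per-iteration early exits become returns and the
// caller handles the error in a single place.
func processItem(item int, out map[int]string) error {
	if item < 0 {
		return fmt.Errorf("invalid item %d", item) // was an inline error path
	}
	if item == 0 {
		return nil // was `continue` when inlined in the loop
	}
	out[item] = fmt.Sprintf("item-%d", item)
	return nil
}

func main() {
	out := map[int]string{}
	for _, item := range []int{1, 0, 2} {
		if err := processItem(item, out); err != nil {
			panic(err)
		}
	}
	fmt.Println(out) // map[1:item-1 2:item-2]
}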
@@ -276,45 +276,60 @@ func (e *sqlQueryEndpoint) transformToTable(query *tsdb.Query, rows *core.Rows,
 	return nil
 }
 
-func (e *sqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
-	pointsBySeries := make(map[string]*tsdb.TimeSeries)
-	seriesByQueryOrder := list.New()
-
+func newProcessCfg(query *tsdb.Query, tsdbQuery *tsdb.TsdbQuery, rows *core.Rows) (*processCfg, error) {
 	columnNames, err := rows.Columns()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	columnTypes, err := rows.ColumnTypes()
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	rowCount := 0
-	timeIndex := -1
-	metricIndex := -1
-	metricPrefix := false
-	var metricPrefixValue string
+	fillMissing := query.Model.Get("fill").MustBool(false)
+
+	cfg := &processCfg{
+		rowCount:           0,
+		columnTypes:        columnTypes,
+		columnNames:        columnNames,
+		rows:               rows,
+		timeIndex:          -1,
+		metricIndex:        -1,
+		metricPrefix:       false,
+		fillMissing:        fillMissing,
+		seriesByQueryOrder: list.New(),
+		pointsBySeries:     make(map[string]*tsdb.TimeSeries),
+		tsdbQuery:          tsdbQuery,
+	}
+	return cfg, nil
+}
+
+func (e *sqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult,
+	tsdbQuery *tsdb.TsdbQuery) error {
+	cfg, err := newProcessCfg(query, tsdbQuery, rows)
+	if err != nil {
+		return err
+	}
 
 	// check columns of resultset: a column named time is mandatory
 	// the first text column is treated as metric name unless a column named metric is present
-	for i, col := range columnNames {
+	for i, col := range cfg.columnNames {
 		for _, tc := range e.timeColumnNames {
 			if col == tc {
-				timeIndex = i
+				cfg.timeIndex = i
 				continue
 			}
 		}
 		switch col {
 		case "metric":
-			metricIndex = i
+			cfg.metricIndex = i
 		default:
-			if metricIndex == -1 {
-				columnType := columnTypes[i].DatabaseTypeName()
+			if cfg.metricIndex == -1 {
+				columnType := cfg.columnTypes[i].DatabaseTypeName()
 				for _, mct := range e.metricColumnTypes {
 					if columnType == mct {
-						metricIndex = i
+						cfg.metricIndex = i
 						continue
 					}
 				}
 			}
@@ -323,154 +338,179 @@ func (e *sqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.R
 	}
 
 	// use metric column as prefix with multiple value columns
-	if metricIndex != -1 && len(columnNames) > 3 {
-		metricPrefix = true
+	if cfg.metricIndex != -1 && len(cfg.columnNames) > 3 {
+		cfg.metricPrefix = true
 	}
 
-	if timeIndex == -1 {
+	if cfg.timeIndex == -1 {
 		return fmt.Errorf("Found no column named %s", strings.Join(e.timeColumnNames, " or "))
 	}
 
-	fillMissing := query.Model.Get("fill").MustBool(false)
-	var fillInterval float64
-	fillValue := null.Float{}
-	fillPrevious := false
-
-	if fillMissing {
-		fillInterval = query.Model.Get("fillInterval").MustFloat64() * 1000
+	if cfg.fillMissing {
+		cfg.fillInterval = query.Model.Get("fillInterval").MustFloat64() * 1000
 		switch query.Model.Get("fillMode").MustString() {
 		case "null":
 		case "previous":
-			fillPrevious = true
+			cfg.fillPrevious = true
 		case "value":
-			fillValue.Float64 = query.Model.Get("fillValue").MustFloat64()
-			fillValue.Valid = true
+			cfg.fillValue.Float64 = query.Model.Get("fillValue").MustFloat64()
+			cfg.fillValue.Valid = true
 		}
 	}
 
 	for rows.Next() {
-		var timestamp float64
-		var value null.Float
-		var metric string
-
-		if rowCount > rowLimit {
-			return fmt.Errorf("query row limit exceeded, limit %d", rowLimit)
-		}
-
-		values, err := e.queryResultTransformer.TransformQueryResult(columnTypes, rows)
-		if err != nil {
+		if err := e.processRow(cfg); err != nil {
 			return err
 		}
+	}
 
-		// converts column named time to unix timestamp in milliseconds to make
-		// native mysql datetime types and epoch dates work in
-		// annotation and table queries.
-		ConvertSqlTimeColumnToEpochMs(values, timeIndex)
-
-		switch columnValue := values[timeIndex].(type) {
-		case int64:
-			timestamp = float64(columnValue)
-		case float64:
-			timestamp = columnValue
-		default:
-			return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
-		}
-
-		if metricIndex >= 0 {
-			if columnValue, ok := values[metricIndex].(string); ok {
-				if metricPrefix {
-					metricPrefixValue = columnValue
-				} else {
-					metric = columnValue
-				}
-			} else {
-				return fmt.Errorf("Column metric must be of type %s. metric column name: %s type: %s but datatype is %T", strings.Join(e.metricColumnTypes, ", "), columnNames[metricIndex], columnTypes[metricIndex].DatabaseTypeName(), values[metricIndex])
-			}
-		}
-
-		for i, col := range columnNames {
-			if i == timeIndex || i == metricIndex {
-				continue
-			}
-
-			if value, err = ConvertSqlValueColumnToFloat(col, values[i]); err != nil {
-				return err
-			}
-
-			if metricIndex == -1 {
-				metric = col
-			} else if metricPrefix {
-				metric = metricPrefixValue + " " + col
-			}
-
-			series, exist := pointsBySeries[metric]
-			if !exist {
-				series = &tsdb.TimeSeries{Name: metric}
-				pointsBySeries[metric] = series
-				seriesByQueryOrder.PushBack(metric)
-			}
-
-			if fillMissing {
-				var intervalStart float64
-				if !exist {
-					intervalStart = float64(tsdbQuery.TimeRange.MustGetFrom().UnixNano() / 1e6)
-				} else {
-					intervalStart = series.Points[len(series.Points)-1][1].Float64 + fillInterval
-				}
-
-				if fillPrevious {
-					if len(series.Points) > 0 {
-						fillValue = series.Points[len(series.Points)-1][0]
-					} else {
-						fillValue.Valid = false
-					}
-				}
-
-				// align interval start
-				intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
-
-				for i := intervalStart; i < timestamp; i += fillInterval {
-					series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
-					rowCount++
-				}
-			}
-
-			series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})
-
-			if setting.Env == setting.DEV {
-				e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
-			}
-		}
-	}
-
-	for elem := seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
+	for elem := cfg.seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
 		key := elem.Value.(string)
-		result.Series = append(result.Series, pointsBySeries[key])
-
-		if fillMissing {
-			series := pointsBySeries[key]
-			// fill in values from last fetched value till interval end
-			intervalStart := series.Points[len(series.Points)-1][1].Float64
-			intervalEnd := float64(tsdbQuery.TimeRange.MustGetTo().UnixNano() / 1e6)
-
-			if fillPrevious {
-				if len(series.Points) > 0 {
-					fillValue = series.Points[len(series.Points)-1][0]
-				} else {
-					fillValue.Valid = false
-				}
-			}
-
-			// align interval start
-			intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
-			for i := intervalStart + fillInterval; i < intervalEnd; i += fillInterval {
-				series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
-				rowCount++
-			}
-		}
+		result.Series = append(result.Series, cfg.pointsBySeries[key])
+		if !cfg.fillMissing {
+			continue
+		}
+
+		series := cfg.pointsBySeries[key]
+		// fill in values from last fetched value till interval end
+		intervalStart := series.Points[len(series.Points)-1][1].Float64
+		intervalEnd := float64(tsdbQuery.TimeRange.MustGetTo().UnixNano() / 1e6)
+
+		if cfg.fillPrevious {
+			if len(series.Points) > 0 {
+				cfg.fillValue = series.Points[len(series.Points)-1][0]
+			} else {
+				cfg.fillValue.Valid = false
+			}
+		}
+
+		// align interval start
+		intervalStart = math.Floor(intervalStart/cfg.fillInterval) * cfg.fillInterval
+		for i := intervalStart + cfg.fillInterval; i < intervalEnd; i += cfg.fillInterval {
+			series.Points = append(series.Points, tsdb.TimePoint{cfg.fillValue, null.FloatFrom(i)})
+			cfg.rowCount++
+		}
 	}
 
-	result.Meta.Set("rowCount", rowCount)
+	result.Meta.Set("rowCount", cfg.rowCount)
 	return nil
 }
+
+type processCfg struct {
+	rowCount           int
+	columnTypes        []*sql.ColumnType
+	columnNames        []string
+	rows               *core.Rows
+	timeIndex          int
+	metricIndex        int
+	metricPrefix       bool
+	metricPrefixValue  string
+	fillMissing        bool
+	pointsBySeries     map[string]*tsdb.TimeSeries
+	seriesByQueryOrder *list.List
+	fillValue          null.Float
+	tsdbQuery          *tsdb.TsdbQuery
+	fillInterval       float64
+	fillPrevious       bool
+}
+
+func (e *sqlQueryEndpoint) processRow(cfg *processCfg) error {
+	var timestamp float64
+	var value null.Float
+	var metric string
+
+	if cfg.rowCount > rowLimit {
+		return fmt.Errorf("query row limit exceeded, limit %d", rowLimit)
+	}
+
+	values, err := e.queryResultTransformer.TransformQueryResult(cfg.columnTypes, cfg.rows)
+	if err != nil {
+		return err
+	}
+
+	// converts column named time to unix timestamp in milliseconds to make
+	// native mysql datetime types and epoch dates work in
+	// annotation and table queries.
+	ConvertSqlTimeColumnToEpochMs(values, cfg.timeIndex)
+
+	switch columnValue := values[cfg.timeIndex].(type) {
+	case int64:
+		timestamp = float64(columnValue)
+	case float64:
+		timestamp = columnValue
+	default:
+		return fmt.Errorf("invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v",
+			columnValue, columnValue)
+	}
+
+	if cfg.metricIndex >= 0 {
+		if columnValue, ok := values[cfg.metricIndex].(string); ok {
+			if cfg.metricPrefix {
+				cfg.metricPrefixValue = columnValue
+			} else {
+				metric = columnValue
+			}
+		} else {
+			return fmt.Errorf("column metric must be of type %s. metric column name: %s type: %s but datatype is %T",
+				strings.Join(e.metricColumnTypes, ", "), cfg.columnNames[cfg.metricIndex],
+				cfg.columnTypes[cfg.metricIndex].DatabaseTypeName(), values[cfg.metricIndex])
+		}
+	}
+
+	for i, col := range cfg.columnNames {
+		if i == cfg.timeIndex || i == cfg.metricIndex {
+			continue
+		}
+
+		if value, err = ConvertSqlValueColumnToFloat(col, values[i]); err != nil {
+			return err
+		}
+
+		if cfg.metricIndex == -1 {
+			metric = col
+		} else if cfg.metricPrefix {
+			metric = cfg.metricPrefixValue + " " + col
+		}
+
+		series, exist := cfg.pointsBySeries[metric]
+		if !exist {
+			series = &tsdb.TimeSeries{Name: metric}
+			cfg.pointsBySeries[metric] = series
+			cfg.seriesByQueryOrder.PushBack(metric)
+		}
+
+		if cfg.fillMissing {
+			var intervalStart float64
+			if !exist {
+				intervalStart = float64(cfg.tsdbQuery.TimeRange.MustGetFrom().UnixNano() / 1e6)
+			} else {
+				intervalStart = series.Points[len(series.Points)-1][1].Float64 + cfg.fillInterval
+			}
+
+			if cfg.fillPrevious {
+				if len(series.Points) > 0 {
+					cfg.fillValue = series.Points[len(series.Points)-1][0]
+				} else {
+					cfg.fillValue.Valid = false
+				}
+			}
+
+			// align interval start
+			intervalStart = math.Floor(intervalStart/cfg.fillInterval) * cfg.fillInterval
+
+			for i := intervalStart; i < timestamp; i += cfg.fillInterval {
+				series.Points = append(series.Points, tsdb.TimePoint{cfg.fillValue, null.FloatFrom(i)})
+				cfg.rowCount++
+			}
+		}
+
+		series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})
+
+		if setting.Env == setting.DEV {
+			e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
+		}
+	}
+
+	return nil
+}
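Note: the SQL engine takes the parameter-object route: the locals shared between column detection, the row loop, and gap filling move into processCfg, so processRow takes a single pointer instead of a long argument list. A minimal sketch of the pattern with generic, hypothetical names:

package main

import "fmt"

// rowState plays the role of processCfg: mutable state that several
// processing steps share, passed as one pointer instead of many locals.
type rowState struct {
	rowCount  int
	timeIndex int
	series    map[string][]float64
}

func processRow(st *rowState, row []float64) error {
	if st.timeIndex >= len(row) {
		return fmt.Errorf("row has no time column at index %d", st.timeIndex)
	}
	st.series["default"] = append(st.series["default"], row[st.timeIndex])
	st.rowCount++
	return nil
}

func main() {
	st := &rowState{timeIndex: 0, series: map[string][]float64{}}
	for _, row := range [][]float64{{1, 10}, {2, 20}} {
		if err := processRow(st, row); err != nil {
			panic(err)
		}
	}
	fmt.Println(st.rowCount, st.series) // 2 map[default:[1 2]]
}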
@@ -526,6 +566,7 @@ func ConvertSqlTimeColumnToEpochMs(values tsdb.RowValues, timeIndex int) {
 }
 
 // ConvertSqlValueColumnToFloat converts timeseries value column to float.
+//nolint: gocyclo
 func ConvertSqlValueColumnToFloat(columnName string, columnValue interface{}) (null.Float, error) {
 	var value null.Float
...
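Note: the added //nolint: gocyclo directive tells golangci-lint to skip the cyclomatic-complexity check for the function directly below it; here it exempts ConvertSqlValueColumnToFloat, whose large type switch is inherent to its job. A minimal sketch of scoping the directive to one function (classify is a hypothetical example, not from the codebase):

package lintdemo

// classify has many branches by design; the directive below keeps
// golangci-lint's gocyclo checker from flagging just this function.
//nolint: gocyclo
func classify(v interface{}) string {
	switch v.(type) {
	case int, int8, int16, int32, int64:
		return "int"
	case uint, uint8, uint16, uint32, uint64:
		return "uint"
	case float32, float64:
		return "float"
	case string:
		return "string"
	case bool:
		return "bool"
	default:
		return "other"
	}
}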