Kornkitt Poolsup / nexpie-grafana-theme / Commits

Commit 7896836d (unverified)
Authored Aug 18, 2020 by Arve Knudsen; committed by GitHub on Aug 18, 2020
Chore: Reduce TSDB Go code complexity (#26401)
* tsdb: Make code less complex
parent 6145bf77
Showing 4 changed files with 191 additions and 176 deletions:

- pkg/tsdb/cloudmonitoring/cloudmonitoring.go (+34, -29)
- pkg/tsdb/cloudmonitoring/types.go (+43, -41)
- pkg/tsdb/elasticsearch/time_series_query.go (+114, -106)
- pkg/tsdb/sqleng/sql_engine.go (+0, -0)
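The two non-trivial diffs follow the same pattern: a long loop body full of branches is lifted out of its enclosing function into a named helper (handleDistributionSeries in cloudmonitoring.go, processQuery in time_series_query.go), which lowers each function's cyclomatic complexity and makes the pieces independently testable. A minimal sketch of the pattern; sumPositives and addIfPositive are illustrative names, not from the commit:

```go
package main

import "fmt"

// Before this kind of refactor, the loop body's branches all count
// toward the enclosing function's cyclomatic complexity. Extracting
// the body into a named helper splits that complexity across two
// small functions. (Illustrative example, not code from the commit.)
func addIfPositive(total, n int) int {
	if n > 0 {
		return total + n
	}
	return total
}

func sumPositives(nums []int) int {
	total := 0
	for _, n := range nums {
		total = addIfPositive(total, n) // branching now lives in the helper
	}
	return total
}

func main() {
	fmt.Println(sumPositives([]int{3, -1, 4})) // 7
}
```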
pkg/tsdb/cloudmonitoring/cloudmonitoring.go (+34, -29)

```diff
@@ -502,11 +502,43 @@ func (e *CloudMonitoringExecutor) unmarshalResponse(res *http.Response) (cloudMo
 	return data, nil
 }
 
+func handleDistributionSeries(series timeSeries, defaultMetricName string, seriesLabels map[string]string,
+	query *cloudMonitoringQuery, queryRes *tsdb.QueryResult) {
+	points := make([]tsdb.TimePoint, 0)
+	for i := len(series.Points) - 1; i >= 0; i-- {
+		point := series.Points[i]
+		value := point.Value.DoubleValue
+
+		if series.ValueType == "INT64" {
+			parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
+			if err == nil {
+				value = parsedValue
+			}
+		}
+
+		if series.ValueType == "BOOL" {
+			if point.Value.BoolValue {
+				value = 1
+			} else {
+				value = 0
+			}
+		}
+
+		points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+	}
+
+	metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
+
+	queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
+		Name:   metricName,
+		Points: points,
+	})
+}
+
 func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data cloudMonitoringResponse, query *cloudMonitoringQuery) error {
 	labels := make(map[string]map[string]bool)
 
 	for _, series := range data.TimeSeries {
 		points := make([]tsdb.TimePoint, 0)
 		seriesLabels := make(map[string]string)
 		defaultMetricName := series.Metric.Type
 		labels["resource.type"] = map[string]bool{series.Resource.Type: true}
```
```diff
@@ -566,34 +598,7 @@ func (e *CloudMonitoringExecutor) parseResponse(queryRes *tsdb.QueryResult, data
 		// reverse the order to be ascending
 		if series.ValueType != "DISTRIBUTION" {
-			for i := len(series.Points) - 1; i >= 0; i-- {
-				point := series.Points[i]
-				value := point.Value.DoubleValue
-
-				if series.ValueType == "INT64" {
-					parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
-					if err == nil {
-						value = parsedValue
-					}
-				}
-
-				if series.ValueType == "BOOL" {
-					if point.Value.BoolValue {
-						value = 1
-					} else {
-						value = 0
-					}
-				}
-
-				points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
-			}
-
-			metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, seriesLabels, nil, query)
-
-			queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
-				Name:   metricName,
-				Points: points,
-			})
+			handleDistributionSeries(series, defaultMetricName, seriesLabels, query, queryRes)
 		} else {
 			buckets := make(map[int]*tsdb.TimeSeries)
```
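For reference, the per-point value conversion that handleDistributionSeries centralizes can be exercised in isolation: DOUBLE values are used as-is, INT64 values arrive as decimal strings, and BOOL maps to 1 or 0. A minimal, self-contained sketch of that branching; convertPointValue is an illustrative name, not part of the commit:

```go
package main

import (
	"fmt"
	"strconv"
)

// convertPointValue mirrors the branching in handleDistributionSeries:
// the double value is the default, INT64 is parsed from its string
// encoding, and BOOL becomes 1 or 0. (Illustrative helper only.)
func convertPointValue(valueType string, doubleValue float64, intValue string, boolValue bool) float64 {
	value := doubleValue
	if valueType == "INT64" {
		if parsed, err := strconv.ParseFloat(intValue, 64); err == nil {
			value = parsed
		}
	}
	if valueType == "BOOL" {
		if boolValue {
			value = 1
		} else {
			value = 0
		}
	}
	return value
}

func main() {
	fmt.Println(convertPointValue("INT64", 0, "42", false))   // 42
	fmt.Println(convertPointValue("BOOL", 0, "", true))       // 1
	fmt.Println(convertPointValue("DOUBLE", 3.5, "", false))  // 3.5
}
```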
pkg/tsdb/cloudmonitoring/types.go (+43, -41)

```diff
@@ -65,46 +65,48 @@ type (
 	}
 
 	cloudMonitoringResponse struct {
-		TimeSeries []struct {
-			Metric struct {
-				Labels map[string]string `json:"labels"`
-				Type   string            `json:"type"`
-			} `json:"metric"`
-			Resource struct {
-				Type   string            `json:"type"`
-				Labels map[string]string `json:"labels"`
-			} `json:"resource"`
-			MetaData   map[string]map[string]interface{} `json:"metadata"`
-			MetricKind string                            `json:"metricKind"`
-			ValueType  string                            `json:"valueType"`
-			Points     []struct {
-				Interval struct {
-					StartTime time.Time `json:"startTime"`
-					EndTime   time.Time `json:"endTime"`
-				} `json:"interval"`
-				Value struct {
-					DoubleValue       float64 `json:"doubleValue"`
-					StringValue       string  `json:"stringValue"`
-					BoolValue         bool    `json:"boolValue"`
-					IntValue          string  `json:"int64Value"`
-					DistributionValue struct {
-						Count                 string  `json:"count"`
-						Mean                  float64 `json:"mean"`
-						SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
-						Range                 struct {
-							Min int `json:"min"`
-							Max int `json:"max"`
-						} `json:"range"`
-						BucketOptions cloudMonitoringBucketOptions `json:"bucketOptions"`
-						BucketCounts  []string                     `json:"bucketCounts"`
-						Examplars     []struct {
-							Value     float64 `json:"value"`
-							Timestamp string  `json:"timestamp"`
-							// attachments
-						} `json:"examplars"`
-					} `json:"distributionValue"`
-				} `json:"value"`
-			} `json:"points"`
-		} `json:"timeSeries"`
+		TimeSeries []timeSeries `json:"timeSeries"`
 	}
 )
+
+type timeSeries struct {
+	Metric struct {
+		Labels map[string]string `json:"labels"`
+		Type   string            `json:"type"`
+	} `json:"metric"`
+	Resource struct {
+		Type   string            `json:"type"`
+		Labels map[string]string `json:"labels"`
+	} `json:"resource"`
+	MetaData   map[string]map[string]interface{} `json:"metadata"`
+	MetricKind string                            `json:"metricKind"`
+	ValueType  string                            `json:"valueType"`
+	Points     []struct {
+		Interval struct {
+			StartTime time.Time `json:"startTime"`
+			EndTime   time.Time `json:"endTime"`
+		} `json:"interval"`
+		Value struct {
+			DoubleValue       float64 `json:"doubleValue"`
+			StringValue       string  `json:"stringValue"`
+			BoolValue         bool    `json:"boolValue"`
+			IntValue          string  `json:"int64Value"`
+			DistributionValue struct {
+				Count                 string  `json:"count"`
+				Mean                  float64 `json:"mean"`
+				SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation"`
+				Range                 struct {
+					Min int `json:"min"`
+					Max int `json:"max"`
+				} `json:"range"`
+				BucketOptions cloudMonitoringBucketOptions `json:"bucketOptions"`
+				BucketCounts  []string                     `json:"bucketCounts"`
+				Examplars     []struct {
+					Value     float64 `json:"value"`
+					Timestamp string  `json:"timestamp"`
+					// attachments
+				} `json:"examplars"`
+			} `json:"distributionValue"`
+		} `json:"value"`
+	} `json:"points"`
+}
```
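The named timeSeries type also makes the API contract easier to see: Cloud Monitoring encodes int64 point values as JSON strings, which is why IntValue is declared string and later goes through strconv.ParseFloat. A minimal, self-contained sketch with a trimmed-down copy of the struct and an illustrative payload (not real API output):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// A trimmed-down copy of the commit's timeSeries type, keeping only the
// fields this sketch needs; the real type carries many more.
type timeSeries struct {
	Metric struct {
		Type string `json:"type"`
	} `json:"metric"`
	ValueType string `json:"valueType"`
	Points    []struct {
		Interval struct {
			EndTime time.Time `json:"endTime"`
		} `json:"interval"`
		Value struct {
			IntValue string `json:"int64Value"`
		} `json:"value"`
	} `json:"points"`
}

func main() {
	// Illustrative payload shaped like a Cloud Monitoring time series;
	// note the int64 point value encoded as a string.
	payload := []byte(`{
		"metric": {"type": "compute.googleapis.com/instance/uptime"},
		"valueType": "INT64",
		"points": [{
			"interval": {"endTime": "2020-08-18T00:00:00Z"},
			"value": {"int64Value": "42"}
		}]
	}`)

	var ts timeSeries
	if err := json.Unmarshal(payload, &ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Metric.Type, ts.ValueType, ts.Points[0].Value.IntValue)
}
```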
pkg/tsdb/elasticsearch/time_series_query.go (+114, -106)

```diff
@@ -24,9 +24,6 @@ var newTimeSeriesQuery = func(client es.Client, tsdbQuery *tsdb.TsdbQuery, inter
 }
 
 func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
-	result := &tsdb.Response{}
-	result.Results = make(map[string]*tsdb.QueryResult)
-
 	tsQueryParser := newTimeSeriesQueryParser()
 	queries, err := tsQueryParser.parse(e.tsdbQuery)
 	if err != nil {
```

In the second hunk (@@ -37,135 +34,146 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {), the per-query body of execute() is extracted into a new processQuery method. After the change, the remainder of execute() and the new method read:

```go
	from := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetFromAsMsEpoch())
	to := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetToAsMsEpoch())
	result := &tsdb.Response{
		Results: make(map[string]*tsdb.QueryResult),
	}
	for _, q := range queries {
		if err := e.processQuery(q, ms, from, to, result); err != nil {
			return nil, err
		}
	}

	req, err := ms.Build()
	if err != nil {
		return nil, err
	}

	res, err := e.client.ExecuteMultisearch(req)
	if err != nil {
		return nil, err
	}

	rp := newResponseParser(res.Responses, queries, res.DebugInfo)
	return rp.getTimeSeries()
}

func (e *timeSeriesQuery) processQuery(q *Query, ms *es.MultiSearchRequestBuilder, from, to string,
	result *tsdb.Response) error {
	minInterval, err := e.client.GetMinInterval(q.Interval)
	if err != nil {
		return err
	}
	interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)

	b := ms.Search(interval)
	b.Size(0)
	filters := b.Query().Bool().Filter()
	filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)

	if q.RawQuery != "" {
		filters.AddQueryStringFilter(q.RawQuery, true)
	}

	if len(q.BucketAggs) == 0 {
		if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
			result.Results[q.RefID] = &tsdb.QueryResult{
				RefId:       q.RefID,
				Error:       fmt.Errorf("invalid query, missing metrics and aggregations"),
				ErrorString: "invalid query, missing metrics and aggregations",
			}
			return nil
		}
		metric := q.Metrics[0]
		b.Size(metric.Settings.Get("size").MustInt(500))
		b.SortDesc("@timestamp", "boolean")
		b.AddDocValueField("@timestamp")
		return nil
	}

	aggBuilder := b.Agg()

	// iterate backwards to create aggregations bottom-down
	for _, bucketAgg := range q.BucketAggs {
		switch bucketAgg.Type {
		case dateHistType:
			aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
		case histogramType:
			aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
		case filtersType:
			aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
		case termsType:
			aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
		case geohashGridType:
			aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
		}
	}

	for _, m := range q.Metrics {
		m := m
		if m.Type == countType {
			continue
		}

		if isPipelineAgg(m.Type) {
			if isPipelineAggWithMultipleBucketPaths(m.Type) {
				if len(m.PipelineVariables) > 0 {
					bucketPaths := map[string]interface{}{}
					for name, pipelineAgg := range m.PipelineVariables {
						if _, err := strconv.Atoi(pipelineAgg); err == nil {
							var appliedAgg *MetricAgg
							for _, pipelineMetric := range q.Metrics {
								if pipelineMetric.ID == pipelineAgg {
									appliedAgg = pipelineMetric
									break
								}
							}
							if appliedAgg != nil {
								if appliedAgg.Type == countType {
									bucketPaths[name] = "_count"
								} else {
									bucketPaths[name] = pipelineAgg
								}
							}
						}
					}

					aggBuilder.Pipeline(m.ID, m.Type, bucketPaths, func(a *es.PipelineAggregation) {
						a.Settings = m.Settings.MustMap()
					})
				} else {
					continue
				}
			} else {
				if _, err := strconv.Atoi(m.PipelineAggregate); err == nil {
					var appliedAgg *MetricAgg
					for _, pipelineMetric := range q.Metrics {
						if pipelineMetric.ID == m.PipelineAggregate {
							appliedAgg = pipelineMetric
							break
						}
					}
					if appliedAgg != nil {
						bucketPath := m.PipelineAggregate
						if appliedAgg.Type == countType {
							bucketPath = "_count"
						}

						aggBuilder.Pipeline(m.ID, m.Type, bucketPath, func(a *es.PipelineAggregation) {
							a.Settings = m.Settings.MustMap()
						})
					}
				} else {
					continue
				}
			}
		} else {
			aggBuilder.Metric(m.ID, m.Type, m.Field, func(a *es.MetricAggregation) {
				a.Settings = m.Settings.MustMap()
			})
		}
	}

	return nil
}

func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
	// ... (unchanged below this point)
```
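One detail worth noting in processQuery: the metrics loop re-declares its variable with m := m before handing closures such as func(a *es.MetricAggregation) { a.Settings = m.Settings.MustMap() } to the builder. Before Go 1.22, all iterations of a range loop shared a single loop variable, so a closure that ran after the iteration ended could observe the wrong metric. A minimal sketch of the hazard and the fix:

```go
package main

import "fmt"

func main() {
	nums := []int{1, 2, 3}

	var bad []func() int
	for _, n := range nums {
		bad = append(bad, func() int { return n }) // captures the shared variable
	}

	var good []func() int
	for _, n := range nums {
		n := n // per-iteration copy, the same idiom as processQuery's "m := m"
		good = append(good, func() int { return n })
	}

	// On Go <= 1.21 this prints "3 3 3" for bad and "1 2 3" for good;
	// Go 1.22 changed range loops to scope the variable per iteration.
	for _, f := range bad {
		fmt.Print(f(), " ")
	}
	fmt.Println()
	for _, f := range good {
		fmt.Print(f(), " ")
	}
	fmt.Println()
}
```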
pkg/tsdb/sqleng/sql_engine.go (+0, -0)

This diff is collapsed.