nexpie-grafana-theme / Commits / 2cf797b5

Commit 2cf797b5 authored Jun 03, 2016 by bergquist

tech(alerting): minor refactoring and code style

parent 65eb3738

Showing 3 changed files with 28 additions and 42 deletions:

    pkg/models/alerts.go                         +9  -5
    pkg/services/alerting/alert_rule_reader.go   +3  -25
    pkg/services/alerting/alerting.go            +16 -12
pkg/models/alerts.go

@@ -110,11 +110,11 @@ type GetAlertChangesQuery struct {
 }
 
 type AlertJob struct {
-	Offset  int64
-	Delay   bool
-	Running bool
-	Retry   int
-	Rule    AlertRule
+	Offset     int64
+	Delay      bool
+	Running    bool
+	RetryCount int
+	Rule       AlertRule
 }
 
 type AlertResult struct {
@@ -125,3 +125,7 @@ type AlertResult struct {
 	Description string
 	AlertJob    *AlertJob
 }
+
+func (ar *AlertResult) IsResultIncomplete() bool {
+	return ar.State == AlertStatePending
+}
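The renamed RetryCount field and the new IsResultIncomplete helper move the pending-state check behind a method. A self-contained sketch of how a caller would use it; the package layout and state constants here are illustrative, not the real pkg/models:

package main

import "fmt"

// Illustrative state constants; the real ones live in pkg/models.
const (
	AlertStatePending  = "PENDING"
	AlertStateCritical = "CRITICAL"
)

type AlertResult struct {
	Id    int64
	State string
}

// IsResultIncomplete reports whether the check is still pending and may
// need another run, mirroring the helper added in pkg/models/alerts.go.
func (ar *AlertResult) IsResultIncomplete() bool {
	return ar.State == AlertStatePending
}

func main() {
	r := &AlertResult{Id: 1, State: AlertStatePending}
	if r.IsResultIncomplete() {
		fmt.Println("alert 1 is still pending; schedule a retry")
	}
}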
pkg/services/alerting/alert_rule_reader.go

@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/grafana/pkg/bus"
+	"github.com/grafana/grafana/pkg/log"
 	m "github.com/grafana/grafana/pkg/models"
 )
 
@@ -51,31 +52,6 @@ func (arr *AlertRuleReader) updateRules() {
 	arr.Lock()
 	defer arr.Unlock()
 
-	/*
-		rules = []m.AlertRule{
-			//{Id: 1, Title: "alert rule 1", Interval: "10s", Frequency: 10},
-			//{Id: 2, Title: "alert rule 2", Interval: "10s", Frequency: 10},
-			//{Id: 3, Title: "alert rule 3", Interval: "10s", Frequency: 10},
-			//{Id: 4, Title: "alert rule 4", Interval: "10s", Frequency: 5},
-			//{Id: 5, Title: "alert rule 5", Interval: "10s", Frequency: 5},
-			{
-				Id:           1,
-				OrgId:        1,
-				Title:        "alert rule 1",
-				Frequency:    3,
-				DatasourceId: 1,
-				WarnOperator: ">",
-				WarnLevel:    3,
-				CritOperator: ">",
-				CritLevel:    4,
-				Aggregator:   "avg",
-				//Query: `{"refId":"A","target":"statsd.fakesite.counters.session_start.*.count","textEditor":true}"`,
-				Query:      `{"hide":false,"refId":"A","target":"aliasByNode(statsd.fakesite.counters.session_start.*.count, 4)","textEditor":false}`,
-				QueryRange: 3600,
-			},
-		}
-	*/
 	cmd := &m.GetAlertsQuery{
 		OrgId: 1,
 	}
@@ -83,6 +59,8 @@ func (arr *AlertRuleReader) updateRules() {
 	if err == nil {
 		alertJobs = cmd.Result
+	} else {
+		log.Error(1, "AlertRuleReader: Could not load alerts")
 	}
 }
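The only behavioural change in this file is that a failed GetAlertsQuery is now logged instead of silently ignored. A minimal sketch of that dispatch-and-log pattern, using a hypothetical dispatch stand-in for grafana's bus.Dispatch and the standard library logger in place of pkg/log:

package main

import (
	"errors"
	"log"
)

// GetAlertsQuery mimics the command object dispatched over the bus;
// the handler fills Result on success.
type GetAlertsQuery struct {
	OrgId  int64
	Result []string
}

// dispatch is a hypothetical stand-in for bus.Dispatch; it fails here so
// the new error branch is exercised.
func dispatch(q *GetAlertsQuery) error {
	return errors.New("database unavailable")
}

func main() {
	var alertJobs []string

	cmd := &GetAlertsQuery{OrgId: 1}
	if err := dispatch(cmd); err == nil {
		alertJobs = cmd.Result
	} else {
		// Same intent as log.Error(1, "AlertRuleReader: Could not load alerts").
		log.Println("AlertRuleReader: Could not load alerts:", err)
	}

	log.Println("loaded", len(alertJobs), "alert jobs")
}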
pkg/services/alerting/alerting.go

@@ -74,8 +74,8 @@ func (scheduler *Scheduler) updateJobs(alertRuleFn func() []m.AlertRule) {
 			job = scheduler.jobs[rule.Id]
 		} else {
 			job = &m.AlertJob{
-				Running: false,
-				Retry:   0,
+				Running:    false,
+				RetryCount: 0,
 			}
 		}
 
@@ -110,24 +110,28 @@ func (scheduler *Scheduler) executor(executor Executor) {
 
 func (scheduler *Scheduler) handleResponses() {
 	for response := range scheduler.responseQueue {
-		log.Info("Response: alert(%d) status(%s) actual(%v) retry(%d) running(%v)", response.Id, response.State, response.ActualValue, response.AlertJob.Retry, response.AlertJob.Running)
+		log.Info("Response: alert(%d) status(%s) actual(%v) retry(%d)", response.Id, response.State, response.ActualValue, response.AlertJob.RetryCount)
 		response.AlertJob.Running = false
 
-		if response.State == m.AlertStatePending {
-			response.AlertJob.Retry++
-			if response.AlertJob.Retry > maxRetries {
-				response.State = m.AlertStateCritical
-				response.Description = fmt.Sprintf("Failed to run check after %d retires", maxRetries)
-				scheduler.saveState(response)
+		if response.IsResultIncomplete() {
+			response.AlertJob.RetryCount++
+			if response.AlertJob.RetryCount < maxRetries {
+				scheduler.runQueue <- response.AlertJob
+			} else {
+				saveState(&m.AlertResult{
+					Id:          response.Id,
+					State:       m.AlertStateCritical,
+					Description: fmt.Sprintf("Failed to run check after %d retires", maxRetries),
+				})
 			}
 		} else {
-			response.AlertJob.Retry = 0
-			scheduler.saveState(response)
+			response.AlertJob.RetryCount = 0
+			saveState(response)
 		}
 	}
 }
 
-func (scheduler *Scheduler) saveState(response *m.AlertResult) {
+func saveState(response *m.AlertResult) {
 	cmd := &m.UpdateAlertStateCommand{
 		AlertId:  response.Id,
 		NewState: response.State,
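Taken together, handleResponses now keys its retry logic on IsResultIncomplete and RetryCount: incomplete results are re-queued until maxRetries is reached, after which a critical result is saved through the now package-level saveState. A runnable sketch of that control flow, with stand-in types and a buffered channel in place of the real Scheduler and run queue:

package main

import "fmt"

const maxRetries = 3 // assumed limit; the real value is defined in the alerting package

type AlertJob struct {
	Running    bool
	RetryCount int
}

type AlertResult struct {
	Id          int64
	State       string
	Description string
	AlertJob    *AlertJob
}

func (ar *AlertResult) IsResultIncomplete() bool { return ar.State == "PENDING" }

// saveState stands in for the package-level saveState that persists the result.
func saveState(r *AlertResult) {
	fmt.Printf("save alert(%d) state(%s) %s\n", r.Id, r.State, r.Description)
}

func main() {
	runQueue := make(chan *AlertJob, 1)
	response := &AlertResult{Id: 1, State: "PENDING", AlertJob: &AlertJob{Running: true}}

	// One pass over the body of the handleResponses loop.
	response.AlertJob.Running = false
	if response.IsResultIncomplete() {
		response.AlertJob.RetryCount++
		if response.AlertJob.RetryCount < maxRetries {
			runQueue <- response.AlertJob // run the check again
		} else {
			saveState(&AlertResult{
				Id:          response.Id,
				State:       "CRITICAL",
				Description: fmt.Sprintf("Failed to run check after %d retries", maxRetries),
			})
		}
	} else {
		response.AlertJob.RetryCount = 0
		saveState(response)
	}

	fmt.Println("jobs waiting on the run queue:", len(runQueue))
}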