Commit 07582a8e authored by Arve Knudsen, committed by GitHub

Chore: Fix various spelling errors in back-end code (#25241)

* Chore: Fix various spelling errors in back-end code
Co-authored-by: Sofia Papagiannaki <papagian@users.noreply.github.com>
Co-authored-by: Josh Soref <jsoref@users.noreply.github.com>
parent 13787294
......@@ -15,7 +15,7 @@ For more information on developing for the backend:
| /pkg/cmd | The binaries that we build: grafana-server and grafana-cli. |
| /pkg/components | A mix of third-party packages and packages we have implemented ourselves. Includes our packages that have out-grown the util package and don't naturally belong somewhere else. |
| /pkg/infra | Packages in infra should be packages that are used in multiple places in Grafana without knowing anything about the Grafana domain. |
| /pkg/services | Packages in services are responsible for peristing domain objects and manage the relationship between domain objects. Services should communicate with each other using DI when possible. Most of Grafana's codebase still relies on global state for this. Any new features going forward should use DI. |
| /pkg/services | Packages in services are responsible for persisting domain objects and manage the relationship between domain objects. Services should communicate with each other using DI when possible. Most of Grafana's codebase still relies on global state for this. Any new features going forward should use DI. |
| /pkg/tsdb | All backend implementations of the data sources in Grafana. Used by both Grafana's frontend and alerting. |
| /pkg/util | Small helper functions that are used in multiple parts of the codebase. Many functions are placed directly in the util folders which is something we want to avoid. Its better to give the util function a more descriptive package name. Ex `errutil`. |
......
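The `errutil` example in the table above illustrates the convention of giving a helper a small, descriptively named package instead of a catch-all `util`. A minimal sketch of what such a package might contain; the `Wrap` helper and its signature are illustrative assumptions, not necessarily Grafana's actual API:

```go
// Package errutil is a sketch of a small, descriptively named helper package.
package errutil

import "fmt"

// Wrap annotates err with a message while preserving the original error
// for errors.Is / errors.As via the %w verb.
func Wrap(message string, err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("%s: %w", message, err)
}
```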
......@@ -5,7 +5,7 @@ import (
"github.com/grafana/grafana/pkg/models"
)
func (server *HTTPServer) AdminProvisioningReloadDasboards(c *models.ReqContext) Response {
func (server *HTTPServer) AdminProvisioningReloadDashboards(c *models.ReqContext) Response {
err := server.ProvisioningService.ProvisionDashboards()
if err != nil && err != context.Canceled {
return Error(500, "", err)
......
......@@ -403,7 +403,7 @@ func (hs *HTTPServer) registerRoutes() {
adminRoute.Get("/users/:id/auth-tokens", Wrap(hs.AdminGetUserAuthTokens))
adminRoute.Post("/users/:id/revoke-auth-token", bind(models.RevokeAuthTokenCmd{}), Wrap(hs.AdminRevokeUserAuthToken))
adminRoute.Post("/provisioning/dashboards/reload", Wrap(hs.AdminProvisioningReloadDasboards))
adminRoute.Post("/provisioning/dashboards/reload", Wrap(hs.AdminProvisioningReloadDashboards))
adminRoute.Post("/provisioning/datasources/reload", Wrap(hs.AdminProvisioningReloadDatasources))
adminRoute.Post("/provisioning/notifications/reload", Wrap(hs.AdminProvisioningReloadNotifications))
adminRoute.Post("/ldap/reload", Wrap(hs.ReloadLDAPCfg))
......
......@@ -169,7 +169,7 @@ func TestFolderPermissionApiEndpoint(t *testing.T) {
})
})
Convey("When trying to override inherited permissions with lower presedence", func() {
Convey("When trying to override inherited permissions with lower precedence", func() {
origNewGuardian := guardian.New
guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{
CanAdminValue: true,
......
......@@ -30,7 +30,7 @@ func (hs *HTTPServer) setIndexViewData(c *models.ReqContext) (*dtos.IndexViewDat
}
prefs := prefsQuery.Result
// Read locale from acccept-language
// Read locale from accept-language
acceptLang := c.Req.Header.Get("Accept-Language")
locale := "en-US"
......
......@@ -309,7 +309,7 @@ func (server *HTTPServer) GetUserFromLDAP(c *models.ReqContext) Response {
err = u.FetchOrgs()
if err != nil {
return Error(http.StatusBadRequest, "An oganization was not found - Please verify your LDAP configuration", err)
return Error(http.StatusBadRequest, "An organization was not found - Please verify your LDAP configuration", err)
}
cmd := &models.GetTeamsForLDAPGroupCommand{Groups: user.Groups}
......
......@@ -146,7 +146,7 @@ func TestGetUserFromLDAPApiEndpoint_OrgNotfound(t *testing.T) {
expected := `
{
"error": "Unable to find organization with ID '2'",
"message": "An oganization was not found - Please verify your LDAP configuration"
"message": "An organization was not found - Please verify your LDAP configuration"
}
`
assert.JSONEq(t, expected, sc.resp.Body.String())
......
......@@ -40,7 +40,7 @@ type Bus interface {
AddEventListener(handler HandlerFunc)
// SetTransactionManager allows the user to replace the internal
// noop TransactionManager that is responsible for manageing
// noop TransactionManager that is responsible for managing
// transactions in `InTransaction`
SetTransactionManager(tm TransactionManager)
}
......
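For context on the hunk above: the internal noop TransactionManager simply runs the callback with no transactional wrapping. A minimal sketch, assuming `TransactionManager` exposes a single `InTransaction(ctx, fn)` method (the exact Grafana signature may differ):

```go
package bus

import "context"

// TransactionManager is assumed here to run a unit of work inside a transaction.
type TransactionManager interface {
	InTransaction(ctx context.Context, fn func(ctx context.Context) error) error
}

// noopTransactionManager is the kind of default described in the comment:
// it provides no transactional guarantees and just invokes the callback.
type noopTransactionManager struct{}

func (noopTransactionManager) InTransaction(ctx context.Context, fn func(ctx context.Context) error) error {
	return fn(ctx)
}
```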
......@@ -119,7 +119,7 @@ var adminCommands = []*cli.Command{
{
Name: "encrypt-datasource-passwords",
Usage: "Migrates passwords from unsecured fields to secure_json_data field. Return ok unless there is an error. Safe to execute multiple times.",
Action: runDbCommand(datamigrations.EncryptDatasourcePaswords),
Action: runDbCommand(datamigrations.EncryptDatasourcePasswords),
},
},
},
......
......@@ -25,9 +25,9 @@ var (
}
)
// EncryptDatasourcePaswords migrates un-encrypted secrets on datasources
// EncryptDatasourcePasswords migrates unencrypted secrets on datasources
// to the secureJson Column.
func EncryptDatasourcePaswords(c utils.CommandLine, sqlStore *sqlstore.SqlStore) error {
func EncryptDatasourcePasswords(c utils.CommandLine, sqlStore *sqlstore.SqlStore) error {
return sqlStore.WithDbSession(context.Background(), func(session *sqlstore.DBSession) error {
passwordsUpdated, err := migrateColumn(session, "password")
if err != nil {
......
......@@ -39,22 +39,22 @@ func TestPasswordMigrationCommand(t *testing.T) {
}
_, err := session.Insert(&datasources)
assert.Nil(t, err)
require.NoError(t, err)
// force secure_json_data to be null to verify that migration can handle that
_, err = session.Exec("update data_source set secure_json_data = null where name = 'influxdb'")
assert.Nil(t, err)
require.NoError(t, err)
//run migration
c, err := commandstest.NewCliContext(map[string]string{})
require.Nil(t, err)
err = EncryptDatasourcePaswords(c, sqlstore)
assert.Nil(t, err)
err = EncryptDatasourcePasswords(c, sqlstore)
require.NoError(t, err)
//verify that no datasources still have password or basic_auth
var dss []*models.DataSource
err = session.SQL("select * from data_source").Find(&dss)
assert.Nil(t, err)
require.NoError(t, err)
assert.Equal(t, len(dss), 4)
for _, ds := range dss {
......
......@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert"
)
func TestVersionComparsion(t *testing.T) {
func TestVersionComparison(t *testing.T) {
t.Run("Validate that version is outdated", func(t *testing.T) {
versions := []models.Version{
{Version: "1.1.1"},
......
......@@ -60,7 +60,7 @@ func FloatFromString(f string, nullString string) (Float, error) {
// UnmarshalJSON implements json.Unmarshaler.
// It supports number and null input.
// 0 will not be considered a null Float.
// It also supports unmarshalling a sql.NullFloat64.
// It also supports unmarshaling a sql.NullFloat64.
func (f *Float) UnmarshalJSON(data []byte) error {
var err error
var v interface{}
......
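The behaviour the comment describes — accepting a JSON number, `null`, or a marshaled `sql.NullFloat64`, while keeping 0 distinct from null — can be sketched roughly as follows. This is an illustrative reimplementation, not the exact Grafana code:

```go
package null

import (
	"database/sql"
	"encoding/json"
	"fmt"
)

// Float is a nullable float64: Valid distinguishes 0 from null.
type Float struct {
	sql.NullFloat64
}

// UnmarshalJSON accepts a JSON number, null, or an object produced by
// marshaling a sql.NullFloat64 ({"Float64": ..., "Valid": ...}).
func (f *Float) UnmarshalJSON(data []byte) error {
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	switch value := v.(type) {
	case float64:
		f.Float64, f.Valid = value, true
	case map[string]interface{}:
		// Looks like a marshaled sql.NullFloat64; decode it directly.
		return json.Unmarshal(data, &f.NullFloat64)
	case nil:
		f.Valid = false
	default:
		return fmt.Errorf("json: cannot unmarshal %v into Go value of type null.Float", value)
	}
	return nil
}
```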
......@@ -292,10 +292,10 @@ func writeMetric(buf *bufio.Writer, m model.Metric, mf *dto.MetricFamily) error
}
}
return addExtentionConventionForRollups(buf, mf, m)
return addExtensionConventionForRollups(buf, mf, m)
}
func addExtentionConventionForRollups(buf *bufio.Writer, mf *dto.MetricFamily, m model.Metric) error {
func addExtensionConventionForRollups(buf *bufio.Writer, mf *dto.MetricFamily, m model.Metric) error {
// Adding `.count` `.sum` suffix makes it possible to configure
// different rollup strategies based on metric type
......
......@@ -111,7 +111,7 @@ var (
// MDataSourceProxyReqTimer is a metric summary for dataproxy request duration
MDataSourceProxyReqTimer prometheus.Summary
// MAlertingExecutionTime is a metric summary of alert exeuction duration
// MAlertingExecutionTime is a metric summary of alert execution duration
MAlertingExecutionTime prometheus.Summary
// MRenderingSummary is a metric summary for image rendering request duration
......@@ -162,7 +162,7 @@ var (
// grafanaBuildVersion is a metric with a constant '1' value labeled by version, revision, branch, and goversion from which Grafana was built
grafanaBuildVersion *prometheus.GaugeVec
grafanPluginBuildInfoDesc *prometheus.GaugeVec
grafanaPluginBuildInfoDesc *prometheus.GaugeVec
)
func init() {
......@@ -389,7 +389,7 @@ func init() {
MAlertingExecutionTime = prometheus.NewSummary(prometheus.SummaryOpts{
Name: "alerting_execution_time_milliseconds",
Help: "summary of alert exeuction duration",
Help: "summary of alert execution duration",
Objectives: objectiveMap,
Namespace: ExporterName,
})
......@@ -478,7 +478,7 @@ func init() {
Namespace: ExporterName,
}, []string{"version", "revision", "branch", "goversion", "edition"})
grafanPluginBuildInfoDesc = prometheus.NewGaugeVec(prometheus.GaugeOpts{
grafanaPluginBuildInfoDesc = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "plugin_build_info",
Help: "A metric with a constant '1' value labeled by pluginId, pluginType and version from which Grafana plugin was built",
Namespace: ExporterName,
......@@ -496,7 +496,7 @@ func SetBuildInformation(version, revision, branch string) {
}
func SetPluginBuildInformation(pluginID, pluginType, version string) {
grafanPluginBuildInfoDesc.WithLabelValues(pluginID, pluginType, version).Set(1)
grafanaPluginBuildInfoDesc.WithLabelValues(pluginID, pluginType, version).Set(1)
}
func initMetricVars() {
......@@ -549,7 +549,7 @@ func initMetricVars() {
StatsTotalActiveAdmins,
StatsTotalDataSources,
grafanaBuildVersion,
grafanPluginBuildInfoDesc,
grafanaPluginBuildInfoDesc,
)
}
......
......@@ -38,8 +38,8 @@ func (sl *ServerLockService) LockAndExecute(ctx context.Context, actionName stri
// avoid execution if last lock happened less than `maxInterval` ago
if rowLock.LastExecution != 0 {
lastExeuctionTime := time.Unix(rowLock.LastExecution, 0)
if lastExeuctionTime.Unix() > time.Now().Add(-maxInterval).Unix() {
lastExecutionTime := time.Unix(rowLock.LastExecution, 0)
if lastExecutionTime.Unix() > time.Now().Add(-maxInterval).Unix() {
return nil
}
}
......
......@@ -23,7 +23,7 @@ func TestGrafanaLogin(t *testing.T) {
So(sc.validatePasswordCalled, ShouldBeFalse)
})
Convey("it should not pupulate user object", func() {
Convey("it should not populate user object", func() {
So(sc.loginUserQuery.User, ShouldBeNil)
})
})
......@@ -40,7 +40,7 @@ func TestGrafanaLogin(t *testing.T) {
So(sc.validatePasswordCalled, ShouldBeTrue)
})
Convey("it should not pupulate user object", func() {
Convey("it should not populate user object", func() {
So(sc.loginUserQuery.User, ShouldBeNil)
})
})
......@@ -57,7 +57,7 @@ func TestGrafanaLogin(t *testing.T) {
So(sc.validatePasswordCalled, ShouldBeTrue)
})
Convey("it should pupulate user object", func() {
Convey("it should populate user object", func() {
So(sc.loginUserQuery.User, ShouldNotBeNil)
So(sc.loginUserQuery.User.Login, ShouldEqual, sc.loginUserQuery.Username)
So(sc.loginUserQuery.User.Password, ShouldEqual, sc.loginUserQuery.Password)
......@@ -76,7 +76,7 @@ func TestGrafanaLogin(t *testing.T) {
So(sc.validatePasswordCalled, ShouldBeFalse)
})
Convey("it should not pupulate user object", func() {
Convey("it should not populate user object", func() {
So(sc.loginUserQuery.User, ShouldBeNil)
})
})
......
......@@ -177,7 +177,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
tests := []struct {
Name string
APIURLReponse interface{}
APIURLResponse interface{}
OAuth2Extra interface{}
RoleAttributePath string
ExpectedEmail string
......@@ -215,7 +215,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
},
{
Name: "Given no id_token, a valid role path, a valid api response, use api response",
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"role": "Admin",
"email": "john.doe@example.com",
},
......@@ -225,7 +225,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
},
{
Name: "Given no id_token, no role path, a valid api response, use api response",
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"email": "john.doe@example.com",
},
RoleAttributePath: "",
......@@ -234,7 +234,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
},
{
Name: "Given no id_token, a role path, a valid api response without a role, use api response",
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"email": "john.doe@example.com",
},
RoleAttributePath: "role",
......@@ -253,7 +253,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
// { "role": "Admin", "email": "john.doe@example.com" }
"id_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiQWRtaW4iLCJlbWFpbCI6ImpvaG4uZG9lQGV4YW1wbGUuY29tIn0.9PtHcCaXxZa2HDlASyKIaFGfOKlw2ILQo32xlvhvhRg",
},
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"role": "FromResponse",
"email": "from_response@example.com",
},
......@@ -267,7 +267,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
// { "role": "Admin", "email": "john.doe@example.com" }
"id_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiQWRtaW4iLCJlbWFpbCI6ImpvaG4uZG9lQGV4YW1wbGUuY29tIn0.9PtHcCaXxZa2HDlASyKIaFGfOKlw2ILQo32xlvhvhRg",
},
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"role": "FromResponse",
"email": "from_response@example.com",
},
......@@ -281,7 +281,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
// { "role": "Admin" }
"id_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiQWRtaW4ifQ.k5GwPcZvGe2BE_jgwN0ntz0nz4KlYhEd0hRRLApkTJ4",
},
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"email": "from_response@example.com",
},
RoleAttributePath: "role",
......@@ -294,7 +294,7 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
// { "email": "john.doe@example.com" }
"id_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6ImpvaG4uZG9lQGV4YW1wbGUuY29tIn0.k5GwPcZvGe2BE_jgwN0ntz0nz4KlYhEd0hRRLApkTJ4",
},
APIURLReponse: map[string]interface{}{
APIURLResponse: map[string]interface{}{
"role": "FromResponse",
},
RoleAttributePath: "role",
......@@ -306,11 +306,13 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
for _, test := range tests {
provider.roleAttributePath = test.RoleAttributePath
t.Run(test.Name, func(t *testing.T) {
response, _ := json.Marshal(test.APIURLReponse)
response, err := json.Marshal(test.APIURLResponse)
require.NoError(t, err)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
_, _ = io.WriteString(w, string(response))
_, err = io.WriteString(w, string(response))
require.NoError(t, err)
}))
provider.apiUrl = ts.URL
staticToken := oauth2.Token{
......@@ -321,7 +323,8 @@ func TestUserInfoSearchesForEmailAndRole(t *testing.T) {
}
token := staticToken.WithExtra(test.OAuth2Extra)
actualResult, _ := provider.UserInfo(ts.Client(), token)
actualResult, err := provider.UserInfo(ts.Client(), token)
require.NoError(t, err)
require.Equal(t, test.ExpectedEmail, actualResult.Email)
require.Equal(t, test.ExpectedEmail, actualResult.Login)
require.Equal(t, test.ExpectedRole, actualResult.Role)
......
......@@ -116,7 +116,7 @@ func sanitizeMethod(m string) string {
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, santizeCode will return 200, for consistency with behavior in
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
switch s {
......
......@@ -9,9 +9,10 @@ import (
func TestAlertingModelTest(t *testing.T) {
Convey("Testing Alerting model", t, func() {
json1, _ := simplejson.NewJson([]byte(`{ "field": "value" }`))
json2, _ := simplejson.NewJson([]byte(`{ "field": "value" }`))
json1, err := simplejson.NewJson([]byte(`{ "field": "value" }`))
So(err, ShouldBeNil)
json2, err := simplejson.NewJson([]byte(`{ "field": "value" }`))
So(err, ShouldBeNil)
rule1 := &Alert{
Settings: json1,
......@@ -26,18 +27,18 @@ func TestAlertingModelTest(t *testing.T) {
}
Convey("Testing AlertRule equals", func() {
So(rule1.ContainsUpdates(rule2), ShouldBeFalse)
})
Convey("Changing the expression should contain update", func() {
json2, _ := simplejson.NewJson([]byte(`{ "field": "newValue" }`))
json2, err := simplejson.NewJson([]byte(`{ "field": "newValue" }`))
So(err, ShouldBeNil)
rule1.Settings = json2
So(rule1.ContainsUpdates(rule2), ShouldBeTrue)
})
Convey("Should parse alertRule tags correctly", func() {
json2, _ := simplejson.NewJson([]byte(`{
json2, err := simplejson.NewJson([]byte(`{
"field": "value",
"alertRuleTags": {
"foo": "bar",
......@@ -45,6 +46,7 @@ func TestAlertingModelTest(t *testing.T) {
"tagMap": { "mapValue": "value" }
}
}`))
So(err, ShouldBeNil)
rule1.Settings = json2
expectedTags := []*Tag{
{Id: 0, Key: "foo", Value: "bar"},
......
......@@ -34,7 +34,7 @@ var (
ErrDashboardCannotSaveProvisionedDashboard = errors.New("Cannot save provisioned dashboard")
ErrDashboardRefreshIntervalTooShort = errors.New("Dashboard refresh interval is too low")
ErrDashboardCannotDeleteProvisionedDashboard = errors.New("provisioned dashboard cannot be deleted")
ErrDashboardIdentifierNotSet = errors.New("Unique identfier needed to be able to get a dashboard")
ErrDashboardIdentifierNotSet = errors.New("Unique identifier needed to be able to get a dashboard")
RootFolderName = "General"
)
......
......@@ -214,7 +214,7 @@ func TestDataSourceProxyCache(t *testing.T) {
}))
defer backend.Close()
// 2. Get HTTP transport from datasoruce which uses the test server as backend
// 2. Get HTTP transport from datasource which uses the test server as backend
ds.Url = backend.URL
transport, err := ds.GetHttpTransport()
if err != nil {
......
......@@ -20,7 +20,7 @@ type SendEmailCommand struct {
Data map[string]interface{}
Info string
ReplyTo []string
EmbededFiles []string
EmbeddedFiles []string
AttachedFiles []*SendEmailAttachFile
}
......
......@@ -6,7 +6,7 @@ import (
"strings"
)
func ComposePluginStartCommmand(executable string) string {
func ComposePluginStartCommand(executable string) string {
os := strings.ToLower(runtime.GOOS)
arch := runtime.GOARCH
extension := ""
......
......@@ -51,7 +51,7 @@ func TestPluginDashboards(t *testing.T) {
So(err, ShouldBeNil)
Convey("should return 2 dashboarrd", func() {
Convey("should return 2 dashboards", func() {
So(len(dashboards), ShouldEqual, 2)
})
......
......@@ -45,7 +45,7 @@ func (p *DataSourcePlugin) Load(decoder *json.Decoder, pluginDir string, backend
}
if p.Backend {
cmd := ComposePluginStartCommmand(p.Executable)
cmd := ComposePluginStartCommand(p.Executable)
fullpath := path.Join(p.PluginDir, cmd)
descriptor := backendplugin.NewBackendPluginDescriptor(p.Id, fullpath, backendplugin.PluginStartFuncs{
OnLegacyStart: p.onLegacyPluginStart,
......
......@@ -32,7 +32,7 @@ func (r *RendererPlugin) Load(decoder *json.Decoder, pluginDir string, backendPl
r.backendPluginManager = backendPluginManager
cmd := ComposePluginStartCommmand("plugin_start")
cmd := ComposePluginStartCommand("plugin_start")
fullpath := path.Join(r.PluginDir, cmd)
descriptor := backendplugin.NewRendererPluginDescriptor(r.Id, fullpath, backendplugin.PluginStartFuncs{
OnLegacyStart: r.onLegacyPluginStart,
......
......@@ -35,7 +35,7 @@ func (p *TransformPlugin) Load(decoder *json.Decoder, pluginDir string, backendP
return err
}
cmd := ComposePluginStartCommmand(p.Executable)
cmd := ComposePluginStartCommand(p.Executable)
fullpath := path.Join(p.PluginDir, cmd)
descriptor := backendplugin.NewBackendPluginDescriptor(p.Id, fullpath, backendplugin.PluginStartFuncs{
OnStart: p.onPluginStart,
......
......@@ -77,7 +77,7 @@ func TestParsingAlertRuleSettings(t *testing.T) {
shouldErr require.ErrorAssertionFunc
}{
{
name: "can parse singel condition",
name: "can parse single condition",
file: "testdata/settings/one_condition.json",
expected: []int64{3},
shouldErr: require.NoError,
......
......@@ -14,8 +14,8 @@ var (
rangedTypes = []string{"within_range", "outside_range"}
)
// AlertEvaluator evaluates the reduced value of a timeserie.
// Returning true if a timeserie is violating the condition
// AlertEvaluator evaluates the reduced value of a timeseries.
// Returning true if a timeseries is violating the condition
// ex: ThresholdEvaluator, NoValueEvaluator, RangeEvaluator
type AlertEvaluator interface {
Eval(reducedValue null.Float) bool
......
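To make the `AlertEvaluator` interface concrete, a threshold-style evaluator might look like the sketch below; `nullFloat` is a stand-in for Grafana's `null.Float`, and the parameter handling is simplified:

```go
package conditions

// nullFloat is a stand-in for Grafana's null.Float in this sketch.
type nullFloat struct {
	Float64 float64
	Valid   bool
}

// thresholdEvaluator returns true when the reduced series value violates a
// simple greater-than / less-than threshold, mirroring the "gt"/"lt"
// evaluator types exercised in the tests below.
type thresholdEvaluator struct {
	typ       string // "gt" or "lt"
	threshold float64
}

func (e *thresholdEvaluator) Eval(reduced nullFloat) bool {
	if !reduced.Valid {
		return false
	}
	switch e.typ {
	case "gt":
		return reduced.Float64 > e.threshold
	case "lt":
		return reduced.Float64 < e.threshold
	}
	return false
}
```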
......@@ -9,7 +9,7 @@ import (
"github.com/grafana/grafana/pkg/components/simplejson"
)
func evalutorScenario(json string, reducedValue float64, datapoints ...float64) bool {
func evaluatorScenario(json string, reducedValue float64, datapoints ...float64) bool {
jsonModel, err := simplejson.NewJson([]byte(json))
So(err, ShouldBeNil)
......@@ -19,37 +19,37 @@ func evalutorScenario(json string, reducedValue float64, datapoints ...float64)
return evaluator.Eval(null.FloatFrom(reducedValue))
}
func TestEvalutors(t *testing.T) {
func TestEvaluators(t *testing.T) {
Convey("greater then", t, func() {
So(evalutorScenario(`{"type": "gt", "params": [1] }`, 3), ShouldBeTrue)
So(evalutorScenario(`{"type": "gt", "params": [3] }`, 1), ShouldBeFalse)
So(evaluatorScenario(`{"type": "gt", "params": [1] }`, 3), ShouldBeTrue)
So(evaluatorScenario(`{"type": "gt", "params": [3] }`, 1), ShouldBeFalse)
})
Convey("less then", t, func() {
So(evalutorScenario(`{"type": "lt", "params": [1] }`, 3), ShouldBeFalse)
So(evalutorScenario(`{"type": "lt", "params": [3] }`, 1), ShouldBeTrue)
So(evaluatorScenario(`{"type": "lt", "params": [1] }`, 3), ShouldBeFalse)
So(evaluatorScenario(`{"type": "lt", "params": [3] }`, 1), ShouldBeTrue)
})
Convey("within_range", t, func() {
So(evalutorScenario(`{"type": "within_range", "params": [1, 100] }`, 3), ShouldBeTrue)
So(evalutorScenario(`{"type": "within_range", "params": [1, 100] }`, 300), ShouldBeFalse)
So(evalutorScenario(`{"type": "within_range", "params": [100, 1] }`, 3), ShouldBeTrue)
So(evalutorScenario(`{"type": "within_range", "params": [100, 1] }`, 300), ShouldBeFalse)
So(evaluatorScenario(`{"type": "within_range", "params": [1, 100] }`, 3), ShouldBeTrue)
So(evaluatorScenario(`{"type": "within_range", "params": [1, 100] }`, 300), ShouldBeFalse)
So(evaluatorScenario(`{"type": "within_range", "params": [100, 1] }`, 3), ShouldBeTrue)
So(evaluatorScenario(`{"type": "within_range", "params": [100, 1] }`, 300), ShouldBeFalse)
})
Convey("outside_range", t, func() {
So(evalutorScenario(`{"type": "outside_range", "params": [1, 100] }`, 1000), ShouldBeTrue)
So(evalutorScenario(`{"type": "outside_range", "params": [1, 100] }`, 50), ShouldBeFalse)
So(evalutorScenario(`{"type": "outside_range", "params": [100, 1] }`, 1000), ShouldBeTrue)
So(evalutorScenario(`{"type": "outside_range", "params": [100, 1] }`, 50), ShouldBeFalse)
So(evaluatorScenario(`{"type": "outside_range", "params": [1, 100] }`, 1000), ShouldBeTrue)
So(evaluatorScenario(`{"type": "outside_range", "params": [1, 100] }`, 50), ShouldBeFalse)
So(evaluatorScenario(`{"type": "outside_range", "params": [100, 1] }`, 1000), ShouldBeTrue)
So(evaluatorScenario(`{"type": "outside_range", "params": [100, 1] }`, 50), ShouldBeFalse)
})
Convey("no_value", t, func() {
Convey("should be false if serie have values", func() {
So(evalutorScenario(`{"type": "no_value", "params": [] }`, 50), ShouldBeFalse)
Convey("should be false if series have values", func() {
So(evaluatorScenario(`{"type": "no_value", "params": [] }`, 50), ShouldBeFalse)
})
Convey("should be true when the serie have no value", func() {
Convey("should be true when the series have no value", func() {
jsonModel, err := simplejson.NewJson([]byte(`{"type": "no_value", "params": [] }`))
So(err, ShouldBeNil)
......
......@@ -84,7 +84,7 @@ func TestQueryCondition(t *testing.T) {
So(cr.Firing, ShouldBeFalse)
})
Convey("Should fire if only first serie matches", func() {
Convey("Should fire if only first series matches", func() {
ctx.series = tsdb.TimeSeriesSlice{
tsdb.NewTimeSeries("test1", tsdb.NewTimeSeriesPointsFromArgs(120, 0)),
tsdb.NewTimeSeries("test2", tsdb.NewTimeSeriesPointsFromArgs(0, 0)),
......@@ -149,7 +149,7 @@ func TestQueryCondition(t *testing.T) {
So(cr.NoDataFound, ShouldBeTrue)
})
Convey("Should not set NoDataFound if one serie is empty", func() {
Convey("Should not set NoDataFound if one series is empty", func() {
ctx.series = tsdb.TimeSeriesSlice{
tsdb.NewTimeSeries("test1", tsdb.NewTimeSeriesPointsFromArgs()),
tsdb.NewTimeSeries("test2", tsdb.NewTimeSeriesPointsFromArgs(120, 0)),
......
......@@ -9,10 +9,10 @@ import (
"github.com/grafana/grafana/pkg/tsdb"
)
// queryReducer reduces an timeserie to a nullable float
// queryReducer reduces a timeseries to a nullable float
type queryReducer struct {
// Type is how the timeserie should be reduced.
// Type is how the timeseries should be reduced.
// Ex avg, sum, max, min, count
Type string
}
......
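As a rough illustration of what such a reducer does, here is a simplified sketch; the point layout and nullable return are stand-ins for Grafana's `tsdb` and `null` types:

```go
package conditions

// point is a simplified stand-in for a tsdb time series point: a value and a
// flag telling whether the value is present.
type point struct {
	Value float64
	Valid bool
}

// reduce collapses a series of points into a single nullable value according
// to the reducer type (avg, sum or count in this sketch). The second return
// value reports whether the result is non-null.
func reduce(reducerType string, points []point) (float64, bool) {
	sum, count := 0.0, 0
	for _, p := range points {
		if !p.Valid {
			continue
		}
		sum += p.Value
		count++
	}
	switch reducerType {
	case "count":
		return float64(count), true
	case "sum":
		return sum, count > 0
	case "avg":
		if count == 0 {
			return 0, false
		}
		return sum / float64(count), true
	}
	return 0, false
}
```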
......@@ -153,7 +153,8 @@ func TestAlertingEvaluationHandler(t *testing.T) {
So(context.ConditionEvals, ShouldEqual, "[[false OR false] OR false] = false")
})
Convey("Should retuasdfrn no data if one condition has nodata", func() {
Convey("Should not return NoDataFound if all conditions have data and using OR", func() {
context := NewEvalContext(context.TODO(), &Rule{
Conditions: []Condition{
&conditionStub{operator: "or", noData: false},
......@@ -166,7 +167,7 @@ func TestAlertingEvaluationHandler(t *testing.T) {
So(context.NoDataFound, ShouldBeFalse)
})
Convey("Should return no data if one condition has nodata", func() {
Convey("Should return NoDataFound if one condition has no data", func() {
context := NewEvalContext(context.TODO(), &Rule{
Conditions: []Condition{
&conditionStub{operator: "and", noData: true},
......@@ -178,7 +179,7 @@ func TestAlertingEvaluationHandler(t *testing.T) {
So(context.NoDataFound, ShouldBeTrue)
})
Convey("Should return no data if both conditions have no data and using AND", func() {
Convey("Should not return no data if at least one condition has no data and using AND", func() {
context := NewEvalContext(context.TODO(), &Rule{
Conditions: []Condition{
&conditionStub{operator: "and", noData: true},
......@@ -190,7 +191,7 @@ func TestAlertingEvaluationHandler(t *testing.T) {
So(context.NoDataFound, ShouldBeFalse)
})
Convey("Should not return no data if both conditions have no data and using OR", func() {
Convey("Should return no data if at least one condition has no data and using OR", func() {
context := NewEvalContext(context.TODO(), &Rule{
Conditions: []Condition{
&conditionStub{operator: "or", noData: true},
......
......@@ -36,7 +36,7 @@ type ResultLogEntry struct {
Data interface{}
}
// EvalMatch represents the serie violating the threshold.
// EvalMatch represents the series violating the threshold.
type EvalMatch struct {
Value null.Float `json:"value"`
Metric string `json:"metric"`
......
......@@ -234,7 +234,7 @@ type testNotifier struct {
Name string
Type string
UID string
IsDeault bool
IsDefault bool
UploadImage bool
SendReminder bool
DisableResolveMessage bool
......@@ -251,7 +251,7 @@ func newTestNotifier(model *models.AlertNotification) (Notifier, error) {
return &testNotifier{
UID: model.Uid,
Name: model.Name,
IsDeault: model.IsDefault,
IsDefault: model.IsDefault,
Type: model.Type,
UploadImage: uploadImage,
SendReminder: model.SendReminder,
......@@ -267,7 +267,7 @@ func (n *testNotifier) Notify(evalCtx *EvalContext) error {
return nil
}
func (n *testNotifier) ShouldNotify(ctx context.Context, evalCtx *EvalContext, notiferState *models.AlertNotificationState) bool {
func (n *testNotifier) ShouldNotify(ctx context.Context, evalCtx *EvalContext, notifierState *models.AlertNotificationState) bool {
return true
}
......@@ -284,7 +284,7 @@ func (n *testNotifier) GetNotifierUID() string {
}
func (n *testNotifier) GetIsDefault() bool {
return n.IsDeault
return n.IsDefault
}
func (n *testNotifier) GetSendReminder() bool {
......
......@@ -49,7 +49,7 @@ func NewNotifierBase(model *models.AlertNotification) NotifierBase {
}
// ShouldNotify checks this evaluation should send an alert notification
func (n *NotifierBase) ShouldNotify(ctx context.Context, context *alerting.EvalContext, notiferState *models.AlertNotificationState) bool {
func (n *NotifierBase) ShouldNotify(ctx context.Context, context *alerting.EvalContext, notifierState *models.AlertNotificationState) bool {
prevState := context.PrevAlertState
newState := context.Rule.State
......@@ -60,8 +60,8 @@ func (n *NotifierBase) ShouldNotify(ctx context.Context, context *alerting.EvalC
if prevState == newState && n.SendReminder {
// Do not notify if interval has not elapsed
lastNotify := time.Unix(notiferState.UpdatedAt, 0)
if notiferState.UpdatedAt != 0 && lastNotify.Add(n.Frequency).After(time.Now()) {
lastNotify := time.Unix(notifierState.UpdatedAt, 0)
if notifierState.UpdatedAt != 0 && lastNotify.Add(n.Frequency).After(time.Now()) {
return false
}
......@@ -94,8 +94,8 @@ func (n *NotifierBase) ShouldNotify(ctx context.Context, context *alerting.EvalC
}
// Do not notify if state pending and it have been updated last minute
if notiferState.State == models.AlertNotificationStatePending {
lastUpdated := time.Unix(notiferState.UpdatedAt, 0)
if notifierState.State == models.AlertNotificationStatePending {
lastUpdated := time.Unix(notifierState.UpdatedAt, 0)
if lastUpdated.Add(1 * time.Minute).After(time.Now()) {
return false
}
......
......@@ -104,7 +104,7 @@ func TestShouldSendAlertNotification(t *testing.T) {
expect: false,
},
{
name: "alerting -> alerting with reminder and last notifciation sent 11 minutes ago should trigger",
name: "alerting -> alerting with reminder and last notification sent 11 minutes ago should trigger",
newState: models.AlertStateAlerting,
prevState: models.AlertStateAlerting,
frequency: time.Minute * 10,
......@@ -114,7 +114,7 @@ func TestShouldSendAlertNotification(t *testing.T) {
expect: true,
},
{
name: "OK -> alerting with notifciation state pending and updated 30 seconds ago should not trigger",
name: "OK -> alerting with notification state pending and updated 30 seconds ago should not trigger",
newState: models.AlertStateAlerting,
prevState: models.AlertStateOK,
state: &models.AlertNotificationState{State: models.AlertNotificationStatePending, UpdatedAt: tnow.Add(-30 * time.Second).Unix()},
......@@ -122,7 +122,7 @@ func TestShouldSendAlertNotification(t *testing.T) {
expect: false,
},
{
name: "OK -> alerting with notifciation state pending and updated 2 minutes ago should trigger",
name: "OK -> alerting with notification state pending and updated 2 minutes ago should trigger",
newState: models.AlertStateAlerting,
prevState: models.AlertStateOK,
state: &models.AlertNotificationState{State: models.AlertNotificationStatePending, UpdatedAt: tnow.Add(-2 * time.Minute).Unix()},
......
......@@ -103,10 +103,10 @@ func (en *EmailNotifier) Notify(evalContext *alerting.EvalContext) error {
"AlertPageUrl": setting.AppUrl + "alerting",
"EvalMatches": evalContext.EvalMatches,
},
To: en.Addresses,
SingleEmail: en.SingleEmail,
Template: "alert_notification.html",
EmbededFiles: []string{},
To: en.Addresses,
SingleEmail: en.SingleEmail,
Template: "alert_notification.html",
EmbeddedFiles: []string{},
},
}
......@@ -116,7 +116,7 @@ func (en *EmailNotifier) Notify(evalContext *alerting.EvalContext) error {
} else {
file, err := os.Stat(evalContext.ImageOnDiskPath)
if err == nil {
cmd.EmbededFiles = []string{evalContext.ImageOnDiskPath}
cmd.EmbeddedFiles = []string{evalContext.ImageOnDiskPath}
cmd.Data["EmbeddedImage"] = file.Name()
}
}
......
......@@ -111,7 +111,7 @@ func getTimeDurationStringToSeconds(str string) (int64, error) {
return int64(value * multiplier), nil
}
// NewRuleFromDBAlert mappes an db version of
// NewRuleFromDBAlert maps a db version of
// alert to an in-memory version.
func NewRuleFromDBAlert(ruleDef *models.Alert) (*Rule, error) {
model := &Rule{}
......@@ -130,7 +130,7 @@ func NewRuleFromDBAlert(ruleDef *models.Alert) (*Rule, error) {
model.Frequency = ruleDef.Frequency
// frequency cannot be zero since that would not execute the alert rule.
// so we fallback to 60 seconds if `Freqency` is missing
// so we fallback to 60 seconds if `Frequency` is missing
if model.Frequency == 0 {
model.Frequency = 60
}
......
......@@ -65,7 +65,7 @@ func TestGuardianAdmin(t *testing.T) {
sc.parentFolderPermissionScenario(EDITOR, models.PERMISSION_EDIT, FULL_ACCESS)
sc.parentFolderPermissionScenario(EDITOR, models.PERMISSION_VIEW, FULL_ACCESS)
// parent folder has viweer role with permission
// parent folder has viewer role with permission
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_ADMIN, FULL_ACCESS)
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_EDIT, FULL_ACCESS)
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_VIEW, FULL_ACCESS)
......@@ -114,7 +114,7 @@ func TestGuardianEditor(t *testing.T) {
sc.parentFolderPermissionScenario(EDITOR, models.PERMISSION_EDIT, EDITOR_ACCESS)
sc.parentFolderPermissionScenario(EDITOR, models.PERMISSION_VIEW, VIEWER_ACCESS)
// parent folder has viweer role with permission
// parent folder has viewer role with permission
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_ADMIN, NO_ACCESS)
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_EDIT, NO_ACCESS)
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_VIEW, NO_ACCESS)
......@@ -163,7 +163,7 @@ func TestGuardianViewer(t *testing.T) {
sc.parentFolderPermissionScenario(EDITOR, models.PERMISSION_EDIT, NO_ACCESS)
sc.parentFolderPermissionScenario(EDITOR, models.PERMISSION_VIEW, NO_ACCESS)
// parent folder has viweer role with permission
// parent folder has viewer role with permission
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_ADMIN, FULL_ACCESS)
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_EDIT, EDITOR_ACCESS)
sc.parentFolderPermissionScenario(VIEWER, models.PERMISSION_VIEW, VIEWER_ACCESS)
......@@ -549,7 +549,7 @@ func (sc *scenarioContext) verifyUpdateChildDashboardPermissionsShouldNotBeAllow
newDefaultTeamPermission(childDashboardID, p),
}
// perminssion to update is higher than parent folder permission
// permission to update is higher than parent folder permission
if p > parentFolderPermission {
permissionList = append(permissionList, newEditorRolePermission(childDashboardID, p))
}
......@@ -560,7 +560,7 @@ func (sc *scenarioContext) verifyUpdateChildDashboardPermissionsShouldNotBeAllow
newDefaultTeamPermission(childDashboardID, p),
}
// perminssion to update is higher than parent folder permission
// permission to update is higher than parent folder permission
if p > parentFolderPermission {
permissionList = append(permissionList, newViewerRolePermission(childDashboardID, p))
}
......@@ -586,7 +586,7 @@ func (sc *scenarioContext) verifyUpdateChildDashboardPermissionsWithOverrideShou
}
for _, p := range []models.PermissionType{models.PERMISSION_ADMIN, models.PERMISSION_EDIT, models.PERMISSION_VIEW} {
// perminssion to update is higher tban parent folder permission
// permission to update is higher than parent folder permission
if p > parentFolderPermission {
continue
}
......@@ -631,7 +631,7 @@ func (sc *scenarioContext) verifyUpdateChildDashboardPermissionsWithOverrideShou
}
for _, p := range []models.PermissionType{models.PERMISSION_ADMIN, models.PERMISSION_EDIT, models.PERMISSION_VIEW} {
// perminssion to update is lower than/equal parent folder permission
// permission to update is lower than or equal to parent folder permission
if p <= parentFolderPermission {
continue
}
......
......@@ -453,7 +453,7 @@ func (server *Server) AdminBind() error {
err := server.userBind(server.Config.BindDN, server.Config.BindPassword)
if err != nil {
server.log.Error(
"Cannot authentificate admin user in LDAP",
"Cannot authenticate admin user in LDAP",
"error",
err,
)
......
......@@ -121,7 +121,7 @@ func TestLDAPLogin(t *testing.T) {
So(resp.Login, ShouldEqual, "markelog")
})
Convey("Should perform unauthentificate bind without admin", func() {
Convey("Should perform unauthenticated bind without admin", func() {
connection := &MockConnection{}
entry := ldap.Entry{
DN: "test",
......@@ -147,7 +147,7 @@ func TestLDAPLogin(t *testing.T) {
So(connection.UnauthenticatedBindCalled, ShouldBeTrue)
})
Convey("Should perform authentificate binds", func() {
Convey("Should perform authenticated binds", func() {
connection := &MockConnection{}
entry := ldap.Entry{
DN: "test",
......
......@@ -20,7 +20,7 @@ type Message struct {
Body string
Info string
ReplyTo []string
EmbededFiles []string
EmbeddedFiles []string
AttachedFiles []*AttachedFile
}
......
......@@ -73,7 +73,7 @@ func (ns *NotificationService) setFiles(
m *gomail.Message,
msg *Message,
) {
for _, file := range msg.EmbededFiles {
for _, file := range msg.EmbeddedFiles {
m.Embed(file)
}
......@@ -181,7 +181,7 @@ func (ns *NotificationService) buildEmailMessage(cmd *models.SendEmailCommand) (
From: fmt.Sprintf("%s <%s>", ns.Cfg.Smtp.FromName, ns.Cfg.Smtp.FromAddress),
Subject: subject,
Body: buffer.String(),
EmbededFiles: cmd.EmbededFiles,
EmbeddedFiles: cmd.EmbeddedFiles,
AttachedFiles: buildAttachedFiles(cmd.AttachedFiles),
}, nil
}
......
......@@ -119,13 +119,13 @@ func subjectTemplateFunc(obj map[string]interface{}, value string) string {
func (ns *NotificationService) sendEmailCommandHandlerSync(ctx context.Context, cmd *models.SendEmailCommandSync) error {
message, err := ns.buildEmailMessage(&models.SendEmailCommand{
Data: cmd.Data,
Info: cmd.Info,
Template: cmd.Template,
To: cmd.To,
SingleEmail: cmd.SingleEmail,
EmbededFiles: cmd.EmbededFiles,
Subject: cmd.Subject,
Data: cmd.Data,
Info: cmd.Info,
Template: cmd.Template,
To: cmd.To,
SingleEmail: cmd.SingleEmail,
EmbeddedFiles: cmd.EmbeddedFiles,
Subject: cmd.Subject,
})
if err != nil {
......
......@@ -30,9 +30,9 @@ func TestEmailIntegrationTest(t *testing.T) {
cmd := &models.SendEmailCommand{
Data: map[string]interface{}{
"Title": "[CRITICAL] Imaginary timeserie alert",
"Title": "[CRITICAL] Imaginary timeseries alert",
"State": "Firing",
"Name": "Imaginary timeserie alert",
"Name": "Imaginary timeseries alert",
"Severity": "ok",
"SeverityColor": "#D63232",
"Message": "Alert message that will support markdown in some distant future.",
......
......@@ -13,7 +13,7 @@ var (
symlinkedFolder = "testdata/test-dashboards/symlink"
)
func TestProvsionedSymlinkedFolder(t *testing.T) {
func TestProvisionedSymlinkedFolder(t *testing.T) {
cfg := &config{
Name: "Default",
Type: "file",
......
......@@ -150,8 +150,8 @@ func TestDatasourceAsConfig(t *testing.T) {
})
Convey("skip invalid directory", func() {
cfgProvifer := &configReader{log: log.New("test logger")}
cfg, err := cfgProvifer.readConfig("./invalid-directory")
cfgProvider := &configReader{log: log.New("test logger")}
cfg, err := cfgProvider.readConfig("./invalid-directory")
if err != nil {
t.Fatalf("readConfig return an error %v", err)
}
......@@ -161,8 +161,8 @@ func TestDatasourceAsConfig(t *testing.T) {
Convey("can read all properties from version 1", func() {
_ = os.Setenv("TEST_VAR", "name")
cfgProvifer := &configReader{log: log.New("test logger")}
cfg, err := cfgProvifer.readConfig(allProperties)
cfgProvider := &configReader{log: log.New("test logger")}
cfg, err := cfgProvider.readConfig(allProperties)
_ = os.Unsetenv("TEST_VAR")
if err != nil {
t.Fatalf("readConfig return an error %v", err)
......@@ -190,8 +190,8 @@ func TestDatasourceAsConfig(t *testing.T) {
})
Convey("can read all properties from version 0", func() {
cfgProvifer := &configReader{log: log.New("test logger")}
cfg, err := cfgProvifer.readConfig(versionZero)
cfgProvider := &configReader{log: log.New("test logger")}
cfg, err := cfgProvider.readConfig(versionZero)
if err != nil {
t.Fatalf("readConfig return an error %v", err)
}
......
......@@ -45,8 +45,8 @@ func TestNotificationAsConfig(t *testing.T) {
Convey("Can read correct properties", func() {
_ = os.Setenv("TEST_VAR", "default")
cfgProvifer := &configReader{log: log.New("test logger")}
cfg, err := cfgProvifer.readConfig(correct_properties)
cfgProvider := &configReader{log: log.New("test logger")}
cfg, err := cfgProvider.readConfig(correct_properties)
_ = os.Unsetenv("TEST_VAR")
if err != nil {
t.Fatalf("readConfig return an error %v", err)
......@@ -293,8 +293,8 @@ func TestNotificationAsConfig(t *testing.T) {
})
Convey("Skip invalid directory", func() {
cfgProvifer := &configReader{log: log.New("test logger")}
cfg, err := cfgProvifer.readConfig(emptyFolder)
cfgProvider := &configReader{log: log.New("test logger")}
cfg, err := cfgProvider.readConfig(emptyFolder)
if err != nil {
t.Fatalf("readConfig return an error %v", err)
}
......@@ -302,15 +302,15 @@ func TestNotificationAsConfig(t *testing.T) {
})
Convey("Unknown notifier should return error", func() {
cfgProvifer := &configReader{log: log.New("test logger")}
_, err := cfgProvifer.readConfig(unknownNotifier)
cfgProvider := &configReader{log: log.New("test logger")}
_, err := cfgProvider.readConfig(unknownNotifier)
So(err, ShouldNotBeNil)
So(err.Error(), ShouldEqual, "Unsupported notification type")
})
Convey("Read incorrect properties", func() {
cfgProvifer := &configReader{log: log.New("test logger")}
_, err := cfgProvifer.readConfig(incorrect_settings)
cfgProvider := &configReader{log: log.New("test logger")}
_, err := cfgProvider.readConfig(incorrect_settings)
So(err, ShouldNotBeNil)
So(err.Error(), ShouldEqual, "Alert validation error: Could not find url property in settings")
})
......
......@@ -154,7 +154,7 @@ func (val *StringMapValue) Value() map[string]string {
return val.value
}
// transformInterface tries to transform any interface type into proper value with env expansion. It travers maps and
// transformInterface tries to transform any interface type into proper value with env expansion. It traverses maps and
// slices and the actual interpolation is done on all simple string values in the structure. It returns a copy of any
// map or slice value instead of modifying them in place and also return value without interpolation but with converted
// type as a second value.
......
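A simplified sketch of the traversal pattern described above, returning copies and expanding only string leaves. The `$VAR`-style expansion via `os.ExpandEnv` is an assumption about the interpolation syntax, not the exact provisioning implementation:

```go
package values

import "os"

// expandValue walks maps and slices, returning a copy with every string leaf
// passed through environment-variable expansion. Non-string scalars are
// returned unchanged.
func expandValue(v interface{}) interface{} {
	switch val := v.(type) {
	case string:
		return os.ExpandEnv(val)
	case []interface{}:
		out := make([]interface{}, len(val))
		for i, item := range val {
			out[i] = expandValue(item)
		}
		return out
	case map[string]interface{}:
		out := make(map[string]interface{}, len(val))
		for k, item := range val {
			out[k] = expandValue(item)
		}
		return out
	default:
		return v
	}
}
```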
......@@ -11,7 +11,7 @@ import (
func TestQuotaCommandsAndQueries(t *testing.T) {
Convey("Testing Qutoa commands & queries", t, func() {
Convey("Testing Quota commands & queries", t, func() {
InitTestDB(t)
userId := int64(1)
orgId := int64(0)
......
......@@ -91,7 +91,7 @@ func TestInfluxdbQueryParser(t *testing.T) {
}
]
],
"alias": "serie alias",
"alias": "series alias",
"tags": [
{
"key": "datacenter",
......@@ -118,7 +118,7 @@ func TestInfluxdbQueryParser(t *testing.T) {
So(len(res.Tags), ShouldEqual, 2)
So(res.Tz, ShouldEqual, "Europe/Paris")
So(res.Interval, ShouldEqual, time.Second*20)
So(res.Alias, ShouldEqual, "serie alias")
So(res.Alias, ShouldEqual, "series alias")
})
Convey("can part raw query json model", func() {
......@@ -140,7 +140,7 @@ func TestInfluxdbQueryParser(t *testing.T) {
],
"interval": ">10s",
"policy": "default",
"query": "RawDummieQuery",
"query": "RawDummyQuery",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
......@@ -171,7 +171,7 @@ func TestInfluxdbQueryParser(t *testing.T) {
res, err := parser.Parse(modelJson, dsInfo)
So(err, ShouldBeNil)
So(res.RawQuery, ShouldEqual, "RawDummieQuery")
So(res.RawQuery, ShouldEqual, "RawDummyQuery")
So(len(res.GroupBy), ShouldEqual, 2)
So(len(res.Selects), ShouldEqual, 1)
So(len(res.Tags), ShouldEqual, 0)
......
......@@ -51,7 +51,7 @@ func (rp *ResponseParser) transformRows(rows []Row, queryResult *tsdb.QueryResul
}
}
result = append(result, &tsdb.TimeSeries{
Name: rp.formatSerieName(row, column, query),
Name: rp.formatSeriesName(row, column, query),
Points: points,
Tags: row.Tags,
})
......@@ -61,9 +61,9 @@ func (rp *ResponseParser) transformRows(rows []Row, queryResult *tsdb.QueryResul
return result
}
func (rp *ResponseParser) formatSerieName(row Row, column string, query *Query) string {
func (rp *ResponseParser) formatSeriesName(row Row, column string, query *Query) string {
if query.Alias == "" {
return rp.buildSerieNameFromQuery(row, column)
return rp.buildSeriesNameFromQuery(row, column)
}
nameSegment := strings.Split(row.Name, ".")
......@@ -102,7 +102,7 @@ func (rp *ResponseParser) formatSerieName(row Row, column string, query *Query)
return string(result)
}
func (rp *ResponseParser) buildSerieNameFromQuery(row Row, column string) string {
func (rp *ResponseParser) buildSeriesNameFromQuery(row Row, column string) string {
var tags []string
for k, v := range row.Tags {
......
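The response-parser tests further down expect default names of the form `cpu.mean { datacenter: America }`, so the default naming can be sketched as below; sorting the tags for a stable order is an assumption here, and the real parser may differ:

```go
package influxdb

import (
	"fmt"
	"sort"
	"strings"
)

// buildDefaultSeriesName joins the measurement name, the selected column and
// the row tags into the "measurement.column { key: value }" form the tests expect.
func buildDefaultSeriesName(measurement, column string, tags map[string]string) string {
	if len(tags) == 0 {
		return fmt.Sprintf("%s.%s", measurement, column)
	}
	pairs := make([]string, 0, len(tags))
	for k, v := range tags {
		pairs = append(pairs, fmt.Sprintf("%s: %s", k, v))
	}
	sort.Strings(pairs) // stable order; illustrative choice
	return fmt.Sprintf("%s.%s { %s }", measurement, column, strings.Join(pairs, ", "))
}
```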
......@@ -61,7 +61,7 @@ func TestInfluxdbResponseParser(t *testing.T) {
So(result.Series[0].Points[2][0].Valid, ShouldBeFalse)
})
Convey("can format serie names", func() {
Convey("can format series names", func() {
So(result.Series[0].Name, ShouldEqual, "cpu.mean { datacenter: America }")
So(result.Series[1].Name, ShouldEqual, "cpu.sum { datacenter: America }")
})
......@@ -92,10 +92,10 @@ func TestInfluxdbResponseParser(t *testing.T) {
Convey("$ alias", func() {
Convey("simple alias", func() {
query := &Query{Alias: "serie alias"}
query := &Query{Alias: "series alias"}
result := parser.Parse(response, query)
So(result.Series[0].Name, ShouldEqual, "serie alias")
So(result.Series[0].Name, ShouldEqual, "series alias")
})
Convey("measurement alias", func() {
......@@ -137,10 +137,10 @@ func TestInfluxdbResponseParser(t *testing.T) {
Convey("[[]] alias", func() {
Convey("simple alias", func() {
query := &Query{Alias: "serie alias"}
query := &Query{Alias: "series alias"}
result := parser.Parse(response, query)
So(result.Series[0].Name, ShouldEqual, "serie alias")
So(result.Series[0].Name, ShouldEqual, "series alias")
})
Convey("measurement alias", func() {
......
......@@ -23,7 +23,7 @@ import (
// Use the docker/blocks/mssql_tests/docker-compose.yaml to spin up a
// preconfigured MSSQL server suitable for running these tests.
// There is also a datasource and dashboard provisioned by devenv scripts that you can
// use to verify that the generated data are vizualized as expected, see
// use to verify that the generated data are visualized as expected, see
// devenv/README.md for setup instructions.
// If needed, change the variable below to the IP address of the database.
var serverIP = "localhost"
......
......@@ -26,7 +26,7 @@ import (
// Use the docker/blocks/mysql_tests/docker-compose.yaml to spin up a
// preconfigured MySQL server suitable for running these tests.
// There is also a datasource and dashboard provisioned by devenv scripts that you can
// use to verify that the generated data are vizualized as expected, see
// use to verify that the generated data are visualized as expected, see
// devenv/README.md for setup instructions.
func TestMySQL(t *testing.T) {
// change to true to run the MySQL tests
......
......@@ -27,7 +27,7 @@ import (
// Use the docker/blocks/postgres_tests/docker-compose.yaml to spin up a
// preconfigured Postgres server suitable for running these tests.
// There is also a datasource and dashboard provisioned by devenv scripts that you can
// use to verify that the generated data are vizualized as expected, see
// use to verify that the generated data are visualized as expected, see
// devenv/README.md for setup instructions.
func TestPostgres(t *testing.T) {
// change to true to run the PostgreSQL tests
......
......@@ -31,7 +31,7 @@ func TestPrometheus(t *testing.T) {
So(formatLegend(metric, query), ShouldEqual, "legend backend mobile ")
})
Convey("build full serie name", func() {
Convey("build full series name", func() {
metric := map[p.LabelName]p.LabelValue{
p.LabelName(p.MetricNameLabel): p.LabelValue("http_request_total"),
p.LabelName("app"): p.LabelValue("backend"),
......
......@@ -47,7 +47,7 @@ func init() {
factor := 2
for i := 0; i < 10; i++ {
timeWalkerMs := context.TimeRange.GetFromAsMsEpoch()
serie := &tsdb.TimeSeries{Name: strconv.Itoa(start)}
ts := &tsdb.TimeSeries{Name: strconv.Itoa(start)}
start *= factor
points := make(tsdb.TimeSeriesPoints, 0)
......@@ -57,8 +57,8 @@ func init() {
timeWalkerMs += query.IntervalMs * 50
}
serie.Points = points
series = append(series, serie)
ts.Points = points
series = append(series, ts)
}
queryRes := tsdb.NewQueryResult()
......@@ -77,7 +77,7 @@ func init() {
var series []*tsdb.TimeSeries
for i := 0; i < 10; i++ {
timeWalkerMs := context.TimeRange.GetFromAsMsEpoch()
serie := &tsdb.TimeSeries{Name: strconv.Itoa(i * 10)}
ts := &tsdb.TimeSeries{Name: strconv.Itoa(i * 10)}
points := make(tsdb.TimeSeriesPoints, 0)
for j := int64(0); j < 100 && timeWalkerMs < to; j++ {
......@@ -86,8 +86,8 @@ func init() {
timeWalkerMs += query.IntervalMs * 50
}
serie.Points = points
series = append(series, serie)
ts.Points = points
series = append(series, ts)
}
queryRes := tsdb.NewQueryResult()
......