Mirror of https://github.com/prometheus/prometheus.git (synced 2025-07-03 11:03:25 +00:00)

Fix linting issues found by golangci-lint v2.0.2 (#16368)

* Fix linting issues found by golangci-lint v2.0.2

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>

parent 2e9ab9cc62 · commit e7e3ab2824

50 changed files with 178 additions and 208 deletions
@@ -164,31 +164,9 @@ linters:
       - name: unused-parameter
       - name: var-declaration
       - name: var-naming
-    staticcheck:
-      checks:
-        - all # Enable all checks.
-        # FIXME: We should enable this check once we have fixed all the issues.
-        - -QF1001
-        - -QF1002
-        - -QF1003
-        - -QF1006
-        - -QF1007
-        - -QF1008
-        - -QF1009
-        - -QF1010
-        - -QF1012
-        - -ST1000
-        - -ST1003
-        - -ST1005
-        - -ST1012
-        - -ST1016
-        - -ST1020
     testifylint:
       disable:
-        - empty # FIXME
-        - equal-values # FIXME
         - float-compare
-        - formatter # FIXME
         - go-require
         - len # FIXME
         - useless-assert # FIXME: wait for golangci-lint > v2.0.2
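Dropping the temporary exclusions above turns the staticcheck quickfix (`QF…`) and stylecheck (`ST…`) checks back on, and removing `empty`, `equal-values`, and `formatter` from `testifylint.disable` is what drives the many `require.Equal(t, "", …)` → `require.Empty(t, …)` rewrites in the hunks below. A minimal sketch of the pattern the testifylint `empty` check enforces (test name and values are illustrative, not from the commit):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Flagged by testifylint's "empty" check: comparing a value against its
// zero value instead of using the dedicated assertion.
func TestEmptyCheck(t *testing.T) {
	var got string
	require.Empty(t, got) // preferred over require.Equal(t, "", got)
}
```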
@@ -268,7 +268,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 	go func() { done <- prom.Wait() }()
 	select {
 	case err := <-done:
-		require.Fail(t, "prometheus should be still running: %v", err)
+		t.Fatalf("prometheus should be still running: %v", err)
 	case <-time.After(startupTime):
 		prom.Process.Kill()
 		<-done

@@ -332,7 +332,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 	go func() { done <- prom.Wait() }()
 	select {
 	case err := <-done:
-		require.Fail(t, "prometheus should be still running: %v", err)
+		t.Fatalf("prometheus should be still running: %v", err)
 	case <-time.After(startupTime):
 		prom.Process.Kill()
 		<-done
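These `require.Fail` → `t.Fatalf` rewrites come from testifylint's `formatter` check: `Fail`'s second parameter is a plain failure message, and anything after it is `msgAndArgs`, so a `%v` placed in the message is never interpolated. The same check is behind dropping the empty trailing `""` message arguments in the `TestCheckRules` hunks further down. A sketch under illustrative names:

```go
package example

import "testing"

// require.Fail(t, "still running: %v", err) would report the literal
// "%v" and attach err separately rather than formatting it; the testing
// package's Fatalf formats as intended.
func reportStillRunning(t *testing.T, err error) {
	t.Fatalf("prometheus should be still running: %v", err)
}
```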
@@ -88,7 +88,7 @@ func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
 	_, err = p.configFile.Seek(0, 0)
 	require.NoError(t, err)
 	if queryLogFile != "" {
-		_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
+		_, err = fmt.Fprintf(p.configFile, "global:\n query_log_file: %s\n", queryLogFile)
 		require.NoError(t, err)
 	}
 	_, err = p.configFile.Write([]byte(p.configuration()))
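This is staticcheck QF1012, one of the checks the config change above stops excluding: building a string with `fmt.Sprintf` only to pass it through `Write([]byte(...))` allocates an intermediate string, while `fmt.Fprintf` writes straight to any `io.Writer`. A sketch of the rewrite (names are illustrative):

```go
package example

import (
	"fmt"
	"io"
)

// Before: _, err := w.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", path)))
func writeQueryLogStanza(w io.Writer, path string) error {
	_, err := fmt.Fprintf(w, "global:\n query_log_file: %s\n", path)
	return err
}
```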
@@ -510,7 +510,7 @@ func TestCheckRules(t *testing.T) {
 		os.Stdin = r

 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
-		require.Equal(t, successExitCode, exitCode, "")
+		require.Equal(t, successExitCode, exitCode)
 	})

 	t.Run("rules-bad", func(t *testing.T) {

@@ -532,7 +532,7 @@ func TestCheckRules(t *testing.T) {
 		os.Stdin = r

 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
-		require.Equal(t, failureExitCode, exitCode, "")
+		require.Equal(t, failureExitCode, exitCode)
 	})

 	t.Run("rules-lint-fatal", func(t *testing.T) {

@@ -554,7 +554,7 @@ func TestCheckRules(t *testing.T) {
 		os.Stdin = r

 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false))
-		require.Equal(t, lintErrExitCode, exitCode, "")
+		require.Equal(t, lintErrExitCode, exitCode)
 	})
 }

@@ -572,19 +572,19 @@ func TestCheckRulesWithRuleFiles(t *testing.T) {
 	t.Run("rules-good", func(t *testing.T) {
 		t.Parallel()
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml")
-		require.Equal(t, successExitCode, exitCode, "")
+		require.Equal(t, successExitCode, exitCode)
 	})

 	t.Run("rules-bad", func(t *testing.T) {
 		t.Parallel()
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml")
-		require.Equal(t, failureExitCode, exitCode, "")
+		require.Equal(t, failureExitCode, exitCode)
 	})

 	t.Run("rules-lint-fatal", func(t *testing.T) {
 		t.Parallel()
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml")
-		require.Equal(t, lintErrExitCode, exitCode, "")
+		require.Equal(t, lintErrExitCode, exitCode)
 	})
 }
@@ -321,12 +321,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
 			return errs
 		}

-		for {
-			if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
-				time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) {
-				break
-			}
-
+		for curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
+			time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint) {
 			// We need to check alerts for this time.
 			// If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval'
 			// then we compare alerts with the Eval at `ts`.
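Staticcheck QF1006: when a `for {}` loop opens with a single `if cond { break }`, the negated condition can be lifted into the loop header, as here and in the Docker discovery hunk below. In miniature (names are illustrative):

```go
package example

// Before:
//	for {
//		if !(i < len(xs) && xs[i] <= limit) {
//			break
//		}
//		i++
//	}
func advance(xs []int, limit int) int {
	i := 0
	for i < len(xs) && xs[i] <= limit {
		i++
	}
	return i
}
```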
@@ -115,6 +115,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err

 	region, err := metadata.Region()
 	if err != nil {
+		//nolint:staticcheck // Capitalized first word.
 		return errors.New("Lightsail SD configuration requires a region")
 	}
 	c.Region = region
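Enabling stylecheck ST1005 ("error strings should not be capitalized") would flag messages that begin with a proper noun such as "Lightsail" or "Uyuni". Rather than rewording them, the commit suppresses the check at each site with an inline directive, as also seen in the Uyuni hunk below. A sketch:

```go
package example

import "errors"

// The leading capital is deliberate (a product name), so a site-level
// nolint comment keeps ST1005 enabled everywhere else.
func requireRegion(region string) error {
	if region == "" {
		//nolint:staticcheck // Capitalized first word.
		return errors.New("Lightsail SD configuration requires a region")
	}
	return nil
}
```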
@@ -425,14 +425,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
 		d := newDiscovery(t, config)

 		// Should be empty if not initialized.
-		require.Equal(t, "", d.clientDatacenter)
+		require.Empty(t, d.clientDatacenter)

 		err = d.getDatacenter()

 		// An error should be returned.
 		require.EqualError(t, err, tc.errMessage)
 		// Should still be empty.
-		require.Equal(t, "", d.clientDatacenter)
+		require.Empty(t, d.clientDatacenter)
 	}
 }

@@ -219,7 +219,7 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
 		podPhaseLabel:    lv(string(pod.Status.Phase)),
 		podNodeNameLabel: lv(pod.Spec.NodeName),
 		podHostIPLabel:   lv(pod.Status.HostIP),
-		podUID:           lv(string(pod.ObjectMeta.UID)),
+		podUID:           lv(string(pod.UID)),
 	}

 	addObjectMetaLabels(ls, pod.ObjectMeta, RolePod)
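Staticcheck QF1008: selectors need not spell out an embedded field, so `pod.ObjectMeta.UID` shortens to `pod.UID`. The `m.Metric.*` → `m.*`, `stor.DB.*` → `stor.*`, and `cq.querier.maxt` → `cq.maxt` rewrites later in this diff are the same fix. A self-contained sketch:

```go
package example

type objectMeta struct{ UID string }

// objectMeta is embedded, so its fields promote to pod.
type pod struct {
	objectMeta
}

func uid(p pod) string {
	return p.UID // QF1008: preferred over p.objectMeta.UID
}
```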
@@ -194,7 +194,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	events, err := d.client.ListEvents(ctx, &eventsOpts)
 	if err != nil {
 		var e *linodego.Error
-		if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) {
+		if !errors.As(err, &e) || e.Code != http.StatusUnauthorized {
 			return nil, err
 		}
 		// If we get a 401, the token doesn't have `events:read_only` scope.
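Staticcheck QF1001 applies De Morgan's law: `!(A && B)` becomes `!A || !B`, dropping a level of negation. The OTLP translator hunks near the end of the diff get the same treatment. In miniature:

```go
package example

// Equivalent by De Morgan's law; the second form is what QF1001 suggests.
func propagate(matched bool, code int) bool {
	// return !(matched && code == 401)
	return !matched || code != 401
}
```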
@@ -695,7 +695,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	for x := 0; x < totalUpdatesCount; x++ {
 		select {
 		case <-ctx.Done():
-			require.FailNow(t, "%d: no update arrived within the timeout limit", x)
+			t.Fatalf("%d: no update arrived within the timeout limit", x)
 		case tgs := <-provUpdates:
 			discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
 			for _, got := range discoveryManager.allGroups() {

@@ -769,12 +769,10 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 			}
 		}
 	}
-	if match != present {
-		msg := ""
-		if !present {
-			msg = "not"
-		}
-		require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
+	if present {
+		require.Truef(t, match, "%q must be present in Targets labels: %q", label, mergedTargets)
+	} else {
+		require.Falsef(t, match, "%q must be absent in Targets labels: %q", label, mergedTargets)
 	}
 }
@@ -1091,9 +1089,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	targetGroups, ok := discoveryManager.targets[p]
 	require.True(t, ok, "'%v' should be present in targets", p)
 	// Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
-	require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p)
+	require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
 	require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
-	require.Empty(t, syncedTargets["prometheus"], 0)
+	require.Empty(t, syncedTargets["prometheus"])
 }

 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {

@@ -1373,10 +1371,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
 	time.Sleep(expected.delay)
 	select {
 	case <-ctx.Done():
-		require.FailNow(t, "step %d: no update received in the expected timeframe", i)
+		t.Fatalf("step %d: no update received in the expected timeframe", i)
 	case tgs, ok := <-mgr.SyncCh():
 		require.True(t, ok, "step %d: discovery manager channel is closed", i)
-		require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i)
+		require.Len(t, tgs, len(expected.tgs), "step %d: targets mismatch", i)

 		for k := range expected.tgs {
 			_, ok := tgs[k]
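The `require.Equal(t, len(a), len(b), …)` → `require.Len(t, b, len(a), …)` rewrites here and throughout the TSDB test hunks come from testifylint's `len` check; on failure, `Len` prints the collection's actual contents rather than two bare integers. Sketch:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenCheck(t *testing.T) {
	expected := []string{"a", "b"}
	got := map[string]int{"a": 1, "b": 2}
	// Preferred over require.Equal(t, len(expected), len(got)).
	require.Len(t, got, len(expected))
}
```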
@@ -202,7 +202,7 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {

 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
 		"Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
 }

@@ -300,9 +300,9 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {

 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
 		"Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
 		"Wrong portDefinitions label from the first port.")

 	tgt = tg.Targets[1]

@@ -354,12 +354,12 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {

 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")

 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }

@@ -401,13 +401,13 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) {

 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")

 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }

 func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList {

@@ -458,12 +458,12 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")

 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }

 func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList {

@@ -514,12 +514,12 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")

 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }

 func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList {

@@ -574,10 +574,10 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")

 	tgt = tg.Targets[1]
 	require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
@@ -235,10 +235,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 	containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
 	if len(networks) == 0 {
 		// Try to lookup shared networks
-		for {
-			if !containerNetworkMode.IsContainer() {
-				break
-			}
+		for containerNetworkMode.IsContainer() {
 			tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
 			if !exists {
 				break
@@ -182,9 +182,10 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 	var ipv6Addresses []string

 	for _, ip := range server.PublicIPs {
-		if ip.Family == instance.ServerIPIPFamilyInet {
+		switch ip.Family {
+		case instance.ServerIPIPFamilyInet:
 			ipv4Addresses = append(ipv4Addresses, ip.Address.String())
-		} else if ip.Family == instance.ServerIPIPFamilyInet6 {
+		case instance.ServerIPIPFamilyInet6:
 			ipv6Addresses = append(ipv6Addresses, ip.Address.String())
 		}
 	}
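Staticcheck QF1003: an if/else-if chain that tests the same expression against different constants converts to a tagged switch, which also makes adding a third family a one-line change. Sketch with illustrative constants standing in for the SDK values:

```go
package example

const (
	familyInet  = 4
	familyInet6 = 6
)

// Before: if f == familyInet { ... } else if f == familyInet6 { ... }
func bucket(f int, v4, v6 []string, addr string) ([]string, []string) {
	switch f {
	case familyInet:
		v4 = append(v4, addr)
	case familyInet6:
		v6 = append(v6, addr)
	}
	return v4, v6
}
```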
@@ -141,18 +141,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 	if c.Server == "" {
+		//nolint:staticcheck // Capitalized first word.
 		return errors.New("Uyuni SD configuration requires server host")
 	}

 	_, err = url.Parse(c.Server)
 	if err != nil {
+		//nolint:staticcheck // Capitalized first word.
 		return fmt.Errorf("Uyuni Server URL is not valid: %w", err)
 	}

 	if c.Username == "" {
+		//nolint:staticcheck // Capitalized first word.
 		return errors.New("Uyuni SD configuration requires a username")
 	}
 	if c.Password == "" {
+		//nolint:staticcheck // Capitalized first word.
 		return errors.New("Uyuni SD configuration requires a password")
 	}
 	return c.HTTPClientConfig.Validate()
@@ -145,7 +145,7 @@ func parseFlags() *config {

 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
+		fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s", err)
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}

@@ -78,7 +78,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) {
 		case b == ':':
 			result.WriteString("_.")
 		default:
-			result.WriteString(fmt.Sprintf("_%X", b))
+			fmt.Fprintf(result, "_%X", b)
 		}
 	}
 	result.WriteByte('"')
@@ -1016,7 +1016,7 @@ type floatBucketIterator struct {

 func (i *floatBucketIterator) At() Bucket[float64] {
 	// Need to use i.targetSchema rather than i.baseBucketIterator.schema.
-	return i.baseBucketIterator.at(i.targetSchema)
+	return i.at(i.targetSchema)
 }

 func (i *floatBucketIterator) Next() bool {

@@ -513,7 +513,7 @@ func TestLabels_Has(t *testing.T) {
 }

 func TestLabels_Get(t *testing.T) {
-	require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo"))
+	require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo"))
 	require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa"))
 	require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb"))
 }

@@ -81,7 +81,7 @@ func (m *MetricStreamingDecoder) NextMetricFamily() error {
 	m.mfData = b[varIntLength:totalLength]

 	m.inPos += totalLength
-	return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData)
+	return m.unmarshalWithoutMetrics(m, m.mfData)
 }

 // resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory.

@@ -98,7 +98,7 @@ func (m *MetricStreamingDecoder) NextMetric() error {

 	m.resetMetric()
 	m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end]
-	if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil {
+	if err := m.unmarshalWithoutLabels(m, m.mData); err != nil {
 		return err
 	}
 	m.metricIndex++
@@ -111,37 +111,37 @@ func (m *MetricStreamingDecoder) resetMetric() {
 	m.TimestampMs = 0

 	// TODO(bwplotka): Autogenerate reset functions.
-	if m.Metric.Counter != nil {
-		m.Metric.Counter.Value = 0
-		m.Metric.Counter.CreatedTimestamp = nil
-		m.Metric.Counter.Exemplar = nil
+	if m.Counter != nil {
+		m.Counter.Value = 0
+		m.Counter.CreatedTimestamp = nil
+		m.Counter.Exemplar = nil
 	}
-	if m.Metric.Gauge != nil {
-		m.Metric.Gauge.Value = 0
+	if m.Gauge != nil {
+		m.Gauge.Value = 0
 	}
-	if m.Metric.Histogram != nil {
-		m.Metric.Histogram.SampleCount = 0
-		m.Metric.Histogram.SampleCountFloat = 0
-		m.Metric.Histogram.SampleSum = 0
-		m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0]
-		m.Metric.Histogram.CreatedTimestamp = nil
-		m.Metric.Histogram.Schema = 0
-		m.Metric.Histogram.ZeroThreshold = 0
-		m.Metric.Histogram.ZeroCount = 0
-		m.Metric.Histogram.ZeroCountFloat = 0
-		m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0]
-		m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0]
-		m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0]
-		m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0]
-		m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0]
-		m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0]
-		m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0]
+	if m.Histogram != nil {
+		m.Histogram.SampleCount = 0
+		m.Histogram.SampleCountFloat = 0
+		m.Histogram.SampleSum = 0
+		m.Histogram.Bucket = m.Histogram.Bucket[:0]
+		m.Histogram.CreatedTimestamp = nil
+		m.Histogram.Schema = 0
+		m.Histogram.ZeroThreshold = 0
+		m.Histogram.ZeroCount = 0
+		m.Histogram.ZeroCountFloat = 0
+		m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0]
+		m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0]
+		m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0]
+		m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0]
+		m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0]
+		m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0]
+		m.Histogram.Exemplars = m.Histogram.Exemplars[:0]
 	}
-	if m.Metric.Summary != nil {
-		m.Metric.Summary.SampleCount = 0
-		m.Metric.Summary.SampleSum = 0
-		m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0]
-		m.Metric.Summary.CreatedTimestamp = nil
+	if m.Summary != nil {
+		m.Summary.SampleCount = 0
+		m.Summary.SampleSum = 0
+		m.Summary.Quantile = m.Summary.Quantile[:0]
+		m.Summary.CreatedTimestamp = nil
 	}
 }
@@ -135,12 +135,12 @@ func TestToMetadata(t *testing.T) {

 func TestToHistogram_Empty(t *testing.T) {
 	t.Run("v1", func(t *testing.T) {
-		require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
-		require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
+		require.NotNil(t, prompb.Histogram{}.ToIntHistogram())
+		require.NotNil(t, prompb.Histogram{}.ToFloatHistogram())
 	})
 	t.Run("v2", func(t *testing.T) {
-		require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
-		require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
+		require.NotNil(t, writev2.Histogram{}.ToIntHistogram())
+		require.NotNil(t, writev2.Histogram{}.ToFloatHistogram())
 	})
 }

@@ -89,8 +89,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
 		}
 	}

-	stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
-	stor.DB.Compact(ctx)
+	stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
+	stor.Compact(ctx)
 	return nil
 }

@@ -269,7 +269,7 @@ func rangeQueryCases() []benchCase {

 func BenchmarkRangeQuery(b *testing.B) {
 	stor := teststorage.New(b)
-	stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings.
+	stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
 	defer stor.Close()
 	opts := promql.EngineOpts{
 		Logger: nil,

@@ -498,8 +498,8 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
 		require.NoError(tb, a.Commit())
 	}

-	stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
-	stor.DB.Compact(ctx)
+	stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
+	stor.Compact(ctx)
 }

 func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {
@@ -731,7 +731,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 	setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
 	evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
 	// Instant evaluation. This is executed as a range evaluation with one step.
-	if s.Start == s.End && s.Interval == 0 {
+	if s.Start.Equal(s.End) && s.Interval == 0 {
 		start := timeMilliseconds(s.Start)
 		evaluator := &evaluator{
 			startTimestamp: start,
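Staticcheck QF1009: `time.Time` values should be compared with `Equal`, not `==`. The `==` operator also compares the monotonic clock reading and the location, so two representations of the same instant can compare unequal. Minimal illustration:

```go
package example

import "time"

// start == end would also compare monotonic readings and locations;
// Equal compares the instant, which is what an instant query needs.
func isInstantQuery(start, end time.Time, interval time.Duration) bool {
	return start.Equal(end) && interval == 0
}
```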
@@ -674,10 +674,10 @@ func lexInsideBraces(l *Lexer) stateFn {
 		l.backup()
 		l.emit(EQL)
 	case r == '!':
-		switch nr := l.next(); {
-		case nr == '~':
+		switch nr := l.next(); nr {
+		case '~':
 			l.emit(NEQ_REGEX)
-		case nr == '=':
+		case '=':
 			l.emit(NEQ)
 		default:
 			return l.errorf("unexpected character after '!' inside braces: %q", nr)
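Staticcheck QF1002: a tagless `switch` whose cases all compare the same variable against constants becomes a tagged switch, so each case states only the constant. In miniature:

```go
package example

// Before: switch { case nr == '~': ... case nr == '=': ... }
func tokenAfterBang(nr rune) string {
	switch nr {
	case '~':
		return "NEQ_REGEX"
	case '=':
		return "NEQ"
	default:
		return "error"
	}
}
```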
@@ -180,7 +180,7 @@ func TestAlertingRule(t *testing.T) {
 	for i := range test.result {
 		test.result[i].T = timestamp.FromTime(evalTime)
 	}
-	require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+	require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))

 	sort.Slice(filteredRes, func(i, j int) bool {
 		return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0

@@ -188,7 +188,7 @@ func TestAlertingRule(t *testing.T) {
 	prom_testutil.RequireEqual(t, test.result, filteredRes)

 	for _, aa := range rule.ActiveAlerts() {
-		require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 	}
 		}
 	}

@@ -333,7 +333,7 @@ func TestForStateAddSamples(t *testing.T) {
 			test.result[i].F = forState
 		}
 	}
-	require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+	require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))

 	sort.Slice(filteredRes, func(i, j int) bool {
 		return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0

@@ -341,7 +341,7 @@ func TestForStateAddSamples(t *testing.T) {
 	prom_testutil.RequireEqual(t, test.result, filteredRes)

 	for _, aa := range rule.ActiveAlerts() {
-		require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 	}
 		}
 	})

@@ -489,7 +489,7 @@ func TestForStateRestore(t *testing.T) {

 	got := newRule.ActiveAlerts()
 	for _, aa := range got {
-		require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 	}
 	sort.Slice(got, func(i, j int) bool {
 		return labels.Compare(got[i].Labels, got[j].Labels) < 0

@@ -513,7 +513,7 @@ func TestForStateRestore(t *testing.T) {
 		}
 	default:
 		exp := tt.expectedAlerts
-		require.Equal(t, len(exp), len(got))
+		require.Len(t, got, len(exp))
 		sortAlerts(exp)
 		sortAlerts(got)
 		for i, e := range exp {

@@ -2442,7 +2442,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
 	wg.Wait()

 	// Synchronous queries also count towards inflight, so at most we can have maxConcurrency+$groupCount inflight evaluations.
-	require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
+	require.Equal(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
 }

 func TestUpdateWhenStopped(t *testing.T) {

@@ -1157,7 +1157,7 @@ func TestScrapeLoopRun(t *testing.T) {
 	case <-time.After(5 * time.Second):
 		require.FailNow(t, "Cancellation during initial offset failed.")
 	case err := <-errc:
-		require.FailNow(t, "Unexpected error: %s", err)
+		require.FailNow(t, "Unexpected error", "err: %s", err)
 	}

 	// The provided timeout must cause cancellation of the context passed down to the

@@ -1200,7 +1200,7 @@ func TestScrapeLoopRun(t *testing.T) {
 	case <-signal:
 		// Loop terminated as expected.
 	case err := <-errc:
-		require.FailNow(t, "Unexpected error: %s", err)
+		require.FailNow(t, "Unexpected error", "err: %s", err)
 	case <-time.After(3 * time.Second):
 		require.FailNow(t, "Loop did not terminate on context cancellation")
 	}

@@ -1309,14 +1309,14 @@ test_metric_total 1
 	md, ok = cache.GetMetadata("test_metric_no_help")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
-	require.Equal(t, "", md.Help)
-	require.Equal(t, "", md.Unit)
+	require.Empty(t, md.Help)
+	require.Empty(t, md.Unit)

 	md, ok = cache.GetMetadata("test_metric_no_type")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
 	require.Equal(t, "other help text", md.Help)
-	require.Equal(t, "", md.Unit)
+	require.Empty(t, md.Unit)
 }

 func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
@@ -1567,7 +1567,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
 	if numScrapes == cue {
 		action(sl)
 	}
-	w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)))
+	fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
 	return nil
 }
 sl.run(nil)

@@ -4259,7 +4259,7 @@ test_summary_count 199
 		foundLeValues[v] = true
 	}

-	require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected")
+	require.Len(t, foundLeValues, len(expectedValues), "number of label values not as expected")
 	for _, v := range expectedValues {
 		require.Contains(t, foundLeValues, v, "label value not found")
 	}

@@ -4568,7 +4568,7 @@ metric: <
 		foundLeValues[v] = true
 	}

-	require.Equal(t, len(expectedValues), len(foundLeValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
+	require.Len(t, foundLeValues, len(expectedValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
 	for _, v := range expectedValues {
 		require.Contains(t, foundLeValues, v, "label value not found")
 	}

@@ -4817,7 +4817,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *

 	switch numScrapes {
 	case 1:
-		w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))))
+		fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))
 		return nil
 	case 5:
 		cancel()

@@ -4867,7 +4867,7 @@ func TestScrapeLoopCompression(t *testing.T) {

 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header")
-		fmt.Fprint(w, metricsText)
+		fmt.Fprint(w, string(metricsText))
 		close(scraped)
 	}))
 	defer ts.Close()
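Staticcheck QF1010: passing a `[]byte` to `fmt.Fprint` formats it with the default verb, yielding the numeric slice representation instead of text, so the commit converts to `string` at the call site. Sketch:

```go
package example

import (
	"fmt"
	"io"
)

// fmt.Fprint(w, metricsText) on a []byte would print "[109 101 ...]";
// the explicit conversion writes the text.
func writeMetrics(w io.Writer, metricsText []byte) {
	fmt.Fprint(w, string(metricsText))
}
```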
@@ -5164,7 +5164,7 @@ scrape_configs:

 	s := teststorage.New(t)
 	defer s.Close()
-	s.DB.EnableNativeHistograms()
+	s.EnableNativeHistograms()
 	reg := prometheus.NewRegistry()

 	mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg)

@@ -144,7 +144,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) {
 func (t *Target) hash() uint64 {
 	h := fnv.New64a()

-	h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
+	fmt.Fprintf(h, "%016d", t.labels.Hash())
 	h.Write([]byte(t.URL().String()))

 	return h.Sum64()

@@ -64,10 +64,8 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
 		queriers = append(queriers, newSecondaryQuerierFrom(q))
 	}

-	concurrentSelect := false
-	if len(secondaries) > 0 {
-		concurrentSelect = true
-	}
+	concurrentSelect := len(secondaries) > 0
 	return &querierAdapter{&mergeGenericQuerier{
 		mergeFn:  (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge,
 		queriers: queriers,

@@ -111,10 +109,8 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
 		queriers = append(queriers, newSecondaryQuerierFromChunk(q))
 	}

-	concurrentSelect := false
-	if len(secondaries) > 0 {
-		concurrentSelect = true
-	}
+	concurrentSelect := len(secondaries) > 0
 	return &chunkQuerierAdapter{&mergeGenericQuerier{
 		mergeFn:  (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge,
 		queriers: queriers,
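Staticcheck QF1007: a boolean declared `false` and then conditionally set to `true` folds into a single declaration, as in the two merge-querier hunks above and the `walMetadata` hunk below. In miniature:

```go
package example

// Before:
//	concurrentSelect := false
//	if len(secondaries) > 0 {
//		concurrentSelect = true
//	}
func needsConcurrentSelect(secondaries []string) bool {
	concurrentSelect := len(secondaries) > 0
	return concurrentSelect
}
```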
@@ -210,7 +210,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 		log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
 	}
 	// internal labels should be maintained
-	if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
+	if !settings.AllowUTF8 && (len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__") {
 		name = otlptranslator.NormalizeLabel(name)
 	}
 	l[name] = extras[i+1]

@@ -102,8 +102,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 		// Cumulative temporality is always valid.
 		// Delta temporality is also valid if AllowDeltaTemporality is true.
 		// All other temporality values are invalid.
-		!(temporality == pmetric.AggregationTemporalityCumulative ||
-			(settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
+		(temporality != pmetric.AggregationTemporalityCumulative &&
+			(!settings.AllowDeltaTemporality || temporality != pmetric.AggregationTemporalityDelta)) {
 			errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
 			continue
 		}
@@ -515,10 +515,8 @@ func NewQueueManager(
 		compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use.
 	}

-	walMetadata := false
-	if t.protoMsg != config.RemoteWriteProtoMsgV1 {
-		walMetadata = true
-	}
+	walMetadata := t.protoMsg != config.RemoteWriteProtoMsgV1
 	t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)

 	// The current MetadataWatcher implementation is mutually exclusive

@@ -93,7 +93,7 @@ func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.
 		noop bool
 		err  error
 	)
-	cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt)
+	cq.maxt, noop, err = c.preferLocalStorage(mint, maxt)
 	if err != nil {
 		return nil, err
 	}
@@ -277,7 +277,7 @@ func TestStreamReadEndpoint(t *testing.T) {
 	require.Equal(t, 2, recorder.Code/100)

 	require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
-	require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))
+	require.Empty(t, recorder.Result().Header.Get("Content-Encoding"))

 	var results []*prompb.ChunkedReadResponse
 	stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil)

@@ -112,7 +112,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 	require.Len(t, ssSlice, 2)
 	var iter chunkenc.Iterator
 	for i, s := range ssSlice {
-		require.EqualValues(t, series[i].lbs, s.Labels())
+		require.Equal(t, series[i].lbs, s.Labels())
 		iter = s.Iterator(iter)
 		j := 0
 		for iter.Next() == chunkenc.ValFloat {

@@ -597,15 +597,15 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 	}
 	series := NewListSeries(lbs, copiedSamples)
 	encoder := NewSeriesToChunkEncoder(series)
-	require.EqualValues(t, lbs, encoder.Labels())
+	require.Equal(t, lbs, encoder.Labels())

 	chks, err := ExpandChunks(encoder.Iterator(nil))
 	require.NoError(t, err)
-	require.Equal(t, len(test.expectedCounterResetHeaders), len(chks))
+	require.Len(t, chks, len(test.expectedCounterResetHeaders))

 	// Decode all encoded samples and assert they are equal to the original ones.
 	encodedSamples := chunks.ChunkMetasToSamples(chks)
-	require.Equal(t, len(test.expectedSamples), len(encodedSamples))
+	require.Len(t, encodedSamples, len(test.expectedSamples))

 	for i, s := range test.expectedSamples {
 		encodedSample := encodedSamples[i]
@@ -1305,7 +1305,7 @@ func TestDBCreatedTimestampSamplesIngestion(t *testing.T) {

     outputSamples := readWALSamples(t, s.wal.Dir())

-    require.Equal(t, len(tc.expectedSamples), len(outputSamples), "Expected %d samples", len(tc.expectedSamples))
+    require.Len(t, outputSamples, len(tc.expectedSamples), "Expected %d samples", len(tc.expectedSamples))

     for i, expectedSample := range tc.expectedSamples {
         for _, sample := range outputSamples {

@@ -129,7 +129,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {

     // Checking on-disk bytes for the first file.
     require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
-    require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
+    require.Len(t, hrw.closers, len(hrw.mmappedChunkFiles))

     actualBytes, err := os.ReadFile(firstFileName)
     require.NoError(t, err)

@@ -208,9 +208,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {

     files, err := os.ReadDir(hrw.dir.Name())
     require.NoError(t, err)
-    require.Equal(t, len(remainingFiles), len(files), "files on disk")
-    require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
-    require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
+    require.Len(t, files, len(remainingFiles), "files on disk")
+    require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
+    require.Len(t, hrw.closers, len(remainingFiles), "closers")

     for _, i := range remainingFiles {
         _, ok := hrw.mmappedChunkFiles[i]

@@ -325,9 +325,9 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {

     files, err := os.ReadDir(hrw.dir.Name())
     require.NoError(t, err)
-    require.Equal(t, len(remainingFiles), len(files), "files on disk")
-    require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
-    require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
+    require.Len(t, files, len(remainingFiles), "files on disk")
+    require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
+    require.Len(t, hrw.closers, len(remainingFiles), "closers")

     for _, i := range remainingFiles {
         _, ok := hrw.mmappedChunkFiles[i]

@@ -1399,7 +1399,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
         createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
     }
     require.NoError(t, db.reload())
-    require.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks")
+    require.Len(t, db.Blocks(), len(blocks), "unexpected block count after a reloadBlocks")

     return len(blocks)
 },

@@ -1347,7 +1347,7 @@ func TestTombstoneCleanFail(t *testing.T) {
     actualBlockDirs, err := blockDirs(db.dir)
     require.NoError(t, err)
     // Only one block should have been replaced by a new block.
-    require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
+    require.Len(t, actualBlockDirs, len(oldBlockDirs))
     require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
 }

@@ -1535,7 +1535,7 @@ func TestSizeRetention(t *testing.T) {

     // Test that registered size matches the actual disk size.
     require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
-    require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
+    require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
     blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
     walSize, err := db.Head().wal.Size()
     require.NoError(t, err)

@@ -2052,7 +2052,7 @@ func TestNoEmptyBlocks(t *testing.T) {
         require.NoError(t, db.Compact(ctx))
         actBlocks, err := blockDirs(db.Dir())
         require.NoError(t, err)
-        require.Equal(t, len(db.Blocks()), len(actBlocks))
+        require.Len(t, actBlocks, len(db.Blocks()))
         require.Empty(t, actBlocks)
         require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here")
     })

@@ -2072,7 +2072,7 @@ func TestNoEmptyBlocks(t *testing.T) {

         actBlocks, err := blockDirs(db.Dir())
         require.NoError(t, err)
-        require.Equal(t, len(db.Blocks()), len(actBlocks))
+        require.Len(t, actBlocks, len(db.Blocks()))
         require.Empty(t, actBlocks)

         app = db.Appender(ctx)

@@ -2093,7 +2093,7 @@ func TestNoEmptyBlocks(t *testing.T) {
         require.Equal(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here")
         actBlocks, err = blockDirs(db.Dir())
         require.NoError(t, err)
-        require.Equal(t, len(db.Blocks()), len(actBlocks))
+        require.Len(t, actBlocks, len(db.Blocks()))
         require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples")
     })

@@ -2134,7 +2134,7 @@ func TestNoEmptyBlocks(t *testing.T) {

         actBlocks, err := blockDirs(db.Dir())
         require.NoError(t, err)
-        require.Equal(t, len(db.Blocks()), len(actBlocks))
+        require.Len(t, actBlocks, len(db.Blocks()))
         require.Len(t, actBlocks, 1, "All samples are deleted. Only the most recent block should remain after compaction.")
     })
 }

@@ -2450,7 +2450,7 @@ func TestDBReadOnly(t *testing.T) {
     t.Run("blocks", func(t *testing.T) {
         blocks, err := dbReadOnly.Blocks()
         require.NoError(t, err)
-        require.Equal(t, len(expBlocks), len(blocks))
+        require.Len(t, blocks, len(expBlocks))
         for i, expBlock := range expBlocks {
             require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
         }

@@ -2478,7 +2478,7 @@ func TestDBReadOnly(t *testing.T) {
         readOnlySeries := query(t, q, matchAll)
         readOnlyDBHash := testutil.DirHash(t, dbDir)

-        require.Equal(t, len(expSeries), len(readOnlySeries), "total series mismatch")
+        require.Len(t, readOnlySeries, len(expSeries), "total series mismatch")
         require.Equal(t, expSeries, readOnlySeries, "series mismatch")
         require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
     })

@@ -2488,7 +2488,7 @@ func TestDBReadOnly(t *testing.T) {
         readOnlySeries := queryAndExpandChunks(t, cq, matchAll)
         readOnlyDBHash := testutil.DirHash(t, dbDir)

-        require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch")
+        require.Len(t, readOnlySeries, len(expChunks), "total series mismatch")
         require.Equal(t, expChunks, readOnlySeries, "series chunks mismatch")
         require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
     })

@@ -8260,7 +8260,7 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
     require.NoError(t, db.Compact(ctx))
     verifyBlockRanges := func() {
         blocks := db.Blocks()
-        require.Equal(t, len(c.blockRanges), len(blocks))
+        require.Len(t, blocks, len(c.blockRanges))
         for j, br := range c.blockRanges {
             require.Equal(t, br[0]*time.Minute.Milliseconds(), blocks[j].MinTime())
             require.Equal(t, br[1]*time.Minute.Milliseconds(), blocks[j].MaxTime())

@@ -4666,7 +4666,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
     }

     // We cannot compare StaleNAN with require.Equal, hence checking each histogram manually.
-    require.Equal(t, len(expHistograms), len(actHistograms))
+    require.Len(t, actHistograms, len(expHistograms))
     actNumStale := 0
     for i, eh := range expHistograms {
         ah := actHistograms[i]

@@ -5304,7 +5304,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
     // Verify the snapshot.
     name, idx, offset, err := LastChunkSnapshot(dir)
     require.NoError(t, err)
-    require.NotEqual(t, "", name)
+    require.NotEmpty(t, name)
     require.Equal(t, 0, idx)
     require.Positive(t, offset)
 }

@@ -424,7 +424,7 @@ func TestPersistence_index_e2e(t *testing.T) {
         res, err := ir.SortedLabelValues(ctx, k)
         require.NoError(t, err)

-        require.Equal(t, len(v), len(res))
+        require.Len(t, res, len(v))
         for i := 0; i < len(v); i++ {
             require.Equal(t, v[i], res[i])
         }

@@ -860,7 +860,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
         var b labels.ScratchBuilder
         err = ir.Series(s1Ref, &b, &chks)
         require.NoError(t, err)
-        require.Equal(t, len(tc.expChunksSamples), len(chks))
+        require.Len(t, chks, len(tc.expChunksSamples))

         cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
         defer cr.Close()

@@ -1030,7 +1030,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
         var b labels.ScratchBuilder
         err = ir.Series(s1Ref, &b, &chks)
         require.NoError(t, err)
-        require.Equal(t, len(tc.expChunksSamples), len(chks))
+        require.Len(t, chks, len(tc.expChunksSamples))

         // Now we keep receiving ooo samples
         // OOO few samples for s1.

@@ -246,7 +246,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) {
     for name, tc := range testCases {
         t.Run(name, func(t *testing.T) {
             // Sanity check.
-            require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets")
+            require.Len(t, tc.expectedCounterResets, len(tc.samples), "number of samples and counter resets")

             oooChunk := OOOChunk{}
             for _, s := range tc.samples {

@@ -264,7 +264,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) {

             chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
             require.NoError(t, err)
-            require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks")
+            require.Len(t, chunks, len(tc.expectedChunks), "number of chunks")
             sampleIndex := 0
             for i, c := range chunks {
                 require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i)

@@ -525,7 +525,7 @@ func (b *blockBaseSeriesSet) Next() bool {
     // Count those in range to size allocation (roughly - ignoring tombstones).
     nChks := 0
     for _, chk := range b.bufChks {
-        if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) {
+        if chk.MaxTime >= b.mint && chk.MinTime <= b.maxt {
             nChks++
         }
     }

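The hunk above is a behavior-preserving boolean rewrite by De Morgan's law, the simplification staticcheck's QF1001 quickfix suggests: !(A || B) equals !A && !B, and negating a comparison flips it. A sketch of the equivalence with stand-in names for the chunk and query bounds:

package example

// overlaps reports whether [chkMin, chkMax] intersects [mint, maxt].
// The parameter names are illustrative stand-ins, not the real fields.
func overlaps(chkMin, chkMax, mint, maxt int64) bool {
    // Before: "not (ends before the window or starts after it)":
    //   return !(chkMax < mint || chkMin > maxt)
    // After, via De Morgan (!(A || B) == !A && !B) and comparison
    // negation (!(x < y) == x >= y), the same truth table reads directly:
    return chkMax >= mint && chkMin <= maxt
}
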
@@ -263,7 +263,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
     rmChunkRefs(chksRes)
     require.Equal(t, errExp, errRes)

-    require.Equal(t, len(chksExp), len(chksRes))
+    require.Len(t, chksRes, len(chksExp))
     var exp, act [][]chunks.Sample
     for i := range chksExp {
         samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)

@@ -174,7 +174,7 @@ func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sa
         }
     }

-    require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name)
+    require.Len(t, actual, len(expected), "Length not equal to expected for %s", name)
     for i, s := range expected {
         expectedSample := s
         actualSample := actual[i]

@@ -125,12 +125,13 @@ func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
     return
 }

-//nolint:revive // error-naming.
+//nolint:staticcheck,revive // error-naming.
 var (
     // Currently there are only 2 types, warnings and info.
     // For now, info are visually identical with warnings as we have not updated
     // the API spec or the frontend to show a different kind of warning. But we
     // make the distinction here to prepare for adding them in future.

     PromQLInfo    = errors.New("PromQL info")
     PromQLWarning = errors.New("PromQL warning")

@@ -50,7 +50,7 @@ func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error {
         return err
     }

-    return writeSubcommands(writer, 1, model.Name, model.CmdGroupModel.Commands)
+    return writeSubcommands(writer, 1, model.Name, model.Commands)
 }

 func header(title, help string) []byte {

@@ -172,13 +172,13 @@ func writeTable(writer io.Writer, data [][]string, header string) error {

     buf := bytes.NewBuffer(nil)

-    buf.WriteString(fmt.Sprintf("\n\n%s\n\n", header))
+    fmt.Fprintf(buf, "\n\n%s\n\n", header)
     columnsToRender := determineColumnsToRender(data)

     headers := data[0]
     buf.WriteString("|")
     for _, j := range columnsToRender {
-        buf.WriteString(fmt.Sprintf(" %s |", headers[j]))
+        fmt.Fprintf(buf, " %s |", headers[j])
     }
     buf.WriteString("\n")

@@ -192,7 +192,7 @@ func writeTable(writer io.Writer, data [][]string, header string) error {
         row := data[i]
         buf.WriteString("|")
         for _, j := range columnsToRender {
-            buf.WriteString(fmt.Sprintf(" %s |", row[j]))
+            fmt.Fprintf(buf, " %s |", row[j])
         }
         buf.WriteString("\n")
     }

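Both writeTable hunks make the same substitution: buf.WriteString(fmt.Sprintf(...)) formats into a temporary string and then copies it into the buffer, while fmt.Fprintf(buf, ...) formats straight into the writer. A minimal sketch under that assumption; the helper and its format string are illustrative:

package example

import (
    "bytes"
    "fmt"
)

func renderCell(buf *bytes.Buffer, cell string) {
    // Before: allocates an intermediate string, then copies it in.
    buf.WriteString(fmt.Sprintf(" %s |", cell))

    // After: *bytes.Buffer implements io.Writer, so Fprintf writes the
    // formatted bytes directly, skipping the intermediate allocation.
    fmt.Fprintf(buf, " %s |", cell)
}
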
@@ -243,7 +243,7 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands []
             help = cmd.HelpLong
         }
         help = formatHyphenatedWords(help)
-        if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil {
+        if _, err := fmt.Fprintf(writer, "\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help); err != nil {
             return err
         }

@@ -255,8 +255,8 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands []
             return err
         }

-        if cmd.CmdGroupModel != nil && len(cmd.CmdGroupModel.Commands) > 0 {
-            if err := writeSubcommands(writer, level+1, modelName, cmd.CmdGroupModel.Commands); err != nil {
+        if cmd.CmdGroupModel != nil && len(cmd.Commands) > 0 {
+            if err := writeSubcommands(writer, level+1, modelName, cmd.Commands); err != nil {
                 return err
             }
         }

@@ -134,7 +134,7 @@ func NewQueryStats(s *Statistics) QueryStats {
         sp = s.Samples
     )

-    for s, timer := range tg.TimerGroup.timers {
+    for s, timer := range tg.timers {
         switch s {
         case EvalTotalTime:
             qt.EvalTotalTime = timer.Duration()

@@ -328,5 +328,5 @@ func (qs *QuerySamples) NewChild() *QuerySamples {
 }

 func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
-    return NewSpanTimer(ctx, qt.SpanOperation(), qs.TimerGroup.GetTimer(qt), observers...)
+    return NewSpanTimer(ctx, qt.SpanOperation(), qs.GetTimer(qt), observers...)
 }

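This pair of hunks, like the cq.querier.maxt and cmd.CmdGroupModel.Commands changes earlier in the diff, drops a redundant embedded-field selector: Go promotes an embedded struct's fields and methods to the outer type, which is what staticcheck's QF1008 quickfix rewrites. A sketch with simplified stand-in types, not the real Prometheus definitions:

package example

import "time"

type TimerGroup struct {
    timers map[string]time.Duration
}

// QueryTimers embeds TimerGroup, so TimerGroup's fields are promoted.
type QueryTimers struct {
    TimerGroup
}

func totalDuration(qs *QueryTimers) time.Duration {
    var sum time.Duration
    // qs.TimerGroup.timers compiles too, but naming the embedded type is
    // redundant: field promotion makes qs.timers resolve to the same map.
    for _, d := range qs.timers {
        sum += d
    }
    return sum
}
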
@@ -54,10 +54,10 @@ func SanitizeFullLabelName(name string) string {
     }
     var validSb strings.Builder
     for i, b := range name {
-        if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-            validSb.WriteRune('_')
-        } else {
+        if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0) {
             validSb.WriteRune(b)
+        } else {
+            validSb.WriteRune('_')
         }
     }
     return validSb.String()

@@ -812,7 +812,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {

     hostname, err := os.Hostname()
     if err != nil {
-        return status, fmt.Errorf("Error getting hostname: %w", err)
+        return status, fmt.Errorf("error getting hostname: %w", err)
     }
     status.Hostname = hostname
     status.ServerTime = time.Now().UTC()

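The lowercase change above matches Go's convention, enforced by staticcheck's ST1005 check, that error strings are not capitalized, since they usually appear mid-sentence once wrapped. A short sketch:

package example

import (
    "fmt"
    "os"
)

func currentHostname() (string, error) {
    h, err := os.Hostname()
    if err != nil {
        // A capitalized message reads badly after wrapping, e.g.
        // "runtime info: Error getting hostname: ...". Lowercase composes.
        return "", fmt.Errorf("error getting hostname: %w", err)
    }
    return h, nil
}
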
@@ -624,7 +624,7 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
     b, err := io.ReadAll(resp.Body)
     require.NoError(t, err)
     require.NoError(t, json.Unmarshal(b, snapshot))
-    require.NotZero(t, snapshot.Data.Name, "snapshot directory not returned")
+    require.NotEmpty(t, snapshot.Data.Name, "snapshot directory not returned")
     require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
     require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
 }