Fix linting issues found by golangci-lint v2.0.2 (#16368)

* Fix linting issues found by golangci-lint v2.0.2

---------

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Arve Knudsen 2025-05-03 19:05:13 +02:00 committed by GitHub
parent 2e9ab9cc62
commit e7e3ab2824
GPG key ID: B5690EEEBB952194
50 changed files with 178 additions and 208 deletions

View file

@ -164,31 +164,9 @@ linters:
- name: unused-parameter
- name: var-declaration
- name: var-naming
staticcheck:
checks:
- all # Enable all checks.
# FIXME: We should enable this check once we have fixed all the issues.
- -QF1001
- -QF1002
- -QF1003
- -QF1006
- -QF1007
- -QF1008
- -QF1009
- -QF1010
- -QF1012
- -ST1000
- -ST1003
- -ST1005
- -ST1012
- -ST1016
- -ST1020
testifylint:
disable:
- empty # FIXME
- equal-values # FIXME
- float-compare
- formatter # FIXME
- go-require
- len # FIXME
- useless-assert # FIXME: wait for golangci-lint > v2.0.2
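
Most of the newly disabled staticcheck and testifylint checks carry a FIXME for later cleanup; float-compare and go-require stay off without one. As a rough illustration (not taken from this commit, names invented), this is the kind of assertion the float-compare check flags and the InDelta form it prefers:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical test: exact equality on floating-point results is brittle,
// so a tolerance-based assertion is used instead.
func TestRatio(t *testing.T) {
	got := 0.1 + 0.2
	// Flagged by float-compare: require.Equal(t, 0.3, got)
	require.InDelta(t, 0.3, got, 1e-9)
}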

View file

@ -268,7 +268,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
require.Fail(t, "prometheus should be still running: %v", err)
t.Fatalf("prometheus should be still running: %v", err)
case <-time.After(startupTime):
prom.Process.Kill()
<-done
@ -332,7 +332,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
require.Fail(t, "prometheus should be still running: %v", err)
t.Fatalf("prometheus should be still running: %v", err)
case <-time.After(startupTime):
prom.Process.Kill()
<-done
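
The switch from require.Fail to t.Fatalf matters because require.Fail treats its second argument as a literal failure message, so the %v verb was never interpolated, whereas t.Fatalf formats its arguments. A minimal sketch with a hypothetical helper (waitOrFail is not part of the test):

package example

import (
	"testing"
	"time"
)

// waitOrFail fails the test with a formatted message if the process exits
// before the grace period; t.Fatalf expands %v, unlike require.Fail's
// failure-message parameter.
func waitOrFail(t *testing.T, done <-chan error, grace time.Duration) {
	t.Helper()
	select {
	case err := <-done:
		t.Fatalf("prometheus should be still running: %v", err)
	case <-time.After(grace):
		// Still running after the grace period, which is what the test expects.
	}
}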

View file

@ -88,7 +88,7 @@ func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
_, err = p.configFile.Seek(0, 0)
require.NoError(t, err)
if queryLogFile != "" {
_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
_, err = fmt.Fprintf(p.configFile, "global:\n query_log_file: %s\n", queryLogFile)
require.NoError(t, err)
}
_, err = p.configFile.Write([]byte(p.configuration()))
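
Writing through fmt.Fprintf avoids building an intermediate string and byte slice, which is the pattern staticcheck's QF1012 targets; *os.File already satisfies io.Writer. A small sketch (the helper name is made up):

package example

import (
	"fmt"
	"os"
)

// writeQueryLogSetting formats straight into the file instead of
// Write([]byte(fmt.Sprintf(...))).
func writeQueryLogSetting(f *os.File, queryLogFile string) error {
	_, err := fmt.Fprintf(f, "global:\n  query_log_file: %s\n", queryLogFile)
	return err
}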

View file

@ -510,7 +510,7 @@ func TestCheckRules(t *testing.T) {
os.Stdin = r
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
require.Equal(t, successExitCode, exitCode, "")
require.Equal(t, successExitCode, exitCode)
})
t.Run("rules-bad", func(t *testing.T) {
@ -532,7 +532,7 @@ func TestCheckRules(t *testing.T) {
os.Stdin = r
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
require.Equal(t, failureExitCode, exitCode, "")
require.Equal(t, failureExitCode, exitCode)
})
t.Run("rules-lint-fatal", func(t *testing.T) {
@ -554,7 +554,7 @@ func TestCheckRules(t *testing.T) {
os.Stdin = r
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false))
require.Equal(t, lintErrExitCode, exitCode, "")
require.Equal(t, lintErrExitCode, exitCode)
})
}
@ -572,19 +572,19 @@ func TestCheckRulesWithRuleFiles(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml")
require.Equal(t, successExitCode, exitCode, "")
require.Equal(t, successExitCode, exitCode)
})
t.Run("rules-bad", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml")
require.Equal(t, failureExitCode, exitCode, "")
require.Equal(t, failureExitCode, exitCode)
})
t.Run("rules-lint-fatal", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml")
require.Equal(t, lintErrExitCode, exitCode, "")
require.Equal(t, lintErrExitCode, exitCode)
})
}

View file

@ -321,12 +321,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
return errs
}
for {
if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) {
break
}
for curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint) {
// We need to check alerts for this time.
// If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval'
// then we compare alerts with the Eval at `ts`.
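
This loop rewrite matches staticcheck's QF1006 suggestion: an unconditional for whose first statement is `if !cond { break }` becomes `for cond`. A self-contained sketch (countWithin is hypothetical):

package example

// countWithin counts leading elements of xs that are at most limit; the
// negated break condition has been lifted into the loop header.
func countWithin(xs []int, limit int) int {
	i := 0
	for i < len(xs) && xs[i] <= limit {
		i++
	}
	return i
}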

View file

@ -115,6 +115,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
region, err := metadata.Region()
if err != nil {
//nolint:staticcheck // Capitalized first word.
return errors.New("Lightsail SD configuration requires a region")
}
c.Region = region
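
staticcheck's ST1005 wants error strings to start with a lowercase letter; here the capital letter belongs to a product name, so the check is silenced rather than reworded. A sketch of the two options (both error variables are invented):

package example

import "errors"

// Option 1: reword so the error string starts lowercase.
var errNoRegion = errors.New("region is required for Lightsail SD configuration")

// Option 2: keep the product name first and opt out of the check.
//
//nolint:staticcheck // Capitalized first word is a product name.
var errNoServer = errors.New("Uyuni SD configuration requires server host")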

View file

@ -425,14 +425,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
d := newDiscovery(t, config)
// Should be empty if not initialized.
require.Equal(t, "", d.clientDatacenter)
require.Empty(t, d.clientDatacenter)
err = d.getDatacenter()
// An error should be returned.
require.EqualError(t, err, tc.errMessage)
// Should still be empty.
require.Equal(t, "", d.clientDatacenter)
require.Empty(t, d.clientDatacenter)
}
}
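
Many of these test changes follow testifylint's empty and len checks: comparisons against "" or against len(...) become require.Empty and require.Len, which report the offending value on failure. A rough sketch (assertSnapshot and its arguments are invented):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// assertSnapshot shows the preferred assertions: Empty instead of
// Equal(t, "", ...) and Len instead of Equal(t, len(want), len(got)).
func assertSnapshot(t *testing.T, datacenter string, got, want []string) {
	t.Helper()
	require.Empty(t, datacenter)
	require.Len(t, got, len(want))
}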

View file

@ -219,7 +219,7 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
podPhaseLabel: lv(string(pod.Status.Phase)),
podNodeNameLabel: lv(pod.Spec.NodeName),
podHostIPLabel: lv(pod.Status.HostIP),
podUID: lv(string(pod.ObjectMeta.UID)),
podUID: lv(string(pod.UID)),
}
addObjectMetaLabels(ls, pod.ObjectMeta, RolePod)
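
pod.UID works because ObjectMeta is embedded in the Pod struct, so its fields are promoted; staticcheck's QF1008 flags the spelled-out selector. A toy example with invented types:

package example

// meta plays the role of an embedded metadata struct such as ObjectMeta.
type meta struct{ UID string }

// pod embeds meta, so pod.UID and pod.meta.UID name the same field.
type pod struct {
	meta
}

func uid(p pod) string {
	return p.UID // QF1008 prefers this over p.meta.UID
}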

View file

@ -194,7 +194,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
events, err := d.client.ListEvents(ctx, &eventsOpts)
if err != nil {
var e *linodego.Error
if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) {
if !errors.As(err, &e) || e.Code != http.StatusUnauthorized {
return nil, err
}
// If we get a 401, the token doesn't have `events:read_only` scope.
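
The rewritten condition is De Morgan's law (the pattern staticcheck's QF1001 targets): !(A && B) becomes !A || !B, and short-circuiting still protects the e.Code access when errors.As returns false. A self-contained sketch (apiError stands in for linodego.Error):

package example

import (
	"errors"
	"net/http"
)

// apiError is a stand-in for linodego.Error.
type apiError struct{ Code int }

func (e *apiError) Error() string { return "api error" }

// shouldPropagate reports whether err is anything other than a 401 from the
// API; if errors.As fails, the || short-circuits before touching e.Code.
func shouldPropagate(err error) bool {
	var e *apiError
	return !errors.As(err, &e) || e.Code != http.StatusUnauthorized
}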

View file

@ -695,7 +695,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
for x := 0; x < totalUpdatesCount; x++ {
select {
case <-ctx.Done():
require.FailNow(t, "%d: no update arrived within the timeout limit", x)
t.Fatalf("%d: no update arrived within the timeout limit", x)
case tgs := <-provUpdates:
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
for _, got := range discoveryManager.allGroups() {
@ -769,12 +769,10 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
}
}
}
if match != present {
msg := ""
if !present {
msg = "not"
}
require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
if present {
require.Truef(t, match, "%q must be present in Targets labels: %q", label, mergedTargets)
} else {
require.Falsef(t, match, "%q must be absent in Targets labels: %q", label, mergedTargets)
}
}
@ -1091,9 +1089,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
targetGroups, ok := discoveryManager.targets[p]
require.True(t, ok, "'%v' should be present in targets", p)
// Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p)
require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
require.Empty(t, syncedTargets["prometheus"], 0)
require.Empty(t, syncedTargets["prometheus"])
}
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@ -1373,10 +1371,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
time.Sleep(expected.delay)
select {
case <-ctx.Done():
require.FailNow(t, "step %d: no update received in the expected timeframe", i)
t.Fatalf("step %d: no update received in the expected timeframe", i)
case tgs, ok := <-mgr.SyncCh():
require.True(t, ok, "step %d: discovery manager channel is closed", i)
require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i)
require.Len(t, tgs, len(expected.tgs), "step %d: targets mismatch", i)
for k := range expected.tgs {
_, ok := tgs[k]

View file

@ -202,7 +202,7 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
tgt = tg.Targets[1]
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
"Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
}
@ -300,9 +300,9 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
tgt := tg.Targets[0]
require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
"Wrong portMappings label from the first port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
"Wrong portDefinitions label from the first port.")
tgt = tg.Targets[1]
@ -354,12 +354,12 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
tgt := tg.Targets[0]
require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
tgt = tg.Targets[1]
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
}
@ -401,13 +401,13 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) {
tgt := tg.Targets[0]
require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
tgt = tg.Targets[1]
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
}
func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList {
@ -458,12 +458,12 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
tgt := tg.Targets[0]
require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
tgt = tg.Targets[1]
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
}
func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList {
@ -514,12 +514,12 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
tgt := tg.Targets[0]
require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
tgt = tg.Targets[1]
require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
}
func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList {
@ -574,10 +574,10 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
tgt := tg.Targets[0]
require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
tgt = tg.Targets[1]
require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
}

View file

@ -235,10 +235,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
if len(networks) == 0 {
// Try to lookup shared networks
for {
if !containerNetworkMode.IsContainer() {
break
}
for containerNetworkMode.IsContainer() {
tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
if !exists {
break

View file

@ -182,9 +182,10 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
var ipv6Addresses []string
for _, ip := range server.PublicIPs {
if ip.Family == instance.ServerIPIPFamilyInet {
switch ip.Family {
case instance.ServerIPIPFamilyInet:
ipv4Addresses = append(ipv4Addresses, ip.Address.String())
} else if ip.Family == instance.ServerIPIPFamilyInet6 {
case instance.ServerIPIPFamilyInet6:
ipv6Addresses = append(ipv6Addresses, ip.Address.String())
}
}
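
An if/else-if chain that keeps testing the same expression becomes a tagged switch, as staticcheck's QF1003 suggests. A reduced sketch with invented types:

package example

type ipFamily int

const (
	familyInet ipFamily = iota
	familyInet6
)

// appendByFamily routes an address to the right slice via a tagged switch
// instead of `if f == familyInet { ... } else if f == familyInet6 { ... }`.
func appendByFamily(f ipFamily, addr string, v4, v6 []string) ([]string, []string) {
	switch f {
	case familyInet:
		v4 = append(v4, addr)
	case familyInet6:
		v6 = append(v6, addr)
	}
	return v4, v6
}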

View file

@ -141,18 +141,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.Server == "" {
//nolint:staticcheck // Capitalized first word.
return errors.New("Uyuni SD configuration requires server host")
}
_, err = url.Parse(c.Server)
if err != nil {
//nolint:staticcheck // Capitalized first word.
return fmt.Errorf("Uyuni Server URL is not valid: %w", err)
}
if c.Username == "" {
//nolint:staticcheck // Capitalized first word.
return errors.New("Uyuni SD configuration requires a username")
}
if c.Password == "" {
//nolint:staticcheck // Capitalized first word.
return errors.New("Uyuni SD configuration requires a password")
}
return c.HTTPClientConfig.Validate()

View file

@ -145,7 +145,7 @@ func parseFlags() *config {
_, err := a.Parse(os.Args[1:])
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s", err)
a.Usage(os.Args[1:])
os.Exit(2)
}

View file

@ -78,7 +78,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) {
case b == ':':
result.WriteString("_.")
default:
result.WriteString(fmt.Sprintf("_%X", b))
fmt.Fprintf(result, "_%X", b)
}
}
result.WriteByte('"')

View file

@ -1016,7 +1016,7 @@ type floatBucketIterator struct {
func (i *floatBucketIterator) At() Bucket[float64] {
// Need to use i.targetSchema rather than i.baseBucketIterator.schema.
return i.baseBucketIterator.at(i.targetSchema)
return i.at(i.targetSchema)
}
func (i *floatBucketIterator) Next() bool {

View file

@ -513,7 +513,7 @@ func TestLabels_Has(t *testing.T) {
}
func TestLabels_Get(t *testing.T) {
require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo"))
require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo"))
require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa"))
require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb"))
}

View file

@ -81,7 +81,7 @@ func (m *MetricStreamingDecoder) NextMetricFamily() error {
m.mfData = b[varIntLength:totalLength]
m.inPos += totalLength
return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData)
return m.unmarshalWithoutMetrics(m, m.mfData)
}
// resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory.
@ -98,7 +98,7 @@ func (m *MetricStreamingDecoder) NextMetric() error {
m.resetMetric()
m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end]
if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil {
if err := m.unmarshalWithoutLabels(m, m.mData); err != nil {
return err
}
m.metricIndex++
@ -111,37 +111,37 @@ func (m *MetricStreamingDecoder) resetMetric() {
m.TimestampMs = 0
// TODO(bwplotka): Autogenerate reset functions.
if m.Metric.Counter != nil {
m.Metric.Counter.Value = 0
m.Metric.Counter.CreatedTimestamp = nil
m.Metric.Counter.Exemplar = nil
if m.Counter != nil {
m.Counter.Value = 0
m.Counter.CreatedTimestamp = nil
m.Counter.Exemplar = nil
}
if m.Metric.Gauge != nil {
m.Metric.Gauge.Value = 0
if m.Gauge != nil {
m.Gauge.Value = 0
}
if m.Metric.Histogram != nil {
m.Metric.Histogram.SampleCount = 0
m.Metric.Histogram.SampleCountFloat = 0
m.Metric.Histogram.SampleSum = 0
m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0]
m.Metric.Histogram.CreatedTimestamp = nil
m.Metric.Histogram.Schema = 0
m.Metric.Histogram.ZeroThreshold = 0
m.Metric.Histogram.ZeroCount = 0
m.Metric.Histogram.ZeroCountFloat = 0
m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0]
m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0]
m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0]
m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0]
m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0]
m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0]
m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0]
if m.Histogram != nil {
m.Histogram.SampleCount = 0
m.Histogram.SampleCountFloat = 0
m.Histogram.SampleSum = 0
m.Histogram.Bucket = m.Histogram.Bucket[:0]
m.Histogram.CreatedTimestamp = nil
m.Histogram.Schema = 0
m.Histogram.ZeroThreshold = 0
m.Histogram.ZeroCount = 0
m.Histogram.ZeroCountFloat = 0
m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0]
m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0]
m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0]
m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0]
m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0]
m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0]
m.Histogram.Exemplars = m.Histogram.Exemplars[:0]
}
if m.Metric.Summary != nil {
m.Metric.Summary.SampleCount = 0
m.Metric.Summary.SampleSum = 0
m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0]
m.Metric.Summary.CreatedTimestamp = nil
if m.Summary != nil {
m.Summary.SampleCount = 0
m.Summary.SampleSum = 0
m.Summary.Quantile = m.Summary.Quantile[:0]
m.Summary.CreatedTimestamp = nil
}
}

View file

@ -135,12 +135,12 @@ func TestToMetadata(t *testing.T) {
func TestToHistogram_Empty(t *testing.T) {
t.Run("v1", func(t *testing.T) {
require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
require.NotNil(t, prompb.Histogram{}.ToIntHistogram())
require.NotNil(t, prompb.Histogram{}.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
require.NotNil(t, writev2.Histogram{}.ToIntHistogram())
require.NotNil(t, writev2.Histogram{}.ToFloatHistogram())
})
}

View file

@ -89,8 +89,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
}
}
stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
stor.DB.Compact(ctx)
stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
stor.Compact(ctx)
return nil
}
@ -269,7 +269,7 @@ func rangeQueryCases() []benchCase {
func BenchmarkRangeQuery(b *testing.B) {
stor := teststorage.New(b)
stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings.
stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
defer stor.Close()
opts := promql.EngineOpts{
Logger: nil,
@ -498,8 +498,8 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
require.NoError(tb, a.Commit())
}
stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
stor.DB.Compact(ctx)
stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
stor.Compact(ctx)
}
func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {

View file

@ -731,7 +731,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
// Instant evaluation. This is executed as a range evaluation with one step.
if s.Start == s.End && s.Interval == 0 {
if s.Start.Equal(s.End) && s.Interval == 0 {
start := timeMilliseconds(s.Start)
evaluator := &evaluator{
startTimestamp: start,
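
Comparing time.Time values with == also compares the monotonic clock reading and Location, so Equal is the safer check for "same instant" (this matches staticcheck's QF1009 suggestion). A sketch with an invented helper:

package example

import "time"

// isInstantQuery treats a query as instant when start and end denote the
// same instant; time.Time.Equal ignores monotonic-clock and Location
// differences that == would not.
func isInstantQuery(start, end time.Time, interval time.Duration) bool {
	return start.Equal(end) && interval == 0
}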

View file

@ -674,10 +674,10 @@ func lexInsideBraces(l *Lexer) stateFn {
l.backup()
l.emit(EQL)
case r == '!':
switch nr := l.next(); {
case nr == '~':
switch nr := l.next(); nr {
case '~':
l.emit(NEQ_REGEX)
case nr == '=':
case '=':
l.emit(NEQ)
default:
return l.errorf("unexpected character after '!' inside braces: %q", nr)
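
When every case of a switch compares the same freshly read value, staticcheck's QF1002 suggests switching on that value directly. A trimmed-down sketch (not the lexer's real code):

package example

// tokenAfterBang classifies the rune following '!' the same way the lexer
// change does: switch on the rune instead of using a boolean switch.
func tokenAfterBang(nr rune) string {
	switch nr {
	case '~':
		return "NEQ_REGEX"
	case '=':
		return "NEQ"
	default:
		return "error"
	}
}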

View file

@ -180,7 +180,7 @@ func TestAlertingRule(t *testing.T) {
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime)
}
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@ -188,7 +188,7 @@ func TestAlertingRule(t *testing.T) {
prom_testutil.RequireEqual(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}
@ -333,7 +333,7 @@ func TestForStateAddSamples(t *testing.T) {
test.result[i].F = forState
}
}
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@ -341,7 +341,7 @@ func TestForStateAddSamples(t *testing.T) {
prom_testutil.RequireEqual(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
})
@ -489,7 +489,7 @@ func TestForStateRestore(t *testing.T) {
got := newRule.ActiveAlerts()
for _, aa := range got {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
@ -513,7 +513,7 @@ func TestForStateRestore(t *testing.T) {
}
default:
exp := tt.expectedAlerts
require.Equal(t, len(exp), len(got))
require.Len(t, got, len(exp))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {
@ -2442,7 +2442,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
wg.Wait()
// Synchronous queries also count towards inflight, so at most we can have maxConcurrency+$groupCount inflight evaluations.
require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
require.Equal(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
}
func TestUpdateWhenStopped(t *testing.T) {

View file

@ -1157,7 +1157,7 @@ func TestScrapeLoopRun(t *testing.T) {
case <-time.After(5 * time.Second):
require.FailNow(t, "Cancellation during initial offset failed.")
case err := <-errc:
require.FailNow(t, "Unexpected error: %s", err)
require.FailNow(t, "Unexpected error", "err: %s", err)
}
// The provided timeout must cause cancellation of the context passed down to the
@ -1200,7 +1200,7 @@ func TestScrapeLoopRun(t *testing.T) {
case <-signal:
// Loop terminated as expected.
case err := <-errc:
require.FailNow(t, "Unexpected error: %s", err)
require.FailNow(t, "Unexpected error", "err: %s", err)
case <-time.After(3 * time.Second):
require.FailNow(t, "Loop did not terminate on context cancellation")
}
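
require.FailNow takes a fixed failure message followed by optional msgAndArgs; putting a %s verb into the message itself is what testifylint's formatter check complains about, hence the split into a constant message plus formatted detail. A hypothetical helper showing the shape:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// failOnErr keeps the failure message constant and moves the formatted
// error into the msgAndArgs tail, as the formatter check expects.
func failOnErr(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		require.FailNow(t, "Unexpected error", "err: %s", err)
	}
}
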
@ -1309,14 +1309,14 @@ test_metric_total 1
md, ok = cache.GetMetadata("test_metric_no_help")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
require.Equal(t, "", md.Help)
require.Equal(t, "", md.Unit)
require.Empty(t, md.Help)
require.Empty(t, md.Unit)
md, ok = cache.GetMetadata("test_metric_no_type")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
require.Equal(t, "other help text", md.Help)
require.Equal(t, "", md.Unit)
require.Empty(t, md.Unit)
}
func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
@ -1567,7 +1567,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
if numScrapes == cue {
action(sl)
}
w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)))
fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
return nil
}
sl.run(nil)
@ -4259,7 +4259,7 @@ test_summary_count 199
foundLeValues[v] = true
}
require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected")
require.Len(t, foundLeValues, len(expectedValues), "number of label values not as expected")
for _, v := range expectedValues {
require.Contains(t, foundLeValues, v, "label value not found")
}
@ -4568,7 +4568,7 @@ metric: <
foundLeValues[v] = true
}
require.Equal(t, len(expectedValues), len(foundLeValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
require.Len(t, foundLeValues, len(expectedValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
for _, v := range expectedValues {
require.Contains(t, foundLeValues, v, "label value not found")
}
@ -4817,7 +4817,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
switch numScrapes {
case 1:
w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))))
fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))
return nil
case 5:
cancel()
@ -4867,7 +4867,7 @@ func TestScrapeLoopCompression(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header")
fmt.Fprint(w, metricsText)
fmt.Fprint(w, string(metricsText))
close(scraped)
}))
defer ts.Close()
@ -5164,7 +5164,7 @@ scrape_configs:
s := teststorage.New(t)
defer s.Close()
s.DB.EnableNativeHistograms()
s.EnableNativeHistograms()
reg := prometheus.NewRegistry()
mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg)

View file

@ -144,7 +144,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) {
func (t *Target) hash() uint64 {
h := fnv.New64a()
h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
fmt.Fprintf(h, "%016d", t.labels.Hash())
h.Write([]byte(t.URL().String()))
return h.Sum64()
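
An fnv hasher implements io.Writer, so the formatted label hash can go straight into it; this is the same QF1012-style change as the earlier Fprintf rewrites. Self-contained sketch (hashTarget is invented):

package example

import (
	"fmt"
	"hash/fnv"
)

// hashTarget formats the label hash directly into the FNV hasher instead of
// allocating []byte(fmt.Sprintf(...)) first.
func hashTarget(labelsHash uint64, url string) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%016d", labelsHash)
	h.Write([]byte(url))
	return h.Sum64()
}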

View file

@ -64,10 +64,8 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
queriers = append(queriers, newSecondaryQuerierFrom(q))
}
concurrentSelect := false
if len(secondaries) > 0 {
concurrentSelect = true
}
concurrentSelect := len(secondaries) > 0
return &querierAdapter{&mergeGenericQuerier{
mergeFn: (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge,
queriers: queriers,
@ -111,10 +109,8 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
queriers = append(queriers, newSecondaryQuerierFromChunk(q))
}
concurrentSelect := false
if len(secondaries) > 0 {
concurrentSelect = true
}
concurrentSelect := len(secondaries) > 0
return &chunkQuerierAdapter{&mergeGenericQuerier{
mergeFn: (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge,
queriers: queriers,

View file

@ -210,7 +210,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained
if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
if !settings.AllowUTF8 && (len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__") {
name = otlptranslator.NormalizeLabel(name)
}
l[name] = extras[i+1]

View file

@ -102,8 +102,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
// Cumulative temporality is always valid.
// Delta temporality is also valid if AllowDeltaTemporality is true.
// All other temporality values are invalid.
!(temporality == pmetric.AggregationTemporalityCumulative ||
(settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
(temporality != pmetric.AggregationTemporalityCumulative &&
(!settings.AllowDeltaTemporality || temporality != pmetric.AggregationTemporalityDelta)) {
errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
continue
}

View file

@ -515,10 +515,8 @@ func NewQueueManager(
compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use.
}
walMetadata := false
if t.protoMsg != config.RemoteWriteProtoMsgV1 {
walMetadata = true
}
walMetadata := t.protoMsg != config.RemoteWriteProtoMsgV1
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
// The current MetadataWatcher implementation is mutually exclusive

View file

@ -93,7 +93,7 @@ func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.
noop bool
err error
)
cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt)
cq.maxt, noop, err = c.preferLocalStorage(mint, maxt)
if err != nil {
return nil, err
}

View file

@ -277,7 +277,7 @@ func TestStreamReadEndpoint(t *testing.T) {
require.Equal(t, 2, recorder.Code/100)
require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))
require.Empty(t, recorder.Result().Header.Get("Content-Encoding"))
var results []*prompb.ChunkedReadResponse
stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil)

View file

@ -112,7 +112,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
require.Len(t, ssSlice, 2)
var iter chunkenc.Iterator
for i, s := range ssSlice {
require.EqualValues(t, series[i].lbs, s.Labels())
require.Equal(t, series[i].lbs, s.Labels())
iter = s.Iterator(iter)
j := 0
for iter.Next() == chunkenc.ValFloat {
@ -597,15 +597,15 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
}
series := NewListSeries(lbs, copiedSamples)
encoder := NewSeriesToChunkEncoder(series)
require.EqualValues(t, lbs, encoder.Labels())
require.Equal(t, lbs, encoder.Labels())
chks, err := ExpandChunks(encoder.Iterator(nil))
require.NoError(t, err)
require.Equal(t, len(test.expectedCounterResetHeaders), len(chks))
require.Len(t, chks, len(test.expectedCounterResetHeaders))
// Decode all encoded samples and assert they are equal to the original ones.
encodedSamples := chunks.ChunkMetasToSamples(chks)
require.Equal(t, len(test.expectedSamples), len(encodedSamples))
require.Len(t, encodedSamples, len(test.expectedSamples))
for i, s := range test.expectedSamples {
encodedSample := encodedSamples[i]

View file

@ -1305,7 +1305,7 @@ func TestDBCreatedTimestampSamplesIngestion(t *testing.T) {
outputSamples := readWALSamples(t, s.wal.Dir())
require.Equal(t, len(tc.expectedSamples), len(outputSamples), "Expected %d samples", len(tc.expectedSamples))
require.Len(t, outputSamples, len(tc.expectedSamples), "Expected %d samples", len(tc.expectedSamples))
for i, expectedSample := range tc.expectedSamples {
for _, sample := range outputSamples {

View file

@ -129,7 +129,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
// Checking on-disk bytes for the first file.
require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
require.Len(t, hrw.closers, len(hrw.mmappedChunkFiles))
actualBytes, err := os.ReadFile(firstFileName)
require.NoError(t, err)
@ -208,9 +208,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
require.Len(t, files, len(remainingFiles), "files on disk")
require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
require.Len(t, hrw.closers, len(remainingFiles), "closers")
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
@ -325,9 +325,9 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
require.Len(t, files, len(remainingFiles), "files on disk")
require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
require.Len(t, hrw.closers, len(remainingFiles), "closers")
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]

View file

@ -1399,7 +1399,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
}
require.NoError(t, db.reload())
require.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks")
require.Len(t, db.Blocks(), len(blocks), "unexpected block count after a reloadBlocks")
return len(blocks)
},

View file

@ -1347,7 +1347,7 @@ func TestTombstoneCleanFail(t *testing.T) {
actualBlockDirs, err := blockDirs(db.dir)
require.NoError(t, err)
// Only one block should have been replaced by a new block.
require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
require.Len(t, actualBlockDirs, len(oldBlockDirs))
require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
}
@ -1535,7 +1535,7 @@ func TestSizeRetention(t *testing.T) {
// Test that registered size matches the actual disk size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err := db.Head().wal.Size()
require.NoError(t, err)
@ -2052,7 +2052,7 @@ func TestNoEmptyBlocks(t *testing.T) {
require.NoError(t, db.Compact(ctx))
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Len(t, actBlocks, len(db.Blocks()))
require.Empty(t, actBlocks)
require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here")
})
@ -2072,7 +2072,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Len(t, actBlocks, len(db.Blocks()))
require.Empty(t, actBlocks)
app = db.Appender(ctx)
@ -2093,7 +2093,7 @@ func TestNoEmptyBlocks(t *testing.T) {
require.Equal(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here")
actBlocks, err = blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Len(t, actBlocks, len(db.Blocks()))
require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples")
})
@ -2134,7 +2134,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Len(t, actBlocks, len(db.Blocks()))
require.Len(t, actBlocks, 1, "All samples are deleted. Only the most recent block should remain after compaction.")
})
}
@ -2450,7 +2450,7 @@ func TestDBReadOnly(t *testing.T) {
t.Run("blocks", func(t *testing.T) {
blocks, err := dbReadOnly.Blocks()
require.NoError(t, err)
require.Equal(t, len(expBlocks), len(blocks))
require.Len(t, blocks, len(expBlocks))
for i, expBlock := range expBlocks {
require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
}
@ -2478,7 +2478,7 @@ func TestDBReadOnly(t *testing.T) {
readOnlySeries := query(t, q, matchAll)
readOnlyDBHash := testutil.DirHash(t, dbDir)
require.Equal(t, len(expSeries), len(readOnlySeries), "total series mismatch")
require.Len(t, readOnlySeries, len(expSeries), "total series mismatch")
require.Equal(t, expSeries, readOnlySeries, "series mismatch")
require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
})
@ -2488,7 +2488,7 @@ func TestDBReadOnly(t *testing.T) {
readOnlySeries := queryAndExpandChunks(t, cq, matchAll)
readOnlyDBHash := testutil.DirHash(t, dbDir)
require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch")
require.Len(t, readOnlySeries, len(expChunks), "total series mismatch")
require.Equal(t, expChunks, readOnlySeries, "series chunks mismatch")
require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
})
@ -8260,7 +8260,7 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, db.Compact(ctx))
verifyBlockRanges := func() {
blocks := db.Blocks()
require.Equal(t, len(c.blockRanges), len(blocks))
require.Len(t, blocks, len(c.blockRanges))
for j, br := range c.blockRanges {
require.Equal(t, br[0]*time.Minute.Milliseconds(), blocks[j].MinTime())
require.Equal(t, br[1]*time.Minute.Milliseconds(), blocks[j].MaxTime())

View file

@ -4666,7 +4666,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
}
// We cannot compare StaleNAN with require.Equal, hence checking each histogram manually.
require.Equal(t, len(expHistograms), len(actHistograms))
require.Len(t, actHistograms, len(expHistograms))
actNumStale := 0
for i, eh := range expHistograms {
ah := actHistograms[i]
@ -5304,7 +5304,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
// Verify the snapshot.
name, idx, offset, err := LastChunkSnapshot(dir)
require.NoError(t, err)
require.NotEqual(t, "", name)
require.NotEmpty(t, name)
require.Equal(t, 0, idx)
require.Positive(t, offset)
}

View file

@ -424,7 +424,7 @@ func TestPersistence_index_e2e(t *testing.T) {
res, err := ir.SortedLabelValues(ctx, k)
require.NoError(t, err)
require.Equal(t, len(v), len(res))
require.Len(t, res, len(v))
for i := 0; i < len(v); i++ {
require.Equal(t, v[i], res[i])
}

View file

@ -860,7 +860,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
var b labels.ScratchBuilder
err = ir.Series(s1Ref, &b, &chks)
require.NoError(t, err)
require.Equal(t, len(tc.expChunksSamples), len(chks))
require.Len(t, chks, len(tc.expChunksSamples))
cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
defer cr.Close()
@ -1030,7 +1030,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
var b labels.ScratchBuilder
err = ir.Series(s1Ref, &b, &chks)
require.NoError(t, err)
require.Equal(t, len(tc.expChunksSamples), len(chks))
require.Len(t, chks, len(tc.expChunksSamples))
// Now we keep receiving ooo samples
// OOO few samples for s1.

View file

@ -246,7 +246,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) {
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// Sanity check.
require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets")
require.Len(t, tc.expectedCounterResets, len(tc.samples), "number of samples and counter resets")
oooChunk := OOOChunk{}
for _, s := range tc.samples {
@ -264,7 +264,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) {
chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks")
require.Len(t, chunks, len(tc.expectedChunks), "number of chunks")
sampleIndex := 0
for i, c := range chunks {
require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i)

View file

@ -525,7 +525,7 @@ func (b *blockBaseSeriesSet) Next() bool {
// Count those in range to size allocation (roughly - ignoring tombstones).
nChks := 0
for _, chk := range b.bufChks {
if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) {
if chk.MaxTime >= b.mint && chk.MinTime <= b.maxt {
nChks++
}
}

View file

@ -263,7 +263,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
rmChunkRefs(chksRes)
require.Equal(t, errExp, errRes)
require.Equal(t, len(chksExp), len(chksRes))
require.Len(t, chksRes, len(chksExp))
var exp, act [][]chunks.Sample
for i := range chksExp {
samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)

View file

@ -174,7 +174,7 @@ func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sa
}
}
require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name)
require.Len(t, actual, len(expected), "Length not equal to expected for %s", name)
for i, s := range expected {
expectedSample := s
actualSample := actual[i]

View file

@ -125,12 +125,13 @@ func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
return
}
//nolint:revive // error-naming.
//nolint:staticcheck,revive // error-naming.
var (
// Currently there are only 2 types, warnings and info.
// For now, info are visually identical with warnings as we have not updated
// the API spec or the frontend to show a different kind of warning. But we
// make the distinction here to prepare for adding them in future.
PromQLInfo = errors.New("PromQL info")
PromQLWarning = errors.New("PromQL warning")

View file

@ -50,7 +50,7 @@ func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error {
return err
}
return writeSubcommands(writer, 1, model.Name, model.CmdGroupModel.Commands)
return writeSubcommands(writer, 1, model.Name, model.Commands)
}
func header(title, help string) []byte {
@ -172,13 +172,13 @@ func writeTable(writer io.Writer, data [][]string, header string) error {
buf := bytes.NewBuffer(nil)
buf.WriteString(fmt.Sprintf("\n\n%s\n\n", header))
fmt.Fprintf(buf, "\n\n%s\n\n", header)
columnsToRender := determineColumnsToRender(data)
headers := data[0]
buf.WriteString("|")
for _, j := range columnsToRender {
buf.WriteString(fmt.Sprintf(" %s |", headers[j]))
fmt.Fprintf(buf, " %s |", headers[j])
}
buf.WriteString("\n")
@ -192,7 +192,7 @@ func writeTable(writer io.Writer, data [][]string, header string) error {
row := data[i]
buf.WriteString("|")
for _, j := range columnsToRender {
buf.WriteString(fmt.Sprintf(" %s |", row[j]))
fmt.Fprintf(buf, " %s |", row[j])
}
buf.WriteString("\n")
}
@ -243,7 +243,7 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands []
help = cmd.HelpLong
}
help = formatHyphenatedWords(help)
if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil {
if _, err := fmt.Fprintf(writer, "\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help); err != nil {
return err
}
@ -255,8 +255,8 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands []
return err
}
if cmd.CmdGroupModel != nil && len(cmd.CmdGroupModel.Commands) > 0 {
if err := writeSubcommands(writer, level+1, modelName, cmd.CmdGroupModel.Commands); err != nil {
if cmd.CmdGroupModel != nil && len(cmd.Commands) > 0 {
if err := writeSubcommands(writer, level+1, modelName, cmd.Commands); err != nil {
return err
}
}

View file

@ -134,7 +134,7 @@ func NewQueryStats(s *Statistics) QueryStats {
sp = s.Samples
)
for s, timer := range tg.TimerGroup.timers {
for s, timer := range tg.timers {
switch s {
case EvalTotalTime:
qt.EvalTotalTime = timer.Duration()
@ -328,5 +328,5 @@ func (qs *QuerySamples) NewChild() *QuerySamples {
}
func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
return NewSpanTimer(ctx, qt.SpanOperation(), qs.TimerGroup.GetTimer(qt), observers...)
return NewSpanTimer(ctx, qt.SpanOperation(), qs.GetTimer(qt), observers...)
}

View file

@ -54,10 +54,10 @@ func SanitizeFullLabelName(name string) string {
}
var validSb strings.Builder
for i, b := range name {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
validSb.WriteRune('_')
} else {
if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0) {
validSb.WriteRune(b)
} else {
validSb.WriteRune('_')
}
}
return validSb.String()

View file

@ -812,7 +812,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
hostname, err := os.Hostname()
if err != nil {
return status, fmt.Errorf("Error getting hostname: %w", err)
return status, fmt.Errorf("error getting hostname: %w", err)
}
status.Hostname = hostname
status.ServerTime = time.Now().UTC()

View file

@ -624,7 +624,7 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
b, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.NoError(t, json.Unmarshal(b, snapshot))
require.NotZero(t, snapshot.Data.Name, "snapshot directory not returned")
require.NotEmpty(t, snapshot.Data.Name, "snapshot directory not returned")
require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
}