@@ -16,13 +16,12 @@ package collectors
 import (
 	"errors"
 	"fmt"
+	"log/slog"
 	"math"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"golang.org/x/net/context"
 	"google.golang.org/api/monitoring/v3"
@@ -53,7 +52,7 @@ type MonitoringCollector struct {
 	lastScrapeDurationSecondsMetric prometheus.Gauge
 	collectorFillMissingLabels      bool
 	monitoringDropDelegatedProjects bool
-	logger                          log.Logger
+	logger                          *slog.Logger
 	counterStore                    DeltaCounterStore
 	histogramStore                  DeltaHistogramStore
 	aggregateDeltas                 bool
@@ -120,10 +119,10 @@ type DeltaHistogramStore interface {
 	ListMetrics(metricDescriptorName string) []*HistogramMetric
 }
 
-func NewMonitoringCollector(projectID string, monitoringService *monitoring.Service, opts MonitoringCollectorOptions, logger log.Logger, counterStore DeltaCounterStore, histogramStore DeltaHistogramStore) (*MonitoringCollector, error) {
+func NewMonitoringCollector(projectID string, monitoringService *monitoring.Service, opts MonitoringCollectorOptions, logger *slog.Logger, counterStore DeltaCounterStore, histogramStore DeltaHistogramStore) (*MonitoringCollector, error) {
 	const subsystem = "monitoring"
 
-	logger = log.With(logger, "project_id", projectID)
+	logger = logger.With("project_id", projectID)
 
 	apiCallsTotalMetric := prometheus.NewCounter(
 		prometheus.CounterOpts{
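
The constructor now takes a *slog.Logger and derives a project-scoped logger with logger.With. A minimal caller-side sketch, assuming the usual imports ("log/slog", "os", "github.com/prometheus/client_golang/prometheus") and that the service, options, and delta stores are already built by the surrounding program; the names and the "my-project" ID below are placeholders, not part of this change:

	// Hypothetical wiring sketch; arguments are illustrative only.
	func registerCollector(svc *monitoring.Service, opts MonitoringCollectorOptions, counters DeltaCounterStore, histograms DeltaHistogramStore) error {
		// Text-format slog logger at Info level, writing to stderr.
		logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
		collector, err := NewMonitoringCollector("my-project", svc, opts, logger, counters, histograms)
		if err != nil {
			return err
		}
		prometheus.MustRegister(collector)
		return nil
	}
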
@@ -237,7 +236,7 @@ func (c *MonitoringCollector) Collect(ch chan<- prometheus.Metric) {
 	if err := c.reportMonitoringMetrics(ch, begun); err != nil {
 		errorMetric = float64(1)
 		c.scrapeErrorsTotalMetric.Inc()
-		level.Error(c.logger).Log("msg", "Error while getting Google Stackdriver Monitoring metrics", "err", err)
+		c.logger.Error("Error while getting Google Stackdriver Monitoring metrics", "err", err)
 	}
 	c.scrapeErrorsTotalMetric.Collect(ch)
 
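
The remaining hunks all follow this same call-site translation: the go-kit "msg" key becomes slog's positional message, and the trailing arguments stay as alternating key/value pairs. As an aside, slog also accepts typed attributes in the same position, so a call like the one above could equivalently be written as the following hedged variant (not what this change does):

	// slog treats loose key/value pairs and slog.Attr values interchangeably.
	c.logger.Error("Error while getting Google Stackdriver Monitoring metrics", slog.Any("err", err))
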
@@ -283,7 +282,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 			wg.Add(1)
 			go func(metricDescriptor *monitoring.MetricDescriptor, ch chan<- prometheus.Metric, startTime, endTime time.Time) {
 				defer wg.Done()
-				level.Debug(c.logger).Log("msg", "retrieving Google Stackdriver Monitoring metrics for descriptor", "descriptor", metricDescriptor.Type)
+				c.logger.Debug("retrieving Google Stackdriver Monitoring metrics for descriptor", "descriptor", metricDescriptor.Type)
 				filter := fmt.Sprintf("metric.type=\"%s\"", metricDescriptor.Type)
 				if c.monitoringDropDelegatedProjects {
 					filter = fmt.Sprintf(
@@ -298,11 +297,11 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 					ingestDelay := metricDescriptor.Metadata.IngestDelay
 					ingestDelayDuration, err := time.ParseDuration(ingestDelay)
 					if err != nil {
-						level.Error(c.logger).Log("msg", "error parsing ingest delay from metric metadata", "descriptor", metricDescriptor.Type, "err", err, "delay", ingestDelay)
+						c.logger.Error("error parsing ingest delay from metric metadata", "descriptor", metricDescriptor.Type, "err", err, "delay", ingestDelay)
 						errChannel <- err
 						return
 					}
-					level.Debug(c.logger).Log("msg", "adding ingest delay", "descriptor", metricDescriptor.Type, "delay", ingestDelay)
+					c.logger.Debug("adding ingest delay", "descriptor", metricDescriptor.Type, "delay", ingestDelay)
 					endTime = endTime.Add(ingestDelayDuration * -1)
 					startTime = startTime.Add(ingestDelayDuration * -1)
 				}
@@ -313,7 +312,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 					}
 				}
 
-				level.Debug(c.logger).Log("msg", "retrieving Google Stackdriver Monitoring metrics with filter", "filter", filter)
+				c.logger.Debug("retrieving Google Stackdriver Monitoring metrics with filter", "filter", filter)
 
 				timeSeriesListCall := c.monitoringService.Projects.TimeSeries.List(utils.ProjectResource(c.projectID)).
 					Filter(filter).
@@ -324,15 +323,15 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 					c.apiCallsTotalMetric.Inc()
 					page, err := timeSeriesListCall.Do()
 					if err != nil {
-						level.Error(c.logger).Log("msg", "error retrieving Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
+						c.logger.Error("error retrieving Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
 						errChannel <- err
 						break
 					}
 					if page == nil {
 						break
 					}
 					if err := c.reportTimeSeriesMetrics(page, metricDescriptor, ch, begun); err != nil {
-						level.Error(c.logger).Log("msg", "error reporting Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
+						c.logger.Error("error reporting Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
 						errChannel <- err
 						break
 					}
@@ -368,7 +367,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 		}
 
 		if cached := c.descriptorCache.Lookup(metricsTypePrefix); cached != nil {
-			level.Debug(c.logger).Log("msg", "using cached Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
+			c.logger.Debug("using cached Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
 			if err := metricDescriptorsFunction(cached); err != nil {
 				errChannel <- err
 			}
@@ -381,7 +380,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 				return metricDescriptorsFunction(r.MetricDescriptors)
 			}
 
-			level.Debug(c.logger).Log("msg", "listing Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
+			c.logger.Debug("listing Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
 			if err := c.monitoringService.Projects.MetricDescriptors.List(utils.ProjectResource(c.projectID)).
 				Filter(filter).
 				Pages(ctx, callback); err != nil {
@@ -396,7 +395,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
 	wg.Wait()
 	close(errChannel)
 
-	level.Debug(c.logger).Log("msg", "Done reporting monitoring metrics")
+	c.logger.Debug("Done reporting monitoring metrics")
 	return <-errChannel
 }
 
@@ -500,12 +499,12 @@ func (c *MonitoringCollector) reportTimeSeriesMetrics(
 			if err == nil {
 				timeSeriesMetrics.CollectNewConstHistogram(timeSeries, newestEndTime, labelKeys, dist, buckets, labelValues, timeSeries.MetricKind)
 			} else {
-				level.Debug(c.logger).Log("msg", "discarding", "resource", timeSeries.Resource.Type, "metric",
+				c.logger.Debug("discarding", "resource", timeSeries.Resource.Type, "metric",
 					timeSeries.Metric.Type, "err", err)
 			}
 			continue
 		default:
-			level.Debug(c.logger).Log("msg", "discarding", "value_type", timeSeries.ValueType, "metric", timeSeries)
+			c.logger.Debug("discarding", "value_type", timeSeries.ValueType, "metric", timeSeries)
 			continue
 		}
 
@@ -569,7 +568,7 @@ func (c *MonitoringCollector) generateHistogramBuckets(
 func (c *MonitoringCollector) keyExists(labelKeys []string, key string) bool {
 	for _, item := range labelKeys {
 		if item == key {
-			level.Debug(c.logger).Log("msg", "Found duplicate label key", "key", key)
+			c.logger.Debug("Found duplicate label key", "key", key)
 			return true
 		}
 	}
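
For tests or callers that want the collector to stay silent, a no-op *slog.Logger can be built from the standard library alone; a minimal sketch, assuming only "io" and "log/slog" are imported and using placeholder arguments:

	// Every record is formatted and written to io.Discard, so nothing is printed.
	quiet := slog.New(slog.NewTextHandler(io.Discard, nil))
	// "test-project", svc, opts, counters and histograms stand in for test fixtures.
	collector, err := NewMonitoringCollector("test-project", svc, opts, quiet, counters, histograms)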