@@ -42,22 +42,6 @@ const infoSchemaProcesslistQuery = `
42
42
GROUP BY user, SUBSTRING_INDEX(host, ':', 1), command, state
43
43
`
44
44
45
- // Tunable flags.
46
- var (
47
- processlistMinTime = kingpin .Flag (
48
- "collect.info_schema.processlist.min_time" ,
49
- "Minimum time a thread must be in each state to be counted" ,
50
- ).Default ("0" ).Int ()
51
- processesByUserFlag = kingpin .Flag (
52
- "collect.info_schema.processlist.processes_by_user" ,
53
- "Enable collecting the number of processes by user" ,
54
- ).Default ("true" ).Bool ()
55
- processesByHostFlag = kingpin .Flag (
56
- "collect.info_schema.processlist.processes_by_host" ,
57
- "Enable collecting the number of processes by host" ,
58
- ).Default ("true" ).Bool ()
59
- )
60
-
61
45
// Metric descriptors.
62
46
var (
63
47
processlistCountDesc = prometheus .NewDesc (
79
63
)
80
64
81
65
// ScrapeProcesslist collects from `information_schema.processlist`.
//
// The fields hold the values of the scraper's command-line flags; they are
// populated by RegisterFlags during flag parsing and read by Scrape.
type ScrapeProcesslist struct {
	// ProcessListMinTime is the minimum time a thread must be in each
	// state to be counted (flag: collect.info_schema.processlist.min_time,
	// default 0).
	ProcessListMinTime int
	// ProcessesByUserFlag enables collecting the number of processes by
	// user (flag: collect.info_schema.processlist.processes_by_user,
	// default true).
	ProcessesByUserFlag bool
	// ProcessesByHostFlag enables collecting the number of processes by
	// host (flag: collect.info_schema.processlist.processes_by_host,
	// default true).
	ProcessesByHostFlag bool
}
83
71
84
72
// Name of the Scraper. Should be unique.
85
73
func (ScrapeProcesslist ) Name () string {
@@ -96,11 +84,27 @@ func (ScrapeProcesslist) Version() float64 {
96
84
return 5.1
97
85
}
98
86
87
+ // RegisterFlags adds flags to configure the Scraper.
88
+ func (s * ScrapeProcesslist ) RegisterFlags (application * kingpin.Application ) {
89
+ application .Flag (
90
+ "collect.info_schema.processlist.min_time" ,
91
+ "Minimum time a thread must be in each state to be counted" ,
92
+ ).Default ("0" ).IntVar (& s .ProcessListMinTime )
93
+ application .Flag (
94
+ "collect.info_schema.processlist.processes_by_user" ,
95
+ "Enable collecting the number of processes by user" ,
96
+ ).Default ("true" ).BoolVar (& s .ProcessesByUserFlag )
97
+ application .Flag (
98
+ "collect.info_schema.processlist.processes_by_host" ,
99
+ "Enable collecting the number of processes by host" ,
100
+ ).Default ("true" ).BoolVar (& s .ProcessesByHostFlag )
101
+ }
102
+
99
103
// Scrape collects data from database connection and sends it over channel as prometheus metric.
100
- func (ScrapeProcesslist ) Scrape (ctx context.Context , db * sql.DB , ch chan <- prometheus.Metric , logger log.Logger ) error {
104
+ func (s ScrapeProcesslist ) Scrape (ctx context.Context , db * sql.DB , ch chan <- prometheus.Metric , logger log.Logger ) error {
101
105
processQuery := fmt .Sprintf (
102
106
infoSchemaProcesslistQuery ,
103
- * processlistMinTime ,
107
+ s . ProcessListMinTime ,
104
108
)
105
109
processlistRows , err := db .QueryContext (ctx , processQuery )
106
110
if err != nil {
@@ -162,12 +166,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
162
166
}
163
167
}
164
168
165
- if * processesByHostFlag {
169
+ if s . ProcessesByHostFlag {
166
170
for _ , host := range sortedMapKeys (stateHostCounts ) {
167
171
ch <- prometheus .MustNewConstMetric (processesByHostDesc , prometheus .GaugeValue , float64 (stateHostCounts [host ]), host )
168
172
}
169
173
}
170
- if * processesByUserFlag {
174
+
175
+ if s .ProcessesByUserFlag {
171
176
for _ , user := range sortedMapKeys (stateUserCounts ) {
172
177
ch <- prometheus .MustNewConstMetric (processesByUserDesc , prometheus .GaugeValue , float64 (stateUserCounts [user ]), user )
173
178
}
0 commit comments