@@ -42,6 +42,7 @@
     securedMetrics = kingpin.Flag("web.secured-metrics", "Expose metrics using https.").Default("false").Bool()
     serverCert = kingpin.Flag("web.ssl-server-cert", "Path to the PEM encoded certificate").ExistingFile()
     serverKey = kingpin.Flag("web.ssl-server-key", "Path to the PEM encoded key").ExistingFile()
+    scrapeInterval = kingpin.Flag("scrape.interval", "Interval between each scrape. Default is to scrape on collect requests").Default("0s").Duration()
 )

 // Metric name parts.
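Below is a minimal sketch of how the new flag is meant to behave. The flag name, help text, and "0s" default are taken from the diff; the package wiring and the gopkg.in/alecthomas/kingpin.v2 import path are assumptions made only for illustration.

```go
package main

import (
    "fmt"

    "gopkg.in/alecthomas/kingpin.v2" // assumed import path, not confirmed by this diff
)

// Mirrors the flag added above: the "0s" default keeps the old behaviour
// (scrape on every collect request); any other duration enables the
// background scrape loop added further down in this change.
var scrapeInterval = kingpin.Flag("scrape.interval",
    "Interval between each scrape. Default is to scrape on collect requests").
    Default("0s").Duration()

func main() {
    kingpin.Parse()
    if *scrapeInterval == 0 {
        fmt.Println("scraping on each collect request")
    } else {
        fmt.Printf("scraping every %s in the background; collect requests serve cached results\n", *scrapeInterval)
    }
}
```

With a non-zero interval, successive collect requests between ticks return the same cached samples, so the cost of scraping the database no longer depends on how often Prometheus polls the exporter.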
@@ -80,6 +81,7 @@ type Exporter struct {
     duration, error prometheus.Gauge
     totalScrapes prometheus.Counter
     scrapeErrors *prometheus.CounterVec
+    scrapeResults []prometheus.Metric
     up prometheus.Gauge
     db *sql.DB
 }
@@ -194,14 +196,47 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {

 // Collect implements prometheus.Collector.
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
-    e.scrape(ch)
+    if *scrapeInterval == 0 { // if we are to scrape when the request is made
+        e.scrape(ch)
+    } else {
+        scrapeResults := e.scrapeResults // There is a risk that e.scrapeResults will be replaced while we traverse this loop. This local copy should mitigate that risk
+        for idx := range scrapeResults {
+            ch <- scrapeResults[idx]
+        }
+    }
     ch <- e.duration
     ch <- e.totalScrapes
     ch <- e.error
     e.scrapeErrors.Collect(ch)
     ch <- e.up
 }

+func (e *Exporter) runScheduledScrapes() {
+    if *scrapeInterval == 0 {
+        return // Do nothing as scrapes will be done on Collect requests
+    }
+    ticker := time.NewTicker(*scrapeInterval)
+    defer ticker.Stop()
+    for {
+        metricCh := make(chan prometheus.Metric, 5)
+        go func() {
+            scrapeResults := []prometheus.Metric{}
+            for {
+                scrapeResult, more := <-metricCh
+                if more {
+                    scrapeResults = append(scrapeResults, scrapeResult)
+                } else {
+                    e.scrapeResults = scrapeResults
+                    return
+                }
+            }
+        }()
+        e.scrape(metricCh)
+        close(metricCh)
+        <-ticker.C
+    }
+}
+
 func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
     e.totalScrapes.Inc()
     var err error
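The inline comment in Collect above points at the main subtlety of this change: the goroutine started by runScheduledScrapes replaces e.scrapeResults while Collect may still be ranging over it, and the commit mitigates that by copying the slice header into a local variable first. Purely as an illustration, and not part of this commit, the same cache-and-replay pattern can be written with an explicit RWMutex and a for range drain of the metric channel; every identifier below is hypothetical.

```go
package scrapecache // hypothetical package name, for illustration only

import (
    "sync"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// cachedCollector caches the metrics produced by an expensive scrape so that
// collect requests can replay them cheaply.
type cachedCollector struct {
    mu     sync.RWMutex
    cache  []prometheus.Metric
    scrape func(ch chan<- prometheus.Metric) // e.g. the exporter's scrape method
}

// run refreshes the cache on every tick (interval must be non-zero) until
// stop is closed.
func (c *cachedCollector) run(interval time.Duration, stop <-chan struct{}) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        ch := make(chan prometheus.Metric, 5)
        done := make(chan struct{})
        go func() {
            defer close(done)
            var results []prometheus.Metric
            for m := range ch { // drains until close(ch) below
                results = append(results, m)
            }
            c.mu.Lock()
            c.cache = results
            c.mu.Unlock()
        }()
        c.scrape(ch)
        close(ch)
        <-done // wait for the drain goroutine before sleeping
        select {
        case <-ticker.C:
        case <-stop:
            return
        }
    }
}

// replay sends the cached metrics; the read lock prevents the cache from
// being swapped while the loop is still running.
func (c *cachedCollector) replay(out chan<- prometheus.Metric) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    for _, m := range c.cache {
        out <- m
    }
}
```

Whether the extra locking is worth it is a trade-off; the committed version opts for the simpler slice-header copy.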
@@ -446,7 +481,7 @@ func GeneratePrometheusMetrics(db *sql.DB, parse func(row map[string]string) err
     // and a second slice to contain pointers to each item in the columns slice.
     columns := make([]interface{}, len(cols))
     columnPointers := make([]interface{}, len(cols))
-    for i, _ := range columns {
+    for i := range columns {
        columnPointers[i] = &columns[i]
     }

@@ -559,10 +594,11 @@ func main() {

     exporter := NewExporter(dsn)
     prometheus.MustRegister(exporter)
+    go exporter.runScheduledScrapes()

     // See more info on https://github.com/prometheus/client_golang/blob/master/prometheus/promhttp/http.go#L269
     opts := promhttp.HandlerOpts{
-        ErrorLog: log.NewErrorLogger(),
+        ErrorLog:      log.NewErrorLogger(),
         ErrorHandling: promhttp.ContinueOnError,
     }
     http.Handle(*metricPath, promhttp.HandlerFor(prometheus.DefaultGatherer, opts))