This repository was archived by the owner on Feb 13, 2025. It is now read-only.

Commit 6c979a3

Added async scraping as an option. The option is only activated if scrape.interval is set (#232)
1 parent c4743d6 commit 6c979a3

File tree

1 file changed: +39 -3 lines changed

main.go

Lines changed: 39 additions & 3 deletions
@@ -42,6 +42,7 @@ var (
     securedMetrics = kingpin.Flag("web.secured-metrics", "Expose metrics using https.").Default("false").Bool()
     serverCert = kingpin.Flag("web.ssl-server-cert", "Path to the PEM encoded certificate").ExistingFile()
     serverKey = kingpin.Flag("web.ssl-server-key", "Path to the PEM encoded key").ExistingFile()
+    scrapeInterval = kingpin.Flag("scrape.interval", "Interval between each scrape. Default is to scrape on collect requests").Default("0s").Duration()
 )
 
 // Metric name parts.
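
The new flag is the only switch for the feature: the "0s" default keeps the existing scrape-on-Collect behaviour, while any positive duration turns on the background loop added further down. A rough, standalone sketch of how such a kingpin Duration flag behaves (this is not part of the commit, and the import path is the kingpin v2 one commonly used by exporters of this era):

package main

import (
    "fmt"

    "gopkg.in/alecthomas/kingpin.v2"
)

// Same flag shape as in main.go: a kingpin Duration flag defaulting to "0s".
var scrapeInterval = kingpin.Flag("scrape.interval",
    "Interval between each scrape. Default is to scrape on collect requests").
    Default("0s").Duration()

func main() {
    kingpin.Parse()
    if *scrapeInterval == 0 {
        fmt.Println("scraping on every Collect request")
    } else {
        fmt.Printf("scraping in the background every %s\n", *scrapeInterval)
    }
}

So starting the exporter with something like --scrape.interval=30s switches it to cached background scraping, while leaving the flag unset preserves the old per-request behaviour.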
@@ -80,6 +81,7 @@ type Exporter struct {
     duration, error prometheus.Gauge
     totalScrapes    prometheus.Counter
     scrapeErrors    *prometheus.CounterVec
+    scrapeResults   []prometheus.Metric
     up              prometheus.Gauge
     db              *sql.DB
 }
@@ -194,14 +196,47 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect implements prometheus.Collector.
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
-    e.scrape(ch)
+    if *scrapeInterval == 0 { // if we are to scrape when the request is made
+        e.scrape(ch)
+    } else {
+        scrapeResults := e.scrapeResults // There is a risk that e.scrapeResults will be replaced while we traverse this loop. This should mitigate that risk
+        for idx := range scrapeResults {
+            ch <- scrapeResults[idx]
+        }
+    }
     ch <- e.duration
     ch <- e.totalScrapes
     ch <- e.error
     e.scrapeErrors.Collect(ch)
     ch <- e.up
 }
 
+func (e *Exporter) runScheduledScrapes() {
+    if *scrapeInterval == 0 {
+        return // Do nothing as scrapes will be done on Collect requests
+    }
+    ticker := time.NewTicker(*scrapeInterval)
+    defer ticker.Stop()
+    for {
+        metricCh := make(chan prometheus.Metric, 5)
+        go func() {
+            scrapeResults := []prometheus.Metric{}
+            for {
+                scrapeResult, more := <-metricCh
+                if more {
+                    scrapeResults = append(scrapeResults, scrapeResult)
+                } else {
+                    e.scrapeResults = scrapeResults
+                    return
+                }
+            }
+        }()
+        e.scrape(metricCh)
+        close(metricCh)
+        <-ticker.C
+    }
+}
+
 func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
     e.totalScrapes.Inc()
     var err error
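
The in-code comment above already points at the delicate part of this design: Collect copies e.scrapeResults into a local variable before ranging over it, but the slice header itself is still written by the background goroutine without synchronisation, which is a data race under the Go memory model even if it is rarely harmful in practice. A standalone sketch of one stricter alternative, not part of this commit, that guards the published slice with a sync.RWMutex (the resultCache type and its methods are hypothetical names used only for illustration):

package exporter

import (
    "sync"

    "github.com/prometheus/client_golang/prometheus"
)

// resultCache is a hypothetical helper (not in main.go): the background
// scraper publishes into it and Collect reads a snapshot from it.
type resultCache struct {
    mu      sync.RWMutex
    metrics []prometheus.Metric
}

// publish replaces the cached metrics once a background scrape has finished.
func (c *resultCache) publish(m []prometheus.Metric) {
    c.mu.Lock()
    c.metrics = m
    c.mu.Unlock()
}

// snapshot returns the most recently published metrics for Collect to emit.
func (c *resultCache) snapshot() []prometheus.Metric {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return c.metrics
}

The committed version trades that strictness for simplicity; swapping in a mutex or an atomic.Value later would not change the Collect-side behaviour, only make the hand-off explicit.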
@@ -446,7 +481,7 @@ func GeneratePrometheusMetrics(db *sql.DB, parse func(row map[string]string) err
     // and a second slice to contain pointers to each item in the columns slice.
     columns := make([]interface{}, len(cols))
     columnPointers := make([]interface{}, len(cols))
-    for i, _ := range columns {
+    for i := range columns {
         columnPointers[i] = &columns[i]
     }
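For context, the loop being tidied here (dropping the redundant blank identifier) belongs to the standard database/sql idiom for scanning rows whose columns are only known at runtime. A self-contained sketch of that idiom follows; the function name and the map result are chosen for illustration and are not taken from main.go, where the parse callback ultimately receives a map[string]string:

package exporter

import "database/sql"

// rowToMap scans the current row of rows into a map keyed by column name.
// Illustrative only: the exporter converts scanned values to strings before
// handing them to its parse callback.
func rowToMap(rows *sql.Rows) (map[string]interface{}, error) {
    cols, err := rows.Columns()
    if err != nil {
        return nil, err
    }
    // One slice holds the scanned values; the second holds a pointer to each
    // element so rows.Scan has addressable destinations.
    columns := make([]interface{}, len(cols))
    columnPointers := make([]interface{}, len(cols))
    for i := range columns {
        columnPointers[i] = &columns[i]
    }
    if err := rows.Scan(columnPointers...); err != nil {
        return nil, err
    }
    row := make(map[string]interface{}, len(cols))
    for i, name := range cols {
        row[name] = columns[i]
    }
    return row, nil
}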

@@ -559,10 +594,11 @@ func main() {
 
     exporter := NewExporter(dsn)
     prometheus.MustRegister(exporter)
+    go exporter.runScheduledScrapes()
 
     // See more info on https://github.com/prometheus/client_golang/blob/master/prometheus/promhttp/http.go#L269
     opts := promhttp.HandlerOpts{
-        ErrorLog: log.NewErrorLogger(),
+        ErrorLog:      log.NewErrorLogger(),
         ErrorHandling: promhttp.ContinueOnError,
     }
     http.Handle(*metricPath, promhttp.HandlerFor(prometheus.DefaultGatherer, opts))
