limits.go (forked from cortexproject/cortex)
package queryrange

import (
	"context"
	"net/http"
	"time"

	"github.com/prometheus/prometheus/pkg/timestamp"
	"github.com/weaveworks/common/httpgrpc"
	"github.com/weaveworks/common/user"

	"github.com/cortexproject/cortex/pkg/util/validation"
)

// Limits allows us to specify per-tenant runtime limits on the behavior of
// the query handling code.
type Limits interface {
	MaxQueryLength(string) time.Duration
	MaxQueryParallelism(string) int
	MaxCacheFreshness(string) time.Duration
}
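
// Illustrative sketch, not part of the upstream file: a minimal static
// implementation of the Limits interface of the kind a caller or test might
// wire in. The staticLimits name and its fields are hypothetical; in Cortex
// the per-tenant values typically come from validation.Overrides instead.
type staticLimits struct {
	maxQueryLength      time.Duration
	maxQueryParallelism int
	maxCacheFreshness   time.Duration
}

func (s staticLimits) MaxQueryLength(string) time.Duration    { return s.maxQueryLength }
func (s staticLimits) MaxQueryParallelism(string) int         { return s.maxQueryParallelism }
func (s staticLimits) MaxCacheFreshness(string) time.Duration { return s.maxCacheFreshness }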

type limits struct {
	Limits
	next Handler
}

// LimitsMiddleware creates a new Middleware that rejects queries whose time
// range exceeds the per-tenant limits exposed by the Limits interface.
func LimitsMiddleware(l Limits) Middleware {
	return MiddlewareFunc(func(next Handler) Handler {
		return limits{
			next:   next,
			Limits: l,
		}
	})
}

func (l limits) Do(ctx context.Context, r Request) (Response, error) {
	userid, err := user.ExtractOrgID(ctx)
	if err != nil {
		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
	}

	// Reject the query up front if its time range exceeds the tenant's
	// configured maximum; a limit of 0 means "no limit".
	maxQueryLen := l.MaxQueryLength(userid)
	queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart()))
	if maxQueryLen != 0 && queryLen > maxQueryLen {
		return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLen)
	}
	return l.next.Do(ctx, r)
}

// RequestResponse contains a response and the request that produced it.
type RequestResponse struct {
	Request  Request
	Response Response
}

// DoRequests executes a list of requests in parallel. The limits parameter is
// used to limit the parallelism of a single query.
func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits Limits) ([]RequestResponse, error) {
	userid, err := user.ExtractOrgID(ctx)
	if err != nil {
		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
	}

	// If one of the requests fails, we want to be able to cancel the rest of them.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Feed all requests to an intermediate channel drained by a bounded pool of
	// workers, which is what limits the parallelism.
	intermediate := make(chan Request)
	go func() {
		for _, req := range reqs {
			intermediate <- req
		}
		close(intermediate)
	}()

	respChan, errChan := make(chan RequestResponse), make(chan error)
	parallelism := limits.MaxQueryParallelism(userid)
	if parallelism > len(reqs) {
		// No point spawning more workers than there are requests.
		parallelism = len(reqs)
	}
	for i := 0; i < parallelism; i++ {
		go func() {
			for req := range intermediate {
				resp, err := downstream.Do(ctx, req)
				if err != nil {
					errChan <- err
				} else {
					respChan <- RequestResponse{req, resp}
				}
			}
		}()
	}

	// Collect exactly one result (response or error) per request; on the first
	// error, cancel the context so in-flight downstream calls can stop early.
	resps := make([]RequestResponse, 0, len(reqs))
	var firstErr error
	for range reqs {
		select {
		case resp := <-respChan:
			resps = append(resps, resp)
		case err := <-errChan:
			if firstErr == nil {
				cancel()
				firstErr = err
			}
		}
	}

	return resps, firstErr
}
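
// Usage sketch, not part of the upstream file: exampleWrapWithLimits and the
// staticLimits type above are hypothetical, and this assumes the package's
// Middleware interface exposes Wrap(Handler) Handler as in upstream Cortex.
// It shows how a caller might place the per-tenant query-length check in
// front of a downstream handler.
func exampleWrapWithLimits(downstream Handler) Handler {
	l := staticLimits{
		maxQueryLength:      30 * 24 * time.Hour, // reject ranges longer than ~30 days
		maxQueryParallelism: 14,
	}
	// The returned Handler behaves like downstream, except that requests whose
	// [start, end) span exceeds maxQueryLength fail fast with HTTP 400.
	return LimitsMiddleware(l).Wrap(downstream)
}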