listener.go
package jobs

import (
	"context"
	"time"

	"github.com/roadrunner-server/goridge/v3/pkg/frame"
	"github.com/roadrunner-server/sdk/v4/payload"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
	"go.uber.org/zap"
)

// listener is the non-blocking job consumer: it starts p.cfg.NumPollers goroutines,
// each of which pulls prioritized jobs from the queue and hands them to the workers
// pool until the plugin is stopped via stopCh.
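//
// The number of pollers comes from the plugin configuration. An illustrative
// .rr.yaml snippet (sketch only; see the RoadRunner jobs documentation for the
// authoritative set of keys):
//
//	jobs:
//	  num_pollers: 10
//	  pool:
//	    num_workers: 10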
func (p *Plugin) listener() {
	for i := 0; i < p.cfg.NumPollers; i++ {
		go func() {
			for {
				select {
				case <-p.stopCh:
					p.log.Debug("------> job poller was stopped <------")
					return
				default:
					start := time.Now().UTC()
					// get prioritized JOB from the queue
					jb := p.queue.ExtractMin()
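
					// restore the tracing context propagated via the job headers (if any)
					// and open a span covering the processing of this job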
					traceCtx := otel.GetTextMapPropagator().Extract(context.Background(), propagation.HeaderCarrier(jb.Headers()))
					_, span := p.tracer.Tracer(PluginName).Start(traceCtx, "jobs_listener")

					// parse the context
					// for each job, context contains:
					/*
						1. Job class
						2. Job ID provided from the outside
						3. Job Headers map[string][]string
						4. Timeout in seconds
						5. Pipeline name
					*/
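					// Illustrative shape of the marshaled context (sketch only; the exact field
					// names depend on the pipeline driver's Context() implementation):
					//
					//	{"job":"app\\jobs\\SomeJob","id":"<uuid>","headers":{"x":["y"]},"timeout":60,"pipeline":"default"}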
					p.log.Debug("job processing was started", zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))

					ctx, err := jb.Context()
					if err != nil {
						p.metrics.CountJobErr()
						p.log.Error("job marshal error", zap.Error(err), zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))

						errNack := jb.Nack()
						if errNack != nil {
							p.log.Error("negatively acknowledge failed", zap.String("ID", jb.ID()), zap.Error(errNack))
						}

						span.End()
						continue
					}

					// get payload from the sync.Pool
					exec := p.payload(jb.Body(), ctx)

					// protect from the pool reset
					p.mu.RLock()
					// TODO(rustatian): context.Background() is not a good idea; we need to pass a context with a timeout taken from the configuration
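					// One possible shape for that TODO (sketch only; p.cfg.Timeout is an assumed
					// field and is not necessarily present in the current configuration):
					//
					//	execCtx, cancel := context.WithTimeout(context.Background(), p.cfg.Timeout)
					//	re, err := p.workersPool.Exec(execCtx, exec, nil)
					//	cancel()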
					re, err := p.workersPool.Exec(context.Background(), exec, nil)
					p.mu.RUnlock()
					if err != nil {
						p.metrics.CountJobErr()
						p.log.Error("job processed with errors", zap.Error(err), zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))

						// RR protocol level error, Nack the job
						errNack := jb.Nack()
						if errNack != nil {
							p.log.Error("negatively acknowledge failed", zap.String("ID", jb.ID()), zap.Error(errNack))
						}

						p.putPayload(exec)
						jb = nil
						span.End()
						continue
					}
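
					// Exec returns a result channel; exactly one response is expected here,
					// because streaming responses are rejected below. The default branch covers
					// the case where the worker produced no response at all.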
					var resp *payload.Payload

					select {
					case pld := <-re:
						if pld.Error() != nil {
							p.metrics.CountJobErr()
							p.log.Error("job processed with errors", zap.Error(pld.Error()), zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))

							// RR protocol level error, Nack the job
							errNack := jb.Nack()
							if errNack != nil {
								p.log.Error("negatively acknowledge failed", zap.String("ID", jb.ID()), zap.Error(errNack))
							}

							p.putPayload(exec)
							jb = nil
							span.End()
							continue
						}

						// streaming is not supported
						if pld.Payload().Flags&frame.STREAM != 0 {
							p.metrics.CountJobErr()
							p.log.Warn("streaming is not supported",
								zap.String("ID", jb.ID()),
								zap.Time("start", start),
								zap.Int64("elapsed", time.Since(start).Milliseconds()))

							errNack := jb.Nack()
							if errNack != nil {
								p.log.Error("negatively acknowledge failed", zap.String("ID", jb.ID()), zap.Error(errNack))
							}

							p.log.Error("job execute failed", zap.Error(err))
							p.putPayload(exec)
							jb = nil
							span.End()
							continue
						}

						// assign the payload
						resp = pld.Payload()
					default:
						// should never happen
						p.metrics.CountJobErr()
						p.log.Error("worker null response, this is not expected")

						errNack := jb.Nack()
						if errNack != nil {
							p.log.Error("negatively acknowledge failed", zap.String("ID", jb.ID()), zap.Error(errNack))
						}

						p.putPayload(exec)
						jb = nil
						span.End()
						continue
					}

					// if response is nil or body is nil, just acknowledge the job
					if resp == nil || resp.Body == nil {
						p.putPayload(exec)
						err = jb.Ack()
						if err != nil {
							p.metrics.CountJobErr()
							p.log.Error("acknowledge error, job might be missed", zap.Error(err), zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))
							jb = nil
							span.End()
							continue
						}

						p.log.Debug("job was processed successfully", zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))
						p.metrics.CountJobOk()
						jb = nil
						span.End()
						continue
					}
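
					// A non-empty body means the worker replied with a jobs protocol payload
					// (for example, an error/requeue instruction); the response handler below
					// decides how the job is finally acknowledged.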
					// handle the response protocol
					err = p.respHandler.Handle(resp, jb)
					if err != nil {
						p.metrics.CountJobErr()
						p.log.Error("response handler error", zap.Error(err), zap.String("ID", jb.ID()), zap.ByteString("response", resp.Body), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))

						p.putPayload(exec)

						/*
							Job malformed, acknowledge it to prevent endless loop
						*/
						errAck := jb.Ack()
						if errAck != nil {
							p.log.Error("acknowledge failed, job might be lost", zap.String("ID", jb.ID()), zap.Error(err), zap.Error(errAck))
							jb = nil
							span.End()
							continue
						}

						p.log.Error("job acknowledged, but contains error", zap.Error(err))
						jb = nil
						span.End()
						continue
					}

					p.metrics.CountJobOk()
					p.log.Debug("job was processed successfully", zap.String("ID", jb.ID()), zap.Time("start", start), zap.Int64("elapsed", time.Since(start).Milliseconds()))

					// return payload
					p.putPayload(exec)
					jb = nil
					span.End()
				}
			}
		}()
	}
}