This repository was archived by the owner on Mar 6, 2023. It is now read-only.

Commit 1b32af2

Merge pull request #39 from RaduBerinde/logevent-limit

Add option for limiting the number of logs in a Span

2 parents 017835e + 723bb40

4 files changed: +140 -11 lines

span.go

Lines changed: 64 additions & 6 deletions
@@ -29,6 +29,8 @@ type spanImpl struct {
 	event      func(SpanEvent)
 	sync.Mutex // protects the fields below
 	raw        RawSpan
+	// The number of logs dropped because of MaxLogsPerSpan.
+	numDroppedLogs int
 }
 
 var spanPool = &sync.Pool{New: func() interface{} {
@@ -98,6 +100,21 @@ func (s *spanImpl) LogKV(keyValues ...interface{}) {
 	s.LogFields(fields...)
 }
 
+func (s *spanImpl) appendLog(lr opentracing.LogRecord) {
+	maxLogs := s.tracer.options.MaxLogsPerSpan
+	if maxLogs == 0 || len(s.raw.Logs) < maxLogs {
+		s.raw.Logs = append(s.raw.Logs, lr)
+		return
+	}
+
+	// We have too many logs. We don't touch the first numOld logs; we treat the
+	// rest as a circular buffer and overwrite the oldest log among those.
+	numOld := (maxLogs - 1) / 2
+	numNew := maxLogs - numOld
+	s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr
+	s.numDroppedLogs++
+}
+
 func (s *spanImpl) LogFields(fields ...log.Field) {
 	lr := opentracing.LogRecord{
 		Fields: fields,
@@ -111,7 +128,7 @@ func (s *spanImpl) LogFields(fields ...log.Field) {
 	if lr.Timestamp.IsZero() {
 		lr.Timestamp = time.Now()
 	}
-	s.raw.Logs = append(s.raw.Logs, lr)
+	s.appendLog(lr)
 }
 
 func (s *spanImpl) LogEvent(event string) {
@@ -139,13 +156,30 @@ func (s *spanImpl) Log(ld opentracing.LogData) {
 		ld.Timestamp = time.Now()
 	}
 
-	s.raw.Logs = append(s.raw.Logs, ld.ToLogRecord())
+	s.appendLog(ld.ToLogRecord())
 }
 
 func (s *spanImpl) Finish() {
 	s.FinishWithOptions(opentracing.FinishOptions{})
 }
 
+// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at
+// the end (i.e. pos circular left shifts).
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
+	// This algorithm is described in:
+	//   http://www.cplusplus.com/reference/algorithm/rotate
+	for first, middle, next := 0, pos, pos; first != middle; {
+		buf[first], buf[next] = buf[next], buf[first]
+		first++
+		next++
+		if next == len(buf) {
+			next = middle
+		} else if first == middle {
+			middle = next
+		}
+	}
+}
+
 func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) {
 	finishTime := opts.FinishTime
 	if finishTime.IsZero() {
@@ -155,18 +189,42 @@ func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) {
 
 	s.Lock()
 	defer s.Unlock()
-	if opts.LogRecords != nil {
-		s.raw.Logs = append(s.raw.Logs, opts.LogRecords...)
+
+	for _, lr := range opts.LogRecords {
+		s.appendLog(lr)
 	}
 	for _, ld := range opts.BulkLogData {
-		s.raw.Logs = append(s.raw.Logs, ld.ToLogRecord())
+		s.appendLog(ld.ToLogRecord())
 	}
+
+	if s.numDroppedLogs > 0 {
+		// We dropped some log events, which means that we used part of Logs as a
+		// circular buffer (see appendLog). De-circularize it.
+		numOld := (len(s.raw.Logs) - 1) / 2
+		numNew := len(s.raw.Logs) - numOld
+		rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew)
+
+		// Replace the log in the middle (the oldest "new" log) with information
+		// about the dropped logs. This means that we are effectively dropping one
+		// more "new" log.
+		numDropped := s.numDroppedLogs + 1
+		s.raw.Logs[numOld] = opentracing.LogRecord{
+			// Keep the timestamp of the last dropped event.
+			Timestamp: s.raw.Logs[numOld].Timestamp,
+			Fields: []log.Field{
+				log.String("event", "dropped Span logs"),
+				log.Int("dropped_log_count", numDropped),
+				log.String("component", "basictracer"),
+			},
+		}
+	}
+
 	s.raw.Duration = duration
 
 	s.onFinish(s.raw)
 	s.tracer.options.Recorder.RecordSpan(s.raw)
 
-	// Last chance to get options before the span is possbily reset.
+	// Last chance to get options before the span is possibly reset.
 	poolEnabled := s.tracer.options.EnableSpanPool
 	if s.tracer.options.DebugAssertUseAfterFinish {
 		// This makes it much more likely to catch a panic on any subsequent
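
The appendLog and rotateLogBuffer hunks above implement the retention policy: the first (maxLogs-1)/2 slots hold the oldest logs and are never touched, the remaining slots are overwritten as a circular buffer, and FinishWithOptions rotates that tail back into chronological order before swapping in the "dropped Span logs" record. The standalone sketch below mirrors the same arithmetic on a plain []int so the indexing can be checked in isolation; appendCapped and rotate are illustrative stand-ins, not part of the package.

package main

import "fmt"

// appendCapped mirrors spanImpl.appendLog on a plain int slice: once the
// buffer holds capLogs entries, the first numOld slots stay fixed and the
// rest are overwritten as a circular buffer.
func appendCapped(buf []int, dropped *int, capLogs, v int) []int {
	if capLogs == 0 || len(buf) < capLogs {
		return append(buf, v)
	}
	numOld := (capLogs - 1) / 2
	numNew := capLogs - numOld
	buf[numOld+(*dropped%numNew)] = v
	*dropped++
	return buf
}

// rotate performs pos circular left shifts, mirroring rotateLogBuffer.
func rotate(buf []int, pos int) {
	for first, middle, next := 0, pos, pos; first != middle; {
		buf[first], buf[next] = buf[next], buf[first]
		first++
		next++
		if next == len(buf) {
			next = middle
		} else if first == middle {
			middle = next
		}
	}
}

func main() {
	const capLogs = 5
	var buf []int
	dropped := 0
	for i := 0; i < 12; i++ {
		buf = appendCapped(buf, &dropped, capLogs, i)
	}
	// De-circularize the tail, as FinishWithOptions does before it replaces
	// the entry at position numOld with the "dropped Span logs" record.
	numOld := (len(buf) - 1) / 2
	numNew := len(buf) - numOld
	rotate(buf[numOld:], dropped%numNew)
	fmt.Println(buf, "dropped:", dropped) // [0 1 9 10 11] dropped: 7
}

Keeping roughly half of the oldest and half of the newest records means a finished span still shows both its start-up logs and the activity immediately before Finish.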

span_test.go

Lines changed: 56 additions & 0 deletions
@@ -2,6 +2,7 @@ package basictracer
 
 import (
 	"reflect"
+	"strconv"
 	"testing"
 
 	opentracing "github.com/opentracing/opentracing-go"
@@ -166,3 +167,58 @@ func TestSpan_DropAllLogs(t *testing.T) {
 	// Only logs are dropped
 	assert.Equal(t, 0, len(spans[0].Logs))
 }
+
+func TestSpan_MaxLogSperSpan(t *testing.T) {
+	for _, limit := range []int{1, 2, 3, 5, 10, 15, 20} {
+		for _, numLogs := range []int{1, 2, 3, 5, 10, 15, 20, 30, 40, 50} {
+			recorder := NewInMemoryRecorder()
+			// Tracer that only retains the last <limit> logs.
+			tracer := NewWithOptions(Options{
+				Recorder:       recorder,
+				ShouldSample:   func(traceID uint64) bool { return true }, // always sample
+				MaxLogsPerSpan: limit,
+			})
+
+			span := tracer.StartSpan("x")
+			for i := 0; i < numLogs; i++ {
+				span.LogKV("eventIdx", i)
+			}
+			span.Finish()
+
+			spans := recorder.GetSpans()
+			assert.Equal(t, 1, len(spans))
+			assert.Equal(t, "x", spans[0].Operation)
+
+			logs := spans[0].Logs
+			var firstLogs, lastLogs []opentracing.LogRecord
+			if numLogs <= limit {
+				assert.Equal(t, numLogs, len(logs))
+				firstLogs = logs
+			} else {
+				assert.Equal(t, limit, len(logs))
+				if len(logs) > 0 {
+					numOld := (len(logs) - 1) / 2
+					firstLogs = logs[:numOld]
+					lastLogs = logs[numOld+1:]
+
+					fv := NewLogFieldValidator(t, logs[numOld].Fields)
+					fv = fv.ExpectNextFieldEquals("event", reflect.String, "dropped Span logs")
+					fv = fv.ExpectNextFieldEquals(
+						"dropped_log_count", reflect.Int, strconv.Itoa(numLogs-limit+1),
+					)
+					fv.ExpectNextFieldEquals("component", reflect.String, "basictracer")
+				}
+			}
+
+			for i, lr := range firstLogs {
+				fv := NewLogFieldValidator(t, lr.Fields)
+				fv.ExpectNextFieldEquals("eventIdx", reflect.Int, strconv.Itoa(i))
+			}
+
+			for i, lr := range lastLogs {
+				fv := NewLogFieldValidator(t, lr.Fields)
+				fv.ExpectNextFieldEquals("eventIdx", reflect.Int, strconv.Itoa(numLogs-len(lastLogs)+i))
+			}
+		}
+	}
+}
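
As a concrete check of the expectations encoded in TestSpan_MaxLogSperSpan above, take limit = 10 and numLogs = 50: numOld = (10-1)/2 = 4, so the recorded span should contain eventIdx 0..3, then the synthetic "dropped Span logs" record with dropped_log_count = 50-10+1 = 41, then eventIdx 45..49. A quick arithmetic sketch, not part of the test file:

package main

import "fmt"

func main() {
	// Illustrative only: the retention TestSpan_MaxLogSperSpan expects for
	// one (limit, numLogs) data point.
	limit, numLogs := 10, 50
	numOld := (limit - 1) / 2      // oldest logs kept verbatim: eventIdx 0..numOld-1
	numNew := limit - numOld - 1   // newest logs kept; one slot goes to the marker record
	dropped := numLogs - limit + 1 // reported as dropped_log_count
	fmt.Println(numOld, numNew, dropped, numLogs-numNew) // 4 5 41 45 (last kept run is 45..49)
}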

testutil_test.go

Lines changed: 8 additions & 4 deletions
@@ -3,6 +3,7 @@ package basictracer
 import (
 	"fmt"
 	"reflect"
+	"runtime"
 	"testing"
 
 	"github.com/opentracing/opentracing-go/log"
@@ -41,7 +42,8 @@ func NewLogFieldValidator(t *testing.T, fields []log.Field) *LogFieldValidator {
 // []Field slices.
 func (fv *LogFieldValidator) ExpectNextFieldEquals(key string, kind reflect.Kind, valAsString string) *LogFieldValidator {
 	if len(fv.fields) < fv.fieldIdx {
-		fv.t.Errorf("Expecting more than the %v Fields we have", len(fv.fields))
+		_, file, line, _ := runtime.Caller(1)
+		fv.t.Errorf("%s:%d Expecting more than the %v Fields we have", file, line, len(fv.fields))
 	}
 	fv.nextKey = key
 	fv.nextKind = kind
@@ -107,15 +109,17 @@ func (fv *LogFieldValidator) EmitLazyLogger(value log.LazyLogger) {
 }
 
 func (fv *LogFieldValidator) validateNextField(key string, actualKind reflect.Kind, value interface{}) {
+	// Reference the ExpectNextField caller in error messages.
+	_, file, line, _ := runtime.Caller(4)
 	if fv.nextKey != key {
-		fv.t.Errorf("Bad key: expected %q, found %q", fv.nextKey, key)
+		fv.t.Errorf("%s:%d Bad key: expected %q, found %q", file, line, fv.nextKey, key)
 	}
 	if fv.nextKind != actualKind {
-		fv.t.Errorf("Bad reflect.Kind: expected %v, found %v", fv.nextKind, actualKind)
+		fv.t.Errorf("%s:%d Bad reflect.Kind: expected %v, found %v", file, line, fv.nextKind, actualKind)
		return
 	}
 	if fv.nextValAsString != fmt.Sprint(value) {
-		fv.t.Errorf("Bad value: expected %q, found %q", fv.nextValAsString, fmt.Sprint(value))
+		fv.t.Errorf("%s:%d Bad value: expected %q, found %q", file, line, fv.nextValAsString, fmt.Sprint(value))
 	}
 	// All good.
 }
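
The testutil change above prefixes validator failures with the test's own file:line, obtained via runtime.Caller, so a failing ExpectNextFieldEquals points at the test body instead of the shared helper. Below is a minimal standalone sketch of that pattern; callerPrefix and failf are illustrative names, not part of the package. (Newer Go versions offer testing.T's Helper method for a similar effect.)

package main

import (
	"fmt"
	"runtime"
)

// callerPrefix returns "file:line" for a frame skip levels above its caller,
// the same trick the validator uses to point errors at the calling test.
func callerPrefix(skip int) string {
	_, file, line, ok := runtime.Caller(skip + 1)
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", file, line)
}

// failf prints an error message prefixed with the location of failf's caller.
func failf(format string, args ...interface{}) {
	fmt.Printf("%s "+format+"\n", append([]interface{}{callerPrefix(1)}, args...)...)
}

func main() {
	failf("Bad value: expected %q, found %q", "a", "b") // prefixed with this line's file:line
}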

tracer.go

Lines changed: 12 additions & 1 deletion
@@ -40,6 +40,16 @@ type Options struct {
 	// DropAllLogs turns log events on all Spans into no-ops.
 	// If NewSpanEventListener is set, the callbacks will still fire.
 	DropAllLogs bool
+	// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+	// value). If a span has more logs than this value, logs are dropped as
+	// necessary (and replaced with a log describing how many were dropped).
+	//
+	// About half of the MaxLogPerSpan logs kept are the oldest logs, and about
+	// half are the newest logs.
+	//
+	// If NewSpanEventListener is set, the callbacks will still fire for all log
+	// events. This value is ignored if DropAllLogs is true.
+	MaxLogsPerSpan int
 	// DebugAssertSingleGoroutine internally records the ID of the goroutine
 	// creating each Span and verifies that no operation is carried out on
 	// it on a different goroutine.
@@ -87,7 +97,8 @@ type Options struct {
 // returned object with a Tracer.
 func DefaultOptions() Options {
 	return Options{
-		ShouldSample: func(traceID uint64) bool { return traceID%64 == 0 },
+		ShouldSample:   func(traceID uint64) bool { return traceID%64 == 0 },
+		MaxLogsPerSpan: 100,
 	}
 }
 
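
For completeness, a usage sketch of the new option built only from the API visible in the diffs above (NewWithOptions, DefaultOptions, NewInMemoryRecorder, GetSpans, LogKV); the import path and the operation name are assumptions.

package main

import (
	"fmt"

	basictracer "github.com/opentracing/basictracer-go" // assumed import path for this repo
)

func main() {
	recorder := basictracer.NewInMemoryRecorder()
	opts := basictracer.DefaultOptions() // MaxLogsPerSpan now defaults to 100
	opts.Recorder = recorder
	opts.ShouldSample = func(traceID uint64) bool { return true } // always sample
	opts.MaxLogsPerSpan = 10
	tracer := basictracer.NewWithOptions(opts)

	// Log far more events than the limit allows.
	span := tracer.StartSpan("big-loop") // hypothetical operation name
	for i := 0; i < 1000; i++ {
		span.LogKV("eventIdx", i)
	}
	span.Finish()

	// The recorded span keeps the 4 oldest and 5 newest logs plus one
	// "dropped Span logs" record, 10 entries in total.
	spans := recorder.GetSpans()
	fmt.Println(len(spans[0].Logs))
}

The new default of 100 in DefaultOptions means existing users get bounded spans automatically; setting MaxLogsPerSpan to 0 keeps the old unbounded behavior, and DropAllLogs still takes precedence.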
