This repository was archived by the owner on Aug 13, 2019. It is now read-only.

Commit d6ba13b (1 parent: 26482ca)

Fix some stuff I broke during rebase.

Signed-off-by: Callum Styan <[email protected]>

File tree: 8 files changed, +42 / -63 lines changed


compact.go

Lines changed: 2 additions & 2 deletions

@@ -608,7 +608,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	}
 
 	// Create an empty tombstones file.
-	if _, err := tombstones.WriteTombstoneFile(c.logger, tmp, record.NewMemTombstones()); err != nil {
+	if _, err := tombstones.WriteTombstoneFile(c.logger, tmp, tombstones.NewMemTombstones()); err != nil {
 		return errors.Wrap(err, "write new tombstones file")
 	}

@@ -768,7 +768,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 	//
 	// TODO think how to avoid the typecasting to verify when it is head block.
 	if _, isHeadChunk := chk.Chunk.(*safeChunk); isHeadChunk && chk.MaxTime >= meta.MaxTime {
-		dranges = append(dranges, Interval{Mint: meta.MaxTime, Maxt: math.MaxInt64})
+		dranges = append(dranges, tombstones.Interval{Mint: meta.MaxTime, Maxt: math.MaxInt64})
 
 	} else
 	// Sanity check for disk blocks.
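
Both compact.go hunks swap identifiers that moved in the package split: the in-memory tombstone store and the deletion-interval type now come from the tombstones package rather than the root tsdb/record packages. Below is a minimal sketch of the in-memory side of that API as the hunks use it; the import path and the AddInterval helper signature are assumptions, not confirmed by this diff.

    package main

    import (
        "fmt"

        "github.com/prometheus/tsdb/tombstones" // assumed import path after the move
    )

    func main() {
        // Empty store, as compact.go uses when writing a fresh tombstones file.
        stones := tombstones.NewMemTombstones()

        // Mark samples of series ref 42 in [0, 1000] as deleted (assumed AddInterval signature).
        stones.AddInterval(42, tombstones.Interval{Mint: 0, Maxt: 1000})

        // Walk the store, as Head.loadWAL does when applying replayed tombstones.
        _ = stones.Iter(func(ref uint64, ivs tombstones.Intervals) error {
            fmt.Printf("series %d: %d deleted ranges\n", ref, len(ivs))
            return nil
        })
    }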

go.mod

Lines changed: 0 additions & 1 deletion

@@ -8,7 +8,6 @@ require (
 	github.com/oklog/ulid v1.3.1
 	github.com/pkg/errors v0.8.0
 	github.com/prometheus/client_golang v1.0.0
-	github.com/prometheus/prometheus v2.5.0+incompatible
 	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
 	golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6

go.sum

Lines changed: 0 additions & 2 deletions

@@ -59,8 +59,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFd
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg=
-github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=

head.go

Lines changed: 4 additions & 23 deletions

@@ -347,20 +347,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) {
 	}
 
 	var (
-<<<<<<< HEAD
-		dec       RecordDecoder
-		series    []RefSeries
-		samples   []RefSample
-		tstones   []Stone
-		allStones = newMemTombstones()
-=======
 		dec       record.RecordDecoder
 		series    []record.RefSeries
 		samples   []record.RefSample
 		tstones   []tombstones.Stone
 		allStones = tombstones.NewMemTombstones()
-		err       error
->>>>>>> Move tombstones to it's own package.
 	)
 	defer func() {
 		if err := allStones.Close(); err != nil {

@@ -385,7 +376,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) {
 	series, created := h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels)
 
 	if !created {
-		// There's already a different ref for this series.
+		// There's already a different Ref for this series.
 		multiRefLock.Lock()
 		multiRef[s.Ref] = series.Ref
 		multiRefLock.Unlock()

@@ -474,15 +465,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) {
 	}
 	wg.Wait()
 
-<<<<<<< HEAD
 	if r.Err() != nil {
 		return errors.Wrap(r.Err(), "read records")
 	}
 
-	if err := allStones.Iter(func(ref uint64, dranges Intervals) error {
-=======
 	if err := allStones.Iter(func(ref uint64, dranges tombstones.Intervals) error {
->>>>>>> Move tombstones to it's own package.
 		return h.chunkRewrite(ref, dranges)
 	}); err != nil {
 		return errors.Wrap(r.Err(), "deleting samples from tombstones")

@@ -1303,21 +1290,15 @@ func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks
 		continue
 	}
 	// Set the head chunks as open (being appended to).
-	maxTime := c.maxTime
-	if s.headChunk == c {
+	maxTime := c.MaxTime
+	if s.HeadChunk == c {
 		maxTime = math.MaxInt64
 	}
 
 	*chks = append(*chks, chunks.Meta{
-<<<<<<< HEAD
-		MinTime: c.minTime,
-		MaxTime: maxTime,
-		Ref:     packChunkID(s.ref, uint64(s.chunkID(i))),
-=======
 		MinTime: c.MinTime,
-		MaxTime: c.MaxTime,
+		MaxTime: maxTime,
 		Ref:     packChunkID(s.Ref, uint64(s.ChunkID(i))),
->>>>>>> Move WAL Watcher from Prometheus to TSDB WAL package.
 	})
 }
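
The head.go hunks remove the `<<<<<<< HEAD` / `=======` / `>>>>>>>` conflict markers the rebase left behind in loadWAL and headIndexReader.Series, keeping the post-move record and tombstones identifiers, dropping the now-redundant local err, and restoring the maxTime override so the open head chunk is reported as unbounded. A rough sketch of the decode dispatch that loadWAL's var block feeds follows; the constant and method names are assumed to match the pre-move decoder API.

    // Sketch only: dispatching on WAL record types with the relocated record
    // package. Assumes imports: fmt, github.com/pkg/errors, and the record and
    // tombstones packages; RecordSeries/Samples/Tombstones names are assumptions.
    func decodeOne(rec []byte) error {
        var dec record.RecordDecoder
        switch dec.Type(rec) {
        case record.RecordSeries:
            series, err := dec.Series(rec, nil)
            if err != nil {
                return errors.Wrap(err, "decode series")
            }
            fmt.Printf("%d series records\n", len(series))
        case record.RecordSamples:
            samples, err := dec.Samples(rec, nil)
            if err != nil {
                return errors.Wrap(err, "decode samples")
            }
            fmt.Printf("%d samples\n", len(samples))
        case record.RecordTombstones:
            stones, err := dec.Tombstones(rec, nil)
            if err != nil {
                return errors.Wrap(err, "decode tombstones")
            }
            fmt.Printf("%d tombstone stones\n", len(stones))
        default:
            return errors.New("unexpected record type")
        }
        return nil
    }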

head_test.go

Lines changed: 21 additions & 21 deletions

@@ -102,28 +102,28 @@ func TestHead_ReadWAL(t *testing.T) {
 	for _, compress := range []bool{false, true} {
 		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
 			entries := []interface{}{
-				[]RefSeries{
+				[]record.RefSeries{
 					{Ref: 10, Labels: labels.FromStrings("a", "1")},
 					{Ref: 11, Labels: labels.FromStrings("a", "2")},
 					{Ref: 100, Labels: labels.FromStrings("a", "3")},
 				},
-				[]RefSample{
+				[]record.RefSample{
 					{Ref: 0, T: 99, V: 1},
 					{Ref: 10, T: 100, V: 2},
 					{Ref: 100, T: 100, V: 3},
 				},
-				[]RefSeries{
+				[]record.RefSeries{
 					{Ref: 50, Labels: labels.FromStrings("a", "4")},
 					// This series has two refs pointing to it.
 					{Ref: 101, Labels: labels.FromStrings("a", "3")},
 				},
-				[]RefSample{
+				[]record.RefSample{
 					{Ref: 10, T: 101, V: 5},
 					{Ref: 50, T: 101, V: 6},
 					{Ref: 101, T: 101, V: 7},
 				},
-				[]Stone{
-					{ref: 0, intervals: []Interval{{Mint: 99, Maxt: 101}}},
+				[]tombstones.Stone{
+					{Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}},
 				},
 			}
 			dir, err := ioutil.TempDir("", "test_read_wal")

@@ -148,10 +148,10 @@ func TestHead_ReadWAL(t *testing.T) {
 			s50 := head.series.getByID(50)
 			s100 := head.series.getByID(100)
 
-			testutil.Equals(t, labels.FromStrings("a", "1"), s10.lset)
-			testutil.Equals(t, (*memSeries)(nil), s11) // Series without samples should be garbage colected at head.Init().
-			testutil.Equals(t, labels.FromStrings("a", "4"), s50.lset)
-			testutil.Equals(t, labels.FromStrings("a", "3"), s100.lset)
+			testutil.Equals(t, labels.FromStrings("a", "1"), s10.Lset)
+			testutil.Equals(t, (*record.MemSeries)(nil), s11) // Series without samples should be garbage colected at head.Init().
+			testutil.Equals(t, labels.FromStrings("a", "4"), s50.Lset)
+			testutil.Equals(t, labels.FromStrings("a", "3"), s100.Lset)
 
 			expandChunk := func(c chunkenc.Iterator) (x []sample) {
 				for c.Next() {

@@ -161,9 +161,9 @@ func TestHead_ReadWAL(t *testing.T) {
 				testutil.Ok(t, c.Err())
 				return x
 			}
-			testutil.Equals(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0)))
-			testutil.Equals(t, []sample{{101, 6}}, expandChunk(s50.iterator(0)))
-			testutil.Equals(t, []sample{{100, 3}, {101, 7}}, expandChunk(s100.iterator(0)))
+			testutil.Equals(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.Iterator(0)))
+			testutil.Equals(t, []sample{{101, 6}}, expandChunk(s50.Iterator(0)))
+			testutil.Equals(t, []sample{{100, 3}, {101, 7}}, expandChunk(s100.Iterator(0)))
 		})
 	}
 }

@@ -328,14 +328,14 @@ func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
 	for _, compress := range []bool{false, true} {
 		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
 			entries := []interface{}{
-				[]RefSeries{
+				[]record.RefSeries{
 					{Ref: 10, Labels: labels.FromStrings("a", "1")},
 				},
-				[]RefSample{},
-				[]RefSeries{
+				[]record.RefSample{},
+				[]record.RefSeries{
 					{Ref: 50, Labels: labels.FromStrings("a", "2")},
 				},
-				[]RefSample{
+				[]record.RefSample{
 					{Ref: 50, T: 80, V: 1},
 					{Ref: 50, T: 90, V: 1},
 				},

@@ -1056,17 +1056,17 @@ func TestHead_LogRollback(t *testing.T) {
 
 			testutil.Equals(t, 1, len(recs))
 
-			series, ok := recs[0].([]RefSeries)
+			series, ok := recs[0].([]record.RefSeries)
 			testutil.Assert(t, ok, "expected series record but got %+v", recs[0])
-			testutil.Equals(t, []RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series)
+			testutil.Equals(t, []record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series)
 		})
 	}
 }
 
 // TestWalRepair_DecodingError ensures that a repair is run for an error
 // when decoding a record.
 func TestWalRepair_DecodingError(t *testing.T) {
-	var enc RecordEncoder
+	var enc record.RecordEncoder
 	for name, test := range map[string]struct {
 		corrFunc func(rec []byte) []byte // Func that applies the corruption to a record.
 		rec      []byte

@@ -1078,7 +1078,7 @@ func TestWalRepair_DecodingError(t *testing.T) {
 			// Do not modify the base record because it is Logged multiple times.
 			res := make([]byte, len(rec))
 			copy(res, rec)
-			res[0] = byte(RecordInvalid)
+			res[0] = byte(record.RecordInvalid)
 			return res
 		},
 		enc.Series([]record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, []byte{}),

record/internal.go

Lines changed: 6 additions & 6 deletions

@@ -74,8 +74,8 @@ type MemSeries struct {
 	PendingCommit bool // Whether there are samples waiting to be committed to this series.
 	Chunks        []*MemChunk
 	Lset          labels.Labels
+	HeadChunk     *MemChunk
 
-	headChunk    *MemChunk
 	chunkRange   int64
 	firstChunkID int
 

@@ -117,7 +117,7 @@ func (s *MemSeries) cut(mint int64) *MemChunk {
 		MaxTime: math.MinInt64,
 	}
 	s.Chunks = append(s.Chunks, c)
-	s.headChunk = c
+	s.HeadChunk = c
 
 	// Set upper bound on when the next chunk must be started. An earlier timestamp
 	// may be chosen dynamically at a later point.

@@ -143,7 +143,7 @@ func (s *MemSeries) ChunksMetas() []chunks.Meta {
 // and 'chunkRange', like how it would appear after 'newMemSeries(...)'.
 func (s *MemSeries) Reset() {
 	s.Chunks = nil
-	s.headChunk = nil
+	s.HeadChunk = nil
 	s.firstChunkID = 0
 	s.nextAt = math.MinInt64
 	s.sampleBuf = [4]sample{}

@@ -197,9 +197,9 @@ func (s *MemSeries) TruncateChunksBefore(mint int64) (removed int) {
 	s.Chunks = append(s.Chunks[:0], s.Chunks[k:]...)
 	s.firstChunkID += k
 	if len(s.Chunks) == 0 {
-		s.headChunk = nil
+		s.HeadChunk = nil
 	} else {
-		s.headChunk = s.Chunks[len(s.Chunks)-1]
+		s.HeadChunk = s.Chunks[len(s.Chunks)-1]
 	}
 
 	return k

@@ -270,7 +270,7 @@ func (s *MemSeries) Iterator(id int) chunkenc.Iterator {
 }
 
 func (s *MemSeries) head() *MemChunk {
-	return s.headChunk
+	return s.HeadChunk
 }
 
 type MemChunk struct {
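
record/internal.go exports the head-chunk pointer (headChunk becomes HeadChunk) so that code outside the record package, such as the headIndexReader.Series hunk above, can tell whether a chunk is the one still being appended to. A small sketch of that consumer-side check, under the same assumption about the package's import path:

    // Sketch only: computing the effective upper bound of a chunk from outside
    // the record package, mirroring the headIndexReader.Series change above.
    // Assumes imports: math and the record package.
    func openChunkMaxTime(s *record.MemSeries, c *record.MemChunk) int64 {
        maxTime := c.MaxTime
        if s.HeadChunk == c {
            // The head chunk is still open for appends, so treat its
            // upper bound as unbounded.
            maxTime = math.MaxInt64
        }
        return maxTime
    }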

tombstones/tombstones.go

Lines changed: 6 additions & 6 deletions

@@ -50,9 +50,9 @@ func init() {
 	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
 }
 
-// NewCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
+// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
 // polynomial may be easily changed in one location at a later time, if necessary.
-func NewCRC32() hash.Hash32 {
+func newCRC32() hash.Hash32 {
 	return crc32.New(castagnoliTable)
 }
 

@@ -72,7 +72,7 @@ type TombstoneReader interface {
 }
 
 func WriteTombstoneFile(logger log.Logger, dir string, tr TombstoneReader) (int64, error) {
-	path := filepath.Join(dir, tombstoneFilename)
+	path := filepath.Join(dir, TombstoneFilename)
 	tmp := path + ".tmp"
 	hash := newCRC32()
 	var size int

@@ -151,9 +151,9 @@ type Stone struct {
 }
 
 func ReadTombstones(dir string) (TombstoneReader, int64, error) {
-	b, err := ioutil.ReadFile(filepath.Join(dir, tombstoneFilename))
+	b, err := ioutil.ReadFile(filepath.Join(dir, TombstoneFilename))
 	if os.IsNotExist(err) {
-		return newMemTombstones(), 0, nil
+		return NewMemTombstones(), 0, nil
 	} else if err != nil {
 		return nil, 0, err
 	}

@@ -175,7 +175,7 @@ func ReadTombstones(dir string) (TombstoneReader, int64, error) {
 	}
 
 	// Verify checksum.
-	hash := NewCRC32()
+	hash := newCRC32()
 	if _, err := hash.Write(d.Get()); err != nil {
 		return nil, 0, errors.Wrap(err, "write to hash")
 	}
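
In tombstones/tombstones.go the fix settles which names belong to the package surface: the CRC32 helper goes back to being unexported (newCRC32), while the filename constant and the in-memory constructor are used through their exported forms (TombstoneFilename, NewMemTombstones). A sketch of the resulting on-disk round trip, assuming the go-kit logger the package already takes and the github.com/prometheus/tsdb/tombstones import path:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"
        "os"

        kitlog "github.com/go-kit/kit/log"
        "github.com/prometheus/tsdb/tombstones" // assumed import path
    )

    func main() {
        dir, err := ioutil.TempDir("", "tombstones-sketch")
        if err != nil {
            log.Fatal(err)
        }
        defer os.RemoveAll(dir)

        // Write an empty tombstones file, as LeveledCompactor.write does above.
        if _, err := tombstones.WriteTombstoneFile(kitlog.NewNopLogger(), dir, tombstones.NewMemTombstones()); err != nil {
            log.Fatal(err)
        }

        // Read it back; ReadTombstones verifies the checksum internally using
        // the now-unexported newCRC32 helper.
        _, size, err := tombstones.ReadTombstones(dir)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s is %d bytes\n", tombstones.TombstoneFilename, size)
    }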

wal/wal_watcher.go

Lines changed: 3 additions & 2 deletions

@@ -76,6 +76,7 @@ var (
 		},
 		[]string{consumer},
 	)
+	lrMetrics = NewLiveReaderMetrics(prometheus.DefaultRegisterer)
 )
 
 // This function is copied from prometheus/prometheus/pkg/timestamp to avoid adding vendor to TSDB repo.

@@ -308,7 +309,7 @@ func (w *WALWatcher) watch(segmentNum int, tail bool) error {
 	}
 	defer segment.Close()
 
-	reader := NewLiveReader(w.logger, w.reg, segment)
+	reader := NewLiveReader(w.logger, lrMetrics, segment)
 
 	readTicker := time.NewTicker(readPeriod)
 	defer readTicker.Stop()

@@ -523,7 +524,7 @@ func (w *WALWatcher) readCheckpoint(checkpointDir string) error {
 	}
 	defer sr.Close()
 
-	r := NewLiveReader(w.logger, w.reg, sr)
+	r := NewLiveReader(w.logger, lrMetrics, sr)
 	if err := w.readSegment(r, index, false); err != io.EOF && err != nil {
 		return errors.Wrap(err, "readSegment")
 	}
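
The watcher change follows a LiveReader API in which the metrics are created once (NewLiveReaderMetrics) and handed to every NewLiveReader call instead of passing a Registerer each time; the package-level lrMetrics instance keeps repeated watch and readCheckpoint calls from re-registering the same collectors. A sketch of constructing a reader that way outside the watcher, with the segment path and import path as placeholder assumptions:

    package main

    import (
        "io"
        "os"

        kitlog "github.com/go-kit/kit/log"
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/tsdb/wal" // assumed import path
    )

    func main() {
        // One metrics instance per registerer, shared by all LiveReaders.
        metrics := wal.NewLiveReaderMetrics(prometheus.NewRegistry())

        f, err := os.Open("data/wal/00000000") // hypothetical segment path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        r := wal.NewLiveReader(kitlog.NewNopLogger(), metrics, f)
        for r.Next() {
            _ = r.Record() // process the raw record bytes
        }
        if err := r.Err(); err != nil && err != io.EOF {
            panic(err)
        }
    }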
