Skip to content
This repository was archived by the owner on Aug 13, 2019. It is now read-only.

Commit 195bc0d

Browse files
authored
Merge pull request #303 from Bplotka/bp/better-compact-logging
repair + compact: Improved logging for easier future debug purposes.
2 parents 659ed64 + fada85a commit 195bc0d

File tree

2 files changed

+41
-16
lines changed

2 files changed

+41
-16
lines changed

compact.go

Lines changed: 29 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
package tsdb
1515

1616
import (
17+
"fmt"
1718
"io"
1819
"math/rand"
1920
"os"
@@ -33,7 +34,7 @@ import (
3334
"github.com/prometheus/tsdb/labels"
3435
)
3536

36-
// ExponentialBlockRanges returns the time ranges based on the stepSize
37+
// ExponentialBlockRanges returns the time ranges based on the stepSize.
3738
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
3839
ranges := make([]int64, 0, steps)
3940
curRange := minSize
@@ -215,7 +216,7 @@ func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta {
215216

216217
Outer:
217218
for _, p := range parts {
218-
// Donot select the range if it has a block whose compaction failed.
219+
// Do not select the range if it has a block whose compaction failed.
219220
for _, dm := range p {
220221
if dm.meta.Compaction.Failed {
221222
continue Outer
@@ -312,9 +313,12 @@ func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
312313
// Compact creates a new block in the compactor's directory from the blocks in the
313314
// provided directories.
314315
func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID, err error) {
315-
var blocks []BlockReader
316-
var bs []*Block
317-
var metas []*BlockMeta
316+
var (
317+
blocks []BlockReader
318+
bs []*Block
319+
metas []*BlockMeta
320+
uids []string
321+
)
318322

319323
for _, d := range dirs {
320324
b, err := OpenBlock(d, c.chunkPool)
@@ -331,13 +335,23 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID,
331335
metas = append(metas, meta)
332336
blocks = append(blocks, b)
333337
bs = append(bs, b)
338+
uids = append(uids, meta.ULID.String())
334339
}
335340

336341
entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
337342
uid = ulid.MustNew(ulid.Now(), entropy)
338343

339-
err = c.write(dest, compactBlockMetas(uid, metas...), blocks...)
344+
meta := compactBlockMetas(uid, metas...)
345+
err = c.write(dest, meta, blocks...)
340346
if err == nil {
347+
level.Info(c.logger).Log(
348+
"msg", "compact blocks",
349+
"count", len(blocks),
350+
"mint", meta.MinTime,
351+
"maxt", meta.MaxTime,
352+
"ulid", meta.ULID,
353+
"sources", fmt.Sprintf("%v", uids),
354+
)
341355
return uid, nil
342356
}
343357

@@ -365,7 +379,13 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) (
365379
meta.Compaction.Level = 1
366380
meta.Compaction.Sources = []ulid.ULID{uid}
367381

368-
return uid, c.write(dest, meta, b)
382+
err := c.write(dest, meta, b)
383+
if err != nil {
384+
return uid, err
385+
}
386+
387+
level.Info(c.logger).Log("msg", "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID)
388+
return uid, nil
369389
}
370390

371391
// instrumentedChunkWriter is used for level 1 compactions to record statistics
@@ -390,8 +410,6 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
390410
// write creates a new block that is the union of the provided blocks into dir.
391411
// It cleans up all files of the old blocks after completing successfully.
392412
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) {
393-
level.Info(c.logger).Log("msg", "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime)
394-
395413
dir := filepath.Join(dest, meta.ULID.String())
396414
tmp := dir + ".tmp"
397415

@@ -472,7 +490,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
472490
return errors.Wrap(err, "sync temporary dir file")
473491
}
474492

475-
// close temp dir before rename block dir(for windows platform)
493+
// Close temp dir before rename block dir (for windows platform).
476494
if err = df.Close(); err != nil {
477495
return errors.Wrap(err, "close temporary dir")
478496
}
@@ -482,6 +500,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
482500
if err := renameFile(tmp, dir); err != nil {
483501
return errors.Wrap(err, "rename block dir")
484502
}
503+
485504
return nil
486505
}
487506

@@ -718,11 +737,6 @@ type compactionMerger struct {
718737
intervals Intervals
719738
}
720739

721-
type compactionSeries struct {
722-
labels labels.Labels
723-
chunks []*chunks.Meta
724-
}
725-
726740
func newCompactionMerger(a, b ChunkSeriesSet) (*compactionMerger, error) {
727741
c := &compactionMerger{
728742
a: a,

repair.go

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,20 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
3636
return err
3737
}
3838
if meta.Version == 1 {
39+
level.Info(logger).Log(
40+
"msg", "found healthy block",
41+
"mint", meta.MinTime,
42+
"maxt", meta.MaxTime,
43+
"ulid", meta.ULID,
44+
)
3945
continue
4046
}
41-
level.Info(logger).Log("msg", "fixing broken block", "ulid", meta.ULID)
47+
level.Info(logger).Log(
48+
"msg", "fixing broken block",
49+
"mint", meta.MinTime,
50+
"maxt", meta.MaxTime,
51+
"ulid", meta.ULID,
52+
)
4253

4354
repl, err := os.Create(filepath.Join(d, "index.repaired"))
4455
if err != nil {

0 commit comments

Comments (0)