
Commit 1fbe319

gwossum and devanbenz authored
fix: reduce excessive CPU usage during compaction planning (#26432)
Co-authored-by: devanbenz <[email protected]>
1 parent eab8a8a commit 1fbe319

File tree

11 files changed (+2586, -2048 lines)

cmd/influx_tools/compact/command.go

Lines changed: 1 addition & 1 deletion

@@ -136,7 +136,7 @@ type shardCompactor struct {
 
 // These methods are not used in production, need to implement in
 // order to satisfy the FileStore interface, see: https://github.com/influxdata/influxdb/pull/26211
-func (sc *shardCompactor) Stats() []tsm1.FileStat {
+func (sc *shardCompactor) Stats() []tsm1.ExtFileStat {
 	return nil
 }
 
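
Note on the signature: ExtFileStat replaces FileStat as the planner-facing stat type throughout this commit. A minimal stand-in sketch of the shape the diffs below rely on (field names come from the changed code; the actual InfluxDB definition may carry more fields):

// Stand-in sketch only, not the actual influxdb definitions. The idea is
// that FirstBlockCount is captured once, when the TSM file is opened, so
// planning code can read it as a plain field instead of calling back into
// the FileStore for every check.
type FileStat struct {
	Path string // TSM file path
	Size uint32 // file size in bytes
}

type ExtFileStat struct {
	FileStat
	FirstBlockCount int // points in the file's first block, cached at open time
}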

tsdb/engine/tsm1/compact.go

Lines changed: 9 additions & 7 deletions
@@ -168,9 +168,8 @@ type DefaultPlanner struct {
 }
 
 type fileStore interface {
-	Stats() []FileStat
+	Stats() []ExtFileStat
 	LastModified() time.Time
-	BlockCount(path string, idx int) int
 	ParseFileName(path string) (int, int, error)
 	NextGeneration() int
 	TSMReader(path string) (*TSMReader, error)
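
Dropping BlockCount(path string, idx int) from the fileStore interface is the heart of the fix: each call forced the store to re-resolve the file by path (and, in the real implementation, consult the file's index) before it could answer, and Plan() issued that call for every candidate file on every planning pass. A self-contained sketch of the before/after call shape (toy code, not the real store; the toy models only the per-call lookup):

package main

import "fmt"

// Stand-in types repeated so this sketch compiles on its own.
type FileStat struct {
	Path string
	Size uint32
}

type ExtFileStat struct {
	FileStat
	FirstBlockCount int // cached once when the file is opened
}

const defaultMaxPointsPerBlock = 1000 // stand-in for tsdb.DefaultMaxPointsPerBlock

// toyStore mimics the removed interface method: answering BlockCount means
// locating the file again before a count can be produced.
type toyStore struct{ files []ExtFileStat }

func (s *toyStore) BlockCount(path string, _ int) int {
	for _, f := range s.files {
		if f.Path == path {
			return f.FirstBlockCount
		}
	}
	return 0
}

func main() {
	files := []ExtFileStat{
		{FileStat{"000001-01.tsm", 1 << 20}, 1000},
		{FileStat{"000002-01.tsm", 1 << 20}, 512},
	}
	store := &toyStore{files: files}

	// Old shape: an interface call plus a lookup per check.
	fmt.Println(store.BlockCount(files[0].Path, 1) >= defaultMaxPointsPerBlock) // true

	// New shape: the value was cached on the stat, so the check is a field read.
	fmt.Println(files[0].FirstBlockCount >= defaultMaxPointsPerBlock) // true
}
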
@@ -190,7 +189,7 @@ func NewDefaultPlanner(fs fileStore, writeColdDuration time.Duration) *DefaultPlanner {
 // 000001 each with different sequence numbers.
 type tsmGeneration struct {
 	id            int
-	files         []FileStat
+	files         []ExtFileStat
 	parseFileName ParseFileNameFunc
 }

@@ -270,7 +269,10 @@ func (c *DefaultPlanner) generationsFullyCompacted(gens tsmGenerations) (bool, string) {
 	aggressivePointsPerBlockCount := 0
 	filesUnderMaxTsmSizeCount := 0
 	for _, tsmFile := range gens[0].files {
-		if c.FileStore.BlockCount(tsmFile.Path, 1) >= c.GetAggressiveCompactionPointsPerBlock() {
+		// We check for greater than the default points per block here because the admin may
+		// have increased aggressive points per block in the config and wants to
+		// recompact files at the new higher max.
+		if tsmFile.FirstBlockCount > tsdb.DefaultMaxPointsPerBlock {
 			aggressivePointsPerBlockCount++
 		}
 		if tsmFile.Size < tsdb.MaxTSMFileSize {
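
On the strict greater-than in the new check: a file compacted at the default level has first blocks holding exactly the default point count, while an aggressively compacted file holds more. A tiny worked sketch, assuming the shipped default of 1000 points per block (the concrete value is an assumption here; the code relies only on tsdb.DefaultMaxPointsPerBlock):

package sketch

const defaultMaxPointsPerBlock = 1000 // assumed value of tsdb.DefaultMaxPointsPerBlock

// Strict > : blocks at exactly the default are NOT counted as aggressively
// compacted, so raising the aggressive setting makes those files candidates
// for recompaction at the new higher max.
func aggressivelyCompacted(firstBlockCount int) bool {
	return firstBlockCount > defaultMaxPointsPerBlock
}

// aggressivelyCompacted(1000)  -> false: compacted at the default, still eligible.
// aggressivelyCompacted(10000) -> true: already above the default block size.
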
@@ -510,7 +512,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) {
 	var skip bool
 
 	// Skip the file if it's over the max size and contains a full block and it does not have any tombstones
-	if len(generations) > 2 && group.size() > uint64(tsdb.MaxTSMFileSize) && c.FileStore.BlockCount(group.files[0].Path, 1) >= tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {
+	if len(generations) > 2 && group.size() > uint64(tsdb.MaxTSMFileSize) && group.files[0].FirstBlockCount >= tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {
 		skip = true
 	}

@@ -586,7 +588,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) {
 	// Skip the file if it's over the max size and contains a full block or the generation is split
 	// over multiple files. In the latter case, that would mean the data in the file spilled over
 	// the 2GB limit.
-	if g.size() > uint64(tsdb.MaxTSMFileSize) && c.FileStore.BlockCount(g.files[0].Path, 1) >= tsdb.DefaultMaxPointsPerBlock {
+	if g.size() > uint64(tsdb.MaxTSMFileSize) && g.files[0].FirstBlockCount >= tsdb.DefaultMaxPointsPerBlock {
 		start = i + 1
 	}

@@ -630,7 +632,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) {
 	}
 
 	// Skip the file if it's over the max size and it contains a full block
-	if gen.size() >= uint64(tsdb.MaxTSMFileSize) && c.FileStore.BlockCount(gen.files[0].Path, 1) >= tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {
+	if gen.size() >= uint64(tsdb.MaxTSMFileSize) && gen.files[0].FirstBlockCount >= tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {
 		startIndex++
 		continue
 	}
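
All three Plan() guards above now share the same shape: a generation-size check plus a comparison against the cached FirstBlockCount. A micro-benchmark sketch of why that matters when planning runs repeatedly (toy types; the real savings are larger, since the removed BlockCount also touched the file's TSM index):

package planner_test

import (
	"fmt"
	"testing"
)

type extFileStat struct {
	path            string
	firstBlockCount int
}

// toyStore mimics the old path: each BlockCount call locates the file by
// path before it can report a count.
type toyStore struct{ files []extFileStat }

func (s *toyStore) blockCount(path string) int {
	for _, f := range s.files {
		if f.path == path {
			return f.firstBlockCount
		}
	}
	return 0
}

func makeFiles(n int) []extFileStat {
	files := make([]extFileStat, n)
	for i := range files {
		files[i] = extFileStat{path: fmt.Sprintf("%06d-01.tsm", i+1), firstBlockCount: 1000}
	}
	return files
}

var sink int // keeps the compiler from eliding the benchmark loops

func BenchmarkBlockCountViaStore(b *testing.B) {
	files := makeFiles(1024)
	store := &toyStore{files: files}
	for i := 0; i < b.N; i++ {
		full := 0
		for _, f := range files {
			if store.blockCount(f.path) >= 1000 { // lookup per check, as before the fix
				full++
			}
		}
		sink = full
	}
}

func BenchmarkBlockCountCached(b *testing.B) {
	files := makeFiles(1024)
	for i := 0; i < b.N; i++ {
		full := 0
		for _, f := range files {
			if f.firstBlockCount >= 1000 { // plain field read, as after the fix
				full++
			}
		}
		sink = full
	}
}

Run with: go test -bench BlockCount. On the toy store the cached variant does one field read per file, while the lookup variant does O(len(files)) work per file per planning pass.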
