Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .chloggen/fix_43693.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: "bug_fix"

# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog)
component: pkg/stanza

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Add an `on_truncate` setting to control how reader offsets are handled when a copytruncate rotation is detected (`ignore`, `read_whole_file`, or `read_new`), ensuring continuous log collection."

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [43693]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
2 changes: 1 addition & 1 deletion .github/CODEOWNERS
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,7 @@ receiver/ntpreceiver/ @open-telemetry
receiver/oracledbreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 @atoulme
receiver/osqueryreceiver/ @open-telemetry/collector-contrib-approvers @nslaughter @smithclay
receiver/otelarrowreceiver/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3
receiver/otlpjsonfilereceiver/ @open-telemetry/collector-contrib-approvers @atoulme
receiver/otlpjsonfilereceiver/ @open-telemetry/collector-contrib-approvers @atoulme @paulojmdias
receiver/podmanreceiver/ @open-telemetry/collector-contrib-approvers @rogercoll
receiver/postgresqlreceiver/ @open-telemetry/collector-contrib-approvers @antonblock @ishleenk17
receiver/pprofreceiver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @atoulme
Expand Down
25 changes: 25 additions & 0 deletions pkg/stanza/fileconsumer/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,21 @@ const (
defaultPollInterval = 200 * time.Millisecond
)

// Accepted values for the `on_truncate` setting, which selects how the
// consumer reacts when a file sharing a known fingerprint is found with a
// size smaller than the stored offset (the signature of a copytruncate
// rotation).
const (
	// OnTruncateIgnore preserves the stored offset: nothing is read until
	// the file grows past it, and only bytes beyond it are then consumed.
	OnTruncateIgnore = "ignore"

	// OnTruncateReadWholeFile rewinds to offset 0 and re-reads the file
	// from the beginning once truncation is detected.
	OnTruncateReadWholeFile = "read_whole_file"

	// OnTruncateReadNew adopts the file's current (smaller) size as the
	// offset, so only data appended after the truncation is consumed.
	OnTruncateReadNew = "read_new"

	// defaultOnTruncate is applied when the user does not set `on_truncate`.
	defaultOnTruncate = OnTruncateIgnore
)

var allowFileDeletion = featuregate.GlobalRegistry().MustRegister(
"filelog.allowFileDeletion",
featuregate.StageAlpha,
Expand All @@ -63,6 +78,7 @@ func NewConfig() *Config {
MaxLogSize: reader.DefaultMaxLogSize,
Encoding: defaultEncoding,
FlushPeriod: reader.DefaultFlushPeriod,
OnTruncate: defaultOnTruncate,
Resolver: attrs.Resolver{
IncludeFileName: true,
},
Expand Down Expand Up @@ -91,6 +107,7 @@ type Config struct {
Compression string `mapstructure:"compression,omitempty"`
PollsToArchive int `mapstructure:"polls_to_archive,omitempty"`
AcquireFSLock bool `mapstructure:"acquire_fs_lock,omitempty"`
OnTruncate string `mapstructure:"on_truncate,omitempty"`
}

type HeaderConfig struct {
Expand Down Expand Up @@ -192,6 +209,7 @@ func (c Config) Build(set component.TelemetrySettings, emit emit.Callback, opts
telemetryBuilder: telemetryBuilder,
noTracking: o.noTracking,
pollsToArchive: c.PollsToArchive,
onTruncate: c.OnTruncate,
}, nil
}

Expand Down Expand Up @@ -247,6 +265,13 @@ func (c Config) validate() error {
return fmt.Errorf("'include_file_owner_name' or 'include_file_owner_group_name' it's not supported for windows: %w", err)
}

switch c.OnTruncate {
case OnTruncateIgnore, OnTruncateReadWholeFile, OnTruncateReadNew:
// Valid values
default:
return fmt.Errorf("'on_truncate' must be one of: %s, %s, %s", OnTruncateIgnore, OnTruncateReadWholeFile, OnTruncateReadNew)
}

return nil
}

Expand Down
88 changes: 87 additions & 1 deletion pkg/stanza/fileconsumer/file.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ type Manager struct {
maxBatches int
maxBatchFiles int
pollsToArchive int
onTruncate string

telemetryBuilder *metadata.TelemetryBuilder
}
Expand Down Expand Up @@ -252,6 +253,34 @@ func (m *Manager) handleUnmatchedFiles(ctx context.Context) {
var err error

if md != nil {
// Check if file was copy-truncated since last seen
if info, statErr := file.Stat(); statErr == nil && md.Offset > info.Size() {
// File has been truncated (copytruncate rotation)
switch m.onTruncate {
case OnTruncateReadWholeFile:
m.set.Logger.Debug("File has been rotated(truncated). Resetting offset to 0",
zap.String("path", file.Name()),
zap.Int64("stored_offset", md.Offset),
zap.Int64("current_file_size", info.Size()),
)
md.Offset = 0
case OnTruncateReadNew:
m.set.Logger.Debug("File has been rotated(truncated). Storing new offset",
zap.String("path", file.Name()),
zap.Int64("stored_offset", md.Offset),
zap.Int64("current_file_size", info.Size()),
zap.Int64("new_offset", info.Size()),
)
md.Offset = info.Size()
case OnTruncateIgnore:
// Keep the old offset - no data will be read until file grows past the original offset
m.set.Logger.Debug("File has been rotated(truncated). Keeping original offset",
zap.String("path", file.Name()),
zap.Int64("stored_offset", md.Offset),
zap.Int64("current_file_size", info.Size()),
)
}
}
reader, err = m.readerFactory.NewReaderFromMetadata(file, md)
if m.tracker.Name() != tracker.NoStateTracker {
m.set.Logger.Info("File found in archive. Started watching file again", zap.String("path", file.Name()))
Expand Down Expand Up @@ -289,11 +318,68 @@ func (m *Manager) newReader(ctx context.Context, file *os.File, fp *fingerprint.
zap.String("rotated_path", file.Name()))
}
}
return m.readerFactory.NewReaderFromMetadata(file, oldReader.Close())
// Close old reader and adjust metadata if file was copy-truncated.
md := oldReader.Close()
if info, err := file.Stat(); err == nil && md.Offset > info.Size() {
// File has been truncated (copytruncate rotation)
switch m.onTruncate {
case OnTruncateReadWholeFile:
m.set.Logger.Warn("File has been rotated(truncated). Resetting offset to 0",
zap.String("path", file.Name()),
zap.Int64("stored_offset", md.Offset),
zap.Int64("current_file_size", info.Size()),
)
md.Offset = 0
case OnTruncateReadNew:
m.set.Logger.Warn("File has been rotated(truncated). Storing new offset",
zap.String("path", file.Name()),
zap.Int64("stored_offset", md.Offset),
zap.Int64("current_file_size", info.Size()),
zap.Int64("new_offset", info.Size()),
)
md.Offset = info.Size()
case OnTruncateIgnore:
// Keep the old offset - no data will be read until file grows past the original offset
m.set.Logger.Warn("File has been rotated(truncated). Keeping original offset",
zap.String("path", file.Name()),
zap.Int64("stored_offset", md.Offset),
zap.Int64("current_file_size", info.Size()),
)
}
}
return m.readerFactory.NewReaderFromMetadata(file, md)
}

// Check for closed files for match
if oldMetadata := m.tracker.GetClosedFile(fp); oldMetadata != nil {
// Check if file was copy-truncated since last seen
if info, statErr := file.Stat(); statErr == nil && oldMetadata.Offset > info.Size() {
// File has been truncated (copytruncate rotation)
switch m.onTruncate {
case OnTruncateReadWholeFile:
m.set.Logger.Debug("File has been rotated(truncated). Resetting offset to 0",
zap.String("path", file.Name()),
zap.Int64("stored_offset", oldMetadata.Offset),
zap.Int64("current_file_size", info.Size()),
)
oldMetadata.Offset = 0
case OnTruncateReadNew:
m.set.Logger.Debug("File has been rotated(truncated). Storing new offset",
zap.String("path", file.Name()),
zap.Int64("stored_offset", oldMetadata.Offset),
zap.Int64("current_file_size", info.Size()),
zap.Int64("new_offset", info.Size()),
)
oldMetadata.Offset = info.Size()
case OnTruncateIgnore:
// Keep the old offset - no data will be read until file grows past the original offset
m.set.Logger.Debug("File has been rotated(truncated). Keeping original offset",
zap.String("path", file.Name()),
zap.Int64("stored_offset", oldMetadata.Offset),
zap.Int64("current_file_size", info.Size()),
)
}
}
r, err := m.readerFactory.NewReaderFromMetadata(file, oldMetadata)
if err != nil {
return nil, err
Expand Down
57 changes: 57 additions & 0 deletions pkg/stanza/fileconsumer/file_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
package fileconsumer

import (
"bytes"
"compress/gzip"
"context"
"fmt"
Expand Down Expand Up @@ -1664,3 +1665,59 @@ func TestArchive(t *testing.T) {

sink.ExpectCalls(t, log3, log4)
}

// TestCopyTruncateResetsOffsetOnRestart_IdenticalFirstKB verifies that after a
// copytruncate rotation in which the rotated copy and the rewritten file share
// an identical fingerprint prefix, a restarted manager configured with
// on_truncate=read_whole_file detects stored_offset > file_size and re-reads
// the file from the beginning instead of stalling.
func TestCopyTruncateResetsOffsetOnRestart_IdenticalFirstKB(t *testing.T) {
	t.Parallel()

	line := string(bytes.Repeat([]byte("a"), 1024)) // identical 1024B lines

	tempDir := t.TempDir()
	cfg := NewConfig().includeDir(tempDir)
	cfg.StartAt = "beginning"
	cfg.FingerprintSize = 1000 // identical prefix across rotations
	// The reset-to-zero behavior under test only happens with read_whole_file;
	// the default (ignore) keeps the stale offset and would read nothing.
	cfg.OnTruncate = OnTruncateReadWholeFile

	// Manager #1 (manual polling, no background goroutine)
	op1, sink1 := testManager(t, cfg)
	op1.persister = testutil.NewUnscopedMockPersister()

	// Create file and write 20 lines
	log := filetest.OpenTemp(t, tempDir)
	for range 20 {
		filetest.WriteString(t, log, line+"\n")
	}

	// First poll: read the existing 20 lines
	op1.poll(t.Context())
	for range 20 {
		sink1.ExpectToken(t, []byte(line))
	}

	// Simulate copytruncate: copy contents to a rotated name, truncate the
	// original in place, then write 10 fresh lines with the same prefix.
	rotated := log.Name() + ".1"
	origData, err := os.ReadFile(log.Name())
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(rotated, origData, 0o600))
	require.NoError(t, log.Truncate(0))
	_, err = log.Seek(0, 0)
	require.NoError(t, err)
	for range 10 {
		filetest.WriteString(t, log, line+"\n")
	}

	// Persist metadata as if we were running; then stop op1.
	// (poll() already saves checkpoints when persister is set.)
	// Ensure any internal rotations are finalized.
	op1.poll(t.Context())
	require.NoError(t, op1.Stop())

	// Manager #2 (manual polling) resumes from persisted metadata
	op2, sink2 := testManager(t, cfg)
	op2.persister = op1.persister

	// On poll, offset > current size is detected and reset to 0,
	// so the 10 post-truncation lines are re-read from the start.
	op2.poll(t.Context())
	for range 10 {
		sink2.ExpectToken(t, []byte(line))
	}
	sink2.ExpectNoCalls(t)
}
Loading
Loading