diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b8c46fa18c5..58d2a41e526 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -129,6 +129,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] - Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. {pull}37544[37544] - Make more selective the Pod autodiscovery upon node and namespace update events. {issue}37338[37338] {pull}37431[37431] +- Raw event data logged by outputs on error is now logged to a different log file {pull}37475[37475] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index c803ff33e8e..5b7d35e78c2 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12700,12 +12700,12 @@ SOFTWARE -------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-agent-libs -Version: v0.7.5 +Dependency : github.com/belimawr/elastic-agent-libs +Version: v0.2.9-0.20240122163001-efb117578ab2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/belimawr/elastic-agent-libs@v0.2.9-0.20240122163001-efb117578ab2/LICENSE: Apache License Version 2.0, January 2004 diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 883760ab410..83d5cf61ab0 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1544,6 +1544,46 @@ logging.files: # file. Defaults to true. 
# rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. 
This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/auditbeat/auditbeat.yml b/auditbeat/auditbeat.yml index eb87fec7e7e..dc63f2ab148 100644 --- a/auditbeat/auditbeat.yml +++ b/auditbeat/auditbeat.yml @@ -169,6 +169,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have its own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/auditbeat/tests/system/requirements.txt b/auditbeat/tests/system/requirements.txt index c2399b66f80..a6da4ed167d 100644 --- a/auditbeat/tests/system/requirements.txt +++ b/auditbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/dev-tools/requirements.txt b/dev-tools/requirements.txt index f69927dbe3c..abd269465e9 100644 --- a/dev-tools/requirements.txt +++ b/dev-tools/requirements.txt @@ -1,3 +1,3 @@ elasticsearch requests -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 755db3726e7..dc39590777e 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2640,6 +2640,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. 
+ #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index aa50779b922..810604dbe1d 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -186,6 +186,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. 
+ #name: filebeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/filebeat/tests/integration/sensitive_log_file_test.go b/filebeat/tests/integration/sensitive_log_file_test.go new file mode 100644 index 00000000000..9ddc504dd6e --- /dev/null +++ b/filebeat/tests/integration/sensitive_log_file_test.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +var eventsLogFileCfg = ` +filebeat.inputs: + - type: filestream + id: filestream-input-id + enabled: true + parsers: + - ndjson: + target: "" + overwrite_keys: true + expand_keys: true + add_error_key: true + ignore_decoding_error: false + paths: + - %s + +output: + elasticsearch: + hosts: + - localhost:9200 + protocol: http + username: admin + password: testing + +logging: + level: debug + files: + events: + files: + name: filebeat-sensitive-data +` + +func TestEventsLoggerESOutput(t *testing.T) { + // First things first, ensure ES is running and we can connect to it. + // If ES is not running, the test will timeout and the only way to know + // what caused it is going through Filebeat's logs. + integration.EnsureESIsRunning(t) + + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + logFilePath := filepath.Join(filebeat.TempDir(), "log.log") + filebeat.WriteConfigFile(fmt.Sprintf(eventsLogFileCfg, logFilePath)) + + logFile, err := os.Create(logFilePath) + if err != nil { + t.Fatalf("could not create file '%s': %s", logFilePath, err) + } + + _, _ = logFile.WriteString(` +{"message":"foo bar","int":10,"string":"str"} +{"message":"another message","int":20,"string":"str2"} +{"message":"index failure","int":"not a number","string":10} +{"message":"second index failure","int":"not a number","string":10} +`) + if err := logFile.Sync(); err != nil { + t.Fatalf("could not sync log file '%s': %s", logFilePath, err) + } + if err := logFile.Close(); err != nil { + t.Fatalf("could not close log file '%s': %s", logFilePath, err) + } + + filebeat.Start() + + // Wait for a log entry that indicates an entry in the events + // logger file. 
+ msg := "Cannot index event (status=400)" + require.Eventually(t, func() bool { + return filebeat.LogContains(msg) + }, time.Minute, 100*time.Millisecond, + fmt.Sprintf("String '%s' not found on Filebeat logs", msg)) + + glob := filepath.Join(filebeat.TempDir(), "filebeat-sensitive-data*.ndjson") + files, err := filepath.Glob(glob) + if err != nil { + t.Fatalf("could not read files matching glob '%s': %s", glob, err) + } + if len(files) != 1 { + t.Fatalf("there must be only one file matching the glob '%s', found: %s", glob, files) + } + + eventsLogFile := files[0] + data, err := os.ReadFile(eventsLogFile) + if err != nil { + t.Fatalf("could not read '%s': %s", eventsLogFile, err) + } + + strData := string(data) + eventMsg := "not a number" + if !strings.Contains(strData, eventMsg) { + t.Errorf("expecting to find '%s' on '%s'", eventMsg, eventsLogFile) + t.Errorf("Contents:\n%s", strData) + t.FailNow() + } +} diff --git a/go.mod b/go.mod index a7044889fac..b1598cf2dc8 100644 --- a/go.mod +++ b/go.mod @@ -419,3 +419,5 @@ replace ( // Exclude this version because the version has an invalid checksum. 
exclude github.com/docker/distribution v2.8.0+incompatible + +replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20240122163001-efb117578ab2 diff --git a/go.sum b/go.sum index 79feea75570..3cfab73cd90 100644 --- a/go.sum +++ b/go.sum @@ -373,6 +373,8 @@ github.com/awslabs/goformation/v4 v4.1.0 h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56 github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 h1:lxW5Q6K2IisyF5tlr6Ts0W4POGWQZco05MJjFmoeIHs= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20240122163001-efb117578ab2 h1:QOTo5kTJ8oqdrSOH8/OhSkEMA3mnRltGg52M9YyH7Zo= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20240122163001-efb117578ab2/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= @@ -662,8 +664,6 @@ github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lO github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= -github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= -github.com/elastic/elastic-agent-libs v0.7.5/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 
h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 2b2f28382e9..da19a7c7db3 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1636,6 +1636,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. 
Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index 8accb212db4..3c1f3756420 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -152,6 +152,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/heartbeat/tests/system/requirements.txt b/heartbeat/tests/system/requirements.txt index c2399b66f80..a6da4ed167d 100644 --- a/heartbeat/tests/system/requirements.txt +++ b/heartbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index 660bbb73a02..2b65512d999 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -67,3 +67,43 @@ logging.files: # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true + +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/{{.BeatName}} + + # The name of the files where the logs are written to. + #name: {{.BeatName}}-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. 
+ # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true diff --git a/libbeat/_meta/config/logging.yml.tmpl b/libbeat/_meta/config/logging.yml.tmpl index 00227ad0cdf..d56a3c8fd0a 100644 --- a/libbeat/_meta/config/logging.yml.tmpl +++ b/libbeat/_meta/config/logging.yml.tmpl @@ -8,3 +8,17 @@ # To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] + +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have its own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/{{.BeatName}} + + # The name of the files where the logs are written to. 
+ #name: {{.BeatName}}-sensitive-data diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index efe8bd48f79..54e0fb8a6c5 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -120,14 +120,15 @@ type beatConfig struct { Features *config.C `config:"features"` // beat internal components configurations - HTTP *config.C `config:"http"` - HTTPPprof *pprof.Config `config:"http.pprof"` - BufferConfig *config.C `config:"http.buffer"` - Path paths.Path `config:"path"` - Logging *config.C `config:"logging"` - MetricLogging *config.C `config:"logging.metrics"` - Keystore *config.C `config:"keystore"` - Instrumentation instrumentation.Config `config:"instrumentation"` + HTTP *config.C `config:"http"` + HTTPPprof *pprof.Config `config:"http.pprof"` + BufferConfig *config.C `config:"http.buffer"` + Path paths.Path `config:"path"` + Logging *config.C `config:"logging"` + SensitiveLogging *config.C `config:"logging.sensitive"` + MetricLogging *config.C `config:"logging.metrics"` + Keystore *config.C `config:"keystore"` + Instrumentation instrumentation.Config `config:"instrumentation"` // output/publishing related configurations Pipeline pipeline.Config `config:",inline"` @@ -378,7 +379,30 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { Logger: logp.L().Named("publisher"), Tracer: b.Instrumentation.Tracer(), } - outputFactory := b.makeOutputFactory(b.Config.Output) + + // Get the default/current logging configuration + // we need some defaults to be populated otherwise Unpack will + // fail. We also overwrite some defaults that are specific to the + // events logger. + sensitiveLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + sensitiveLoggerCfg.ToFiles = true // make the default explicit + sensitiveLoggerCfg.Files.MaxSize = 5242880 // 5MB + sensitiveLoggerCfg.Files.MaxBackups = 5 + + // merge sensitiveLoggerCfg with b.Config.Logging, so logging.sensitive.* only + // overwrites the files block. 
+ if err := b.Config.SensitiveLogging.Unpack(&sensitiveLoggerCfg); err != nil { + return nil, fmt.Errorf("error initialising events logger: %w", err) + } + + // Ensure the default filename is set + if sensitiveLoggerCfg.Files.Name == "" { + sensitiveLoggerCfg.Files.Name = b.Info.Beat + // Append the name so the files do not overwrite themselves. + sensitiveLoggerCfg.Files.Name = sensitiveLoggerCfg.Files.Name + "-sensitive-data" + } + + outputFactory := b.makeOutputFactory(b.Config.Output, sensitiveLoggerCfg) settings := pipeline.Settings{ Processors: b.processors, InputQueueSize: b.InputQueueSize, @@ -388,7 +412,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { return nil, fmt.Errorf("error initializing publisher: %w", err) } - reload.RegisterV2.MustRegisterOutput(b.makeOutputReloader(publisher.OutputReloader())) + reload.RegisterV2.MustRegisterOutput(b.makeOutputReloader(publisher.OutputReloader(), sensitiveLoggerCfg)) // TODO: some beats race on shutdown with publisher.Stop -> do not call Stop yet, // but refine publisher to disconnect clients on stop automatically @@ -784,6 +808,23 @@ func (b *Beat) configure(settings Settings) error { return fmt.Errorf("error unpacking config data: %w", err) } + // If either b.Config.SensitiveLogging or b.Config.Logging is nil + // merging them will fail, so in case any of them is nil, + // we set them to an empty config.C + if b.Config.SensitiveLogging == nil { + b.Config.SensitiveLogging = config.NewConfig() + } + if b.Config.Logging == nil { + b.Config.Logging = config.NewConfig() + } + if err := b.Config.SensitiveLogging.Merge(b.Config.Logging); err != nil { + return fmt.Errorf("cannot merge logging and logging.sensitive configuration: %w", err) + } + + if _, err := b.Config.SensitiveLogging.Remove("events", -1); err != nil { + return fmt.Errorf("cannot update logging.sensitive configuration: %w", err) + } + if err := promoteOutputQueueSettings(&b.Config); err != nil { return fmt.Errorf("could not promote 
output queue settings: %w", err) } @@ -1091,7 +1132,7 @@ func (b *Beat) indexSetupCallback() elasticsearch.ConnectCallback { } } -func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader) reload.Reloadable { +func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader, sensitiveLoggerCfg logp.Config) reload.Reloadable { return reload.ReloadableFunc(func(update *reload.ConfigWithMeta) error { if update == nil { return nil @@ -1113,15 +1154,16 @@ func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader) reload.Re } } - return outReloader.Reload(update, b.createOutput) + return outReloader.Reload(update, sensitiveLoggerCfg, b.createOutput) }) } func (b *Beat) makeOutputFactory( cfg config.Namespace, + eventLoggerCfg logp.Config, ) func(outputs.Observer) (string, outputs.Group, error) { return func(outStats outputs.Observer) (string, outputs.Group, error) { - out, err := b.createOutput(outStats, cfg) + out, err := b.createOutput(outStats, cfg, eventLoggerCfg) return cfg.Name(), out, err } } @@ -1217,7 +1259,7 @@ func (b *Beat) reloadOutputOnCertChange(cfg config.Namespace) error { return nil } -func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace) (outputs.Group, error) { +func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace, sensitiveLoggerCfg logp.Config) (outputs.Group, error) { if !cfg.IsSet() { return outputs.Group{}, nil } @@ -1226,7 +1268,7 @@ func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace) (outpu return outputs.Group{}, fmt.Errorf("could not setup output certificates reloader: %w", err) } - return outputs.Load(b.IdxSupporter, b.Info, stats, cfg.Name(), cfg.Config()) + return outputs.Load(b.IdxSupporter, b.Info, stats, cfg.Name(), cfg.Config(), sensitiveLoggerCfg) } func (b *Beat) registerClusterUUIDFetching() { diff --git a/libbeat/cmd/instance/beat_test.go b/libbeat/cmd/instance/beat_test.go index 52e55941225..184797591b8 100644 --- 
a/libbeat/cmd/instance/beat_test.go +++ b/libbeat/cmd/instance/beat_test.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/publisher/queue/memqueue" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-ucfg/yaml" "github.com/gofrs/uuid" @@ -247,7 +248,7 @@ elasticsearch: update := &reload.ConfigWithMeta{Config: c} m := &outputReloaderMock{} - reloader := b.makeOutputReloader(m) + reloader := b.makeOutputReloader(m, logp.Config{}) require.False(t, b.Config.Output.IsSet(), "the output should not be set yet") require.True(t, b.isConnectionToOlderVersionAllowed(), "allow_older_versions flag should be true from 8.11") @@ -266,7 +267,8 @@ type outputReloaderMock struct { func (r *outputReloaderMock) Reload( cfg *reload.ConfigWithMeta, - factory func(o outputs.Observer, cfg config.Namespace) (outputs.Group, error), + sensitiveLoggerCfg logp.Config, + factory func(o outputs.Observer, cfg config.Namespace, sensitiveLoggerCfg logp.Config) (outputs.Group, error), ) error { r.cfg = cfg return nil diff --git a/libbeat/cmd/test/output.go b/libbeat/cmd/test/output.go index 3290c283c27..ac7e3ba535a 100644 --- a/libbeat/cmd/test/output.go +++ b/libbeat/cmd/test/output.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/idxmgmt" "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/testing" ) @@ -41,7 +42,8 @@ func GenTestOutputCmd(settings instance.Settings) *cobra.Command { } im, _ := idxmgmt.DefaultSupport(nil, b.Info, nil) - output, err := outputs.Load(im, b.Info, nil, b.Config.Output.Name(), b.Config.Output.Config()) + // we use an empty config for the events logger because this is just a output test + output, err := outputs.Load(im, b.Info, nil, b.Config.Output.Name(), b.Config.Output.Config(), logp.Config{}) 
if err != nil { fmt.Fprintf(os.Stderr, "Error initializing output: %s\n", err) os.Exit(1) diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index 4ba73c1b60d..89181dc6961 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -293,3 +293,80 @@ Below are some samples: `2017-12-17T18:54:16.242-0500 INFO [example] logp/core_test.go:16 some message` `2017-12-17T18:54:16.242-0500 INFO [example] logp/core_test.go:19 some message {"x": 1}` + +ifndef::serverless[] +[float] +=== Configuration options for sensitive logger + +Some outputs will log raw events on errors like indexing errors in the +Elasticsearch output, to prevent logging raw events (that may contain +sensitive information) together with other log messages, a different +log file, only for log entries containing raw events, is used. It will +use the same level, selectors and all other configurations from the +default logger, but it will have it's own file configuration. + +Having a different log file for raw events also prevents event data +from drowning out the regular log files. + +IMPORTANT: No matter the default logger output configuration, raw events +will **always** be logged to a file configured by `logging.sensitive.files`. + +[float] +==== `logging.sensitive.files.path` + +The directory that log files are written to. The default is the logs path. See +the <> section for details. + +[float] +==== `logging.sensitive.files.name` + +The name of the file that logs are written to. The default is '{beatname_lc}'-sensitive. + +[float] +==== `logging.sensitive.files.rotateeverybytes` + +The maximum size of a log file. If the limit is reached, a new log file is +generated. The default size limit is 5242880 (5 MB). + +[float] +==== `logging.sensitive.files.keepfiles` + +The number of most recent rotated log files to keep on disk. Older files are +deleted during log rotation. The default value is 5. 
The `keepfiles` option has +to be in the range of 2 to 1024 files. + +[float] +==== `logging.sensitive.files.permissions` + +The permissions mask to apply when rotating log files. The default value is +0600. The `permissions` option must be a valid Unix-style file permissions mask +expressed in octal notation. In Go, numbers in octal notation must start with +'0'. + +The most permissive mask allowed is 0640. If a higher permissions mask is +specified via this setting, it will be subject to an umask of 0027. + +This option is not supported on Windows. + +Examples: + +* 0640: give read and write access to the file owner, and read access to members of the group associated with the file. +* 0600: give read and write access to the file owner, and no access to all others. + +[float] +==== `logging.sensitive.files.interval` + +Enable log file rotation on time intervals in addition to size-based rotation. +Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h +are boundary-aligned with minutes, hours, days, weeks, months, and years as +reported by the local system clock. All other intervals are calculated from the +unix epoch. Defaults to disabled. + +[float] +==== `logging.sensitive.files.rotateonstartup` + +If the log file already exists on startup, immediately rotate it and start +writing to a new file instead of appending to the existing one. Defaults to +true.
+endif::serverless[] + diff --git a/libbeat/outputs/console/console.go b/libbeat/outputs/console/console.go index b81bf336348..753fe0b4b30 100644 --- a/libbeat/outputs/console/console.go +++ b/libbeat/outputs/console/console.go @@ -51,6 +51,7 @@ func makeConsole( beat beat.Info, observer outputs.Observer, cfg *config.C, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { config := defaultConfig err := cfg.Unpack(&config) diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 8aeef2c623e..048b90e487a 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -55,7 +55,8 @@ type Client struct { observer outputs.Observer NonIndexableAction string - log *logp.Logger + log *logp.Logger + sensitiveLogger *logp.Logger } // ClientSettings contains the settings for a client. @@ -81,6 +82,8 @@ const ( // NewClient instantiates a new client. func NewClient( + logger *logp.Logger, + sensitiveLogger *logp.Logger, s ClientSettings, onConnect *callbacksRegistry, ) (*Client, error) { @@ -140,7 +143,8 @@ func NewClient( observer: s.Observer, NonIndexableAction: s.NonIndexableAction, - log: logp.NewLogger("elasticsearch"), + log: logger, + sensitiveLogger: sensitiveLogger, } return client, nil @@ -174,6 +178,8 @@ func (client *Client) Clone() *Client { client.conn.Transport.Proxy.Disable = client.conn.Transport.Proxy.URL == nil c, _ := NewClient( + client.log, + client.sensitiveLogger, ClientSettings{ ConnectionSettings: connection, Index: client.index, @@ -431,12 +437,12 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat result, _ := data[i].Content.Meta.HasKey(dead_letter_marker_field) if result { stats.nonIndexable++ - client.log.Errorf("Can't deliver to dead letter index event (status=%v). 
Enable debug logs to view the event and cause.", status) - client.log.Debugf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) + client.log.Errorf("Can't deliver to dead letter index event (status=%v). Look for sensitive-data log file to view the event and cause.", status) + client.sensitiveLogger.Errorf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) // poison pill - this will clog the pipeline if the underlying failure is non transient. } else if client.NonIndexableAction == dead_letter_index { - client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Enable debug logs to view the event and cause.", status) - client.log.Debugf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) + client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Look for sensitive-data log file to view the event and cause.", status) + client.sensitiveLogger.Warnf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) if data[i].Content.Meta == nil { data[i].Content.Meta = mapstr.M{ dead_letter_marker_field: true, @@ -451,8 +457,8 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat } } else { // drop stats.nonIndexable++ - client.log.Warnf("Cannot index event (status=%v): dropping event! Enable debug logs to view the event and cause.", status) - client.log.Debugf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) + client.log.Warnf("Cannot index event (status=%v): dropping event! 
Look for sensitive-data log file to view the event and cause.", status) + client.sensitiveLogger.Warnf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) continue } } diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 7a8a06becca..2ecafac5f1d 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -422,7 +422,16 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu info := beat.Info{Beat: "libbeat"} // disable ILM if using specified index name im, _ := idxmgmt.DefaultSupport(nil, info, conf.MustNewConfigFrom(map[string]interface{}{"setup.ilm.enabled": "false"})) - output, err := makeES(im, info, stats, config) + + // Creates the events logger configuration for testing, + // it uses the default one but logs to stderr instead of a file. + // This prevents the test from leaving log files behind.
+ sensitiveLoggerCfg := logp.DefaultConfig(logp.DefaultEnvironment) + sensitiveLoggerCfg.Level = logp.DebugLevel + sensitiveLoggerCfg.ToStderr = true + sensitiveLoggerCfg.ToFiles = false + + output, err := makeES(im, info, stats, config, sensitiveLoggerCfg) if err != nil { t.Fatal(err) } diff --git a/libbeat/outputs/elasticsearch/client_proxy_test.go b/libbeat/outputs/elasticsearch/client_proxy_test.go index e3fd914bbe7..9898e38c58d 100644 --- a/libbeat/outputs/elasticsearch/client_proxy_test.go +++ b/libbeat/outputs/elasticsearch/client_proxy_test.go @@ -36,6 +36,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/atomic" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/beats/v7/libbeat/outputs/outil" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) @@ -204,7 +205,7 @@ func doClientPing(t *testing.T) { clientSettings.Transport.Proxy.URL = &proxyURL } - client, err := NewClient(clientSettings, nil) + client, err := NewClient(logp.L(), logp.L(), clientSettings, nil) require.NoError(t, err) // This ping won't succeed; we aren't testing end-to-end communication diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index 58e5f3ee5e2..ed4d1a67fb2 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -90,6 +90,8 @@ func (bm *batchMock) RetryEvents(events []publisher.Event) { func TestPublish(t *testing.T) { makePublishTestClient := func(t *testing.T, url string) *Client { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), ConnectionSettings: eslegclient.ConnectionSettings{URL: url}, @@ -248,6 +250,8 @@ func TestPublish(t *testing.T) { func TestCollectPublishFailsNone(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -272,6 
+276,8 @@ func TestCollectPublishFailsNone(t *testing.T) { func TestCollectPublishFailMiddle(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -302,6 +308,8 @@ func TestCollectPublishFailMiddle(t *testing.T) { func TestCollectPublishFailDeadLetterQueue(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "dead_letter_index", @@ -361,6 +369,8 @@ func TestCollectPublishFailDeadLetterQueue(t *testing.T) { func TestCollectPublishFailDrop(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -405,6 +415,8 @@ func TestCollectPublishFailDrop(t *testing.T) { func TestCollectPublishFailAll(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -434,6 +446,8 @@ func TestCollectPipelinePublishFail(t *testing.T) { logp.TestingSetup(logp.WithSelectors("elasticsearch")) client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -481,6 +495,8 @@ func TestCollectPipelinePublishFail(t *testing.T) { func BenchmarkCollectPublishFailsNone(b *testing.B) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -510,6 +526,8 @@ func BenchmarkCollectPublishFailsNone(b *testing.B) { func BenchmarkCollectPublishFailMiddle(b *testing.B) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -540,6 +558,8 @@ func BenchmarkCollectPublishFailMiddle(b *testing.B) { func BenchmarkCollectPublishFailAll(b *testing.B) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: 
outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -589,17 +609,20 @@ func TestClientWithHeaders(t *testing.T) { })) defer ts.Close() - client, err := NewClient(ClientSettings{ - Observer: outputs.NewNilObserver(), - ConnectionSettings: eslegclient.ConnectionSettings{ - URL: ts.URL, - Headers: map[string]string{ - "host": "myhost.local", - "X-Test": "testing value", + client, err := NewClient( + logp.L(), + logp.L(), + ClientSettings{ + Observer: outputs.NewNilObserver(), + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: ts.URL, + Headers: map[string]string{ + "host": "myhost.local", + "X-Test": "testing value", + }, }, - }, - Index: outil.MakeSelector(outil.ConstSelectorExpr("test", outil.SelectorLowerCase)), - }, nil) + Index: outil.MakeSelector(outil.ConstSelectorExpr("test", outil.SelectorLowerCase)), + }, nil) assert.NoError(t, err) // simple ping @@ -667,6 +690,8 @@ func TestBulkEncodeEvents(t *testing.T) { } client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), Index: index, @@ -743,6 +768,8 @@ func TestBulkEncodeEventsWithOpType(t *testing.T) { } client, _ := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), Index: index, @@ -786,13 +813,16 @@ func TestClientWithAPIKey(t *testing.T) { })) defer ts.Close() - client, err := NewClient(ClientSettings{ - Observer: outputs.NewNilObserver(), - ConnectionSettings: eslegclient.ConnectionSettings{ - URL: ts.URL, - APIKey: "hyokHG4BfWk5viKZ172X:o45JUkyuS--yiSAuuxl8Uw", - }, - }, nil) + client, err := NewClient( + logp.L(), + logp.L(), + ClientSettings{ + Observer: outputs.NewNilObserver(), + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: ts.URL, + APIKey: "hyokHG4BfWk5viKZ172X:o45JUkyuS--yiSAuuxl8Uw", + }, + }, nil) assert.NoError(t, err) // This connection will fail since the server doesn't return a valid @@ -806,6 +836,8 @@ func TestClientWithAPIKey(t *testing.T) { func 
TestPublishEventsWithBulkFiltering(t *testing.T) { makePublishTestClient := func(t *testing.T, url string, configParams map[string]string) *Client { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), ConnectionSettings: eslegclient.ConnectionSettings{ diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index 649168eb11b..e44548c826b 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -18,6 +18,8 @@ package elasticsearch import ( + "go.uber.org/zap" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" @@ -38,8 +40,14 @@ func makeES( beat beat.Info, observer outputs.Observer, cfg *config.C, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { log := logp.NewLogger(logSelector) + sensitiveLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") + if !cfg.HasField("bulk_max_size") { if err := cfg.SetInt("bulk_max_size", -1, defaultBulkSize); err != nil { return outputs.Fail(err) @@ -110,27 +118,30 @@ func makeES( } var client outputs.NetworkClient - client, err = NewClient(ClientSettings{ - ConnectionSettings: eslegclient.ConnectionSettings{ - URL: esURL, - Beatname: beat.Beat, - Kerberos: esConfig.Kerberos, - Username: esConfig.Username, - Password: esConfig.Password, - APIKey: esConfig.APIKey, - Parameters: params, - Headers: esConfig.Headers, - CompressionLevel: esConfig.CompressionLevel, - Observer: observer, - EscapeHTML: esConfig.EscapeHTML, - Transport: esConfig.Transport, - IdleConnTimeout: esConfig.Transport.IdleConnTimeout, - }, - Index: index, - Pipeline: pipeline, - 
Observer: observer, - NonIndexableAction: policy.action(), - }, &connectCallbackRegistry) + client, err = NewClient( + log, + sensitiveLogger, + ClientSettings{ + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: esURL, + Beatname: beat.Beat, + Kerberos: esConfig.Kerberos, + Username: esConfig.Username, + Password: esConfig.Password, + APIKey: esConfig.APIKey, + Parameters: params, + Headers: esConfig.Headers, + CompressionLevel: esConfig.CompressionLevel, + Observer: observer, + EscapeHTML: esConfig.EscapeHTML, + Transport: esConfig.Transport, + IdleConnTimeout: esConfig.Transport.IdleConnTimeout, + }, + Index: index, + Pipeline: pipeline, + Observer: observer, + NonIndexableAction: policy.action(), + }, &connectCallbackRegistry) if err != nil { return outputs.Fail(err) } diff --git a/libbeat/outputs/elasticsearch/elasticsearch_test.go b/libbeat/outputs/elasticsearch/elasticsearch_test.go index 45db313d903..09f4ad0bf46 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch_test.go +++ b/libbeat/outputs/elasticsearch/elasticsearch_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -121,6 +122,8 @@ func TestPipelineSelection(t *testing.T) { selector, err := buildPipelineSelector(config.MustNewConfigFrom(test.cfg)) client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Pipeline: &selector, }, diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index 4ddc5955d6e..2cd92b42355 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -23,6 +23,8 @@ import ( "path/filepath" "time" + "go.uber.org/zap" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/codec" @@ -37,12 +39,13 @@ func 
init() { } type fileOutput struct { - log *logp.Logger - filePath string - beat beat.Info - observer outputs.Observer - rotator *file.Rotator - codec codec.Codec + log *logp.Logger + sensitiveLogger *logp.Logger + filePath string + beat beat.Info + observer outputs.Observer + rotator *file.Rotator + codec codec.Codec } // makeFileout instantiates a new file output instance. @@ -51,6 +54,7 @@ func makeFileout( beat beat.Info, observer outputs.Observer, cfg *c.C, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { foConfig := defaultConfig() if err := cfg.Unpack(&foConfig); err != nil { @@ -60,10 +64,17 @@ func makeFileout( // disable bulk support in publisher pipeline _ = cfg.SetInt("bulk_max_size", -1, -1) + logSelector := "file" + sensitiveLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") + fo := &fileOutput{ - log: logp.NewLogger("file"), - beat: beat, - observer: observer, + log: logp.NewLogger(logSelector), + sensitiveLogger: sensitiveLogger, + beat: beat, + observer: observer, } if err := fo.init(beat, foConfig); err != nil { return outputs.Fail(err) @@ -131,7 +142,8 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { } else { out.log.Warnf("Failed to serialize the event: %+v", err) } - out.log.Debugf("Failed event: %v", event) + out.log.Debug("Event logged to sensitive-data log file") + out.sensitiveLogger.Debugf("Failed event: %v", event) dropped++ continue diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 24bbc61145d..6d1baf19452 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -28,6 +28,7 @@ import ( "github.com/Shopify/sarama" "github.com/eapache/go-resiliency/breaker" + "go.uber.org/zap" 
"github.com/elastic/beats/v7/libbeat/common/fmtstr" "github.com/elastic/beats/v7/libbeat/outputs" @@ -40,16 +41,17 @@ import ( ) type client struct { - log *logp.Logger - observer outputs.Observer - hosts []string - topic outil.Selector - key *fmtstr.EventFormatString - index string - codec codec.Codec - config sarama.Config - mux sync.Mutex - done chan struct{} + log *logp.Logger + sensitiveLogger *logp.Logger + observer outputs.Observer + hosts []string + topic outil.Selector + key *fmtstr.EventFormatString + index string + codec codec.Codec + config sarama.Config + mux sync.Mutex + done chan struct{} producer sarama.AsyncProducer @@ -81,17 +83,24 @@ func newKafkaClient( headers []header, writer codec.Codec, cfg *sarama.Config, + sensitiveLoggerCfg logp.Config, ) (*client, error) { + sensitiveLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") + c := &client{ - log: logp.NewLogger(logSelector), - observer: observer, - hosts: hosts, - topic: topic, - key: key, - index: strings.ToLower(index), - codec: writer, - config: *cfg, - done: make(chan struct{}), + log: logp.NewLogger(logSelector), + sensitiveLogger: sensitiveLogger, + observer: observer, + hosts: hosts, + topic: topic, + key: key, + index: strings.ToLower(index), + codec: writer, + config: *cfg, + done: make(chan struct{}), } if len(headers) != 0 { @@ -228,7 +237,8 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { serializedEvent, err := c.codec.Encode(c.index, event) if err != nil { if c.log.IsDebug() { - c.log.Debugf("failed event: %v", event) + c.log.Debug("failed event logged to events logger file") + c.sensitiveLogger.Debugf("failed event: %v", event) } return nil, err } diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go 
index 0c856ea425d..93b3edfa2fb 100644 --- a/libbeat/outputs/kafka/kafka.go +++ b/libbeat/outputs/kafka/kafka.go @@ -43,6 +43,7 @@ func makeKafka( beat beat.Info, observer outputs.Observer, cfg *config.C, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { log := logp.NewLogger(logSelector) log.Debug("initialize kafka output") @@ -72,7 +73,7 @@ func makeKafka( return outputs.Fail(err) } - client, err := newKafkaClient(observer, hosts, beat.IndexPrefix, kConfig.Key, topic, kConfig.Headers, codec, libCfg) + client, err := newKafkaClient(observer, hosts, beat.IndexPrefix, kConfig.Key, topic, kConfig.Headers, codec, libCfg, sensitiveLoggerCfg) if err != nil { return outputs.Fail(err) } diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index 29fc72ac859..b6e5be46d0a 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -258,7 +258,7 @@ func TestKafkaPublish(t *testing.T) { } t.Run(name, func(t *testing.T) { - grp, err := makeKafka(nil, beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, outputs.NewNilObserver(), cfg) + grp, err := makeKafka(nil, beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, outputs.NewNilObserver(), cfg, logp.Config{}) if err != nil { t.Fatal(err) } diff --git a/libbeat/outputs/logstash/logstash.go b/libbeat/outputs/logstash/logstash.go index 072ec049f6f..cc5a5025164 100644 --- a/libbeat/outputs/logstash/logstash.go +++ b/libbeat/outputs/logstash/logstash.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/transport" "github.com/elastic/elastic-agent-libs/transport/tlscommon" ) @@ -40,6 +41,7 @@ func makeLogstash( beat beat.Info, observer outputs.Observer, cfg *conf.C, + sensitiveLoggerCfg logp.Config, ) 
(outputs.Group, error) { lsConfig, err := readConfig(cfg, beat) if err != nil { diff --git a/libbeat/outputs/logstash/logstash_integration_test.go b/libbeat/outputs/logstash/logstash_integration_test.go index 2cfbcd03974..fe44ff92bf0 100644 --- a/libbeat/outputs/logstash/logstash_integration_test.go +++ b/libbeat/outputs/logstash/logstash_integration_test.go @@ -39,6 +39,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/outest" "github.com/elastic/beats/v7/libbeat/outputs/outil" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) @@ -193,7 +194,7 @@ func newTestElasticsearchOutput(t *testing.T, test string) *testOutputer { t.Fatal("init index management:", err) } - grp, err := plugin(im, info, outputs.NewNilObserver(), config) + grp, err := plugin(im, info, outputs.NewNilObserver(), config, logp.Config{}) if err != nil { t.Fatalf("init elasticsearch output plugin failed: %v", err) } diff --git a/libbeat/outputs/logstash/logstash_test.go b/libbeat/outputs/logstash/logstash_test.go index fa1b57fb841..226515d1036 100644 --- a/libbeat/outputs/logstash/logstash_test.go +++ b/libbeat/outputs/logstash/logstash_test.go @@ -32,6 +32,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/outest" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" v2 "github.com/elastic/go-lumber/server/v2" ) @@ -181,7 +182,7 @@ func newTestLumberjackOutput( } cfg, _ := conf.NewConfigFrom(config) - grp, err := outputs.Load(nil, beat.Info{}, nil, "logstash", cfg) + grp, err := outputs.Load(nil, beat.Info{}, nil, "logstash", cfg, logp.Config{}) if err != nil { t.Fatalf("init logstash output plugin failed: %v", err) } diff --git a/libbeat/outputs/output_reg.go b/libbeat/outputs/output_reg.go 
index 3d2675c2ce2..de0d5a2714d 100644 --- a/libbeat/outputs/output_reg.go +++ b/libbeat/outputs/output_reg.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/publisher/queue" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) var outputReg = map[string]Factory{} @@ -32,7 +33,8 @@ type Factory func( im IndexManager, beat beat.Info, stats Observer, - cfg *config.C) (Group, error) + cfg *config.C, + sensitiveLoggerCfg logp.Config) (Group, error) // IndexManager provides additional index related services to the outputs. type IndexManager interface { @@ -81,6 +83,7 @@ func Load( stats Observer, name string, config *config.C, + sensitiveLoggerCfg logp.Config, ) (Group, error) { factory := FindFactory(name) if factory == nil { @@ -90,5 +93,5 @@ func Load( if stats == nil { stats = NewNilObserver() } - return factory(im, info, stats, config) + return factory(im, info, stats, config, sensitiveLoggerCfg) } diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index 5a299749aac..476ba9f8cd9 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -26,6 +26,7 @@ import ( "time" "github.com/gomodule/redigo/redis" + "go.uber.org/zap" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" @@ -47,7 +48,8 @@ type publishFn func( ) ([]publisher.Event, error) type client struct { - log *logp.Logger + log *logp.Logger + sensitiveLogger *logp.Logger *transport.Client observer outputs.Observer index string @@ -74,18 +76,26 @@ func newClient( pass string, db int, key outil.Selector, dt redisDataType, index string, codec codec.Codec, + sensitiveLoggerCfg logp.Config, ) *client { + logSelector := "redis" + sensitiveLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + sensitiveLogger = 
sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") + return &client{ - log: logp.NewLogger("redis"), - Client: tc, - observer: observer, - timeout: timeout, - password: pass, - index: strings.ToLower(index), - db: db, - dataType: dt, - key: key, - codec: codec, + log: logp.NewLogger(logSelector), + sensitiveLogger: sensitiveLogger, + Client: tc, + observer: observer, + timeout: timeout, + password: pass, + index: strings.ToLower(index), + db: db, + dataType: dt, + key: key, + codec: codec, } } @@ -227,7 +237,7 @@ func (c *client) publishEventsBulk(conn redis.Conn, command string) publishFn { args := make([]interface{}, 1, len(data)+1) args[0] = dest - okEvents, args := serializeEvents(c.log, args, 1, data, c.index, c.codec) + okEvents, args := serializeEvents(c.log, c.sensitiveLogger, args, 1, data, c.index, c.codec) c.observer.Dropped(len(data) - len(okEvents)) if (len(args) - 1) == 0 { return nil, nil @@ -253,7 +263,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF return func(key outil.Selector, data []publisher.Event) ([]publisher.Event, error) { var okEvents []publisher.Event serialized := make([]interface{}, 0, len(data)) - okEvents, serialized = serializeEvents(c.log, serialized, 0, data, c.index, c.codec) + okEvents, serialized = serializeEvents(c.log, c.sensitiveLogger, serialized, 0, data, c.index, c.codec) c.observer.Dropped(len(data) - len(okEvents)) if len(serialized) == 0 { return nil, nil @@ -308,6 +318,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF func serializeEvents( log *logp.Logger, + sensitiveLogger *logp.Logger, to []interface{}, i int, data []publisher.Event, @@ -319,8 +330,8 @@ func serializeEvents( for _, d := range data { serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v", err) - 
log.Debugf("Failed event: %v", d.Content) + log.Errorf("Encoding event failed with error: %+v. Look for sensitive-data log file to view the event", err) + sensitiveLogger.Debugf("Failed event: %v", d.Content) goto failLoop } @@ -337,8 +348,8 @@ failLoop: for _, d := range rest { serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v", err) - log.Debugf("Failed event: %v", d.Content) + log.Errorf("Encoding event failed with error: %+v. Look for sensitive-data log file to view the event", err) + sensitiveLogger.Debugf("Failed event: %v", d.Content) i++ continue } diff --git a/libbeat/outputs/redis/redis.go b/libbeat/outputs/redis/redis.go index 9814d6abee7..8b80cfc5206 100644 --- a/libbeat/outputs/redis/redis.go +++ b/libbeat/outputs/redis/redis.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/codec" "github.com/elastic/beats/v7/libbeat/outputs/outil" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/transport" "github.com/elastic/elastic-agent-libs/transport/tlscommon" ) @@ -51,6 +52,7 @@ func makeRedis( beat beat.Info, observer outputs.Observer, cfg *config.C, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { if !cfg.HasField("index") { @@ -161,7 +163,7 @@ func makeRedis( } client := newClient(conn, observer, rConfig.Timeout, - pass, rConfig.Db, key, dataType, rConfig.Index, enc) + pass, rConfig.Db, key, dataType, rConfig.Index, enc, sensitiveLoggerCfg) clients[i] = newBackoffClient(client, rConfig.Backoff.Init, rConfig.Backoff.Max) } diff --git a/libbeat/outputs/redis/redis_integration_test.go b/libbeat/outputs/redis/redis_integration_test.go index dfd48dc75d2..3627203f7c1 100644 --- a/libbeat/outputs/redis/redis_integration_test.go +++ b/libbeat/outputs/redis/redis_integration_test.go @@ -37,6 +37,7 @@ import ( _ "github.com/elastic/beats/v7/libbeat/outputs/codec/json" 
"github.com/elastic/beats/v7/libbeat/outputs/outest" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -330,7 +331,7 @@ func newRedisTestingOutput(t *testing.T, cfg map[string]interface{}) outputs.Cli t.Fatalf("redis output module not registered") } - out, err := plugin(nil, beat.Info{Beat: testBeatname, Version: testBeatversion}, outputs.NewNilObserver(), config) + out, err := plugin(nil, beat.Info{Beat: testBeatname, Version: testBeatversion}, outputs.NewNilObserver(), config, logp.Config{}) if err != nil { t.Fatalf("Failed to initialize redis output: %v", err) } diff --git a/libbeat/outputs/redis/redis_test.go b/libbeat/outputs/redis/redis_test.go index 6e9d70f5786..7640a2957c0 100644 --- a/libbeat/outputs/redis/redis_test.go +++ b/libbeat/outputs/redis/redis_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" _ "github.com/elastic/beats/v7/libbeat/outputs/codec/json" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -108,7 +109,7 @@ func TestMakeRedis(t *testing.T) { t.Run(name, func(t *testing.T) { cfg, err := config.NewConfigFrom(test.config) assert.NoError(t, err) - groups, err := makeRedis(nil, beatInfo, outputs.NewNilObserver(), cfg) + groups, err := makeRedis(nil, beatInfo, outputs.NewNilObserver(), cfg, logp.Config{}) assert.Equal(t, err == nil, test.valid) if err != nil && test.valid { t.Log(err) diff --git a/libbeat/outputs/shipper/shipper.go b/libbeat/outputs/shipper/shipper.go index fe19a36b31d..8caa97b25f2 100644 --- a/libbeat/outputs/shipper/shipper.go +++ b/libbeat/outputs/shipper/shipper.go @@ -92,6 +92,7 @@ func makeShipper( beat beat.Info, observer outputs.Observer, cfg *conf.C, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { config := defaultConfig() diff --git a/libbeat/outputs/shipper/shipper_test.go 
b/libbeat/outputs/shipper/shipper_test.go index e26d44635af..53d55c5a45b 100644 --- a/libbeat/outputs/shipper/shipper_test.go +++ b/libbeat/outputs/shipper/shipper_test.go @@ -42,6 +42,7 @@ import ( "github.com/elastic/beats/v7/libbeat/publisher" "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-shipper-client/pkg/helpers" pb "github.com/elastic/elastic-agent-shipper-client/pkg/proto" @@ -583,6 +584,7 @@ func createShipperClient(t *testing.T, cfg *config.C, observer outputs.Observer) beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, observer, cfg, + logp.Config{}, ) require.NoError(t, err) require.Len(t, group.Clients, 1) diff --git a/libbeat/publisher/pipeline/controller.go b/libbeat/publisher/pipeline/controller.go index 1c480c01bce..5b49bccfb2d 100644 --- a/libbeat/publisher/pipeline/controller.go +++ b/libbeat/publisher/pipeline/controller.go @@ -180,7 +180,8 @@ func (c *outputController) Set(outGrp outputs.Group) { // Reload the output func (c *outputController) Reload( cfg *reload.ConfigWithMeta, - outFactory func(outputs.Observer, conf.Namespace) (outputs.Group, error), + sensitiveLoggerCfg logp.Config, + outFactory func(outputs.Observer, conf.Namespace, logp.Config) (outputs.Group, error), ) error { outCfg := conf.Namespace{} if cfg != nil { @@ -191,7 +192,7 @@ func (c *outputController) Reload( output, err := loadOutput(c.monitors, func(stats outputs.Observer) (string, outputs.Group, error) { name := outCfg.Name() - out, err := outFactory(stats, outCfg) + out, err := outFactory(stats, outCfg, sensitiveLoggerCfg) return name, out, err }) if err != nil { diff --git a/libbeat/publisher/pipeline/pipeline.go b/libbeat/publisher/pipeline/pipeline.go index cf03163750e..3414950538f 100644 --- a/libbeat/publisher/pipeline/pipeline.go +++ b/libbeat/publisher/pipeline/pipeline.go @@ 
-111,7 +111,8 @@ const ( type OutputReloader interface { Reload( cfg *reload.ConfigWithMeta, - factory func(outputs.Observer, conf.Namespace) (outputs.Group, error), + sensitiveLoggerCfg logp.Config, + factory func(outputs.Observer, conf.Namespace, logp.Config) (outputs.Group, error), ) error } diff --git a/libbeat/publisher/pipeline/stress/out.go b/libbeat/publisher/pipeline/stress/out.go index d1014b8d782..5fa5260861a 100644 --- a/libbeat/publisher/pipeline/stress/out.go +++ b/libbeat/publisher/pipeline/stress/out.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/publisher" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) type testOutput struct { @@ -55,7 +56,7 @@ func init() { outputs.RegisterType("test", makeTestOutput) } -func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Observer, cfg *conf.C) (outputs.Group, error) { +func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Observer, cfg *conf.C, sensitiveLoggerCfg logp.Config) (outputs.Group, error) { config := defaultTestOutputConfig if err := cfg.Unpack(&config); err != nil { return outputs.Fail(err) diff --git a/libbeat/publisher/pipeline/stress/run.go b/libbeat/publisher/pipeline/stress/run.go index ee118d502eb..622f303173d 100644 --- a/libbeat/publisher/pipeline/stress/run.go +++ b/libbeat/publisher/pipeline/stress/run.go @@ -76,7 +76,7 @@ func RunTests( processing, func(stat outputs.Observer) (string, outputs.Group, error) { cfg := config.Output - out, err := outputs.Load(nil, info, stat, cfg.Name(), cfg.Config()) + out, err := outputs.Load(nil, info, stat, cfg.Name(), cfg.Config(), logp.Config{}) return cfg.Name(), out, err }, ) diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index 046c578d7cd..583c348fa5d 100644 --- a/libbeat/tests/integration/framework.go +++ 
b/libbeat/tests/integration/framework.go @@ -366,7 +366,11 @@ func (b *BeatProc) WriteConfigFile(cfg string) { // when the test ends. func (b *BeatProc) openLogFile() *os.File { t := b.t - glob := fmt.Sprintf("%s-*.ndjson", filepath.Join(b.tempDir, b.beatName)) + // Beats can produce two different log files, to make sure we're + // reading the normal one we add the year to the glob. The default + // log file name looks like: filebeat-20240116.ndjson + year := time.Now().Year() + glob := fmt.Sprintf("%s-%d*.ndjson", filepath.Join(b.tempDir, b.beatName), year) files, err := filepath.Glob(glob) if err != nil { t.Fatalf("could not expand log file glob: %s", err) diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index d6b8b9e9475..53208c0730a 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2394,6 +2394,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. 
The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index a148cfb3b51..2884d7aa617 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -142,6 +142,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. 
+ #name: metricbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt index 7402ff16caa..409d7406632 100644 --- a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt +++ b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt @@ -10,4 +10,4 @@ rsa==4.7.2 s3transfer==0.3.3 six==1.14.0 urllib3==1.26.5 -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/metricbeat/tests/system/requirements.txt b/metricbeat/tests/system/requirements.txt index 98713863fc0..e58c701d9db 100644 --- a/metricbeat/tests/system/requirements.txt +++ b/metricbeat/tests/system/requirements.txt @@ -1,4 +1,4 @@ kafka-python==1.4.3 elasticsearch==7.1.0 semver==2.8.1 -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 1e013fb081f..4346ef2ea5e 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2010,6 +2010,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. 
It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index fea1a2fb115..bd111eddab6 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -270,6 +270,20 @@ processors: # "publisher", "service". 
#logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/packetbeat/tests/system/gen/memcache/requirements.txt b/packetbeat/tests/system/gen/memcache/requirements.txt index 1666df74b64..a1dbb5b952c 100644 --- a/packetbeat/tests/system/gen/memcache/requirements.txt +++ b/packetbeat/tests/system/gen/memcache/requirements.txt @@ -1,2 +1,2 @@ pylibmc -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 8b7bad94c23..d5f8b5383a0 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1426,6 +1426,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. 
It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index f6d5ac9069e..791fdd2a78c 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -155,6 +155,20 @@ processors: # "publisher", "service". 
#logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 45d1c4af851..23279e9f077 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1600,6 +1600,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). 
+ #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/auditbeat/auditbeat.yml b/x-pack/auditbeat/auditbeat.yml index 7bdea6578cc..ca5a0f6b671 100644 --- a/x-pack/auditbeat/auditbeat.yml +++ b/x-pack/auditbeat/auditbeat.yml @@ -196,6 +196,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. 
+#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go index de3436156b4..5fd4c8eb83b 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go +++ b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/logp/configure" ) // load pipeline starts up a new pipeline with the given config @@ -66,6 +67,16 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * Processors: processing, } + // Get the default/current logging configuration + // we need some defaults to be populates otherwise Unpack will + // fail + sensitiveLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + + // Ensure the default filename is set + if sensitiveLoggerCfg.Files.Name == "" { + sensitiveLoggerCfg.Files.Name = "dockerlogbeat-sensitive-data" + } + pipeline, err := pipeline.LoadWithSettings( info, pipeline.Monitors{ @@ -76,7 +87,7 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * pipelineCfg, func(stat outputs.Observer) (string, outputs.Group, error) { cfg := config.Output - out, err := outputs.Load(idxMgr, info, stat, cfg.Name(), cfg.Config()) + out, err := outputs.Load(idxMgr, info, stat, cfg.Name(), cfg.Config(), sensitiveLoggerCfg) 
return cfg.Name(), out, err }, settings, diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 14308c2cce1..5f97731ecd5 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -5016,6 +5016,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. 
+ #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml index aa50779b922..810604dbe1d 100644 --- a/x-pack/filebeat/filebeat.yml +++ b/x-pack/filebeat/filebeat.yml @@ -186,6 +186,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/filebeat/input/lumberjack/server_test.go b/x-pack/filebeat/input/lumberjack/server_test.go index c7db6abf0b5..d03bf1353b9 100644 --- a/x-pack/filebeat/input/lumberjack/server_test.go +++ b/x-pack/filebeat/input/lumberjack/server_test.go @@ -52,8 +52,8 @@ func TestServer(t *testing.T) { c := makeTestConfig() c.TLS = serverConf // Disable mTLS requirements in the server. 
- var clientAuth = tlscommon.TLSClientAuthNone - c.TLS.ClientAuth = &clientAuth + clientAuth := tlscommon.TLSClientAuthNone + c.TLS.ClientAuth = &clientAuth // tls.NoClientCert c.TLS.VerificationMode = tlscommon.VerifyNone testSendReceive(t, c, 10, clientConf) @@ -221,12 +221,12 @@ func tlsSetup(t *testing.T) (clientConfig *tls.Config, serverConfig *tlscommon.S MinVersion: tls.VersionTLS12, } - var clientAuth = tlscommon.TLSClientAuthRequired - + clientAuth := tlscommon.TLSClientAuthRequired serverConfig = &tlscommon.ServerConfig{ // NOTE: VerifyCertificate is ineffective unless ClientAuth is set to RequireAndVerifyClientCert. VerificationMode: tlscommon.VerifyCertificate, - ClientAuth: &clientAuth, // tls.RequireAndVerifyClientCert + // Unfortunately ServerConfig uses an unexported type in an exported field. + ClientAuth: &clientAuth, // tls.RequireAndVerifyClientCert CAs: []string{ string(certData.ca.CertPEM(t)), }, diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 4e939b686a6..1a563d5be0d 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1264,6 +1264,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). 
+ #path: /var/log/functionbeat + + # The name of the files where the logs are written to. + #name: functionbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/functionbeat/functionbeat.yml b/x-pack/functionbeat/functionbeat.yml index 9a2627ca44f..39034b2496d 100644 --- a/x-pack/functionbeat/functionbeat.yml +++ b/x-pack/functionbeat/functionbeat.yml @@ -365,6 +365,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. 
It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/functionbeat + + # The name of the files where the logs are written to. + #name: functionbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/functionbeat/tests/system/requirements.txt b/x-pack/functionbeat/tests/system/requirements.txt index c2399b66f80..a6da4ed167d 100644 --- a/x-pack/functionbeat/tests/system/requirements.txt +++ b/x-pack/functionbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 2b2f28382e9..da19a7c7db3 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1636,6 +1636,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. 
+#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/heartbeat/heartbeat.yml b/x-pack/heartbeat/heartbeat.yml index 8accb212db4..3c1f3756420 100644 --- a/x-pack/heartbeat/heartbeat.yml +++ b/x-pack/heartbeat/heartbeat.yml @@ -152,6 +152,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. 
It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index a22db4f7f8c..95b2f101bcb 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2955,6 +2955,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. 
+ #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml index a148cfb3b51..2884d7aa617 100644 --- a/x-pack/metricbeat/metricbeat.yml +++ b/x-pack/metricbeat/metricbeat.yml @@ -142,6 +142,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have its own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location).
+ #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index 1de9a267ae5..78690d219ef 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -983,6 +983,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/osquerybeat + + # The name of the files where the logs are written to. + #name: osquerybeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation.
+ #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/osquerybeat/osquerybeat.yml b/x-pack/osquerybeat/osquerybeat.yml index 5a3dcde51e9..1e5fa364f90 100644 --- a/x-pack/osquerybeat/osquerybeat.yml +++ b/x-pack/osquerybeat/osquerybeat.yml @@ -128,6 +128,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have its own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/osquerybeat + + # The name of the files where the logs are written to. + #name: osquerybeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch.
The diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 1e013fb081f..4346ef2ea5e 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2010,6 +2010,46 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled.
+ #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index fea1a2fb115..bd111eddab6 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -270,6 +270,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have its own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 528560748fb..3c14ac0e385 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1428,6 +1428,46 @@ logging.files: # file. Defaults to true.
# rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-sensitive-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 5 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster.
This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/winlogbeat/winlogbeat.yml b/x-pack/winlogbeat/winlogbeat.yml index bf7d2f819eb..efafa909a61 100644 --- a/x-pack/winlogbeat/winlogbeat.yml +++ b/x-pack/winlogbeat/winlogbeat.yml @@ -156,6 +156,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have its own file configuration. +#logging.sensitive: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-sensitive-data + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The