32 changes: 32 additions & 0 deletions .chloggen/loadbalancingexporter-43644-add-quarantine.yaml
@@ -0,0 +1,32 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog)
component: exporter/loadbalancing

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Add a quarantine mechanism for unhealthy endpoints in the DNS resolver.

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [43644]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext: |
- Added a quarantine feature for unhealthy endpoints, delaying retries to those endpoints until a configurable quarantine period has elapsed (default: 30s).
- Quarantine settings are configurable via the DNS resolver's `quarantine` section.
- The load balancer will avoid sending data to endpoints marked as unhealthy until their quarantine period expires, using healthy endpoints in the hash ring without triggering unnecessary ring updates.
- This increases resilience by reducing the risk of the exporter getting stuck in a degraded state, repeatedly sending data to endpoints that keep failing.
- This feature currently applies only to the DNS resolver.
# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
3 changes: 3 additions & 0 deletions exporter/loadbalancingexporter/README.md
@@ -95,6 +95,9 @@ Refer to [config.yaml](./testdata/config.yaml) for detailed examples on using th
* `port` port to be used for exporting the traces to the IP addresses resolved from `hostname`. If `port` is not specified, the default port 4317 is used.
* `interval` resolver interval in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `5s` will be used.
* `timeout` resolver timeout in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `1s` will be used.
* `quarantine` node: enables a quarantine mechanism that stops the exporter from sending data to endpoints that previously failed (i.e., are marked as unhealthy) until a configurable quarantine period has passed. While an endpoint is quarantined, only the healthy endpoints in the hash ring are used to dispatch data, avoiding unnecessary hash ring updates and repeated attempts against unhealthy endpoints. See the example configuration below.
* `enabled` toggle to activate the endpoint quarantine logic. Default is `false`.
* `duration` how long, in go-Duration format (e.g. `30s`, `1m`), an unhealthy endpoint remains in quarantine before the exporter retries it. Defaults to `30s` if not specified.
* The `k8s` node accepts the following optional properties:
* `service` Kubernetes service to resolve, e.g. `lb-svc.lb-ns`. If no namespace is specified, an attempt will be made to infer the namespace for this collector, and if this fails it will fall back to the `default` namespace.
* `ports` port to be used for exporting the traces to the addresses resolved from `service`. If `ports` is not specified, the default port 4317 is used. When multiple ports are specified, two backends are added to the load balancer as if they were at different pods.
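As an illustrative sketch of the `quarantine` settings documented above (the hostname is a placeholder; the remaining keys match this change), a DNS resolver configuration could look like:

```yaml
exporters:
  loadbalancing:
    resolver:
      dns:
        # placeholder hostname for the backends to resolve
        hostname: backends.example.com
        port: "4317"
        interval: 5s
        timeout: 1s
        quarantine:
          # opt in to quarantining endpoints whose exports failed
          enabled: true
          # how long a failed endpoint stays excluded before it is retried (default: 30s)
          duration: 30s
```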
20 changes: 16 additions & 4 deletions exporter/loadbalancingexporter/config.go
@@ -78,10 +78,11 @@ type StaticResolver struct {

// DNSResolver defines the configuration for the DNS resolver
type DNSResolver struct {
Hostname string `mapstructure:"hostname"`
Port string `mapstructure:"port"`
Interval time.Duration `mapstructure:"interval"`
Timeout time.Duration `mapstructure:"timeout"`
Hostname string `mapstructure:"hostname"`
Port string `mapstructure:"port"`
Interval time.Duration `mapstructure:"interval"`
Timeout time.Duration `mapstructure:"timeout"`
Quarantine QuarantineSettings `mapstructure:"quarantine"`
// prevent unkeyed literal initialization
_ struct{}
}
@@ -96,6 +97,17 @@ type K8sSvcResolver struct {
_ struct{}
}

// QuarantineSettings defines the configuration for endpoint quarantine behavior
type QuarantineSettings struct {
// Duration specifies how long an unhealthy endpoint should be excluded from load balancing
// after a failure. After this duration, the endpoint will be eligible for retry.
// Default: 30s
Duration time.Duration `mapstructure:"duration"`
Enabled bool `mapstructure:"enabled"`
// prevent unkeyed literal initialization
_ struct{}
}

type AWSCloudMapResolver struct {
NamespaceName string `mapstructure:"namespace"`
ServiceName string `mapstructure:"service_name"`
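To make the shape of the new settings concrete, here is a small, self-contained sketch that mirrors the structs above (local copies, not the package's own types) and applies the documented 30s default when `duration` is left unset; the hostname value is an arbitrary placeholder and the defaulting helper is illustrative, not part of this diff.

```go
package main

import (
	"fmt"
	"time"
)

// Local mirrors of the configuration structs added in config.go, copied here only
// to keep the sketch self-contained.
type QuarantineSettings struct {
	Duration time.Duration
	Enabled  bool
}

type DNSResolver struct {
	Hostname   string
	Port       string
	Interval   time.Duration
	Timeout    time.Duration
	Quarantine QuarantineSettings
}

// applyQuarantineDefault illustrates how the documented 30s default could be applied
// when no duration is configured; the actual defaulting mechanism is not shown in this hunk.
func applyQuarantineDefault(q *QuarantineSettings) {
	if q.Duration <= 0 {
		q.Duration = 30 * time.Second
	}
}

func main() {
	res := DNSResolver{
		Hostname:   "backends.example.com", // placeholder
		Port:       "4317",
		Quarantine: QuarantineSettings{Enabled: true},
	}
	applyQuarantineDefault(&res.Quarantine)
	fmt.Printf("quarantine enabled=%v duration=%s\n", res.Quarantine.Enabled, res.Quarantine.Duration)
}
```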
23 changes: 14 additions & 9 deletions exporter/loadbalancingexporter/consistent_hashing.go
@@ -28,29 +28,34 @@ type ringItem struct {
// hashRing is a consistent hash ring following Karger et al.
type hashRing struct {
// ringItems holds all the positions, used to look up the closest next ring item for a given position
items []ringItem
items []ringItem
endpoints []string
}

// newHashRing builds a new immutable consistent hash ring based on the given endpoints.
func newHashRing(endpoints []string) *hashRing {
items := positionsForEndpoints(endpoints, defaultWeight)
return &hashRing{
items: items,
items: items,
endpoints: endpoints,
}
}

// endpointFor calculates which backend is responsible for the given traceID
func (h *hashRing) endpointFor(identifier []byte) string {
if h == nil {
// perhaps the ring itself couldn't get initialized yet?
return ""
}
// getPosition calculates the position in the ring for the given identifier
func getPosition(identifier []byte) position {
hasher := crc32.NewIEEE()
hasher.Write(identifier)
hash := hasher.Sum32()
pos := hash % maxPositions
return position(pos)
}

return h.findEndpoint(position(pos))
// endpointFor calculates which backend is responsible for the given traceID
func (h *hashRing) endpointFor(identifier []byte) string {
if h == nil {
return ""
}
return h.findEndpoint(getPosition(identifier))
}

// findEndpoint returns the "next" endpoint starting from the given position, or an empty string in case no endpoints are available
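As an aside on the refactor above: `endpointFor` now derives the ring position through the shared `getPosition` helper (CRC-32 IEEE of the identifier, wrapped onto the ring) and then walks to the next ring item. A self-contained sketch of that mapping is below; the `maxPositions` value here is a stand-in, not the package's actual constant.

```go
package main

import (
	"fmt"
	"hash/crc32"
)

type position uint32

// maxPositions is a stand-in value for this sketch; the real constant is defined
// elsewhere in consistent_hashing.go.
const maxPositions = 36000

// getPosition mirrors the helper factored out in this diff: hash the identifier with
// CRC-32 (IEEE) and wrap the result onto the ring.
func getPosition(identifier []byte) position {
	return position(crc32.ChecksumIEEE(identifier) % maxPositions)
}

func main() {
	// Identical identifiers always map to the same position, which is what keeps
	// routing sticky per trace ID across calls.
	for _, id := range []string{"trace-a", "trace-b", "trace-a"} {
		fmt.Printf("%s -> position %d\n", id, getPosition([]byte(id)))
	}
}
```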
41 changes: 40 additions & 1 deletion exporter/loadbalancingexporter/consistent_hashing_test.go
@@ -5,6 +5,7 @@ package loadbalancingexporter

import (
"fmt"
"hash/crc32"
"testing"

"github.com/stretchr/testify/assert"
@@ -46,6 +47,39 @@ func TestEndpointFor(t *testing.T) {
}
}

func TestGetPosition(t *testing.T) {
tests := []struct {
name string
identifier []byte
want position
}{
{
name: "simple case",
identifier: []byte("example"),
want: position(crc32.ChecksumIEEE([]byte("example")) % maxPositions),
},
{
name: "different input",
identifier: []byte("another"),
want: position(crc32.ChecksumIEEE([]byte("another")) % maxPositions),
},
{
name: "empty identifier",
identifier: []byte(""),
want: position(crc32.ChecksumIEEE([]byte("")) % maxPositions),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := getPosition(tt.identifier)
assert.Equal(t, tt.want, got, "getPosition(%q)", tt.identifier)
})
}
}

func TestPositionsFor(t *testing.T) {
// prepare
endpoint := "host1"
Expand Down Expand Up @@ -165,6 +199,7 @@ func TestEqual(t *testing.T) {
[]ringItem{
{pos: position(123), endpoint: "endpoint-1"},
},
[]string{"endpoint-1"},
}

for _, tt := range []struct {
@@ -174,7 +209,7 @@
}{
{
"empty",
&hashRing{[]ringItem{}},
&hashRing{[]ringItem{}, []string{}},
false,
},
{
@@ -188,6 +223,7 @@
[]ringItem{
{pos: position(123), endpoint: "endpoint-1"},
},
[]string{"endpoint-1"},
},
true,
},
@@ -198,6 +234,7 @@
{pos: position(123), endpoint: "endpoint-1"},
{pos: position(124), endpoint: "endpoint-2"},
},
[]string{"endpoint-1", "endpoint-2"},
},
false,
},
@@ -207,6 +244,7 @@
[]ringItem{
{pos: position(124), endpoint: "endpoint-1"},
},
[]string{"endpoint-1"},
},
false,
},
@@ -216,6 +254,7 @@
[]ringItem{
{pos: position(123), endpoint: "endpoint-2"},
},
[]string{"endpoint-2"},
},
false,
},
134 changes: 130 additions & 4 deletions exporter/loadbalancingexporter/loadbalancer.go
@@ -10,6 +10,7 @@ import (
"slices"
"strings"
"sync"
"time"

"go.opentelemetry.io/collector/component"
"go.uber.org/zap"
@@ -38,6 +39,10 @@ type loadBalancer struct {
componentFactory componentFactory
exporters map[string]*wrappedExporter

// Track unhealthy endpoints across all signal types
unhealthyEndpoints map[string]time.Time
healthLock sync.RWMutex

stopped bool
updateLock sync.RWMutex
}
@@ -79,12 +84,14 @@ func newLoadBalancer(logger *zap.Logger, cfg component.Config, factory component

var err error
dnsResolver := oCfg.Resolver.DNS.Get()

res, err = newDNSResolver(
dnsLogger,
dnsResolver.Hostname,
dnsResolver.Port,
dnsResolver.Interval,
dnsResolver.Timeout,
&dnsResolver.Quarantine,
telemetry,
)
if err != nil {
@@ -137,10 +144,11 @@
}

return &loadBalancer{
logger: logger,
res: res,
componentFactory: factory,
exporters: map[string]*wrappedExporter{},
logger: logger,
res: res,
componentFactory: factory,
exporters: map[string]*wrappedExporter{},
unhealthyEndpoints: make(map[string]time.Time),
}, nil
}

@@ -212,6 +220,48 @@ func (lb *loadBalancer) removeExtraExporters(ctx context.Context, endpoints []st
}
}

// markUnhealthy marks an endpoint as unhealthy
func (lb *loadBalancer) markUnhealthy(endpoint string) {
lb.healthLock.Lock()
defer lb.healthLock.Unlock()

if _, exists := lb.unhealthyEndpoints[endpoint]; !exists {
lb.unhealthyEndpoints[endpoint] = time.Now()
}
}

// isHealthy checks if an endpoint is healthy or if it has been quarantined long enough to retry
func (lb *loadBalancer) isHealthy(endpoint string) bool {
lb.healthLock.RLock()
timestamp, exists := lb.unhealthyEndpoints[endpoint]
lb.healthLock.RUnlock()

if !exists {
return true
}

// If quarantine period has passed, remove from unhealthy list and allow retry
if dnsRes, ok := lb.res.(*dnsResolver); ok && dnsRes.quarantine != nil {
lb.logger.Debug("isHealthy", zap.String("endpoint", endpoint), zap.Time("timestamp", timestamp), zap.Duration("quarantineDuration", dnsRes.quarantine.Duration))
if time.Since(timestamp) > dnsRes.quarantine.Duration {
lb.healthLock.Lock()
delete(lb.unhealthyEndpoints, endpoint)
lb.healthLock.Unlock()
lb.logger.Debug("isHealthy - quarantine period passed", zap.String("endpoint", endpoint))
return true
}
}

return false
}

// isQuarantineEnabled checks if the resolver supports quarantine logic and if it's enabled.
// Quarantine logic is supported for DNS resolvers only.
func (lb *loadBalancer) isQuarantineEnabled() bool {
dnsRes, ok := lb.res.(*dnsResolver)
return ok && dnsRes.quarantine.Enabled
}

func (lb *loadBalancer) Shutdown(ctx context.Context) error {
err := lb.res.shutdown(ctx)
lb.stopped = true
@@ -238,3 +288,79 @@ func (lb *loadBalancer) exporterAndEndpoint(identifier []byte) (*wrappedExporter

return exp, endpoint, nil
}

// exporterAndEndpointByPosition returns the exporter and endpoint in the ring at the given position.
func (lb *loadBalancer) exporterAndEndpointByPosition(pos position) (*wrappedExporter, string, error) {
lb.updateLock.RLock()
defer lb.updateLock.RUnlock()

endpoint := lb.ring.findEndpoint(pos)
exp, found := lb.exporters[endpointWithPort(endpoint)]
if !found {
return nil, "", fmt.Errorf("couldn't find the exporter for the endpoint %q", endpoint)
}

return exp, endpoint, nil
}

// consumeWithRetryAndQuarantine executes the consume operation against the initially assigned exporter and endpoint.
// If the consume operation fails, it retries with subsequent endpoints in the ring until one succeeds or all
// available endpoints have been tried. Every endpoint whose consume attempt fails is marked as unhealthy and is
// not tried again until its quarantine period has passed.
func (lb *loadBalancer) consumeWithRetryAndQuarantine(identifier []byte, exp *wrappedExporter, endpoint string, consume func(*wrappedExporter, string) error) error {
var err error

// Try the first endpoint if it's healthy or quarantine period has passed
if lb.isHealthy(endpoint) {
if err = consume(exp, endpoint); err == nil {
return nil
}
// Mark as unhealthy if the consume failed
lb.markUnhealthy(endpoint)
}

// If consume failed, try with subsequent endpoints
// Keep track of tried endpoints to avoid infinite loop
tried := map[string]bool{endpoint: true}
currentPos := getPosition(identifier)

// Try until we've used all available endpoints
for len(tried) < len(lb.ring.endpoints) {
retryExp, retryEndpoint, retryErr := lb.exporterAndEndpointByPosition(currentPos)
if retryErr != nil {
// Return original error if we can't get a new endpoint
return err
}

// If we've already tried this endpoint in this cycle, move to next position
if tried[retryEndpoint] {
currentPos = (currentPos + 1) % position(maxPositions)
continue
}

// Skip unhealthy endpoints that are still in quarantine
if !lb.isHealthy(retryEndpoint) {
tried[retryEndpoint] = true
// If we've exhausted all endpoints and they're all unhealthy, stop
if len(tried) == len(lb.ring.endpoints) {
break
}
currentPos = (currentPos + 1) % position(maxPositions)
continue
}

tried[retryEndpoint] = true

if retryErr = consume(retryExp, retryEndpoint); retryErr == nil {
return nil
}
// Mark as unhealthy if the consume failed
lb.markUnhealthy(retryEndpoint)

// Move to next position for next iteration
currentPos = (currentPos + 1) % position(maxPositions)
}

return fmt.Errorf("all endpoints were tried and failed: %v", tried)
}
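The per-signal callers of `consumeWithRetryAndQuarantine` are not part of this hunk. As a hedged sketch only, a trace consumer inside this package might wire it up roughly as follows; the function itself and the `ConsumeTraces` method on `wrappedExporter` are assumptions for illustration (as are the `context` and `ptrace` imports), not code from this PR.

```go
// consumeTracesWithQuarantine is a hypothetical caller sketch (not part of this diff):
// it resolves the exporter for a routing identifier and, when quarantine is enabled,
// routes the export through the retry/quarantine helper added above.
func (lb *loadBalancer) consumeTracesWithQuarantine(ctx context.Context, id []byte, td ptrace.Traces) error {
	exp, endpoint, err := lb.exporterAndEndpoint(id)
	if err != nil {
		return err
	}

	consume := func(e *wrappedExporter, _ string) error {
		// ConsumeTraces is assumed here; the real wrappedExporter API may differ.
		return e.ConsumeTraces(ctx, td)
	}

	if lb.isQuarantineEnabled() {
		return lb.consumeWithRetryAndQuarantine(id, exp, endpoint, consume)
	}
	return consume(exp, endpoint)
}
```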