Skip to content

Commit 1bb0438

Browse files
NO-SNOW Extract error handling logic (#1116)
1 parent 3e95af2 commit 1bb0438

File tree

4 files changed

+95
-79
lines changed

4 files changed

+95
-79
lines changed

src/main/java/com/snowflake/kafka/connector/internal/streaming/DirectTopicPartitionChannel.java

Lines changed: 12 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,11 @@
22

33
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_CONFIG;
44
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_DEFAULT;
5-
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG;
6-
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ERRORS_TOLERANCE_CONFIG;
75
import static java.util.stream.Collectors.toMap;
86

97
import com.google.common.annotations.VisibleForTesting;
108
import com.google.common.base.MoreObjects;
119
import com.google.common.base.Preconditions;
12-
import com.google.common.base.Strings;
1310
import com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig;
1411
import com.snowflake.kafka.connector.Utils;
1512
import com.snowflake.kafka.connector.dlq.KafkaRecordErrorReporter;
@@ -44,7 +41,6 @@
4441
import net.snowflake.ingest.utils.SFException;
4542
import org.apache.kafka.common.TopicPartition;
4643
import org.apache.kafka.connect.errors.ConnectException;
47-
import org.apache.kafka.connect.errors.DataException;
4844
import org.apache.kafka.connect.sink.SinkRecord;
4945
import org.apache.kafka.connect.sink.SinkTaskContext;
5046

@@ -94,9 +90,6 @@ public class DirectTopicPartitionChannel implements TopicPartitionChannel {
9490
/* Error handling, DB, schema, Snowflake URL and other snowflake specific connector properties are defined here. */
9591
private final Map<String, String> sfConnectorConfig;
9692

97-
/* Responsible for returning errors to DLQ if records have failed to be ingested. */
98-
private final KafkaRecordErrorReporter kafkaRecordErrorReporter;
99-
10093
private final SchemaEvolutionService schemaEvolutionService;
10194

10295
/**
@@ -105,20 +98,6 @@ public class DirectTopicPartitionChannel implements TopicPartitionChannel {
10598
*/
10699
private final SinkTaskContext sinkTaskContext;
107100

108-
/* Error related properties */
109-
110-
// If set to true, we will send records to DLQ provided DLQ name is valid.
111-
private final boolean errorTolerance;
112-
113-
// Whether to log errors to log file.
114-
private final boolean logErrors;
115-
116-
// Set to false if DLQ topic is null or empty. True if it is a valid string in config
117-
private final boolean isDLQTopicSet;
118-
119-
// Whether schematization has been enabled.
120-
private final boolean enableSchematization;
121-
122101
// Whether schema evolution could be done on this channel
123102
private final boolean enableSchemaEvolution;
124103

@@ -141,6 +120,8 @@ public class DirectTopicPartitionChannel implements TopicPartitionChannel {
141120

142121
private final FailsafeExecutor<Long> offsetTokenExecutor;
143122

123+
private final StreamingErrorHandler streamingErrorHandler;
124+
144125
/** Testing only, initialize TopicPartitionChannel without the connection service */
145126
@VisibleForTesting
146127
public DirectTopicPartitionChannel(
@@ -162,7 +143,6 @@ public DirectTopicPartitionChannel(
162143
tableName,
163144
false, /* No schema evolution permission */
164145
sfConnectorConfig,
165-
kafkaRecordErrorReporter,
166146
sinkTaskContext,
167147
conn,
168148
new StreamingRecordService(
@@ -173,7 +153,8 @@ public DirectTopicPartitionChannel(
173153
false,
174154
null,
175155
schemaEvolutionService,
176-
insertErrorMapper);
156+
insertErrorMapper,
157+
new StreamingErrorHandler(sfConnectorConfig, kafkaRecordErrorReporter, telemetryService));
177158
}
178159

179160
/**
@@ -192,23 +173,24 @@ public DirectTopicPartitionChannel(
192173
* @param telemetryService Telemetry Service which includes the Telemetry Client, sends Json data
193174
* to Snowflake
194175
* @param insertErrorMapper Mapper to map insert errors to schema evolution items
176+
* @param streamingErrorHandler contains DLQ and error logging related logic
195177
*/
196178
public DirectTopicPartitionChannel(
197179
SnowflakeStreamingIngestClient streamingIngestClient,
198180
TopicPartition topicPartition,
199181
final String channelNameFormatV1,
200182
final String tableName,
201-
boolean hasSchemaEvolutionPermission,
183+
final boolean enableSchemaEvolution,
202184
final Map<String, String> sfConnectorConfig,
203-
KafkaRecordErrorReporter kafkaRecordErrorReporter,
204185
SinkTaskContext sinkTaskContext,
205186
SnowflakeConnectionService conn,
206187
StreamingRecordService streamingRecordService,
207188
SnowflakeTelemetryService telemetryService,
208189
boolean enableCustomJMXMonitoring,
209190
MetricsJmxReporter metricsJmxReporter,
210191
SchemaEvolutionService schemaEvolutionService,
211-
InsertErrorMapper insertErrorMapper) {
192+
InsertErrorMapper insertErrorMapper,
193+
StreamingErrorHandler streamingErrorHandler) {
212194
final long startTime = System.currentTimeMillis();
213195

214196
this.streamingIngestClient = Preconditions.checkNotNull(streamingIngestClient);
@@ -217,23 +199,13 @@ public DirectTopicPartitionChannel(
217199
this.channelNameFormatV1 = Preconditions.checkNotNull(channelNameFormatV1);
218200
this.tableName = Preconditions.checkNotNull(tableName);
219201
this.sfConnectorConfig = Preconditions.checkNotNull(sfConnectorConfig);
220-
this.kafkaRecordErrorReporter = Preconditions.checkNotNull(kafkaRecordErrorReporter);
221202
this.sinkTaskContext = Preconditions.checkNotNull(sinkTaskContext);
222203
this.conn = conn;
223204

224205
this.streamingRecordService = streamingRecordService;
225206
this.telemetryServiceV2 = Preconditions.checkNotNull(telemetryService);
226207

227-
/* Error properties */
228-
this.errorTolerance = StreamingUtils.tolerateErrors(this.sfConnectorConfig);
229-
this.logErrors = StreamingUtils.logErrors(this.sfConnectorConfig);
230-
this.isDLQTopicSet =
231-
!Strings.isNullOrEmpty(StreamingUtils.getDlqTopicName(this.sfConnectorConfig));
232-
233-
/* Schematization related properties */
234-
this.enableSchematization = Utils.isSchematizationEnabled(this.sfConnectorConfig);
235-
236-
this.enableSchemaEvolution = this.enableSchematization && hasSchemaEvolutionPermission;
208+
this.enableSchemaEvolution = enableSchemaEvolution;
237209
this.schemaEvolutionService = schemaEvolutionService;
238210

239211
this.channelOffsetTokenMigrator = new ChannelOffsetTokenMigrator(conn, telemetryService);
@@ -291,6 +263,7 @@ public DirectTopicPartitionChannel(
291263
+ " correct offset instead",
292264
this.getChannelNameFormatV1());
293265
}
266+
this.streamingErrorHandler = streamingErrorHandler;
294267
}
295268

296269
/**
@@ -479,7 +452,7 @@ private void handleInsertRowFailure(
479452
this.getChannelNameFormatV1(),
480453
e);
481454
if (Objects.equals(e.getCode(), SnowflakeErrors.ERROR_5026.getCode())) {
482-
handleError(Collections.singletonList(e), kafkaSinkRecord);
455+
streamingErrorHandler.handleError(Collections.singletonList(e), kafkaSinkRecord);
483456
} else {
484457
throw e;
485458
}
@@ -489,7 +462,7 @@ private void handleInsertRowFailure(
489462
}
490463
}
491464

492-
handleError(
465+
streamingErrorHandler.handleError(
493466
insertErrors.stream()
494467
.map(InsertValidationResponse.InsertError::getException)
495468
.collect(Collectors.toList()),
@@ -501,42 +474,6 @@ private Map<String, ColumnProperties> getTableSchemaFromChannel() {
501474
.collect(toMap(Map.Entry::getKey, entry -> new ColumnProperties(entry.getValue())));
502475
}
503476

504-
private void handleError(List<Exception> insertErrors, SinkRecord kafkaSinkRecord) {
505-
if (logErrors) {
506-
for (Exception insertError : insertErrors) {
507-
LOGGER.error("Insert Row Error message:{}", insertError.getMessage());
508-
}
509-
}
510-
if (errorTolerance) {
511-
if (!isDLQTopicSet) {
512-
LOGGER.warn(
513-
"{} is set, however {} is not. The message will not be added to the Dead Letter Queue"
514-
+ " topic.",
515-
ERRORS_TOLERANCE_CONFIG,
516-
ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG);
517-
} else {
518-
LOGGER.warn(
519-
"Adding the message to Dead Letter Queue topic: {}",
520-
ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG);
521-
this.kafkaRecordErrorReporter.reportError(
522-
kafkaSinkRecord,
523-
insertErrors.stream()
524-
.findFirst()
525-
.orElseThrow(
526-
() ->
527-
new IllegalStateException(
528-
"Reported record error, however exception list is empty.")));
529-
}
530-
} else {
531-
final String errMsg =
532-
String.format(
533-
"Error inserting Records using Streaming API with msg:%s",
534-
insertErrors.get(0).getMessage());
535-
this.telemetryServiceV2.reportKafkaConnectFatalError(errMsg);
536-
throw new DataException(errMsg, insertErrors.get(0));
537-
}
538-
}
539-
540477
@Override
541478
@VisibleForTesting
542479
public long fetchOffsetTokenWithRetry() {

src/main/java/com/snowflake/kafka/connector/internal/streaming/SnowflakeSinkServiceV2.java

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -239,22 +239,26 @@ private TopicPartitionChannel createTopicPartitionChannel(
239239
StreamingRecordService streamingRecordService =
240240
new StreamingRecordService(this.recordService, this.kafkaRecordErrorReporter);
241241

242+
StreamingErrorHandler streamingErrorHandler =
243+
new StreamingErrorHandler(
244+
connectorConfig, kafkaRecordErrorReporter, this.conn.getTelemetryClient());
245+
242246
return new DirectTopicPartitionChannel(
243247
this.streamingIngestClient,
244248
topicPartition,
245249
partitionChannelKey, // Streaming channel name
246250
tableName,
247251
hasSchemaEvolutionPermission,
248252
this.connectorConfig,
249-
this.kafkaRecordErrorReporter,
250253
this.sinkTaskContext,
251254
this.conn,
252255
streamingRecordService,
253256
this.conn.getTelemetryClient(),
254257
this.enableCustomJMXMonitoring,
255258
this.metricsJmxReporter,
256259
this.schemaEvolutionService,
257-
new InsertErrorMapper());
260+
new InsertErrorMapper(),
261+
streamingErrorHandler);
258262
}
259263

260264
/**
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
package com.snowflake.kafka.connector.internal.streaming;

import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG;
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ERRORS_TOLERANCE_CONFIG;

import com.google.common.base.Strings;
import com.snowflake.kafka.connector.dlq.KafkaRecordErrorReporter;
import com.snowflake.kafka.connector.internal.KCLogger;
import com.snowflake.kafka.connector.internal.telemetry.SnowflakeTelemetryService;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.sink.SinkRecord;

/**
 * Class encapsulating logic related to error handling e.g. DLQ.
 *
 * <p>Depending on the connector configuration, insert errors are: logged (when error logging is
 * enabled), forwarded to the Dead Letter Queue (when error tolerance is enabled and a DLQ topic is
 * configured), or reported as a fatal error via telemetry and rethrown as a {@link DataException}
 * (when error tolerance is disabled).
 */
public class StreamingErrorHandler {

  private static final KCLogger LOGGER = new KCLogger(StreamingErrorHandler.class.getName());

  // Whether to log each insert error to the connector log file.
  private final boolean logErrors;
  // Set to false if DLQ topic is null or empty. True if it is a valid string in config.
  private final boolean isDLQTopicSet;
  // If set to true, we will send records to DLQ provided DLQ name is valid.
  private final boolean errorTolerance;
  /* Responsible for returning errors to DLQ if records have failed to be ingested. */
  private final KafkaRecordErrorReporter kafkaRecordErrorReporter;
  private final SnowflakeTelemetryService telemetryServiceV2;

  /**
   * @param sfConnectorConfig connector configuration from which the error-handling flags (error
   *     tolerance, error logging, DLQ topic name) are derived
   * @param kafkaRecordErrorReporter reporter used to forward failed records to the DLQ
   * @param telemetryServiceV2 telemetry service used to report fatal errors to Snowflake
   */
  public StreamingErrorHandler(
      Map<String, String> sfConnectorConfig,
      KafkaRecordErrorReporter kafkaRecordErrorReporter,
      SnowflakeTelemetryService telemetryServiceV2) {
    Objects.requireNonNull(sfConnectorConfig, "sfConnectorConfig");

    this.logErrors = StreamingUtils.logErrors(sfConnectorConfig);
    this.isDLQTopicSet = !Strings.isNullOrEmpty(StreamingUtils.getDlqTopicName(sfConnectorConfig));
    this.errorTolerance = StreamingUtils.tolerateErrors(sfConnectorConfig);
    this.kafkaRecordErrorReporter =
        Objects.requireNonNull(kafkaRecordErrorReporter, "kafkaRecordErrorReporter");
    this.telemetryServiceV2 = Objects.requireNonNull(telemetryServiceV2, "telemetryServiceV2");
  }

  /**
   * Handles errors raised while inserting a single sink record.
   *
   * <p>When error tolerance is enabled the record is sent to the DLQ (or a warning is logged if no
   * DLQ topic is configured). When error tolerance is disabled, a fatal error is reported via
   * telemetry and a {@link DataException} is thrown, which stops the connector task.
   *
   * @param insertErrors non-empty list of exceptions encountered while inserting the record; the
   *     first one is used for DLQ reporting / the fatal error message
   * @param kafkaSinkRecord the record whose insertion failed
   * @throws DataException if error tolerance is disabled
   * @throws IllegalStateException if {@code insertErrors} is null or empty
   */
  public void handleError(List<Exception> insertErrors, SinkRecord kafkaSinkRecord) {
    // Fail fast with a clear message instead of an uninformative IndexOutOfBoundsException
    // from insertErrors.get(0) further down (same message the DLQ branch historically used).
    if (insertErrors == null || insertErrors.isEmpty()) {
      throw new IllegalStateException("Reported record error, however exception list is empty.");
    }
    if (logErrors) {
      for (Exception insertError : insertErrors) {
        LOGGER.error("Insert Row Error message:{}", insertError.getMessage());
      }
    }
    if (errorTolerance) {
      if (!isDLQTopicSet) {
        LOGGER.warn(
            "{} is set, however {} is not. The message will not be added to the Dead Letter Queue"
                + " topic.",
            ERRORS_TOLERANCE_CONFIG,
            ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG);
      } else {
        LOGGER.warn(
            "Adding the message to Dead Letter Queue topic: {}",
            ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG);
        // List is guaranteed non-empty by the guard above; first exception is the reported cause.
        this.kafkaRecordErrorReporter.reportError(kafkaSinkRecord, insertErrors.get(0));
      }
    } else {
      final String errMsg =
          String.format(
              "Error inserting Records using Streaming API with msg:%s",
              insertErrors.get(0).getMessage());
      this.telemetryServiceV2.reportKafkaConnectFatalError(errMsg);
      throw new DataException(errMsg, insertErrors.get(0));
    }
  }
}

src/test/java/com/snowflake/kafka/connector/internal/streaming/TopicPartitionChannelTest.java

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -996,22 +996,24 @@ public DirectTopicPartitionChannel createTopicPartitionChannel(
996996
SnowflakeTelemetryService telemetryService,
997997
boolean enableCustomJMXMonitoring,
998998
MetricsJmxReporter metricsJmxReporter) {
999+
StreamingErrorHandler streamingErrorHandler =
1000+
new StreamingErrorHandler(sfConnectorConfig, kafkaRecordErrorReporter, telemetryService);
9991001
return new DirectTopicPartitionChannel(
10001002
streamingIngestClient,
10011003
topicPartition,
10021004
channelNameFormatV1,
10031005
tableName,
10041006
hasSchemaEvolutionPermission,
10051007
sfConnectorConfig,
1006-
kafkaRecordErrorReporter,
10071008
sinkTaskContext,
10081009
conn,
10091010
new StreamingRecordService(recordService, kafkaRecordErrorReporter),
10101011
telemetryService,
10111012
enableCustomJMXMonitoring,
10121013
metricsJmxReporter,
10131014
this.schemaEvolutionService,
1014-
new InsertErrorMapper());
1015+
new InsertErrorMapper(),
1016+
streamingErrorHandler);
10151017
}
10161018

10171019
@Test

0 commit comments

Comments
 (0)