@@ -50,9 +50,9 @@ import org.apache.kafka.common.serialization.Serializer
  */
 @FlowPreview
 public suspend fun <A, B> Flow<ProducerRecord<A, B>>.produce(
-  settings: ProducerSettings<A, B>
+  settings: ProducerSettings<A, B>,
 ): Flow<RecordMetadata> =
-  settings.kafkaProducer().flatMapConcat { producer ->
+  kafkaProducer(settings).flatMapConcat { producer ->
     this@produce.map { record -> producer.sendAwait(record) }
   }
 
@@ -75,7 +75,7 @@ public suspend fun <A, B> Flow<ProducerRecord<A, B>>.produce(
  * <!--- KNIT example-producer-02.kt -->
  */
 public suspend fun <A, B> KafkaProducer<A, B>.sendAwait(
-  record: ProducerRecord<A, B>
+  record: ProducerRecord<A, B>,
 ): RecordMetadata =
   suspendCoroutine { cont ->
     // Those can be a SerializationException when it fails to serialize the message,
@@ -87,23 +87,49 @@ public suspend fun <A, B> KafkaProducer<A, B>.sendAwait(
     }
   }
 
-public fun <K, V> KafkaProducer(
-  props: Properties,
-  keyDeserializer: Serializer<K>,
-  valueDeserializer: Serializer<V>
-): KafkaProducer<K, V> =
-  KafkaProducer(props, keyDeserializer, valueDeserializer)
+/**
+ * Constructs a [KafkaProducer] for [K] and [V] from the given [ProducerSettings].
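+ *
+ * A minimal usage sketch (the topic name and the String key/value types are illustrative
+ * assumptions, given some already configured [ProducerSettings]):
+ *
+ * ```kotlin
+ * suspend fun sendOne(settings: ProducerSettings<String, String>) {
+ *   KafkaProducer(settings).use { producer ->
+ *     producer.sendAwait(ProducerRecord("topic-name", "key", "value"))
+ *   }
+ * }
+ * ```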
+ */
+@Suppress("FunctionName")
+public fun <K, V> KafkaProducer(setting: ProducerSettings<K, V>): KafkaProducer<K, V> =
+  KafkaProducer(setting.properties(), setting.keyDeserializer, setting.valueDeserializer)
 
+/**
+ * Constructs a [Flow] that emits a [KafkaProducer] built from [ProducerSettings], and
+ * automatically flushes and closes it when finished streaming. The [KafkaProducer] is closed
+ * once the downstream that consumed it from the [Flow] completes.
+ *
+ * This means the [KafkaProducer] will not be closed while a synchronously running stream is
+ * still producing, but when collection of the [Flow] is offloaded to a separate coroutine it
+ * is prone to being collected, closed and flushed too early. In the example below we construct
+ * a producer stream that produces 100 indexed messages.
+ *
+ * ```kotlin
+ * fun <Key, Value> KafkaProducer<Key, Value>.produce(topicName: String, count: Int): Flow<Unit> =
+ *   (1..count).asFlow().map { sendAwait(ProducerRecord(topicName, "message #$it")) }
+ *
+ * val producerStream = kafkaProducer(settings) // given ProducerSettings<String, String> `settings`
+ *   .flatMapConcat { producer -> producer.produce("topic-name", 100) }
+ * ```
+ *
+ * Here the `KafkaProducer` will only get collected (and closed/flushed) once all 100 messages
+ * have been produced.
+ *
+ * **DO NOT** do the following: if we instead offload into a buffer, the `KafkaProducer` gets
+ * collected into the buffer and is thus closed and flushed before the records are produced.
+ *
+ * ```kotlin
+ * kafkaProducer(settings).buffer(10)
+ * ```
+ */
 public fun <K, V> kafkaProducer(
-  props: Properties,
-  keyDeserializer: Serializer<K>,
-  valueDeserializer: Serializer<V>
+  setting: ProducerSettings<K, V>,
 ): Flow<KafkaProducer<K, V>> = flow {
-  val producer = KafkaProducer(props, keyDeserializer, valueDeserializer)
-  try {
-    producer.use { emit(it) }
-  } finally {
-    producer.flush()
+  KafkaProducer(setting).use { producer ->
+    try {
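+      // Hand the producer to the collector; it stays open while this emission is being collected.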
+      emit(producer)
+    } finally {
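+      // Flush any buffered records before `use` closes the producer.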
+      producer.flush()
+    }
   }
 }
 
@@ -115,6 +141,9 @@ public enum class Acks(public val value: String) {
 }
 
 /**
+ * A type-safe constructor for [KafkaProducer] settings.
+ * It forces you to specify the bootstrapServer and the serializers for [K] and [V];
+ * these are the minimum requirements for constructing a valid [KafkaProducer].
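+ *
+ * A usage sketch (the bootstrap server address and the String serializers are illustrative
+ * assumptions):
+ *
+ * ```kotlin
+ * val settings = ProducerSettings("localhost:9092", StringSerializer(), StringSerializer())
+ *
+ * suspend fun sendAll(records: Flow<ProducerRecord<String, String>>) =
+ *   records.produce(settings).collect { metadata -> println(metadata.offset()) }
+ * ```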
  *
  * @see http://kafka.apache.org/documentation.html#producerconfigs
  */
@@ -123,7 +152,7 @@ public data class ProducerSettings<K, V>(
   val keyDeserializer: Serializer<K>,
   val valueDeserializer: Serializer<V>,
   val acks: Acks = Acks.One,
-  val other: Properties? = null
+  val other: Properties? = null,
 ) {
   public fun properties(): Properties =
     Properties().apply {
@@ -133,34 +162,4 @@ public data class ProducerSettings<K, V>(
       put(ProducerConfig.ACKS_CONFIG, acks.value)
       other?.let { putAll(other) }
     }
-
-  /**
-   * Will automatically close, and flush when finished streaming.
-   * The [Flow] will close when the [KafkaProducer] is consumed from the [Flow].
-   *
-   * This means that the [KafkaProducer] will not be closed for a synchronous running stream, but
-   * when running the [Flow] is offloaded in a separate Coroutine it's prone to be collected, closed
-   * and flushed. In the example below we construct a producer stream that produces 100 indexed
-   * messages.
-   *
-   * ```kotlin
-   * fun <Key, Value> KafkaProducer<Key, Value>.produce(topicName: String, count: Int): Flow<Unit> =
-   *   (0..count).asFlow().map { sendAwait(ProducerRecord(topicName, "message #it")) }
-   *
-   * val producerStream = kafkaProducer(Properties(), StringSerializer(), StringSerializer())
-   *   .flatMapConcat { producer -> producer.produce("topic-name", 100) }
-   * ```
-   *
-   * Here the `KafkaProducer` will only get collected (and closed/flushed) when all 100 messages
-   * were produced.
-   *
-   * **DO NOT** If instead we'd do something like the following, where we offload in a buffer then
-   * the `KafkaProducer` gets collected into the buffer and thus closed/flushed.
-   *
-   * ```kotlin
-   * kafkaProducer(Properties(), StringSerializer(), StringSerializer()).buffer(10)
-   * ```
-   */
-  public fun kafkaProducer(): Flow<KafkaProducer<K, V>> =
-    kafkaProducer(properties(), keyDeserializer, valueDeserializer)
 }