
Commit

improve onErrorContinue for Exception catch
wooEnrico committed May 17, 2024
1 parent 06ddb50 commit 7c39d86
Showing 2 changed files with 32 additions and 12 deletions.
First changed file: KafkaConsumer

@@ -68,7 +68,10 @@ public KafkaConsumer(String name, ConsumerProperties consumerProperties, Consume

@Override
public void close() {
this.close.set(true);
if (!this.close.compareAndSet(false, true)) {
return;
}

this.subscribers.forEach((threadPoolExecutor, kafkaConsumer) -> {
threadPoolExecutor.shutdown();
});
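
The new guard makes close() idempotent: only the first caller flips the flag and runs the shutdown logic, repeated or concurrent calls return immediately. A minimal standalone sketch of the same pattern, with illustrative names (IdempotentResource, closed) that are not part of the repository:

import java.util.concurrent.atomic.AtomicBoolean;

// Minimal sketch of the compareAndSet close guard used in the diff above.
class IdempotentResource implements AutoCloseable {

    private final AtomicBoolean closed = new AtomicBoolean(false);

    @Override
    public void close() {
        // Only the first close() wins the compareAndSet; later calls are no-ops,
        // so the shutdown work below runs exactly once even under concurrent close().
        if (!closed.compareAndSet(false, true)) {
            return;
        }
        // ... shut down executors, consumers and other resources here ...
    }
}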
@@ -88,9 +91,11 @@ private void subscribe(ThreadPoolExecutor threadPoolExecutor) {
}

CompletableFuture.runAsync(this::loopPoll, this.pollExecutor).exceptionally(throwable -> {
log.error("stop poll and close consumer {}", this.name, throwable);
if (!close.get()) {
log.error("kafka consumer recreate {}", this.name, throwable);
this.subscribe(threadPoolExecutor);
} else {
log.error("kafka consumer stop {}", this.name, throwable);
}
return null;
});
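
The exceptionally callback now distinguishes two cases: if the consumer has not been closed, a failed poll loop is treated as recoverable and the consumer is re-subscribed; if close() has already flipped the flag, the failure is only logged and the loop is allowed to end. A self-contained sketch of that recreate-unless-closed pattern, with made-up names (PollLoop, runLoop) standing in for the real members:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of restarting a failed poll loop unless close() was already requested.
class PollLoop {

    private final AtomicBoolean close = new AtomicBoolean(false);
    private final ExecutorService pollExecutor = Executors.newSingleThreadExecutor();

    void start() {
        CompletableFuture.runAsync(this::runLoop, pollExecutor).exceptionally(throwable -> {
            if (!close.get()) {
                // The loop died while the consumer should still be running: recreate it.
                System.err.println("poll loop failed, recreating: " + throwable);
                start();
            } else {
                // close() already won the race: log and stop for good.
                System.err.println("poll loop stopped after close(): " + throwable);
            }
            return null;
        });
    }

    private void runLoop() {
        while (!close.get()) {
            // ... poll records and dispatch them here; a thrown exception ends the loop ...
        }
    }

    void close() {
        if (close.compareAndSet(false, true)) {
            pollExecutor.shutdown();
        }
    }
}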
Second changed file: ReactorKafkaReceiver
@@ -1,7 +1,6 @@
package io.github.wooernico.kafka.consumer;

import io.github.wooernico.kafka.KafkaUtil;
import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Deserializer;
import org.slf4j.Logger;
@@ -20,6 +19,7 @@
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Function;
@@ -39,6 +39,7 @@ public abstract class ReactorKafkaReceiver<K, V> implements Closeable {

private final Map<ThreadPoolExecutor, Disposable> subscribers = new ConcurrentHashMap<>();
private final AtomicInteger rebalanceCounter = new AtomicInteger(0);
private final AtomicBoolean close = new AtomicBoolean(false);

public ReactorKafkaReceiver(String name, ConsumerProperties consumerProperties, Function<ConsumerRecord<K, V>, Mono<Void>> consumer, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) {
this(name, consumerProperties, keyDeserializer, valueDeserializer, consumer,
@@ -56,11 +57,15 @@ public ReactorKafkaReceiver(String name, ConsumerProperties consumerProperties,
this.onAssign = onAssign;
this.onRevoke = onRevoke;

this.subscribe(null);
this.subscribe();
}

@Override
public void close() throws IOException {
if (!this.close.compareAndSet(false, true)) {
return;
}

this.subscribers.forEach((threadPoolExecutor, disposable) -> {
disposable.dispose();
threadPoolExecutor.shutdown();
@@ -79,20 +84,30 @@ private void subscribe(ThreadPoolExecutor threadPoolExecutor) {
}
}

this.reactorKafkaHandler();
if (this.close.get()) {
return;
}

this.subscribe();
}

private void reactorKafkaHandler() {
private void subscribe() {
CustomizableThreadFactory customizableThreadFactory = new CustomizableThreadFactory(this.name + "-" + this.rebalanceCounter.incrementAndGet() + "-");
ThreadPoolExecutor threadPoolExecutor = KafkaUtil.newThreadPoolExecutor(this.consumerProperties.getExecutor(), customizableThreadFactory);

Disposable disposable = this.createKafkaReceiver(this.consumerProperties, this.keyDeserializer, this.valueDeserializer, this.onAssign, this.onRevoke).receiveAutoAck().concatMap(r -> r)
.flatMap(record -> Mono.defer(() -> this.consumer.apply(record)).subscribeOn(Schedulers.fromExecutor(threadPoolExecutor)))
.onErrorContinue(e -> !(e instanceof CommitFailedException), (e, o) -> log.error("onErrorContinue record : {}", o, e))
KafkaReceiver<K, V> kafkaReceiver = this.createKafkaReceiver(this.consumerProperties, this.keyDeserializer, this.valueDeserializer, this.onAssign, this.onRevoke);
Disposable disposable = kafkaReceiver.receiveAutoAck().concatMap(r -> r)
.doOnError(e -> {
log.error("commit failed for rebalanced and recreate {}", this.name, e);
log.error("kafka receiver recreate {}", this.name, e);
this.subscribe(threadPoolExecutor);
}).subscribe();
})
.flatMap(record -> Mono.defer(() -> this.consumer.apply(record))
.onErrorResume(throwable -> {
log.error("onErrorResume record : {}", record, throwable);
return Mono.empty();
})
.subscribeOn(Schedulers.fromExecutor(threadPoolExecutor))
)
.subscribe();

this.subscribers.put(threadPoolExecutor, disposable);
}
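
The old pipeline applied a single onErrorContinue(e -> !(e instanceof CommitFailedException), ...) across the whole receive flux; the rewritten version handles processing failures per record with onErrorResume inside flatMap (log and drop just that record) and leaves receiver-level failures to doOnError, which recreates the receiver. A plain-Reactor sketch of the per-record part, without Kafka and with illustrative names only:

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

// Sketch: each element's processing Mono gets its own onErrorResume, so a
// failing element is logged and dropped while the outer pipeline keeps running.
public class PerRecordErrorHandling {

    public static void main(String[] args) {
        Flux.just("a", "boom", "c")
                .flatMap(record -> Mono.defer(() -> process(record))
                        .onErrorResume(throwable -> {
                            // swallow the failure for this record only
                            System.err.println("onErrorResume record : " + record + " " + throwable);
                            return Mono.empty();
                        }))
                .subscribe();
    }

    private static Mono<Void> process(String record) {
        if ("boom".equals(record)) {
            return Mono.error(new IllegalStateException("processing failed"));
        }
        System.out.println("processed " + record);
        return Mono.empty();
    }
}

With per-record handling in place, errors raised by the receiver itself are no longer filtered out: they terminate the flux, reach doOnError, and trigger the re-subscribe shown in the diff above.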
