[#5] Consumer Code Migration #6
@@ -0,0 +1,87 @@
package com.sangdol.consumer.infrastructure.kafka.consumer;

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import com.sangdol.consumer.domain.TestRecord;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

@Slf4j
@RequiredArgsConstructor
public class ConsumerWorker implements Runnable {

    private final KafkaConsumer<String, TestRecord> consumer;
    private final RecordProcessor recordProcessor;
    private final AtomicBoolean running = new AtomicBoolean(true);

    @Override
    public void run() {
        try {
            log.info("[ConsumerWorker] started. thread: {}", Thread.currentThread().getName());
            while (running.get()) {
                ConsumerRecords<String, TestRecord> records = consumer.poll(Duration.ofMillis(1000));

                if (records.isEmpty()) {
                    recordProcessor.processIfEmptyRecords();
                    continue;
                }

                log.debug("[ConsumerWorker] received {} records. thread: {}", records.count(), Thread.currentThread().getName());
                recordProcessor.process(records);
                commitAsync(records);
            }
        } catch (WakeupException e) {
            // Expected on shutdown: wakeup() interrupts the blocking poll().
            log.info("[ConsumerWorker] Wakeup Exception. thread: {}", Thread.currentThread().getName());
        } catch (Exception e) {
            log.error("[ConsumerWorker] Unexpected Exception. thread: {}", Thread.currentThread().getName(), e);
        } finally {
            try {
                // Commit synchronously once before closing so processed offsets are not lost.
                consumer.commitSync();
                log.info("[ConsumerWorker] Final offset committed. Closing consumer..");
            } catch (Exception e) {
                log.error("[ConsumerWorker] Failed to commit final offsets on shutdown. thread: {}",
                        Thread.currentThread().getName(), e);
            } finally {
                consumer.close();
                log.info("[ConsumerWorker] Closed consumer. thread: {}", Thread.currentThread().getName());
            }
        }
    }

    private void commitAsync(ConsumerRecords<String, TestRecord> records) {
        // Commit the offset after the last record of each partition in this batch
        // (+1 because the committed offset is the next offset to be consumed).
        Map<TopicPartition, OffsetAndMetadata> offsets = records.partitions().stream()
                .collect(Collectors.toMap(tp -> tp, tp -> {
                    List<ConsumerRecord<String, TestRecord>> partitionRecords = records.records(tp);
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();

                    return new OffsetAndMetadata(lastOffset + 1);
                }));

        consumer.commitAsync(offsets, (committedOffsets, exception) -> {
            if (exception != null) {
                log.error("[ConsumerWorker] failed to commit offsets: {}, partitions: {}", committedOffsets,
                        records.partitions(), exception);
            } else {
                log.debug("[ConsumerWorker] committed offsets: {}, partitions: {}", committedOffsets,
                        records.partitions());
            }
        });
    }

    public void shutdown() {
        running.set(false);
        consumer.wakeup();
    }
}
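For reviewers reading this diff in isolation: ConsumerWorker delegates batch handling to a RecordProcessor that is not part of this change. Only the two method names (process, processIfEmptyRecords), the element type, and the package (inferred from the missing import) are implied by the code above; the sketch below is an assumption about that contract, not the actual class in the repository.

package com.sangdol.consumer.infrastructure.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecords;

import com.sangdol.consumer.domain.TestRecord;

// Hypothetical sketch of the collaborator ConsumerWorker depends on; only the two
// signatures are implied by the calls in run(), the rest is an assumption.
public interface RecordProcessor {

    // Handles a non-empty batch; ConsumerWorker commits offsets only after this returns
    // normally, so throwing here skips the commit for the batch.
    void process(ConsumerRecords<String, TestRecord> records);

    // Called whenever a poll() returns no records, e.g. to flush buffered work.
    void processIfEmptyRecords();
}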
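The worker commits asynchronously inside the poll loop and falls back to a single synchronous commit in the finally block, so a clean shutdown should not lose offsets for already-processed batches. A minimal wiring sketch follows, using the RecordProcessor contract sketched above; the topic name, group id, bootstrap servers, worker count, the logging placeholder processor, and the use of spring-kafka's JsonDeserializer for TestRecord are all assumptions, not part of this PR.

package com.sangdol.consumer.infrastructure.kafka.consumer;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.kafka.support.serializer.JsonDeserializer;

import com.sangdol.consumer.domain.TestRecord;

import lombok.extern.slf4j.Slf4j;

@Slf4j
public class ConsumerWorkerRunner {

    private static final int WORKER_COUNT = 3;         // assumption
    private static final String TOPIC = "test-topic";  // assumption

    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(WORKER_COUNT);
        List<ConsumerWorker> workers = new ArrayList<>();

        for (int i = 0; i < WORKER_COUNT; i++) {
            // KafkaConsumer is not thread-safe, so every worker owns its own instance.
            KafkaConsumer<String, TestRecord> consumer = new KafkaConsumer<>(
                    consumerProperties(),
                    new StringDeserializer(),
                    new JsonDeserializer<>(TestRecord.class)); // assumes spring-kafka is on the classpath
            consumer.subscribe(List.of(TOPIC));
            workers.add(new ConsumerWorker(consumer, loggingProcessor()));
        }

        // shutdown() flips the running flag and wakes up the blocking poll(), so each worker
        // leaves its loop, commits synchronously in the finally block, and closes its consumer.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> workers.forEach(ConsumerWorker::shutdown)));

        workers.forEach(executor::submit);
        executor.shutdown();
    }

    private static Properties consumerProperties() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-consumer-group");     // assumption
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");         // manual commits, as in ConsumerWorker
        return props;
    }

    private static RecordProcessor loggingProcessor() {
        // Placeholder implementation for the sketch; the real processor is project-specific.
        return new RecordProcessor() {
            @Override
            public void process(ConsumerRecords<String, TestRecord> records) {
                log.info("processed {} records", records.count());
            }

            @Override
            public void processIfEmptyRecords() {
                // nothing buffered in this sketch
            }
        };
    }
}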