亿级日志log4j2接入Kafka方案
背景描述
一个高访问量高并发高响应的系统,每天大概5~6亿的请求日志量,日志接入大数据平台,第一版使用的是Flume方式,后来因为需要基于日志做实时预警功能,故需要实时接入,采用log4j2 KafkaAppender接入Kafka消息队列的模式。
log4j2配置
<!-- Kafka appender: ignoreExceptions="false" is required so delivery failures
     propagate up and trigger the Failover appender instead of being swallowed. -->
<Kafka name="kafkaLog" topic="topic_request_log" ignoreExceptions="false">
    <!-- Use yyyy (calendar year), not YYYY (week-based year): YYYY stamps the
         wrong year for dates near the New Year boundary. -->
    <PatternLayout pattern="[%-4level]_|_%d{yyyy-MM-dd HH:mm:ss}_|_%m_|_${sys:ip}"/>
    <Property name="bootstrap.servers">bigdata001.dns.org:9092,bigdata002.dns.org:9092</Property>
    <!-- Cap producer blocking at 2s so logging cannot stall the application
         when the Kafka cluster is unreachable. -->
    <Property name="max.block.ms">2000</Property>
</Kafka>
<!-- Local rolling file that receives log events when Kafka delivery fails;
     rolls over daily via TimeBasedTriggeringPolicy on the %d{yyyy-MM-dd} pattern. -->
<RollingFile name="failoverKafkaLog" fileName="../log/Service/failoverKafka/request.log"
             filePattern="../log/Service/failoverKafka/request.%d{yyyy-MM-dd}.log">
    <ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>
    <PatternLayout>
        <!-- Use yyyy (calendar year), not YYYY (week-based year): YYYY stamps
             the wrong year for dates near the New Year boundary. -->
        <Pattern>[%-4level]_|_%d{yyyy-MM-dd HH:mm:ss}_|_%m_|_${sys:ip}%n</Pattern>
    </PatternLayout>
    <Policies>
        <TimeBasedTriggeringPolicy />
    </Policies>
</RollingFile>
<!-- Primary/secondary routing: send events to the Kafka appender first; when it
     throws, divert to the local rolling file and retry Kafka after 600 seconds. -->
<Failover name="Failover"
          primary="kafkaLog"
          retryIntervalSeconds="600">
    <Failovers>
        <AppenderRef ref="failoverKafkaLog"/>
    </Failovers>
</Failover>
<!-- Async: hand log events to a background thread so logging does not block request handling -->
<!-- Dedicated async logger for request logs; additivity="false" keeps these
     events out of the root logger's appenders. -->
<AsyncLogger name="kafkaLogger" level="INFO" additivity="false">
    <!-- <AppenderRef> (upper camel case) for consistency with the reference
         inside the Failover appender; log4j2 accepts both spellings. -->
    <AppenderRef ref="Failover"/>
</AsyncLogger>