docker run -d --name zookeeper -p 2181:2181 -e TZ="Asia/Shanghai" --restart always wurstmeister/zookeeper
[root@localhost ~]# docker logs zookeeper
ZooKeeper JMX enabled by default
Using config:/opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
2024-08-01 17:38:05,746 [myid:] - INFO [main:QuorumPeerConfig@136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
2024-08-01 17:38:05,751 [myid:] - INFO [main:DatadirCleanupManager@78] - autopurge.snapRetainCount set to 3
2024-08-01 17:38:05,751 [myid:] - INFO [main:DatadirCleanupManager@79] - autopurge.purgeInterval set to 1
2024-08-01 17:38:05,752 [myid:] - WARN [main:QuorumPeerMain@116] - Either no config or no quorum defined in config, running in standalone mode
2024-08-01 17:38:05,753 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask@138] - Purge task started.
2024-08-01 17:38:05,766 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask@144] - Purge task completed.
2024-08-01 17:38:05,767 [myid:] - INFO [main:QuorumPeerConfig@136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
2024-08-01 17:38:05,767 [myid:] - INFO [main:ZooKeeperServerMain@98] - Starting server
2024-08-01 17:38:05,775 [myid:] - INFO [main:Environment@100] - Server environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 04:05 GMT
2024-08-01 17:38:05,775 [myid:] - INFO [main:Environment@100] - Server environment:host.name=dec10f300eea
2024-08-01 17:38:05,776 [myid:] - INFO [main:Environment@100] - Server environment:java.version=1.7.0_65
2024-08-01 17:38:05,776 [myid:] - INFO [main:Environment@100] - Server environment:java.vendor=Oracle Corporation
2024-08-01 17:38:05,776 [myid:] - INFO [main:Environment@100] - Server environment:java.home=/usr/lib/jvm/java-7-openjdk-amd64/jre
2024-08-01 17:38:05,776 [myid:] - INFO [main:Environment@100] - Server environment:java.class.path=/opt/zookeeper-3.4.13/bin/../build/classes:/opt/zookeeper-3.4.13/bin/../build/lib/*.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-log4j12-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-api-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/netty-3.10.6.Final.jar:/opt/zookeeper-3.4.13/bin/../lib/log4j-1.2.17.jar:/opt/zookeeper-3.4.13/bin/../lib/jline-0.9.94.jar:/opt/zookeeper-3.4.13/bin/../lib/audience-annotations-0.5.0.jar:/opt/zookeeper-3.4.13/bin/../zookeeper-3.4.13.jar:/opt/zookeeper-3.4.13/bin/../src/java/lib/*.jar:/opt/zookeeper-3.4.13/bin/../conf:
2024-08-01 17:38:05,776 [myid:] - INFO [main:Environment@100] - Server environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib
2024-08-01 17:38:05,776 [myid:] - INFO [main:Environment@100] - Server environment:java.io.tmpdir=/tmp
2024-08-01 17:38:05,778 [myid:] - INFO [main:Environment@100] - Server environment:java.compiler=<NA>
2024-08-01 17:38:05,778 [myid:] - INFO [main:Environment@100] - Server environment:os.name=Linux
2024-08-01 17:38:05,778 [myid:] - INFO [main:Environment@100] - Server environment:os.arch=amd64
2024-08-01 17:38:05,778 [myid:] - INFO [main:Environment@100] - Server environment:os.version=3.10.0-693.el7.x86_64
2024-08-01 17:38:05,779 [myid:] - INFO [main:Environment@100] - Server environment:user.name=root
2024-08-01 17:38:05,779 [myid:] - INFO [main:Environment@100] - Server environment:user.home=/root
2024-08-01 17:38:05,779 [myid:] - INFO [main:Environment@100] - Server environment:user.dir=/opt/zookeeper-3.4.13
2024-08-01 17:38:05,782 [myid:] - INFO [main:ZooKeeperServer@836] - tickTime set to 2000
2024-08-01 17:38:05,782 [myid:] - INFO [main:ZooKeeperServer@845] - minSessionTimeout set to -1
2024-08-01 17:38:05,782 [myid:] - INFO [main:ZooKeeperServer@854] - maxSessionTimeout set to -1
2024-08-01 17:38:05,796 [myid:] - INFO [main:ServerCnxnFactory@117] - Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory
2024-08-01 17:38:05,801 [myid:] - INFO [main:NIOServerCnxnFactory@89] - binding to port 0.0.0.0/0.0.0.0:2181
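The last line shows ZooKeeper bound to 0.0.0.0:2181 in standalone mode. Before moving on to Kafka it is worth confirming the server actually answers requests; a minimal check, assuming the scripts live under /opt/zookeeper-3.4.13 inside this image and that nc is installed on the host:

# ask the server for its status from inside the container; expect "Mode: standalone"
docker exec -it zookeeper /opt/zookeeper-3.4.13/bin/zkServer.sh status

# or probe the client port from the host with the "ruok" four-letter word; a healthy server replies "imok"
echo ruok | nc 127.0.0.1 2181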
2. Install Kafka
docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=192.168.74.148:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.74.148:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e TZ="Asia/Shanghai" wurstmeister/kafka
[root@localhost ~]# docker logs kafka
[Configuring] 'advertised.listeners' in '/opt/kafka/config/server.properties'
[Configuring] 'port' in '/opt/kafka/config/server.properties'
Excluding KAFKA_HOME from broker config
[Configuring] 'log.dirs' in '/opt/kafka/config/server.properties'
[Configuring] 'listeners' in '/opt/kafka/config/server.properties'
Excluding KAFKA_VERSION from broker config
[Configuring] 'zookeeper.connect' in '/opt/kafka/config/server.properties'
[Configuring] 'broker.id' in '/opt/kafka/config/server.properties'
[2024-08-01 17:43:49,050] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2024-08-01 17:43:49,597] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
[2024-08-01 17:43:49,783] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2024-08-01 17:43:49,794] INFO starting (kafka.server.KafkaServer)
[2024-08-01 17:43:49,795] INFO Connecting to zookeeper on 192.168.74.148:2181 (kafka.server.KafkaServer)
[2024-08-01 17:43:49,870] INFO [ZooKeeperClient Kafka server] Initializing a new session to 192.168.74.148:2181. (kafka.zookeeper.ZooKeeperClient)
[2024-08-01 17:43:49,878] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,878] INFO Client environment:host.name=c85acd592a88 (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,878] INFO Client environment:java.version=1.8.0_292 (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,878] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,878] INFO Client environment:java.home=/usr/lib/jvm/zulu8-ca/jre (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,878] INFO Client environment:java.class.path=/opt/kafka/bin/../libs/activation-1.1.1.jar:/opt/kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/opt/kafka/bin/../libs/argparse4j-0.7.0.jar:/opt/kafka/bin/../libs/audience-annotations-0.5.0.jar:/opt/kafka/bin/../libs/commons-cli-1.4.jar:/opt/kafka/bin/../libs/commons-lang3-3.8.1.jar:/opt/kafka/bin/../libs/connect-api-2.8.1.jar:/opt/kafka/bin/../libs/connect-basic-auth-extension-2.8.1.jar:/opt/kafka/bin/../libs/connect-file-2.8.1.jar:/opt/kafka/bin/../libs/connect-json-2.8.1.jar:/opt/kafka/bin/../libs/connect-mirror-2.8.1.jar:/opt/kafka/bin/../libs/connect-mirror-client-2.8.1.jar:/opt/kafka/bin/../libs/connect-runtime-2.8.1.jar:/opt/kafka/bin/../libs/connect-transforms-2.8.1.jar:/opt/kafka/bin/../libs/hk2-api-2.6.1.jar:/opt/kafka/bin/../libs/hk2-locator-2.6.1.jar:/opt/kafka/bin/../libs/hk2-utils-2.6.1.jar:/opt/kafka/bin/../libs/jackson-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-core-2.10.5.jar:/opt/kafka/bin/../libs/jackson-databind-2.10.5.1.jar:/opt/kafka/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/opt/kafka/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-paranamer-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/opt/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/opt/kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/opt/kafka/bin/../libs/jakarta.inject-2.6.1.jar:/opt/kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/opt/kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/opt/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/opt/kafka/bin/../libs/javassist-3.27.0-GA.jar:/opt/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/opt/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/opt/kafka/bin/../libs/jaxb-api-2.3.0.jar:/opt/kafka/bin/../libs/jersey-client-2.34.jar:/opt/kafka/bin/../libs/jersey-common-2.34.jar:/opt/kafka/bin/../libs/jersey-container-servlet-2.34.jar:/opt/kafka/bin/../libs/jersey-container-servlet-core-2.34.jar:/opt/kafka/bin/../libs/jersey-hk2-2.34.jar:/opt/kafka/bin/../libs/jersey-server-2.34.jar:/opt/kafka/bin/../libs/jetty-client-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-continuation-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-http-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-io-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-security-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-server-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-servlet-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-servlets-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-util-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jetty-util-ajax-9.4.43.v20210629.jar:/opt/kafka/bin/../libs/jline-3.12.1.jar:/opt/kafka/bin/../libs/jopt-simple-5.0.4.jar:/opt/kafka/bin/../libs/kafka-clients-2.8.1.jar:/opt/kafka/bin/../libs/kafka-log4j-appender-2.8.1.jar:/opt/kafka/bin/../libs/kafka-metadata-2.8.1.jar:/opt/kafka/bin/../libs/kafka-raft-2.8.1.jar:/opt/kafka/bin/../libs/kafka-shell-2.8.1.jar:/opt/kafka/bin/../libs/kafka-streams-2.8.1.jar:/opt/kafka/bin/../libs/kafka-streams-examples-2.8.1.jar:/opt/kafka/bin/../libs/kafka-streams-scala_2.13-2.8.1.jar:/opt/kafka/bin/../libs/kafka-streams-test-utils-2.8.1.jar:/opt/kafka/bin/../libs/kafka-tools-2.8.1.jar:/opt/kafka/bin/../libs/kafka_2.13-2.8.1-sources.jar:/opt/kafka/bin/../libs/kafka_2.13-2.8.1.jar:/opt/kafka/bin/../libs/log4j-1.2.17.jar:/opt/kafka/bin/../libs/lz4-java-1.7.1.jar:/opt/kafka/bin/../libs/maven-artifact-3.8.1.jar:/opt/kafka/bin/../libs/metrics-core-2.2.0.jar:/opt/kafka/bin/../libs/netty-buffer-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-codec-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-common-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-handler-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-resolver-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-transport-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/opt/kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/opt/kafka/bin/../libs/paranamer-2.8.jar:/opt/kafka/bin/../libs/plexus-utils-3.2.1.jar:/opt/kafka/bin/../libs/reflections-0.9.12.jar:/opt/kafka/bin/../libs/rocksdbjni-5.18.4.jar:/opt/kafka/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/opt/kafka/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/opt/kafka/bin/../libs/scala-library-2.13.5.jar:/opt/kafka/bin/../libs/scala-logging_2.13-3.9.2.jar:/opt/kafka/bin/../libs/scala-reflect-2.13.5.jar:/opt/kafka/bin/../libs/slf4j-api-1.7.30.jar:/opt/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/opt/kafka/bin/../libs/snappy-java-1.1.8.1.jar:/opt/kafka/bin/../libs/zookeeper-3.5.9.jar:/opt/kafka/bin/../libs/zookeeper-jute-3.5.9.jar:/opt/kafka/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:java.library.path=/usr/lib/jvm/zulu8-ca/jre/lib/amd64/server:/usr/lib/jvm/zulu8-ca/jre/lib/amd64:/usr/lib/jvm/zulu8-ca/jre/../lib/amd64:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:os.version=3.10.0-693.el7.x86_64 (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,879] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,880] INFO Client environment:os.memory.free=1013MB (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,880] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,880] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,884] INFO Initiating client connection, connectString=192.168.74.148:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@799f10e1 (org.apache.zookeeper.ZooKeeper)
[2024-08-01 17:43:49,893] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[2024-08-01 17:43:49,905] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[2024-08-01 17:43:49,908] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2024-08-01 17:43:49,950] INFO Opening socket connection to server 192.168.74.148/192.168.74.148:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
[2024-08-01 17:43:49,956] INFO Socket connection established, initiating session, client: /172.17.0.7:38190, server: 192.168.74.148/192.168.74.148:2181 (org.apache.zookeeper.ClientCnxn)
[2024-08-01 17:43:50,013] INFO Session establishment complete on server 192.168.74.148/192.168.74.148:2181, sessionid = 0x10001a701040000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[2024-08-01 17:43:50,016] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2024-08-01 17:43:50,144] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
[2024-08-01 17:43:50,160] INFO Feature ZK node at path: /feature does not exist (kafka.server.FinalizedFeatureChangeListener)
[2024-08-01 17:43:50,161] INFO Cleared cache (kafka.server.FinalizedFeatureCache)
[2024-08-01 17:43:50,410] INFO Cluster ID = WsZyq4U-TA-CCwqn-fKWEA (kafka.server.KafkaServer)
[2024-08-01 17:43:50,414] WARN No meta.properties file under dir /kafka/kafka-logs-c85acd592a88/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2024-08-01 17:43:50,498] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = PLAINTEXT://192.168.74.148:9092
advertised.port = null
alter.config.policy.class.name = null
alter.log.dirs.replication.quota.window.num =11
alter.log.dirs.replication.quota.window.size.seconds =1
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads =10
broker.heartbeat.interval.ms =2000
broker.id =0
broker.id.generation.enable = true
broker.rack = null
broker.session.timeout.ms =9000
client.quota.callback.class = null
compression.type = producer
connection.failed.authentication.delay.ms =100
connections.max.idle.ms =600000
connections.max.reauth.ms =0
control.plane.listener.name = null
controlled.shutdown.enable = true
controlled.shutdown.max.retries =3
controlled.shutdown.retry.backoff.ms =5000
controller.listener.names = null
controller.quorum.append.linger.ms =25
controller.quorum.election.backoff.max.ms =1000
controller.quorum.election.timeout.ms =1000
controller.quorum.fetch.timeout.ms =2000
controller.quorum.request.timeout.ms =2000
controller.quorum.retry.backoff.ms =20
controller.quorum.voters =[]
controller.quota.window.num =11
controller.quota.window.size.seconds =1
controller.socket.timeout.ms =30000
create.topic.policy.class.name = null
default.replication.factor =1
delegation.token.expiry.check.interval.ms =3600000
delegation.token.expiry.time.ms =86400000
delegation.token.master.key = null
delegation.token.max.lifetime.ms =604800000
delegation.token.secret.key = null
delete.records.purgatory.purge.interval.requests =1
delete.topic.enable = true
fetch.max.bytes =57671680
fetch.purgatory.purge.interval.requests =1000
group.initial.rebalance.delay.ms =0
group.max.session.timeout.ms =1800000
group.max.size =2147483647
group.min.session.timeout.ms =6000
host.name =
initial.broker.registration.timeout.ms =60000
inter.broker.listener.name = null
inter.broker.protocol.version =2.8-IV1
kafka.metrics.polling.interval.secs =10
kafka.metrics.reporters =[]
leader.imbalance.check.interval.seconds =300
leader.imbalance.per.broker.percentage =10
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listeners = PLAINTEXT://0.0.0.0:9092
log.cleaner.backoff.ms =15000
log.cleaner.dedupe.buffer.size =134217728
log.cleaner.delete.retention.ms =86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor =0.9
log.cleaner.io.buffer.size =524288
log.cleaner.io.max.bytes.per.second =1.7976931348623157E308
log.cleaner.max.compaction.lag.ms =9223372036854775807
log.cleaner.min.cleanable.ratio =0.5
log.cleaner.min.compaction.lag.ms =0
log.cleaner.threads =1
log.cleanup.policy =[delete]
log.dir =/tmp/kafka-logs
log.dirs =/kafka/kafka-logs-c85acd592a88
log.flush.interval.messages =9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms =60000
log.flush.scheduler.interval.ms =9223372036854775807
log.flush.start.offset.checkpoint.interval.ms =60000
log.index.interval.bytes =4096
log.index.size.max.bytes =10485760
log.message.downconversion.enable = true
log.message.format.version =2.8-IV1
log.message.timestamp.difference.max.ms =9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes =-1
log.retention.check.interval.ms =300000
log.retention.hours =168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours =168
log.roll.jitter.hours =0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes =1073741824
log.segment.delete.delay.ms =60000
max.connection.creation.rate =2147483647
max.connections =2147483647
max.connections.per.ip =2147483647
max.connections.per.ip.overrides =
max.incremental.fetch.session.cache.slots =1000
message.max.bytes =1048588
metadata.log.dir = null
metric.reporters =[]
metrics.num.samples =2
metrics.recording.level = INFO
metrics.sample.window.ms =30000
min.insync.replicas =1
node.id =-1
num.io.threads =8
num.network.threads =3
num.partitions =1
num.recovery.threads.per.data.dir =1
num.replica.alter.log.dirs.threads = null
num.replica.fetchers =1
offset.metadata.max.bytes =4096
offsets.commit.required.acks =-1
offsets.commit.timeout.ms =5000
offsets.load.buffer.size =5242880
offsets.retention.check.interval.ms =600000
offsets.retention.minutes =10080
offsets.topic.compression.codec =0
offsets.topic.num.partitions =50
offsets.topic.replication.factor =1
offsets.topic.segment.bytes =104857600
password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
password.encoder.iterations =4096
password.encoder.key.length =128
password.encoder.keyfactory.algorithm = null
password.encoder.old.secret = null
password.encoder.secret = null
port =9092
principal.builder.class = null
process.roles =[]
producer.purgatory.purge.interval.requests =1000
queued.max.request.bytes =-1
queued.max.requests =500
quota.consumer.default=9223372036854775807
quota.producer.default=9223372036854775807
quota.window.num =11
quota.window.size.seconds =1
replica.fetch.backoff.ms =1000
replica.fetch.max.bytes =1048576
replica.fetch.min.bytes =1
replica.fetch.response.max.bytes =10485760
replica.fetch.wait.max.ms =500
replica.high.watermark.checkpoint.interval.ms =5000
replica.lag.time.max.ms =30000
replica.selector.class = null
replica.socket.receive.buffer.bytes =65536
replica.socket.timeout.ms =30000
replication.quota.window.num =11
replication.quota.window.size.seconds =1
request.timeout.ms =30000
reserved.broker.max.id =1000
sasl.client.callback.handler.class = null
sasl.enabled.mechanisms =[GSSAPI]
sasl.jaas.config = null
sasl.kerberos.kinit.cmd =/usr/bin/kinit
sasl.kerberos.min.time.before.relogin =60000
sasl.kerberos.principal.to.local.rules =[DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter =0.05
sasl.kerberos.ticket.renew.window.factor =0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds =300
sasl.login.refresh.min.period.seconds =60
sasl.login.refresh.window.factor =0.8
sasl.login.refresh.window.jitter =0.05
sasl.mechanism.controller.protocol = GSSAPI
sasl.mechanism.inter.broker.protocol = GSSAPI
sasl.server.callback.handler.class = null
security.inter.broker.protocol = PLAINTEXT
security.providers = null
socket.connection.setup.timeout.max.ms =30000
socket.connection.setup.timeout.ms =10000
socket.receive.buffer.bytes =102400
socket.request.max.bytes =104857600
socket.send.buffer.bytes =102400
ssl.cipher.suites =[]
ssl.client.auth = none
ssl.enabled.protocols =[TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.principal.mapping.rules = DEFAULT
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.abort.timed.out.transaction.cleanup.interval.ms =10000
transaction.max.timeout.ms =900000
transaction.remove.expired.transaction.cleanup.interval.ms =3600000
transaction.state.log.load.buffer.size =5242880
transaction.state.log.min.isr =1
transaction.state.log.num.partitions =50
transaction.state.log.replication.factor =1
transaction.state.log.segment.bytes =104857600
transactional.id.expiration.ms =604800000
unclean.leader.election.enable = false
zookeeper.clientCnxnSocket = null
zookeeper.connect =192.168.74.148:2181
zookeeper.connection.timeout.ms =18000
zookeeper.max.in.flight.requests =10
zookeeper.session.timeout.ms =18000
zookeeper.set.acl = false
zookeeper.ssl.cipher.suites = null
zookeeper.ssl.client.enable = false
zookeeper.ssl.crl.enable = false
zookeeper.ssl.enabled.protocols = null
zookeeper.ssl.endpoint.identification.algorithm = HTTPS
zookeeper.ssl.keystore.location = null
zookeeper.ssl.keystore.password = null
zookeeper.ssl.keystore.type = null
zookeeper.ssl.ocsp.enable = false
zookeeper.ssl.protocol = TLSv1.2
zookeeper.ssl.truststore.location = null
zookeeper.ssl.truststore.password = null
zookeeper.ssl.truststore.type = null
zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2024-08-01 17:43:50,623] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2024-08-01 17:43:50,624] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2024-08-01 17:43:50,626] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2024-08-01 17:43:50,629] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2024-08-01 17:43:50,650] INFO Log directory /kafka/kafka-logs-c85acd592a88 not found, creating it. (kafka.log.LogManager)
[2024-08-01 17:43:50,673] INFO Loading logs from log dirs ArraySeq(/kafka/kafka-logs-c85acd592a88) (kafka.log.LogManager)
[2024-08-01 17:43:50,676] INFO Attempting recovery for all logs in /kafka/kafka-logs-c85acd592a88 since no clean shutdown file was found (kafka.log.LogManager)
[2024-08-01 17:43:50,683] INFO Loaded 0 logs in 11ms. (kafka.log.LogManager)
[2024-08-01 17:43:50,684] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2024-08-01 17:43:50,687] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2024-08-01 17:43:51,905] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas)
[2024-08-01 17:43:51,910] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2024-08-01 17:43:51,976] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
[2024-08-01 17:43:52,089] INFO [broker-0-to-controller-send-thread]: Starting (kafka.server.BrokerToControllerRequestThread)
[2024-08-01 17:43:52,115] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,115] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,117] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,118] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,150] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2024-08-01 17:43:52,201] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient)
[2024-08-01 17:43:52,229] INFO Stat of the created znode at /brokers/ids/0 is: 25,25,1722505432222,1722505432222,1,0,0,72059410826133504,212,0,25 (kafka.zk.KafkaZkClient)
[2024-08-01 17:43:52,231] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://192.168.74.148:9092, czxid (broker epoch): 25 (kafka.zk.KafkaZkClient)
[2024-08-01 17:43:52,337] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,376] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,377] INFO Successfully created /controller_epoch with initial epoch 0 (kafka.zk.KafkaZkClient)
[2024-08-01 17:43:52,378] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,401] INFO Feature ZK node created at path: /feature (kafka.server.FinalizedFeatureChangeListener)
[2024-08-01 17:43:52,416] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2024-08-01 17:43:52,425] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2024-08-01 17:43:52,500] INFO Updated cache from existing <empty> to latest FinalizedFeaturesAndEpoch(features=Features{}, epoch=0). (kafka.server.FinalizedFeatureCache)
[2024-08-01 17:43:52,546] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0, blockStartProducerId:0, blockEndProducerId:999) by writing to Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager)
[2024-08-01 17:43:52,547] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2024-08-01 17:43:52,554] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2024-08-01 17:43:52,554] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2024-08-01 17:43:52,670] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2024-08-01 17:43:52,891] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2024-08-01 17:43:52,933] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Starting socket server acceptors and processors (kafka.network.SocketServer)
[2024-08-01 17:43:52,960] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started data-plane acceptor and processor(s) for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
[2024-08-01 17:43:52,961] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started socket server acceptors and processors (kafka.network.SocketServer)
[2024-08-01 17:43:52,980] INFO Kafka version: 2.8.1 (org.apache.kafka.common.utils.AppInfoParser)
[2024-08-01 17:43:52,980] INFO Kafka commitId: 839b886f9b732b15 (org.apache.kafka.common.utils.AppInfoParser)
[2024-08-01 17:43:52,980] INFO Kafka startTimeMs: 1722505432961 (org.apache.kafka.common.utils.AppInfoParser)
[2024-08-01 17:43:52,985] INFO [KafkaServer id=0] started (kafka.server.KafkaServer)
[2024-08-01 17:43:53,127] INFO [broker-0-to-controller-send-thread]: Recorded new controller, from now on will use broker 192.168.74.148:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread)
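The "[KafkaServer id=0] started" line confirms the broker is up and registered under /brokers/ids/0 in ZooKeeper. A quick end-to-end smoke test is to create a topic and push a message through it with the console tools shipped in the image; a minimal sketch, assuming the Kafka scripts sit under /opt/kafka/bin (as in the wurstmeister image) and using a throwaway topic named test:

# create a single-partition topic on the broker
docker exec -it kafka /opt/kafka/bin/kafka-topics.sh --create --topic test \
  --bootstrap-server 192.168.74.148:9092 --partitions 1 --replication-factor 1

# type a few lines, then Ctrl+C to exit the producer
docker exec -it kafka /opt/kafka/bin/kafka-console-producer.sh \
  --broker-list 192.168.74.148:9092 --topic test

# read the lines back from the beginning of the topic
docker exec -it kafka /opt/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server 192.168.74.148:9092 --topic test --from-beginning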