# kafka
## prepare
- k8s is ready
- argocd is ready and logged in
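A quick sanity check for both items, assuming `kubectl` and the `argocd` CLI already point at the target cluster and ArgoCD instance:

```bash
# cluster reachable?
kubectl cluster-info

# argocd CLI logged in? (prints the current user and login state)
argocd account get-user-info
```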
## installation
- prepare `kafka.yaml`, picking one of the variants below
- kraft-minimal
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kafka
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://charts.bitnami.com/bitnami
    chart: kafka
    targetRevision: 28.0.3
    helm:
      releaseName: kafka
      values: |
        image:
          registry: docker.io
        controller:
          replicaCount: 1
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=1
            offsets.topic.replication.factor=1
            transaction.state.log.replication.factor=1
        broker:
          replicaCount: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=1
            offsets.topic.replication.factor=1
            transaction.state.log.replication.factor=1
        externalAccess:
          enabled: false
          autoDiscovery:
            enabled: false
            image:
              registry: docker.io
        volumePermissions:
          enabled: false
          image:
            registry: docker.io
        metrics:
          kafka:
            enabled: false
            image:
              registry: docker.io
          jmx:
            enabled: false
            image:
              registry: docker.io
        provisioning:
          enabled: false
        kraft:
          enabled: true
        zookeeper:
          enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: database
```

- zookeeper-minimal

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kafka
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://charts.bitnami.com/bitnami
    chart: kafka
    targetRevision: 28.0.3
    helm:
      releaseName: kafka
      values: |
        image:
          registry: docker.io
        controller:
          replicaCount: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=1
            offsets.topic.replication.factor=1
            transaction.state.log.replication.factor=1
        broker:
          replicaCount: 1
          minId: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=1
            offsets.topic.replication.factor=1
            transaction.state.log.replication.factor=1
        externalAccess:
          enabled: false
          autoDiscovery:
            enabled: false
            image:
              registry: docker.io
        volumePermissions:
          enabled: false
          image:
            registry: docker.io
        metrics:
          kafka:
            enabled: false
            image:
              registry: docker.io
          jmx:
            enabled: false
            image:
              registry: docker.io
        provisioning:
          enabled: false
        kraft:
          enabled: false
        zookeeper:
          enabled: true
          image:
            registry: docker.io
          replicaCount: 1
          auth:
            client:
              enabled: false
            quorum:
              enabled: false
          persistence:
            enabled: false
          volumePermissions:
            enabled: false
            image:
              registry: docker.io
          metrics:
            enabled: false
          tls:
            client:
              enabled: false
            quorum:
              enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: database
```

- kraft

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kafka
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://charts.bitnami.com/bitnami
    chart: kafka
    targetRevision: 28.0.3
    helm:
      releaseName: kafka
      values: |
        image:
          registry: docker.io
        controller:
          replicaCount: 3
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=3
            offsets.topic.replication.factor=3
            transaction.state.log.replication.factor=3
        broker:
          replicaCount: 3
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=3
            offsets.topic.replication.factor=3
            transaction.state.log.replication.factor=3
        externalAccess:
          enabled: false
          autoDiscovery:
            enabled: false
            image:
              registry: docker.io
        volumePermissions:
          enabled: false
          image:
            registry: docker.io
        metrics:
          kafka:
            enabled: false
            image:
              registry: docker.io
          jmx:
            enabled: false
            image:
              registry: docker.io
        provisioning:
          enabled: false
        kraft:
          enabled: true
        zookeeper:
          enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: database
```

- zookeeper

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kafka
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://charts.bitnami.com/bitnami
    chart: kafka
    targetRevision: 28.0.3
    helm:
      releaseName: kafka
      values: |
        image:
          registry: docker.io
        controller:
          replicaCount: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=3
            offsets.topic.replication.factor=3
            transaction.state.log.replication.factor=3
        broker:
          replicaCount: 3
          minId: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=3
            offsets.topic.replication.factor=3
            transaction.state.log.replication.factor=3
        externalAccess:
          enabled: false
          autoDiscovery:
            enabled: false
            image:
              registry: docker.io
        volumePermissions:
          enabled: false
          image:
            registry: docker.io
        metrics:
          kafka:
            enabled: false
            image:
              registry: docker.io
          jmx:
            enabled: false
            image:
              registry: docker.io
        provisioning:
          enabled: false
        kraft:
          enabled: false
        zookeeper:
          enabled: true
          image:
            registry: docker.io
          replicaCount: 1
          auth:
            client:
              enabled: false
            quorum:
              enabled: false
          persistence:
            enabled: false
          volumePermissions:
            enabled: false
            image:
              registry: docker.io
          metrics:
            enabled: false
          tls:
            client:
              enabled: false
            quorum:
              enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: database
```

- zookeeper-minimal-plaintext

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kafka
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://charts.bitnami.com/bitnami
    chart: kafka
    targetRevision: 28.0.3
    helm:
      releaseName: kafka
      values: |
        image:
          registry: docker.io
        listeners:
          client:
            protocol: PLAINTEXT
          interbroker:
            protocol: PLAINTEXT
        controller:
          replicaCount: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=1
            offsets.topic.replication.factor=1
            transaction.state.log.replication.factor=1
        broker:
          replicaCount: 1
          minId: 0
          persistence:
            enabled: false
          logPersistence:
            enabled: false
          extraConfig: |
            message.max.bytes=5242880
            default.replication.factor=1
            offsets.topic.replication.factor=1
            transaction.state.log.replication.factor=1
        externalAccess:
          enabled: false
          autoDiscovery:
            enabled: false
            image:
              registry: docker.io
        volumePermissions:
          enabled: false
          image:
            registry: docker.io
        metrics:
          kafka:
            enabled: false
            image:
              registry: docker.io
          jmx:
            enabled: false
            image:
              registry: docker.io
        provisioning:
          enabled: false
        kraft:
          enabled: false
        zookeeper:
          enabled: true
          image:
            registry: docker.io
          replicaCount: 1
          auth:
            client:
              enabled: false
            quorum:
              enabled: false
          persistence:
            enabled: false
          volumePermissions:
            enabled: false
            image:
              registry: docker.io
          metrics:
            enabled: false
          tls:
            client:
              enabled: false
            quorum:
              enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: database
```

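To compare these variants against everything else the chart exposes, you can dump the default values for the pinned version (a side note; it assumes the Bitnami repo is added to your local Helm client):

```bash
# register the repo once, then print the chart's default values
helm repo add bitnami https://charts.bitnami.com/bitnami
helm show values bitnami/kafka --version 28.0.3
```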
- apply to k8s
```bash
kubectl -n argocd apply -f kafka.yaml
```
- sync with argocd
```bash
argocd app sync argocd/kafka
```
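After the sync, one way to confirm the rollout (a sketch; the selector relies on the Bitnami chart's default `app.kubernetes.io/name` label):

```bash
# block until ArgoCD reports the application healthy
argocd app wait argocd/kafka --health

# pods land in the `database` namespace set in the manifest
kubectl -n database get pods -l app.kubernetes.io/name=kafka
```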
## setup kafka-client-tools
- create the `client-properties` secret, matching the listener protocol of the variant you installed
- SASL_PLAINTEXT
```bash
kubectl -n database \
  create secret generic client-properties \
  --from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
```
- PLAINTEXT

```bash
kubectl -n database \
  create secret generic client-properties \
  --from-literal=client.properties="security.protocol=PLAINTEXT"
```
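Whichever variant you created, you can print the rendered `client.properties` back out of the secret (the `\.` escapes the dot in the key name for jsonpath):

```bash
kubectl -n database get secret client-properties \
  -o jsonpath='{.data.client\.properties}' | base64 -d
```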
- prepare `kafka-client-tools.yaml`
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-client-tools
  labels:
    app: kafka-client-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-client-tools
  template:
    metadata:
      labels:
        app: kafka-client-tools
    spec:
      volumes:
        - name: client-properties
          secret:
            secretName: client-properties
      containers:
        - name: kafka-client-tools
          image: docker.io/bitnami/kafka:3.6.2
          volumeMounts:
            - name: client-properties
              mountPath: /bitnami/custom/client.properties
              subPath: client.properties
              readOnly: true
          env:
            - name: BOOTSTRAP_SERVER
              value: kafka.database.svc.cluster.local:9092
            - name: CLIENT_CONFIG_FILE
              value: /bitnami/custom/client.properties
            - name: ZOOKEEPER_CONNECT
              value: kafka-zookeeper.database.svc.cluster.local:2181
          command:
            - tail
            - -f
            - /etc/hosts
          imagePullPolicy: IfNotPresent
```
- apply to k8s
```bash
kubectl -n database apply -f kafka-client-tools.yaml
```
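Before running any client commands, wait for the tools pod to become ready:

```bash
# wait for the client-tools deployment to finish rolling out
kubectl -n database rollout status deployment/kafka-client-tools
```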
## check with client
- list topics
```bash
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
```
- create topic
```bash
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
```
- describe topic
```bash
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
```
- produce messages
```bash
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
```
- consume messages
```bash
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
```
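Two optional follow-ups once the round trip works, reusing the standard Kafka CLI tools already shipped in the client pod: list the consumer groups, and clean up the test topic when you are done.

```bash
# list consumer groups known to the cluster
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'kafka-consumer-groups.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'

# delete the test topic
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
  'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --delete --topic test-topic'
```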