Skip to main content

kafka

ben.wangz · About 1 min

kafka

prepare

  1. k8s is ready
  2. argocd is ready and logged in

installation

  1. prepare kafka.yaml
    • kraft-minimal
      # Argo CD Application installing the Bitnami Kafka Helm chart in
      # KRaft (ZooKeeper-less) mode: one combined controller node, zero
      # dedicated brokers, no persistence — a minimal setup for testing.
      # NOTE: comments are deliberately kept out of the `values: |` block
      # scalar below, since its exact text is what gets handed to Helm.
      apiVersion: argoproj.io/v1alpha1
      kind: Application
      metadata:
        name: kafka
      spec:
        syncPolicy:
          syncOptions:
          # Create the destination namespace ("database") on first sync.
          - CreateNamespace=true
        project: default
        source:
          repoURL: https://charts.bitnami.com/bitnami
          chart: kafka
          # Chart version is pinned; the values below follow this
          # version's schema (controller/broker split, kraft toggle).
          targetRevision: 28.0.3
          helm:
            releaseName: kafka
            # Inline chart values, passed verbatim to Helm:
            # - controller.replicaCount=1 / broker.replicaCount=0 with
            #   kraft.enabled=true and zookeeper.enabled=false — single-node KRaft;
            # - all persistence disabled — data is lost on pod restart;
            # - extraConfig sets replication factors to 1 to match one node;
            # - externalAccess/metrics/provisioning all disabled.
            values: |
              image:
                registry: docker.io
              controller:
                replicaCount: 1
                persistence:
                  enabled: false
                logPersistence:
                  enabled: false
                extraConfig: |
                  message.max.bytes=5242880
                  default.replication.factor=1
                  offsets.topic.replication.factor=1
                  transaction.state.log.replication.factor=1
              broker:
                replicaCount: 0
                persistence:
                  enabled: false
                logPersistence:
                  enabled: false
                extraConfig: |
                  message.max.bytes=5242880
                  default.replication.factor=1
                  offsets.topic.replication.factor=1
                  transaction.state.log.replication.factor=1
              externalAccess:
                enabled: false
                autoDiscovery:
                  enabled: false
                  image:
                    registry: docker.io
              volumePermissions:
                enabled: false
                image:
                  registry: docker.io
              metrics:
                kafka:
                  enabled: false
                  image:
                    registry: docker.io
                jmx:
                  enabled: false
                  image:
                    registry: docker.io
              provisioning:
                enabled: false
              kraft:
                enabled: true
              zookeeper:
                enabled: false
        # Deploy into the local cluster, namespace "database" (created by
        # the CreateNamespace sync option above).
        destination:
          server: https://kubernetes.default.svc
          namespace: database
      
      
  2. apply to k8s
    • kubectl -n argocd apply -f kafka.yaml
      
  3. sync by argocd
    • argocd app sync argocd/kafka
      

setup kafka-client-tools

  1. create client-properties
    • SASL_PLAINTEXT
      kubectl -n database \
          create secret generic client-properties \
          --from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
      
  2. prepare kafka-client-tools.yaml
    • apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: kafka-client-tools
        labels:
          app: kafka-client-tools
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: kafka-client-tools
        template:
          metadata:
            labels:
              app: kafka-client-tools
          spec:
            volumes:
            # SASL client credentials created by the "create client-properties"
            # step above; mounted read-only into the container.
            - name: client-properties
              secret:
                secretName: client-properties
            containers:
            - name: kafka-client-tools
              # Bitnami Kafka image — used here only for its bundled CLI
              # tools (kafka-topics.sh, console producer/consumer); the tag
              # matches the Kafka version deployed by the chart above.
              image: docker.io/bitnami/kafka:3.6.2
              volumeMounts:
              - name: client-properties
                mountPath: /bitnami/custom/client.properties
                subPath: client.properties
                readOnly: true
              env:
              # Convenience variables consumed by the `kubectl exec` commands
              # in the "check with client" section.
              - name: BOOTSTRAP_SERVER
                value: kafka.database.svc.cluster.local:9092
              - name: CLIENT_CONFIG_FILE
                value: /bitnami/custom/client.properties
              # Fix: the former ZOOKEEPER_CONNECT variable was removed. The
              # chart is installed with kraft.enabled=true and
              # zookeeper.enabled=false, so no kafka-zookeeper service exists
              # in this cluster, and no documented command referenced it.
              command:
              # Keep the pod alive indefinitely so tools can be run via exec.
              - tail
              - -f
              - /etc/hosts
              imagePullPolicy: IfNotPresent
      
      
  3. apply to k8s
    • kubectl -n database apply -f kafka-client-tools.yaml
      

check with client

  • list topics
    • kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
          'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
      
  • create topic
    • kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
          'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
      
  • describe topic
    • kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
          'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
      
  • produce message
    • kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
          'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
      
  • consume message
    • kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
          'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'