TiDB

prepare

  1. a k8s cluster is ready
  2. argocd is ready and the argocd CLI is logged in
  3. a local storage class is ready (used as the default storage class unless storageClassName is set)

installation

  1. Create a namespace
    • kubectl get namespace tidb-cluster > /dev/null 2>&1 \
        || kubectl create namespace tidb-cluster
      
  2. install TiDB Operator CRDs
    • prepare tidb-operator-crd.yaml
      • apiVersion: argoproj.io/v1alpha1
        kind: Application
        metadata:
          name: tidb-operator-crd
          namespace: argocd
        spec:
          syncPolicy:
            syncOptions:
              - ServerSideApply=true 
          project: default
          source:
            repoURL: https://gitee.com/ben-wangz-mirror/tidb-operator.git
            targetRevision: v1.6.1
            path: manifests/crd
            directory:
              recurse: true
          destination:
            server: https://kubernetes.default.svc
            namespace: default
        
        
    • apply to k8s
      • kubectl -n argocd apply -f tidb-operator-crd.yaml
        argocd app sync argocd/tidb-operator-crd
        argocd app wait argocd/tidb-operator-crd
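
    • (optional) verify the CRDs are registered before installing the operator
      • kubectl get crd tidbclusters.pingcap.com
        # list every CRD shipped by tidb-operator
        kubectl get crd | grep pingcap.com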
        
  3. install TiDB Operator
    • prepare tidb-operator.yaml
      • apiVersion: argoproj.io/v1alpha1
        kind: Application
        metadata:
          name: tidb-operator
        spec:
          syncPolicy:
            syncOptions:
            - CreateNamespace=true
          project: default
          source:
            repoURL: https://charts.pingcap.org/
            chart: tidb-operator
            targetRevision: v1.6.1
            helm:
              releaseName: tidb
              valuesObject:
                timezone: Asia/Shanghai
                operatorImage: m.daocloud.io/docker.io/pingcap/tidb-operator:v1.6.1
                tidbBackupManagerImage: m.daocloud.io/docker.io/pingcap/tidb-backup-manager:v1.6.1
                scheduler:
                  kubeSchedulerImageName: m.daocloud.io/registry.k8s.io/kube-scheduler
                advancedStatefulset:
                  image: m.daocloud.io/pingcap/advanced-statefulset:v0.7.0
          destination:
            server: https://kubernetes.default.svc
            namespace: tidb-admin
        
        
    • apply to k8s
      • kubectl -n argocd apply -f tidb-operator.yaml
        argocd app sync argocd/tidb-operator
        argocd app wait argocd/tidb-operator
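
    • (optional) verify the operator is running; the chart above deploys into the tidb-admin namespace
      • kubectl -n tidb-admin get pods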
        
  4. create and initialize the TiDB cluster
    • prepare a secret named basic-tidb-credentials to store the credentials of the tidb root user
      • kubectl -n tidb-cluster create secret generic basic-tidb-credentials --from-literal=root=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
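
    • the root password above is generated randomly; to read it back from the secret later
      • kubectl -n tidb-cluster get secret basic-tidb-credentials -o jsonpath='{.data.root}' | base64 -d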
        
    • prepare tidb-init-sql.configmap.yaml
      • apiVersion: v1
        kind: ConfigMap
        metadata:
          name: tidb-init-sql
        data:
          init-sql: |-
            -- create database
            CREATE DATABASE IF NOT EXISTS shopping;
            -- create users table
            CREATE TABLE IF NOT EXISTS shopping.users (id INT PRIMARY KEY AUTO_INCREMENT, name VARCHAR(50) NOT NULL, age INT, email VARCHAR(100) UNIQUE, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP);
            -- batch insert multiple users records
            INSERT INTO shopping.users (name, age, email) VALUES ('Bob', 25, 'bob@example.com'), ('Charlie', 35, 'charlie@example.com'), ('David', 28, 'david@example.com'), ('Eve', 32, 'eve@example.com'), ('Frank', 40, 'frank@example.com');
            -- create orders table
            CREATE TABLE IF NOT EXISTS shopping.orders (order_id INT PRIMARY KEY AUTO_INCREMENT, user_id INT, product_name VARCHAR(50), amount DECIMAL(10,2), FOREIGN KEY (user_id) REFERENCES shopping.users(id));
            -- batch insert orders records
            INSERT INTO shopping.orders (user_id, product_name, amount) VALUES ((SELECT id FROM shopping.users WHERE name = 'Bob'), 'Laptop', 1200.00), ((SELECT id FROM shopping.users WHERE name = 'Charlie'), 'Smartphone', 800.00), ((SELECT id FROM shopping.users WHERE name = 'David'), 'Headphones', 150.00), ((SELECT id FROM shopping.users WHERE name = 'Eve'), 'Tablet', 500.00), ((SELECT id FROM shopping.users WHERE name = 'Frank'), 'Camera', 900.00);
        
        
    • prepare tidb-initializer.yaml
      • apiVersion: pingcap.com/v1alpha1
        kind: TidbInitializer
        metadata:
          name: initialize-basic-tidb
        spec:
          image: m.daocloud.io/docker.io/tnir/mysqlclient
          imagePullPolicy: IfNotPresent
          cluster:
            name: basic
          initSqlConfigMap: tidb-init-sql
          passwordSecret: "basic-tidb-credentials"
          timezone: "Asia/Shanghai"
          resources:
            limits:
              cpu: 300m
              memory: 500Mi
            requests:
              cpu: 100m
              memory: 50Mi
        
        
    • prepare tidb-cluster.yaml
      • apiVersion: pingcap.com/v1alpha1
        kind: TidbCluster
        metadata:
          name: basic
        spec:
          version: v8.5.0
          timezone: Asia/Shanghai
          pvReclaimPolicy: Retain
          enableDynamicConfiguration: true
          configUpdateStrategy: RollingUpdate
          discovery: {}
          helper:
            image: m.daocloud.io/docker.io/library/alpine:3.16.0
          pd:
            baseImage: m.daocloud.io/docker.io/pingcap/pd
            maxFailoverCount: 0
            replicas: 1
            # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
            # storageClassName: local-storage
            requests:
              storage: "1Gi"
              cpu: "200m"
              memory: "512Mi"
            limits:
              cpu: "500m"
              memory: "1Gi"
            config: {}
          tikv:
            baseImage: m.daocloud.io/docker.io/pingcap/tikv
            maxFailoverCount: 0
            # If only 1 TiKV is deployed, the TiKV region leader
            # cannot be transferred during upgrade, so we have
            # to configure a short timeout
            evictLeaderTimeout: 1m
            replicas: 1
            # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
            # storageClassName: local-storage
            requests:
              storage: "10Gi"
              cpu: "2000m"
              memory: "4Gi"
            limits:
              cpu: "3000m"
              memory: "6Gi"
            config:
              storage:
                # In basic examples, we set this to avoid using too much storage.
                reserve-space: "512MB"
              rocksdb:
                # In basic examples, we set this to avoid the following error in some Kubernetes clusters:
                # "the maximum number of open file descriptors is too small, got 1024, expect greater or equal to 82920"
                max-open-files: 256
              raftdb:
                max-open-files: 256
          tidb:
            baseImage: m.daocloud.io/docker.io/pingcap/tidb
            maxFailoverCount: 0
            replicas: 1
            service:
              type: ClusterIP
            requests:
              cpu: "200m"
              memory: "512Mi"
            limits:
              cpu: "500m"
              memory: "1Gi"
            config: {}
        
        
    • apply to k8s
      • kubectl -n tidb-cluster apply -f tidb-init-sql.configmap.yaml
        kubectl -n tidb-cluster apply -f tidb-cluster.yaml
        kubectl -n tidb-cluster apply -f tidb-initializer.yaml
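
    • the cluster takes a few minutes to come up; follow the rollout by watching the pods and the TidbCluster resource
      • kubectl -n tidb-cluster get pods -w
        kubectl -n tidb-cluster get tidbcluster basic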
        
  5. (optional) install TiDB Dashboard components
    • prepare tidb-dashboard.yaml
      • apiVersion: pingcap.com/v1alpha1
        kind: TidbDashboard
        metadata:
          name: basic
        spec:
          baseImage: m.daocloud.io/docker.io/pingcap/tidb-dashboard
          version: latest
        
          ## tidb cluster to be monitored
          ## ** only one tidb cluster can be monitored for now **
          clusters:
            - name: basic
        
          ## describes the compute resource requirements and limits.
          ## Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
          requests:
            cpu: 100m
            memory: 512Mi
            storage: 10Gi
          # limits:
          #   cpu: 2000m
          #   memory: 2Gi
        
        
    • apply to k8s
      • kubectl -n tidb-cluster apply -f tidb-dashboard.yaml
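
    • (optional) access the dashboard locally; a minimal sketch, assuming the operator exposes it through a service named basic-tidb-dashboard-exposed on port 12333
      • kubectl -n tidb-cluster port-forward svc/basic-tidb-dashboard-exposed 12333:12333
        # then open http://localhost:12333 in a browser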
        
  6. install mysql-client
    • prepare mysql-client.yaml
      • apiVersion: apps/v1
        kind: Deployment
        metadata:
          name: mysql-client
        spec:
          replicas: 1
          selector:
            matchLabels:
              app: mysql-client
          template:
            metadata:
              labels:
                app: mysql-client
            spec:
              containers:
              - name: mysql-client
                image: m.daocloud.io/docker.io/library/mysql:9.3.0
                command: 
                  - /usr/bin/sleep
                args:
                  - inf
                env:
                  - name: MYSQL_SERVICE_IP
                    value: basic-tidb.tidb-cluster.svc.cluster.local
                  - name: MYSQL_SERVICE_PORT
                    value: "4000"
                  - name: MYSQL_ROOT_PASSWORD
                    valueFrom:
                      secretKeyRef:
                        name: basic-tidb-credentials
                        key: root
                        optional: false
        
        
    • apply to k8s
      • kubectl -n tidb-cluster apply -f mysql-client.yaml
        
    • exec
      • kubectl -n tidb-cluster exec -it deployment/mysql-client -- bash
        # mysql -h $MYSQL_SERVICE_IP -P $MYSQL_SERVICE_PORT -u root -p$MYSQL_ROOT_PASSWORD
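
    • (optional) run a one-shot query without entering the shell; the shopping database created by the initializer should show up
      • kubectl -n tidb-cluster exec deployment/mysql-client -- \
            sh -c 'MYSQL_PWD=$MYSQL_ROOT_PASSWORD mysql -h $MYSQL_SERVICE_IP -P $MYSQL_SERVICE_PORT -u root -e "SHOW DATABASES;"'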
        

simple checks

  1. check the status of the tidb cluster
    • kubectl -n tidb-cluster get tidbcluster
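
    • the component pods carry labels set by the operator (the same labels used for the PVC cleanup during uninstallation)
      • kubectl -n tidb-cluster get pods -l app.kubernetes.io/managed-by=tidb-operator,app.kubernetes.io/instance=basic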
      
  2. run queries via tidb (mysql interface)
    • prepare query.job.yaml
      • apiVersion: batch/v1
        kind: Job
        metadata:
          name: mysql-query-job
        spec:
          template:
            spec:
              containers:
              - name: mysql-client
                image: m.daocloud.io/docker.io/library/mysql:9.3.0
                command: ['sh', '-c']
                args:
                  - |
                    export MYSQL_PWD=$MYSQL_ROOT_PASSWORD
                    mysql -h $MYSQL_SERVICE_IP -P $MYSQL_SERVICE_PORT -u root -e "
                    USE shopping;
                    SELECT users.name, orders.product_name, orders.amount
                    FROM users
                    JOIN orders ON users.id = orders.user_id;
                    "
                env:
                  - name: MYSQL_SERVICE_IP
                    value: basic-tidb.tidb-cluster.svc.cluster.local
                  - name: MYSQL_SERVICE_PORT
                    value: "4000"
                  - name: MYSQL_ROOT_PASSWORD
                    valueFrom:
                      secretKeyRef:
                        name: basic-tidb-credentials
                        key: root
                        optional: false
              restartPolicy: Never
          backoffLimit: 4
        
        
    • apply to k8s
      • kubectl -n tidb-cluster apply -f query.job.yaml
        kubectl -n tidb-cluster wait --for=condition=complete job/mysql-query-job
        kubectl -n tidb-cluster logs -l job-name=mysql-query-job
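
    • (optional) clean up the finished job
      • kubectl -n tidb-cluster delete job mysql-query-job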
        

main operations

  1. scale in/out (see the sketch after this list)
  2. monitor and alerts
  3. benchmarks for performance evaluation
  4. backup and restore
  5. import and export
  6. cdc
  7. rbac for mysql interface
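
A minimal sketch for scale in/out, assuming the basic cluster deployed on this page: scaling is driven by the replicas fields of the TidbCluster spec, which TiDB Operator reconciles, so either edit tidb-cluster.yaml and re-apply it, or patch the resource directly.

  • # scale TiKV from 1 to 3 replicas and watch the new pods come up
    kubectl -n tidb-cluster patch tidbcluster basic --type merge \
        -p '{"spec": {"tikv": {"replicas": 3}}}'
    kubectl -n tidb-cluster get pods -l app.kubernetes.io/instance=basic -w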

uninstallation

  1. uninstall TiDB cluster
    • kubectl -n tidb-cluster delete -f tidb-cluster.yaml
      kubectl -n tidb-cluster delete -f tidb-initializer.yaml
      kubectl -n tidb-cluster delete -f tidb-dashboard.yaml
      
    • kubectl -n tidb-cluster delete secret basic-tidb-credentials
      kubectl -n tidb-cluster delete configmap tidb-init-sql
      
    • kubectl -n tidb-cluster delete pvc -l app.kubernetes.io/managed-by=tidb-operator,app.kubernetes.io/instance=basic
      #kubectl -n tidb-cluster delete pvc -l app.kubernetes.io/managed-by=tidb-operator
      
    • kubectl delete namespace tidb-cluster
      
  2. uninstall TiDB Operator
    • kubectl -n argocd delete -f tidb-operator.yaml
      
  3. uninstall TiDB Operator CRDs
    • kubectl -n argocd delete -f tidb-operator-crd.yaml