Skip to main content

kubeadm

ben.wangz — About 2 min

kubeadm

references

  • https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/
  • https://docs.fedoraproject.org/en-US/quick-docs/using-kubernetes/

prepare

  1. 1 node with fedora 38 (use fedora 39 if you are playing with Aliyun ECS)
  2. root account required
  3. install necessary packages for each node
    • dnf -y install iptables iproute-tc
      
    • enable cri-o repo (set KUBE_VERSION first — it is used here and in the next step)
      • for-fedora-38
        KUBE_VERSION=1.28
        dnf -y module enable cri-o:${KUBE_VERSION}
        
    • enable kubernetes repo
      • KUBE_VERSION=1.28
        cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
        [kubernetes]
        name=Kubernetes
        baseurl=https://pkgs.k8s.io/core:/stable:/v${KUBE_VERSION}/rpm/
        enabled=1
        gpgcheck=1
        gpgkey=https://pkgs.k8s.io/core:/stable:/v${KUBE_VERSION}/rpm/repodata/repomd.xml.key
        exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
        EOF
        
    • install components
      • dnf -y install cri-o kubeadm kubelet kubectl --disableexcludes=kubernetes
        

configure node

  1. change hostname of master node
    • hostnamectl set-hostname k8s-master
      
  2. configure /etc/hosts
    • # change ip according to your own machine
      cat >> /etc/hosts <<EOF
      192.168.1.109 k8s-master
      EOF
      
  3. configure ntp
    • dnf install -y chrony \
          && systemctl enable chronyd \
          && systemctl start chronyd \
          && chronyc sources \
          && chronyc tracking \
          && timedatectl set-timezone 'Asia/Shanghai'
      
  4. turn off selinux, firewalld and swap of each node
    • sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config && setenforce 0
      
      systemctl stop firewalld && systemctl disable firewalld
      
      systemctl stop swap-create@zram0
      dnf remove -y zram-generator-defaults
      swapoff -a
      
      
  5. configure forwarding IPv4
    • cat <<EOF | tee /etc/modules-load.d/k8s.conf
      overlay
      br_netfilter
      EOF
      
      modprobe overlay
      modprobe br_netfilter
      
      # sysctl params required by setup, params persist across reboots
      cat <<EOF | tee /etc/sysctl.d/k8s.conf
      net.bridge.bridge-nf-call-iptables  = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.ipv4.ip_forward                 = 1
      EOF
      
      # Apply sysctl params without reboot
      sysctl --system
      
      # verify
      lsmod | grep br_netfilter
      lsmod | grep overlay
      sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
      
  6. enable cri-o
    • systemctl enable --now crio
      
  7. enable kubelet
    • systemctl enable --now kubelet
      
  8. prepare kubeadm.conf.yaml
    • ---
      apiVersion: kubeadm.k8s.io/v1beta3
      kind: InitConfiguration
      nodeRegistration:
        criSocket: unix:///var/run/crio/crio.sock
        imagePullPolicy: IfNotPresent
        name: k8s-master
        kubeletExtraArgs:
          resolv-conf: /run/systemd/resolve/resolv.conf
      
      ---
      apiVersion: kubeadm.k8s.io/v1beta3
      kind: ClusterConfiguration
      clusterName: kubernetes
      controllerManager:
        extraArgs:
          node-cidr-mask-size: "16"
      imageRepository: registry.k8s.io
      kubernetesVersion: 1.28.0
      networking:
        dnsDomain: cluster.local
        serviceSubnet: 10.96.0.0/12
        podSubnet: 10.244.0.0/16
      
      ---
      apiVersion: kubelet.config.k8s.io/v1beta1
      kind: KubeletConfiguration
      resolvConf: /run/systemd/resolve/resolv.conf
      
      
  9. initialize the cluster
    • with-image-mirror
      sed -i 's/imageRepository: .*/imageRepository: m.daocloud.io\/registry.k8s.io/g' kubeadm.conf.yaml
      kubeadm init --config kubeadm.conf.yaml
      
  10. copy kubeconfig to local
    • # can be run by any user with sudo privilege
      mkdir -p $HOME/.kube \
          && sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config \
          && sudo chown $UID:$UID $HOME/.kube/config
      
      
  11. allow control plane node to run pods
    • kubectl taint nodes --all node-role.kubernetes.io/control-plane-
      
  12. install pod network (choose one of the methods below)
    • flannel by kubectl
      • # If you use custom podCIDR (not 10.244.0.0/16) you first need to download the above manifest and modify the network to match your one.
        kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
        
    • flannel by helm chart
    • calico by helm chart

test with deployment

  1. prepare nginx-deployment.yaml
    • apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: nginx-deployment
        labels:
          app: nginx
      spec:
        replicas: 3
        selector:
          matchLabels:
            app: nginx
        template:
          metadata:
            labels:
              app: nginx
          spec:
            containers:
            - name: nginx
              image: docker.io/library/nginx:1.24.0-alpine
              ports:
              - containerPort: 80
      
      
  2. apply to cluster
    • kubectl apply -f nginx-deployment.yaml
      
  3. check pods
    • kubectl get pod
      

troubles

  1. "cni0" already has an IP address different from 10.2.44.1/24
    • https://github.com/kubernetes/kubernetes/issues/39557#issuecomment-457839765
    • ip link delete cni0
      

uninstallation

  1. uninstall by kubeadm
    • kubeadm reset
      

additional software

  1. install argocd by helm
  2. install ingress by argocd
  3. install cert-manager by argocd