Skip to content

faruk-guler/Kubernetes

Repository files navigation

      __  __           __                                            __                       
     /\ \/\ \         /\ \                                          /\ \__                 
     \ \ \/'/'  __  __\ \ \____     __   _ __    ___      __    ____\ \ ,_\    __    ____  
      \ \ , <  /\ \/\ \\ \ '__`\  /'__`\/\`'__\/' _ `\  /'__`\ /',__\\ \ \/  /'__`\ /',__\ 
       \ \ \\`\\ \ \_\ \\ \ \L\ \/\  __/\ \ \/ /\ \/\ \/\  __//\__, `\\ \ \_/\  __//\__, `\
        \ \_\ \_\ \____/ \ \_,__/\ \____\\ \_\ \ \_\ \_\ \____\/\____/ \ \__\ \____\/\____/
         \/_/\/_/\/___/   \/___/  \/____/ \/_/  \/_/\/_/\/____/\/___/   \/__/\/____/\/___/
      ___             _           _                         
     |  _|___ ___ _ _| |_ ___ _ _| |___ ___  
     |  _| .'|  _| | | '_| . | | | | -_|  _|
WWW .|_| |__,|_| |___|_,_|  _|___|_|___|_|.COM

Name: Classic (Vanilla) Kubernetes Cluster Installation Script.
POC: Debian 12 "Bookworm"
Author: faruk guler
Date: 2025

Preparation:

# System Requirements:
- 2 GB or more of RAM per machine
- 2 CPUs or more for the control-plane machine
- Unique hostname, MAC address, and product_uuid for every node
- A compatible Linux host (Linux distributions based on Debian or Red Hat)

# Server Inventory [Hosts]
KubectlExt:  192.168.44.140
Master:   192.168.44.145
Worker1:  192.168.44.146
Worker2:  192.168.44.147
Worker3:  192.168.44.148

# Docs:
https://kubernetes.io/
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
https://labs.play-with-k8s.com/

# Hostname configuration:
Master Node: sudo hostnamectl set-hostname master
Worker Node: sudo hostnamectl set-hostname worker1
-------

# Simple DNS Integration: "/etc/hosts" file:
# NOTE: this REPLACES /etc/hosts entirely, so the IPv6 loopback entries that
# Debian ships by default must be re-added — dropping them breaks programs
# that resolve "localhost" over IPv6.
cat <<EOF | sudo tee /etc/hosts
127.0.0.1      localhost
::1            localhost ip6-localhost ip6-loopback
192.168.44.145 master
192.168.44.146 worker1
192.168.44.147 worker2
192.168.44.148 worker3
EOF

# Verify each server is unique (hostname, MAC, product_uuid must differ per node):
# distribution name and version
lsb_release -a
# interface list — confirm MAC addresses differ between nodes
ip a
# hardware UUID — kubeadm refuses nodes with duplicate product_uuid
sudo cat /sys/class/dmi/id/product_uuid

# Firewall Ports and Protocols:
>> Control plane:
TCP 6443 (Inbound): Kubernetes API server – All
TCP 2379-2380 (Inbound): etcd server client API – kube-apiserver, etcd
TCP 10250 (Inbound): Kubelet API – Self, Control plane
TCP 10259 (Inbound): kube-scheduler – Self
TCP 10257 (Inbound): kube-controller-manager – Self
sudo ss -tuln | grep 6443

>> Worker node(s):
TCP 10250 (Inbound): Kubelet API – Self, Control plane
TCP 10256 (Inbound): kube-proxy – Self, Load balancers
TCP 30000-32767 (Inbound): NodePort Services – All
sudo ss -tuln | grep 10250

# SELinux and AppArmor [Optional]
# NOTE(review): /etc/selinux/config and sestatus exist on RHEL-family hosts;
# Debian 12 ships AppArmor instead, so on the Debian nodes these steps are
# likely no-ops — confirm per node before relying on them.
sudo nano /etc/selinux/config
# Inside the file, set the following line, then reboot for it to take effect:
SELINUX=disabled
sudo reboot
# Confirm the resulting SELinux state:
sestatus

# Swap Space:
# The kubelet refuses to start (by default) while swap is active, so disable
# it now and comment it out of fstab so it stays off after reboot.
cat /proc/swaps
swapon --show
sudo swapoff -a
# Back up fstab before editing it. sudo is required: /etc/fstab.bak is not
# writable by a regular user, so a plain `cp` here would fail.
sudo cp /etc/fstab /etc/fstab.bak
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Sanity checks: swap total should read 0, and the node needs >= 2 CPUs.
free -m
lscpu

# NTP Synchronization:
# Cluster certificates are time-sensitive; keep all nodes in sync via chrony.
sudo apt install -y chrony
# On Debian the unit is "chrony.service" ("chronyd" is the RHEL name; enabling
# by that alias fails before the alias symlink exists).
sudo systemctl enable --now chrony
# Confirm "System clock synchronized: yes":
sudo timedatectl

Installing:

# Kernel modules required by containerd (overlayfs) and pod networking
# (bridge traffic filtering). Load them for the running kernel...
sudo modprobe overlay
sudo modprobe br_netfilter

# ...and register them so systemd-modules-load reloads them on every boot.
printf '%s\n' overlay br_netfilter | sudo tee /etc/modules-load.d/k8s.conf

# Sysctl settings Kubernetes needs: let iptables see bridged pod traffic
# (IPv4 and IPv6) and allow the node to forward packets between pods.
printf '%s\n' \
  'net.bridge.bridge-nf-call-iptables  = 1' \
  'net.bridge.bridge-nf-call-ip6tables = 1' \
  'net.ipv4.ip_forward                 = 1' \
  | sudo tee /etc/sysctl.d/k8s.conf

# Apply all sysctl drop-ins immediately, without a reboot.
sudo sysctl --system

# Container Runtime: [containerd, etc.]
sudo apt update
# -y so the script does not stall on the confirmation prompt:
sudo apt install -y containerd
sudo systemctl enable --now containerd
sudo mkdir -p /etc/containerd
# Generate the default config. tee needs sudo — /etc/containerd was just
# created root-owned, so an unprivileged tee would fail with EACCES.
containerd config default | sudo tee /etc/containerd/config.toml
# kubelet (cgroup v2 / systemd) requires containerd to use the systemd cgroup
# driver. Match the key without anchoring to an exact indentation width, which
# breaks whenever containerd reformats its default config.
sudo sed -i 's/\(SystemdCgroup *= *\)false/\1true/' /etc/containerd/config.toml
sudo systemctl restart containerd

# Install: [kubelet kubeadm kubectl]
# Follow the official apt-repository steps at:
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
# Pin the package versions so unattended upgrades cannot skew cluster versions:
#apt-mark hold kubelet kubeadm kubectl
#apt-mark unhold kubelet kubeadm kubectl
# Troubleshooting: probe the API server port and inspect kubelet logs.
nc 127.0.0.1 6443 -v
journalctl -u kubelet
journalctl -xfe

# Kubernetes Cluster: [kubeadm init]
# Pre-pull control-plane images (optional; makes init faster and offline-safer).
sudo kubeadm config images pull
# Template — replace <ip> with the control-plane address before running:
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=<ip> --control-plane-endpoint=<ip>
# Concrete invocation for this inventory (master = 192.168.44.145).
# NOTE(review): 192.168.0.0/16 (Calico's default pod CIDR) contains the node
# LAN 192.168.44.0/24 — consider a non-overlapping CIDR such as 10.244.0.0/16;
# confirm against your network plan.
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=192.168.44.145 --control-plane-endpoint=192.168.44.145

# Join a Cluster: [kubeadm join]
# Run on the control plane; paste the printed join command on each worker.
kubeadm token create --print-join-command

# Kubernetes Nodes Configuration:
# Copy the cluster admin kubeconfig to the external kubectl host — per the
# inventory above, KubectlExt is 192.168.44.140. (The original target was
# mangled by the web scrape to "[email protected]"; substitute the real remote
# account for "user".)
sudo scp /etc/kubernetes/admin.conf user@192.168.44.140:/etc/kubernetes/admin.conf
# Source on the control plane:   /etc/kubernetes/admin.conf
# Destination for kubectl users: ~/.kube/config

# Kubectl:
# Give the current user a kubeconfig so kubectl works without sudo.
# Quote the expansions so paths with spaces cannot word-split.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# Verify the config was picked up ("kubectl config" alone only prints help —
# "view" actually shows the merged kubeconfig):
kubectl config view
kubectl config get-contexts

# Networking: Calico:
# Manifest-based install (matches the 192.168.0.0/16 pod CIDR used at init):
wget https://docs.projectcalico.org/manifests/calico.yaml
kubectl apply -f calico.yaml
# Operator-based alternative — use ONE approach, not both:
#kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
#kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
# NOTE(review): docs.projectcalico.org has been superseded by docs.tigera.io —
# verify these manifest URLs still resolve for the Calico version you want.

# Taint and Untaint:
# The explanatory labels are now real comments — the original trailing
# "[...]" annotations would be passed to kubectl/grep as extra arguments
# if these lines were pasted verbatim.
# Check current taints on master1:
kubectl describe node master1 | grep Taint
# Apply / remove the control-plane taint on master1 (trailing "-" removes):
kubectl taint nodes master1 node-role.kubernetes.io/control-plane:NoSchedule
kubectl taint nodes master1 node-role.kubernetes.io/control-plane:NoSchedule-

# Apply / remove a custom taint on worker1:
kubectl taint nodes worker1 custom=deny:NoSchedule
kubectl taint nodes worker1 custom=deny:NoSchedule-

# Kubectl Auto-Completion:
# Enable completion in the current shell, then persist it for future logins.
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
# System-wide alternative (requires the bash-completion package):
#kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null
#sudo apt-get install bash-completion

# Getting Started >>>
# Smoke-test the new cluster: kubelet healthy, API reachable, nodes Ready.
systemctl status kubelet.service
kubectl version
kubectl cluster-info
kubectl get nodes -o wide

Enjoy :)

About

No description, website, or topics provided.

Resources

Stars

Watchers

Forks

Releases

No releases published

Packages

No packages published

Languages