Skip to content

Commit

Permalink
Add makefile to automate local ebpf development (#233)
Browse files Browse the repository at this point in the history
Co-authored-by: Ori Shavit <[email protected]>
  • Loading branch information
otterobert and orishavit authored Aug 7, 2024
1 parent 598625c commit 544e863
Show file tree
Hide file tree
Showing 4 changed files with 319 additions and 23 deletions.
116 changes: 116 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
# ANSI escape codes used to colorize target output.
PROMPT_COLOR := \033[36m
# Reset code ("no color"). NOTE: keep the comment on its own line — a
# trailing "# comment" on an assignment becomes part of the value in make.
PROMPT_NC := \033[0m

# Path to the local checkout of the otterize-kubernetes helm chart.
HELM_CHARTS_PATH := ~/helm-charts/otterize-kubernetes

# Deployment namespace and image coordinates shared by all targets below.
OTRZ_NAMESPACE := otterize-system
OTRZ_IMAGE_TAG := 0.0.0
OTRZ_IMAGE_REGISTRY := otterize
OTRZ_AGENT_IMAGE_NAME := agent
OTRZ_MAPPER_IMAGE_NAME := mapper
OTRZ_BPFMAN_IMAGE_NAME := bpfman
OTRZ_AGENT_IMAGE_FULL_NAME := $(OTRZ_IMAGE_REGISTRY)/$(OTRZ_AGENT_IMAGE_NAME):$(OTRZ_IMAGE_TAG)
OTRZ_MAPPER_IMAGE_FULL_NAME := $(OTRZ_IMAGE_REGISTRY)/$(OTRZ_MAPPER_IMAGE_NAME):$(OTRZ_IMAGE_TAG)
OTRZ_BPFMAN_IMAGE_FULL_NAME := $(OTRZ_IMAGE_REGISTRY)/$(OTRZ_BPFMAN_IMAGE_NAME):$(OTRZ_IMAGE_TAG)

# Lima (Linux VMs on macOS) settings for the local kubernetes cluster.
LIMA_K8S_TEMPLATE := ./dev/lima-k8s.yaml
LIMA_CLUSTER_NAME := k8s
LIMA_KUBECONFIG_PATH := $(HOME)/.kube/lima
LIMA_TEMP_DIR := /tmp/lima/

# Include .env file if it exists (lets developers override the knobs above).
ifneq (,$(wildcard ./.env))
include .env
export
endif

# Every target in this file is a command, not a produced file.
.PHONY: help build-agent build-mapper build-bpfman \
        lima-install lima-k8s lima-kubeconfig lima-copy-images \
        lima-restart-otterize lima-update-images lima-install-otterize \
        setup-lima clean-lima

help: ## Show help message
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " ${PROMPT_COLOR}%-25s${PROMPT_NC} %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

# Image building targets
# All images are built for linux/amd64 because the lima guest runs x86_64.

build-agent: ## Builds the node agent image
	@echo "${PROMPT_COLOR}Building agent image '$(OTRZ_AGENT_IMAGE_FULL_NAME)'...${PROMPT_NC}"
	docker buildx build --platform linux/amd64 -t $(OTRZ_AGENT_IMAGE_FULL_NAME) --file build/$(OTRZ_AGENT_IMAGE_NAME).Dockerfile src/

build-mapper: ## Builds the mapper image
	@echo "${PROMPT_COLOR}Building mapper image '$(OTRZ_MAPPER_IMAGE_FULL_NAME)'...${PROMPT_NC}"
	docker buildx build --platform linux/amd64 -t $(OTRZ_MAPPER_IMAGE_FULL_NAME) --file build/$(OTRZ_MAPPER_IMAGE_NAME).Dockerfile src/

build-bpfman: ## Builds the bpfman image
	@echo "${PROMPT_COLOR}Building bpfman image '$(OTRZ_BPFMAN_IMAGE_FULL_NAME)'...${PROMPT_NC}"
	docker buildx build --platform linux/amd64 -t $(OTRZ_BPFMAN_IMAGE_FULL_NAME) --file build/$(OTRZ_BPFMAN_IMAGE_NAME).Dockerfile src/

# Lima-specific targets - used for local development on macOS

lima-install: ## Installs lima if not already installed
	@echo "${PROMPT_COLOR}Installing Lima...${PROMPT_NC}"
	brew list lima || brew install lima

lima-k8s: ## Starts Lima with k8s template
	@echo "${PROMPT_COLOR}Starting Lima with the template '$(LIMA_K8S_TEMPLATE)'...${PROMPT_NC}"
	limactl start $(LIMA_K8S_TEMPLATE) --arch x86_64 --name $(LIMA_CLUSTER_NAME)

lima-kubeconfig: ## Copies kubeconfig from lima to host
	@echo "${PROMPT_COLOR}Copying kubeconfig from Lima to host...${PROMPT_NC}"
	mkdir -p $(dir $(LIMA_KUBECONFIG_PATH))
	cp $(shell limactl list $(LIMA_CLUSTER_NAME) --format '{{.Dir}}/copied-from-guest/kubeconfig.yaml') $(LIMA_KUBECONFIG_PATH)
	@echo "${PROMPT_COLOR}Run 'export KUBECONFIG=$(LIMA_KUBECONFIG_PATH)' to use the kubeconfig.${PROMPT_NC}"

lima-copy-images: ## Copies the images to lima
	@echo "${PROMPT_COLOR}Copying images to Lima...${PROMPT_NC}"
	# Ensure the scratch directory exists; `docker save -o` does not create it.
	mkdir -p $(LIMA_TEMP_DIR)images
	docker save -o $(LIMA_TEMP_DIR)images/$(OTRZ_AGENT_IMAGE_NAME).tar $(OTRZ_AGENT_IMAGE_FULL_NAME)
	docker save -o $(LIMA_TEMP_DIR)images/$(OTRZ_MAPPER_IMAGE_NAME).tar $(OTRZ_MAPPER_IMAGE_FULL_NAME)
	docker save -o $(LIMA_TEMP_DIR)images/$(OTRZ_BPFMAN_IMAGE_NAME).tar $(OTRZ_BPFMAN_IMAGE_FULL_NAME)

	limactl copy $(LIMA_TEMP_DIR)images/$(OTRZ_AGENT_IMAGE_NAME).tar $(LIMA_CLUSTER_NAME):/tmp/$(OTRZ_AGENT_IMAGE_NAME).tar
	limactl copy $(LIMA_TEMP_DIR)images/$(OTRZ_MAPPER_IMAGE_NAME).tar $(LIMA_CLUSTER_NAME):/tmp/$(OTRZ_MAPPER_IMAGE_NAME).tar
	limactl copy $(LIMA_TEMP_DIR)images/$(OTRZ_BPFMAN_IMAGE_NAME).tar $(LIMA_CLUSTER_NAME):/tmp/$(OTRZ_BPFMAN_IMAGE_NAME).tar

	# NOTE: `VAR=val cmd` (env-assignment prefix) is required here; the previous
	# `VAR=val && cmd` form set an unexported shell variable that `lima` never saw.
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima sudo ctr -n=k8s.io images import /tmp/$(OTRZ_AGENT_IMAGE_NAME).tar
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima sudo ctr -n=k8s.io images import /tmp/$(OTRZ_MAPPER_IMAGE_NAME).tar
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima sudo ctr -n=k8s.io images import /tmp/$(OTRZ_BPFMAN_IMAGE_NAME).tar

lima-restart-otterize: ## Restarts Otterize pods running in the lima kubernetes cluster
	@echo "${PROMPT_COLOR}Restarting Otterize pods...${PROMPT_NC}"
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima kubectl delete pods --all -n $(OTRZ_NAMESPACE)

lima-update-images: build-mapper build-agent build-bpfman lima-copy-images lima-restart-otterize ## Builds and updates the mapper image in the lima kubernetes cluster and restarts the pods

lima-install-otterize: ## Installs Otterize in the lima kubernetes cluster with the provided client ID and client secret
	# Prompt interactively for credentials unless CLIENT_ID / CLIENT_SECRET
	# are supplied on the command line (or via .env).
	@if [ -z "$(CLIENT_ID)" ]; then \
		read -p "Client ID: " client_id; \
	else \
		client_id=$(CLIENT_ID); \
	fi; \
	if [ -z "$(CLIENT_SECRET)" ]; then \
		read -p "Client Secret: " client_secret; \
	else \
		client_secret=$(CLIENT_SECRET); \
	fi; \
	helm --kubeconfig=$(LIMA_KUBECONFIG_PATH) dep up $(HELM_CHARTS_PATH); \
	helm --kubeconfig=$(LIMA_KUBECONFIG_PATH) upgrade --install \
		otterize $(HELM_CHARTS_PATH) -n $(OTRZ_NAMESPACE) --create-namespace \
		--set networkMapper.debug=true \
		--set networkMapper.agent.tag=$(OTRZ_IMAGE_TAG) \
		--set networkMapper.agent.image=$(OTRZ_AGENT_IMAGE_NAME) \
		--set networkMapper.agent.pullPolicy=Never \
		--set networkMapper.bpfman.tag=$(OTRZ_IMAGE_TAG) \
		--set networkMapper.bpfman.image=$(OTRZ_BPFMAN_IMAGE_NAME) \
		--set networkMapper.bpfman.pullPolicy=Never \
		--set networkMapper.mapper.tag=$(OTRZ_IMAGE_TAG) \
		--set networkMapper.mapper.image=$(OTRZ_MAPPER_IMAGE_NAME) \
		--set networkMapper.mapper.pullPolicy=Never \
		--set intentsOperator.operator.mode=defaultShadow \
		--set global.otterizeCloud.apiAddress=http://host.lima.internal:3000/api \
		--set global.otterizeCloud.credentials.clientId=$$client_id \
		--set global.otterizeCloud.credentials.clientSecret=$$client_secret


setup-lima: lima-install lima-k8s lima-kubeconfig lima-install-otterize ## Setup Lima with kubernetes template
	@echo "${PROMPT_COLOR}Setup completed.${PROMPT_NC}"
	# Env-assignment prefix (no `&&`) so `lima` actually receives LIMA_INSTANCE.
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima kubectl get pods -n $(OTRZ_NAMESPACE)

clean-lima: ## Cleans up lima environment
	@echo "${PROMPT_COLOR}Cleaning up '$(LIMA_K8S_TEMPLATE)'...${PROMPT_NC}"
	limactl stop -f $(LIMA_CLUSTER_NAME)
	limactl delete $(LIMA_CLUSTER_NAME)
23 changes: 0 additions & 23 deletions build/agent.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,26 +1,3 @@
FROM golang:1.22.1 AS ebpf-buildenv

WORKDIR /src
COPY go.mod go.sum ./

RUN <<EOR
apt-get update
apt-get install -y clang llvm libelf-dev libbpf-dev linux-headers-generic bpftool
ln -sf /usr/include/$(uname -m)-linux-gnu/asm /usr/include/asm
go mod download
EOR

COPY ebpf/ ./ebpf/

RUN <<EOR
go generate -tags ebpf ./ebpf/...
EOR

FROM quay.io/bpfman/bpfman AS bpfman
COPY --from=ebpf-buildenv /src/ebpf/ /otterize/ebpf/

ENTRYPOINT ["./bpfman-rpc", "--timeout=0"]

FROM --platform=$BUILDPLATFORM golang:1.22.1-alpine AS buildenv
RUN apk add --no-cache ca-certificates git protoc
RUN apk add build-base libpcap-dev
Expand Down
20 changes: 20 additions & 0 deletions build/bpfman.Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Build stage: generate the eBPF objects with clang/LLVM via `go generate`.
FROM --platform=$BUILDPLATFORM golang:1.22.1 AS ebpf-buildenv

WORKDIR /src
COPY go.mod go.sum ./

# Single heredoc RUN (same style as agent.Dockerfile): keeps `apt-get update`
# and `apt-get install` in one layer so a cached stale package index can never
# be paired with a fresh install list, and drops the index afterwards.
RUN <<EOR
apt-get update
apt-get install -y clang llvm libelf-dev libbpf-dev linux-headers-generic
ln -sf /usr/include/$(uname -m)-linux-gnu/asm /usr/include/asm
go mod download
rm -rf /var/lib/apt/lists/*
EOR

COPY ebpf/ ./ebpf/

RUN <<EOR
go generate -tags ebpf ./ebpf/...
EOR

# Final stage: ship the compiled eBPF objects inside the upstream bpfman image.
FROM quay.io/bpfman/bpfman AS bpfman
COPY --from=ebpf-buildenv /src/ebpf/ /otterize/ebpf/

# --timeout=0 keeps the bpfman RPC server running indefinitely.
ENTRYPOINT ["./bpfman-rpc", "--timeout=0"]
183 changes: 183 additions & 0 deletions dev/lima-k8s.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,183 @@
# Deploy kubernetes via kubeadm.
# $ limactl start ./k8s.yaml
# $ limactl shell k8s kubectl

# It can be accessed from the host by exporting the kubeconfig file;
# the ports are already forwarded automatically by lima:
#
# $ export KUBECONFIG=$(limactl list k8s --format 'unix://{{.Dir}}/copied-from-guest/kubeconfig.yaml')
# $ kubectl get no
# NAME STATUS ROLES AGE VERSION
# lima-k8s Ready control-plane,master 44s v1.22.3

# Emulate a maximal x86_64 CPU so amd64 images built on the host run in the guest.
cpuType:
  "x86_64": "max"

# This template requires Lima v0.20.0 or later.
images:
# Try to use release-yyyyMMdd image if available. Note that release-yyyyMMdd will be removed after several months.
- location: "https://cloud-images.ubuntu.com/releases/24.04/release-20240423/ubuntu-24.04-server-cloudimg-amd64.img"
  arch: "x86_64"
  digest: "sha256:32a9d30d18803da72f5936cf2b7b9efcb4d0bb63c67933f17e3bdfd1751de3f3"
- location: "https://cloud-images.ubuntu.com/releases/24.04/release-20240423/ubuntu-24.04-server-cloudimg-arm64.img"
  arch: "aarch64"
  digest: "sha256:c841bac00925d3e6892d979798103a867931f255f28fefd9d5e07e3e22d0ef22"
# Fallback to the latest release image.
# Hint: run `limactl prune` to invalidate the cache
- location: "https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img"
  arch: "x86_64"
- location: "https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img"
  arch: "aarch64"

# Mounts are disabled in this template, but can be enabled optionally.
mounts: []
# System-wide containerd is used as the CRI runtime for kubernetes.
containerd:
  system: true
  user: false
provision:
# See <https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/>
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    command -v kubeadm >/dev/null 2>&1 && exit 0
    # Install and configure prerequisites
    cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
    overlay
    br_netfilter
    EOF
    modprobe overlay
    modprobe br_netfilter
    cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    sysctl --system
    # Installing kubeadm, kubelet and kubectl
    export DEBIAN_FRONTEND=noninteractive
    apt-get update
    apt-get install -y apt-transport-https ca-certificates curl
    VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt | sed -e 's/v//' | cut -d'.' -f1-2)
    echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
    apt-get update
    # cri-tools
    apt-get install -y cri-tools
    cat <<EOF | sudo tee /etc/crictl.yaml
    runtime-endpoint: unix:///run/containerd/containerd.sock
    EOF
    # cni-plugins
    apt-get install -y kubernetes-cni
    rm -f /etc/cni/net.d/*.conf*
    apt-get install -y kubelet kubeadm kubectl && apt-mark hold kubelet kubeadm kubectl
    systemctl enable --now kubelet
# See <https://kubernetes.io/docs/setup/production-environment/container-runtimes/>
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    grep SystemdCgroup /etc/containerd/config.toml && exit 0
    grep "version = 2" /etc/containerd/config.toml || exit 1
    # Configuring the systemd cgroup driver
    # Overriding the sandbox (pause) image
    cat <<EOF >>/etc/containerd/config.toml
    [plugins]
      [plugins."io.containerd.grpc.v1.cri"]
        sandbox_image = "$(kubeadm config images list | grep pause | sort -r | head -n1)"
        [plugins."io.containerd.grpc.v1.cri".containerd]
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
            [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
              runtime_type = "io.containerd.runc.v2"
              [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
                SystemdCgroup = true
    EOF
    systemctl restart containerd
# See <https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/>
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -e /etc/kubernetes/admin.conf && exit 0
    export KUBECONFIG=/etc/kubernetes/admin.conf
    kubeadm config images list
    kubeadm config images pull --cri-socket=unix:///run/containerd/containerd.sock
    # Initializing your control-plane node
    cat <<EOF >kubeadm-config.yaml
    kind: InitConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    nodeRegistration:
      criSocket: unix:///run/containerd/containerd.sock
    ---
    kind: ClusterConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    apiServer:
      certSANs: # --apiserver-cert-extra-sans
      - "127.0.0.1"
    networking:
      podSubnet: "10.244.0.0/16" # --pod-network-cidr
    ---
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    cgroupDriver: systemd
    EOF
    kubeadm init --config kubeadm-config.yaml
    # Installing a Pod network add-on
    kubectl apply -f https://github.com/flannel-io/flannel/releases/download/v0.24.0/kube-flannel.yml
    # Control plane node isolation
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
    # Replace the server address with localhost, so that it works also from the host
    sed -e "/server:/ s|https://.*:\([0-9]*\)$|https://127.0.0.1:\1|" -i $KUBECONFIG
    mkdir -p ${HOME:-/root}/.kube && cp -f $KUBECONFIG ${HOME:-/root}/.kube/config
# Copy the admin kubeconfig to the unprivileged lima user's home.
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    export KUBECONFIG=/etc/kubernetes/admin.conf
    mkdir -p {{.Home}}/.kube
    cp -f $KUBECONFIG {{.Home}}/.kube/config
    chown -R {{.User}} {{.Home}}/.kube
# Readiness probes run on the host to report provisioning progress.
probes:
- description: "kubeadm to be installed"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 30s bash -c "until command -v kubeadm >/dev/null 2>&1; do sleep 3; done"; then
      echo >&2 "kubeadm is not installed yet"
      exit 1
    fi
  hint: |
    See "/var/log/cloud-init-output.log" in the guest
- description: "kubeadm to be completed"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until test -f /etc/kubernetes/admin.conf; do sleep 3; done"; then
      echo >&2 "k8s is not running yet"
      exit 1
    fi
  hint: |
    The k8s kubeconfig file has not yet been created.
- description: "kubernetes cluster to be running"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until kubectl version >/dev/null 2>&1; do sleep 3; done"; then
      echo >&2 "kubernetes cluster is not up and running yet"
      exit 1
    fi
- description: "coredns deployment to be running"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    kubectl wait -n kube-system --timeout=180s --for=condition=available deploy coredns
# Export the admin kubeconfig so the host can use it (consumed by the
# Makefile's `lima-kubeconfig` target).
copyToHost:
- guest: "/etc/kubernetes/admin.conf"
  host: "{{.Dir}}/copied-from-guest/kubeconfig.yaml"
  deleteOnStop: true
message: |
  To run `kubectl` on the host (assumes kubectl is installed), run the following commands:
  ------
  export KUBECONFIG="{{.Dir}}/copied-from-guest/kubeconfig.yaml"
  kubectl ...
  ------

0 comments on commit 544e863

Please sign in to comment.