Skip to content

Commit 544e863

Browse files
Add makefile to automate local ebpf development (#233)
Co-authored-by: Ori Shavit <[email protected]>
1 parent 598625c commit 544e863

File tree

4 files changed

+319
-23
lines changed

4 files changed

+319
-23
lines changed

Makefile

Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
PROMPT_COLOR=\033[36m
PROMPT_NC=\033[0m # No Color

# `read -p` (used below) is a bashism; dash-based /bin/sh rejects it.
SHELL := bash

HELM_CHARTS_PATH = ~/helm-charts/otterize-kubernetes

OTRZ_NAMESPACE = otterize-system
OTRZ_IMAGE_TAG = 0.0.0
OTRZ_IMAGE_REGISTRY = otterize
OTRZ_AGENT_IMAGE_NAME = agent
OTRZ_MAPPER_IMAGE_NAME = mapper
OTRZ_BPFMAN_IMAGE_NAME = bpfman
OTRZ_AGENT_IMAGE_FULL_NAME = $(OTRZ_IMAGE_REGISTRY)/$(OTRZ_AGENT_IMAGE_NAME):$(OTRZ_IMAGE_TAG)
OTRZ_MAPPER_IMAGE_FULL_NAME = $(OTRZ_IMAGE_REGISTRY)/$(OTRZ_MAPPER_IMAGE_NAME):$(OTRZ_IMAGE_TAG)
OTRZ_BPFMAN_IMAGE_FULL_NAME = $(OTRZ_IMAGE_REGISTRY)/$(OTRZ_BPFMAN_IMAGE_NAME):$(OTRZ_IMAGE_TAG)

LIMA_K8S_TEMPLATE = ./dev/lima-k8s.yaml
LIMA_CLUSTER_NAME = k8s
LIMA_KUBECONFIG_PATH = $(HOME)/.kube/lima
LIMA_TEMP_DIR = /tmp/lima/

# Include .env file if it exists
ifneq (,$(wildcard ./.env))
include .env
export
endif

# Every target here is a command, not a file; declare them phony so a stray
# file with the same name (e.g. ./help) can never shadow them.
.PHONY: help build-agent build-mapper build-bpfman \
	lima-install lima-k8s lima-kubeconfig lima-copy-images \
	lima-restart-otterize lima-update-images lima-install-otterize \
	setup-lima clean-lima

help: ## Show help message
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " ${PROMPT_COLOR}%-25s${PROMPT_NC} %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

# Image building targets

build-agent: ## Builds the node agent image
	@echo "${PROMPT_COLOR}Building agent image '$(OTRZ_AGENT_IMAGE_FULL_NAME)'...${PROMPT_NC}"
	docker buildx build --platform linux/amd64 -t $(OTRZ_AGENT_IMAGE_FULL_NAME) --file build/$(OTRZ_AGENT_IMAGE_NAME).Dockerfile src/

build-mapper: ## Builds the mapper image
	@echo "${PROMPT_COLOR}Building mapper image '$(OTRZ_MAPPER_IMAGE_FULL_NAME)'...${PROMPT_NC}"
	docker buildx build --platform linux/amd64 -t $(OTRZ_MAPPER_IMAGE_FULL_NAME) --file build/$(OTRZ_MAPPER_IMAGE_NAME).Dockerfile src/

build-bpfman: ## Builds the bpfman image
	@echo "${PROMPT_COLOR}Building bpfman image '$(OTRZ_BPFMAN_IMAGE_FULL_NAME)'...${PROMPT_NC}"
	docker buildx build --platform linux/amd64 -t $(OTRZ_BPFMAN_IMAGE_FULL_NAME) --file build/$(OTRZ_BPFMAN_IMAGE_NAME).Dockerfile src/

# Lima-specific targets - used for local development on macOS

lima-install: ## Installs lima if not already installed
	@echo "${PROMPT_COLOR}Installing Lima...${PROMPT_NC}"
	brew list lima || brew install lima

lima-k8s: ## Starts Lima with k8s template
	@echo "${PROMPT_COLOR}Starting Lima with the template '$(LIMA_K8S_TEMPLATE)'...${PROMPT_NC}"
	limactl start $(LIMA_K8S_TEMPLATE) --arch x86_64 --name $(LIMA_CLUSTER_NAME)

lima-kubeconfig: ## Copies kubeconfig from lima to host
	@echo "${PROMPT_COLOR}Copying kubeconfig from Lima to host...${PROMPT_NC}"
	mkdir -p $(dir $(LIMA_KUBECONFIG_PATH))
	cp $(shell limactl list $(LIMA_CLUSTER_NAME) --format '{{.Dir}}/copied-from-guest/kubeconfig.yaml') $(LIMA_KUBECONFIG_PATH)
	@echo "${PROMPT_COLOR}Run 'export KUBECONFIG=$(LIMA_KUBECONFIG_PATH)' to use the kubeconfig.${PROMPT_NC}"

# Saves the locally-built images to tarballs, copies them into the guest, and
# imports them into containerd's k8s.io namespace so kubelet can use them.
# NOTE: the LIMA_INSTANCE assignment must PREFIX the lima command
# (`VAR=val cmd`); `VAR=val && cmd` only sets an unexported shell variable
# that the `lima` wrapper never sees.
lima-copy-images: ## Copies the images to lima
	@echo "${PROMPT_COLOR}Copying images to Lima...${PROMPT_NC}"
	mkdir -p $(LIMA_TEMP_DIR)images
	docker save -o $(LIMA_TEMP_DIR)images/$(OTRZ_AGENT_IMAGE_NAME).tar $(OTRZ_AGENT_IMAGE_FULL_NAME)
	docker save -o $(LIMA_TEMP_DIR)images/$(OTRZ_MAPPER_IMAGE_NAME).tar $(OTRZ_MAPPER_IMAGE_FULL_NAME)
	docker save -o $(LIMA_TEMP_DIR)images/$(OTRZ_BPFMAN_IMAGE_NAME).tar $(OTRZ_BPFMAN_IMAGE_FULL_NAME)

	limactl copy $(LIMA_TEMP_DIR)images/$(OTRZ_AGENT_IMAGE_NAME).tar $(LIMA_CLUSTER_NAME):/tmp/$(OTRZ_AGENT_IMAGE_NAME).tar
	limactl copy $(LIMA_TEMP_DIR)images/$(OTRZ_MAPPER_IMAGE_NAME).tar $(LIMA_CLUSTER_NAME):/tmp/$(OTRZ_MAPPER_IMAGE_NAME).tar
	limactl copy $(LIMA_TEMP_DIR)images/$(OTRZ_BPFMAN_IMAGE_NAME).tar $(LIMA_CLUSTER_NAME):/tmp/$(OTRZ_BPFMAN_IMAGE_NAME).tar

	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima sudo ctr -n=k8s.io images import /tmp/$(OTRZ_AGENT_IMAGE_NAME).tar
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima sudo ctr -n=k8s.io images import /tmp/$(OTRZ_MAPPER_IMAGE_NAME).tar
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima sudo ctr -n=k8s.io images import /tmp/$(OTRZ_BPFMAN_IMAGE_NAME).tar

lima-restart-otterize: ## Restarts Otterize pods running in the lima kubernetes cluster
	@echo "${PROMPT_COLOR}Restarting Otterize pods...${PROMPT_NC}"
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima kubectl delete pods --all -n $(OTRZ_NAMESPACE)

lima-update-images: build-mapper build-agent build-bpfman lima-copy-images lima-restart-otterize ## Builds and updates the mapper image in the lima kubernetes cluster and restarts the pods

# Prompts for cloud credentials unless CLIENT_ID / CLIENT_SECRET are supplied
# (e.g. via .env or `make lima-install-otterize CLIENT_ID=...`), then installs
# the otterize-kubernetes chart pointed at the locally-built images
# (pullPolicy=Never so the cluster uses the imported tarballs).
lima-install-otterize: ## Installs Otterize in the lima kubernetes cluster with the provided client ID and client secret
	@if [ -z "$(CLIENT_ID)" ]; then \
		read -p "Client ID: " client_id; \
	else \
		client_id=$(CLIENT_ID); \
	fi; \
	if [ -z "$(CLIENT_SECRET)" ]; then \
		read -p "Client Secret: " client_secret; \
	else \
		client_secret=$(CLIENT_SECRET); \
	fi; \
	helm --kubeconfig=$(LIMA_KUBECONFIG_PATH) dep up $(HELM_CHARTS_PATH); \
	helm --kubeconfig=$(LIMA_KUBECONFIG_PATH) upgrade --install \
		otterize $(HELM_CHARTS_PATH) -n $(OTRZ_NAMESPACE) --create-namespace \
		--set networkMapper.debug=true \
		--set networkMapper.agent.tag=$(OTRZ_IMAGE_TAG) \
		--set networkMapper.agent.image=$(OTRZ_AGENT_IMAGE_NAME) \
		--set networkMapper.agent.pullPolicy=Never \
		--set networkMapper.bpfman.tag=$(OTRZ_IMAGE_TAG) \
		--set networkMapper.bpfman.image=$(OTRZ_BPFMAN_IMAGE_NAME) \
		--set networkMapper.bpfman.pullPolicy=Never \
		--set networkMapper.mapper.tag=$(OTRZ_IMAGE_TAG) \
		--set networkMapper.mapper.image=$(OTRZ_MAPPER_IMAGE_NAME) \
		--set networkMapper.mapper.pullPolicy=Never \
		--set intentsOperator.operator.mode=defaultShadow \
		--set global.otterizeCloud.apiAddress=http://host.lima.internal:3000/api \
		--set global.otterizeCloud.credentials.clientId=$$client_id \
		--set global.otterizeCloud.credentials.clientSecret=$$client_secret


setup-lima: lima-install lima-k8s lima-kubeconfig lima-install-otterize ## Setup Lima with kubernetes template
	@echo "${PROMPT_COLOR}Setup completed.${PROMPT_NC}"
	LIMA_INSTANCE=$(LIMA_CLUSTER_NAME) lima kubectl get pods -n $(OTRZ_NAMESPACE)

clean-lima: ## Cleans up lima environment
	@echo "${PROMPT_COLOR}Cleaning up '$(LIMA_K8S_TEMPLATE)'...${PROMPT_NC}"
	limactl stop -f $(LIMA_CLUSTER_NAME)
	limactl delete $(LIMA_CLUSTER_NAME)

build/agent.Dockerfile

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,3 @@
1-
FROM golang:1.22.1 AS ebpf-buildenv
2-
3-
WORKDIR /src
4-
COPY go.mod go.sum ./
5-
6-
RUN <<EOR
7-
apt-get update
8-
apt-get install -y clang llvm libelf-dev libbpf-dev linux-headers-generic bpftool
9-
ln -sf /usr/include/$(uname -m)-linux-gnu/asm /usr/include/asm
10-
go mod download
11-
EOR
12-
13-
COPY ebpf/ ./ebpf/
14-
15-
RUN <<EOR
16-
go generate -tags ebpf ./ebpf/...
17-
EOR
18-
19-
FROM quay.io/bpfman/bpfman AS bpfman
20-
COPY --from=ebpf-buildenv /src/ebpf/ /otterize/ebpf/
21-
22-
ENTRYPOINT ["./bpfman-rpc", "--timeout=0"]
23-
241
FROM --platform=$BUILDPLATFORM golang:1.22.1-alpine AS buildenv
252
RUN apk add --no-cache ca-certificates git protoc
263
RUN apk add build-base libpcap-dev

build/bpfman.Dockerfile

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
# Build stage: compile/generate the eBPF artifacts with clang/llvm, then ship
# only the generated ./ebpf tree into the bpfman runtime image below.
FROM --platform=$BUILDPLATFORM golang:1.22.1 AS ebpf-buildenv

WORKDIR /src
COPY go.mod go.sum ./

# update + install in ONE layer so a cached (stale) apt index is never reused
# with a newer install list; drop recommends and the index to keep the layer small.
RUN apt-get update && \
    apt-get install -y --no-install-recommends clang llvm libelf-dev libbpf-dev linux-headers-generic && \
    rm -rf /var/lib/apt/lists/*
# asm headers live under the arch triplet dir on Debian; clang expects /usr/include/asm
RUN ln -sf /usr/include/$(uname -m)-linux-gnu/asm /usr/include/asm
RUN go mod download

COPY ebpf/ ./ebpf/

RUN <<EOR
go generate -tags ebpf ./ebpf/...
EOR

# Runtime stage: bpfman daemon image plus the generated eBPF objects.
FROM quay.io/bpfman/bpfman AS bpfman
COPY --from=ebpf-buildenv /src/ebpf/ /otterize/ebpf/

ENTRYPOINT ["./bpfman-rpc", "--timeout=0"]

dev/lima-k8s.yaml

Lines changed: 183 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,183 @@
# Deploy kubernetes via kubeadm.
# $ limactl start ./k8s.yaml
# $ limactl shell k8s kubectl

# It can be accessed from the host by exporting the kubeconfig file;
# the ports are already forwarded automatically by lima:
#
# $ export KUBECONFIG=$(limactl list k8s --format 'unix://{{.Dir}}/copied-from-guest/kubeconfig.yaml')
# $ kubectl get no
# NAME STATUS ROLES AGE VERSION
# lima-k8s Ready control-plane,master 44s v1.22.3

# Force an emulated x86_64 guest regardless of host arch (the project's
# images are built for linux/amd64).
cpuType:
  "x86_64": "max"

# This template requires Lima v0.20.0 or later.
images:
# Try to use release-yyyyMMdd image if available. Note that release-yyyyMMdd will be removed after several months.
- location: "https://cloud-images.ubuntu.com/releases/24.04/release-20240423/ubuntu-24.04-server-cloudimg-amd64.img"
  arch: "x86_64"
  digest: "sha256:32a9d30d18803da72f5936cf2b7b9efcb4d0bb63c67933f17e3bdfd1751de3f3"
- location: "https://cloud-images.ubuntu.com/releases/24.04/release-20240423/ubuntu-24.04-server-cloudimg-arm64.img"
  arch: "aarch64"
  digest: "sha256:c841bac00925d3e6892d979798103a867931f255f28fefd9d5e07e3e22d0ef22"
# Fallback to the latest release image.
# Hint: run `limactl prune` to invalidate the cache
- location: "https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img"
  arch: "x86_64"
- location: "https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img"
  arch: "aarch64"

# Mounts are disabled in this template, but can be enabled optionally.
mounts: []
containerd:
  system: true
  user: false
provision:
# Step 1: install kubeadm/kubelet/kubectl and kernel prerequisites
# (idempotent: exits early if kubeadm is already present).
# See <https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/>
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    command -v kubeadm >/dev/null 2>&1 && exit 0
    # Install and configure prerequisites
    cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
    overlay
    br_netfilter
    EOF
    modprobe overlay
    modprobe br_netfilter
    cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    sysctl --system
    # Installing kubeadm, kubelet and kubectl
    export DEBIAN_FRONTEND=noninteractive
    apt-get update
    apt-get install -y apt-transport-https ca-certificates curl
    VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt | sed -e 's/v//' | cut -d'.' -f1-2)
    echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
    apt-get update
    # cri-tools
    apt-get install -y cri-tools
    cat <<EOF | sudo tee /etc/crictl.yaml
    runtime-endpoint: unix:///run/containerd/containerd.sock
    EOF
    # cni-plugins
    apt-get install -y kubernetes-cni
    rm -f /etc/cni/net.d/*.conf*
    apt-get install -y kubelet kubeadm kubectl && apt-mark hold kubelet kubeadm kubectl
    systemctl enable --now kubelet
# Step 2: switch containerd to the systemd cgroup driver and pin the
# sandbox (pause) image to the one kubeadm expects.
# See <https://kubernetes.io/docs/setup/production-environment/container-runtimes/>
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    grep SystemdCgroup /etc/containerd/config.toml && exit 0
    grep "version = 2" /etc/containerd/config.toml || exit 1
    # Configuring the systemd cgroup driver
    # Overriding the sandbox (pause) image
    cat <<EOF >>/etc/containerd/config.toml
    [plugins]
      [plugins."io.containerd.grpc.v1.cri"]
        sandbox_image = "$(kubeadm config images list | grep pause | sort -r | head -n1)"
        [plugins."io.containerd.grpc.v1.cri".containerd]
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
            [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
              runtime_type = "io.containerd.runc.v2"
              [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
                SystemdCgroup = true
    EOF
    systemctl restart containerd
# Step 3: kubeadm init (idempotent via the admin.conf existence check),
# flannel CNI, and control-plane untaint so pods can schedule on the
# single node.
# See <https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/>
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -e /etc/kubernetes/admin.conf && exit 0
    export KUBECONFIG=/etc/kubernetes/admin.conf
    kubeadm config images list
    kubeadm config images pull --cri-socket=unix:///run/containerd/containerd.sock
    # Initializing your control-plane node
    cat <<EOF >kubeadm-config.yaml
    kind: InitConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    nodeRegistration:
      criSocket: unix:///run/containerd/containerd.sock
    ---
    kind: ClusterConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    apiServer:
      certSANs: # --apiserver-cert-extra-sans
      - "127.0.0.1"
    networking:
      podSubnet: "10.244.0.0/16" # --pod-network-cidr
    ---
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    cgroupDriver: systemd
    EOF
    kubeadm init --config kubeadm-config.yaml
    # Installing a Pod network add-on
    kubectl apply -f https://github.com/flannel-io/flannel/releases/download/v0.24.0/kube-flannel.yml
    # Control plane node isolation
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
    # Replace the server address with localhost, so that it works also from the host
    sed -e "/server:/ s|https://.*:\([0-9]*\)$|https://127.0.0.1:\1|" -i $KUBECONFIG
    mkdir -p ${HOME:-/root}/.kube && cp -f $KUBECONFIG ${HOME:-/root}/.kube/config
# Step 4: copy the kubeconfig to the lima user's home ({{.Home}}/{{.User}}
# are lima template variables) so `limactl shell k8s kubectl` works.
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    export KUBECONFIG=/etc/kubernetes/admin.conf
    mkdir -p {{.Home}}/.kube
    cp -f $KUBECONFIG {{.Home}}/.kube/config
    chown -R {{.User}} {{.Home}}/.kube
probes:
- description: "kubeadm to be installed"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 30s bash -c "until command -v kubeadm >/dev/null 2>&1; do sleep 3; done"; then
      echo >&2 "kubeadm is not installed yet"
      exit 1
    fi
  hint: |
    See "/var/log/cloud-init-output.log" in the guest
- description: "kubeadm to be completed"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until test -f /etc/kubernetes/admin.conf; do sleep 3; done"; then
      echo >&2 "k8s is not running yet"
      exit 1
    fi
  hint: |
    The k8s kubeconfig file has not yet been created.
- description: "kubernetes cluster to be running"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until kubectl version >/dev/null 2>&1; do sleep 3; done"; then
      echo >&2 "kubernetes cluster is not up and running yet"
      exit 1
    fi
- description: "coredns deployment to be running"
  script: |
    #!/bin/bash
    set -eux -o pipefail
    kubectl wait -n kube-system --timeout=180s --for=condition=available deploy coredns
# Exported kubeconfig consumed by the host (and the Makefile's
# lima-kubeconfig target).
copyToHost:
- guest: "/etc/kubernetes/admin.conf"
  host: "{{.Dir}}/copied-from-guest/kubeconfig.yaml"
  deleteOnStop: true
message: |
  To run `kubectl` on the host (assumes kubectl is installed), run the following commands:
  ------
  export KUBECONFIG="{{.Dir}}/copied-from-guest/kubeconfig.yaml"
  kubectl ...
  ------

0 commit comments

Comments
 (0)