disable kube-proxy, address review comments
rahulait committed Mar 11, 2024
1 parent 2a5fe3e commit 919f64d
Showing 14 changed files with 131 additions and 45 deletions.
12 changes: 11 additions & 1 deletion controller/linodemachine_controller.go
@@ -70,6 +70,11 @@ var requeueInstanceStatuses = map[linodego.InstanceStatus]bool{
linodego.InstanceResizing: true,
}

type nodeIP struct {
ip string
ipType clusterv1.MachineAddressType
}

// LinodeMachineReconciler reconciles a LinodeMachine object
type LinodeMachineReconciler struct {
client.Client
@@ -295,7 +300,12 @@ func (r *LinodeMachineReconciler) reconcileCreate(
machineScope.LinodeMachine.Status.Ready = true
machineScope.LinodeMachine.Spec.InstanceID = &linodeInstance.ID
machineScope.LinodeMachine.Spec.ProviderID = util.Pointer(fmt.Sprintf("linode://%d", linodeInstance.ID))
machineScope.LinodeMachine.Status.Addresses = buildInstanceAddrs(linodeInstance)

addrs, err := r.buildInstanceAddrs(ctx, logger, machineScope, linodeInstance.ID)
if err != nil {
return linodeInstance, err
}
machineScope.LinodeMachine.Status.Addresses = addrs

if err = services.AddNodeToNB(ctx, logger, machineScope); err != nil {
logger.Error(err, "Failed to add instance to Node Balancer backend")
57 changes: 48 additions & 9 deletions controller/linodemachine_controller_helpers.go
@@ -115,20 +115,59 @@ func (r *LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineSc
return createConfig, nil
}

func buildInstanceAddrs(linodeInstance *linodego.Instance) []clusterv1.MachineAddress {
func (r *LinodeMachineReconciler) buildInstanceAddrs(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope, instanceID int) ([]clusterv1.MachineAddress, error) {
addrs := []clusterv1.MachineAddress{}
for _, addr := range linodeInstance.IPv4 {
addrType := clusterv1.MachineExternalIP
if addr.IsPrivate() {
addrType = clusterv1.MachineInternalIP
}
ips, err := r.getInstanceIPv4Addresses(ctx, logger, machineScope, instanceID)
if err != nil {
logger.Error(err, "Failed to get instance ip addresses")
return nil, err
}

// add all instance ips to machine's status
for _, ip := range ips {
addrs = append(addrs, clusterv1.MachineAddress{
Type: addrType,
Address: addr.String(),
Type: ip.ipType,
Address: ip.ip,
})
}

return addrs
return addrs, nil
}

func (r *LinodeMachineReconciler) getInstanceIPv4Addresses(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope, instanceID int) ([]nodeIP, error) {
addresses, err := machineScope.LinodeClient.GetInstanceIPAddresses(ctx, instanceID)
if err != nil {
return nil, err
}

// get the default instance config
configs, err := machineScope.LinodeClient.ListInstanceConfigs(ctx, instanceID, &linodego.ListOptions{})
if err != nil || len(configs) == 0 {
logger.Error(err, "Failed to list instance configs")
return nil, err
}

ips := []nodeIP{}
// check if a node has public ip and store it
if len(addresses.IPv4.Public) != 0 {
ips = append(ips, nodeIP{ip: addresses.IPv4.Public[0].Address, ipType: clusterv1.MachineExternalIP})
}

// Iterate over interfaces in config and find VPC specific ips
for _, iface := range configs[0].Interfaces {
if iface.VPCID != nil && iface.IPv4.VPC != "" {
ips = append(ips, nodeIP{ip: iface.IPv4.VPC, ipType: clusterv1.MachineInternalIP})
}
}

// if a node has private ip, store it as well
// NOTE: We specifically store VPC ips first so that they are used first during
// bootstrap when we set `registrationMethod: internal-only-ips`
if len(addresses.IPv4.Private) != 0 {
ips = append(ips, nodeIP{ip: addresses.IPv4.Private[0].Address, ipType: clusterv1.MachineInternalIP})
}

return ips, nil
}

func (r *LinodeMachineReconciler) getOwnerMachine(ctx context.Context, linodeMachine infrav1alpha1.LinodeMachine, log logr.Logger) (*clusterv1.Machine, error) {
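
The new getInstanceIPv4Addresses helper deliberately records the VPC interface address ahead of the legacy private address, so bootstrap with registrationMethod: internal-only-ips picks the VPC IP first. A hedged way to inspect the resulting ordering on the management cluster (machine name and addresses below are illustrative, not from this commit):

# Print the addresses the controller wrote into the LinodeMachine status.
kubectl get linodemachine my-cluster-control-plane-abcde \
  -o jsonpath='{range .status.addresses[*]}{.type}{"\t"}{.address}{"\n"}{end}'
# ExternalIP    172.105.1.10     <- first public IP
# InternalIP    10.0.0.3         <- VPC interface IP (stored first)
# InternalIP    192.168.128.5    <- legacy private IP
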
10 changes: 10 additions & 0 deletions templates/addons/cilium/cilium.yaml
@@ -20,6 +20,12 @@ spec:
ipv4NativeRoutingCIDR: 10.0.0.0/8
tunnelProtocol: ""
enableIPv4Masquerade: true
egressMasqueradeInterfaces: eth0
k8sServiceHost: {{ .InfraCluster.spec.controlPlaneEndpoint.host }}
k8sServicePort: {{ .InfraCluster.spec.controlPlaneEndpoint.port }}
extraArgs:
- --direct-routing-device=eth1
- --nodeport-addresses=0.0.0.0/0
ipam:
mode: kubernetes
ipv4:
@@ -33,3 +39,7 @@ spec:
enabled: true
ui:
enabled: true
# ipMasqAgent:
# enabled: true
# bpf:
# masquerade: true
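
Since kube-proxy is disabled elsewhere in this commit, Cilium's kubeProxyReplacement has to actually be in effect. A hedged check against the workload cluster (the exact status line varies by Cilium version):

# Ask the Cilium agent for its own status report.
kubectl -n kube-system exec ds/cilium -- cilium status | grep -i kubeproxyreplacement
# KubeProxyReplacement:   True
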
5 changes: 5 additions & 0 deletions templates/addons/provider-linode/linode-ccm.yaml
@@ -15,6 +15,11 @@ spec:
wait: true
timeout: 5m
valuesTemplate: |
routeController:
vpcName: ${VPC_NAME:=${CLUSTER_NAME}}
linodeNodePrivateSubnet: 10.0.0.0/8
configureCloudRoutes: true
routeReconciliationPeriod: 1m
secretRef:
name: "linode-token-region"
image:
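
With routeController and configureCloudRoutes enabled, the CCM programs VPC routes for each node's pod CIDR instead of relying on an overlay. A hedged spot check that every node has a pod CIDR for the route controller to work with (values shown are examples):

# List each node with its allocated pod CIDR.
kubectl get nodes -o custom-columns='NODE:.metadata.name,PODCIDR:.spec.podCIDR'
# NODE                             PODCIDR
# my-cluster-control-plane-abcde   10.244.0.0/24
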
1 change: 1 addition & 0 deletions templates/common-init-files/secret.yaml
@@ -25,6 +25,7 @@ stringData:
set -euo pipefail
export DEBIAN_FRONTEND=noninteractive
hostnamectl set-hostname "$1" && hostname -F /etc/hostname
echo "$(ip a s eth1 |grep 'inet ' |cut -d' ' -f6|cut -d/ -f1) $1" >> /etc/hosts
mkdir -p -m 755 /etc/apt/keyrings
PATCH_VERSION=$${2#[v]}
VERSION=$${PATCH_VERSION%.*}
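
The added line maps the node's hostname to its eth1 (VPC) address in /etc/hosts before kubeadm runs. The same pipeline can be run by hand to see what it extracts (the address is an example):

# Extract the first IPv4 address bound to eth1.
ip a s eth1 | grep 'inet ' | cut -d' ' -f6 | cut -d/ -f1
# 10.0.0.3
# The init script then appends a line like "10.0.0.3 <hostname>" to /etc/hosts.
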
2 changes: 1 addition & 1 deletion templates/flavors/base/linodeCluster.yaml
@@ -10,4 +10,4 @@ spec:
vpcRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: LinodeVPC
name: vpc-${CLUSTER_NAME}
name: ${VPC_NAME:=${CLUSTER_NAME}}
4 changes: 2 additions & 2 deletions templates/flavors/base/linodeMachineTemplate.yaml
@@ -11,7 +11,7 @@ spec:
region: ${LINODE_REGION}
authorizedKeys:
# uncomment to include your ssh key in linode provisioning
# - ${LINODE_SSH_PUBKEY:=""}
- ${LINODE_SSH_PUBKEY:=""}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: LinodeMachineTemplate
@@ -25,4 +25,4 @@ spec:
region: ${LINODE_REGION}
authorizedKeys:
# uncomment to include your ssh key in linode provisioning
# - ${LINODE_SSH_PUBKEY:=""}
- ${LINODE_SSH_PUBKEY:=""}
4 changes: 2 additions & 2 deletions templates/flavors/base/linodeVPC.yaml
@@ -2,9 +2,9 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: LinodeVPC
metadata:
name: vpc-${CLUSTER_NAME}
name: ${VPC_NAME:=${CLUSTER_NAME}}
spec:
region: ${LINODE_REGION}
subnets:
- ipv4: 10.0.0.0/8
label: default
label: default
2 changes: 2 additions & 0 deletions templates/flavors/default/kubeadmControlPlane.yaml
@@ -56,6 +56,8 @@ spec:
- - LABEL=etcd_data
- /var/lib/etcd_data
initConfiguration:
skipPhases:
- addon/kube-proxy
nodeRegistration:
kubeletExtraArgs:
cloud-provider: external
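
Skipping kubeadm's addon/kube-proxy phase means the kube-proxy DaemonSet is never installed; Service traffic is handled by Cilium's kubeProxyReplacement configured earlier. A hedged check on a cluster created from this flavor:

# kube-proxy should simply not exist.
kubectl -n kube-system get daemonset kube-proxy
# Error from server (NotFound): daemonsets.apps "kube-proxy" not found
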
2 changes: 1 addition & 1 deletion templates/flavors/k3s/k3sConfigTemplate.yaml
@@ -13,6 +13,6 @@ spec:
preK3sCommands:
- |
mkdir -p /etc/rancher/k3s/config.yaml.d/
echo "node-ip: $(hostname -I | grep -oE 192\.168\.[0-9]+\.[0-9]+)" >> /etc/rancher/k3s/config.yaml.d/capi-config.yaml
echo "node-ip: $(ip a s eth1 |grep 'inet ' |cut -d' ' -f6|cut -d/ -f1)" >> /etc/rancher/k3s/config.yaml.d/capi-config.yaml
- sed -i '/swap/d' /etc/fstab
- swapoff -a
32 changes: 31 additions & 1 deletion templates/flavors/k3s/k3sControlPlane.yaml
@@ -33,6 +33,36 @@ spec:
name: linode-${CLUSTER_NAME}-crs-0
owner: root:root
path: /var/lib/rancher/k3s/server/manifests/linode-token-region.yaml
- path: /var/lib/rancher/k3s/server/manifests/k3s-cilium-config.yaml
owner: root:root
permissions: "0640"
content: |
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: cilium
namespace: kube-system
spec:
valuesContent: |-
routingMode: native
kubeProxyReplacement: true
ipv4NativeRoutingCIDR: 10.0.0.0/8
tunnelProtocol: ""
enableIPv4Masquerade: true
egressMasqueradeInterfaces: eth0
k8sServiceHost: 10.0.0.2
k8sServicePort: 6443
extraArgs:
- --direct-routing-device=eth1
- --nodeport-addresses=0.0.0.0/0
ipam:
mode: kubernetes
ipv4:
enabled: true
ipv6:
enabled: false
k8s:
requireIPv4PodCIDR: true
serverConfig:
disableComponents:
- servicelb
@@ -43,7 +73,7 @@
- "provider-id=linode://{{ ds.meta_data.id }}"
preK3sCommands:
- |
echo "node-ip: $(hostname -I | grep -oE 192\.168\.[0-9]+\.[0-9]+)" >> /etc/rancher/k3s/config.yaml.d/capi-config.yaml
echo "node-ip: $(ip a s eth1 |grep 'inet ' |cut -d' ' -f6|cut -d/ -f1)" >> /etc/rancher/k3s/config.yaml.d/capi-config.yaml
- sed -i '/swap/d' /etc/fstab
- swapoff -a
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
12 changes: 12 additions & 0 deletions templates/flavors/rke2/kustomization.yaml
@@ -5,6 +5,7 @@ resources:
- rke2ControlPlane.yaml
- rke2ConfigTemplate.yaml
- secret.yaml
- ../../addons/cilium
patches:
- target:
group: cluster.x-k8s.io
@@ -14,6 +15,17 @@ patches:
- op: replace
path: /spec/controlPlaneRef/kind
value: RKE2ControlPlane
- target:
group: cluster.x-k8s.io
version: v1beta1
kind: Cluster
patch: |-
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: ${CLUSTER_NAME}
labels:
cni: cilium
- target:
group: cluster.x-k8s.io
version: v1beta1
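
The added Cluster patch applies the cni: cilium label, which the Cilium addon resources pulled in above are assumed to select on. A hedged check on the management cluster:

# The LABELS column should include cni=cilium.
kubectl get cluster "${CLUSTER_NAME}" --show-labels
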
4 changes: 2 additions & 2 deletions templates/flavors/rke2/rke2ConfigTemplate.yaml
@@ -12,11 +12,11 @@ spec:
kubelet:
extraArgs:
- "provider-id=linode://{{ ds.meta_data.id }}"
# TODO: use MDS to get public and private IP instead because hostname ordering can't always be assumed
# TODO: use MDS to get private IP instead
preRKE2Commands:
- |
mkdir -p /etc/rancher/rke2/config.yaml.d/
echo "node-ip: $(hostname -I | grep -oE 192\.168\.[0-9]+\.[0-9]+)" >> /etc/rancher/rke2/config.yaml.d/capi-config.yaml
echo "node-ip: $(ip a s eth1 |grep 'inet ' |cut -d' ' -f6|cut -d/ -f1)" >> /etc/rancher/rke2/config.yaml.d/capi-config.yaml
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname
29 changes: 3 additions & 26 deletions templates/flavors/rke2/rke2ControlPlane.yaml
@@ -21,39 +21,16 @@ spec:
name: linode-${CLUSTER_NAME}-crs-0
owner: root:root
path: /var/lib/rancher/rke2/server/manifests/linode-token-region.yaml
- path: /var/lib/rancher/rke2/server/manifests/rke2-cilium-config.yaml
owner: root:root
permissions: "0640"
content: |
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-cilium
namespace: kube-system
spec:
valuesContent: |-
routingMode: native
kubeProxyReplacement: true
ipv4NativeRoutingCIDR: 10.0.0.0/8
tunnelProtocol: ""
enableIPv4Masquerade: true
ipam:
mode: kubernetes
ipv4:
enabled: true
ipv6:
enabled: false
k8s:
requireIPv4PodCIDR: true
registrationMethod: internal-only-ips
serverConfig:
cni: cilium
cni: none
cloudProviderName: external
disableComponents:
pluginComponents:
- "rke2-ingress-nginx"
kubernetesComponents:
- "cloudController"
- "kubeProxy"
agentConfig:
version: ${RKE2_KUBERNETES_VERSION}
nodeName: '{{ ds.meta_data.label }}'
@@ -63,7 +40,7 @@ spec:
preRKE2Commands:
- |
mkdir -p /etc/rancher/rke2/config.yaml.d/
echo "node-ip: $(hostname -I | grep -oE 192\.168\.[0-9]+\.[0-9]+)" >> /etc/rancher/rke2/config.yaml.d/capi-config.yaml
echo "node-ip: $(ip a s eth1 |grep 'inet ' |cut -d' ' -f6|cut -d/ -f1)" >> /etc/rancher/rke2/config.yaml.d/capi-config.yaml
- sed -i '/swap/d' /etc/fstab
- swapoff -a
- hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname
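
With the bundled rke2-cilium HelmChartConfig dropped, cni set to none, and kubeProxy disabled, networking now comes from the shared Cilium addon wired in through the kustomization above. A hedged check that the addon-managed agents are the ones running (k8s-app=cilium is Cilium's usual pod label):

# Cilium agent pods should be Running on every node.
kubectl -n kube-system get pods -l k8s-app=cilium
# cilium-xxxxx   1/1   Running   ...
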
