Merge pull request #72 from RamLavi/udn_e2e_describe
e2e, persistent-ip: Add primary-UDN tests
kubevirt-bot authored Oct 8, 2024
2 parents 59db47c + a873dfb commit 925bee6
Showing 5 changed files with 278 additions and 83 deletions.
2 changes: 1 addition & 1 deletion hack/cluster.sh
@@ -3,7 +3,7 @@
set -xe
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

KIND_ARGS="${KIND_ARGS:--ic -ikv -i6 -mne}"
KIND_ARGS="${KIND_ARGS:--ic -ikv -i6 -mne -nse}"

OUTPUT_DIR=${OUTPUT_DIR:-${SCRIPT_DIR}/../.output}

150 changes: 122 additions & 28 deletions test/e2e/persistentips_test.go
@@ -40,7 +40,23 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

var _ = Describe("Persistent IPs", func() {
const (
secondaryLogicalNetworkInterfaceName = "multus"
nadName = "l2-net-attach-def"
)

const (
rolePrimary = "primary"
roleSecondary = "secondary"
)

// testParams parameterizes the DescribeTableSubtree below: role is injected into the
// NAD spec, ipsFrom extracts the IPs to assert on, and vmi builds the workload VMI.
type testParams struct {
role string
ipsFrom func(vmi *kubevirtv1.VirtualMachineInstance) ([]string, error)
vmi func(namespace string) *kubevirtv1.VirtualMachineInstance
}

var _ = DescribeTableSubtree("Persistent IPs", func(params testParams) {
var failureCount int = 0
JustAfterEach(func() {
if CurrentSpecReport().Failed() {
@@ -57,21 +73,21 @@ var _ = Describe("Persistent IPs", func() {

When("network attachment definition created with allowPersistentIPs=true", func() {
var (
td testenv.TestData
networkInterfaceName = "multus"
vm *kubevirtv1.VirtualMachine
vmi *kubevirtv1.VirtualMachineInstance
nad *nadv1.NetworkAttachmentDefinition
td testenv.TestData
vm *kubevirtv1.VirtualMachine
vmi *kubevirtv1.VirtualMachineInstance
nad *nadv1.NetworkAttachmentDefinition
)

BeforeEach(func() {
td = testenv.GenerateTestData()
td.SetUp()
DeferCleanup(func() {
td.TearDown()
})

nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace)
vmi = testenv.GenerateAlpineWithMultusVMI(td.Namespace, networkInterfaceName, nad.Name)
nad = testenv.GenerateLayer2WithSubnetNAD(nadName, td.Namespace, params.role)
vmi = params.vmi(td.Namespace)
vm = testenv.NewVirtualMachine(vmi, testenv.WithRunning())

By("Create NetworkAttachmentDefinition")
@@ -94,14 +110,14 @@ var _ = Describe("Persistent IPs", func() {
WithPolling(time.Second).
ShouldNot(BeEmpty())

Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

Expect(vmi.Status.Interfaces).NotTo(BeEmpty())
Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty())
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, Not(BeEmpty())))
})

It("should keep ips after live migration", func() {
vmiIPsBeforeMigration := vmi.Status.Interfaces[0].IPs
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
vmiIPsBeforeMigration, err := params.ipsFrom(vmi)
Expect(err).NotTo(HaveOccurred())
Expect(vmiIPsBeforeMigration).NotTo(BeEmpty())

testenv.LiveMigrateVirtualMachine(td.Namespace, vm.Name)
testenv.CheckLiveMigrationSucceeded(td.Namespace, vm.Name)
@@ -112,8 +128,7 @@ var _ = Describe("Persistent IPs", func() {
WithTimeout(5 * time.Minute).
Should(testenv.ContainConditionVMIReady())

Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPsAtInterfaceByName(networkInterfaceName, ConsistOf(vmiIPsBeforeMigration)))

Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, ConsistOf(vmiIPsBeforeMigration)))
})

It("should garbage collect IPAMClaims after VM deletion", func() {
@@ -171,7 +186,10 @@ var _ = Describe("Persistent IPs", func() {
})

It("should keep ips after restart", func() {
vmiIPsBeforeRestart := vmi.Status.Interfaces[0].IPs
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
vmiIPsBeforeRestart, err := params.ipsFrom(vmi)
Expect(err).NotTo(HaveOccurred())
Expect(vmiIPsBeforeRestart).NotTo(BeEmpty())
vmiUUIDBeforeRestart := vmi.UID

By("Re-starting the VM")
@@ -190,7 +208,7 @@ var _ = Describe("Persistent IPs", func() {
WithTimeout(5 * time.Minute).
Should(testenv.ContainConditionVMIReady())

Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPsAtInterfaceByName(networkInterfaceName, ConsistOf(vmiIPsBeforeRestart)))
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, ConsistOf(vmiIPsBeforeRestart)))
})
})

@@ -217,9 +235,9 @@ var _ = Describe("Persistent IPs", func() {
ShouldNot(BeEmpty())

Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

Expect(vmi.Status.Interfaces).NotTo(BeEmpty())
Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty())
ips, err := params.ipsFrom(vmi)
Expect(err).NotTo(HaveOccurred())
Expect(ips).NotTo(BeEmpty())
})

It("should garbage collect IPAMClaims after VM foreground deletion, only after VMI is gone", func() {
@@ -260,20 +278,19 @@ var _ = Describe("Persistent IPs", func() {
WithPolling(time.Second).
ShouldNot(BeEmpty())

Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

Expect(vmi.Status.Interfaces).NotTo(BeEmpty())
Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty())
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, Not(BeEmpty())))
})

It("should keep ips after live migration", func() {
vmiIPsBeforeMigration := vmi.Status.Interfaces[0].IPs
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
vmiIPsBeforeMigration, err := params.ipsFrom(vmi)
Expect(err).NotTo(HaveOccurred())
Expect(vmiIPsBeforeMigration).NotTo(BeEmpty())

testenv.LiveMigrateVirtualMachine(td.Namespace, vmi.Name)
testenv.CheckLiveMigrationSucceeded(td.Namespace, vmi.Name)

Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPsAtInterfaceByName(networkInterfaceName, ConsistOf(vmiIPsBeforeMigration)))

Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, ConsistOf(vmiIPsBeforeMigration)))
})

It("should garbage collect IPAMClaims after VMI deletion", func() {
@@ -294,7 +311,20 @@ var _ = Describe("Persistent IPs", func() {
})

})
})
},
Entry("secondary interfaces",
testParams{
role: roleSecondary,
ipsFrom: secondaryNetworkVMIStatusIPs,
vmi: vmiWithMultus,
}),
Entry("primary UDN",
testParams{
role: rolePrimary,
ipsFrom: defaultNetworkStatusAnnotationIPs,
vmi: vmiWithPasst,
}),
)
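
The assertions above lean on testenv.ThisVMI and the new testenv.MatchIPs helper, neither of which is part of this diff. A minimal sketch of such a matcher, assuming it simply wraps gomega's WithTransform around the ipsFrom accessor (the real helper may differ):

// Minimal sketch only, not the repo's actual implementation.
package testenv

import (
	"github.com/onsi/gomega"
	gomegatypes "github.com/onsi/gomega/types"
	kubevirtv1 "kubevirt.io/api/core/v1"
)

// MatchIPs runs ipsFrom against the VirtualMachineInstance under test and applies
// the given matcher to the resulting IP slice; WithTransform also surfaces an
// error returned by the transform as a match failure.
func MatchIPs(ipsFrom func(*kubevirtv1.VirtualMachineInstance) ([]string, error),
	matcher gomegatypes.GomegaMatcher) gomegatypes.GomegaMatcher {
	return gomega.WithTransform(ipsFrom, matcher)
}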

func foregroundDeleteOptions() *client.DeleteOptions {
foreground := metav1.DeletePropagationForeground
@@ -311,3 +341,67 @@ func removeFinalizersPatch() ([]byte, error) {
}
return json.Marshal(patch)
}

func secondaryNetworkVMIStatusIPs(vmi *kubevirtv1.VirtualMachineInstance) ([]string, error) {
return testenv.GetIPsFromVMIStatus(vmi, secondaryLogicalNetworkInterfaceName), nil
}

func defaultNetworkStatusAnnotationIPs(vmi *kubevirtv1.VirtualMachineInstance) ([]string, error) {
defNetworkStatus, err := testenv.DefaultNetworkStatus(vmi)
if err != nil {
return nil, err
}

return defNetworkStatus.IPs, nil
}
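
testenv.DefaultNetworkStatus is likewise defined outside this diff. For a primary UDN the IPs surface in the multus network-status annotation rather than in vmi.Status.Interfaces, so the annotation-parsing step could look roughly like the sketch below (assumption: the helper reads the annotation from the VMI's launcher pod; the pod lookup is omitted).

// Sketch only, not the repo's actual helper.
package testenv

import (
	"encoding/json"
	"fmt"

	nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
)

// defaultNetworkStatusFromAnnotations returns the network-status entry flagged as
// Default (the primary/pod network) from a pod's multus network-status annotation.
func defaultNetworkStatusFromAnnotations(annotations map[string]string) (*nadv1.NetworkStatus, error) {
	var statuses []nadv1.NetworkStatus
	if err := json.Unmarshal([]byte(annotations[nadv1.NetworkStatusAnnot]), &statuses); err != nil {
		return nil, err
	}
	for i := range statuses {
		if statuses[i].Default {
			return &statuses[i], nil
		}
	}
	return nil, fmt.Errorf("no default network status found")
}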

func vmiWithMultus(namespace string) *kubevirtv1.VirtualMachineInstance {
interfaceName := secondaryLogicalNetworkInterfaceName
return testenv.NewVirtualMachineInstance(
namespace,
testenv.WithMemory("128Mi"),
testenv.WithInterface(kubevirtv1.Interface{
Name: interfaceName,
InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{
Bridge: &kubevirtv1.InterfaceBridge{},
},
}),
testenv.WithNetwork(kubevirtv1.Network{

Name: interfaceName,
NetworkSource: kubevirtv1.NetworkSource{
Multus: &kubevirtv1.MultusNetwork{
NetworkName: nadName,
},
},
}),
)
}

func vmiWithPasst(namespace string) *kubevirtv1.VirtualMachineInstance {
const (
interfaceName = "passtnet"
cloudInitNetworkData = `
version: 2
ethernets:
  eth0:
    dhcp4: true`
)
return testenv.NewVirtualMachineInstance(
namespace,
testenv.WithMemory("2048Mi"),
testenv.WithInterface(kubevirtv1.Interface{
Name: interfaceName,
Binding: &kubevirtv1.PluginBinding{
Name: "passt",
},
}),
testenv.WithNetwork(kubevirtv1.Network{
Name: interfaceName,
NetworkSource: kubevirtv1.NetworkSource{
Pod: &kubevirtv1.PodNetwork{},
},
}),
testenv.WithCloudInitNoCloudVolume(cloudInitNetworkData),
)
}
76 changes: 48 additions & 28 deletions test/env/generate.go
@@ -13,9 +13,8 @@ import (
kubevirtv1 "kubevirt.io/api/core/v1"
)

func GenerateLayer2WithSubnetNAD(namespace string) *nadv1.NetworkAttachmentDefinition {
func GenerateLayer2WithSubnetNAD(nadName, namespace, role string) *nadv1.NetworkAttachmentDefinition {
networkName := "l2"
nadName := RandomName(networkName, 16)
return &nadv1.NetworkAttachmentDefinition{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -30,26 +29,24 @@ func GenerateLayer2WithSubnetNAD(namespace string) *nadv1.NetworkAttachmentDefin
"topology": "layer2",
"subnets": "10.100.200.0/24",
"netAttachDefName": "%[1]s/%[2]s",
"role": "%[4]s",
"allowPersistentIPs": true
}
`, namespace, nadName, networkName),
`, namespace, nadName, networkName, role),
},
}
}
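
Not part of the diff, just an illustration of the new signature: the primary-UDN table entry in the test file above passes role "primary", so the fields added in this hunk render as shown below ("test-ns" is a made-up namespace; "l2-net-attach-def" is the nadName constant from the test file).

nad := GenerateLayer2WithSubnetNAD("l2-net-attach-def", "test-ns", "primary")
// nad.Spec.Config then contains, among the fields shown above:
//   "netAttachDefName": "test-ns/l2-net-attach-def",
//   "role": "primary",
//   "allowPersistentIPs": true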

func GenerateAlpineWithMultusVMI(namespace, interfaceName, networkName string) *kubevirtv1.VirtualMachineInstance {
return &kubevirtv1.VirtualMachineInstance{
type VMIOption func(vmi *kubevirtv1.VirtualMachineInstance)

func NewVirtualMachineInstance(namespace string, opts ...VMIOption) *kubevirtv1.VirtualMachineInstance {
vmi := &kubevirtv1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: RandomName("alpine", 16),
},
Spec: kubevirtv1.VirtualMachineInstanceSpec{
Domain: kubevirtv1.DomainSpec{
Resources: kubevirtv1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
Devices: kubevirtv1.Devices{
Disks: []kubevirtv1.Disk{
{
@@ -61,26 +58,10 @@ func GenerateAlpineWithMultusVMI(namespace, interfaceName, networkName string) *
Name: "containerdisk",
},
},
Interfaces: []kubevirtv1.Interface{
{
Name: interfaceName,
InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{
Bridge: &kubevirtv1.InterfaceBridge{},
},
},
},
},
},
Networks: []kubevirtv1.Network{
{
Name: interfaceName,
NetworkSource: kubevirtv1.NetworkSource{
Multus: &kubevirtv1.MultusNetwork{
NetworkName: networkName,
},
},
Interfaces: []kubevirtv1.Interface{},
},
},
Networks: []kubevirtv1.Network{},
TerminationGracePeriodSeconds: pointer.Int64(5),
Volumes: []kubevirtv1.Volume{
{
@@ -94,6 +75,45 @@ func GenerateAlpineWithMultusVMI(namespace, interfaceName, networkName string) *
},
},
}

for _, f := range opts {
f(vmi)
}

return vmi
}

func WithMemory(memory string) VMIOption {
return func(vmi *kubevirtv1.VirtualMachineInstance) {
vmi.Spec.Domain.Resources.Requests = corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse(memory),
}
}
}

func WithInterface(iface kubevirtv1.Interface) VMIOption {
return func(vmi *kubevirtv1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.Interfaces = append(vmi.Spec.Domain.Devices.Interfaces, iface)
}
}

func WithNetwork(network kubevirtv1.Network) VMIOption {
return func(vmi *kubevirtv1.VirtualMachineInstance) {
vmi.Spec.Networks = append(vmi.Spec.Networks, network)
}
}

func WithCloudInitNoCloudVolume(cloudInitNetworkData string) VMIOption {
return func(vmi *kubevirtv1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, kubevirtv1.Volume{
Name: "cloudinitdisk",
VolumeSource: kubevirtv1.VolumeSource{
CloudInitNoCloud: &kubevirtv1.CloudInitNoCloudSource{
NetworkData: cloudInitNetworkData,
},
},
})
}
}
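
A quick usage sketch of the new option functions (illustrative values only; the two in-tree users are vmiWithMultus and vmiWithPasst in the test file above):

// "demo-ns" and the network-data literal are hypothetical example values.
vmi := NewVirtualMachineInstance(
	"demo-ns",
	WithMemory("256Mi"),
	WithCloudInitNoCloudVolume("version: 2\nethernets:\n  eth0:\n    dhcp4: true"),
)
// Interfaces and networks are appended the same way, via WithInterface and WithNetwork.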

type VMOption func(vm *kubevirtv1.VirtualMachine)
(The remaining 2 changed files are not shown.)
