Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions libs/vm/spec.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ class VMISpec:
volumes: list[Volume] | None = None
terminationGracePeriodSeconds: int | None = None # noqa: N815
affinity: Affinity | None = None
nodeSelector: dict[str, str] | None = None # noqa: N815
Comment thread
azhivovk marked this conversation as resolved.


@dataclass
Expand Down
1 change: 1 addition & 0 deletions pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ markers =
rwx_default_storage: Tests that require RWX storage
descheduler: Tests that require kube-descheduler on nodes
remote_cluster: Tests that require a remote cluster
mixed_os_nodes: Tests that require a dual-stream cluster with both RHCOS 9 and RHCOS 10 worker nodes
Comment thread
azhivovk marked this conversation as resolved.

## Required operators
mtv: Tests that require the MTV operator to be installed
Expand Down
15 changes: 15 additions & 0 deletions tests/network/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,11 @@
from timeout_sampler import TimeoutExpiredError

from libs.net.cluster import ipv4_supported_cluster, ipv6_supported_cluster
from tests.network.libs.nodes import (
RHCOS_9_VERSION_PREFIX,
RHCOS_10_VERSION_PREFIX,
node_by_rhcos_version,
)
from tests.network.utils import get_vlan_index_number
from utilities.constants import (
CLUSTER,
Expand Down Expand Up @@ -321,3 +326,13 @@ def _verify_mtv_installed():
message="Network cluster verification failed",
admin_client=admin_client,
)


@pytest.fixture(scope="module")
def rhcos9_node(workers):
    """Module-scoped fixture: first worker node whose OS image matches the RHCOS 9 prefix."""
    return node_by_rhcos_version(
        workers=workers,
        rhcos_version_prefix=RHCOS_9_VERSION_PREFIX,
    )


@pytest.fixture(scope="module")
def rhcos10_node(workers):
    """Module-scoped fixture: first worker node whose OS image matches the RHCOS 10 prefix."""
    return node_by_rhcos_version(
        workers=workers,
        rhcos_version_prefix=RHCOS_10_VERSION_PREFIX,
    )
20 changes: 2 additions & 18 deletions tests/network/l2_bridge/bandwidth/lib_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@

from kubernetes.dynamic import DynamicClient

from libs.net.cluster import ipv4_supported_cluster, ipv6_supported_cluster
from libs.net.traffic_generator import IPERF_SERVER_PORT, TcpServer
from libs.vm.factory import base_vmspec, fedora_vm
from libs.vm.spec import CloudInitNoCloud, Interface, Multus, Network
from libs.vm.vm import BaseVirtualMachine, add_volume_disk, cloudinitdisk_storage
from tests.network.libs import cloudinit
from tests.network.libs.cloudinit import primary_iface_cloud_init

BANDWIDTH_SECONDARY_IFACE_NAME: Final[str] = "secondary"
BANDWIDTH_RATE_BPS: Final[int] = 10_000_000 # 10 Mbps
Expand Down Expand Up @@ -115,7 +115,7 @@ def secondary_network_vm(
]

ethernets = {}
primary = _masquerade_iface_cloud_init()
primary = primary_iface_cloud_init()
if primary:
ethernets["eth0"] = primary
ethernets["eth1"] = cloudinit.EthernetDevice(addresses=secondary_iface_addresses)
Expand All @@ -129,19 +129,3 @@ def secondary_network_vm(
)
spec.template.spec = add_volume_disk(vmi_spec=spec.template.spec, volume=volume, disk=disk)
return fedora_vm(namespace=namespace, name=name, client=client, spec=spec)


def _masquerade_iface_cloud_init() -> cloudinit.EthernetDevice | None:
"""Return cloud-init ethernet config for a masquerade (primary) interface.

Returns:
EthernetDevice with static IPv6 and optional DHCP4, or None if IPv6 is not supported.
"""
if not ipv6_supported_cluster():
return None
return cloudinit.EthernetDevice(
addresses=["fd10:0:2::2/120"],
gateway6="fd10:0:2::1",
dhcp4=ipv4_supported_cluster(),
dhcp6=False,
)
12 changes: 1 addition & 11 deletions tests/network/l2_bridge/vmi_interfaces_stability/lib_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from libs.vm.spec import CloudInitNoCloud, Interface, Multus, Network
from libs.vm.vm import BaseVirtualMachine, add_volume_disk, cloudinitdisk_storage
from tests.network.libs import cloudinit
from tests.network.libs.cloudinit import primary_iface_cloud_init
from tests.network.localnet.liblocalnet import GUEST_1ST_IFACE_NAME, GUEST_3RD_IFACE_NAME

LOGGER = logging.getLogger(__name__)
Expand Down Expand Up @@ -61,17 +62,6 @@ def secondary_network_vm(
return fedora_vm(namespace=namespace, name=name, client=client, spec=spec)


def primary_iface_cloud_init() -> cloudinit.EthernetDevice | None:
if not ipv6_supported_cluster():
return None
return cloudinit.EthernetDevice(
addresses=["fd10:0:2::2/120"],
gateway6="fd10:0:2::1",
dhcp4=ipv4_supported_cluster(),
dhcp6=False,
)


def secondary_iface_cloud_init(host_address: int) -> cloudinit.EthernetDevice:
ips = secondary_iface_ips(host_address=host_address)
addresses = [f"{ip}/64" if ipaddress.ip_address(ip).version == 6 else f"{ip}/24" for ip in ips]
Expand Down
20 changes: 20 additions & 0 deletions tests/network/libs/cloudinit.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import yaml

from libs.net.cluster import ipv4_supported_cluster, ipv6_supported_cluster
from tests.network.libs.apimachinery import dict_normalization_for_dataclass

NETWORK_DATA: Final[str] = "networkData"
Expand Down Expand Up @@ -85,3 +86,22 @@ def format_cloud_config(userdata: UserData) -> str:

def cloudinit(netdata: NetworkData) -> dict[str, Any]:
return {NETWORK_DATA: todict(no_cloud=netdata)}


def primary_iface_cloud_init() -> EthernetDevice | None:
    """Build the cloud-init ethernet config for the masquerade primary interface.

    On IPv6-capable clusters the primary interface (eth0) gets a static IPv6
    address so connectivity can be verified per IP family; DHCP4 is enabled
    only when the cluster also supports IPv4.

    Returns:
        EthernetDevice with static IPv6 and optional DHCP4, or None on
        IPv4-only clusters (no primary-interface config is needed there).
    """
    if ipv6_supported_cluster():
        return EthernetDevice(
            addresses=["fd10:0:2::2/120"],
            gateway6="fd10:0:2::1",
            dhcp4=ipv4_supported_cluster(),
            dhcp6=False,
        )
    return None
41 changes: 41 additions & 0 deletions tests/network/libs/nodes.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
from typing import Final

from ocp_resources.node import Node
from ocp_resources.resource import ResourceEditor

from libs.vm.vm import BaseVirtualMachine

HOSTNAME_LABEL: Final[str] = "kubernetes.io/hostname"
RHCOS_9_VERSION_PREFIX: Final[str] = "Red Hat Enterprise Linux CoreOS 9"
RHCOS_10_VERSION_PREFIX: Final[str] = "Red Hat Enterprise Linux CoreOS 10"


def node_by_rhcos_version(workers: list[Node], rhcos_version_prefix: str) -> Node:
    """Return the first worker node whose OS image starts with the given RHCOS version prefix.

    Args:
        workers: Worker nodes to scan, in order.
        rhcos_version_prefix: Expected prefix of the node osImage field
            (e.g. "Red Hat Enterprise Linux CoreOS 9").

    Returns:
        The first matching Node.

    Raises:
        ValueError: If no worker node matches the prefix.
    """
    matching_node = next(
        (
            worker
            for worker in workers
            if worker.instance.status.nodeInfo.osImage.startswith(rhcos_version_prefix)
        ),
        None,
    )
    if matching_node is None:
        raise ValueError(f"No worker node found with RHCOS version prefix: {rhcos_version_prefix!r}")
    return matching_node


def update_vm_node_selector(vm: BaseVirtualMachine, node: Node) -> None:
    """Patch the VM spec to pin it to the given node via nodeSelector.

    Args:
        vm: VirtualMachine to update in place on the cluster.
        node: Target worker node; its hostname label value is used as the selector.
    """
    node_selector = {HOSTNAME_LABEL: node.hostname}
    spec_patch = {"spec": {"template": {"spec": {"nodeSelector": node_selector}}}}
    ResourceEditor(patches={vm: spec_patch}).update()
43 changes: 43 additions & 0 deletions tests/network/primary_network/rhel9_rhel10_cluster/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
from collections.abc import Generator

import pytest
from kubernetes.dynamic import DynamicClient
from ocp_resources.namespace import Namespace
from ocp_resources.node import Node

from libs.vm.vm import BaseVirtualMachine
from tests.network.primary_network.rhel9_rhel10_cluster.lib_helpers import primary_network_vm


@pytest.fixture(scope="module")
def primary_server_vm(
    unprivileged_client: DynamicClient,
    namespace: Namespace,
    rhcos9_node: Node,
) -> Generator[BaseVirtualMachine]:
    """Module-scoped server VM on the primary network, pinned to the RHCOS 9 node.

    Yields the VM running with the guest agent connected; teardown is handled
    by the VM context manager.
    """
    server_vm_ctx = primary_network_vm(
        client=unprivileged_client,
        namespace=namespace.name,
        node=rhcos9_node,
        name="server-vm",
    )
    with server_vm_ctx as server_vm:
        server_vm.start(wait=True)
        server_vm.wait_for_agent_connected()
        yield server_vm


@pytest.fixture(scope="module")
def primary_client_vm(
    unprivileged_client: DynamicClient,
    namespace: Namespace,
    rhcos9_node: Node,
) -> Generator[BaseVirtualMachine]:
    """Module-scoped client VM on the primary network, pinned to the RHCOS 9 node.

    Yields the VM running with the guest agent connected; teardown is handled
    by the VM context manager.
    """
    client_vm_ctx = primary_network_vm(
        client=unprivileged_client,
        namespace=namespace.name,
        node=rhcos9_node,
        name="client-vm",
    )
    with client_vm_ctx as client_vm:
        client_vm.start(wait=True)
        client_vm.wait_for_agent_connected()
        yield client_vm
47 changes: 47 additions & 0 deletions tests/network/primary_network/rhel9_rhel10_cluster/lib_helpers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
from kubernetes.dynamic import DynamicClient
from ocp_resources.node import Node

from libs.vm.factory import base_vmspec, fedora_vm
from libs.vm.spec import CloudInitNoCloud, Devices, Interface, Network
from libs.vm.vm import BaseVirtualMachine, add_volume_disk, cloudinitdisk_storage
from tests.network.libs import cloudinit
from tests.network.libs.cloudinit import primary_iface_cloud_init
from tests.network.libs.nodes import HOSTNAME_LABEL


def primary_network_vm(
    namespace: str,
    name: str,
    client: DynamicClient,
    node: Node,
) -> BaseVirtualMachine:
    """Build a Fedora VM attached only to the primary (masquerade) network, pinned to a node.

    When the cluster supports IPv6, a static IPv6 address is configured on the
    primary interface via a cloud-init disk; on IPv4-only clusters no
    cloud-init disk is attached.

    Args:
        namespace: Namespace in which the VM will be created.
        name: Name of the VM.
        client: Kubernetes dynamic client.
        node: Worker node to pin the VM to via nodeSelector.

    Returns:
        Configured BaseVirtualMachine object (not yet started).
    """
    vmi_spec = base_vmspec()
    template_spec = vmi_spec.template.spec
    template_spec.domain.devices = Devices(interfaces=[Interface(name="default", masquerade={})])
    template_spec.networks = [Network(name="default", pod={})]
    template_spec.nodeSelector = {HOSTNAME_LABEL: node.hostname}

    eth0_config = primary_iface_cloud_init()
    if eth0_config is not None:
        network_data = cloudinit.asyaml(no_cloud=cloudinit.NetworkData(ethernets={"eth0": eth0_config}))
        user_data = cloudinit.format_cloud_config(userdata=cloudinit.UserData(users=[]))
        disk, volume = cloudinitdisk_storage(
            data=CloudInitNoCloud(networkData=network_data, userData=user_data)
        )
        vmi_spec.template.spec = add_volume_disk(vmi_spec=vmi_spec.template.spec, volume=volume, disk=disk)

    return fedora_vm(namespace=namespace, name=name, client=client, spec=vmi_spec)
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,13 @@

import pytest

__test__ = False
from libs.net.vmspec import lookup_iface_status
from tests.network.libs.connectivity import build_ping_command
from tests.network.libs.nodes import update_vm_node_selector
from utilities.virt import migrate_vm_and_verify, vm_console_run_commands


@pytest.mark.mixed_os_nodes
@pytest.mark.incremental
class TestConnectivity:
"""
Expand All @@ -23,7 +27,13 @@ class TestConnectivity:
"""

@pytest.mark.polarion("CNV-15950")
def test_primary_connectivity_reestablished_after_server_migration_to_rhcos10(self):
def test_primary_connectivity_reestablished_after_server_migration_to_rhcos10(
self,
subtests,
primary_client_vm,
primary_server_vm,
rhcos10_node,
):
"""
Test that network connectivity over the primary network can be re-established after
the server VM migrates from an RHCOS 9 node to an RHCOS 10 node.
Expand All @@ -39,9 +49,25 @@ def test_primary_connectivity_reestablished_after_server_migration_to_rhcos10(se
Expected:
- Ping from the client VM to the server VM succeeds after the migration
"""
primary_iface_name = primary_server_vm.vmi.interfaces[0].name
Comment thread
azhivovk marked this conversation as resolved.
update_vm_node_selector(vm=primary_server_vm, node=rhcos10_node)
migrate_vm_and_verify(vm=primary_server_vm)
for ip in lookup_iface_status(vm=primary_server_vm, iface_name=primary_iface_name)["ipAddresses"]:
with subtests.test(msg=f"Testing {primary_server_vm.name} IP address: {ip}"):
vm_console_run_commands(
vm=primary_client_vm,
commands=[build_ping_command(dst_ip=ip, count=10, timeout=10)],
timeout=20,
)

@pytest.mark.polarion("CNV-15967")
def test_primary_connectivity_reestablished_after_server_migration_to_rhcos9(self):
def test_primary_connectivity_reestablished_after_server_migration_to_rhcos9(
self,
subtests,
primary_client_vm,
primary_server_vm,
rhcos9_node,
):
"""
Test that network connectivity over the primary network can be re-established after
the server VM migrates from an RHCOS 10 node to an RHCOS 9 node.
Expand All @@ -57,3 +83,13 @@ def test_primary_connectivity_reestablished_after_server_migration_to_rhcos9(sel
Expected:
- Ping from the client VM to the server VM succeeds after the migration
"""
primary_iface_name = primary_server_vm.vmi.interfaces[0].name
update_vm_node_selector(vm=primary_server_vm, node=rhcos9_node)
migrate_vm_and_verify(vm=primary_server_vm)
for ip in lookup_iface_status(vm=primary_server_vm, iface_name=primary_iface_name)["ipAddresses"]:
with subtests.test(msg=f"Testing {primary_server_vm.name} IP address: {ip}"):
vm_console_run_commands(
vm=primary_client_vm,
commands=[build_ping_command(dst_ip=ip, count=10, timeout=10)],
timeout=20,
)