Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 24 additions & 5 deletions orchagent/portsorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6464,10 +6464,16 @@ void PortsOrch::initializePriorityGroupsBulk(std::vector<Port>& ports)

bulker.executeGet();

for (size_t idx = 0; idx < portCount; idx++)
size_t idx = 0;
for (const auto& port: ports)
{
const auto& port = ports[idx];
if (port.m_priority_group_ids.size() == 0)
{
continue;
}

const auto status = bulker.statuses[idx];
idx++;

if (status != SAI_STATUS_SUCCESS)
{
Expand Down Expand Up @@ -6542,10 +6548,16 @@ void PortsOrch::initializeQueuesBulk(std::vector<Port>& ports)

bulker.executeGet();

for (size_t idx = 0; idx < portCount; idx++)
size_t idx = 0;
for (const auto& port: ports)
{
const auto& port = ports[idx];
if (port.m_queue_ids.size() == 0)
{
continue;
}

const auto status = bulker.statuses[idx];
idx++;

if (status != SAI_STATUS_SUCCESS)
{
Expand Down Expand Up @@ -6622,10 +6634,17 @@ void PortsOrch::initializeSchedulerGroupsBulk(std::vector<Port>& ports)

bulker.executeGet();

size_t bulkIdx = 0;
for (size_t idx = 0; idx < portCount; idx++)
{
const auto& port = ports[idx];
const auto status = bulker.statuses[idx];
if (scheduler_group_ids[idx].size() == 0)
{
continue;
}

const auto status = bulker.statuses[bulkIdx];
bulkIdx++;

if (status != SAI_STATUS_SUCCESS)
{
Expand Down
89 changes: 81 additions & 8 deletions tests/mock_tests/portsorch_ut.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2925,14 +2925,42 @@ namespace portsorch_test
}
}

TEST_F(PortsOrchTest, PortHostIfCreateFailed)
TEST_F(PortsOrchTest, PortsWithNoPGsQueuesSchedulerGroups)
{
Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);

auto original_api = sai_hostif_api->create_hostif;
auto hostIfSpy = SpyOn<SAI_API_HOSTIF, SAI_OBJECT_TYPE_HOSTIF>(&sai_hostif_api->create_hostif);
hostIfSpy->callFake([&](sai_object_id_t*, sai_object_id_t, uint32_t, const sai_attribute_t*) -> sai_status_t {
return SAI_STATUS_INSUFFICIENT_RESOURCES;
auto original_api = sai_port_api->get_ports_attribute;
// Mock SAI port API to return 0 number of PGs, queues and scheduler groups
auto spy = SpyOn<SAI_API_PORT, SAI_OBJECT_TYPE_PORT>(&sai_port_api->get_ports_attribute);
spy->callFake([&](
uint32_t object_count,
const sai_object_id_t *object_id,
const uint32_t *attr_count,
sai_attribute_t **attr_list,
sai_bulk_op_error_mode_t mode,
sai_status_t *object_statuses) -> sai_status_t
{
assert(object_count > 1);
assert(attr_count[0] > 1);
switch (attr_list[0]->id)
{
case SAI_PORT_ATTR_NUMBER_OF_INGRESS_PRIORITY_GROUPS:
case SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES:
case SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS:
for (size_t i = 0; i < object_count; i++)
{
attr_list[i]->value.u32 = 0;
object_statuses[i] = SAI_STATUS_SUCCESS;
}
return SAI_STATUS_SUCCESS;
}
return original_api(
object_count,
object_id,
attr_count,
attr_list,
mode,
object_statuses);
}
);

Expand All @@ -2956,14 +2984,15 @@ namespace portsorch_test

static_cast<Orch *>(gPortsOrch)->doTask();

sai_hostif_api->create_hostif = original_api;

Port port;
gPortsOrch->getPort("Ethernet0", port);

ASSERT_FALSE(port.m_init);
ASSERT_TRUE(port.m_init);
ASSERT_EQ(port.m_priority_group_ids.size(), 0);
ASSERT_EQ(port.m_queue_ids.size(), 0);
}


TEST_F(PortsOrchTest, PfcDlrHandlerCallingDlrInitAttribute)
{
_hook_sai_port_api();
Expand Down Expand Up @@ -3908,4 +3937,48 @@ namespace portsorch_test
stateDbSet = stateTable.hget("Ethernet0", "max_priority_groups", value);
ASSERT_TRUE(stateDbSet);
}

// Fixture grouping negative-path (failure-injection) PortsOrch tests, i.e.
// cases that stub a SAI API to return an error and verify PortsOrch reacts
// correctly. Inherits all setup/teardown behavior unchanged from PortsOrchTest.
struct PortsOrchNegativeTests : PortsOrchTest
{
};

// Verify that a port is not flagged as initialized (m_init) when SAI fails
// to create its host interface.
TEST_F(PortsOrchNegativeTests, PortHostIfCreateFailed)
{
    Table portTable(m_app_db.get(), APP_PORT_TABLE_NAME);

    // Save the genuine API pointer, then stub create_hostif so that every
    // call reports an out-of-resources failure.
    auto savedCreateHostif = sai_hostif_api->create_hostif;
    auto hostIfSpy = SpyOn<SAI_API_HOSTIF, SAI_OBJECT_TYPE_HOSTIF>(&sai_hostif_api->create_hostif);
    hostIfSpy->callFake(
        [&](sai_object_id_t*, sai_object_id_t, uint32_t, const sai_attribute_t*) -> sai_status_t
        {
            return SAI_STATUS_INSUFFICIENT_RESOURCES;
        });

    // Populate the APP DB port table with the default SAI ports.
    const auto defaultPorts = ut_helper::getInitialSaiPorts();
    for (const auto& entry : defaultPorts)
    {
        portTable.set(entry.first, entry.second);
    }

    // Signal that port configuration is complete.
    portTable.set("PortConfigDone", { { "count", to_string(defaultPorts.size()) } });

    gPortsOrch->addExistingData(&portTable);

    // Run the orchestrator task loop: port creation proceeds, but every
    // host-interface creation is forced to fail by the stub above.
    static_cast<Orch *>(gPortsOrch)->doTask();

    // Restore the real SAI API before asserting.
    sai_hostif_api->create_hostif = savedCreateHostif;

    Port port;
    gPortsOrch->getPort("Ethernet0", port);

    // With hostif creation failing, the port must not be marked initialized.
    ASSERT_FALSE(port.m_init);
}

}
10 changes: 10 additions & 0 deletions tests/mock_tests/saispy.h
Original file line number Diff line number Diff line change
Expand Up @@ -118,3 +118,13 @@ std::shared_ptr<SaiSpyFunctor<n, objtype, sai_status_t, sai_object_id_t, uint32_

return std::make_shared<SaiSpyGetAttrFunctor>(fn_ptr);
}

// SpyOn overload matching the SAI bulk attribute-get function signature
// (e.g. sai_port_api->get_ports_attribute): one status per object plus an
// overall return status.
template <int n, int objtype>
std::shared_ptr<SaiSpyFunctor<n, objtype, sai_status_t, uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*>>
SpyOn(sai_status_t (**fn_ptr)(uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*))
{
    // Alias the fully-instantiated spy type so the make_shared call stays readable.
    using BulkGetSpy = SaiSpyFunctor<n, objtype, sai_status_t, uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*>;
    return std::make_shared<BulkGetSpy>(fn_ptr);
}
Loading