Skip to content

Commit

Permalink
[pre-commit] Fix style issues in test scripts directly under tests fo…
Browse files Browse the repository at this point in the history
…lder (#6648)

What is the motivation for this PR?
pre-commit is a static analysis tool introduced recently. This tool is not able to do a diff-only check: it checks the entirety of every file touched by a PR. We can't blame a PR author for legacy issues. That's why the pre-commit check is currently only optional.

To ensure that we can make pre-commit a mandatory check for PRs submitted to this repository, we need to fix all the legacy issues complained by pre-commit.

How did you do it?
This change fixes the style issues of test scripts directly under the `tests` folder.

Signed-off-by: Xin Wang <[email protected]>
  • Loading branch information
wangxin authored Oct 31, 2022
1 parent 8f01c7b commit 7c73002
Show file tree
Hide file tree
Showing 8 changed files with 100 additions and 59 deletions.
2 changes: 1 addition & 1 deletion SECURITY.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Instead, please report them to the Microsoft Security Response Center (MSRC) at

If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

Expand Down
7 changes: 5 additions & 2 deletions tests/test_features.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
pytest.mark.topology('any')
]


# Test Functions
def test_show_features(duthosts, enum_dut_hostname):
"""Verify show features command output against CONFIG_DB
Expand All @@ -15,5 +16,7 @@ def test_show_features(duthosts, enum_dut_hostname):
pytest_assert(succeeded, "failed to obtain feature status")
for cmd_key, cmd_value in features_dict.items():
feature = str(cmd_key)
redis_value = duthost.shell('/usr/bin/redis-cli -n 4 --raw hget "FEATURE|{}" "state"'.format(feature), module_ignore_errors=False)['stdout']
pytest_assert(redis_value.lower() == cmd_value.lower(), "'{}' is '{}' which does not match with config_db".format(cmd_key, cmd_value))
redis_value = duthost.shell('/usr/bin/redis-cli -n 4 --raw hget "FEATURE|{}" "state"'
.format(feature), module_ignore_errors=False)['stdout']
pytest_assert(redis_value.lower() == cmd_value.lower(),
"'{}' is '{}' which does not match with config_db".format(cmd_key, cmd_value))
15 changes: 10 additions & 5 deletions tests/test_interfaces.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
pytest.mark.device_type('vs')
]


def test_interfaces(duthosts, enum_frontend_dut_hostname, tbinfo, enum_asic_index):
"""compare the interfaces between observed states and target state"""

Expand Down Expand Up @@ -44,23 +45,27 @@ def test_interfaces(duthosts, enum_frontend_dut_hostname, tbinfo, enum_asic_inde
verify_mac_address(host_facts, mg_facts['minigraph_vlan_interfaces'], router_mac)
verify_mac_address(host_facts, mg_facts['minigraph_interfaces'], router_mac)


def verify_port(host_facts, ports):
for port in ports:
pytest_assert(host_facts[port]['active'], "interface {} is not active".format(port))


def verify_mac_address(host_facts, intfs, router_mac):
for intf in intfs:
if 'attachto' in intf:
ifname = intf['attachto']
else:
ifname = intf['name']

pytest_assert(host_facts[ifname]['macaddress'].lower() == router_mac.lower(), \
"interface {} mac address {} does not match router mac {}".format(ifname, host_facts[ifname]['macaddress'], router_mac))
pytest_assert(host_facts[ifname]['macaddress'].lower() == router_mac.lower(),
"interface {} mac address {} does not match router mac {}"
.format(ifname, host_facts[ifname]['macaddress'], router_mac))


def verify_ip_address(host_facts, intfs):
for intf in intfs:
if intf.has_key('attachto'):
if 'attachto' in intf:
ifname = intf['attachto']
else:
ifname = intf['name']
Expand All @@ -69,7 +74,7 @@ def verify_ip_address(host_facts, intfs):
if ip.version == 4:
addrs = []
addrs.append(host_facts[ifname]['ipv4'])
if host_facts[ifname].has_key('ipv4_secondaries'):
if 'ipv4_secondaries' in host_facts[ifname]:
for addr in host_facts[ifname]['ipv4_secondaries']:
addrs.append(addr)
else:
Expand All @@ -79,7 +84,7 @@ def verify_ip_address(host_facts, intfs):
ips_found = []
for addr in addrs:
ips_found.append(addr['address'])
print addr
print(str(addr))
if IPAddress(addr['address']) == ip:
found = True
break
Expand Down
30 changes: 18 additions & 12 deletions tests/test_nbr_health.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,41 +11,45 @@
pytestmark = [
pytest.mark.sanity_check(skip_sanity=True),
pytest.mark.disable_loganalyzer,
pytest.mark.topology('util') #special marker
pytest.mark.topology('util') # special marker
]


def check_snmp(hostname, mgmt_addr, localhost, community, is_eos):
logger.info("Check neighbor {}, mgmt ip {} snmp".format(hostname, mgmt_addr))
res = localhost.snmp_facts(host=mgmt_addr, version='v2c', is_eos=is_eos, community=community)
try:
snmp_data = res['ansible_facts']
except:
return "neighbor {} has no snmp data".format(hostname)
except Exception as e:
return "neighbor {} has no snmp data, exception: {}".format(hostname, repr(e))
logger.info("Neighbor {}, sysdescr {}".format(hostname, snmp_data['ansible_sysdescr']))


def check_eos_facts(hostname, mgmt_addr, host):
logger.info("Check neighbor {} eos facts".format(hostname))
res = host.eos_facts()
logger.info("facts: {}".format(json.dumps(res, indent=4)))
try:
eos_facts = res['ansible_facts']
except:
return "neighbor {} has no eos_facts".format(hostname)
except Exception as e:
return "neighbor {} has no eos_facts, exception: {}".format(hostname, repr(e))

mgmt_ifnames = [ x for x in eos_facts['ansible_net_interfaces'] if x.startswith('Management') ]
mgmt_ifnames = [x for x in eos_facts['ansible_net_interfaces'] if x.startswith('Management')]
if len(mgmt_ifnames) == 0:
return "there is no management interface in neighbor {}".format(hostname)
for ifname in mgmt_ifnames:
try:
mgmt_ip = eos_facts['ansible_net_interfaces'][ifname]['ipv4']['address']
except Exception as e:
            logger.info("interface {} has no management address on neighbor {}".format(ifname, hostname))
            logger.info("interface {} has no management address on neighbor {}, exception: {}"
                        .format(ifname, hostname, repr(e)))

if mgmt_ip == mgmt_addr:
return

return "neighbor {} has no management address {}".format(hostname, mgmt_ip)


def check_sonic_facts(hostname, mgmt_addr, host):
logger.info("Check neighbor {} eos facts".format(hostname))
res = host.facts
Expand All @@ -56,29 +60,32 @@ def check_sonic_facts(hostname, mgmt_addr, host):
for addr in mgmt_addrs:
if addr == mgmt_addr:
return
return "neighbor {} has no management address {}".format(hostname, mgmt_ip)
return "neighbor {} has no management address {}".format(hostname, mgmt_addr)


def check_eos_bgp_facts(hostname, host):
logger.info("Check neighbor {} bgp facts".format(hostname))
res = host.eos_command(commands=['show ip bgp sum'])
logger.info("bgp: {}".format(res))
if not res.has_key('stdout_lines') or u'BGP summary' not in res['stdout_lines'][0][0]:
if 'stdout_lines' not in res or u'BGP summary' not in res['stdout_lines'][0][0]:
return "neighbor {} bgp not configured correctly".format(hostname)


def check_sonic_bgp_facts(hostname, host):
logger.info("Check neighbor {} bgp facts".format(hostname))
res = host.command('vtysh -c "show ip bgp sum"')
logger.info("bgp: {}".format(res))
if not res.has_key('stdout_lines') or u'Unicast Summary' not in "\n".join(res['stdout_lines']):
if 'stdout_lines' not in res or u'Unicast Summary' not in "\n".join(res['stdout_lines']):
return "neighbor {} bgp not configured correctly".format(hostname)


def test_neighbors_health(duthosts, localhost, nbrhosts, eos, sonic, enum_frontend_dut_hostname):
"""Check each neighbor device health"""

fails = []
duthost = duthosts[enum_frontend_dut_hostname]

config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
nei_meta = config_facts.get('DEVICE_NEIGHBOR_METADATA', {})

dut_type = None
Expand Down Expand Up @@ -127,7 +134,6 @@ def test_neighbors_health(duthosts, localhost, nbrhosts, eos, sonic, enum_fronte
failmsg = "neighbor type {} is unknown".format(k)
fails.append(failmsg)


# TODO: check link, bgp, etc. on

pytest_assert(len(fails) == 0, "\n".join(fails))
2 changes: 1 addition & 1 deletion tests/test_posttest.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ def test_restore_container_autorestart(duthosts, enum_dut_hostname, enable_conta
SNMP_RELOADING_TIME = 30
time.sleep(SNMP_RELOADING_TIME)


def test_recover_rsyslog_rate_limit(duthosts, enum_dut_hostname):
duthost = duthosts[enum_dut_hostname]
# We don't need to recover the rate limit on vs testbed
Expand All @@ -56,4 +57,3 @@ def test_recover_rsyslog_rate_limit(duthosts, enum_dut_hostname):
if 'enabled' not in state:
continue
duthost.modify_syslog_rate_limit(feature_name, rl_option='enable')

39 changes: 24 additions & 15 deletions tests/test_pretest.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,12 @@

from collections import defaultdict

from jinja2 import Template
from tests.common.helpers.assertions import pytest_assert
from tests.common.helpers.assertions import pytest_require
from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR
from tests.common.helpers.dut_utils import verify_features_state
from tests.common.utilities import wait_until
from tests.common.reboot import reboot
from tests.common.platform.processes_utils import wait_critical_processes
from tests.common.utilities import get_host_visible_vars

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -52,6 +49,7 @@ def test_features_state(duthosts, enum_dut_hostname, localhost):
verify_features_state, duthost), "Not all service states are valid!")
logger.info("The states of features in 'CONFIG_DB' are all valid.")


def test_cleanup_cache():
folder = '_cache'
if os.path.exists(folder):
Expand All @@ -74,11 +72,12 @@ def test_cleanup_testbed(duthosts, enum_dut_hostname, request, ptfhost):
# this step is needed to remove some backup files or the debug files added by users
# which can create issue for log-analyzer
duthost.shell("sudo find /var/log/ -mtime +1 | sudo xargs rm -f",
module_ignore_errors=True, executable="/bin/bash")
module_ignore_errors=True, executable="/bin/bash")

# Cleanup rsyslog configuration file that might have damaged by test_syslog.py
if ptfhost:
ptfhost.shell("if [[ -f /etc/rsyslog.conf ]]; then mv /etc/rsyslog.conf /etc/rsyslog.conf.orig; uniq /etc/rsyslog.conf.orig > /etc/rsyslog.conf; fi", executable="/bin/bash")
ptfhost.shell("if [[ -f /etc/rsyslog.conf ]]; then mv /etc/rsyslog.conf /etc/rsyslog.conf.orig; "
"uniq /etc/rsyslog.conf.orig > /etc/rsyslog.conf; fi", executable="/bin/bash")


def test_disable_container_autorestart(duthosts, enum_dut_hostname, disable_container_autorestart):
Expand Down Expand Up @@ -126,6 +125,7 @@ def collect_dut_info(dut):

return dut_info


def test_update_testbed_metadata(duthosts, tbinfo, fanouthosts):
metadata = {}
tbname = tbinfo['conf-name']
Expand All @@ -135,7 +135,7 @@ def test_update_testbed_metadata(duthosts, tbinfo, fanouthosts):
dutinfo = collect_dut_info(dut)
metadata[dut.hostname] = dutinfo

info = { tbname : metadata }
info = {tbname: metadata}
folder = 'metadata'
filepath = os.path.join(folder, tbname + '.json')
try:
Expand Down Expand Up @@ -181,6 +181,7 @@ def collect_dut_lossless_prio(dut):
result = [int(x) for x in port_qos_map[intf]['pfc_enable'].split(',')]
return result


def collect_dut_all_prio(dut):
config_facts = dut.config_facts(host=dut.hostname, source="running")['ansible_facts']

Expand All @@ -197,11 +198,13 @@ def collect_dut_all_prio(dut):
tc = [int(p) for p in dscp_to_tc_map.values()]
return list(set(tc))


def collect_dut_lossy_prio(dut):
lossless_prio = collect_dut_lossless_prio(dut)
all_prio = collect_dut_all_prio(dut)
return [p for p in all_prio if p not in lossless_prio]


def test_collect_testbed_prio(duthosts, tbinfo):
all_prio = {}
lossless_prio = {}
Expand All @@ -225,10 +228,11 @@ def test_collect_testbed_prio(duthosts, tbinfo):
if not os.path.exists(folder):
os.mkdir(folder)
with open(filepath, 'w') as yf:
json.dump({ tbname : prio_info[i]}, yf, indent=4)
json.dump({tbname: prio_info[i]}, yf, indent=4)
except IOError as e:
logger.warning('Unable to create file {}: {}'.format(filepath, e))


def test_update_saithrift_ptf(request, ptfhost):
'''
Install the correct python saithrift package on the ptf
Expand All @@ -239,7 +243,7 @@ def test_update_saithrift_ptf(request, ptfhost):
pkg_name = py_saithrift_url.split("/")[-1]
ptfhost.shell("rm -f {}".format(pkg_name))
result = ptfhost.get_url(url=py_saithrift_url, dest="/root", module_ignore_errors=True)
if result["failed"] != False or "OK" not in result["msg"]:
if result["failed"] or "OK" not in result["msg"]:
pytest.skip("Download failed/error while installing python saithrift package")
ptfhost.shell("dpkg -i {}".format(os.path.join("/root", pkg_name)))
logging.info("Python saithrift package installed successfully")
Expand All @@ -252,15 +256,10 @@ def test_stop_pfcwd(duthosts, enum_dut_hostname, tbinfo):
dut = duthosts[enum_dut_hostname]
dut.command('pfcwd stop')

"""
Separator for internal pretests.
Please add public pretest above this comment and keep internal
pretests below this comment.
"""

def prepare_autonegtest_params(duthosts, fanouthosts):
from tests.common.platform.device_utils import list_dut_fanout_connections

cadidate_test_ports = {}

for duthost in duthosts:
Expand All @@ -270,7 +269,8 @@ def prepare_autonegtest_params(duthosts, fanouthosts):
for dut_port, fanout, fanout_port in all_ports:
auto_neg_mode = fanout.get_auto_negotiation_mode(fanout_port)
if auto_neg_mode is not None:
cadidate_test_ports[duthost.hostname][dut_port] = {'fanout':fanout.hostname, 'fanout_port': fanout_port}
cadidate_test_ports[duthost.hostname][dut_port] = \
{'fanout': fanout.hostname, 'fanout_port': fanout_port}
folder = 'metadata'
filepath = os.path.join(folder, 'autoneg-test-params.json')
try:
Expand All @@ -279,6 +279,15 @@ def prepare_autonegtest_params(duthosts, fanouthosts):
except IOError as e:
logger.warning('Unable to create a datafile for autoneg tests: {}. Err: {}'.format(filepath, e))


"""
Separator for internal pretests.
Please add public pretest above this comment and keep internal
pretests below this comment.
"""


# This one is special. It is public, but we need to ensure that it is the last one executed in pre-test.
def test_generate_running_golden_config(duthosts):
"""
Generate running golden config after pre test.
Expand Down
20 changes: 13 additions & 7 deletions tests/test_procdockerstatsd.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
pytest.mark.topology('any')
]

# Helper Functions

def get_count_fromredisout(keys_out):
"""Extract keys count from redis output
"""
Expand All @@ -14,22 +14,28 @@ def get_count_fromredisout(keys_out):
count = s.encode('UTF-8')
return count

# Test Functions

def test_verify_status(duthosts, rand_one_dut_hostname):
"""Verify procdockerstatsd is active and running
"""
duthost = duthosts[rand_one_dut_hostname]
status = duthost.get_service_props('procdockerstatsd')
pytest_assert(status["ActiveState"] == "active" and status["SubState"] == "running", "Procdockerstatsd either not active or not running")
pytest_assert(status["ActiveState"] == "active" and status["SubState"] == "running",
"Procdockerstatsd either not active or not running")


def test_verify_redisexport(duthosts, rand_one_dut_hostname):
"""Verify procdockerstatsd is exporting values to redis.
"""
duthost = duthosts[rand_one_dut_hostname]
docker_stdout = duthost.shell('/usr/bin/redis-cli -n 6 KEYS "DOCKER_STATS|*" | wc -l', module_ignore_errors=False)['stdout_lines']
docker_stdout = duthost.shell('/usr/bin/redis-cli -n 6 KEYS "DOCKER_STATS|*" | wc -l',
module_ignore_errors=False)['stdout_lines']
docker_keys_count = get_count_fromredisout(docker_stdout)
process_stdout= duthost.shell('/usr/bin/redis-cli -n 6 KEYS "PROCESS_STATS|*" | wc -l', module_ignore_errors=False)['stdout_lines']
process_stdout = duthost.shell('/usr/bin/redis-cli -n 6 KEYS "PROCESS_STATS|*" | wc -l',
module_ignore_errors=False)['stdout_lines']
process_keys_count = get_count_fromredisout(process_stdout)
    # if an entry for process or docker data is found, then the daemon upload was successful
pytest_assert(int(docker_keys_count) > 1, "No data docker data upload found by Procdockerstatsd daemon to state_db")
pytest_assert(int(process_keys_count) > 1, "No data process data upload found by Procdockerstatsd daemon to state_db")
pytest_assert(int(docker_keys_count) > 1,
"No data docker data upload found by Procdockerstatsd daemon to state_db")
pytest_assert(int(process_keys_count) > 1,
"No data process data upload found by Procdockerstatsd daemon to state_db")
Loading

0 comments on commit 7c73002

Please sign in to comment.