-rw-r--r--  tests/topotests/all_protocol_startup/test_all_protocol_startup.py  67
-rwxr-xr-x  tests/topotests/analyze.py  50
-rw-r--r--  tests/topotests/bfd_isis_topo1/test_bfd_isis_topo1.py  1
-rw-r--r--  tests/topotests/bfd_profiles_topo1/test_bfd_profiles_topo1.py  1
-rw-r--r--  tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py  5
-rw-r--r--  tests/topotests/bgp_aggregator_zero/test_bgp_aggregator_zero.py  4
-rw-r--r--  tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py  24
-rw-r--r--  tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py  4
-rw-r--r--  tests/topotests/bgp_auth/test_bgp_auth.py  1
-rw-r--r--  tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py  94
-rw-r--r--  tests/topotests/bgp_community_alias/test_bgp-community-alias.py  1
-rw-r--r--  tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py  1
-rw-r--r--  tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py  2
-rw-r--r--  tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py  1
-rw-r--r--  tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py  1
-rw-r--r--  tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py  3
-rw-r--r--  tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py  3
-rw-r--r--  tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py  12
-rw-r--r--  tests/topotests/bgp_evpn_mh/test_evpn_mh.py  7
-rwxr-xr-x  tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py  84
-rw-r--r--  tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py  12
-rwxr-xr-x  tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py  26
-rw-r--r--  tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py  62
-rw-r--r--  tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py  8
-rw-r--r--  tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py  16
-rw-r--r--  tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py  1
-rw-r--r--  tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py  26
-rw-r--r--  tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py  43
-rw-r--r--  tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py  13
-rw-r--r--  tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py  6
-rw-r--r--  tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py  8
-rwxr-xr-x  tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py  14
-rw-r--r--  tests/topotests/bgp_route_map/test_route_map_topo1.py  22
-rwxr-xr-x  tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py  16
-rw-r--r--  tests/topotests/bgp_update_delay/test_bgp_update_delay.py  2
-rw-r--r--  tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py  4
-rw-r--r--  tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py  5
-rw-r--r--  tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py  1
-rw-r--r--  tests/topotests/config_timing/test_config_timing.py  64
-rwxr-xr-x  tests/topotests/conftest.py  17
-rw-r--r--  tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py  8
-rw-r--r--  tests/topotests/example_test/test_template.py  1
-rwxr-xr-x  tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py  4
-rwxr-xr-x  tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py  4
-rwxr-xr-x  tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py  4
-rw-r--r--  tests/topotests/isis_topo1/test_isis_topo1.py  6
-rw-r--r--  tests/topotests/isis_topo1_vrf/test_isis_topo1_vrf.py  6
-rw-r--r--  tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py  1
-rw-r--r--  tests/topotests/lib/bgp.py  193
-rw-r--r--  tests/topotests/lib/common_config.py  293
-rwxr-xr-x  tests/topotests/lib/exa-receive.py  10
-rw-r--r--  tests/topotests/lib/fixtures.py  1
-rw-r--r--  tests/topotests/lib/ltemplate.py  3
-rwxr-xr-x  tests/topotests/lib/mcast-tester.py  36
-rw-r--r--  tests/topotests/lib/micronet_cli.py  14
-rw-r--r--  tests/topotests/lib/ospf.py  159
-rw-r--r--  tests/topotests/lib/pim.py  94
-rw-r--r--  tests/topotests/lib/topogen.py  62
-rw-r--r--  tests/topotests/lib/topojson.py  26
-rw-r--r--  tests/topotests/lib/topolog.py  19
-rw-r--r--  tests/topotests/lib/topotest.py  127
-rw-r--r--  tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py  13
-rwxr-xr-x  tests/topotests/msdp_topo1/test_msdp_topo1.py  124
-rw-r--r--  tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py  5
-rwxr-xr-x  tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py  56
-rwxr-xr-x  tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py  99
-rwxr-xr-x  tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py  120
-rwxr-xr-x  tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py  20
-rw-r--r--  tests/topotests/nhrp_topo/test_nhrp_topo.py  131
-rwxr-xr-x  tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py  1
-rw-r--r--  tests/topotests/ospf6_topo2/test_ospf6_topo2.py  33
-rw-r--r--  tests/topotests/ospf_basic_functionality/test_ospf_chaos.py  2
-rw-r--r--  tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py  276
-rw-r--r--  tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py  1
-rw-r--r--  tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py  2
-rwxr-xr-x  tests/topotests/ospf_sr_te_topo1/test_ospf_sr_te_topo1.py  68
-rw-r--r--  tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py  1
-rw-r--r--  tests/topotests/ospf_topo2/test_ospf_topo2.py  1
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py  1540
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py  2
-rw-r--r--  tests/topotests/pbr_topo1/test_pbr_topo1.py  1
-rwxr-xr-x  tests/topotests/pim_acl/test_pim_acl.py  25
-rw-r--r--  tests/topotests/pim_basic/test_pim.py  4
-rw-r--r--  tests/topotests/pim_basic_topo2/test_pim_basic_topo2.py  48
-rwxr-xr-x  tests/topotests/pim_igmp_vrf/test_pim_vrf.py  52
-rw-r--r--  tests/topotests/rip_topo1/test_rip_topo1.py  1
-rw-r--r--  tests/topotests/route_scale/test_route_scale.py  4
-rwxr-xr-x  tests/topotests/simple_snmp_test/test_simple_snmp.py  6
-rwxr-xr-x  tests/topotests/srv6_locator/test_srv6_locator.py  24
-rw-r--r--  tests/topotests/zebra_netlink/test_zebra_netlink.py  2
-rw-r--r--  tests/topotests/zebra_opaque/test_zebra_opaque.py  4
-rw-r--r--  tests/topotests/zebra_rib/test_zebra_rib.py  45
-rwxr-xr-x  tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py  43
-rwxr-xr-x  tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py  38
-rwxr-xr-x  tools/frr-reload.py  46
95 files changed, 2617 insertions, 2024 deletions
diff --git a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py
index d272aab78..b1203570a 100644
--- a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py
+++ b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py
@@ -879,22 +879,32 @@ def test_bgp_summary():
# Read expected result from file
expected_original = open(refTableFile).read().rstrip()
- for arguments in ["", "remote-as internal", "remote-as external",
- "remote-as 100", "remote-as 123",
- "neighbor 192.168.7.10", "neighbor 192.168.7.10",
- "neighbor fc00:0:0:8::1000",
- "neighbor 10.0.0.1",
- "terse",
- "remote-as internal terse",
- "remote-as external terse",
- "remote-as 100 terse", "remote-as 123 terse",
- "neighbor 192.168.7.10 terse", "neighbor 192.168.7.10 terse",
- "neighbor fc00:0:0:8::1000 terse",
- "neighbor 10.0.0.1 terse"]:
+ for arguments in [
+ "",
+ "remote-as internal",
+ "remote-as external",
+ "remote-as 100",
+ "remote-as 123",
+ "neighbor 192.168.7.10",
+ "neighbor 192.168.7.10",
+ "neighbor fc00:0:0:8::1000",
+ "neighbor 10.0.0.1",
+ "terse",
+ "remote-as internal terse",
+ "remote-as external terse",
+ "remote-as 100 terse",
+ "remote-as 123 terse",
+ "neighbor 192.168.7.10 terse",
+ "neighbor 192.168.7.10 terse",
+ "neighbor fc00:0:0:8::1000 terse",
+ "neighbor 10.0.0.1 terse",
+ ]:
# Actual output from router
actual = (
net["r%s" % i]
- .cmd('vtysh -c "show ip bgp summary ' + arguments + '" 2> /dev/null')
+ .cmd(
+ 'vtysh -c "show ip bgp summary ' + arguments + '" 2> /dev/null'
+ )
.rstrip()
)
@@ -923,7 +933,9 @@ def test_bgp_summary():
actual = re.sub(r"Unknown Summary \(VRF default\):", "", actual)
actual = re.sub(r"No Unknown neighbor is configured", "", actual)
- actual = re.sub(r"IPv4 labeled-unicast Summary \(VRF default\):", "", actual)
+ actual = re.sub(
+ r"IPv4 labeled-unicast Summary \(VRF default\):", "", actual
+ )
actual = re.sub(
r"No IPv4 labeled-unicast neighbor is configured", "", actual
)
@@ -937,19 +949,18 @@ def test_bgp_summary():
elif "remote-as 123" in arguments:
expected = re.sub(
r"(192.168.7.(1|2)0|fc00:0:0:8::(1|2)000).+Active.+",
- "", expected
+ "",
+ expected,
)
expected = re.sub(r"\nNeighbor.+Desc", "", expected)
expected = expected + "% No matching neighbor\n"
elif "192.168.7.10" in arguments:
expected = re.sub(
- r"(192.168.7.20|fc00:0:0:8::(1|2)000).+Active.+",
- "", expected
+ r"(192.168.7.20|fc00:0:0:8::(1|2)000).+Active.+", "", expected
)
elif "fc00:0:0:8::1000" in arguments:
expected = re.sub(
- r"(192.168.7.(1|2)0|fc00:0:0:8::2000).+Active.+",
- "", expected
+ r"(192.168.7.(1|2)0|fc00:0:0:8::2000).+Active.+", "", expected
)
elif "10.0.0.1" in arguments:
expected = "No such neighbor in VRF default"
@@ -975,8 +986,12 @@ def test_bgp_summary():
# realign expected neighbor columns if needed
try:
- idx_actual = re.search(r"(Neighbor\s+V\s+)", actual).group(1).find("V")
- idx_expected = re.search(r"(Neighbor\s+V\s+)", expected).group(1).find("V")
+ idx_actual = (
+ re.search(r"(Neighbor\s+V\s+)", actual).group(1).find("V")
+ )
+ idx_expected = (
+ re.search(r"(Neighbor\s+V\s+)", expected).group(1).find("V")
+ )
idx_diff = idx_expected - idx_actual
if idx_diff > 0:
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
@@ -994,7 +1009,7 @@ def test_bgp_summary():
diff = topotest.get_textdiff(
actual,
expected,
- title1="actual SHOW IP BGP SUMMARY " + arguments.upper() ,
+ title1="actual SHOW IP BGP SUMMARY " + arguments.upper(),
title2="expected SHOW IP BGP SUMMARY " + arguments.upper(),
)
@@ -1007,7 +1022,9 @@ def test_bgp_summary():
else:
print("r%s ok" % i)
- assert failures == 0, "SHOW IP BGP SUMMARY failed for router r%s:\n%s" % (
+ assert (
+ failures == 0
+ ), "SHOW IP BGP SUMMARY failed for router r%s:\n%s" % (
i,
diff,
)
@@ -1074,7 +1091,9 @@ def test_bgp_ipv6_summary():
actual = re.sub(r"No Unknown neighbor is configured", "", actual)
# Remove Labeled Unicast Summary (all of it)
- actual = re.sub(r"IPv6 labeled-unicast Summary \(VRF default\):", "", actual)
+ actual = re.sub(
+ r"IPv6 labeled-unicast Summary \(VRF default\):", "", actual
+ )
actual = re.sub(
r"No IPv6 labeled-unicast neighbor is configured", "", actual
)
diff --git a/tests/topotests/analyze.py b/tests/topotests/analyze.py
index 09aa22f03..888e70633 100755
--- a/tests/topotests/analyze.py
+++ b/tests/topotests/analyze.py
@@ -45,8 +45,10 @@ def print_summary(results, args):
for group in results:
_ntest, _npass, _nfail, _nerror, _nskip = get_summary(results[group])
if args.verbose:
- print(f"Group: {group} Total: {_ntest} PASSED: {_npass}"
- " FAIL: {_nfail} ERROR: {_nerror} SKIP: {_nskip}")
+ print(
+ f"Group: {group} Total: {_ntest} PASSED: {_npass}"
+ " FAIL: {_nfail} ERROR: {_nerror} SKIP: {_nskip}"
+ )
ntest += _ntest
npass += _npass
nfail += _nfail
@@ -85,7 +87,7 @@ def get_filtered(tfilters, results, args):
break
else:
continue
- #cname = testcase["@classname"]
+ # cname = testcase["@classname"]
fname = testcase.get("@file", "")
cname = testcase.get("@classname", "")
if not fname and not cname:
@@ -106,7 +108,6 @@ def get_filtered(tfilters, results, args):
return found_files
-
def dump_testcase(testcase):
expand_keys = ("failure", "error", "skipped")
@@ -122,10 +123,29 @@ def dump_testcase(testcase):
def main():
parser = argparse.ArgumentParser()
- parser.add_argument("-A", "--save", action="store_true", help="Save /tmp/topotests{,.xml} in --rundir if --rundir does not yet exist")
- parser.add_argument("-F", "--files-only", action="store_true", help="print test file names rather than individual full testcase names")
- parser.add_argument("-S", "--select", default="fe", help="select results combination of letters: 'e'rrored 'f'ailed 'p'assed 's'kipped.")
- parser.add_argument("-r", "--results", help="xml results file or directory containing xml results file")
+ parser.add_argument(
+ "-A",
+ "--save",
+ action="store_true",
+ help="Save /tmp/topotests{,.xml} in --rundir if --rundir does not yet exist",
+ )
+ parser.add_argument(
+ "-F",
+ "--files-only",
+ action="store_true",
+ help="print test file names rather than individual full testcase names",
+ )
+ parser.add_argument(
+ "-S",
+ "--select",
+ default="fe",
+ help="select results combination of letters: 'e'rrored 'f'ailed 'p'assed 's'kipped.",
+ )
+ parser.add_argument(
+ "-r",
+ "--results",
+ help="xml results file or directory containing xml results file",
+ )
parser.add_argument("--rundir", help=argparse.SUPPRESS)
parser.add_argument(
"-E",
@@ -133,18 +153,14 @@ def main():
action="store_true",
help="enumerate each item (results scoped)",
)
- parser.add_argument(
- "-T", "--test", help="print testcase at enumeration"
- )
+ parser.add_argument("-T", "--test", help="print testcase at enumeration")
parser.add_argument(
"--errmsg", action="store_true", help="print testcase error message"
)
parser.add_argument(
"--errtext", action="store_true", help="print testcase error text"
)
- parser.add_argument(
- "--time", action="store_true", help="print testcase run times"
- )
+ parser.add_argument("--time", action="store_true", help="print testcase run times")
parser.add_argument("-s", "--summary", action="store_true", help="print summary")
parser.add_argument("-v", "--verbose", action="store_true", help="be verbose")
@@ -152,14 +168,16 @@ def main():
if args.save and args.results and not os.path.exists(args.results):
if not os.path.exists("/tmp/topotests"):
- logging.critical("No \"/tmp/topotests\" directory to save")
+ logging.critical('No "/tmp/topotests" directory to save')
sys.exit(1)
subprocess.run(["mv", "/tmp/topotests", args.results])
# # Old location for results
# if os.path.exists("/tmp/topotests.xml", args.results):
# subprocess.run(["mv", "/tmp/topotests.xml", args.results])
- assert args.test is None or not args.files_only, "Can't have both --files and --test"
+ assert (
+ args.test is None or not args.files_only
+ ), "Can't have both --files and --test"
results = {}
ttfiles = []
diff --git a/tests/topotests/bfd_isis_topo1/test_bfd_isis_topo1.py b/tests/topotests/bfd_isis_topo1/test_bfd_isis_topo1.py
index 4c44ca440..3c176f25a 100644
--- a/tests/topotests/bfd_isis_topo1/test_bfd_isis_topo1.py
+++ b/tests/topotests/bfd_isis_topo1/test_bfd_isis_topo1.py
@@ -87,6 +87,7 @@ from lib.topolog import logger
pytestmark = [pytest.mark.bfdd, pytest.mark.isisd]
+
def setup_module(mod):
"Sets up the pytest environment"
topodef = {
diff --git a/tests/topotests/bfd_profiles_topo1/test_bfd_profiles_topo1.py b/tests/topotests/bfd_profiles_topo1/test_bfd_profiles_topo1.py
index 02e1d8a20..169f90abf 100644
--- a/tests/topotests/bfd_profiles_topo1/test_bfd_profiles_topo1.py
+++ b/tests/topotests/bfd_profiles_topo1/test_bfd_profiles_topo1.py
@@ -44,6 +44,7 @@ from lib.topolog import logger
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.ospfd]
+
def setup_module(mod):
"Sets up the pytest environment"
diff --git a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py
index d724b586d..f506792c4 100644
--- a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py
+++ b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py
@@ -42,13 +42,10 @@ from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
-
def build_topo(tgen):
r1 = tgen.add_router("r1")
r2 = tgen.add_router("r2")
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
- )
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
switch = tgen.add_switch("s1")
switch.add_link(r1)
diff --git a/tests/topotests/bgp_aggregator_zero/test_bgp_aggregator_zero.py b/tests/topotests/bgp_aggregator_zero/test_bgp_aggregator_zero.py
index 6836dc11a..ea71c82d8 100644
--- a/tests/topotests/bgp_aggregator_zero/test_bgp_aggregator_zero.py
+++ b/tests/topotests/bgp_aggregator_zero/test_bgp_aggregator_zero.py
@@ -42,9 +42,7 @@ pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
r1 = tgen.add_router("r1")
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
- )
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
switch = tgen.add_switch("s1")
switch.add_link(r1)
diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
index 9b789f4ea..961d72bd1 100644
--- a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
+++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
@@ -242,9 +242,9 @@ def test_bgp_allowas_in_p0(request):
expected=False,
)
assert result is not True, (
- "Testcase {} : Failed \n".format(tc_name) +
- "Expected behavior: routes should not present in rib \n" +
- "Error: {}".format(result)
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not present in rib \n"
+ + "Error: {}".format(result)
)
step("Configure allowas-in on R3 for R2.")
@@ -389,9 +389,9 @@ def test_bgp_allowas_in_per_addr_family_p0(request):
tgen, "ipv6", dut, static_route_ipv6, protocol=protocol, expected=False
)
assert result is not True, (
- "Testcase {} : Failed \n".format(tc_name) +
- "Expected behavior: routes are should not be present in ipv6 rib\n" +
- " Error: {}".format(result)
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes are should not be present in ipv6 rib\n"
+ + " Error: {}".format(result)
)
step("Repeat the same test for IPv6 AFI.")
@@ -439,9 +439,9 @@ def test_bgp_allowas_in_per_addr_family_p0(request):
tgen, "ipv4", dut, static_route_ipv4, protocol=protocol, expected=False
)
assert result is not True, (
- "Testcase {} : Failed \n".format(tc_name) +
- "Expected behavior: routes should not be present in ipv4 rib\n" +
- " Error: {}".format(result)
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not be present in ipv4 rib\n"
+ + " Error: {}".format(result)
)
result = verify_rib(tgen, "ipv6", dut, static_route_ipv6, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
@@ -595,9 +595,9 @@ def test_bgp_allowas_in_no_of_occurrences_p0(request):
tgen, addr_type, dut, static_routes, protocol=protocol, expected=False
)
assert result is not True, (
- "Testcase {} : Failed \n ".format(tc_name) +
- "Expected behavior: routes are should not be present in rib\n" +
- "Error: {}".format(result)
+ "Testcase {} : Failed \n ".format(tc_name)
+ + "Expected behavior: routes are should not be present in rib\n"
+ + "Error: {}".format(result)
)
for addr_type in ADDR_TYPES:
diff --git a/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py b/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py
index 63c890f13..14689d737 100644
--- a/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py
+++ b/tests/topotests/bgp_aspath_zero/test_bgp_aspath_zero.py
@@ -42,9 +42,7 @@ pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
r1 = tgen.add_router("r1")
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
- )
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
switch = tgen.add_switch("s1")
switch.add_link(r1)
diff --git a/tests/topotests/bgp_auth/test_bgp_auth.py b/tests/topotests/bgp_auth/test_bgp_auth.py
index 3e5b80f83..7b7a576f2 100644
--- a/tests/topotests/bgp_auth/test_bgp_auth.py
+++ b/tests/topotests/bgp_auth/test_bgp_auth.py
@@ -71,6 +71,7 @@ pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
class InvalidCLIError(Exception):
"""Raise when the CLI command is wrong"""
+
def build_topo(tgen):
# Create routers
tgen.add_router("R1")
diff --git a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py
index 3de3bcd4d..f416f3d2a 100644
--- a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py
+++ b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py
@@ -56,18 +56,35 @@ sys.path.append(os.path.join(CWD, "../lib/"))
# Required to instantiate the topology builder class.
-from lib.bgp import (clear_bgp_and_verify, create_router_bgp, modify_as_number,
- verify_as_numbers, verify_bgp_convergence, verify_bgp_rib,
- verify_bgp_timers_and_functionality, verify_router_id)
-from lib.common_config import (addKernelRoute, apply_raw_config,
- check_address_types, create_prefix_lists,
- create_route_maps, create_static_routes,
- required_linux_kernel_version,
- reset_config_on_routers, start_topology, step,
- verify_admin_distance_for_static_routes,
- verify_bgp_community, verify_fib_routes,
- verify_rib, write_test_footer,
- write_test_header)
+from lib.bgp import (
+ clear_bgp_and_verify,
+ create_router_bgp,
+ modify_as_number,
+ verify_as_numbers,
+ verify_bgp_convergence,
+ verify_bgp_rib,
+ verify_bgp_timers_and_functionality,
+ verify_router_id,
+)
+from lib.common_config import (
+ addKernelRoute,
+ apply_raw_config,
+ check_address_types,
+ create_prefix_lists,
+ create_route_maps,
+ create_static_routes,
+ required_linux_kernel_version,
+ reset_config_on_routers,
+ start_topology,
+ step,
+ verify_admin_distance_for_static_routes,
+ verify_bgp_community,
+ verify_fib_routes,
+ verify_rib,
+ write_test_footer,
+ write_test_header,
+)
+
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
@@ -77,7 +94,6 @@ from lib.topolog import logger
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
# Global Variable
KEEPALIVETIMER = 2
HOLDDOWNTIMER = 6
@@ -162,7 +178,7 @@ def teardown_module():
def test_modify_and_delete_router_id(request):
- """ Test to modify, delete and verify router-id. """
+ """Test to modify, delete and verify router-id."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
@@ -280,11 +296,9 @@ def test_BGP_config_with_invalid_ASN_p2(request):
},
}
result = modify_as_number(tgen, topo, input_dict)
- assert result is not True, (
- "Expected BGP config is not created because of invalid ASNs: {}".format(
- result
- )
- )
+ assert (
+ result is not True
+ ), "Expected BGP config is not created because of invalid ASNs: {}".format(result)
# Creating configuration from JSON
reset_config_on_routers(tgen)
@@ -394,7 +408,7 @@ def test_bgp_timers_functionality(request):
def test_static_routes(request):
- """ Test to create and verify static routes. """
+ """Test to create and verify static routes."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
@@ -457,7 +471,7 @@ def test_static_routes(request):
def test_admin_distance_for_existing_static_routes(request):
- """ Test to modify and verify admin distance for existing static routes."""
+ """Test to modify and verify admin distance for existing static routes."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
@@ -492,7 +506,7 @@ def test_admin_distance_for_existing_static_routes(request):
def test_advertise_network_using_network_command(request):
- """ Test advertise networks using network command."""
+ """Test advertise networks using network command."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
@@ -762,9 +776,13 @@ def test_BGP_attributes_with_vrf_default_keyword_p0(request):
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
dut = "r4"
@@ -781,9 +799,13 @@ def test_BGP_attributes_with_vrf_default_keyword_p0(request):
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
input_dict_4 = {"largeCommunity": "500:500:500", "community": "500:500"}
@@ -1122,11 +1144,13 @@ def test_bgp_with_loopback_with_same_subnet_p1(request):
dut = "r1"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1, expected=False) # pylint: disable=E1123
+ result = verify_fib_routes(
+ tgen, addr_type, dut, input_dict_r1, expected=False
+ ) # pylint: disable=E1123
assert result is not True, (
- "Testcase {} : Failed \n".format(tc_name) +
- "Expected behavior: routes should not present in fib \n" +
- "Error: {}".format(result)
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not present in fib \n"
+ + "Error: {}".format(result)
)
step("Verify Ipv4 and Ipv6 network installed in r3 RIB but not in FIB")
@@ -1141,11 +1165,13 @@ def test_bgp_with_loopback_with_same_subnet_p1(request):
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1, expected=False) # pylint: disable=E1123
+ result = verify_fib_routes(
+ tgen, addr_type, dut, input_dict_r1, expected=False
+ ) # pylint: disable=E1123
assert result is not True, (
- "Testcase {} : Failed \n".format(tc_name) +
- "Expected behavior: routes should not present in fib \n" +
- "Error: {}".format(result)
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not present in fib \n"
+ + "Error: {}".format(result)
)
write_test_footer(tc_name)
diff --git a/tests/topotests/bgp_community_alias/test_bgp-community-alias.py b/tests/topotests/bgp_community_alias/test_bgp-community-alias.py
index b95bf7487..0b41dc7c6 100644
--- a/tests/topotests/bgp_community_alias/test_bgp-community-alias.py
+++ b/tests/topotests/bgp_community_alias/test_bgp-community-alias.py
@@ -40,7 +40,6 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
pytestmark = [pytest.mark.bgpd]
-
def build_topo(tgen):
for routern in range(1, 3):
tgen.add_router("r{}".format(routern))
diff --git a/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py b/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py
index 3f4be3da4..eae2a7d59 100644
--- a/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py
+++ b/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py
@@ -46,7 +46,6 @@ from lib.common_config import step
pytestmark = [pytest.mark.bgpd]
-
def build_topo(tgen):
for routern in range(1, 5):
tgen.add_router("r{}".format(routern))
diff --git a/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py b/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py
index f8099492d..272fdd334 100644
--- a/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py
+++ b/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py
@@ -42,7 +42,7 @@ pytestmark = [pytest.mark.bgpd]
def setup_module(mod):
- topodef = { "s1": ("r1", "r2") }
+ topodef = {"s1": ("r1", "r2")}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
diff --git a/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py b/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py
index 2e6c3c48a..0fc9d9ddc 100644
--- a/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py
+++ b/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py
@@ -51,7 +51,6 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
pytestmark = [pytest.mark.bgpd]
-
def build_topo(tgen):
for routern in range(1, 4):
tgen.add_router("r{}".format(routern))
diff --git a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py
index 4a6eaf81a..e6fe22bf0 100644
--- a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py
+++ b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py
@@ -58,7 +58,6 @@ from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
-
def build_topo(tgen):
for routern in range(1, 7):
tgen.add_router("r{}".format(routern))
diff --git a/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py
index 3c0706ed8..ad999a1af 100644
--- a/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py
+++ b/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py
@@ -69,7 +69,6 @@ from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
# Global variables
NEXT_HOPS = {"ipv4": [], "ipv6": []}
INTF_LIST_R3 = []
@@ -310,7 +309,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
- """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+ """Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
tc_name = request.node.name
write_test_header(tc_name)
diff --git a/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py
index cf1b0cfa0..28047424b 100644
--- a/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py
+++ b/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py
@@ -69,7 +69,6 @@ from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
# Global variables
NEXT_HOPS = {"ipv4": [], "ipv6": []}
INTF_LIST_R3 = []
@@ -311,7 +310,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
- """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+ """Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
tc_name = request.node.name
write_test_header(tc_name)
diff --git a/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py b/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py
index e57dffee3..54b3e80da 100644
--- a/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py
+++ b/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py
@@ -223,12 +223,12 @@ def test_ecmp_fast_convergence(request, test_type, tgen, topo):
logger.info("Enable bgp fast-convergence cli")
raw_config = {
- "r2": {
- "raw_config": [
- "router bgp {}".format(topo["routers"]["r2"]["bgp"]["local_as"]),
- "bgp fast-convergence",
- ]
- }
+ "r2": {
+ "raw_config": [
+ "router bgp {}".format(topo["routers"]["r2"]["bgp"]["local_as"]),
+ "bgp fast-convergence",
+ ]
+ }
}
result = apply_raw_config(tgen, raw_config)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
diff --git a/tests/topotests/bgp_evpn_mh/test_evpn_mh.py b/tests/topotests/bgp_evpn_mh/test_evpn_mh.py
index c9f6d1dc9..b0e438106 100644
--- a/tests/topotests/bgp_evpn_mh/test_evpn_mh.py
+++ b/tests/topotests/bgp_evpn_mh/test_evpn_mh.py
@@ -46,6 +46,7 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
+
# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen
@@ -601,14 +602,16 @@ def ping_anycast_gw(tgen):
script_path,
"--imports=Ether,ARP",
"--interface=" + intf,
- 'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr)
+ 'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr),
]
for name in ("hostd11", "hostd21"):
host = tgen.net.hosts[name]
_, stdout, _ = host.cmd_status(ping_cmd, warn=False, stderr=subprocess.STDOUT)
stdout = stdout.strip()
if stdout:
- host.logger.debug("%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout)
+ host.logger.debug(
+ "%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout
+ )
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
diff --git a/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py b/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py
index 91e76fd26..17f5fb08b 100755
--- a/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py
+++ b/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py
@@ -58,7 +58,7 @@ import pytest
import time
import platform
-#Current Working Directory
+# Current Working Directory
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
@@ -79,11 +79,11 @@ from lib.common_config import (
pytestmark = [pytest.mark.bgpd]
-#Global variables
-PES = ['PE1', 'PE2']
-HOSTS = ['host1', 'host2']
-PE_SUFFIX = {'PE1': '1', 'PE2': '2'}
-HOST_SUFFIX = {'host1': '1', 'host2': '2'}
+# Global variables
+PES = ["PE1", "PE2"]
+HOSTS = ["host1", "host2"]
+PE_SUFFIX = {"PE1": "1", "PE2": "2"}
+HOST_SUFFIX = {"host1": "1", "host2": "2"}
TRIGGERS = ["base", "no_rt5", "no_rt2"]
@@ -98,12 +98,12 @@ def build_topo(tgen):
tgen.add_router(host)
krel = platform.release()
- logger.info('Kernel version ' + krel)
+ logger.info("Kernel version " + krel)
- #Add links
- tgen.add_link(tgen.gears['PE1'], tgen.gears['PE2'], 'PE1-eth0', 'PE2-eth0')
- tgen.add_link(tgen.gears['PE1'], tgen.gears['host1'], 'PE1-eth1', 'host1-eth0')
- tgen.add_link(tgen.gears['PE2'], tgen.gears['host2'], 'PE2-eth1', 'host2-eth0')
+ # Add links
+ tgen.add_link(tgen.gears["PE1"], tgen.gears["PE2"], "PE1-eth0", "PE2-eth0")
+ tgen.add_link(tgen.gears["PE1"], tgen.gears["host1"], "PE1-eth1", "host1-eth0")
+ tgen.add_link(tgen.gears["PE2"], tgen.gears["host2"], "PE2-eth1", "host2-eth0")
def setup_module(mod):
@@ -121,12 +121,16 @@ def setup_module(mod):
kernelv = platform.release()
if topotest.version_cmp(kernelv, "4.15") < 0:
- logger.info("For EVPN, kernel version should be minimum 4.15. Kernel present {}".format(kernelv))
+ logger.info(
+ "For EVPN, kernel version should be minimum 4.15. Kernel present {}".format(
+ kernelv
+ )
+ )
return
- if topotest.version_cmp(kernelv, '4.15') == 0:
+ if topotest.version_cmp(kernelv, "4.15") == 0:
l3mdev_accept = 1
- logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept))
+ logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept))
else:
l3mdev_accept = 0
@@ -232,18 +236,22 @@ def evpn_gateway_ip_show_op_check(trigger=" "):
if trigger not in TRIGGERS:
return "Unexpected trigger", "Unexpected trigger {}".format(trigger)
- show_commands = {'bgp_vni_routes': 'show bgp l2vpn evpn route vni 100 json',
- 'bgp_vrf_ipv4' : 'show bgp vrf vrf-blue ipv4 json',
- 'bgp_vrf_ipv6' : 'show bgp vrf vrf-blue ipv6 json',
- 'zebra_vrf_ipv4': 'show ip route vrf vrf-blue json',
- 'zebra_vrf_ipv6': 'show ipv6 route vrf vrf-blue json'}
+ show_commands = {
+ "bgp_vni_routes": "show bgp l2vpn evpn route vni 100 json",
+ "bgp_vrf_ipv4": "show bgp vrf vrf-blue ipv4 json",
+ "bgp_vrf_ipv6": "show bgp vrf vrf-blue ipv6 json",
+ "zebra_vrf_ipv4": "show ip route vrf vrf-blue json",
+ "zebra_vrf_ipv6": "show ipv6 route vrf vrf-blue json",
+ }
for (name, pe) in tgen.gears.items():
if name not in PES:
continue
for (cmd_key, command) in show_commands.items():
- expected_op_file = "{0}/{1}/{2}_{3}.json".format(CWD, name, cmd_key, trigger)
+ expected_op_file = "{0}/{1}/{2}_{3}.json".format(
+ CWD, name, cmd_key, trigger
+ )
expected_op = json.loads(open(expected_op_file).read())
test_func = partial(topotest.router_json_cmp, pe, command, expected_op)
@@ -265,7 +273,9 @@ def test_evpn_gateway_ip_basic_topo(request):
write_test_header(tc_name)
# Temporarily Disabled
- tgen.set_error("%s: Failing under new micronet framework, please debug and re-enable", tc_name)
+ tgen.set_error(
+ "%s: Failing under new micronet framework, please debug and re-enable", tc_name
+ )
kernelv = platform.release()
if topotest.version_cmp(kernelv, "4.15") < 0:
@@ -304,18 +314,22 @@ def test_evpn_gateway_ip_flap_rt5(request):
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- h1 = tgen.gears['host1']
+ h1 = tgen.gears["host1"]
step("Withdraw type-5 routes")
- h1.run('vtysh -c "config t" \
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv4" \
- -c "no network 100.0.0.21/32"')
- h1.run('vtysh -c "config t" \
+ -c "no network 100.0.0.21/32"'
+ )
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv6" \
- -c "no network 100::21/128"')
+ -c "no network 100::21/128"'
+ )
result, assertmsg = evpn_gateway_ip_show_op_check("no_rt5")
if result is not None:
@@ -324,14 +338,18 @@ def test_evpn_gateway_ip_flap_rt5(request):
step("Advertise type-5 routes again")
- h1.run('vtysh -c "config t" \
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv4" \
- -c "network 100.0.0.21/32"')
- h1.run('vtysh -c "config t" \
+ -c "network 100.0.0.21/32"'
+ )
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv6" \
- -c "network 100::21/128"')
+ -c "network 100::21/128"'
+ )
result, assertmsg = evpn_gateway_ip_show_op_check("base")
if result is not None:
@@ -344,8 +362,8 @@ def test_evpn_gateway_ip_flap_rt5(request):
def test_evpn_gateway_ip_flap_rt2(request):
"""
- Withdraw EVPN type-2 routes and check O/Ps at PE1 and PE2
- """
+ Withdraw EVPN type-2 routes and check O/Ps at PE1 and PE2
+ """
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
@@ -359,7 +377,6 @@ def test_evpn_gateway_ip_flap_rt2(request):
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
-
step("Shut down VxLAN interface at PE1 which results in withdraw of type-2 routes")
pe1 = tgen.net["PE1"]
@@ -391,6 +408,7 @@ def test_memory_leak():
tgen.report_memory_leaks()
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
index 52a6d0c9f..6ea281e6f 100644
--- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
+++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
@@ -129,11 +129,17 @@ def setup_module(mod):
output = router.cmd_raises(cmd.format("r2"))
logger.info("result: " + output)
- tgen.net["r1"].cmd_raises("ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r1-eth0 local 192.168.100.21")
+ tgen.net["r1"].cmd_raises(
+ "ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r1-eth0 local 192.168.100.21"
+ )
tgen.net["r1"].set_intf_netns("vxlan-101", "r1-vrf-101", up=True)
tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set lo up")
- tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link add name bridge-101 up type bridge stp_state 0")
- tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set dev vxlan-101 master bridge-101")
+ tgen.net["r1"].cmd_raises(
+ "ip -n r1-vrf-101 link add name bridge-101 up type bridge stp_state 0"
+ )
+ tgen.net["r1"].cmd_raises(
+ "ip -n r1-vrf-101 link set dev vxlan-101 master bridge-101"
+ )
tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set bridge-101 up")
tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set vxlan-101 up")
diff --git a/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py b/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py
index c713fb926..40972d4a6 100755
--- a/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py
+++ b/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py
@@ -178,10 +178,13 @@ def test_pe1_converge_evpn():
_, result = topotest.run_and_expect(test_func, None, count=45, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(pe1.name)
- test_func = partial(check_vni_macs_present, tgen, pe1, 101, (
- ("host1", "host1-eth0"),
- ("host2", "host2-eth0")
- ))
+ test_func = partial(
+ check_vni_macs_present,
+ tgen,
+ pe1,
+ 101,
+ (("host1", "host1-eth0"), ("host2", "host2-eth0")),
+ )
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
if result:
logger.warning("%s", result)
@@ -205,10 +208,13 @@ def test_pe2_converge_evpn():
assertmsg = '"{}" JSON output mismatches'.format(pe2.name)
assert result is None, assertmsg
- test_func = partial(check_vni_macs_present, tgen, pe2, 101, (
- ("host1", "host1-eth0"),
- ("host2", "host2-eth0")
- ))
+ test_func = partial(
+ check_vni_macs_present,
+ tgen,
+ pe2,
+ 101,
+ (("host1", "host1-eth0"), ("host2", "host2-eth0")),
+ )
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
if result:
logger.warning("%s", result)
@@ -283,7 +289,7 @@ def test_learning_pe2():
def test_local_remote_mac_pe1():
- " Test MAC transfer PE1 local and PE2 remote"
+ "Test MAC transfer PE1 local and PE2 remote"
tgen = get_topogen()
# Don't run this test if we have any failure.
@@ -296,7 +302,7 @@ def test_local_remote_mac_pe1():
def test_local_remote_mac_pe2():
- " Test MAC transfer PE2 local and PE1 remote"
+ "Test MAC transfer PE2 local and PE1 remote"
tgen = get_topogen()
# Don't run this test if we have any failure.
diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
index bf5aea1da..52ad7813c 100644
--- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
+++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
@@ -814,7 +814,11 @@ def test_BGP_GR_10_p2(request):
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
for addr_type in ADDR_TYPES:
- step("Verifying GR config and operational state for addr_type {}".format(addr_type))
+ step(
+ "Verifying GR config and operational state for addr_type {}".format(
+ addr_type
+ )
+ )
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
@@ -839,7 +843,12 @@ def test_BGP_GR_10_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -847,7 +856,12 @@ def test_BGP_GR_10_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -855,7 +869,12 @@ def test_BGP_GR_10_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -863,7 +882,12 @@ def test_BGP_GR_10_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -1644,7 +1668,12 @@ def test_BGP_GR_26_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -1652,7 +1681,12 @@ def test_BGP_GR_26_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -1660,7 +1694,12 @@ def test_BGP_GR_26_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
@@ -1668,7 +1707,12 @@ def test_BGP_GR_26_p2(request):
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
diff --git a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py
index 7a19939a5..14b8055d9 100644
--- a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py
+++ b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py
@@ -316,7 +316,13 @@ def test_verify_graceful_shutdown_functionality_with_eBGP_peers_p0(request):
step("local pref for routes coming from R1 is set to 0.")
for addr_type in ADDR_TYPES:
- rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+ rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [{"set": {"locPrf": 0}}],
+ }
+ }
+ }
static_routes = [NETWORK[addr_type]]
result = verify_bgp_attributes(
diff --git a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py
index 95578bada..e842e64ad 100644
--- a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py
+++ b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py
@@ -313,7 +313,13 @@ def test_verify_graceful_shutdown_functionality_with_iBGP_peers_p0(request):
step("local pref for routes coming from R1 is set to 0.")
for addr_type in ADDR_TYPES:
- rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+ rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [{"set": {"locPrf": 0}}],
+ }
+ }
+ }
static_routes = [NETWORK[addr_type]]
result = verify_bgp_attributes(
@@ -499,7 +505,13 @@ def test_verify_deleting_re_adding_route_map_with_iBGP_peers_p0(request):
step("local pref for routes coming from R1 is set to 0.")
for addr_type in ADDR_TYPES:
- rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+ rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [{"set": {"locPrf": 0}}],
+ }
+ }
+ }
static_routes = [NETWORK[addr_type]]
result = verify_bgp_attributes(
diff --git a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py
index 4eec69f7f..4214f3a86 100644
--- a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py
+++ b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py
@@ -64,6 +64,7 @@ this scenario, the servers are also routers as they have to announce
anycast IP (VIP) addresses via BGP.
"""
+
def build_topo(tgen):
"Build function"
diff --git a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py
index eefb965f8..fbe1b038e 100644
--- a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py
+++ b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py
@@ -2642,12 +2642,16 @@ def test_route_map_within_vrf_to_alter_bgp_attribute_nexthop_p0(request):
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
@@ -4921,7 +4925,9 @@ def test_prefix_list_to_permit_deny_prefixes_p0(request):
result = verify_rib(tgen, addr_type, dut, denied_routes, expected=False)
assert result is not True, "Testcase {} : Failed \n"
- "{}:Expected behaviour: Routes are denied by prefix-list \nError {}".format(tc_name, result)
+ "{}:Expected behaviour: Routes are denied by prefix-list \nError {}".format(
+ tc_name, result
+ )
step(
"On router R1, configure prefix-lists to permit 2 "
@@ -5131,7 +5137,11 @@ def test_prefix_list_to_permit_deny_prefixes_p0(request):
)
result = verify_rib(tgen, addr_type, dut, denied_routes, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected behaviour: Routes are denied by prefix-list \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected behaviour: Routes are denied by prefix-list \nError {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
@@ -5409,7 +5419,9 @@ def test_route_map_set_and_match_tag_p0(request):
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
@@ -5812,7 +5824,9 @@ def test_route_map_set_and_match_metric_p0(request):
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
diff --git a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py
index 3163fd75a..05961b110 100644
--- a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py
+++ b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py
@@ -103,7 +103,6 @@ from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
# Global variables
NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
@@ -1543,7 +1542,11 @@ def test_shut_noshut_p1(request):
sleep(HOLDDOWNTIMER + 1)
result = verify_bgp_convergence(tgen, topo, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected Behaviour: BGP will not be converged \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected Behaviour: BGP will not be converged \nError {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
dut = "r2"
@@ -1586,10 +1589,18 @@ def test_shut_noshut_p1(request):
}
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(
+ tc_name, result
+ )
step("Bring up connecting interface between R1<<>>R2 on R1.")
for intf in interfaces:
@@ -1828,7 +1839,9 @@ def test_vrf_vlan_routing_table_p1(request):
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behaviour: Routes are cleaned \n Error {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behaviour: Routes are cleaned \n Error {}".format(
+ tc_name, result
+ )
step("Add/reconfigure the same VRF instance again")
@@ -3356,12 +3369,16 @@ def test_vrf_name_significance_p1(request):
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
+ )
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
dut = "blue2"
@@ -3378,13 +3395,17 @@ def test_vrf_name_significance_p1(request):
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_2, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
)
step("Create 2 new VRFs PINK_A and GREY_A IN R3")
diff --git a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py
index d32cbeb75..9c13c1c07 100644
--- a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py
+++ b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py
@@ -98,9 +98,7 @@ def build_topo(tgen):
peer = {}
for i in range(1, 9):
peer[i] = tgen.add_exabgp_peer(
- "peer%s" % i,
- ip="172.16.1.%s/24" % i,
- defaultRoute="via 172.16.1.254"
+ "peer%s" % i, ip="172.16.1.%s/24" % i, defaultRoute="via 172.16.1.254"
)
# First switch is for a dummy interface (for local network)
@@ -188,7 +186,9 @@ def test_bgp_converge():
break
else:
# Bail out with error if a router fails to converge
- bgpStatus = tgen.net["r%s" % i].cmd('vtysh -c "show ip bgp view %s summary"' % view)
+ bgpStatus = tgen.net["r%s" % i].cmd(
+ 'vtysh -c "show ip bgp view %s summary"' % view
+ )
assert False, "BGP did not converge:\n%s" % bgpStatus
tgen.routers_have_failure()
@@ -209,7 +209,10 @@ def test_bgp_routingTable():
json_file = "{}/{}/view_{}.json".format(thisDir, router.name, view)
expected = json.loads(open(json_file).read())
test_func = partial(
- topotest.router_json_cmp, router, "show ip bgp view {} json".format(view), expected
+ topotest.router_json_cmp,
+ router,
+ "show ip bgp view {} json".format(view),
+ expected,
)
_, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
assertmsg = "Routing Table verification failed for router {}, view {}".format(
diff --git a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py
index a2566bd38..1bd4c233d 100644
--- a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py
+++ b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py
@@ -277,7 +277,7 @@ def test_next_hop_attribute(request):
def test_aspath_attribute(request):
- " Verifying AS_PATH attribute functionality"
+ "Verifying AS_PATH attribute functionality"
tgen = get_topogen()
@@ -488,7 +488,7 @@ def test_aspath_attribute(request):
def test_localpref_attribute(request):
- " Verifying LOCAL PREFERENCE attribute functionality"
+ "Verifying LOCAL PREFERENCE attribute functionality"
tgen = get_topogen()
@@ -1413,7 +1413,7 @@ def test_med_attribute(request):
def test_admin_distance(request):
- " Verifying admin distance functionality"
+ "Verifying admin distance functionality"
tgen = get_topogen()
diff --git a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py
index d36bc3123..d51dc5f0c 100644
--- a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py
+++ b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py
@@ -49,12 +49,8 @@ def build_topo(tgen):
switch.add_link(router)
switch = tgen.gears["s1"]
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
- )
- peer2 = tgen.add_exabgp_peer(
- "peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1"
- )
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1")
+ peer2 = tgen.add_exabgp_peer("peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1")
switch.add_link(peer1)
switch.add_link(peer2)
diff --git a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
index 323c06527..96c4b664b 100755
--- a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
+++ b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
@@ -49,9 +49,7 @@ def build_topo(tgen):
switch.add_link(router)
switch = tgen.gears["s1"]
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
- )
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1")
switch.add_link(peer1)
@@ -61,12 +59,10 @@ def setup_module(module):
router = tgen.gears["r1"]
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, "{}/zebra.conf".format("r1"))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format("r1"))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, "{}/bgpd.conf".format("r1"))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1"))
)
router.start()
@@ -104,11 +100,11 @@ def test_r1_rib():
return topotest.json_cmp(output, expected)
def check(name, cmd, expected_file):
- logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ logger.info('[+] check {} "{}" {}'.format(name, cmd, expected_file))
tgen = get_topogen()
func = functools.partial(_check, name, cmd, expected_file)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
check("r1", "show bgp ipv6 vpn 2001:1::/64 json", "r1/vpnv6_rib_entry1.json")
check("r1", "show bgp ipv6 vpn 2001:2::/64 json", "r1/vpnv6_rib_entry2.json")
diff --git a/tests/topotests/bgp_route_map/test_route_map_topo1.py b/tests/topotests/bgp_route_map/test_route_map_topo1.py
index 5af7296fc..3c2d7f28a 100644
--- a/tests/topotests/bgp_route_map/test_route_map_topo1.py
+++ b/tests/topotests/bgp_route_map/test_route_map_topo1.py
@@ -444,8 +444,10 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request):
result = verify_rib(
tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
)
- assert result is not True, ("Testcase {} : Failed \n"
- "routes are not present in rib \n Error: {}".format(tc_name, result))
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ "routes are not present in rib \n Error: {}".format(tc_name, result)
+ )
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
@@ -464,8 +466,10 @@ def test_route_map_inbound_outbound_same_neighbor_p0(request):
result = verify_rib(
tgen, adt, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, ("Testcase {} : Failed \n "
- "routes are not present in rib \n Error: {}".format(tc_name, result))
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "routes are not present in rib \n Error: {}".format(tc_name, result)
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
@@ -660,13 +664,13 @@ def test_route_map_with_action_values_combination_of_prefix_action_p0(
result = verify_rib(
tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
)
- assert result is not True, ("Testcase {} : Failed \n "
- "Routes are still present \n Error: {}".format(tc_name, result))
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "Routes are still present \n Error: {}".format(tc_name, result)
+ )
logger.info("Expected behaviour: {}".format(result))
else:
- result = verify_rib(
- tgen, adt, dut, input_dict_2, protocol=protocol
- )
+ result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
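All of the assertions being re-wrapped in this file follow the same negative-check convention: verify_rib() is called with expected=False, and the test treats any non-True return (an error string explaining why the routes were not found) as success, asserting result is not True. A minimal sketch with a hypothetical verify_rib stand-in (the real helper also takes an expected flag, consumed by the retry() decorator that appears later in this diff):

    def verify_rib(present_routes, wanted_routes, expected=True):
        """Hypothetical stand-in: return True when every wanted route is
        installed, otherwise an error string naming what is missing.
        'expected' is kept only for parity with the library signature."""
        missing = [r for r in wanted_routes if r not in present_routes]
        if not missing:
            return True
        return "routes missing from RIB: {}".format(missing)

    tc_name = "test_route_map_inbound_outbound_same_neighbor_p0"
    # The route map is supposed to filter these prefixes, so a non-True
    # result (the error string) is the expected outcome.
    result = verify_rib({"10.0.0.0/24"}, {"20.0.0.0/24"}, expected=False)
    assert result is not True, (
        "Testcase {} : Failed \n"
        "routes are not present in rib \n Error: {}".format(tc_name, result)
    )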
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
index 2f2bdbc8e..2d544c1cc 100755
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
@@ -105,10 +105,12 @@ def setup_module(mod):
router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
- router.load_config(TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
tgen.gears["r1"].run("ip link add vrf10 type vrf table 10")
tgen.gears["r1"].run("ip link set vrf10 up")
@@ -151,11 +153,11 @@ def test_rib():
return topotest.json_cmp(output, expected)
def check(name, cmd, expected_file):
- logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ logger.info('[+] check {} "{}" {}'.format(name, cmd, expected_file))
tgen = get_topogen()
func = functools.partial(_check, name, cmd, expected_file)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
check("r1", "show bgp ipv6 vpn json", "r1/vpnv6_rib.json")
check("r2", "show bgp ipv6 vpn json", "r2/vpnv6_rib.json")
@@ -183,7 +185,7 @@ def test_ping():
tgen = get_topogen()
func = functools.partial(_check, name, dest_addr, match)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
check("ce1", "2001:2::2", " 0% packet loss")
check("ce1", "2001:3::2", " 0% packet loss")
diff --git a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py
index b3e6ad3a1..1c00c492e 100644
--- a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py
+++ b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py
@@ -74,6 +74,8 @@ pytestmark = [pytest.mark.bgpd]
CWD = os.path.dirname(os.path.realpath(__file__))
+
+
def build_topo(tgen):
for routern in range(1, 6):
tgen.add_router("r{}".format(routern))
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
index 30d05a640..8ba96ef7a 100644
--- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
@@ -902,7 +902,9 @@ def test_modify_route_map_match_set_clauses_p1(request):
rmap_name="rmap_IMP_{}".format(addr_type),
input_dict=input_rmap,
)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Change community-list to match a different value then " "100:100.")
diff --git a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py
index 8d2cb08ca..c380cc10b 100644
--- a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py
+++ b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py
@@ -178,7 +178,10 @@ def test_bgp_convergence():
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, router, "show bgp vrf r1-bgp-cust1 summary json", expected
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf r1-bgp-cust1 summary json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=90, wait=0.5)
assertmsg = "BGP router network did not converge"
diff --git a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
index e630ef910..191a0b53e 100644
--- a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
+++ b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
@@ -39,7 +39,6 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-
pytestmark = [pytest.mark.bgpd]
diff --git a/tests/topotests/config_timing/test_config_timing.py b/tests/topotests/config_timing/test_config_timing.py
index 69b3edcf3..c3eb8ed84 100644
--- a/tests/topotests/config_timing/test_config_timing.py
+++ b/tests/topotests/config_timing/test_config_timing.py
@@ -48,6 +48,7 @@ from lib.topolog import logger
pytestmark = [pytest.mark.staticd]
+
def build_topo(tgen):
tgen.add_router("r1")
switch = tgen.add_switch("s1")
@@ -61,7 +62,8 @@ def setup_module(mod):
router_list = tgen.routers()
for rname, router in router_list.items():
router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)),
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
)
router.load_config(
TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname))
@@ -74,6 +76,7 @@ def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def get_ip_networks(super_prefix, count):
count_log2 = math.log(count, 2)
if count_log2 != int(count_log2):
@@ -83,6 +86,7 @@ def get_ip_networks(super_prefix, count):
network = ipaddress.ip_network(super_prefix)
return tuple(network.subnets(count_log2))[0:count]
+
def test_static_timing():
tgen = get_topogen()
@@ -90,7 +94,14 @@ def test_static_timing():
pytest.skip(tgen.errors)
def do_config(
- count, bad_indices, base_delta, d_multiplier, add=True, do_ipv6=False, super_prefix=None, en_dbg=False
+ count,
+ bad_indices,
+ base_delta,
+ d_multiplier,
+ add=True,
+ do_ipv6=False,
+ super_prefix=None,
+ en_dbg=False,
):
router_list = tgen.routers()
tot_delta = float(0)
@@ -103,15 +114,11 @@ def test_static_timing():
optyped = "added" if add else "removed"
for rname, router in router_list.items():
- router.logger.info("{} {} static {} routes".format(
- optype, count, iptype)
- )
+ router.logger.info("{} {} static {} routes".format(optype, count, iptype))
# Generate config file.
config_file = os.path.join(
- router.logdir, rname, "{}-routes-{}.conf".format(
- iptype.lower(), optype
- )
+ router.logdir, rname, "{}-routes-{}.conf".format(iptype.lower(), optype)
)
with open(config_file, "w") as f:
for i, net in enumerate(get_ip_networks(super_prefix, count)):
@@ -155,28 +162,51 @@ def test_static_timing():
# Number of static routes
prefix_count = 10000
- prefix_base = [[u"10.0.0.0/8", u"11.0.0.0/8"],
- [u"2100:1111:2220::/44", u"2100:3333:4440::/44"]]
+ prefix_base = [
+ [u"10.0.0.0/8", u"11.0.0.0/8"],
+ [u"2100:1111:2220::/44", u"2100:3333:4440::/44"],
+ ]
bad_indices = []
for ipv6 in [False, True]:
- base_delta = do_config(prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0])
+ base_delta = do_config(
+ prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0]
+ )
# Another set of same number of prefixes
- do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][1])
+ do_config(
+ prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][1]
+ )
# Duplicate config
- do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0])
+ do_config(
+ prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0]
+ )
# Remove 1/2 of duplicate
- do_config(prefix_count // 2, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0])
+ do_config(
+ prefix_count // 2,
+ bad_indices,
+ base_delta,
+ 2,
+ False,
+ ipv6,
+ prefix_base[ipv6][0],
+ )
# Add all back in so 1/2 replicate 1/2 new
- do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0])
+ do_config(
+ prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0]
+ )
# remove all
- delta = do_config(prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0])
- delta += do_config(prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][1])
+ delta = do_config(
+ prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0]
+ )
+ delta += do_config(
+ prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][1]
+ )
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index ef349251f..2dfeff70f 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -82,7 +82,7 @@ def pytest_addoption(parser):
help="Do not pause after (disables default when --shell or -vtysh given)",
)
- rundir_help="directory for running in and log files"
+ rundir_help = "directory for running in and log files"
parser.addini("rundir", rundir_help, default="/tmp/topotests")
parser.addoption("--rundir", metavar="DIR", help=rundir_help)
@@ -221,7 +221,6 @@ def pytest_configure(config):
is_xdist = True
is_worker = True
-
# -----------------------------------------------------
# Set some defaults for the pytest.ini [pytest] section
# ---------------------------------------------------
@@ -345,7 +344,7 @@ def pytest_runtest_makereport(item, call):
except:
call.excinfo = ExceptionInfo()
- title='unset'
+ title = "unset"
if call.excinfo is None:
error = False
@@ -404,7 +403,11 @@ def pytest_runtest_makereport(item, call):
for node in Mininet.g_mnet_inst.hosts.values():
pause = True
- channel = "{}-{}".format(os.getpid(), Commander.tmux_wait_gen) if not isatty else None
+ channel = (
+ "{}-{}".format(os.getpid(), Commander.tmux_wait_gen)
+ if not isatty
+ else None
+ )
Commander.tmux_wait_gen += 1
wait_for_channels.append(channel)
@@ -415,7 +418,7 @@ def pytest_runtest_makereport(item, call):
title="{} ({})".format(title, node.name),
name=title,
tmux_target=win_info,
- wait_for=channel
+ wait_for=channel,
)
if win_info is None:
win_info = pane_info
@@ -439,9 +442,7 @@ def pytest_runtest_makereport(item, call):
'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: '
)
except NameError:
- user = input(
- 'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: '
- )
+ user = input('PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ')
user = user.strip()
if user == "cli":
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index 6cce0958a..e7a72ef33 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -1998,12 +1998,8 @@ def test_bgp_attributes_for_evpn_address_family_p1(request, attribute):
input_dict_1 = {
"e1": {
"route_maps": {
- "rmap_d1": [
- {"action": "permit", "set": {attribute: 120}}
- ],
- "rmap_d2": [
- {"action": "permit", "set": {attribute: 150}}
- ],
+ "rmap_d1": [{"action": "permit", "set": {attribute: 120}}],
+ "rmap_d2": [{"action": "permit", "set": {attribute: 150}}],
}
}
}
diff --git a/tests/topotests/example_test/test_template.py b/tests/topotests/example_test/test_template.py
index dfc0bb4a0..e94bb905a 100644
--- a/tests/topotests/example_test/test_template.py
+++ b/tests/topotests/example_test/test_template.py
@@ -66,7 +66,6 @@ def build_topo(tgen):
def setup_module(mod):
"Sets up the pytest environment"
-
# This function initiates the topology build with Topogen...
tgen = Topogen(build_topo, mod.__name__)
diff --git a/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
index ce3ae1f4c..107b5e962 100755
--- a/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
+++ b/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py
@@ -144,7 +144,7 @@ def teardown_module(mod):
def test_bgp_convergence(request):
- " Test BGP daemon convergence "
+ "Test BGP daemon convergence"
tgen = get_topogen()
global bgp_convergence
@@ -167,7 +167,7 @@ def test_bgp_convergence(request):
def test_static_routes(request):
- " Test to create and verify static routes. "
+ "Test to create and verify static routes."
tgen = get_topogen()
if bgp_convergence is not True:
diff --git a/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py
index 771df9f7a..b03215d21 100755
--- a/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py
+++ b/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py
@@ -145,7 +145,7 @@ def teardown_module(mod):
def test_bgp_convergence(request):
- " Test BGP daemon convergence "
+ "Test BGP daemon convergence"
tgen = get_topogen()
global bgp_convergence
@@ -168,7 +168,7 @@ def test_bgp_convergence(request):
def test_static_routes(request):
- " Test to create and verify static routes. "
+ "Test to create and verify static routes."
tgen = get_topogen()
if bgp_convergence is not True:
diff --git a/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py
index 2cba65d4d..594b156f8 100755
--- a/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py
+++ b/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -146,7 +146,7 @@ def teardown_module(mod):
def test_bgp_convergence(request):
- " Test BGP daemon convergence "
+ "Test BGP daemon convergence"
tgen = get_topogen()
global bgp_convergence
@@ -169,7 +169,7 @@ def test_bgp_convergence(request):
def test_static_routes(request):
- " Test to create and verify static routes. "
+ "Test to create and verify static routes."
tgen = get_topogen()
if bgp_convergence is not True:
diff --git a/tests/topotests/isis_topo1/test_isis_topo1.py b/tests/topotests/isis_topo1/test_isis_topo1.py
index a597e3107..df63de76d 100644
--- a/tests/topotests/isis_topo1/test_isis_topo1.py
+++ b/tests/topotests/isis_topo1/test_isis_topo1.py
@@ -253,11 +253,7 @@ def dict_merge(dct, merge_dct):
https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
"""
for k, v in merge_dct.items():
- if (
- k in dct
- and isinstance(dct[k], dict)
- and topotest.is_mapping(merge_dct[k])
- ):
+ if k in dct and isinstance(dct[k], dict) and topotest.is_mapping(merge_dct[k]):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
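The condition collapsed onto one line here guards a classic recursive dictionary merge: nested mappings are merged key by key, everything else is overwritten in place. A self-contained version of the same idea, using isinstance(..., dict) where the test file calls topotest.is_mapping():

    def dict_merge(dct, merge_dct):
        """Recursively merge merge_dct into dct in place: nested dicts are
        merged, any other value in merge_dct replaces what dct holds."""
        for k, v in merge_dct.items():
            if k in dct and isinstance(dct[k], dict) and isinstance(v, dict):
                dict_merge(dct[k], v)
            else:
                dct[k] = v

    base = {"r1": {"isis": True, "lo": {"ipv4": "10.254.0.1/32"}}}
    dict_merge(base, {"r1": {"lo": {"ipv6": "2001:db8::1/128"}}})
    # base["r1"]["lo"] now carries both the ipv4 and ipv6 entries.
    print(base)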
diff --git a/tests/topotests/isis_topo1_vrf/test_isis_topo1_vrf.py b/tests/topotests/isis_topo1_vrf/test_isis_topo1_vrf.py
index 59b8666b7..74d5edeca 100644
--- a/tests/topotests/isis_topo1_vrf/test_isis_topo1_vrf.py
+++ b/tests/topotests/isis_topo1_vrf/test_isis_topo1_vrf.py
@@ -281,11 +281,7 @@ def dict_merge(dct, merge_dct):
https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
"""
for k, v in merge_dct.items():
- if (
- k in dct
- and isinstance(dct[k], dict)
- and topotest.is_mapping(merge_dct[k])
- ):
+ if k in dct and isinstance(dct[k], dict) and topotest.is_mapping(merge_dct[k]):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
diff --git a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py
index 13fa90679..b198f2936 100644
--- a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py
+++ b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py
@@ -80,6 +80,7 @@ from lib.snmptest import SnmpTester
pytestmark = [pytest.mark.ldpd, pytest.mark.isisd, pytest.mark.snmp]
+
def build_topo(tgen):
"Build function"
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index ac834bcf4..d05332388 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -1227,15 +1227,14 @@ def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
tgen = get_topogen()
for router, rnode in tgen.routers().items():
- if 'bgp' not in topo['routers'][router]:
+ if "bgp" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying BGP Convergence on router %s:", router)
- show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
@@ -1272,39 +1271,43 @@ def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True):
data = topo["routers"][bgp_neighbor]["links"]
for dest_link in dest_link_dict.keys():
if dest_link in data:
- peer_details = \
- peer_data[_addr_type][dest_link]
+ peer_details = peer_data[_addr_type][dest_link]
- neighbor_ip = \
- data[dest_link][_addr_type].split(
- "/")[0]
+ neighbor_ip = data[dest_link][_addr_type].split("/")[0]
nh_state = None
- if "ipv4Unicast" in show_bgp_json[vrf] or \
- "ipv6Unicast" in show_bgp_json[vrf]:
- errormsg = ("[DUT: %s] VRF: %s, "
- "ipv4Unicast/ipv6Unicast"
- " address-family present"
- " under l2vpn" % (router,
- vrf))
+ if (
+ "ipv4Unicast" in show_bgp_json[vrf]
+ or "ipv6Unicast" in show_bgp_json[vrf]
+ ):
+ errormsg = (
+ "[DUT: %s] VRF: %s, "
+ "ipv4Unicast/ipv6Unicast"
+ " address-family present"
+ " under l2vpn" % (router, vrf)
+ )
return errormsg
- l2VpnEvpn_data = \
- show_bgp_json[vrf]["l2VpnEvpn"][
- "peers"]
- nh_state = \
- l2VpnEvpn_data[neighbor_ip]["state"]
+ l2VpnEvpn_data = show_bgp_json[vrf]["l2VpnEvpn"][
+ "peers"
+ ]
+ nh_state = l2VpnEvpn_data[neighbor_ip]["state"]
if nh_state == "Established":
no_of_evpn_peer += 1
if no_of_evpn_peer == total_evpn_peer:
- logger.info("[DUT: %s] VRF: %s, BGP is Converged for "
- "epvn peers", router, vrf)
+ logger.info(
+ "[DUT: %s] VRF: %s, BGP is Converged for " "evpn peers",
+ router,
+ vrf,
+ )
result = True
else:
- errormsg = ("[DUT: %s] VRF: %s, BGP is not converged "
- "for evpn peers" % (router, vrf))
+ errormsg = (
+ "[DUT: %s] VRF: %s, BGP is not converged "
+ "for evpn peers" % (router, vrf)
+ )
return errormsg
else:
total_peer = 0
@@ -1312,76 +1315,72 @@ def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True):
if not check_address_types(addr_type):
continue
- bgp_neighbors = \
- bgp_addr_type[addr_type]["unicast"]["neighbor"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor in bgp_neighbors:
- total_peer += \
- len(bgp_neighbors[bgp_neighbor]["dest_link"])
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
no_of_peer = 0
for addr_type in bgp_addr_type.keys():
if not check_address_types(addr_type):
continue
- bgp_neighbors = \
- bgp_addr_type[addr_type]["unicast"]["neighbor"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor, peer_data in bgp_neighbors.items():
- for dest_link in peer_data["dest_link"].\
- keys():
- data = \
- topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- peer_details = \
- peer_data['dest_link'][dest_link]
- # for link local neighbors
- if "neighbor_type" in peer_details and \
- peer_details["neighbor_type"] == \
- 'link-local':
- intf = topo["routers"][bgp_neighbor][
- "links"][dest_link]["interface"]
- neighbor_ip = get_frr_ipv6_linklocal(
- tgen, bgp_neighbor, intf)
- elif "source_link" in peer_details:
- neighbor_ip = \
- topo["routers"][bgp_neighbor][
- "links"][peer_details[
- 'source_link']][
- addr_type].\
- split("/")[0]
- elif "neighbor_type" in peer_details and \
- peer_details["neighbor_type"] == \
- 'unnumbered':
- neighbor_ip = \
- data[dest_link]["peer-interface"]
- else:
- neighbor_ip = \
- data[dest_link][addr_type].split(
- "/")[0]
- nh_state = None
- neighbor_ip = neighbor_ip.lower()
- if addr_type == "ipv4":
- ipv4_data = show_bgp_json[vrf][
- "ipv4Unicast"]["peers"]
- nh_state = \
- ipv4_data[neighbor_ip]["state"]
- else:
- ipv6_data = show_bgp_json[vrf][
- "ipv6Unicast"]["peers"]
- if neighbor_ip in ipv6_data:
- nh_state = \
- ipv6_data[neighbor_ip]["state"]
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ peer_details = peer_data["dest_link"][dest_link]
+ # for link local neighbors
+ if (
+ "neighbor_type" in peer_details
+ and peer_details["neighbor_type"] == "link-local"
+ ):
+ intf = topo["routers"][bgp_neighbor]["links"][
+ dest_link
+ ]["interface"]
+ neighbor_ip = get_frr_ipv6_linklocal(
+ tgen, bgp_neighbor, intf
+ )
+ elif "source_link" in peer_details:
+ neighbor_ip = topo["routers"][bgp_neighbor][
+ "links"
+ ][peer_details["source_link"]][addr_type].split(
+ "/"
+ )[
+ 0
+ ]
+ elif (
+ "neighbor_type" in peer_details
+ and peer_details["neighbor_type"] == "unnumbered"
+ ):
+ neighbor_ip = data[dest_link]["peer-interface"]
+ else:
+ neighbor_ip = data[dest_link][addr_type].split("/")[
+ 0
+ ]
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json[vrf]["ipv4Unicast"][
+ "peers"
+ ]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json[vrf]["ipv6Unicast"][
+ "peers"
+ ]
+ if neighbor_ip in ipv6_data:
+ nh_state = ipv6_data[neighbor_ip]["state"]
- if nh_state == "Established":
- no_of_peer += 1
+ if nh_state == "Established":
+ no_of_peer += 1
if no_of_peer == total_peer and no_of_peer > 0:
- logger.info("[DUT: %s] VRF: %s, BGP is Converged",
- router, vrf)
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged", router, vrf)
result = True
else:
- errormsg = ("[DUT: %s] VRF: %s, BGP is not converged"
- % (router, vrf))
+ errormsg = "[DUT: %s] VRF: %s, BGP is not converged" % (router, vrf)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1390,7 +1389,14 @@ def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True):
@retry(retry_timeout=16)
def verify_bgp_community(
- tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False, expected=True
+ tgen,
+ addr_type,
+ router,
+ network,
+ input_dict=None,
+ vrf=None,
+ bestpath=False,
+ expected=True,
):
"""
API to verify BGP large community is attached in route for any given
@@ -2216,7 +2222,7 @@ def verify_bgp_attributes(
input_dict=None,
seq_id=None,
nexthop=None,
- expected=True
+ expected=True,
):
"""
API will verify BGP attributes set by Route-map for given prefix and
@@ -2668,7 +2674,14 @@ def verify_best_path_as_per_admin_distance(
@retry(retry_timeout=10, initial_wait=2)
def verify_bgp_rib(
- tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None, expected=True
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=None,
+ aspath=None,
+ multi_nh=None,
+ expected=True,
):
"""
This API is to verify whether bgp rib has any
@@ -2970,7 +2983,9 @@ def verify_bgp_rib(
@retry(retry_timeout=10)
-def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
+def verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut, peer, expected=True
+):
"""
This API is to verify verify_graceful_restart configuration of DUT and
cross verify the same from the peer bgp router.
@@ -3772,7 +3787,9 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer)
@retry(retry_timeout=8)
-def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut, peer, expected=True):
+def verify_gr_address_family(
+ tgen, topo, addr_type, addr_family, dut, peer, expected=True
+):
"""
This API is to verify gr_address_family in the BGP gr capability advertised
by the neighbor router
@@ -3830,9 +3847,7 @@ def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut, peer, expe
show_bgp_graceful_json = run_frr_cmd(
rnode,
- "show bgp {} neighbor {} graceful-restart json".format(
- addr_type, neighbor_ip
- ),
+ "show bgp {} neighbor {} graceful-restart json".format(addr_type, neighbor_ip),
isjson=True,
)
@@ -3880,7 +3895,7 @@ def verify_attributes_for_evpn_routes(
ipLen=None,
rd_peer=None,
rt_peer=None,
- expected=True
+ expected=True,
):
"""
API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1"
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 865f7fd44..cbc73915a 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -129,12 +129,14 @@ DEBUG_LOGS = {
g_iperf_client_procs = {}
g_iperf_server_procs = {}
+
def is_string(value):
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
+
if config.has_option("topogen", "verbosity"):
loglevel = config.get("topogen", "verbosity")
loglevel = loglevel.lower()
@@ -148,7 +150,9 @@ if config.has_option("topogen", "frrtest_log_dir"):
frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
print("frrtest_log_file..", frrtest_log_file)
- logger = get_logger("test_execution_logs", log_level=loglevel, target=frrtest_log_file)
+ logger = get_logger(
+ "test_execution_logs", log_level=loglevel, target=frrtest_log_file
+ )
print("Logs will be sent to logfile: {}".format(frrtest_log_file))
if config.has_option("topogen", "show_router_config"):
@@ -208,7 +212,6 @@ class InvalidCLIError(Exception):
"""Raise when the CLI command is wrong"""
-
def run_frr_cmd(rnode, cmd, isjson=False):
"""
Execute frr show commands in privileged mode
@@ -339,9 +342,7 @@ def create_common_configurations(
frr_cfg_fd.write("\n")
except IOError as err:
- logger.error(
- "Unable to open FRR Config '%s': %s" % (fname, str(err))
- )
+ logger.error("Unable to open FRR Config '%s': %s" % (fname, str(err)))
return False
finally:
frr_cfg_fd.close()
@@ -489,10 +490,10 @@ def reset_config_on_routers(tgen, routerName=None):
# Trim the router list if needed
router_list = tgen.routers()
if routerName:
- if ((routerName not in ROUTER_LIST) or (routerName not in router_list)):
+ if (routerName not in ROUTER_LIST) or (routerName not in router_list):
logger.debug("Exiting API: reset_config_on_routers: no routers")
return True
- router_list = { routerName: router_list[routerName] }
+ router_list = {routerName: router_list[routerName]}
delta_fmt = tgen.logdir + "/{}/delta-{}.conf"
# FRRCFG_BKUP_FILE
@@ -514,22 +515,30 @@ def reset_config_on_routers(tgen, routerName=None):
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
- logger.error("Get running config for %s failed %d: %s", rname, p.returncode, error)
- raise InvalidCLIError("vtysh show running error on {}: {}".format(rname, error))
+ logger.error(
+ "Get running config for %s failed %d: %s", rname, p.returncode, error
+ )
+ raise InvalidCLIError(
+ "vtysh show running error on {}: {}".format(rname, error)
+ )
#
# Get all delta's in parallel
#
procs = {}
for rname in router_list:
- logger.info("Generating delta for router %s to new configuration (gen %d)", rname, gen)
+ logger.info(
+ "Generating delta for router %s to new configuration (gen %d)", rname, gen
+ )
procs[rname] = tgen.net.popen(
- [ "/usr/lib/frr/frr-reload.py",
- "--test-reset",
- "--input",
- run_cfg_fmt.format(rname, gen),
- "--test",
- target_cfg_fmt.format(rname) ],
+ [
+ "/usr/lib/frr/frr-reload.py",
+ "--test-reset",
+ "--input",
+ run_cfg_fmt.format(rname, gen),
+ "--test",
+ target_cfg_fmt.format(rname),
+ ],
stdin=None,
stdout=open(delta_fmt.format(rname, gen), "w"),
stderr=subprocess.PIPE,
@@ -537,7 +546,9 @@ def reset_config_on_routers(tgen, routerName=None):
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
- logger.error("Delta file creation for %s failed %d: %s", rname, p.returncode, error)
+ logger.error(
+ "Delta file creation for %s failed %d: %s", rname, p.returncode, error
+ )
raise InvalidCLIError("frr-reload error for {}: {}".format(rname, error))
#
@@ -558,13 +569,19 @@ def reset_config_on_routers(tgen, routerName=None):
vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname, gen))
if not p.returncode:
router_list[rname].logger.info(
- '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output)
+ '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
)
else:
router_list[rname].logger.warning(
- '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output)
+ '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ logger.error(
+ "Delta file apply for %s failed %d: %s", rname, p.returncode, output
)
- logger.error("Delta file apply for %s failed %d: %s", rname, p.returncode, output)
# We really need to enable this failure; however, currently frr-reload.py
# producing invalid "no" commands as it just prepends "no", but some of the
@@ -590,9 +607,16 @@ def reset_config_on_routers(tgen, routerName=None):
for rname, p in procs.items():
output, _ = p.communicate()
if p.returncode:
- logger.warning("Get running config for %s failed %d: %s", rname, p.returncode, output)
+ logger.warning(
+ "Get running config for %s failed %d: %s",
+ rname,
+ p.returncode,
+ output,
+ )
else:
- logger.info("Configuration on router %s after reset:\n%s", rname, output)
+ logger.info(
+ "Configuration on router %s after reset:\n%s", rname, output
+ )
logger.debug("Exiting API: reset_config_on_routers")
return True
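The reset_config_on_routers() hunks reshape, without changing, a three-stage pipeline: dump each router's running config in parallel, ask frr-reload.py in test mode to compute the delta back to the saved target config, then apply each delta with vtysh -f. A compact sketch of the middle stage on a host with FRR installed, using only the flags that appear in the code above (the file paths are illustrative):

    import os
    import subprocess

    rname, gen = "r1", 0
    logdir = "/tmp/topotests"  # illustrative; the harness uses tgen.logdir
    run_cfg = "{}/{}/frr_show_run-{}.conf".format(logdir, rname, gen)
    target_cfg = "{}/{}/frr_target.conf".format(logdir, rname)
    delta_cfg = "{}/{}/delta-{}.conf".format(logdir, rname, gen)
    os.makedirs(os.path.dirname(delta_cfg), exist_ok=True)

    # frr-reload.py writes the commands needed to move from the --input
    # config to the target config on stdout; the harness captures them in a
    # delta file and later applies that file with "vtysh -f <delta>".
    with open(delta_cfg, "w") as out:
        p = subprocess.Popen(
            [
                "/usr/lib/frr/frr-reload.py",
                "--test-reset",
                "--input",
                run_cfg,
                "--test",
                target_cfg,
            ],
            stdin=None,
            stdout=out,
            stderr=subprocess.PIPE,
        )
        _, error = p.communicate()
        if p.returncode:
            raise RuntimeError("frr-reload error for {}: {}".format(rname, error))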
@@ -634,12 +658,14 @@ def load_config_to_routers(tgen, routers, save_bkup=False):
try:
frr_cfg_file = frr_cfg_file_fmt.format(rname)
frr_cfg_save_file = frr_cfg_save_file_fmt.format(rname, gen)
- frr_cfg_bkup = frr_cfg_bkup_fmt.format(rname)
+ frr_cfg_bkup = frr_cfg_bkup_fmt.format(rname)
with open(frr_cfg_file, "r+") as cfg:
data = cfg.read()
logger.info(
"Applying following configuration on router %s (gen: %d):\n%s",
- rname, gen, data
+ rname,
+ gen,
+ data,
)
# Always save a copy of what we just did
with open(frr_cfg_save_file, "w") as bkup:
@@ -654,13 +680,12 @@ def load_config_to_routers(tgen, routers, save_bkup=False):
stderr=subprocess.STDOUT,
)
except IOError as err:
- logging.error(
- "Unable to open config File. error(%s): %s",
- err.errno, err.strerror
+ logger.error(
+ "Unable to open config File. error(%s): %s", err.errno, err.strerror
)
return False
except Exception as error:
- logging.error("Unable to apply config on %s: %s", rname, str(error))
+ logger.error("Unable to apply config on %s: %s", rname, str(error))
return False
errors = []
@@ -670,15 +695,25 @@ def load_config_to_routers(tgen, routers, save_bkup=False):
vtysh_command = "vtysh -f " + frr_cfg_file
if not p.returncode:
router_list[rname].logger.info(
- '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output)
+ '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
)
else:
router_list[rname].logger.error(
- '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output)
+ '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ logger.error(
+ "Config apply for %s failed %d: %s", rname, p.returncode, output
)
- logger.error("Config apply for %s failed %d: %s", rname, p.returncode, output)
# We can't throw an exception here as we won't clear the config file.
- errors.append(InvalidCLIError("load_config_to_routers error for {}: {}".format(rname, output)))
+ errors.append(
+ InvalidCLIError(
+ "load_config_to_routers error for {}: {}".format(rname, output)
+ )
+ )
# Empty the config file or we append to it next time through.
with open(frr_cfg_file, "r+") as cfg:
@@ -698,9 +733,14 @@ def load_config_to_routers(tgen, routers, save_bkup=False):
for rname, p in procs.items():
output, _ = p.communicate()
if p.returncode:
- logger.warning("Get running config for %s failed %d: %s", rname, p.returncode, output)
+ logger.warning(
+ "Get running config for %s failed %d: %s",
+ rname,
+ p.returncode,
+ output,
+ )
else:
- logger.info("New configuration for router %s:\n%s", rname,output)
+ logger.info("New configuration for router %s:\n%s", rname, output)
logger.debug("Exiting API: load_config_to_routers")
return not errors
@@ -757,36 +797,38 @@ def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
else:
cmd = "show interface"
for chk_ll in range(0, 60):
- sleep(1/4)
+ sleep(1 / 4)
ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
# Fix newlines (make them all the same)
- ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+ ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
interface = None
ll_per_if_count = 0
for line in ifaces:
# Interface name
- m = re_search('Interface ([a-zA-Z0-9-]+) is', line)
+ m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
if m:
interface = m.group(1).split(" ")[0]
ll_per_if_count = 0
# Interface ip
- m1 = re_search('inet6 (fe80[:a-fA-F0-9]+/[0-9]+)', line)
+ m1 = re_search("inet6 (fe80[:a-fA-F0-9]+/[0-9]+)", line)
if m1:
local = m1.group(1)
ll_per_if_count += 1
if ll_per_if_count > 1:
- linklocal += [["%s-%s" %
- (interface, ll_per_if_count), local]]
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
else:
linklocal += [[interface, local]]
try:
if linklocal:
if intf:
- return [_linklocal[1] for _linklocal in linklocal if _linklocal[0]==intf][0].\
- split("/")[0]
+ return [
+ _linklocal[1]
+ for _linklocal in linklocal
+ if _linklocal[0] == intf
+ ][0].split("/")[0]
return linklocal
except IndexError:
continue
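get_frr_ipv6_linklocal() scrapes "show interface" output with two regular expressions, one for the interface name and one for the fe80:: address, retrying for up to 60 quarter-second intervals so the kernel has time to assign the address. The scraping step can be shown in isolation on canned output (hypothetical text, same regexes as above):

    from re import search as re_search

    ifaces_output = """\
    Interface r1-eth0 is up, line protocol is up
      inet6 fe80::a8bb:ccff:fe00:100/64
    Interface r1-eth1 is up, line protocol is up
      inet6 fe80::a8bb:ccff:fe00:200/64
    """

    linklocal = []
    interface = None
    for line in ifaces_output.splitlines():
        m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
        if m:
            interface = m.group(1).split(" ")[0]
        m1 = re_search("inet6 (fe80[:a-fA-F0-9]+/[0-9]+)", line)
        if m1 and interface:
            linklocal.append([interface, m1.group(1)])

    # Pick the address for a specific interface, dropping the prefix length.
    intf = "r1-eth1"
    addr = [ll[1] for ll in linklocal if ll[0] == intf][0].split("/")[0]
    print(addr)  # fe80::a8bb:ccff:fe00:200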
@@ -804,7 +846,7 @@ def generate_support_bundle():
tgen = get_topogen()
router_list = tgen.routers()
- test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
+ test_name = os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0]
bundle_procs = {}
for rname, rnode in router_list.items():
@@ -812,7 +854,10 @@ def generate_support_bundle():
dst_bundle = "{}/{}/support_bundles/{}".format(tgen.logdir, rname, test_name)
rnode.run("mkdir -p " + dst_bundle)
- gen_sup_cmd = ["/usr/lib/frr/generate_support_bundle.py", "--log-dir=" + dst_bundle]
+ gen_sup_cmd = [
+ "/usr/lib/frr/generate_support_bundle.py",
+ "--log-dir=" + dst_bundle,
+ ]
bundle_procs[rname] = tgen.net[rname].popen(gen_sup_cmd, stdin=None)
for rname, rnode in router_list.items():
@@ -879,14 +924,15 @@ def start_topology(tgen, daemon=None):
except IOError as err:
logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
-
# Loading empty zebra.conf file to router, to start the zebra daemon
router.load_config(
TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
)
# Loading empty bgpd.conf file to router, to start the bgp daemon
- router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname))
+ router.load_config(
+ TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname)
+ )
if daemon and "ospfd" in daemon:
# Loading empty ospf.conf file to router, to start the ospf daemon
@@ -1046,9 +1092,7 @@ def add_interfaces_to_vlan(tgen, input_dict):
# Adding interface to VLAN
vlan_intf = "{}.{}".format(interface, vlan)
cmd = "ip link add link {} name {} type vlan id {}".format(
- interface,
- vlan_intf,
- vlan
+ interface, vlan_intf, vlan
)
logger.info("[DUT: %s]: Running command: %s", dut, cmd)
rnode.run(cmd)
@@ -1061,8 +1105,7 @@ def add_interfaces_to_vlan(tgen, input_dict):
# Assigning IP address
ifaddr = ipaddress.ip_interface(
u"{}/{}".format(
- frr_unicode(data["ip"]),
- frr_unicode(data["subnet"])
+ frr_unicode(data["ip"]), frr_unicode(data["subnet"])
)
)
@@ -1491,10 +1534,9 @@ def create_interface_in_kernel(
if not netmask:
ifaddr = ipaddress.ip_interface(frr_unicode(ip_addr))
else:
- ifaddr = ipaddress.ip_interface(u"{}/{}".format(
- frr_unicode(ip_addr),
- frr_unicode(netmask)
- ))
+ ifaddr = ipaddress.ip_interface(
+ u"{}/{}".format(frr_unicode(ip_addr), frr_unicode(netmask))
+ )
cmd = "ip -{0} a flush {1} scope global && ip a add {2} dev {1} && ip l set {1} up".format(
ifaddr.version, name, ifaddr
)
@@ -1778,7 +1820,9 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
_diag_pct = kwargs.pop("diag_pct", diag_pct)
start_time = datetime.now()
- retry_until = datetime.now() + timedelta(seconds=_retry_timeout + _initial_wait)
+ retry_until = datetime.now() + timedelta(
+ seconds=_retry_timeout + _initial_wait
+ )
if initial_wait > 0:
logger.info("Waiting for [%s]s as initial delay", initial_wait)
@@ -1799,10 +1843,13 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
# Positive result, but happened after timeout failure, very important to
# note for fixing tests.
- logger.warning("RETRY DIAGNOSTIC: SUCCEED after FAILED with requested timeout of %.1fs; however, succeeded in %.1fs, investigate timeout timing",
- _retry_timeout, (datetime.now() - start_time).total_seconds())
+ logger.warning(
+ "RETRY DIAGNOSTIC: SUCCEED after FAILED with requested timeout of %.1fs; however, succeeded in %.1fs, investigate timeout timing",
+ _retry_timeout,
+ (datetime.now() - start_time).total_seconds(),
+ )
if isinstance(saved_failure, Exception):
- raise saved_failure # pylint: disable=E0702
+ raise saved_failure # pylint: disable=E0702
return saved_failure
except Exception as error:
@@ -1810,16 +1857,20 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
ret = error
if seconds_left < 0 and saved_failure:
- logger.info("RETRY DIAGNOSTIC: Retry timeout reached, still failing")
+ logger.info(
+ "RETRY DIAGNOSTIC: Retry timeout reached, still failing"
+ )
if isinstance(saved_failure, Exception):
- raise saved_failure # pylint: disable=E0702
+ raise saved_failure # pylint: disable=E0702
return saved_failure
if seconds_left < 0:
logger.info("Retry timeout of %ds reached", _retry_timeout)
saved_failure = ret
- retry_extra_delta = timedelta(seconds=seconds_left + _retry_timeout * _diag_pct)
+ retry_extra_delta = timedelta(
+ seconds=seconds_left + _retry_timeout * _diag_pct
+ )
retry_until = datetime.now() + retry_extra_delta
seconds_left = retry_extra_delta.total_seconds()
@@ -1833,11 +1884,17 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
return saved_failure
if saved_failure:
- logger.info("RETRY DIAG: [failure] Sleeping %ds until next retry with %.1f retry time left - too see if timeout was too short",
- retry_sleep, seconds_left)
+ logger.info(
+ "RETRY DIAG: [failure] Sleeping %ds until next retry with %.1f retry time left - to see if timeout was too short",
+ retry_sleep,
+ seconds_left,
+ )
else:
- logger.info("Sleeping %ds until next retry with %.1f retry time left",
- retry_sleep, seconds_left)
+ logger.info(
+ "Sleeping %ds until next retry with %.1f retry time left",
+ retry_sleep,
+ seconds_left,
+ )
sleep(retry_sleep)
func_retry._original = func
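The retry() decorator being re-indented here is the machinery behind all of the verify_* helpers: keep calling the wrapped function until it returns a good result or the deadline passes, then either raise the saved exception or hand back the last error, with an extra diagnostic grace period after the nominal timeout. A much-reduced sketch of the core loop (no diagnostic phase, fixed sleep) to make the control flow easier to follow:

    import functools
    import time
    from datetime import datetime, timedelta

    def retry(retry_timeout, initial_wait=0, retry_sleep=2):
        """Simplified sketch: retry the wrapped function until it returns
        True or retry_timeout seconds have elapsed."""

        def _retry(func):
            @functools.wraps(func)
            def func_retry(*args, **kwargs):
                if initial_wait:
                    time.sleep(initial_wait)
                deadline = datetime.now() + timedelta(seconds=retry_timeout)
                while True:
                    try:
                        ret = func(*args, **kwargs)
                        if ret is True:
                            return ret
                    except Exception as error:
                        ret = error
                    if datetime.now() > deadline:
                        # Out of time: hand the last failure back to the caller.
                        if isinstance(ret, Exception):
                            raise ret
                        return ret
                    time.sleep(retry_sleep)

            return func_retry

        return _retry

    @retry(retry_timeout=10)
    def verify_something():
        # Hypothetical check that eventually becomes True in a real test.
        return True

    print(verify_something())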
@@ -1961,12 +2018,13 @@ def create_interfaces_cfg(tgen, topo, build=False):
interface_data.append("ipv6 address {}".format(intf_addr))
# Wait for vrf interfaces to get link local address once they are up
- if not destRouterLink == 'lo' and 'vrf' in topo[c_router][
- 'links'][destRouterLink]:
- vrf = topo[c_router]['links'][destRouterLink]['vrf']
- intf = topo[c_router]['links'][destRouterLink]['interface']
- ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf,
- vrf = vrf)
+ if (
+ not destRouterLink == "lo"
+ and "vrf" in topo[c_router]["links"][destRouterLink]
+ ):
+ vrf = topo[c_router]["links"][destRouterLink]["vrf"]
+ intf = topo[c_router]["links"][destRouterLink]["interface"]
+ ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf, vrf=vrf)
if "ipv6-link-local" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"]
@@ -4447,7 +4505,7 @@ def required_linux_kernel_version(required_version):
return True
-class HostApplicationHelper (object):
+class HostApplicationHelper(object):
"""Helper to track and cleanup per-host based test processes."""
def __init__(self, tgen=None, base_cmd=None):
@@ -4458,14 +4516,14 @@ class HostApplicationHelper (object):
if tgen is not None:
self.init(tgen)
- def __enter__ (self):
+ def __enter__(self):
self.init()
return self
- def __exit__ (self ,type, value, traceback):
+ def __exit__(self, type, value, traceback):
self.cleanup()
- def __str__ (self):
+ def __str__(self):
return "HostApplicationHelper({})".format(self.base_cmd_str)
def set_base_cmd(self, base_cmd):
@@ -4519,13 +4577,37 @@ class HostApplicationHelper (object):
hlogger.debug("%s: %s: terminating process %s", self, host, p.pid)
rc = p.poll()
if rc is not None:
- logger.error("%s: %s: process early exit %s: %s", self, host, p.pid, comm_error(p))
- hlogger.error("%s: %s: process early exit %s: %s", self, host, p.pid, comm_error(p))
+ logger.error(
+ "%s: %s: process early exit %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ hlogger.error(
+ "%s: %s: process early exit %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
else:
p.terminate()
p.wait()
- logger.debug("%s: %s: terminated process %s: %s", self, host, p.pid, comm_error(p))
- hlogger.debug("%s: %s: terminated process %s: %s", self, host, p.pid, comm_error(p))
+ logger.debug(
+ "%s: %s: terminated process %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ hlogger.debug(
+ "%s: %s: terminated process %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
del self.host_procs[host]
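HostApplicationHelper is reformatted into conventional method signatures here; what it actually provides is a per-host registry of background processes that cleanup() (and therefore __exit__) terminates. A minimal standalone context manager with the same shape, tracking locally spawned commands rather than processes inside the emulated hosts (the class name below is hypothetical):

    import subprocess

    class HostProcHelper(object):
        """Hypothetical, cut-down cousin of HostApplicationHelper: start
        background commands per 'host' and terminate them all on exit."""

        def __init__(self):
            self.host_procs = {}

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.cleanup()

        def run(self, host, cmd):
            p = subprocess.Popen(cmd)
            self.host_procs.setdefault(host, []).append(p)
            return p

        def cleanup(self):
            for host, procs in self.host_procs.items():
                for p in procs:
                    if p.poll() is None:
                        p.terminate()
                        p.wait()
            self.host_procs = {}

    with HostProcHelper() as helper:
        helper.run("h1", ["sleep", "30"])
        # ... test body would run its traffic checks here ...
    # On exit the background sleep process has been terminated.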
@@ -4560,18 +4642,29 @@ class HostApplicationHelper (object):
rc = p.poll()
if rc is None:
continue
- logger.error("%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True)
- hlogger.error("%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True)
+ logger.error(
+ "%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True
+ )
+ hlogger.error(
+ "%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True
+ )
procs.append(p)
return procs
class IPerfHelper(HostApplicationHelper):
-
- def __str__ (self):
+ def __str__(self):
return "IPerfHelper()"
- def run_join(self, host, join_addr, l4Type="UDP", join_interval=1, join_intf=None, join_towards=None):
+ def run_join(
+ self,
+ host,
+ join_addr,
+ l4Type="UDP",
+ join_interval=1,
+ join_intf=None,
+ join_towards=None,
+ ):
"""
Use iperf to send IGMP join and listen to traffic
@@ -4601,14 +4694,17 @@ class IPerfHelper(HostApplicationHelper):
iperf_args.append("-B")
if join_towards:
- to_intf = frr_unicode(self.tgen.json_topo["routers"][host]["links"][join_towards]["interface"])
+ to_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][join_towards][
+ "interface"
+ ]
+ )
iperf_args.append("{}%{}".format(str(bindTo), to_intf))
elif join_intf:
iperf_args.append("{}%{}".format(str(bindTo), join_intf))
else:
iperf_args.append(str(bindTo))
-
if join_interval:
iperf_args.append("-i")
iperf_args.append(str(join_interval))
@@ -4619,8 +4715,9 @@ class IPerfHelper(HostApplicationHelper):
return False
return True
-
- def run_traffic(self, host, sentToAddress, ttl, time=0, l4Type="UDP", bind_towards=None):
+ def run_traffic(
+ self, host, sentToAddress, ttl, time=0, l4Type="UDP", bind_towards=None
+ ):
"""
Run iperf to send IGMP join and traffic
@@ -4646,7 +4743,9 @@ class IPerfHelper(HostApplicationHelper):
# Bind to Interface IP
if bind_towards:
- ifaddr = frr_unicode(self.tgen.json_topo["routers"][host]["links"][bind_towards]["ipv4"])
+ ifaddr = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][bind_towards]["ipv4"]
+ )
ipaddr = ipaddress.IPv4Interface(ifaddr).ip
iperf_args.append("-B")
iperf_args.append(str(ipaddr))
@@ -4669,7 +4768,9 @@ class IPerfHelper(HostApplicationHelper):
p = self.run(host, iperf_args)
if p.poll() is not None:
- logger.error("mcast traffic send failed for %s: %s", sendTo, comm_error(p))
+ logger.error(
+ "mcast traffic send failed for %s: %s", sendTo, comm_error(p)
+ )
return False
return True
@@ -4730,9 +4831,7 @@ def verify_ip_nht(tgen, input_dict):
return False
-def scapy_send_raw_packet(
- tgen, topo, senderRouter, intf, packet=None
-):
+def scapy_send_raw_packet(tgen, topo, senderRouter, intf, packet=None):
"""
Using scapy Raw() method to send BSR raw packet from one FRR
to another
@@ -4766,10 +4865,8 @@ def scapy_send_raw_packet(
python3_path = tgen.net.get_exec_path(["python3", "python"])
script_path = os.path.join(CD, "send_bsr_packet.py")
- cmd = (
- "{} {} '{}' '{}' --interval=1 --count=1".format(
- python3_path, script_path, packet, sender_interface
- )
+ cmd = "{} {} '{}' '{}' --interval=1 --count=1".format(
+ python3_path, script_path, packet, sender_interface
)
logger.info("Scapy cmd: \n %s", cmd)
diff --git a/tests/topotests/lib/exa-receive.py b/tests/topotests/lib/exa-receive.py
index 9b27bddea..2ea3a75a5 100755
--- a/tests/topotests/lib/exa-receive.py
+++ b/tests/topotests/lib/exa-receive.py
@@ -10,9 +10,13 @@ from sys import stdin
from datetime import datetime
parser = argparse.ArgumentParser()
-parser.add_argument("--no-timestamp", dest="timestamp", action="store_false", help='Disable timestamps')
-parser.add_argument("--logdir", default="/tmp/gearlogdir", help='The directory to store the peer log in')
-parser.add_argument("peer", type=int, help='The peer number')
+parser.add_argument(
+ "--no-timestamp", dest="timestamp", action="store_false", help="Disable timestamps"
+)
+parser.add_argument(
+ "--logdir", default="/tmp/gearlogdir", help="The directory to store the peer log in"
+)
+parser.add_argument("peer", type=int, help="The peer number")
args = parser.parse_args()
savepath = os.path.join(args.logdir, "peer{}-received.log".format(args.peer))
diff --git a/tests/topotests/lib/fixtures.py b/tests/topotests/lib/fixtures.py
index 5dac29fd3..9d8f63aac 100644
--- a/tests/topotests/lib/fixtures.py
+++ b/tests/topotests/lib/fixtures.py
@@ -26,6 +26,7 @@ import lib.topojson as topojson
import lib.topogen as topogen
from lib.topolog import logger
+
def tgen_json(request):
logger.info("Creating/starting topogen topology for %s", request.module.__name__)
diff --git a/tests/topotests/lib/ltemplate.py b/tests/topotests/lib/ltemplate.py
index 7d2ae4d40..910573c14 100644
--- a/tests/topotests/lib/ltemplate.py
+++ b/tests/topotests/lib/ltemplate.py
@@ -42,6 +42,7 @@ from lib.lutil import *
customize = None
+
class LTemplate:
test = None
testdir = None
@@ -56,11 +57,13 @@ class LTemplate:
global customize
if sys.version_info >= (3, 5):
import importlib.util
+
spec = importlib.util.spec_from_file_location("customize", pathname)
customize = importlib.util.module_from_spec(spec)
spec.loader.exec_module(customize)
else:
import imp
+
customize = imp.load_source("customize", pathname)
self.test = test
self.testdir = testdir
diff --git a/tests/topotests/lib/mcast-tester.py b/tests/topotests/lib/mcast-tester.py
index a594c4c88..30beccb78 100755
--- a/tests/topotests/lib/mcast-tester.py
+++ b/tests/topotests/lib/mcast-tester.py
@@ -35,12 +35,11 @@ import time
#
def interface_name_to_index(name):
"Gets the interface index using its name. Returns None on failure."
- interfaces = json.loads(
- subprocess.check_output('ip -j link show', shell=True))
+ interfaces = json.loads(subprocess.check_output("ip -j link show", shell=True))
for interface in interfaces:
- if interface['ifname'] == name:
- return interface['ifindex']
+ if interface["ifname"] == name:
+ return interface["ifindex"]
return None
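interface_name_to_index() relies on iproute2's JSON output (ip -j link show) rather than parsing fixed columns; the hunk above only switches the quoting style. The lookup on its own, runnable on a reasonably recent Linux host (a list argv is used here instead of shell=True):

    import json
    import subprocess

    def interface_name_to_index(name):
        "Gets the interface index using its name. Returns None on failure."
        interfaces = json.loads(
            subprocess.check_output(["ip", "-j", "link", "show"])
        )
        for interface in interfaces:
            if interface["ifname"] == name:
                return interface["ifindex"]
        return None

    print(interface_name_to_index("lo"))  # usually 1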
@@ -60,13 +59,12 @@ def multicast_join(sock, ifindex, group, port):
# Main code.
#
parser = argparse.ArgumentParser(description="Multicast RX utility")
-parser.add_argument('group', help='Multicast IP')
-parser.add_argument('interface', help='Interface name')
-parser.add_argument('--socket', help='Point to topotest UNIX socket')
+parser.add_argument("group", help="Multicast IP")
+parser.add_argument("interface", help="Interface name")
+parser.add_argument("--socket", help="Point to topotest UNIX socket")
parser.add_argument(
- '--send',
- help='Transmit instead of join with interval',
- type=float, default=0)
+ "--send", help="Transmit instead of join with interval", type=float, default=0
+)
args = parser.parse_args()
ttl = 16
@@ -75,7 +73,7 @@ port = 1000
# Get interface index/validate.
ifindex = interface_name_to_index(args.interface)
if ifindex is None:
- sys.stderr.write('Interface {} does not exists\n'.format(args.interface))
+ sys.stderr.write("Interface {} does not exist\n".format(args.interface))
sys.exit(1)
# We need root privileges to set up multicast.
@@ -102,17 +100,18 @@ msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if args.send > 0:
# Prepare multicast bit in that interface.
msock.setsockopt(
- socket.SOL_SOCKET, 25,
- struct.pack("%ds" % len(args.interface),
- args.interface.encode('utf-8')))
+ socket.SOL_SOCKET,
+ 25,
+ struct.pack("%ds" % len(args.interface), args.interface.encode("utf-8")),
+ )
# Set packets TTL.
- msock.setsockopt(
- socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl))
+ msock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl))
# Block to ensure packet send.
msock.setblocking(True)
else:
multicast_join(msock, ifindex, args.group, port)
+
def should_exit():
if not toposock:
# If we are sending then we have slept
@@ -122,12 +121,13 @@ def should_exit():
else:
try:
data = toposock.recv(1)
- if data == b'':
- print(' -> Connection closed')
+ if data == b"":
+ print(" -> Connection closed")
return True
except BlockingIOError:
return False
+
counter = 0
while not should_exit():
if args.send > 0:
diff --git a/tests/topotests/lib/micronet_cli.py b/tests/topotests/lib/micronet_cli.py
index b6cba81d8..6459d5d15 100644
--- a/tests/topotests/lib/micronet_cli.py
+++ b/tests/topotests/lib/micronet_cli.py
@@ -176,9 +176,11 @@ def cli_server(unet, server_sock):
for line in lineiter(sock):
line = line.strip()
+
def writef(x):
xb = x.encode("utf-8")
sock.send(xb)
+
if not doline(unet, line, writef):
return
sock.send(ENDMARKER)
@@ -220,7 +222,7 @@ def cli_client(sockpath, prompt="unet> "):
rb += lb
# Remove the marker
- rb = rb[:-len(ENDMARKER)]
+ rb = rb[: -len(ENDMARKER)]
# Write the output
sys.stdout.write(rb.decode("utf-8"))
@@ -239,7 +241,15 @@ def local_cli(unet, outf, prompt="unet> "):
return
-def cli(unet, histfile=None, sockpath=None, force_window=False, title=None, prompt=None, background=True):
+def cli(
+ unet,
+ histfile=None,
+ sockpath=None,
+ force_window=False,
+ title=None,
+ prompt=None,
+ background=True,
+):
if prompt is None:
prompt = "unet> "
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index 803909f35..c21cbf0dd 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -107,9 +107,7 @@ def create_router_ospf(tgen, topo=None, input_dict=None, build=False, load_confi
return result
-def __create_ospf_global(
- tgen, input_dict, router, build, load_config, ospf
-):
+def __create_ospf_global(tgen, input_dict, router, build, load_config, ospf):
"""
Helper API to create ospf global configuration.
@@ -184,8 +182,7 @@ def __create_ospf_global(
if del_log_adj_changes:
config_data.append("no log-adjacency-changes detail")
if log_adj_changes:
- config_data.append("log-adjacency-changes {}".format(
- log_adj_changes))
+ config_data.append("log-adjacency-changes {}".format(log_adj_changes))
# aggregation timer
aggr_timer = ospf_data.setdefault("aggr_timer", None)
@@ -193,8 +190,7 @@ def __create_ospf_global(
if del_aggr_timer:
config_data.append("no aggregation timer")
if aggr_timer:
- config_data.append("aggregation timer {}".format(
- aggr_timer))
+ config_data.append("aggregation timer {}".format(aggr_timer))
# maximum path information
ecmp_data = ospf_data.setdefault("maximum-paths", {})
@@ -242,12 +238,13 @@ def __create_ospf_global(
cmd = "no {}".format(cmd)
config_data.append(cmd)
- #def route information
+ # def route information
def_rte_data = ospf_data.setdefault("default-information", {})
if def_rte_data:
if "originate" not in def_rte_data:
- logger.debug("Router %s: 'originate key' not present in "
- "input_dict", router)
+ logger.debug(
+ "Router %s: 'originate key' not present in " "input_dict", router
+ )
else:
cmd = "default-information originate"
@@ -258,8 +255,7 @@ def __create_ospf_global(
cmd = cmd + " metric {}".format(def_rte_data["metric"])
if "metric-type" in def_rte_data:
- cmd = cmd + " metric-type {}".format(def_rte_data[
- "metric-type"])
+ cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"])
if "route-map" in def_rte_data:
cmd = cmd + " route-map {}".format(def_rte_data["route-map"])
@@ -284,19 +280,19 @@ def __create_ospf_global(
config_data.append(cmd)
try:
- if "area" in input_dict[router]['links'][neighbor][
- 'ospf6']:
+ if "area" in input_dict[router]["links"][neighbor]["ospf6"]:
iface = input_dict[router]["links"][neighbor]["interface"]
cmd = "interface {} area {}".format(
- iface, input_dict[router]['links'][neighbor][
- 'ospf6']['area'])
- if input_dict[router]['links'][neighbor].setdefault(
- "delete", False):
+ iface,
+ input_dict[router]["links"][neighbor]["ospf6"]["area"],
+ )
+ if input_dict[router]["links"][neighbor].setdefault(
+ "delete", False
+ ):
cmd = "no {}".format(cmd)
config_data.append(cmd)
except KeyError:
- pass
-
+ pass
# summary information
summary_data = ospf_data.setdefault("summary-address", {})
@@ -367,7 +363,9 @@ def __create_ospf_global(
return config_data
-def create_router_ospf6(tgen, topo=None, input_dict=None, build=False, load_config=True):
+def create_router_ospf6(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
"""
API to configure ospf on router
@@ -428,7 +426,9 @@ def create_router_ospf6(tgen, topo=None, input_dict=None, build=False, load_conf
return result
-def config_ospf_interface(tgen, topo=None, input_dict=None, build=False, load_config=True):
+def config_ospf_interface(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
"""
API to configure ospf on router.
@@ -633,7 +633,9 @@ def redistribute_ospf(tgen, topo, dut, route_type, **kwargs):
# Verification procs
################################
@retry(retry_timeout=80)
-def verify_ospf_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False, expected=True):
+def verify_ospf_neighbor(
+ tgen, topo=None, dut=None, input_dict=None, lan=False, expected=True
+):
"""
This API is to verify ospf neighborship by running
show ip ospf neighbour command,
@@ -1325,7 +1327,9 @@ def verify_ospf_rib(
@retry(retry_timeout=20)
-def verify_ospf_interface(tgen, topo=None, dut=None, lan=False, input_dict=None, expected=True):
+def verify_ospf_interface(
+ tgen, topo=None, dut=None, lan=False, input_dict=None, expected=True
+):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@@ -1621,21 +1625,21 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, ospf=None, expected=True):
rnode = tgen.routers()[dut]
if ospf:
- if 'ospf6' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format(
- router)
+ if "ospf6" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format(router)
return errormsg
- show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf summary detail json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf summary detail json", isjson=True
+ )
else:
- if 'ospf' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
- router)
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(router)
return errormsg
- show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf summary detail json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
@@ -1646,23 +1650,35 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, ospf=None, expected=True):
ospf_summary_data = input_dict
if ospf:
- show_ospf_json = show_ospf_json['default']
+ show_ospf_json = show_ospf_json["default"]
for ospf_summ, summ_data in ospf_summary_data.items():
if ospf_summ not in show_ospf_json:
continue
- summary = ospf_summary_data[ospf_summ]['Summary address']
+ summary = ospf_summary_data[ospf_summ]["Summary address"]
if summary in show_ospf_json:
for summ in summ_data:
if summ_data[summ] == show_ospf_json[summary][summ]:
- logger.info("[DUT: %s] OSPF summary %s:%s is %s",
- router, summary, summ, summ_data[summ])
+ logger.info(
+ "[DUT: %s] OSPF summary %s:%s is %s",
+ router,
+ summary,
+ summ,
+ summ_data[summ],
+ )
result = True
else:
- errormsg = ("[DUT: {}] OSPF summary {} : {} is {}, "
- "Expected is {}".format(router, summary, summ,show_ospf_json[
- summary][summ], summ_data[summ] ))
+ errormsg = (
+ "[DUT: {}] OSPF summary {} : {} is {}, "
+ "Expected is {}".format(
+ router,
+ summary,
+ summ,
+ show_ospf_json[summary][summ],
+ summ_data[summ],
+ )
+ )
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1670,8 +1686,9 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, ospf=None, expected=True):
@retry(retry_timeout=30)
-def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
- tag=None, metric=None, fib=None):
+def verify_ospf6_rib(
+ tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None
+):
"""
This API is to verify ospf routes by running
show ip ospf route command.
@@ -1946,7 +1963,7 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
@retry(retry_timeout=6)
-def verify_ospf6_interface(tgen, topo=None, dut=None,lan=False, input_dict=None):
+def verify_ospf6_interface(tgen, topo=None, dut=None, lan=False, input_dict=None):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@@ -1992,7 +2009,7 @@ def verify_ospf6_interface(tgen, topo=None, dut=None,lan=False, input_dict=None)
topo = tgen.json_topo
for router, rnode in tgen.routers().items():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
@@ -2328,7 +2345,9 @@ def verify_ospf6_database(tgen, topo, dut, input_dict):
return result
-def config_ospf6_interface(tgen, topo=None, input_dict=None, build=False, load_config=True):
+def config_ospf6_interface(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
"""
API to configure ospf on router.
@@ -2375,11 +2394,14 @@ def config_ospf6_interface(tgen, topo=None, input_dict=None, build=False, load_c
for router in input_dict.keys():
config_data = []
- for lnk in input_dict[router]['links'].keys():
- if "ospf6" not in input_dict[router]['links'][lnk]:
- logger.debug("Router %s: ospf6 config is not present in"
- "input_dict, passed input_dict %s", router,
- str(input_dict))
+ for lnk in input_dict[router]["links"].keys():
+ if "ospf6" not in input_dict[router]["links"][lnk]:
+ logger.debug(
+ "Router %s: ospf6 config is not present in"
+ "input_dict, passed input_dict %s",
+ router,
+ str(input_dict),
+ )
continue
ospf_data = input_dict[router]["links"][lnk]["ospf6"]
data_ospf_area = ospf_data.setdefault("area", None)
@@ -2454,6 +2476,7 @@ def config_ospf6_interface(tgen, topo=None, input_dict=None, build=False, load_c
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
+
@retry(retry_timeout=20)
def verify_ospf_gr_helper(tgen, topo, dut, input_dict=None):
"""
@@ -2481,37 +2504,43 @@ def verify_ospf_gr_helper(tgen, topo, dut, input_dict=None):
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
- if 'ospf' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
- dut)
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
return errormsg
rnode = tgen.routers()[dut]
logger.info("Verifying OSPF GR details on router %s:", dut)
- show_ospf_json = run_frr_cmd(rnode, "show ip ospf graceful-restart helper json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf graceful-restart helper json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF is not running"
- raise ValueError (errormsg)
+ raise ValueError(errormsg)
return errormsg
- for ospf_gr, gr_data in input_dict.items():
+ for ospf_gr, gr_data in input_dict.items():
try:
if input_dict[ospf_gr] == show_ospf_json[ospf_gr]:
- logger.info("[DUT: FRR] OSPF GR Helper: %s is %s", ospf_gr,
- show_ospf_json[ospf_gr])
+ logger.info(
+ "[DUT: FRR] OSPF GR Helper: %s is %s",
+ ospf_gr,
+ show_ospf_json[ospf_gr],
+ )
result = True
else:
- errormsg = ("[DUT: FRR] OSPF GR Helper: {} expected is {}, Found "
- "is {}".format(ospf_gr, input_dict[ospf_gr], show_ospf_json[
- ospf_gr]))
- raise ValueError (errormsg)
+ errormsg = (
+ "[DUT: FRR] OSPF GR Helper: {} expected is {}, Found "
+ "is {}".format(
+ ospf_gr, input_dict[ospf_gr], show_ospf_json[ospf_gr]
+ )
+ )
+ raise ValueError(errormsg)
return errormsg
except KeyError:
- errormsg = ("[DUT: FRR] OSPF GR Helper: {}".format(ospf_gr))
+ errormsg = "[DUT: FRR] OSPF GR Helper: {}".format(ospf_gr)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index a165878ca..9d3708821 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -99,9 +99,7 @@ def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True
continue
if "rp" not in input_dict[router]["pim"]:
continue
- _add_pim_rp_config(
- tgen, topo, input_dict, router, build, config_data_dict
- )
+ _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
try:
result = create_common_configurations(
@@ -153,8 +151,7 @@ def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
# ip address of RP
if "rp_addr" not in rp_dict and build:
logger.error(
- "Router %s: 'ip address of RP' not "
- "present in input_dict/JSON",
+ "Router %s: 'ip address of RP' not " "present in input_dict/JSON",
router,
)
@@ -199,9 +196,7 @@ def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
config_data.append(cmd)
if prefix_list:
- cmd = "ip pim rp {} prefix-list {}".format(
- rp_addr, prefix_list
- )
+ cmd = "ip pim rp {} prefix-list {}".format(rp_addr, prefix_list)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
@@ -357,9 +352,9 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
pim_data = input_dict[router]["pim"]
del_action = pim_data.setdefault("delete", False)
for t in [
- "join-prune-interval",
- "keep-alive-timer",
- "register-suppress-time",
+ "join-prune-interval",
+ "keep-alive-timer",
+ "register-suppress-time",
]:
if t in pim_data:
cmd = "ip pim {} {}".format(t, pim_data[t])
@@ -681,7 +676,14 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):
@retry(retry_timeout=60)
def verify_upstream_iif(
- tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1, expected=True
+ tgen,
+ dut,
+ iif,
+ src_address,
+ group_addresses,
+ joinState=None,
+ refCount=1,
+ expected=True,
):
"""
Verify upstream inbound interface is updated correctly
@@ -834,7 +836,9 @@ def verify_upstream_iif(
@retry(retry_timeout=12)
-def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, expected=True):
+def verify_join_state_and_timer(
+ tgen, dut, iif, src_address, group_addresses, expected=True
+):
"""
Verify join state is updated correctly and join timer is
running with the help of "show ip pim upstream" cli
@@ -955,7 +959,15 @@ def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, ex
@retry(retry_timeout=120)
def verify_ip_mroutes(
- tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0, expected=True
+ tgen,
+ dut,
+ src_address,
+ group_addresses,
+ iif,
+ oil,
+ return_uptime=False,
+ mwait=0,
+ expected=True,
):
"""
Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes
@@ -1152,7 +1164,15 @@ def verify_ip_mroutes(
@retry(retry_timeout=60)
def verify_pim_rp_info(
- tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None, expected=True
+ tgen,
+ topo,
+ dut,
+ group_addresses,
+ oif=None,
+ rp=None,
+ source=None,
+ iamrp=None,
+ expected=True,
):
"""
Verify pim rp info by running "show ip pim rp-info" cli
@@ -1309,7 +1329,14 @@ def verify_pim_rp_info(
@retry(retry_timeout=60)
def verify_pim_state(
- tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None, expected=True
+ tgen,
+ dut,
+ iif,
+ oil,
+ group_addresses,
+ src_address=None,
+ installed_fl=None,
+ expected=True,
):
"""
Verify pim state by running "show ip pim state" cli
@@ -1478,7 +1505,9 @@ def verify_pim_interface_traffic(tgen, input_dict):
@retry(retry_timeout=40)
-def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None, expected=True):
+def verify_pim_interface(
+ tgen, topo, dut, interface=None, interface_ip=None, expected=True
+):
"""
Verify all PIM interfaces are up and running, config is verified
using "show ip pim interface" cli
@@ -2150,7 +2179,9 @@ def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None):
@retry(retry_timeout=12)
-def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True):
+def verify_pim_grp_rp_source(
+ tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True
+):
"""
Verify pim rp info by running "show ip pim rp-info" cli
@@ -2309,7 +2340,9 @@ def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True):
@retry(retry_timeout=60)
-def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None, expected=True):
+def verify_ip_pim_upstream_rpf(
+ tgen, topo, dut, interface, group_addresses, rp=None, expected=True
+):
"""
Verify IP PIM upstream rpf, config is verified
using "show ip pim neighbor" cli
@@ -2507,7 +2540,9 @@ def enable_disable_pim_bsm(tgen, router, intf, enable=True):
@retry(retry_timeout=60)
-def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None, expected=True):
+def verify_ip_pim_join(
+ tgen, topo, dut, interface, group_addresses, src_address=None, expected=True
+):
"""
Verify ip pim join by running "show ip pim join" cli
@@ -3257,7 +3292,9 @@ def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses):
@retry(retry_timeout=40)
-def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag, expected=True):
+def verify_multicast_flag_state(
+ tgen, dut, src_address, group_addresses, flag, expected=True
+):
"""
Verify flag state for mroutes and make sure (*, G)/(S, G) have the
correct flags by running "show ip mroute" cli
@@ -3417,8 +3454,7 @@ def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip, expected=Tr
return True
-class McastTesterHelper (HostApplicationHelper):
-
+class McastTesterHelper(HostApplicationHelper):
def __init__(self, tgen=None):
self.script_path = os.path.join(CWD, "mcast-tester.py")
self.host_conn = {}
@@ -3441,10 +3477,10 @@ class McastTesterHelper (HostApplicationHelper):
super(McastTesterHelper, self).__init__(
tgen,
# [python3_path, self.script_path, self.app_sock_path]
- [python3_path, self.script_path]
+ [python3_path, self.script_path],
)
- def __str__ (self):
+ def __str__(self):
return "McastTesterHelper({})".format(self.script_path)
def run_join(self, host, join_addrs, join_towards=None, join_intf=None):
@@ -3464,7 +3500,9 @@ class McastTesterHelper (HostApplicationHelper):
join_addrs = [join_addrs]
if join_towards:
- join_intf = frr_unicode(self.tgen.json_topo["routers"][host]["links"][join_towards]["interface"])
+ join_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][join_towards]["interface"]
+ )
else:
assert join_intf
@@ -3486,7 +3524,9 @@ class McastTesterHelper (HostApplicationHelper):
* `bind_towards`: Router whose interface the source ip address is taken from
"""
if bind_towards:
- bind_intf = frr_unicode(self.tgen.json_topo["routers"][host]["links"][bind_towards]["interface"])
+ bind_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][bind_towards]["interface"]
+ )
else:
assert bind_intf
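The pim.py hunks reflow McastTesterHelper without changing behavior; together with the run_join() signature above they show how tests drive the helper. A hedged sketch (host "i1", router "r1" and the group address are placeholders):

from lib.pim import McastTesterHelper
from lib.topogen import get_topogen

app_helper = McastTesterHelper()


def test_send_igmp_join():
    tgen = get_topogen()
    app_helper.init(tgen)
    # join_towards names the router whose link supplies the joining interface;
    # the helper resolves the interface name from tgen.json_topo.
    app_helper.run_join("i1", ["229.1.2.3"], join_towards="r1")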
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 6d60e3d00..fa943b299 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -110,9 +110,7 @@ def get_exabgp_cmd(commander=None):
return False
version = m.group(1)
if topotest.version_cmp(version, "4") >= 0:
- logging.debug(
- "found exabgp version >= 4 in %s will keep looking", exacmd
- )
+ logging.debug("found exabgp version >= 4 in %s will keep looking", exacmd)
return False
logger.info("Using ExaBGP version %s in %s", version, exacmd)
return True
@@ -172,7 +170,6 @@ class Topogen(object):
self.exabgp_cmd = None
self._init_topo(topodef)
-
logger.info("loading topology: {}".format(self.modname))
# @staticmethod
@@ -209,7 +206,9 @@ class Topogen(object):
# Create new log directory
self.logdir = topotest.get_logs_path(g_extra_config["rundir"])
- subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(self.logdir), shell=True)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(self.logdir), shell=True
+ )
try:
routertype = self.config.get(self.CONFIG_SECTION, "routertype")
# Only allow group, if it exist.
@@ -241,6 +240,7 @@ class Topogen(object):
# switch away from this use here to the topojson
# fixutre and remove this case
from lib.topojson import build_topo_from_json
+
with open(topodef, "r") as topof:
self.json_topo = json.load(topof)
build_topo_from_json(self, self.json_topo)
@@ -250,7 +250,11 @@ class Topogen(object):
def add_topology_from_dict(self, topodef):
- keylist = topodef.keys() if isinstance(topodef, OrderedDict) else sorted(topodef.keys())
+ keylist = (
+ topodef.keys()
+ if isinstance(topodef, OrderedDict)
+ else sorted(topodef.keys())
+ )
# ---------------------------
# Create all referenced hosts
# ---------------------------
@@ -823,11 +827,15 @@ class TopoRouter(TopoGear):
for daemon, enabled in nrouter.daemons.items():
if enabled and daemon != "snmpd":
self.vtysh_cmd(
- "\n".join(["clear log cmdline-targets",
- "conf t",
- "log file {}.log debug".format(daemon),
- "log commands",
- "log timestamp precision 3"]),
+ "\n".join(
+ [
+ "clear log cmdline-targets",
+ "conf t",
+ "log file {}.log debug".format(daemon),
+ "log commands",
+ "log timestamp precision 3",
+ ]
+ ),
daemon=daemon,
)
@@ -836,7 +844,9 @@ class TopoRouter(TopoGear):
elif nrouter.daemons["ldpd"] == 1 or nrouter.daemons["pathd"] == 1:
# Enable MPLS processing on all interfaces.
for interface in self.links:
- topotest.sysctl_assure(nrouter, "net.mpls.conf.{}.input".format(interface), 1)
+ topotest.sysctl_assure(
+ nrouter, "net.mpls.conf.{}.input".format(interface), 1
+ )
return result
@@ -867,11 +877,15 @@ class TopoRouter(TopoGear):
enabled = nrouter.daemons[daemon]
if enabled and daemon != "snmpd":
self.vtysh_cmd(
- "\n".join(["clear log cmdline-targets",
- "conf t",
- "log file {}.log debug".format(daemon),
- "log commands",
- "log timestamp precision 3"]),
+ "\n".join(
+ [
+ "clear log cmdline-targets",
+ "conf t",
+ "log file {}.log debug".format(daemon),
+ "log commands",
+ "log timestamp precision 3",
+ ]
+ ),
daemon=daemon,
)
@@ -913,7 +927,7 @@ class TopoRouter(TopoGear):
if dbgout:
if "\n" in dbgout:
dbgout = dbgout.replace("\n", "\n\t")
- self.logger.info('vtysh result:\n\t{}'.format(dbgout))
+ self.logger.info("vtysh result:\n\t{}".format(dbgout))
else:
self.logger.info('vtysh result: "{}"'.format(dbgout))
@@ -923,7 +937,12 @@ class TopoRouter(TopoGear):
try:
return json.loads(output)
except ValueError as error:
- logger.warning("vtysh_cmd: %s: failed to convert json output: %s: %s", self.name, str(output), str(error))
+ logger.warning(
+ "vtysh_cmd: %s: failed to convert json output: %s: %s",
+ self.name,
+ str(output),
+ str(error),
+ )
return {}
def vtysh_multicmd(self, commands, pretty_output=True, daemon=None):
@@ -950,7 +969,7 @@ class TopoRouter(TopoGear):
dbgcmds = commands if is_string(commands) else "\n".join(commands)
dbgcmds = "\t" + dbgcmds.replace("\n", "\n\t")
- self.logger.info('vtysh command => FILE:\n{}'.format(dbgcmds))
+ self.logger.info("vtysh command => FILE:\n{}".format(dbgcmds))
res = self.run(vtysh_command)
os.unlink(fname)
@@ -959,7 +978,7 @@ class TopoRouter(TopoGear):
if dbgres:
if "\n" in dbgres:
dbgres = dbgres.replace("\n", "\n\t")
- self.logger.info('vtysh result:\n\t{}'.format(dbgres))
+ self.logger.info("vtysh result:\n\t{}".format(dbgres))
else:
self.logger.info('vtysh result: "{}"'.format(dbgres))
return res
@@ -1171,7 +1190,6 @@ def diagnose_env_linux(rundir):
logger.info("Running environment diagnostics")
-
# Assert that we are running as root
if os.getuid() != 0:
logger.error("you must run topotest as root")
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
index d8e0e5d18..beb298eb3 100644
--- a/tests/topotests/lib/topojson.py
+++ b/tests/topotests/lib/topojson.py
@@ -28,13 +28,18 @@ from re import search as re_search
import pytest
from lib.bgp import create_router_bgp
-from lib.common_config import (create_bgp_community_lists,
- create_interfaces_cfg, create_prefix_lists,
- create_route_maps, create_static_routes,
- create_vrf_cfg, load_config_to_routers,
- start_topology,
- topo_daemons,
- number_to_column)
+from lib.common_config import (
+ create_bgp_community_lists,
+ create_interfaces_cfg,
+ create_prefix_lists,
+ create_route_maps,
+ create_static_routes,
+ create_vrf_cfg,
+ load_config_to_routers,
+ start_topology,
+ topo_daemons,
+ number_to_column,
+)
from lib.ospf import create_router_ospf, create_router_ospf6
from lib.pim import create_igmp_config, create_pim_config
from lib.topolog import logger
@@ -305,7 +310,9 @@ def linux_intf_config_from_json(tgen, topo=None):
if "ipv4" in link:
router.cmd_raises("ip addr add {} dev {}".format(link["ipv4"], lname))
if "ipv6" in link:
- router.cmd_raises("ip -6 addr add {} dev {}".format(link["ipv6"], lname))
+ router.cmd_raises(
+ "ip -6 addr add {} dev {}".format(link["ipv6"], lname)
+ )
def build_config_from_json(tgen, topo=None, save_bkup=True):
@@ -357,7 +364,8 @@ def create_tgen_from_json(testfile, json_file=None):
from the `testfile` first by trying to replace `.py` by `.json` and if that isn't
present then by removing `test_` prefix as well.
"""
- from lib.topogen import Topogen # Topogen imports this module too
+ from lib.topogen import Topogen # Topogen imports this module too
+
thisdir = os.path.dirname(os.path.realpath(testfile))
basename = os.path.basename(testfile)
logger.debug("starting standard JSON based module setup for %s", basename)
diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py
index b2251cbef..9cc338620 100644
--- a/tests/topotests/lib/topolog.py
+++ b/tests/topotests/lib/topolog.py
@@ -39,9 +39,11 @@ else:
try:
from xdist import is_xdist_controller
except ImportError:
+
def is_xdist_controller():
return False
+
BASENAME = "topolog"
# Helper dictionary to convert Topogen logging levels to Python's logging.
@@ -95,10 +97,11 @@ def get_logger(name, log_level=None, target=None):
# nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
+
def get_test_logdir(nodeid=None):
"""Get log directory relative pathname."""
xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
- mode = os.getenv("PYTEST_XDIST_MODE", "no")
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
if not nodeid:
nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
@@ -114,9 +117,7 @@ def get_test_logdir(nodeid=None):
return os.path.join(path, testname)
else:
assert (
- mode == "no" or
- mode == "loadfile" or
- mode == "loadscope"
+ mode == "no" or mode == "loadfile" or mode == "loadscope"
), "Unknown dist mode {}".format(mode)
return path
@@ -125,7 +126,7 @@ def get_test_logdir(nodeid=None):
def logstart(nodeid, location, rundir):
"""Called from pytest before module setup."""
- mode = os.getenv("PYTEST_XDIST_MODE", "no")
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
# We only per-test log in the workers (or non-dist)
@@ -137,7 +138,9 @@ def logstart(nodeid, location, rundir):
rel_log_dir = get_test_logdir(nodeid)
exec_log_dir = os.path.join(rundir, rel_log_dir)
- subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True
+ )
exec_log_path = os.path.join(exec_log_dir, "exec.log")
# Add test based exec log handler
@@ -145,7 +148,9 @@ def logstart(nodeid, location, rundir):
handlers[handler_id] = h
if worker:
- logger.info("Logging on worker %s for %s into %s", worker, handler_id, exec_log_path)
+ logger.info(
+ "Logging on worker %s for %s into %s", worker, handler_id, exec_log_path
+ )
else:
logger.info("Logging for %s into %s", handler_id, exec_log_path)
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 76a9430fa..39b01203e 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -54,6 +54,7 @@ from lib.micronet_compat import Node
g_extra_config = {}
+
def get_logs_path(rundir):
logspath = topolog.get_test_logdir()
return os.path.join(rundir, logspath)
@@ -1084,7 +1085,7 @@ def _sysctl_atleast(commander, variable, min_value):
else:
valstr = str(min_value)
logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
- commander.cmd_raises("sysctl -w {}=\"{}\"\n".format(variable, valstr))
+ commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
def _sysctl_assure(commander, variable, value):
@@ -1115,7 +1116,7 @@ def _sysctl_assure(commander, variable, value):
else:
valstr = str(value)
logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
- commander.cmd_raises("sysctl -w {}=\"{}\"\n".format(variable, valstr))
+ commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
def sysctl_atleast(commander, variable, min_value, raises=False):
@@ -1126,7 +1127,9 @@ def sysctl_atleast(commander, variable, min_value, raises=False):
except subprocess.CalledProcessError as error:
logger.warning(
"%s: Failed to assure sysctl min value %s = %s",
- commander, variable, min_value
+ commander,
+ variable,
+ min_value,
)
if raises:
raise
@@ -1140,7 +1143,10 @@ def sysctl_assure(commander, variable, value, raises=False):
except subprocess.CalledProcessError as error:
logger.warning(
"%s: Failed to assure sysctl value %s = %s",
- commander, variable, value, exc_info=True
+ commander,
+ variable,
+ value,
+ exc_info=True,
)
if raises:
raise
@@ -1156,8 +1162,7 @@ def rlimit_atleast(rname, min_value, raises=False):
resource.setrlimit(rname, nval)
except subprocess.CalledProcessError as error:
logger.warning(
- "Failed to assure rlimit [%s] = %s",
- rname, min_value, exc_info=True
+ "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
)
if raises:
raise
@@ -1166,8 +1171,8 @@ def rlimit_atleast(rname, min_value, raises=False):
def fix_netns_limits(ns):
# Maximum read and write socket buffer sizes
- sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10*1024, 87380, 16*2**20])
- sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10*1024, 87380, 16*2**20])
+ sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
+ sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
@@ -1210,10 +1215,10 @@ def fix_netns_limits(ns):
def fix_host_limits():
"""Increase system limits."""
- rlimit_atleast(resource.RLIMIT_NPROC, 8*1024)
- rlimit_atleast(resource.RLIMIT_NOFILE, 16*1024)
- sysctl_atleast(None, "fs.file-max", 16*1024)
- sysctl_atleast(None, "kernel.pty.max", 16*1024)
+ rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
+ rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
+ sysctl_atleast(None, "fs.file-max", 16 * 1024)
+ sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
# Enable coredumps
# Original on ubuntu 17.x, but apport won't save as in namespace
@@ -1223,20 +1228,20 @@ def fix_host_limits():
sysctl_assure(None, "fs.suid_dumpable", 1)
# Maximum connection backlog
- sysctl_atleast(None, "net.core.netdev_max_backlog", 4*1024)
+ sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
# Maximum read and write socket buffer sizes
- sysctl_atleast(None, "net.core.rmem_max", 16 * 2**20)
- sysctl_atleast(None, "net.core.wmem_max", 16 * 2**20)
+ sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
+ sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
# Garbage Collection Settings for ARP and Neighbors
- sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4*1024)
- sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8*1024)
- sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4*1024)
- sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8*1024)
+ sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
+ sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
+ sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
+ sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
# Hold entries for 10 minutes
- sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
- sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
+ sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
+ sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
# igmp
sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
@@ -1245,22 +1250,21 @@ def fix_host_limits():
sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
# Increase routing table size to 128K
- sysctl_atleast(None, "net.ipv4.route.max_size", 128*1024)
- sysctl_atleast(None, "net.ipv6.route.max_size", 128*1024)
+ sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
+ sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
def setup_node_tmpdir(logdir, name):
# Cleanup old log, valgrind, and core files.
subprocess.check_call(
- "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(
- logdir, name
- ),
- shell=True
+ "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
)
# Setup the per node directory.
nodelogdir = "{}/{}".format(logdir, name)
- subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
+ )
logfile = "{0}/{1}.log".format(logdir, name)
return logfile
@@ -1382,7 +1386,9 @@ class Router(Node):
# Return count of running daemons
def listDaemons(self):
ret = []
- rc, stdout, _ = self.cmd_status("ls -1 /var/run/%s/*.pid" % self.routertype, warn=False)
+ rc, stdout, _ = self.cmd_status(
+ "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
+ )
if rc:
return ret
for d in stdout.strip().split("\n"):
@@ -1394,7 +1400,13 @@ class Router(Node):
# probably not compatible with bsd.
rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
if rc:
- logger.warning("%s: %s exited leaving pidfile %s (%s)", self.name, name, pidfile, pid)
+ logger.warning(
+ "%s: %s exited leaving pidfile %s (%s)",
+ self.name,
+ name,
+ pidfile,
+ pid,
+ )
self.cmd("rm -- " + pidfile)
else:
ret.append((name, pid))
@@ -1414,7 +1426,9 @@ class Router(Node):
try:
os.kill(pid, signal.SIGTERM)
except OSError as err:
- logger.info("%s: could not kill %s (%s): %s", self.name, name, pid, str(err))
+ logger.info(
+ "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
+ )
running = self.listDaemons()
if running:
@@ -1432,14 +1446,18 @@ class Router(Node):
if not running:
return ""
- logger.warning("%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running]))
+ logger.warning(
+ "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
+ )
for name, pid in running:
pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
logger.info("%s: killing %s", self.name, name)
self.cmd("kill -SIGBUS %d" % pid)
self.cmd("rm -- " + pidfile)
- sleep(0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name)
+ sleep(
+ 0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
+ )
errors = self.checkRouterCores(reportOnce=True)
if self.checkRouterVersion("<", minErrorVersion):
@@ -1657,16 +1675,28 @@ class Router(Node):
cmdenv = "ASAN_OPTIONS="
if asan_abort:
cmdenv = "abort_on_error=1:"
- cmdenv += "log_path={0}/{1}.{2}.asan ".format(self.logdir, self.name, daemon)
+ cmdenv += "log_path={0}/{1}.{2}.asan ".format(
+ self.logdir, self.name, daemon
+ )
if valgrind_memleaks:
- this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
- supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp"))
- cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file)
+ this_dir = os.path.dirname(
+ os.path.abspath(os.path.realpath(__file__))
+ )
+ supp_file = os.path.abspath(
+ os.path.join(this_dir, "../../../tools/valgrind.supp")
+ )
+ cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
+ daemon, self.logdir, self.name, supp_file
+ )
if valgrind_extra:
- cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes"
+ cmdenv += (
+ "--gen-suppressions=all --expensive-definedness-checks=yes"
+ )
elif daemon in strace_daemons or "all" in strace_daemons:
- cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(daemon, self.logdir, self.name)
+ cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
+ daemon, self.logdir, self.name
+ )
cmdopt = "{} --log file:{}.log --log-level debug".format(
daemon_opts, daemon
@@ -1694,7 +1724,9 @@ class Router(Node):
self.run_in_window(gdbcmd, daemon)
- logger.info("%s: %s %s launched in gdb window", self, self.routertype, daemon)
+ logger.info(
+ "%s: %s %s launched in gdb window", self, self.routertype, daemon
+ )
else:
if daemon != "snmpd":
cmdopt += " -d "
@@ -1705,9 +1737,16 @@ class Router(Node):
except subprocess.CalledProcessError as error:
self.logger.error(
'%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
- self, daemon, error.returncode, error.cmd,
- '\n:stdout: "{}"'.format(error.stdout.strip()) if error.stdout else "",
- '\n:stderr: "{}"'.format(error.stderr.strip()) if error.stderr else "",
+ self,
+ daemon,
+ error.returncode,
+ error.cmd,
+ '\n:stdout: "{}"'.format(error.stdout.strip())
+ if error.stdout
+ else "",
+ '\n:stderr: "{}"'.format(error.stderr.strip())
+ if error.stderr
+ else "",
)
else:
logger.info("%s: %s %s started", self, self.routertype, daemon)
@@ -1738,7 +1777,7 @@ class Router(Node):
# Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
_, output, _ = self.cmd_status(
"for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
- stderr=subprocess.STDOUT
+ stderr=subprocess.STDOUT,
)
logger.debug("Set MACs:\n%s", output)
diff --git a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py
index dc08ae1af..138e19098 100644
--- a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py
+++ b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py
@@ -38,6 +38,7 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
+
# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
@@ -48,6 +49,7 @@ pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd, pytest.mark.pimd]
app_helper = McastTesterHelper()
+
def build_topo(tgen):
"Build function"
@@ -104,7 +106,6 @@ def setup_module(mod):
app_helper.init(tgen)
-
def test_wait_ospf_convergence():
"Wait for OSPF to converge"
tgen = get_topogen()
@@ -120,7 +121,7 @@ def test_wait_ospf_convergence():
topotest.router_json_cmp,
tgen.gears[router],
"show {} route json".format(iptype),
- {route: [{"protocol": proto}]}
+ {route: [{"protocol": proto}]},
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=1)
assertmsg = '"{}" OSPF convergence failure'.format(router)
@@ -152,12 +153,14 @@ def test_wait_msdp_convergence():
def expect_msdp_peer(router, peer, sa_count=0):
"Expect MSDP peer connection to be established with SA amount."
- logger.info("waiting MSDP connection from peer {} on router {}".format(peer, router))
+ logger.info(
+ "waiting MSDP connection from peer {} on router {}".format(peer, router)
+ )
test_func = partial(
topotest.router_json_cmp,
tgen.gears[router],
"show ip msdp peer json",
- {peer: {"state": "established", "saCount": sa_count}}
+ {peer: {"state": "established", "saCount": sa_count}},
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
assertmsg = '"{}" MSDP connection failure'.format(router)
@@ -198,7 +201,7 @@ def test_msdp_sa_configuration():
topotest.router_json_cmp,
tgen.gears[router],
"show ip msdp sa json",
- {group: {source: {"local": local, "rp": rp, "sptSetup": spt_setup}}}
+ {group: {source: {"local": local, "rp": rp, "sptSetup": spt_setup}}},
)
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assertmsg = '"{}" MSDP SA failure'.format(router)
diff --git a/tests/topotests/msdp_topo1/test_msdp_topo1.py b/tests/topotests/msdp_topo1/test_msdp_topo1.py
index c30c397ee..46ccd5e59 100755
--- a/tests/topotests/msdp_topo1/test_msdp_topo1.py
+++ b/tests/topotests/msdp_topo1/test_msdp_topo1.py
@@ -39,6 +39,7 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
+
# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
@@ -111,6 +112,7 @@ def setup_module(mod):
app_helper.init(tgen)
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
@@ -159,6 +161,7 @@ def test_bgp_convergence():
expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp")
expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp")
+
def _test_mroute_install():
"Test that multicast routes propagated and installed"
tgen = get_topogen()
@@ -169,39 +172,36 @@ def _test_mroute_install():
# Test R1 mroute
#
expect_1 = {
- '229.1.2.3': {
- '192.168.10.100': {
- 'iif': 'r1-eth2',
- 'flags': 'SFT',
- 'oil': {
- 'r1-eth0': {
- 'source': '192.168.10.100',
- 'group': '229.1.2.3'
- },
- 'r1-eth1': None
- }
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "iif": "r1-eth2",
+ "flags": "SFT",
+ "oil": {
+ "r1-eth0": {"source": "192.168.10.100", "group": "229.1.2.3"},
+ "r1-eth1": None,
+ },
}
}
}
# Create a deep copy of `expect_1`.
expect_2 = json.loads(json.dumps(expect_1))
# The route will be either via R2 or R3.
- expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth0'] = None
- expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth1'] = {
- 'source': '192.168.10.100',
- 'group': '229.1.2.3'
+ expect_2["229.1.2.3"]["192.168.10.100"]["oil"]["r1-eth0"] = None
+ expect_2["229.1.2.3"]["192.168.10.100"]["oil"]["r1-eth1"] = {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
}
def test_r1_mroute():
"Test r1 multicast routing table function"
- out = tgen.gears['r1'].vtysh_cmd('show ip mroute json', isjson=True)
+ out = tgen.gears["r1"].vtysh_cmd("show ip mroute json", isjson=True)
if topotest.json_cmp(out, expect_1) is None:
return None
return topotest.json_cmp(out, expect_2)
- logger.info('Waiting for R1 multicast routes')
+ logger.info("Waiting for R1 multicast routes")
_, val = topotest.run_and_expect(test_r1_mroute, None, count=55, wait=2)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
#
# Test routers 2 and 3.
@@ -218,7 +218,7 @@ def _test_mroute_install():
"source": "192.168.10.100",
"group": "229.1.2.3",
}
- }
+ },
}
}
}
@@ -232,24 +232,24 @@ def _test_mroute_install():
"source": "192.168.10.100",
"group": "229.1.2.3",
}
- }
+ },
}
}
}
def test_r2_r3_mroute():
"Test r2/r3 multicast routing table function"
- r2_out = tgen.gears['r2'].vtysh_cmd('show ip mroute json', isjson=True)
- r3_out = tgen.gears['r3'].vtysh_cmd('show ip mroute json', isjson=True)
+ r2_out = tgen.gears["r2"].vtysh_cmd("show ip mroute json", isjson=True)
+ r3_out = tgen.gears["r3"].vtysh_cmd("show ip mroute json", isjson=True)
if topotest.json_cmp(r2_out, expect_r2) is not None:
return topotest.json_cmp(r3_out, expect_r3)
return topotest.json_cmp(r2_out, expect_r2)
- logger.info('Waiting for R2 and R3 multicast routes')
+ logger.info("Waiting for R2 and R3 multicast routes")
_, val = topotest.run_and_expect(test_r2_r3_mroute, None, count=55, wait=2)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
#
# Test router 4
@@ -264,15 +264,15 @@ def _test_mroute_install():
"source": "*",
"group": "229.1.2.3",
"inboundInterface": "lo",
- "outboundInterface": "pimreg"
+ "outboundInterface": "pimreg",
},
"r4-eth2": {
"source": "*",
"group": "229.1.2.3",
"inboundInterface": "lo",
- "outboundInterface": "r4-eth2"
- }
- }
+ "outboundInterface": "r4-eth2",
+ },
+ },
},
"192.168.10.100": {
"iif": "r4-eth0",
@@ -284,18 +284,21 @@ def _test_mroute_install():
"inboundInterface": "r4-eth0",
"outboundInterface": "r4-eth2",
}
- }
- }
+ },
+ },
}
}
test_func = partial(
topotest.router_json_cmp,
- tgen.gears['r4'], "show ip mroute json", expect_4,
+ tgen.gears["r4"],
+ "show ip mroute json",
+ expect_4,
)
- logger.info('Waiting for R4 multicast routes')
+ logger.info("Waiting for R4 multicast routes")
_, val = topotest.run_and_expect(test_func, None, count=55, wait=2)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
+
def test_mroute_install():
tgen = get_topogen()
@@ -312,6 +315,7 @@ def test_mroute_install():
_test_mroute_install()
+
def test_msdp():
"""
Test MSDP convergence.
@@ -330,13 +334,13 @@ def test_msdp():
"192.168.0.2": {
"peer": "192.168.0.2",
"local": "192.168.0.1",
- "state": "established"
+ "state": "established",
},
"192.168.1.2": {
"peer": "192.168.1.2",
"local": "192.168.1.1",
- "state": "established"
- }
+ "state": "established",
+ },
}
r1_sa_expect = {
"229.1.2.3": {
@@ -345,7 +349,7 @@ def test_msdp():
"group": "229.1.2.3",
"rp": "-",
"local": "yes",
- "sptSetup": "-"
+ "sptSetup": "-",
}
}
}
@@ -353,13 +357,13 @@ def test_msdp():
"192.168.0.1": {
"peer": "192.168.0.1",
"local": "192.168.0.2",
- "state": "established"
+ "state": "established",
},
"192.168.2.2": {
"peer": "192.168.2.2",
"local": "192.168.2.1",
- "state": "established"
- }
+ "state": "established",
+ },
}
# Only R2 or R3 will get this SA.
r2_r3_sa_expect = {
@@ -377,25 +381,25 @@ def test_msdp():
"192.168.1.1": {
"peer": "192.168.1.1",
"local": "192.168.1.2",
- "state": "established"
+ "state": "established",
},
- #"192.169.3.2": {
+ # "192.169.3.2": {
# "peer": "192.168.3.2",
# "local": "192.168.3.1",
# "state": "established"
- #}
+ # }
}
r4_expect = {
"192.168.2.1": {
"peer": "192.168.2.1",
"local": "192.168.2.2",
- "state": "established"
+ "state": "established",
},
- #"192.168.3.1": {
+ # "192.168.3.1": {
# "peer": "192.168.3.1",
# "local": "192.168.3.2",
# "state": "established"
- #}
+ # }
}
r4_sa_expect = {
"229.1.2.3": {
@@ -404,30 +408,36 @@ def test_msdp():
"group": "229.1.2.3",
"rp": "192.168.1.1",
"local": "no",
- "sptSetup": "yes"
+ "sptSetup": "yes",
}
}
}
- for router in [('r1', r1_expect, r1_sa_expect),
- ('r2', r2_expect, r2_r3_sa_expect),
- ('r3', r3_expect, r2_r3_sa_expect),
- ('r4', r4_expect, r4_sa_expect)]:
+ for router in [
+ ("r1", r1_expect, r1_sa_expect),
+ ("r2", r2_expect, r2_r3_sa_expect),
+ ("r3", r3_expect, r2_r3_sa_expect),
+ ("r4", r4_expect, r4_sa_expect),
+ ]:
test_func = partial(
topotest.router_json_cmp,
- tgen.gears[router[0]], "show ip msdp peer json", router[1]
+ tgen.gears[router[0]],
+ "show ip msdp peer json",
+ router[1],
)
- logger.info('Waiting for {} msdp peer data'.format(router[0]))
+ logger.info("Waiting for {} msdp peer data".format(router[0]))
_, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
test_func = partial(
topotest.router_json_cmp,
- tgen.gears[router[0]], "show ip msdp sa json", router[2]
+ tgen.gears[router[0]],
+ "show ip msdp sa json",
+ router[2],
)
- logger.info('Waiting for {} msdp SA data'.format(router[0]))
+ logger.info("Waiting for {} msdp SA data".format(router[0]))
_, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
def test_memory_leak():
diff --git a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
index 63701871d..5f641b528 100644
--- a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
+++ b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
@@ -93,7 +93,6 @@ from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
-
TOPOLOGY = """
b1_____
@@ -675,9 +674,7 @@ def test_RP_priority_p0(request):
assert (
rp_add1 == rp2[group]
), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
- tc_name,
- rp_add1,
- rp2[group] if group in rp2 else None
+ tc_name, rp_add1, rp2[group] if group in rp2 else None
)
# Verify if that rp is installed
diff --git a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
index 313d5da8d..3e14ab716 100755
--- a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
+++ b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
@@ -343,7 +343,6 @@ def test_multicast_data_traffic_static_RP_send_join_then_traffic_p0(request):
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
-
step(
"Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
)
@@ -353,7 +352,6 @@ def test_multicast_data_traffic_static_RP_send_join_then_traffic_p0(request):
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
-
step("joinRx value after join sent")
state_after = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
@@ -457,8 +455,13 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
# (41 * (2 + .5)) == 102.
for data in input_dict:
result = verify_ip_mroutes(
- tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"],
- retry_timeout=102
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN,
+ data["iif"],
+ data["oil"],
+ retry_timeout=102,
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
@@ -623,8 +626,24 @@ def test_verify_mroute_when_same_receiver_in_FHR_LHR_and_RP_p0(request):
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1) to R1")
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15} }}}}},
- "r2": {"igmp": {"interfaces": {"r2-i3-eth1": {"igmp": {"version": "2", "query": {"query-interval": 15} }}}}},
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ },
+ "r2": {
+ "igmp": {
+ "interfaces": {
+ "r2-i3-eth1": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ },
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -718,7 +737,15 @@ def test_verify_mroute_when_same_receiver_joining_5_diff_sources_p0(request):
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15} }}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1129,7 +1156,15 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
step("Configure one IGMP interface on FRR3 node and send IGMP" " join (225.1.1.1)")
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15} }}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1402,7 +1437,6 @@ def test_modify_igmp_max_query_response_timer_p0(request):
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
-
step(
"Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes"
)
@@ -1413,9 +1447,7 @@ def test_modify_igmp_max_query_response_timer_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Delete the PIM and IGMP on FRR1")
- raw_config = {
- "l1": {"raw_config": ["interface l1-i1-eth1", "no ip pim"]}
- }
+ raw_config = {"l1": {"raw_config": ["interface l1-i1-eth1", "no ip pim"]}}
result = apply_raw_config(tgen, raw_config)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
diff --git a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
index 625839d3c..c7d453ad8 100755
--- a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
+++ b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
@@ -323,7 +323,15 @@ def test_verify_mroute_and_traffic_when_pimd_restarted_p2(request):
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -525,7 +533,15 @@ def test_verify_mroute_and_traffic_when_frr_restarted_p2(request):
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -878,7 +894,15 @@ def test_verify_mroute_after_shut_noshut_of_upstream_interface_p1(request):
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1028,7 +1052,6 @@ def test_verify_mroute_after_shut_noshut_of_upstream_interface_p1(request):
intf_l1_c1 = "l1-c1-eth0"
shutdown_bringup_interface(tgen, dut, intf_l1_c1, False)
-
result = verify_upstream_iif(
tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2, expected=False
)
@@ -1137,7 +1160,15 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
" join (226.1.1.1-5) and (232.1.1.1-5)"
)
input_dict = {
- "c2": {"igmp": {"interfaces": {"c2-i5-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "c2": {
+ "igmp": {
+ "interfaces": {
+ "c2-i5-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1270,7 +1301,15 @@ def test_verify_mroute_when_FRR_is_FHR_and_LHR_p0(request):
step("Configure one IGMP interface on f1 node and send IGMP" " join (225.1.1.1)")
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1350,9 +1389,9 @@ def test_verify_mroute_when_FRR_is_FHR_and_LHR_p0(request):
data["iif"],
data["oil"],
)
- assert result is True, (
- "Testcase {} : Failed Error mroutes were flushed.".format(tc_name)
- )
+ assert (
+ result is True
+ ), "Testcase {} : Failed Error mroutes were flushed.".format(tc_name)
step(
"After traffic stopped , verify (S,G) entries are flushed out"
@@ -1374,9 +1413,9 @@ def test_verify_mroute_when_FRR_is_FHR_and_LHR_p0(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed Error: \nmroutes are still present".format(tc_name)
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: \nmroutes are still present".format(tc_name)
write_test_footer(tc_name)
@@ -1454,12 +1493,24 @@ def test_verify_mroute_when_5_different_receiver_joining_same_sources_p0(request
"f1": {
"igmp": {
"interfaces": {
- "f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}},
- "f1-i2-eth1": {"igmp": {"version": "2", "query": {"query-interval": 15}}},
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ },
+ "f1-i2-eth1": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ },
+ }
+ }
+ },
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i6-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
}
}
},
- "l1": {"igmp": {"interfaces": {"l1-i6-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}},
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1513,8 +1564,12 @@ def test_verify_mroute_when_5_different_receiver_joining_same_sources_p0(request
source = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0]
input_dict_all = [
- {"dut": "l1", "src_address": source, "iif": ["l1-r2-eth4", "l1-c1-eth0"],
- "oil": ["l1-i1-eth1", "l1-i6-eth2"]},
+ {
+ "dut": "l1",
+ "src_address": source,
+ "iif": ["l1-r2-eth4", "l1-c1-eth0"],
+ "oil": ["l1-i1-eth1", "l1-i6-eth2"],
+ },
{"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"},
]
for data in input_dict_all:
@@ -1665,7 +1720,15 @@ def test_verify_oil_iif_for_mroute_after_shut_noshut_source_interface_p1(request
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
diff --git a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py
index b0824d2ff..907c75e9e 100755
--- a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py
+++ b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py
@@ -277,13 +277,11 @@ def verify_state_incremented(state_before, state_after):
for intf, v2 in v1.items():
for state, value in v2.items():
if value >= state_after[ttype][intf][state]:
- errormsg = (
- "[DUT: %s]: state %s value has not incremented, Initial value: %s, Current value: %s [FAILED!!]" % (
- intf,
- state,
- value,
- state_after[ttype][intf][state],
- )
+ errormsg = "[DUT: %s]: state %s value has not incremented, Initial value: %s, Current value: %s [FAILED!!]" % (
+ intf,
+ state,
+ value,
+ state_after[ttype][intf][state],
)
return errormsg
@@ -390,7 +388,15 @@ def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -764,7 +770,15 @@ def test_verify_oil_when_join_prune_sent_scenario_2_p1(request):
intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
input_dict = {
- "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "r2": {
+ "igmp": {
+ "interfaces": {
+ intf_r2_i3: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1107,9 +1121,9 @@ def test_shut_noshut_source_interface_when_upstream_cleared_from_LHR_p1(request)
result = verify_upstream_iif(
tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed Error: \n mroutes are still present".format(tc_name)
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: \n mroutes are still present".format(tc_name)
step("No shut the Source interface just after the upstream is expired" " from FRR1")
shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True)
@@ -1300,9 +1314,9 @@ def test_shut_noshut_receiver_interface_when_upstream_cleared_from_LHR_p1(reques
result = verify_upstream_iif(
tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
)
- assert result is not True, (
- "Testcase {} : Failed Error: \nmroutes are still present".format(tc_name)
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: \nmroutes are still present".format(tc_name)
step("No shut the Source interface just after the upstream is expired" " from FRR1")
shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True)
@@ -1528,7 +1542,15 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
)
input_dict_2 = {
- "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict_2)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1611,7 +1633,15 @@ def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request):
)
input_dict_2 = {
- "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict_2)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -1845,7 +1875,21 @@ def test_verify_remove_add_igmp_commands_when_pim_configured_p0(request):
intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
input_dict_1 = {
- "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2", "query": {"query-max-response-time": 40, "query-interval": 5}}}}}}
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {
+ "version": "2",
+ "query": {
+ "query-max-response-time": 40,
+ "query-interval": 5,
+ },
+ }
+ }
+ }
+ }
+ }
}
result = verify_igmp_config(tgen, input_dict_1)
@@ -2620,7 +2664,15 @@ def test_mroute_after_removing_RP_sending_IGMP_prune_p2(request):
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -2918,7 +2970,15 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -3550,7 +3610,15 @@ def test_mroute_flags_p1(request):
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
@@ -3721,7 +3789,15 @@ def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request):
intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
input_dict = {
- "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2", "query": {"query-interval": 15}}}}}}
+ "r2": {
+ "igmp": {
+ "interfaces": {
+ intf_r2_i3: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
index cf68dd790..9bbe3ca02 100755
--- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
+++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
@@ -129,7 +129,6 @@ from lib.common_config import (
start_router_daemons,
create_static_routes,
topo_daemons,
-
)
from lib.pim import (
create_pim_config,
@@ -463,7 +462,9 @@ def test_add_delete_static_RP_p0(request):
)
step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is up and join timer is running \n Error: {}".format(
@@ -785,7 +786,9 @@ def test_not_reachable_static_RP_p0(request):
"r1: join state should not be joined and join timer should stop,"
"verify using show ip pim upstream"
)
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: join state is joined and timer is not stopped \n Error: {}".format(
@@ -910,7 +913,9 @@ def test_add_RP_after_join_received_p1(request):
step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is joined and timer is running \n Error: {}".format(
@@ -1060,7 +1065,9 @@ def test_reachable_static_RP_after_join_p0(request):
)
step("r1 : Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is joined and timer is running\n Error: {}".format(
@@ -2567,7 +2574,8 @@ def test_restart_pimd_process_p2(request):
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False)
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
diff --git a/tests/topotests/nhrp_topo/test_nhrp_topo.py b/tests/topotests/nhrp_topo/test_nhrp_topo.py
index 3dc7ef196..2dd00c018 100644
--- a/tests/topotests/nhrp_topo/test_nhrp_topo.py
+++ b/tests/topotests/nhrp_topo/test_nhrp_topo.py
@@ -34,7 +34,7 @@ import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
@@ -52,45 +52,49 @@ def build_topo(tgen):
# Create 3 routers.
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r3'])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r2'])
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
def _populate_iface():
tgen = get_topogen()
- cmds_tot_hub = ['ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.2.1.{1} remote 0.0.0.0',
- 'ip link set dev {0}-gre0 up',
- 'echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6']
-
- cmds_tot = ['ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.1.1.{1} remote 0.0.0.0',
- 'ip link set dev {0}-gre0 up',
- 'echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6']
+ cmds_tot_hub = [
+ "ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.2.1.{1} remote 0.0.0.0",
+ "ip link set dev {0}-gre0 up",
+ "echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6",
+ ]
+
+ cmds_tot = [
+ "ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.1.1.{1} remote 0.0.0.0",
+ "ip link set dev {0}-gre0 up",
+ "echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6",
+ ]
for cmd in cmds_tot_hub:
- input = cmd.format('r2', '2')
- logger.info('input: '+cmd)
- output = tgen.net['r2'].cmd(cmd.format('r2', '2'))
- logger.info('output: '+output);
+ input = cmd.format("r2", "2")
+ logger.info("input: " + cmd)
+ output = tgen.net["r2"].cmd(cmd.format("r2", "2"))
+ logger.info("output: " + output)
for cmd in cmds_tot:
- input = cmd.format('r1', '1')
- logger.info('input: '+cmd)
- output = tgen.net['r1'].cmd(cmd.format('r1', '1'))
- logger.info('output: '+output);
+ input = cmd.format("r1", "1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1", "1"))
+ logger.info("output: " + output)
def setup_module(mod):
@@ -100,20 +104,19 @@ def setup_module(mod):
router_list = tgen.routers()
_populate_iface()
-
+
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)),
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
)
- if rname in ('r1', 'r2'):
+ if rname in ("r1", "r2"):
router.load_config(
- TopoRouter.RD_NHRP,
- os.path.join(CWD, '{}/nhrpd.conf'.format(rname))
+ TopoRouter.RD_NHRP, os.path.join(CWD, "{}/nhrpd.conf".format(rname))
)
# Initialize all routers.
- logger.info('Launching NHRP')
+ logger.info("Launching NHRP")
for name in router_list:
router = tgen.gears[name]
router.start()
@@ -139,52 +142,52 @@ def test_protocols_convergence():
router_list = tgen.routers()
for rname, router in router_list.items():
- if rname == 'r3':
+ if rname == "r3":
continue
- json_file = '{}/{}/nhrp4_cache.json'.format(CWD, router.name)
+ json_file = "{}/{}/nhrp4_cache.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip nhrp cache json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=40,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip nhrp cache json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
- output = router.vtysh_cmd('show ip nhrp cache')
+ output = router.vtysh_cmd("show ip nhrp cache")
logger.info(output)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
for rname, router in router_list.items():
- if rname == 'r3':
+ if rname == "r3":
continue
- json_file = '{}/{}/nhrp_route4.json'.format(CWD, router.name)
+ json_file = "{}/{}/nhrp_route4.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip route nhrp json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=40,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route nhrp json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
- output = router.vtysh_cmd('show ip route nhrp')
+ output = router.vtysh_cmd("show ip route nhrp")
logger.info(output)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
for rname, router in router_list.items():
- if rname == 'r3':
+ if rname == "r3":
continue
- logger.info('Dump neighbor information on {}-gre0'.format(rname))
- output = router.run('ip neigh show')
+ logger.info("Dump neighbor information on {}-gre0".format(rname))
+ output = router.run("ip neigh show")
logger.info(output)
@@ -194,26 +197,26 @@ def test_nhrp_connection():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- pingrouter = tgen.gears['r1']
- logger.info('Check Ping IPv4 from R1 to R2 (= 10.255.255.2)')
- output = pingrouter.run('ping 10.255.255.2 -f -c 1000')
+ pingrouter = tgen.gears["r1"]
+ logger.info("Check Ping IPv4 from R1 to R2 = 10.255.255.2)")
+ output = pingrouter.run("ping 10.255.255.2 -f -c 1000")
logger.info(output)
- if '1000 packets transmitted, 1000 received' not in output:
- assertmsg = 'expected ping IPv4 from R1 to R2 should be ok'
+ if "1000 packets transmitted, 1000 received" not in output:
+ assertmsg = "expected ping IPv4 from R1 to R2 should be ok"
assert 0, assertmsg
else:
- logger.info('Check Ping IPv4 from R1 to R2 OK')
+ logger.info("Check Ping IPv4 from R1 to R2 OK")
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py
index 2bc8e2157..ac4a23da9 100755
--- a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py
+++ b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py
@@ -98,7 +98,6 @@ pytestmark = [pytest.mark.ospfd]
def build_topo(tgen):
"Build function"
-
# Create 4 routers
for routern in range(1, 5):
tgen.add_router("r{}".format(routern))
diff --git a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py
index 6f7daf533..3738a0c33 100644
--- a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py
+++ b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py
@@ -83,10 +83,7 @@ def expect_ospfv3_routes(router, routes, wait=5, detail=False):
logger.info("waiting OSPFv3 router '{}' route".format(router))
test_func = partial(
- topotest.router_json_cmp,
- tgen.gears[router],
- cmd,
- {"routes": routes}
+ topotest.router_json_cmp, tgen.gears[router], cmd, {"routes": routes}
)
_, result = topotest.run_and_expect(test_func, None, count=wait, wait=1)
assertmsg = '"{}" convergence failure'.format(router)
@@ -254,11 +251,13 @@ def test_redistribute_metrics():
route = {
"2001:db8:500::/64": {
- "metricType":2,
- "metricCost":10,
+ "metricType": 2,
+ "metricCost": 10,
}
}
- logger.info("Expecting AS-external route 2001:db8:500::/64 to show up with default metrics")
+ logger.info(
+ "Expecting AS-external route 2001:db8:500::/64 to show up with default metrics"
+ )
expect_ospfv3_routes("r2", route, wait=30, detail=True)
# Change the metric of redistributed routes of the static type on r3.
@@ -272,15 +271,16 @@ def test_redistribute_metrics():
# Check if r3 reinstalled 2001:db8:500::/64 using the new metric type and value.
route = {
"2001:db8:500::/64": {
- "metricType":1,
- "metricCost":60,
+ "metricType": 1,
+ "metricCost": 60,
}
}
- logger.info("Expecting AS-external route 2001:db8:500::/64 to show up with updated metric type and value")
+ logger.info(
+ "Expecting AS-external route 2001:db8:500::/64 to show up with updated metric type and value"
+ )
expect_ospfv3_routes("r2", route, wait=30, detail=True)
-
def test_nssa_lsa_type7():
"""
Test that static route gets announced as external route when redistributed
@@ -310,9 +310,7 @@ def test_nssa_lsa_type7():
route = {
"2001:db8:100::/64": {
"pathType": "E1",
- "nextHops": [
- {"nextHop": "::", "interfaceName": "r4-eth0"}
- ]
+ "nextHops": [{"nextHop": "::", "interfaceName": "r4-eth0"}],
}
}
@@ -331,8 +329,10 @@ def test_nssa_lsa_type7():
def dont_expect_lsa(unexpected_lsa):
"Specialized test function to expect LSA go missing"
- output = tgen.gears["r4"].vtysh_cmd("show ipv6 ospf6 database type-7 detail json", isjson=True)
- for lsa in output['areaScopedLinkStateDb'][0]['lsa']:
+ output = tgen.gears["r4"].vtysh_cmd(
+ "show ipv6 ospf6 database type-7 detail json", isjson=True
+ )
+ for lsa in output["areaScopedLinkStateDb"][0]["lsa"]:
if lsa["prefix"] == unexpected_lsa["prefix"]:
if lsa["forwardingAddress"] == unexpected_lsa["forwardingAddress"]:
return lsa
@@ -345,7 +345,6 @@ def test_nssa_lsa_type7():
return output["routes"][unexpected_route]
return None
-
logger.info("Expecting LSA type-7 and OSPFv3 route 2001:db8:100::/64 to go away")
# Test that LSA doesn't exist.
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
index a5c92023b..86f3213fc 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
@@ -298,7 +298,7 @@ def test_ospf_chaos_tc31_p1(request):
def test_ospf_chaos_tc32_p1(request):
- """Verify ospf functionality after restart FRR service. """
+ """Verify ospf functionality after restart FRR service."""
tc_name = request.node.name
write_test_header(tc_name)
tgen = get_topogen()
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
index 621c3e50d..adc1b2cf3 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
@@ -48,7 +48,7 @@ from lib.common_config import (
step,
create_route_maps,
verify_prefix_lists,
- topo_daemons
+ topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -486,7 +486,13 @@ def test_ospf_routemaps_functionality_tc20_p0(request):
)
result = verify_rib(
- tgen, "ipv4", dut, input_dict, protocol=protocol, retry_timeout=4, expected=False
+ tgen,
+ "ipv4",
+ dut,
+ input_dict,
+ protocol=protocol,
+ retry_timeout=4,
+ expected=False,
)
assert (
result is not True
@@ -1038,94 +1044,75 @@ def test_ospf_routemaps_functionality_tc25_p0(request):
step(
"Create static routes(10.0.20.1/32) in R1 and redistribute "
- "to OSPF using route map.")
+ "to OSPF using route map."
+ )
# Create Static routes
input_dict = {
"r0": {
"static_routes": [
{
- "network": NETWORK['ipv4'][0],
+ "network": NETWORK["ipv4"][0],
"no_of_ip": 5,
- "next_hop": 'Null0',
+ "next_hop": "Null0",
}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
ospf_red_r0 = {
"r0": {
"ospf": {
- "redistribute": [{
- "redist_type": "static",
- "route_map": "rmap_ipv4"
- }]
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}]
}
}
}
result = create_router_ospf(tgen, topo, ospf_red_r0)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure route map with permit rule")
# Create route map
- routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv4": [{
- "action": "permit"
- }]
- }
- }
- }
+ routemaps = {"r0": {"route_maps": {"rmap_ipv4": [{"action": "permit"}]}}}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify that route is advertised to R1.")
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure route map with deny rule")
# Create route map
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv4": [{
- "seq_id": 10,
- "action": "deny"
- }]
- }
- }
+ "r0": {"route_maps": {"rmap_ipv4": [{"seq_id": 10, "action": "deny"}]}}
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step("Verify that route is not advertised to R1.")
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
@@ -1148,76 +1135,64 @@ def test_ospf_routemaps_functionality_tc22_p0(request):
step(
"Configure route map with seq number 10 to with ip prefix"
- " permitting route 10.0.20.1/32 in R1")
+ " permitting route 10.0.20.1/32 in R1"
+ )
step(
"Configure route map with seq number 20 to with ip prefix"
- " permitting route 10.0.20.2/32 in R1")
+ " permitting route 10.0.20.2/32 in R1"
+ )
# Create route map
input_dict_3 = {
- "r0": {
- "route_maps": {
- "rmap_ipv4": [{
- "action": "permit",
- 'seq_id': '10',
- "match": {
- "ipv4": {
- "prefix_lists": "pf_list_1_ipv4"
- }
- }
- },
- {
- "action": "permit",
- 'seq_id': '20',
- "match": {
- "ipv4": {
- "prefix_lists": "pf_list_2_ipv4"
- }
- }
- }
- ]
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ },
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv4": {"prefix_lists": "pf_list_2_ipv4"}},
+ },
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
- assert result is True, 'Testcase {} : Failed \n Error: {}'.format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create ip prefix list
input_dict_2 = {
- 'r0': {
- 'prefix_lists': {
- 'ipv4': {
- 'pf_list_1_ipv4': [{
- 'seqid': 10,
- 'network': NETWORK['ipv4'][0],
- 'action': 'permit'
- }]
- }
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": NETWORK["ipv4"][0], "action": "permit"}
+ ]
+ }
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, 'Testcase {} : Failed \n Error: {}'.format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create ip prefix list
input_dict_2 = {
- 'r0': {
- 'prefix_lists': {
- 'ipv4': {
- 'pf_list_2_ipv4': [{
- 'seqid': 10,
- 'network': NETWORK['ipv4'][1],
- 'action': 'permit'
- }]
- }
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_2_ipv4": [
+ {"seqid": 10, "network": NETWORK["ipv4"][1], "action": "permit"}
+ ]
+ }
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, 'Testcase {} : Failed \n Error: {}'.format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure static routes 10.0.20.1/32 and 10.0.20.2 in R1")
# Create Static routes
@@ -1225,127 +1200,112 @@ def test_ospf_routemaps_functionality_tc22_p0(request):
"r0": {
"static_routes": [
{
- "network": NETWORK['ipv4'][0],
+ "network": NETWORK["ipv4"][0],
"no_of_ip": 5,
- "next_hop": 'Null0',
+ "next_hop": "Null0",
}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure redistribute static route with route map.")
ospf_red_r0 = {
"r0": {
"ospf": {
- "redistribute": [{
- "redist_type": "static",
- "route_map": "rmap_ipv4"
- }]
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}]
}
}
}
result = create_router_ospf(tgen, topo, ospf_red_r0)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
input_dict = {
"r0": {
"static_routes": [
{
- "network": NETWORK['ipv4'][0],
+ "network": NETWORK["ipv4"][0],
"no_of_ip": 2,
- "next_hop": 'Null0',
+ "next_hop": "Null0",
}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify that both routes are learned in R1 and R2")
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r2'
- protocol = 'ospf'
+ dut = "r2"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Change route map with seq number 20 to deny.")
# Create route map
input_dict_3 = {
- "r0": {
- "route_maps": {
- "rmap_ipv4": [
- {
- "action": "deny",
- 'seq_id': '20',
- "match": {
- "ipv4": {
- "prefix_lists": "pf_list_2_ipv4"
- }
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "deny",
+ "seq_id": "20",
+ "match": {"ipv4": {"prefix_lists": "pf_list_2_ipv4"}},
}
- }
- ]
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
- assert result is True, 'Testcase {} : Failed \n Error: {}'.format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify the route 10.0.20.2/32 is withdrawn and not present "
- "in the routing table of R0 and R1.")
+ "in the routing table of R0 and R1."
+ )
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK['ipv4'][1],
- "next_hop": 'Null0'
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"][1], "next_hop": "Null0"}]}
}
- dut = 'r1'
- protocol = 'ospf'
+ dut = "r1"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- dut = 'r2'
- protocol = 'ospf'
+ dut = "r2"
+ protocol = "ospf"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
diff --git a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
index 2ea697ea7..07d4ca01a 100644
--- a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
+++ b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
@@ -33,7 +33,6 @@ pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
topo = None
-
def setup_module(mod):
"""Sets up the pytest environment."""
testsuite_run_time = time.asctime(time.localtime(time.time()))
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py
index d0c918d6b..413771246 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py
@@ -44,7 +44,7 @@ from lib.common_config import (
step,
create_interfaces_cfg,
topo_daemons,
- scapy_send_raw_packet
+ scapy_send_raw_packet,
)
from lib.topolog import logger
diff --git a/tests/topotests/ospf_sr_te_topo1/test_ospf_sr_te_topo1.py b/tests/topotests/ospf_sr_te_topo1/test_ospf_sr_te_topo1.py
index f061c51b4..6e992674a 100755
--- a/tests/topotests/ospf_sr_te_topo1/test_ospf_sr_te_topo1.py
+++ b/tests/topotests/ospf_sr_te_topo1/test_ospf_sr_te_topo1.py
@@ -111,15 +111,15 @@ def build_topo(tgen):
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- #switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+ # switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
switch = tgen.add_switch("s2")
switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
- #switch = tgen.add_switch("s3")
- #switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
- #switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
+ # switch = tgen.add_switch("s3")
+ # switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ # switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
switch = tgen.add_switch("s4")
switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
@@ -153,7 +153,7 @@ def setup_module(mod):
frrdir = tgen.config.get(tgen.CONFIG_SECTION, "frrdir")
if not os.path.isfile(os.path.join(frrdir, "pathd")):
- pytest.skip("pathd daemon wasn't built in:"+frrdir)
+ pytest.skip("pathd daemon wasn't built in:" + frrdir)
tgen.start_topology()
@@ -391,21 +391,23 @@ def check_bsid(rt, bsid, fn_name, positive):
candidate_output = router.vtysh_cmd("show mpls table json")
candidate_output_json = json.loads(candidate_output)
for item in candidate_output_json.items():
- # logger.info('item "%s"', item)
- if item[0] == candidate_key:
- matched_key = True
- if positive:
- break
+ # logger.info('item "%s"', item)
+ if item[0] == candidate_key:
+ matched_key = True
+ if positive:
+ break
if positive:
if matched_key:
matched = True
assertmsg = "{} don't has entry {} but is was expected".format(
- router.name, candidate_key)
+ router.name, candidate_key
+ )
else:
if not matched_key:
matched = True
assertmsg = "{} has entry {} but is wans't expected".format(
- router.name, candidate_key)
+ router.name, candidate_key
+ )
if matched:
logger.info('Success "%s" in "%s"', router.name, fn_name)
return
@@ -430,7 +432,12 @@ def test_srte_add_candidate_check_mpls_table_step1():
for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
add_candidate_path(rname, endpoint, 100, "default")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
@@ -445,7 +452,12 @@ def test_srte_reinstall_sr_policy_check_mpls_table_step1():
check_bsid(rname, bsid, test_srte_init_step1.__name__, False)
create_sr_policy(rname, endpoint, bsid)
add_candidate_path(rname, endpoint, 100, "default")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
@@ -572,7 +584,12 @@ def test_srte_change_segment_list_check_mpls_table_step4():
add_candidate_path(rname, endpoint, 100, "default")
# now change the segment list name
add_candidate_path(rname, endpoint, 100, "default", "test")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_segment(rname, "test", 10)
delete_segment(rname, "test", 20)
delete_segment(rname, "test", 30)
@@ -587,7 +604,12 @@ def test_srte_change_segment_list_check_mpls_table_step4():
add_segment_adj(rname, "test", 20, "10.0.6.5", "10.0.6.4")
add_segment_adj(rname, "test", 30, "10.0.2.4", "10.0.2.2")
add_segment_adj(rname, "test", 40, "10.0.1.2", "10.0.1.1")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
@@ -598,7 +620,12 @@ def test_srte_change_sl_priority_error_ted_check_mpls_table_step4():
add_candidate_path(rname, endpoint, 100, "default")
# now change the segment list name
add_candidate_path(rname, endpoint, 200, "test", "test")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_segment(rname, "test", 10)
delete_segment(rname, "test", 20)
delete_segment(rname, "test", 30)
@@ -615,7 +642,12 @@ def test_srte_change_sl_priority_error_ted_check_mpls_table_step4():
add_segment_adj(rname, "test", 30, "10.0.2.99", "10.0.2.99")
add_segment_adj(rname, "test", 40, "10.0.1.99", "10.0.1.99")
# So policy sticks with default sl even higher prio
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
diff --git a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py
index 56c8db7f5..01ddbc152 100644
--- a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py
+++ b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py
@@ -55,7 +55,6 @@ pytestmark = [pytest.mark.ospfd]
def build_topo(tgen):
"Build function"
-
# Create routers
for router in range(1, 4):
tgen.add_router("r{}".format(router))
diff --git a/tests/topotests/ospf_topo2/test_ospf_topo2.py b/tests/topotests/ospf_topo2/test_ospf_topo2.py
index 2b06ce857..1ad62ff18 100644
--- a/tests/topotests/ospf_topo2/test_ospf_topo2.py
+++ b/tests/topotests/ospf_topo2/test_ospf_topo2.py
@@ -49,6 +49,7 @@ pytestmark = [pytest.mark.ospfd]
CWD = os.path.dirname(os.path.realpath(__file__))
+
def build_topo(tgen):
"Build function"
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
index 49f1f7ea5..47333fcb3 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
@@ -252,6 +252,7 @@ def red_connected(dut, config=True):
# Test cases start here.
# ##################################
+
def test_ospfv3_type5_summary_tc42_p0(request):
"""OSPF summarisation functionality."""
tc_name = request.node.name
@@ -266,81 +267,69 @@ def test_ospfv3_type5_summary_tc42_p0(request):
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+ "5 static routes from different networks and redistribute in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
- " routes to one route. with aggregate timer as 6 sec")
+ " routes to one route. with aggregate timer as 6 sec"
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }],
- "aggr_timer": 6
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ],
+ "aggr_timer": 6,
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -349,64 +338,69 @@ def test_ospfv3_type5_summary_tc42_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "del_aggr_timer": True,
- "delete": True
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "del_aggr_timer": True,
+ "delete": True,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary Route still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
step("show ip ospf summary should not have any summary address.")
input_dict = {
@@ -415,40 +409,40 @@ def test_ospfv3_type5_summary_tc42_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary still present in DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
- dut = 'r1'
+ dut = "r1"
step("All 5 routes are advertised after deletion of configured summary.")
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("configure the summary again and delete static routes .")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
input_dict = {
SUMMARY["ipv6"][0]: {
@@ -456,91 +450,80 @@ def test_ospfv3_type5_summary_tc42_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
input_dict = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole",
- "delete": True
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole", "delete": True}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
step("Verify that summary route is withdrawn from R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("Add back static routes.")
input_dict_static_rtes = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary"
- " address on R0 and only one route is sent to R1.")
- dut = 'r1'
+ " address on R0 and only one route is sent to R1."
+ )
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
result = verify_rib(
- tgen, "ipv6", dut, input_dict_static_rtes,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show configure summaries.")
@@ -550,28 +533,23 @@ def test_ospfv3_type5_summary_tc42_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Configure new static route which is matching configured summary.")
input_dict_static_rtes = {
"r0": {
- "static_routes": [
- {
- "network": NETWORK_11["ipv6"],
- "next_hop": "blackhole"
- }
- ]
+ "static_routes": [{"network": NETWORK_11["ipv6"], "next_hop": "blackhole"}]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
@@ -580,17 +558,12 @@ def test_ospfv3_type5_summary_tc42_p0(request):
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK_11["ipv6"],
- "next_hop": "blackhole",
- "delete": True
- }
+ {"network": NETWORK_11["ipv6"], "next_hop": "blackhole", "delete": True}
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
@@ -600,50 +573,43 @@ def test_ospfv3_type5_summary_tc42_p0(request):
step(
"Configure redistribute connected and configure ospf external"
- " summary address to summarise the connected routes.")
+ " summary address to summarise the connected routes."
+ )
- dut = 'r0'
+ dut = "r0"
red_connected(dut)
- clear_ospf(tgen, dut, ospf='ospf6')
+ clear_ospf(tgen, dut, ospf="ospf6")
- ip = topo['routers']['r0']['links']['r3']['ipv6']
+ ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
- ip_net = str(ipaddress.ip_interface(u'{}'.format(ip)).network)
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": ip_net.split('/')[0],
- "mask": "8"
- }]
+ "summary-address": [{"prefix": ip_net.split("/")[0], "mask": "8"}]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured "
- "summary address on R0 and only one route is sent to R1.")
+ "summary address on R0 and only one route is sent to R1."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": "fd00::/64"}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": "fd00::/64"}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Shut one of the interface")
- intf = topo['routers']['r0']['links']['r3-link0']['interface']
+ intf = topo["routers"]["r0"]["links"]["r3-link0"]["interface"]
shutdown_bringup_interface(tgen, dut, intf, False)
# step("verify that summary lsa is not refreshed.")
@@ -662,13 +628,7 @@ def test_ospfv3_type5_summary_tc42_p0(request):
# show ip ospf database command is not working, waiting for DEV fix.
step("Delete OSPF process.")
- ospf_del = {
- "r0": {
- "ospf6": {
- "delete": True
- }
- }
- }
+ ospf_del = {"r0": {"ospf6": {"delete": True}}}
result = create_router_ospf(tgen, topo, ospf_del)
assert result is True, "Testcase : Failed \n Error: {}".format(result)
@@ -678,40 +638,32 @@ def test_ospfv3_type5_summary_tc42_p0(request):
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
red_connected(dut)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
- "address on R0 and only one route is sent to R1.")
+ "address on R0 and only one route is sent to R1."
+ )
input_dict = {
SUMMARY["ipv6"][0]: {
@@ -719,79 +671,78 @@ def test_ospfv3_type5_summary_tc42_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
-
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
step("Delete the redistribute command in ospf.")
- dut = 'r0'
+ dut = "r0"
red_connected(dut, config=False)
red_static(dut, config=False)
step("Verify that summary route is withdrawn from the peer.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "metric": "1234"
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "metric": "1234",
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
@@ -811,125 +762,129 @@ def test_ospfv3_type5_summary_tc46_p0(request):
step("Configure OSPF on all the routers of the topology.")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+        " and 5 static routes from different networks, and redistribute them in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
- " routes to one route with no advertise option.")
+ " routes to one route with no advertise option."
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "advertise": False
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "advertise": False,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary"
" address on R0 and summary route is not advertised to neighbor as"
- " no advertise is configured..")
+        " no advertise is configured."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict_summary,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step(
- "Verify that show ip ospf summary should show the "
- "configured summaries.")
+    step("Verify that show ip ospf summary shows the configured summaries.")
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "delete": True
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "delete": True,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Summary has 5 sec delay timer, sleep 5 secs...")
sleep(5)
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary Route still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
step("show ip ospf summary should not have any summary address.")
input_dict = {
@@ -938,117 +893,118 @@ def test_ospfv3_type5_summary_tc46_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 1234,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary still present in DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
step("Reconfigure summary with no advertise.")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "advertise": False
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "advertise": False,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary"
" address on R0 and summary route is not advertised to neighbor as"
- " no advertise is configured..")
+        " no advertise is configured."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict_summary,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step(
- "Verify that show ip ospf summary should show the "
- "configured summaries.")
+    step("Verify that show ip ospf summary shows the configured summaries.")
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step(
"Change summary address from no advertise to advertise "
- "(summary-address 10.0.0.0 255.255.0.0)")
+ "(summary-address 10.0.0.0 255.255.0.0)"
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "advertise": False
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "advertise": False,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -1057,36 +1013,33 @@ def test_ospfv3_type5_summary_tc46_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+    step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes is present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes is present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -1105,80 +1058,67 @@ def test_ospfv3_type5_summary_tc48_p0(request):
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+        " and 5 static routes from different networks, and redistribute them in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
- "Configure External Route summary in R0 to summarise 5"
- " routes to one route.")
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -1187,40 +1127,38 @@ def test_ospfv3_type5_summary_tc48_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+    step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step(
"Configure route map and & rule to permit configured summary address,"
- " redistribute static & connected routes with the route map.")
+ " redistribute static & connected routes with the route map."
+ )
step("Configure prefixlist to permit the static routes, add to route map.")
# Create ip prefix list
pfx_list = {
@@ -1228,75 +1166,57 @@ def test_ospfv3_type5_summary_tc48_p0(request):
"prefix_lists": {
"ipv6": {
"pf_list_1_ipv6": [
- {
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }
+ {"seqid": 10, "network": "any", "action": "permit"}
]
}
}
}
}
result = create_prefix_lists(tgen, pfx_list)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv6": [{
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
"action": "permit",
- "seq_id": '1',
- "match": {
- "ipv6": {
- "prefix_lists":
- "pf_list_1_ipv6"
- }
- }
- }]
- }
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}},
+ }
+ ]
}
+ }
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
ospf_red_r1 = {
"r0": {
"ospf6": {
- "redistribute": [{
- "redist_type": "static",
- "route_map": "rmap_ipv6"
- }]
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
}
}
}
result = create_router_ospf(tgen, topo, ospf_red_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured"
"summary address on R0 and only one route is sent to R1. Verify that "
- "show ip ospf summary should show the configure summaries.")
+        "show ip ospf summary should show the configured summaries."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv6"][0]: {
@@ -1304,87 +1224,88 @@ def test_ospfv3_type5_summary_tc48_p0(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Configure metric type as 1 in route map.")
-
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv6": [{
- "seq_id": '1',
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "seq_id": "1",
"action": "permit",
- "set":{
- "metric-type": "type-1"
- }
- }]
- }
+ "set": {"metric-type": "type-1"},
+ }
+ ]
}
+ }
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes(static / connected) are summarised"
- " to configured summary address with metric type 2.")
+ " to configured summary address with metric type 2."
+ )
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Un configure metric type from route map.")
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv6": [{
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
"action": "permit",
- "seq_id": '1',
- "set":{
- "metric-type": "type-1",
- "delete": True
- }
- }]
- }
+ "seq_id": "1",
+ "set": {"metric-type": "type-1", "delete": True},
+ }
+ ]
}
+ }
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes(static / connected) are summarised"
- " to configured summary address with metric type 2.")
+ " to configured summary address with metric type 2."
+ )
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Change rule from permit to deny in prefix list.")
pfx_list = {
@@ -1392,42 +1313,39 @@ def test_ospfv3_type5_summary_tc48_p0(request):
"prefix_lists": {
"ipv6": {
"pf_list_1_ipv6": [
- {
- "seqid": 10,
- "network": "any",
- "action": "deny"
- }
+ {"seqid": 10, "network": "any", "action": "deny"}
]
}
}
}
}
result = create_prefix_lists(tgen, pfx_list)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that previously originated summary lsa "
- "is withdrawn from the neighbor.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "is withdrawn from the neighbor."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
step("summary route has delay of 5 secs, wait for 5 secs")
sleep(5)
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -1453,103 +1371,104 @@ def test_ospfv3_type5_summary_tc51_p2(request):
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "tag": 4294967295
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "16",
- "advertise": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": 4294967295,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure and re configure all the commands 10 times in a loop.")
- for itrate in range(0,10):
+ for itrate in range(0, 10):
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "8",
- "tag": 4294967295
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "16",
- "advertise": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
]
}
- }
+ }
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "8",
- "tag": 4294967295,
- "delete": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "16",
- "advertise": True,
- "delete": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False,
- "delete": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False,
- "delete": True
- },
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True,
+ },
]
}
+ }
}
- }
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify the show commands")
@@ -1559,13 +1478,14 @@ def test_ospfv3_type5_summary_tc51_p2(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 0
+ "External route count": 0,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
write_test_footer(tc_name)
@@ -1584,80 +1504,67 @@ def test_ospfv3_type5_summary_tc49_p2(request):
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+        " and 5 static routes from different networks, and redistribute them in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
- "Configure External Route summary in R0 to summarise 5"
- " routes to one route.")
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -1666,61 +1573,54 @@ def test_ospfv3_type5_summary_tc49_p2(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+    step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step('Reload the FRR router')
+ step("Reload the FRR router")
# stop/start -> restart FRR router and verify
- stop_router(tgen, 'r0')
- start_router(tgen, 'r0')
+ stop_router(tgen, "r0")
+ start_router(tgen, "r0")
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -1729,36 +1629,33 @@ def test_ospfv3_type5_summary_tc49_p2(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+    step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("Kill OSPF6d daemon on R0.")
kill_router_daemons(tgen, "r0", ["ospf6d"])
@@ -1769,28 +1666,25 @@ def test_ospfv3_type5_summary_tc49_p2(request):
step("Verify OSPF neighbors are up after bringing back ospf6d in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+    assert ospf_covergence is True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, ospf_covergence
+    )
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -1799,36 +1693,33 @@ def test_ospfv3_type5_summary_tc49_p2(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+    step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("restart zebrad")
kill_router_daemons(tgen, "r0", ["zebra"])
@@ -1839,22 +1730,18 @@ def test_ospfv3_type5_summary_tc49_p2(request):
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
@@ -1863,36 +1750,33 @@ def test_ospfv3_type5_summary_tc49_p2(request):
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+    step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
write_test_footer(tc_name)
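
The checks in these OSPFv3 summarisation cases all follow one idiom: build an input_dict describing the expected prefixes, verify both the OSPFv3 RIB and the zebra RIB, and pass expected=False when the prefix must be absent. The condensed sketch below illustrates that idiom; it assumes the helpers and fixtures already imported in this test file (verify_ospf6_rib, verify_rib, tgen, tc_name, protocol), and the prefix is a hypothetical placeholder rather than one of the file's SUMMARY values.

# Hypothetical summary prefix; the real cases use SUMMARY["ipv6"][0].
input_dict_summary = {"r0": {"static_routes": [{"network": "2011:0:20::/32"}]}}

# Positive check: the summary must be present on r1 in the OSPFv3 and zebra RIBs.
result = verify_ospf6_rib(tgen, "r1", input_dict_summary)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

result = verify_rib(tgen, "ipv6", "r1", input_dict_summary, protocol=protocol)
assert result is True, (
    "Testcase {} : Failed \n"
    " Error: summary route missing in RIB".format(tc_name)
)

# Negative check: the assertion flips to `is not True`, so a failed lookup
# (the route was withdrawn) becomes the passing outcome.
result = verify_rib(
    tgen, "ipv6", "r1", input_dict_summary, protocol=protocol, expected=False
)
assert result is not True, (
    "Testcase {} : Failed \n"
    " Error: summary route still present in RIB".format(tc_name)
)
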
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
index 69accfc5b..ed70c09fa 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
@@ -46,7 +46,7 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons
+ topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
diff --git a/tests/topotests/pbr_topo1/test_pbr_topo1.py b/tests/topotests/pbr_topo1/test_pbr_topo1.py
index daf03e73d..586d9217d 100644
--- a/tests/topotests/pbr_topo1/test_pbr_topo1.py
+++ b/tests/topotests/pbr_topo1/test_pbr_topo1.py
@@ -59,7 +59,6 @@ pytestmark = [pytest.mark.pbrd]
def build_topo(tgen):
"Build function"
-
# Populate routers
for routern in range(1, 2):
tgen.add_router("r{}".format(routern))
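
The build function above only creates routers; the links are added further down in the file. As a minimal sketch of the same Topogen calls, a hypothetical two-router variant could look like the following (tgen.add_switch is the usual companion call and is assumed here, since this hunk does not show it):

def build_topo(tgen):
    "Hypothetical minimal build function using the same Topogen API."
    # Create the routers first...
    for routern in range(1, 3):
        tgen.add_router("r{}".format(routern))
    # ...then connect them through a switch, mirroring the add_link pattern
    # used by the PIM topologies later in this series.
    tgen.add_switch("sw1")
    tgen.gears["r1"].add_link(tgen.gears["sw1"])
    tgen.gears["r2"].add_link(tgen.gears["sw1"])
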
diff --git a/tests/topotests/pim_acl/test_pim_acl.py b/tests/topotests/pim_acl/test_pim_acl.py
index 4e8dec0db..a4e6630f7 100755
--- a/tests/topotests/pim_acl/test_pim_acl.py
+++ b/tests/topotests/pim_acl/test_pim_acl.py
@@ -40,7 +40,7 @@ test_pim_acl.py: Test PIM with RP selection using ACLs
# R1 and R11 - R15.
# - test_pim_convergence()
# Wait for PIM convergence on all routers. PIM is run on
-# R1 and R11 - R15.
+# R1 and R11 - R15.
# - test_mcast_acl_1():
# Test 1st ACL entry 239.100.0.0/28 with 239.100.0.1 which
# should use R11 as RP
@@ -121,9 +121,8 @@ from lib.pim import McastTesterHelper
pytestmark = [pytest.mark.pimd, pytest.mark.ospfd]
-
def build_topo(tgen):
- for hostNum in range(1,3):
+ for hostNum in range(1, 3):
tgen.add_router("h{}".format(hostNum))
# Create the main router
@@ -157,6 +156,7 @@ def build_topo(tgen):
#
#####################################################
+
def setup_module(module):
logger.info("PIM RP ACL Topology: \n {}".format(TOPOLOGY))
@@ -171,7 +171,7 @@ def setup_module(module):
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
- if rname[0] != 'h':
+ if rname[0] != "h":
# Only load ospf on routers, not on end hosts
router.load_config(
TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
@@ -231,12 +231,13 @@ def test_pim_convergence():
assert res is None, assertmsg
-
def check_mcast_entry(entry, mcastaddr, pimrp):
"Helper function to check RP"
tgen = get_topogen()
- logger.info("Testing PIM RP selection for ACL {} entry using {}".format(entry, mcastaddr));
+ logger.info(
+ "Testing PIM RP selection for ACL {} entry using {}".format(entry, mcastaddr)
+ )
with McastTesterHelper(tgen) as helper:
helper.run("h2", ["--send=0.7", mcastaddr, "h2-eth0"])
@@ -281,7 +282,7 @@ def test_mcast_acl_1():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(1, '239.100.0.1', 'r11')
+ check_mcast_entry(1, "239.100.0.1", "r11")
def test_mcast_acl_2():
@@ -292,7 +293,7 @@ def test_mcast_acl_2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(2, '239.100.0.17', 'r12')
+ check_mcast_entry(2, "239.100.0.17", "r12")
def test_mcast_acl_3():
@@ -303,7 +304,7 @@ def test_mcast_acl_3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(3, '239.100.0.32', 'r13')
+ check_mcast_entry(3, "239.100.0.32", "r13")
def test_mcast_acl_4():
@@ -314,7 +315,7 @@ def test_mcast_acl_4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(4, '239.100.0.255', 'r14')
+ check_mcast_entry(4, "239.100.0.255", "r14")
def test_mcast_acl_5():
@@ -325,7 +326,7 @@ def test_mcast_acl_5():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(5, '239.100.0.97', 'r14')
+ check_mcast_entry(5, "239.100.0.97", "r14")
def test_mcast_acl_6():
@@ -336,7 +337,7 @@ def test_mcast_acl_6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(6, '239.100.0.70', 'r15')
+ check_mcast_entry(6, "239.100.0.70", "r15")
if __name__ == "__main__":
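
Each test_mcast_acl_N case above is the same thin wrapper around check_mcast_entry, differing only in the (entry, group, RP) triple. A sketch of how a further case would follow that shape; the entry number, group address, and RP below are hypothetical and do not correspond to an ACL entry in this topology:

def test_mcast_acl_7():
    "Hypothetical additional ACL entry check, following the pattern above."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    check_mcast_entry(7, "239.100.0.200", "r15")
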
diff --git a/tests/topotests/pim_basic/test_pim.py b/tests/topotests/pim_basic/test_pim.py
index 1d1067c34..03b4368e4 100644
--- a/tests/topotests/pim_basic/test_pim.py
+++ b/tests/topotests/pim_basic/test_pim.py
@@ -205,7 +205,7 @@ def test_pim_igmp_report():
r1 = tgen.gears["r1"]
# Let's send a igmp report from r2->r1
- cmd = [ os.path.join(CWD, "mcast-rx.py"), "229.1.1.2", "r2-eth0" ]
+ cmd = [os.path.join(CWD, "mcast-rx.py"), "229.1.1.2", "r2-eth0"]
p = r2.popen(cmd)
try:
expected = {
@@ -221,7 +221,7 @@ def test_pim_igmp_report():
test_func = partial(
topotest.router_json_cmp, r1, "show ip pim upstream json", expected
)
- _, result = topotest.run_and_expect(test_func, None, count=5, wait=.5)
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(r1.name)
assert result is None, assertmsg
finally:
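
The IGMP report test above starts a background receiver with popen and then polls pimd's JSON output until it matches. A minimal sketch of that polling step, assuming this file's existing imports (partial, topotest) and its r1 router object; the expected template here is deliberately loose, and its key layout (group, then source) is an assumption, since the real expected dict is not shown in this hunk:

# Loose template: only require that group 229.1.1.2 has some upstream entry.
expected = {"229.1.1.2": {"*": {}}}

test_func = partial(
    topotest.router_json_cmp, r1, "show ip pim upstream json", expected
)
# Retry up to 5 times, 0.5s apart; run_and_expect returns a 2-tuple whose
# second element is the last value test_func returned (None on a match).
_, result = topotest.run_and_expect(test_func, None, count=5, wait=0.5)
assert result is None, '"{}" JSON output mismatches'.format(r1.name)
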
diff --git a/tests/topotests/pim_basic_topo2/test_pim_basic_topo2.py b/tests/topotests/pim_basic_topo2/test_pim_basic_topo2.py
index b64572865..9506c3c6d 100644
--- a/tests/topotests/pim_basic_topo2/test_pim_basic_topo2.py
+++ b/tests/topotests/pim_basic_topo2/test_pim_basic_topo2.py
@@ -103,7 +103,7 @@ def expect_neighbor(router, interface, peer):
topotest.router_json_cmp,
tgen.gears[router],
"show ip pim neighbor json",
- {interface: {peer: {}}}
+ {interface: {peer: {}}},
)
_, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
assertmsg = '"{}" PIM convergence failure'.format(router)
@@ -118,14 +118,14 @@ def test_wait_pim_convergence():
logger.info("waiting for PIM to converge")
- expect_neighbor('r1', 'r1-eth0', '192.168.1.2')
- expect_neighbor('r2', 'r2-eth0', '192.168.1.1')
+ expect_neighbor("r1", "r1-eth0", "192.168.1.2")
+ expect_neighbor("r2", "r2-eth0", "192.168.1.1")
- expect_neighbor('r2', 'r2-eth1', '192.168.2.3')
- expect_neighbor('r2', 'r2-eth2', '192.168.3.4')
+ expect_neighbor("r2", "r2-eth1", "192.168.2.3")
+ expect_neighbor("r2", "r2-eth2", "192.168.3.4")
- expect_neighbor('r3', 'r3-eth0', '192.168.2.1')
- expect_neighbor('r4', 'r4-eth0', '192.168.3.1')
+ expect_neighbor("r3", "r3-eth0", "192.168.2.1")
+ expect_neighbor("r4", "r4-eth0", "192.168.3.1")
def test_bfd_peers():
@@ -143,7 +143,7 @@ def test_bfd_peers():
topotest.router_json_cmp,
tgen.gears[router],
"show bfd peers json",
- [{"peer": peer, "status": "up"}]
+ [{"peer": peer, "status": "up"}],
)
_, result = topotest.run_and_expect(test_func, None, count=10, wait=1)
assertmsg = '"{}" BFD convergence failure'.format(router)
@@ -173,7 +173,7 @@ def test_pim_reconvergence():
topotest.router_json_cmp,
tgen.gears[router],
"show ip pim neighbor json",
- {interface: {peer: None}}
+ {interface: {peer: None}},
)
_, result = topotest.run_and_expect(test_func, None, count=4, wait=1)
assertmsg = '"{}" PIM convergence failure'.format(router)
@@ -199,23 +199,29 @@ def test_pim_bfd_profile():
topotest.router_json_cmp,
tgen.gears[router],
"show bfd peers json",
- [settings]
+ [settings],
)
_, result = topotest.run_and_expect(test_func, None, count=4, wait=1)
assertmsg = '"{}" BFD convergence failure'.format(router)
assert result is None, assertmsg
- expect_bfd_peer_settings("r1", {
- "peer": "192.168.1.2",
- "receive-interval": 250,
- "transmit-interval": 250,
- })
-
- expect_bfd_peer_settings("r2", {
- "peer": "192.168.1.1",
- "remote-receive-interval": 250,
- "remote-transmit-interval": 250,
- })
+ expect_bfd_peer_settings(
+ "r1",
+ {
+ "peer": "192.168.1.2",
+ "receive-interval": 250,
+ "transmit-interval": 250,
+ },
+ )
+
+ expect_bfd_peer_settings(
+ "r2",
+ {
+ "peer": "192.168.1.1",
+ "remote-receive-interval": 250,
+ "remote-transmit-interval": 250,
+ },
+ )
def test_memory_leak():
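
expect_neighbor and expect_bfd_peer_settings above both poll a vtysh JSON command with a partial template: a peer mapped to {} only has to exist, while test_pim_reconvergence maps the peer to None to wait for it to disappear, which the JSON comparator appears to treat as "key must be absent". A small sketch of a helper built on that convention, assuming the functools/topotest/get_topogen imports this file already uses for its own wrappers; the router, interface, and peer values are placeholders:

import functools

from lib import topotest
from lib.topogen import get_topogen


def expect_pim_peer(router, interface, peer, present=True, count=30, wait=1):
    "Poll 'show ip pim neighbor json' until peer appears (or disappears)."
    tgen = get_topogen()
    # An empty dict accepts any value at the key; None asks for the key
    # to be absent (the convention test_pim_reconvergence relies on).
    template = {interface: {peer: {} if present else None}}
    test_func = functools.partial(
        topotest.router_json_cmp,
        tgen.gears[router],
        "show ip pim neighbor json",
        template,
    )
    _, result = topotest.run_and_expect(test_func, None, count=count, wait=wait)
    assertmsg = '"{}" PIM peer {} state never matched'.format(router, peer)
    assert result is None, assertmsg
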
diff --git a/tests/topotests/pim_igmp_vrf/test_pim_vrf.py b/tests/topotests/pim_igmp_vrf/test_pim_vrf.py
index 73cee74b5..f845a4a6e 100755
--- a/tests/topotests/pim_igmp_vrf/test_pim_vrf.py
+++ b/tests/topotests/pim_igmp_vrf/test_pim_vrf.py
@@ -34,7 +34,7 @@ test_pim_vrf.py: Test PIM with VRFs.
# R1 is split into 2 VRF: Blue and Red, the others are normal
# routers and Hosts
# There are 2 similar topologies with overlapping IPs in each
-# section.
+# section.
#
# Test steps:
# - setup_module()
@@ -48,15 +48,15 @@ test_pim_vrf.py: Test PIM with VRFs.
# R1, R11 and R12. R11 is the RP for vrf blue, R12 is RP
# for vrf red.
# - test_vrf_pimreg_interfaces()
-# Adding PIM RP in VRF information and verify pimreg
+# Adding PIM RP in VRF information and verify pimreg
# interfaces in VRF blue and red
# - test_mcast_vrf_blue()
-# Start multicast stream for group 239.100.0.1 from Host
+# Start multicast stream for group 239.100.0.1 from Host
# H2 and join from Host H1 on vrf blue
# Verify PIM JOIN status on R1 and R11
# Stop multicast after verification
# - test_mcast_vrf_red()
-# Start multicast stream for group 239.100.0.1 from Host
+# Start multicast stream for group 239.100.0.1 from Host
# H4 and join from Host H3 on vrf blue
# Verify PIM JOIN status on R1 and R12
# Stop multicast after verification
@@ -104,17 +104,15 @@ from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.topotest import iproute2_is_vrf_capable
-from lib.common_config import (
- required_linux_kernel_version)
+from lib.common_config import required_linux_kernel_version
from lib.pim import McastTesterHelper
pytestmark = [pytest.mark.ospfd, pytest.mark.pimd]
-
def build_topo(tgen):
- for hostNum in range(1,5):
+ for hostNum in range(1, 5):
tgen.add_router("h{}".format(hostNum))
# Create the main router
@@ -154,12 +152,14 @@ def build_topo(tgen):
tgen.gears["h4"].add_link(tgen.gears["sw4"])
tgen.gears["r12"].add_link(tgen.gears["sw4"])
+
#####################################################
#
# Tests starting
#
#####################################################
+
def setup_module(module):
logger.info("PIM IGMP VRF Topology: \n {}".format(TOPOLOGY))
@@ -189,7 +189,7 @@ def setup_module(module):
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
- if rname[0] != 'h':
+ if rname[0] != "h":
# Only load ospf on routers, not on end hosts
router.load_config(
TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
@@ -230,7 +230,10 @@ def test_ospf_convergence():
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip ospf vrf blue neighbor json", expected
+ topotest.router_json_cmp,
+ router,
+ "show ip ospf vrf blue neighbor json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
assertmsg = "OSPF router R1 did not converge on VRF blue"
@@ -297,7 +300,10 @@ def test_vrf_pimreg_interfaces():
reffile = os.path.join(CWD, "r1/pim_blue_pimreg11.json")
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, r1, "show ip pim vrf blue inter pimreg11 json", expected
+ topotest.router_json_cmp,
+ r1,
+ "show ip pim vrf blue inter pimreg11 json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=5, wait=2)
assertmsg = "PIM router R1, VRF blue (table 11) pimreg11 interface missing or incorrect status"
@@ -310,7 +316,10 @@ def test_vrf_pimreg_interfaces():
reffile = os.path.join(CWD, "r1/pim_red_pimreg12.json")
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, r1, "show ip pim vrf red inter pimreg12 json", expected
+ topotest.router_json_cmp,
+ r1,
+ "show ip pim vrf red inter pimreg12 json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=5, wait=2)
assertmsg = "PIM router R1, VRF red (table 12) pimreg12 interface missing or incorrect status"
@@ -321,11 +330,12 @@ def test_vrf_pimreg_interfaces():
### Test PIM / IGMP with VRF
##################################
+
def check_mcast_entry(mcastaddr, pimrp, receiver, sender, vrf):
"Helper function to check RP"
tgen = get_topogen()
- logger.info("Testing PIM for VRF {} entry using {}".format(vrf, mcastaddr));
+ logger.info("Testing PIM for VRF {} entry using {}".format(vrf, mcastaddr))
with McastTesterHelper(tgen) as helper:
helper.run(sender, ["--send=0.7", mcastaddr, str(sender) + "-eth0"])
@@ -339,8 +349,10 @@ def check_mcast_entry(mcastaddr, pimrp, receiver, sender, vrf):
logger.info("verifying pim join on r1 for {} on VRF {}".format(mcastaddr, vrf))
test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip pim vrf {} join json".format(vrf),
- expected
+ topotest.router_json_cmp,
+ router,
+ "show ip pim vrf {} join json".format(vrf),
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=10, wait=2)
assertmsg = "PIM router r1 did not show join status on VRF {}".format(vrf)
@@ -355,7 +367,11 @@ def check_mcast_entry(mcastaddr, pimrp, receiver, sender, vrf):
topotest.router_json_cmp, router, "show ip pim join json", expected
)
_, res = topotest.run_and_expect(test_func, None, count=10, wait=2)
- assertmsg = "PIM router {} did not get selected as the PIM RP for VRF {}".format(pimrp, vrf)
+ assertmsg = (
+ "PIM router {} did not get selected as the PIM RP for VRF {}".format(
+ pimrp, vrf
+ )
+ )
assert res is None, assertmsg
@@ -367,7 +383,7 @@ def test_mcast_vrf_blue():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry('239.100.0.1', 'r11', 'h1', 'h2', 'blue')
+ check_mcast_entry("239.100.0.1", "r11", "h1", "h2", "blue")
def test_mcast_vrf_red():
@@ -378,7 +394,7 @@ def test_mcast_vrf_red():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry('239.100.0.1', 'r12', 'h3', 'h4', 'red')
+ check_mcast_entry("239.100.0.1", "r12", "h3", "h4", "red")
if __name__ == "__main__":
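check_mcast_entry above starts its traffic generators inside a "with McastTesterHelper(tgen) as helper:" block, presumably so the sender and receiver processes are torn down even when a later assert fails. A tiny, hypothetical illustration of that context-manager pattern (BackgroundApp is not a topotest class):

    import subprocess

    class BackgroundApp:
        """Hypothetical miniature of the helper idea: launch side processes
        inside a with-block and always clean them up on exit."""

        def __init__(self):
            self.procs = []

        def run(self, args):
            self.procs.append(subprocess.Popen(args))

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            for proc in self.procs:
                proc.terminate()
                proc.wait()

    with BackgroundApp() as helper:
        helper.run(["sleep", "5"])  # stand-in for the multicast sender/receiver
        # ... assertions would go here; cleanup runs even if they raise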
diff --git a/tests/topotests/rip_topo1/test_rip_topo1.py b/tests/topotests/rip_topo1/test_rip_topo1.py
index 88299c90d..c5812f28c 100644
--- a/tests/topotests/rip_topo1/test_rip_topo1.py
+++ b/tests/topotests/rip_topo1/test_rip_topo1.py
@@ -34,7 +34,6 @@ import pytest
from time import sleep
-
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
from lib.topogen import Topogen, get_topogen
diff --git a/tests/topotests/route_scale/test_route_scale.py b/tests/topotests/route_scale/test_route_scale.py
index 12cb835b8..fefeccd5e 100644
--- a/tests/topotests/route_scale/test_route_scale.py
+++ b/tests/topotests/route_scale/test_route_scale.py
@@ -209,7 +209,9 @@ def test_route_install():
m = re.search("Mem:\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)", output)
total_mem = int(m.group(2))
if total_mem < 4000000:
- logger.info("Limited memory available: {}, skipping x32 testcase".format(total_mem))
+ logger.info(
+ "Limited memory available: {}, skipping x32 testcase".format(total_mem)
+ )
scale_setups = scale_setups[0:-1]
# Run each step using the dicts we've built
diff --git a/tests/topotests/simple_snmp_test/test_simple_snmp.py b/tests/topotests/simple_snmp_test/test_simple_snmp.py
index 495a4c9cd..35f021013 100755
--- a/tests/topotests/simple_snmp_test/test_simple_snmp.py
+++ b/tests/topotests/simple_snmp_test/test_simple_snmp.py
@@ -50,11 +50,7 @@ def setup_module(mod):
error_msg = "SNMP not installed - skipping"
pytest.skip(error_msg)
# This function initiates the topology build with Topogen...
- topodef = {
- "s1": "r1",
- "s2": "r1",
- "s3": "r1"
- }
+ topodef = {"s1": "r1", "s2": "r1", "s3": "r1"}
tgen = Topogen(topodef, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
diff --git a/tests/topotests/srv6_locator/test_srv6_locator.py b/tests/topotests/srv6_locator/test_srv6_locator.py
index 5a0c5b5ea..b48cd09bf 100755
--- a/tests/topotests/srv6_locator/test_srv6_locator.py
+++ b/tests/topotests/srv6_locator/test_srv6_locator.py
@@ -34,7 +34,7 @@ import pytest
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
@@ -52,16 +52,20 @@ def open_json_file(filename):
assert False, "Could not read file {}".format(filename)
-
-
def setup_module(mod):
tgen = Topogen({None: "r1"}, mod.__name__)
tgen.start_topology()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
- router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, '{}/sharpd.conf'.format(rname)))
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
tgen.start_router()
@@ -74,7 +78,7 @@ def test_srv6():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r1']
+ router = tgen.gears["r1"]
def _check_srv6_locator(router, expected_locator_file):
logger.info("checking zebra locator status")
@@ -91,12 +95,12 @@ def test_srv6():
def check_srv6_locator(router, expected_file):
func = functools.partial(_check_srv6_locator, router, expected_file)
success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
def check_sharpd_chunk(router, expected_file):
func = functools.partial(_check_sharpd_chunk, router, expected_file)
success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
logger.info("Test1 for Locator Configuration")
check_srv6_locator(router, "expected_locators1.json")
@@ -132,6 +136,6 @@ def test_srv6():
check_sharpd_chunk(router, "expected_chunks5.json")
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/zebra_netlink/test_zebra_netlink.py b/tests/topotests/zebra_netlink/test_zebra_netlink.py
index a15ddec14..05cc0ae4a 100644
--- a/tests/topotests/zebra_netlink/test_zebra_netlink.py
+++ b/tests/topotests/zebra_netlink/test_zebra_netlink.py
@@ -55,7 +55,7 @@ pytestmark = [pytest.mark.sharpd]
def setup_module(mod):
"Sets up the pytest environment"
- topodef = { "s1": ("r1") }
+ topodef = {"s1": ("r1")}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
diff --git a/tests/topotests/zebra_opaque/test_zebra_opaque.py b/tests/topotests/zebra_opaque/test_zebra_opaque.py
index 2cd4a8853..2983df3ed 100644
--- a/tests/topotests/zebra_opaque/test_zebra_opaque.py
+++ b/tests/topotests/zebra_opaque/test_zebra_opaque.py
@@ -39,9 +39,7 @@ pytestmark = [pytest.mark.bgpd]
def setup_module(mod):
- topodef = {
- "s1": ("r1", "r2")
- }
+ topodef = {"s1": ("r1", "r2")}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
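The one-line topodef dicts in these setup_module hunks are Topogen shorthand: each key names a switch and its value gives the router or routers attached to it, and the diff itself shows both spellings in use, plain strings per switch in simple_snmp_test and a tuple in zebra_opaque, while srv6_locator's {None: "r1"} builds a lone router with no switch. Worth noting while reading zebra_netlink's {"s1": ("r1")}: the parentheses there do not make a tuple, so it is equivalent to the string form. A small illustration:

    # Parentheses alone do not create a tuple; only the trailing comma does.
    assert ("r1") == "r1"
    assert isinstance(("r1",), tuple) and not isinstance(("r1"), tuple)

    # Topology description in the same shape the tests pass to Topogen:
    # switch name -> router(s) connected to that switch.
    topodef = {
        "s1": ("r1", "r2"),  # s1 wired to both r1 and r2
        "s2": "r1",          # single router, string form
    }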
diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py
index cc12189fd..ae891d906 100644
--- a/tests/topotests/zebra_rib/test_zebra_rib.py
+++ b/tests/topotests/zebra_rib/test_zebra_rib.py
@@ -47,12 +47,9 @@ from time import sleep
pytestmark = [pytest.mark.sharpd]
-
def setup_module(mod):
"Sets up the pytest environment"
- topodef = {
- "s1": ("r1", "r1", "r1", "r1", "r1", "r1", "r1", "r1")
- }
+ topodef = {"s1": ("r1", "r1", "r1", "r1", "r1", "r1", "r1", "r1")}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
@@ -88,16 +85,29 @@ def test_zebra_kernel_admin_distance():
distance = 255
metric = 8192
+
def makekmetric(dist, metric):
return (dist << 24) + metric
- r1.run("ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric " + str(makekmetric(255, 8192)))
+ r1.run(
+ "ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric "
+ + str(makekmetric(255, 8192))
+ )
# Route with 1/1 metric
- r1.run("ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric " + str(makekmetric(1, 1)))
+ r1.run(
+ "ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric "
+ + str(makekmetric(1, 1))
+ )
# Route with 10/1 metric
- r1.run("ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric " + str(makekmetric(10, 1)))
+ r1.run(
+ "ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric "
+ + str(makekmetric(10, 1))
+ )
# Same route with a 160/1 metric
- r1.run("ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric " + str(makekmetric(160, 1)))
+ r1.run(
+ "ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric "
+ + str(makekmetric(160, 1))
+ )
# Currently I believe we have a bug here with the same route and different
# metric. That needs to be properly resolved. Making a note for
@@ -184,6 +194,7 @@ def test_route_map_usage():
logger.info(
"Does the show route-map static command run the correct number of times"
)
+
def check_static_map_correct_runs():
actual = r1.vtysh_cmd("show route-map static")
actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
@@ -193,13 +204,17 @@ def test_route_map_usage():
title1="Actual Route-map output",
title2="Expected Route-map output",
)
- ok, result = topotest.run_and_expect(check_static_map_correct_runs, "", count=5, wait=1)
+
+ ok, result = topotest.run_and_expect(
+ check_static_map_correct_runs, "", count=5, wait=1
+ )
assert ok, result
sharp_rmapfile = "%s/r1/sharp_rmap.ref" % (thisDir)
expected = open(sharp_rmapfile).read().rstrip()
expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
logger.info("Does the show route-map sharp command run the correct number of times")
+
def check_sharp_map_correct_runs():
actual = r1.vtysh_cmd("show route-map sharp")
actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
@@ -209,7 +224,10 @@ def test_route_map_usage():
title1="Actual Route-map output",
title2="Expected Route-map output",
)
- ok, result = topotest.run_and_expect(check_sharp_map_correct_runs, "", count=5, wait=1)
+
+ ok, result = topotest.run_and_expect(
+ check_sharp_map_correct_runs, "", count=5, wait=1
+ )
assert ok, result
logger.info(
@@ -225,6 +243,7 @@ def test_route_map_usage():
sharp_ipfile = "%s/r1/iproute.ref" % (thisDir)
expected = open(sharp_ipfile).read().rstrip()
expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
+
def check_routes_installed():
actual = r1.run("ip route show")
actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
@@ -240,8 +259,12 @@ def test_route_map_usage():
actual = re.sub(r" metric", " metric", actual)
actual = re.sub(r" link ", " link ", actual)
return topotest.get_textdiff(
- actual, expected, title1="Actual ip route show", title2="Expected ip route show"
+ actual,
+ expected,
+ title1="Actual ip route show",
+ title2="Expected ip route show",
)
+
ok, result = topotest.run_and_expect(check_routes_installed, "", count=5, wait=1)
assert ok, result
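The reflowed r1.run() calls in the zebra_rib hunk all build their "ip route add ... metric" argument with makekmetric(dist, metric), which packs the intended admin distance into the top byte of the 32-bit kernel metric (the test name suggests zebra derives the distance of these kernel routes from that byte). The arithmetic itself is easy to check; the decode helper below is added only for illustration:

    def makekmetric(dist, metric):
        # Same encoding as the test: distance in bits 31-24, metric below.
        return (dist << 24) + metric

    def splitkmetric(kmetric):
        # Illustrative inverse, valid while metric < 2**24.
        return kmetric >> 24, kmetric & 0x00FFFFFF

    assert makekmetric(255, 8192) == 0xFF002000 == 4278198272
    assert splitkmetric(makekmetric(10, 1)) == (10, 1)
    assert splitkmetric(makekmetric(160, 1)) == (160, 1)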
diff --git a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
index 68212c9a8..cdad988b8 100755
--- a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
+++ b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
@@ -55,9 +55,15 @@ def setup_module(mod):
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
- router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ router.run(
+ "/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname)))
+ )
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
tgen.start_router()
@@ -74,26 +80,33 @@ def test_zebra_seg6local_routes():
r1 = tgen.gears["r1"]
def check(router, dest, nh, sid, expected):
- router.vtysh_cmd("sharp install seg6-routes {} "\
- "nexthop-seg6 {} encap {} 1".format(dest, nh, sid))
+ router.vtysh_cmd(
+ "sharp install seg6-routes {} "
+ "nexthop-seg6 {} encap {} 1".format(dest, nh, sid)
+ )
output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
- output = output.get('{}/128'.format(dest))
+ output = output.get("{}/128".format(dest))
if output is None:
return False
return topotest.json_cmp(output, expected)
manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
for manifest in manifests:
- logger.info("CHECK {} {} {}".format(manifest['in']['dest'],
- manifest['in']['nh'],
- manifest['in']['sid']))
- test_func = partial(check, r1,
- manifest['in']['dest'],
- manifest['in']['nh'],
- manifest['in']['sid'],
- manifest['out'])
+ logger.info(
+ "CHECK {} {} {}".format(
+ manifest["in"]["dest"], manifest["in"]["nh"], manifest["in"]["sid"]
+ )
+ )
+ test_func = partial(
+ check,
+ r1,
+ manifest["in"]["dest"],
+ manifest["in"]["nh"],
+ manifest["in"]["sid"],
+ manifest["out"],
+ )
success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
if __name__ == "__main__":
diff --git a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
index 32ae8a86e..1062c306a 100755
--- a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
+++ b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
@@ -55,9 +55,15 @@ def setup_module(mod):
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
- router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ router.run(
+ "/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname)))
+ )
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
tgen.start_router()
@@ -74,24 +80,30 @@ def test_zebra_seg6local_routes():
r1 = tgen.gears["r1"]
def check(router, dest, context, expected):
- router.vtysh_cmd("sharp install seg6local-routes {} "\
- "nexthop-seg6local dum0 {} 1".format(dest, context))
+ router.vtysh_cmd(
+ "sharp install seg6local-routes {} "
+ "nexthop-seg6local dum0 {} 1".format(dest, context)
+ )
output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
- output = output.get('{}/128'.format(dest))
+ output = output.get("{}/128".format(dest))
if output is None:
return False
return topotest.json_cmp(output, expected)
manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
for manifest in manifests:
- logger.info("CHECK {} {}".format(manifest['in']['dest'],
- manifest['in']['context']))
- test_func = partial(check, r1,
- manifest['in']['dest'],
- manifest['in']['context'],
- manifest['out'])
+ logger.info(
+ "CHECK {} {}".format(manifest["in"]["dest"], manifest["in"]["context"])
+ )
+ test_func = partial(
+ check,
+ r1,
+ manifest["in"]["dest"],
+ manifest["in"]["context"],
+ manifest["out"],
+ )
success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
if __name__ == "__main__":
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 6d99f866a..da51c231d 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -45,6 +45,7 @@ from pprint import pformat
def iteritems(d):
return iter(d.items())
+
log = logging.getLogger(__name__)
@@ -556,49 +557,26 @@ end
"router ospf6": {},
"router eigrp ": {},
"router babel": {},
- "mpls ldp": {
- "address-family ": {
- "interface ": {}
- }
- },
- "l2vpn ": {
- "member pseudowire ": {}
- },
- "key chain ": {
- "key ": {}
- },
+ "mpls ldp": {"address-family ": {"interface ": {}}},
+ "l2vpn ": {"member pseudowire ": {}},
+ "key chain ": {"key ": {}},
"vrf ": {},
- "interface ": {
- "link-params": {}
- },
+ "interface ": {"link-params": {}},
"pseudowire ": {},
"segment-routing": {
"traffic-eng": {
"segment-list ": {},
- "policy ": {
- "candidate-path ": {}
- },
- "pcep": {
- "pcc": {},
- "pce ": {},
- "pce-config ": {}
- }
+ "policy ": {"candidate-path ": {}},
+ "pcep": {"pcc": {}, "pce ": {}, "pce-config ": {}},
},
- "srv6": {
- "locators": {
- "locator ": {}
- }
- }
+ "srv6": {"locators": {"locator ": {}}},
},
"nexthop-group ": {},
"route-map ": {},
"pbr-map ": {},
"rpki": {},
- "bfd": {
- "peer ": {},
- "profile ": {}
- },
- "line vty": {}
+ "bfd": {"peer ": {}, "profile ": {}},
+ "line vty": {},
}
# stack of context keys
@@ -1890,7 +1868,9 @@ if __name__ == "__main__":
nolines = [x.strip() for x in nolines]
# For topotests leave these lines in (don't delete them)
# [chopps: why is "log file" more special than other "log" commands?]
- nolines = [x for x in nolines if "debug" not in x and "log file" not in x]
+ nolines = [
+ x for x in nolines if "debug" not in x and "log file" not in x
+ ]
if not nolines:
continue
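The big dictionary collapsed in the frr-reload.py hunk above reads as a table of context-entering keywords: each key is a line prefix that opens a configuration context, and its value lists the sub-contexts allowed inside it, so "mpls ldp" -> "address-family " -> "interface " mirrors how the running config nests. A rough, hypothetical sketch of how such a table could be consulted while parsing lines (this is not frr-reload's actual parser):

    # Trimmed copy of the keyword table shown in the hunk.
    ctx_keywords = {
        "mpls ldp": {"address-family ": {"interface ": {}}},
        "key chain ": {"key ": {}},
        "interface ": {"link-params": {}},
        "bfd": {"peer ": {}, "profile ": {}},
    }

    def subcontexts_for(line, level):
        """Return the sub-context table if `line` opens a nested context at
        this level, else None.  Illustrative only."""
        for keyword, nested in level.items():
            if line.startswith(keyword):
                return nested
        return None

    ldp = subcontexts_for("mpls ldp", ctx_keywords)
    assert ldp == {"address-family ": {"interface ": {}}}
    assert subcontexts_for("address-family ipv4", ldp) == {"interface ": {}}
    assert subcontexts_for("no shutdown", ctx_keywords) is None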