-rw-r--r--  alpine/APKBUILD.in | 9
-rw-r--r--  bgpd/bgp_attr.c | 4
-rw-r--r--  bgpd/bgp_mac.c | 21
-rw-r--r--  bgpd/bgp_mplsvpn.c | 18
-rw-r--r--  bgpd/bgp_pbr.c | 3
-rw-r--r--  bgpd/bgp_regex.h | 15
-rw-r--r--  bgpd/bgp_route.c | 13
-rw-r--r--  bgpd/bgp_route.h | 7
-rw-r--r--  bgpd/bgp_routemap.c | 9
-rw-r--r--  bgpd/bgp_rpki.c | 117
-rw-r--r--  bgpd/bgpd.c | 22
-rw-r--r--  bgpd/bgpd.h | 3
-rw-r--r--  configure.ac | 12
-rw-r--r--  debian/frr.pam | 1
-rw-r--r--  doc/user/installation.rst | 7
-rw-r--r--  doc/user/zebra.rst | 30
-rw-r--r--  docker/alpine/Dockerfile | 6
-rw-r--r--  isisd/isis_adjacency.c | 40
-rw-r--r--  isisd/isis_adjacency.h | 1
-rw-r--r--  isisd/isis_circuit.c | 39
-rw-r--r--  isisd/isis_circuit.h | 5
-rw-r--r--  isisd/isis_lfa.c | 18
-rw-r--r--  isisd/isis_route.c | 99
-rw-r--r--  isisd/isis_route.h | 5
-rw-r--r--  isisd/isis_spf.c | 27
-rw-r--r--  isisd/isis_spf.h | 4
-rw-r--r--  isisd/isisd.c | 19
-rw-r--r--  isisd/isisd.h | 3
-rw-r--r--  lib/frrstr.c | 9
-rw-r--r--  lib/frrstr.h | 9
-rw-r--r--  lib/srv6.c | 8
-rw-r--r--  lib/srv6.h | 20
-rw-r--r--  lib/vty.c | 9
-rw-r--r--  lib/vty.h | 9
-rw-r--r--  lib/zclient.c | 2
-rw-r--r--  ospf6d/ospf6_interface.c | 18
-rw-r--r--  ospf6d/ospf6_nssa.h | 2
-rw-r--r--  ospfd/ospf_abr.h | 2
-rw-r--r--  ospfd/ospf_lsa.c | 4
-rw-r--r--  pimd/pim_iface.c | 4
-rw-r--r--  pimd/pim_igmp_mtrace.c | 19
-rw-r--r--  pimd/pim_nb_config.c | 3
-rw-r--r--  pimd/pim_nht.c | 49
-rw-r--r--  pimd/pim_nht.h | 7
-rw-r--r--  pimd/pim_rpf.c | 17
-rw-r--r--  redhat/frr.pam | 1
-rw-r--r--  tests/isisd/test_isis_spf.refout | 180
-rw-r--r--  tests/topotests/isis_lfa_topo1/rt1/bfdd.conf | 6
-rw-r--r--  tests/topotests/isis_lfa_topo1/rt1/step14/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_lfa_topo1/rt1/step15/show_ipv6_route.ref.diff | 50
-rw-r--r--  tests/topotests/isis_lfa_topo1/rt1/step16/show_ipv6_route.ref.diff | 53
-rw-r--r--  tests/topotests/isis_lfa_topo1/rt2/bfdd.conf | 6
-rwxr-xr-x  tests/topotests/isis_lfa_topo1/test_isis_lfa_topo1.py | 412
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step10/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step10/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step10/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step11/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step11/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step11/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step12/show_ip_route.ref.diff | 19
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step12/show_ipv6_route.ref.diff | 18
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt1/step12/show_mpls_table.ref.diff | 28
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step10/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step10/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step10/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step11/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step11/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step11/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step12/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step12/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt2/step12/show_mpls_table.ref.diff | 20
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step10/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step10/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step10/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step11/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step11/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step11/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step12/show_ip_route.ref.diff | 58
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step12/show_ipv6_route.ref.diff | 45
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt3/step12/show_mpls_table.ref.diff | 60
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step10/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step10/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step10/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step11/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step11/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step11/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step12/show_ip_route.ref.diff | 144
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step12/show_ipv6_route.ref.diff | 50
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt4/step12/show_mpls_table.ref.diff | 78
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/bfdd.conf | 14
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step10/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step10/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step10/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step11/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step11/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step11/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step12/show_ip_route.ref.diff | 151
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step12/show_ipv6_route.ref.diff | 53
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt5/step12/show_mpls_table.ref.diff | 80
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/bfdd.conf | 14
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step10/show_ip_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step10/show_ipv6_route.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step10/show_mpls_table.ref.diff | 0
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step11/show_ip_route.ref.diff | 125
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step11/show_ipv6_route.ref.diff | 56
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step11/show_mpls_table.ref.diff | 106
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step12/show_ip_route.ref.diff | 153
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step12/show_ipv6_route.ref.diff | 66
-rw-r--r--  tests/topotests/isis_tilfa_topo1/rt6/step12/show_mpls_table.ref.diff | 78
-rwxr-xr-x  tests/topotests/isis_tilfa_topo1/test_isis_tilfa_topo1.py | 367
-rw-r--r--  tests/topotests/lib/common_config.py | 160
-rw-r--r--  tests/topotests/lib/pim.py | 1123
-rw-r--r--  tests/topotests/multicast_pim6_static_rp_topo1/__init__.py | 0
-rw-r--r--  tests/topotests/multicast_pim6_static_rp_topo1/multicast_pim6_static_rp.json (renamed from tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json) | 0
-rwxr-xr-x  tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py | 1321
-rwxr-xr-x  tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py | 1324
-rwxr-xr-x  tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py | 414
-rw-r--r--  tests/topotests/pytest.ini | 1
-rw-r--r--  tests/topotests/srv6_locator_usid/__init__.py | 0
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_1.json | 1
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_2.json | 8
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_3.json | 1
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_4.json | 1
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_5.json | 2
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_6.json | 2
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_7.json | 2
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_chunks_8.json | 2
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_1.json | 20
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_2.json | 20
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_3.json | 20
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_4.json | 35
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_5.json | 36
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_6.json | 35
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_7.json | 19
-rw-r--r--  tests/topotests/srv6_locator_usid/expected_locators_8.json | 4
-rw-r--r--  tests/topotests/srv6_locator_usid/r1/setup.sh | 2
-rw-r--r--  tests/topotests/srv6_locator_usid/r1/sharpd.conf | 7
-rw-r--r--  tests/topotests/srv6_locator_usid/r1/zebra.conf | 20
-rwxr-xr-x  tests/topotests/srv6_locator_usid/test_srv6_locator_usid.py | 276
-rw-r--r--  tools/etc/frr/support_bundle_commands.conf | 31
-rwxr-xr-x  tools/frrcommon.sh.in | 2
-rw-r--r--  zebra/dplane_fpm_nl.c | 14
-rw-r--r--  zebra/netconf_netlink.c | 2
-rw-r--r--  zebra/zapi_msg.c | 1
-rw-r--r--  zebra/zebra_evpn_mh.c | 6
-rw-r--r--  zebra/zebra_srv6.c | 52
-rw-r--r--  zebra/zebra_srv6.h | 3
-rw-r--r--  zebra/zebra_srv6_vty.c | 38
148 files changed, 7515 insertions, 729 deletions
diff --git a/alpine/APKBUILD.in b/alpine/APKBUILD.in
index 51986de2d..3aad9549b 100644
--- a/alpine/APKBUILD.in
+++ b/alpine/APKBUILD.in
@@ -15,8 +15,8 @@ makedepends="ncurses-dev net-snmp-dev gawk texinfo perl
libcap-dev libcurl libedit libffi libgcc libgomp libisoburn libisofs
libltdl libressl libssh2 libstdc++ libtool libuuid
linux-headers lzip lzo m4 make mkinitfs mpc1 mpfr4 mtools musl-dev
- ncurses-libs ncurses-terminfo ncurses-terminfo-base patch pax-utils pcre
- perl pkgconf python3 python3-dev readline readline-dev sqlite-libs
+ ncurses-libs ncurses-terminfo ncurses-terminfo-base patch pax-utils pcre2
+ perl pkgconf python3 python3-dev readline readline-dev sqlite-libs pcre2-dev
squashfs-tools sudo tar texinfo xorriso xz-libs py-pip rtrlib rtrlib-dev
py3-sphinx elfutils elfutils-dev libyang-dev"
checkdepends="pytest py-setuptools"
@@ -46,8 +46,9 @@ build() {
--enable-multipath=64 \
--enable-vty-group=frrvty \
--enable-user=$_user \
- --enable-group=$_user
- make
+ --enable-group=$_user \
+ --enable-pcre2posix
+ make -j $(nproc)
}
check() {
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index e9050c5ae..89aed1ba6 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -4507,7 +4507,9 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
stream_put(s, &attr->srv6_l3vpn->sid,
sizeof(attr->srv6_l3vpn->sid)); /* sid */
stream_putc(s, 0); /* sid_flags */
- stream_putw(s, 0xffff); /* endpoint */
+ stream_putw(s,
+ attr->srv6_l3vpn
+ ->endpoint_behavior); /* endpoint */
stream_putc(s, 0); /* reserved */
stream_putc(
s,
diff --git a/bgpd/bgp_mac.c b/bgpd/bgp_mac.c
index 02b7e6486..b9649ac4d 100644
--- a/bgpd/bgp_mac.c
+++ b/bgpd/bgp_mac.c
@@ -242,19 +242,18 @@ static void bgp_mac_rescan_evpn_table(struct bgp *bgp, struct ethaddr *macaddr)
if (!peer_established(peer))
continue;
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_SOFT_RECONFIG)) {
- if (bgp_debug_update(peer, NULL, NULL, 1))
- zlog_debug("Processing EVPN MAC interface change on peer %s (inbound, soft-reconfig)",
- peer->host);
-
- bgp_soft_reconfig_in(peer, afi, safi);
- } else {
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug(
+ "Processing EVPN MAC interface change on peer %s %s",
+ peer->host,
+ CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_SOFT_RECONFIG)
+ ? "(inbound, soft-reconfig)"
+ : "");
+
+ if (!bgp_soft_reconfig_in(peer, afi, safi)) {
struct bgp_table *table = bgp->rib[afi][safi];
- if (bgp_debug_update(peer, NULL, NULL, 1))
- zlog_debug("Processing EVPN MAC interface change on peer %s",
- peer->host);
bgp_process_mac_rescan_table(bgp, peer, table, macaddr);
}
}
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 9a25450af..18cb90763 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1554,13 +1554,22 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
/* Set SID for SRv6 VPN */
if (from_bgp->vpn_policy[afi].tovpn_sid_locator) {
+ struct srv6_locator_chunk *locator =
+ from_bgp->vpn_policy[afi].tovpn_sid_locator;
encode_label(
from_bgp->vpn_policy[afi].tovpn_sid_transpose_label,
&label);
static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN,
sizeof(struct bgp_attr_srv6_l3vpn));
static_attr.srv6_l3vpn->sid_flags = 0x00;
- static_attr.srv6_l3vpn->endpoint_behavior = 0xffff;
+ static_attr.srv6_l3vpn->endpoint_behavior =
+ afi == AFI_IP
+ ? (CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID)
+ ? SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID
+ : SRV6_ENDPOINT_BEHAVIOR_END_DT4)
+ : (CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID)
+ ? SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID
+ : SRV6_ENDPOINT_BEHAVIOR_END_DT6);
static_attr.srv6_l3vpn->loc_block_len =
from_bgp->vpn_policy[afi]
.tovpn_sid_locator->block_bits_length;
@@ -1587,12 +1596,17 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
.tovpn_sid_locator->prefix.prefix,
sizeof(struct in6_addr));
} else if (from_bgp->tovpn_sid_locator) {
+ struct srv6_locator_chunk *locator =
+ from_bgp->tovpn_sid_locator;
encode_label(from_bgp->tovpn_sid_transpose_label, &label);
static_attr.srv6_l3vpn =
XCALLOC(MTYPE_BGP_SRV6_L3VPN,
sizeof(struct bgp_attr_srv6_l3vpn));
static_attr.srv6_l3vpn->sid_flags = 0x00;
- static_attr.srv6_l3vpn->endpoint_behavior = 0xffff;
+ static_attr.srv6_l3vpn->endpoint_behavior =
+ CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID)
+ ? SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID
+ : SRV6_ENDPOINT_BEHAVIOR_END_DT46;
static_attr.srv6_l3vpn->loc_block_len =
from_bgp->tovpn_sid_locator->block_bits_length;
static_attr.srv6_l3vpn->loc_node_len =
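
The same AFI/flag decision appears in both branches above (per-AFI and per-VRF SID export). As a reading aid, here is a hypothetical helper condensing the per-AFI case, using only the SRV6_LOCATOR_USID flag and the codepoints added to lib/srv6.h later in this diff (sketch, not part of the patch):

/* Hypothetical helper mirroring the nested ternaries above (per-AFI
 * case); the per-VRF branch picks End.DT46 / uDT46 instead. */
static uint16_t tovpn_sid_endpoint_behavior(afi_t afi, uint8_t locator_flags)
{
	bool usid = CHECK_FLAG(locator_flags, SRV6_LOCATOR_USID);

	if (afi == AFI_IP)
		return usid ? SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID
			    : SRV6_ENDPOINT_BEHAVIOR_END_DT4;
	return usid ? SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID
		    : SRV6_ENDPOINT_BEHAVIOR_END_DT6;
}

Whichever value is chosen, it replaces the former hard-coded 0xffff (SRV6_ENDPOINT_BEHAVIOR_OPAQUE), and the bgp_attr.c hunk above now puts that value on the wire instead of the constant.
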
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index 7b5e28724..b71e19ab3 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -2074,6 +2074,9 @@ static void bgp_pbr_icmp_action(struct bgp *bgp, struct bgp_path_info *path,
bgp, path, bpf);
}
}
+
+ bpf->src_port = NULL;
+ bpf->dst_port = NULL;
}
static void bgp_pbr_policyroute_remove_from_zebra_recursive(
diff --git a/bgpd/bgp_regex.h b/bgpd/bgp_regex.h
index 43ebb9ac9..e07b7f911 100644
--- a/bgpd/bgp_regex.h
+++ b/bgpd/bgp_regex.h
@@ -18,19 +18,24 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef _QUAGGA_BGP_REGEX_H
-#define _QUAGGA_BGP_REGEX_H
+#ifndef _FRR_BGP_REGEX_H
+#define _FRR_BGP_REGEX_H
#include <zebra.h>
-#ifdef HAVE_LIBPCREPOSIX
+#ifdef HAVE_LIBPCRE2_POSIX
+#ifndef _FRR_PCRE2_POSIX
+#define _FRR_PCRE2_POSIX
+#include <pcre2posix.h>
+#endif /* _FRR_PCRE2_POSIX */
+#elif defined(HAVE_LIBPCREPOSIX)
#include <pcreposix.h>
#else
#include <regex.h>
-#endif /* HAVE_LIBPCREPOSIX */
+#endif /* HAVE_LIBPCRE2_POSIX */
extern void bgp_regex_free(regex_t *regex);
extern regex_t *bgp_regcomp(const char *str);
extern int bgp_regexec(regex_t *regex, struct aspath *aspath);
-#endif /* _QUAGGA_BGP_REGEX_H */
+#endif /* _FRR_BGP_REGEX_H */
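
All three backends selected by this include ladder expose the same POSIX regex surface, which is why only the headers change while bgp_regcomp()/bgp_regexec() keep their signatures. A minimal sketch of the call pattern that stays valid under any of them (illustration only, not code from this change):

/* Compiles identically against pcre2posix.h, pcreposix.h or <regex.h>. */
static bool pattern_matches(const char *pattern, const char *text)
{
	regex_t re;
	bool match;

	if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB) != 0)
		return false;

	match = (regexec(&re, text, 0, NULL, 0) == 0);
	regfree(&re);
	return match;
}
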
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 1a2408ade..130a0b4ab 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -5342,7 +5342,10 @@ void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp,
}
}
-void bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi)
+/*
+ * Returns false if the peer is not configured for soft reconfig in
+ */
+bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi)
{
struct bgp_dest *dest;
struct bgp_table *table;
@@ -5350,14 +5353,14 @@ void bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi)
struct peer *npeer;
struct peer_af *paf;
- if (!peer_established(peer))
- return;
+ if (!CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG))
+ return false;
if ((safi != SAFI_MPLS_VPN) && (safi != SAFI_ENCAP)
&& (safi != SAFI_EVPN)) {
table = peer->bgp->rib[afi][safi];
if (!table)
- return;
+ return true;
table->soft_reconfig_init = true;
@@ -5417,6 +5420,8 @@ void bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi)
bgp_soft_reconfig_table(peer, afi, safi, table, &prd);
}
+
+ return true;
}
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index dfe741914..c85551634 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -676,7 +676,12 @@ extern void bgp_default_originate(struct peer *, afi_t, safi_t, int);
extern void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp,
const struct bgp_table *table,
const struct peer *peer);
-extern void bgp_soft_reconfig_in(struct peer *, afi_t, safi_t);
+
+/*
+ * If this peer is configured for soft reconfig in then do the work
+ * and return true. If it is not return false; and do nothing
+ */
+extern bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi);
extern void bgp_clear_route(struct peer *, afi_t, safi_t);
extern void bgp_clear_route_all(struct peer *);
extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t);
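
The signature change ripples through every caller later in this diff (bgp_mac.c, bgp_rpki.c, bgpd.c): instead of testing PEER_FLAG_SOFT_RECONFIG themselves, callers attempt the soft reconfig and branch on the result. A condensed sketch of the new calling convention (illustration, not patch code):

/* bgp_soft_reconfig_in() now returns false when the peer has no
 * PEER_FLAG_SOFT_RECONFIG for this AFI/SAFI, so the flag test and the
 * fallback collapse into a single branch at each call site. */
static void replay_or_fallback(struct peer *peer, afi_t afi, safi_t safi)
{
	if (bgp_soft_reconfig_in(peer, afi, safi))
		return; /* Adj-RIB-In was (re)processed. */

	/* Not configured for inbound soft reconfiguration: fall back to
	 * what the caller's old else-branch did, e.g. rescan the table
	 * (bgp_mac.c) or request a route refresh (bgpd.c). */
}
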
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index aff09206e..b736e6c38 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -30,11 +30,16 @@
#include "log.h"
#include "frrlua.h"
#include "frrscript.h"
-#ifdef HAVE_LIBPCREPOSIX
+#ifdef HAVE_LIBPCRE2_POSIX
+#ifndef _FRR_PCRE2_POSIX
+#define _FRR_PCRE2_POSIX
+#include <pcre2posix.h>
+#endif /* _FRR_PCRE2_POSIX */
+#elif defined(HAVE_LIBPCREPOSIX)
#include <pcreposix.h>
#else
#include <regex.h>
-#endif /* HAVE_LIBPCREPOSIX */
+#endif /* HAVE_LIBPCRE2_POSIX */
#include "buffer.h"
#include "sockunion.h"
#include "hash.h"
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 2acf74c52..73c6fe0c4 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -62,6 +62,7 @@
DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE, "BGP RPKI Cache server");
DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group");
DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_RTRLIB, "BGP RPKI RTRLib");
+DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_REVALIDATE, "BGP RPKI Revalidation");
#define POLLING_PERIOD_DEFAULT 3600
#define EXPIRE_INTERVAL_DEFAULT 7200
@@ -371,10 +372,9 @@ inline bool is_stopping(void)
return rtr_is_stopping;
}
-static struct prefix *pfx_record_to_prefix(struct pfx_record *record)
+static void pfx_record_to_prefix(struct pfx_record *record,
+ struct prefix *prefix)
{
- struct prefix *prefix = prefix_new();
-
prefix->prefixlen = record->min_len;
if (record->prefix.ver == LRTR_IPV4) {
@@ -385,15 +385,41 @@ static struct prefix *pfx_record_to_prefix(struct pfx_record *record)
ipv6_addr_to_network_byte_order(record->prefix.u.addr6.addr,
prefix->u.prefix6.s6_addr32);
}
+}
+
+struct rpki_revalidate_prefix {
+ struct bgp *bgp;
+ struct prefix prefix;
+ afi_t afi;
+ safi_t safi;
+};
+
+static void rpki_revalidate_prefix(struct thread *thread)
+{
+ struct rpki_revalidate_prefix *rrp = THREAD_ARG(thread);
+ struct bgp_dest *match, *node;
+
+ match = bgp_table_subtree_lookup(rrp->bgp->rib[rrp->afi][rrp->safi],
+ &rrp->prefix);
- return prefix;
+ node = match;
+
+ while (node) {
+ if (bgp_dest_has_bgp_path_info_data(node)) {
+ revalidate_bgp_node(node, rrp->afi, rrp->safi);
+ }
+
+ node = bgp_route_next_until(node, match);
+ }
+
+ XFREE(MTYPE_BGP_RPKI_REVALIDATE, rrp);
}
static void bgpd_sync_callback(struct thread *thread)
{
struct bgp *bgp;
struct listnode *node;
- struct prefix *prefix;
+ struct prefix prefix;
struct pfx_record rec;
thread_add_read(bm->master, bgpd_sync_callback, NULL,
@@ -416,7 +442,7 @@ static void bgpd_sync_callback(struct thread *thread)
RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd");
return;
}
- prefix = pfx_record_to_prefix(&rec);
+ pfx_record_to_prefix(&rec, &prefix);
afi_t afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
@@ -425,30 +451,20 @@ static void bgpd_sync_callback(struct thread *thread)
for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
struct bgp_table *table = bgp->rib[afi][safi];
+ struct rpki_revalidate_prefix *rrp;
if (!table)
continue;
- struct bgp_dest *match;
- struct bgp_dest *node;
-
- match = bgp_table_subtree_lookup(table, prefix);
- node = match;
-
- while (node) {
- if (bgp_dest_has_bgp_path_info_data(node)) {
- revalidate_bgp_node(node, afi, safi);
- }
-
- node = bgp_route_next_until(node, match);
- }
-
- if (match)
- bgp_dest_unlock_node(match);
+ rrp = XCALLOC(MTYPE_BGP_RPKI_REVALIDATE, sizeof(*rrp));
+ rrp->bgp = bgp;
+ rrp->prefix = prefix;
+ rrp->afi = afi;
+ rrp->safi = safi;
+ thread_add_event(bm->master, rpki_revalidate_prefix,
+ rrp, 0, &bgp->t_revalidate[afi][safi]);
}
}
-
- prefix_free(&prefix);
}
static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi,
@@ -473,6 +489,31 @@ static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi,
}
}
+/*
+ * The act of a soft reconfig in revalidation is really expensive
+ * coupled with the fact that the download of a full rpki state
+ * from a rpki server can be expensive, let's break up the revalidation
+ * to a point in time in the future to allow other bgp events
+ * to take place too.
+ */
+struct rpki_revalidate_peer {
+ afi_t afi;
+ safi_t safi;
+ struct peer *peer;
+};
+
+static void bgp_rpki_revalidate_peer(struct thread *thread)
+{
+ struct rpki_revalidate_peer *rvp = THREAD_ARG(thread);
+
+ /*
+ * Here's the expensive bit of gnomish deviousness
+ */
+ bgp_soft_reconfig_in(rvp->peer, rvp->afi, rvp->safi);
+
+ XFREE(MTYPE_BGP_RPKI_REVALIDATE, rvp);
+}
+
static void revalidate_all_routes(void)
{
struct bgp *bgp;
@@ -483,18 +524,28 @@ static void revalidate_all_routes(void)
struct listnode *peer_listnode;
for (ALL_LIST_ELEMENTS_RO(bgp->peer, peer_listnode, peer)) {
+ afi_t afi;
+ safi_t safi;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ struct rpki_revalidate_peer *rvp;
+
+ if (!bgp->rib[afi][safi])
+ continue;
- for (size_t i = 0; i < 2; i++) {
- safi_t safi;
- afi_t afi = (i == 0) ? AFI_IP : AFI_IP6;
+ if (!peer_established(peer))
+ continue;
- for (safi = SAFI_UNICAST; safi < SAFI_MAX;
- safi++) {
- if (!peer->bgp->rib[afi][safi])
- continue;
+ rvp = XCALLOC(MTYPE_BGP_RPKI_REVALIDATE,
+ sizeof(*rvp));
+ rvp->peer = peer;
+ rvp->afi = afi;
+ rvp->safi = safi;
- bgp_soft_reconfig_in(peer, afi, safi);
- }
+ thread_add_event(
+ bm->master, bgp_rpki_revalidate_peer,
+ rvp, 0,
+ &peer->t_revalidate_all[afi][safi]);
}
}
}
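
Both new paths (per-prefix revalidation from the sync socket and whole-peer revalidation after a cache update) defer the heavy lifting the same way; the shape, reduced to its essentials (sketch, not additional patch code):

/* Package the arguments, schedule a zero-delay event on the main
 * thread master, and free the package in the callback, so the RPKI
 * sync callback itself stays cheap. */
struct deferred_revalidation {
	struct peer *peer;
	afi_t afi;
	safi_t safi;
};

static void deferred_revalidation_run(struct thread *thread)
{
	struct deferred_revalidation *ctx = THREAD_ARG(thread);

	bgp_soft_reconfig_in(ctx->peer, ctx->afi, ctx->safi);
	XFREE(MTYPE_BGP_RPKI_REVALIDATE, ctx);
}

Storing the scheduled event in bgp->t_revalidate[afi][safi] and peer->t_revalidate_all[afi][safi] is what lets bgp_delete() and peer_delete() cancel still-pending revalidations with THREAD_OFF(), as the bgpd.c and bgpd.h hunks below show.
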
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 8273c78f1..6ad1cf2c0 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1127,6 +1127,8 @@ static void peer_free(struct peer *peer)
bgp_timer_set(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
+ FOREACH_AFI_SAFI (afi, safi)
+ THREAD_OFF(peer->t_revalidate_all[afi][safi]);
assert(!peer->t_write);
assert(!peer->t_read);
BGP_EVENT_FLUSH(peer);
@@ -2444,6 +2446,8 @@ int peer_delete(struct peer *peer)
bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
+ FOREACH_AFI_SAFI (afi, safi)
+ THREAD_OFF(peer->t_revalidate_all[afi][safi]);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
@@ -3641,6 +3645,9 @@ int bgp_delete(struct bgp *bgp)
hook_call(bgp_inst_delete, bgp);
+ FOREACH_AFI_SAFI (afi, safi)
+ THREAD_OFF(bgp->t_revalidate[afi][safi]);
+
THREAD_OFF(bgp->t_condition_check);
THREAD_OFF(bgp->t_startup);
THREAD_OFF(bgp->t_maxmed_onstartup);
@@ -5517,11 +5524,11 @@ void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi,
if (!peer_established(peer))
return;
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_SOFT_RECONFIG)) {
- bgp_soft_reconfig_in(peer, afi, safi);
- } else if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) ||
- CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) {
+ if (bgp_soft_reconfig_in(peer, afi, safi))
+ return;
+
+ if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) ||
+ CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) {
if (CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ORF_PREFIX_SM_ADV) &&
(CHECK_FLAG(peer->af_cap[afi][safi],
@@ -7777,10 +7784,7 @@ int peer_clear_soft(struct peer *peer, afi_t afi, safi_t safi,
|| stype == BGP_CLEAR_SOFT_IN_ORF_PREFIX) {
/* If neighbor has soft reconfiguration inbound flag.
Use Adj-RIB-In database. */
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_SOFT_RECONFIG))
- bgp_soft_reconfig_in(peer, afi, safi);
- else {
+ if (!bgp_soft_reconfig_in(peer, afi, safi)) {
/* If neighbor has route refresh capability, send route
refresh
message to the peer. */
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 8daf8aeb6..b3fce0734 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -470,6 +470,8 @@ struct bgp {
/* BGP update delay on startup */
struct thread *t_update_delay;
struct thread *t_establish_wait;
+ struct thread *t_revalidate[AFI_MAX][SAFI_MAX];
+
uint8_t update_delay_over;
uint8_t main_zebra_update_hold;
uint8_t main_peers_update_hold;
@@ -1554,6 +1556,7 @@ struct peer {
struct thread *t_gr_restart;
struct thread *t_gr_stale;
struct thread *t_llgr_stale[AFI_MAX][SAFI_MAX];
+ struct thread *t_revalidate_all[AFI_MAX][SAFI_MAX];
struct thread *t_generate_updgrp_packets;
struct thread *t_process_packet;
struct thread *t_process_packet_error;
diff --git a/configure.ac b/configure.ac
index 1a481ecd7..97c8ca451 100644
--- a/configure.ac
+++ b/configure.ac
@@ -712,6 +712,8 @@ AC_ARG_ENABLE([cpu-time],
AS_HELP_STRING([--disable-cpu-time], [disable cpu usage data gathering]))
AC_ARG_ENABLE([pcreposix],
AS_HELP_STRING([--enable-pcreposix], [enable using PCRE Posix libs for regex functions]))
+AC_ARG_ENABLE([pcre2posix],
+ AS_HELP_STRING([--enable-pcre2posix], [enable using PCRE2 Posix libs for regex functions]))
AC_ARG_ENABLE([fpm],
AS_HELP_STRING([--enable-fpm], [enable Forwarding Plane Manager support]))
AC_ARG_ENABLE([werror],
@@ -1659,6 +1661,16 @@ if test "$enable_pcreposix" = "yes"; then
fi
AC_SUBST([HAVE_LIBPCREPOSIX])
+dnl ---------------------------
+dnl check system has PCRE2 regexp
+dnl ---------------------------
+if test "$enable_pcre2posix" = "yes"; then
+ AC_CHECK_LIB([pcre2-posix], [regexec], [], [
+ AC_MSG_ERROR([--enable-pcre2posix given but unable to find libpcre2-posix])
+ ])
+fi
+AC_SUBST([HAVE_LIBPCRE2_POSIX])
+
dnl ##########################################################################
dnl test "$enable_clippy_only" != "yes"
fi
diff --git a/debian/frr.pam b/debian/frr.pam
index 2b106d43b..737b88953 100644
--- a/debian/frr.pam
+++ b/debian/frr.pam
@@ -1,3 +1,4 @@
# Any user may call vtysh but only those belonging to the group frrvty can
# actually connect to the socket and use the program.
auth sufficient pam_permit.so
+account sufficient pam_rootok.so
diff --git a/doc/user/installation.rst b/doc/user/installation.rst
index ba35facf2..8f89c6c4f 100644
--- a/doc/user/installation.rst
+++ b/doc/user/installation.rst
@@ -368,6 +368,13 @@ options from the list below.
Turn on the usage of PCRE Posix libs for regex functionality.
+.. option:: --enable-pcre2posix
+
+ Turn on the usage of PCRE2 Posix libs for regex functionality.
+
+ PCRE2 versions <= 10.31 work a bit differently. We suggest using version
+ 10.36 or later.
+
.. option:: --enable-rpath
Set hardcoded rpaths in the executable [default=yes].
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 0a843b928..db43266d6 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -810,6 +810,36 @@ and this section also helps that case.
!
...
+.. clicmd:: behavior usid
+
+ Specify the SRv6 locator as a Micro-segment (uSID) locator. When a locator is
+ specified as a uSID locator, all the SRv6 SIDs allocated from the locator by the routing
+ protocols are bound to the SRv6 uSID behaviors. For example, if you configure BGP to use
+ a locator specified as a uSID locator, BGP instantiates and advertises SRv6 uSID behaviors
+ (e.g., ``uDT4`` / ``uDT6`` / ``uDT46``) instead of classic SRv6 behaviors
+ (e.g., ``End.DT4`` / ``End.DT6`` / ``End.DT46``).
+
+::
+
+ router# configure terminal
+ router(config)# segment-routing
+ router(config-sr)# srv6
+ router(config-srv6)# locators
+ router(config-srv6-locators)# locator loc1
+ router(config-srv6-locator)# prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16
+ router(config-srv6-locator)# behavior usid
+
+ router(config-srv6-locator)# show run
+ ...
+ segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix fc00:0:1::/48
+ behavior usid
+ !
+ ...
+
.. _multicast-rib-commands:
Multicast RIB Commands
diff --git a/docker/alpine/Dockerfile b/docker/alpine/Dockerfile
index b9278dbb8..238a7fc40 100644
--- a/docker/alpine/Dockerfile
+++ b/docker/alpine/Dockerfile
@@ -1,7 +1,7 @@
# syntax=docker/dockerfile:1
# Create a basic stage set up to build APKs
-FROM alpine:3.15 as alpine-builder
+FROM alpine:3.16 as alpine-builder
RUN apk add \
--update-cache \
abuild \
@@ -13,7 +13,7 @@ RUN apk add \
RUN adduser -D -G abuild builder && su builder -c 'abuild-keygen -a -n'
# This stage builds a dist tarball from the source
-FROM alpine:3.15 as source-builder
+FROM alpine:3.16 as source-builder
RUN mkdir -p /src/alpine
COPY alpine/APKBUILD.in /src/alpine
@@ -48,7 +48,7 @@ RUN cd /dist \
&& abuild -r -P /pkgs/apk
# This stage installs frr from the apk
-FROM alpine:3.15
+FROM alpine:3.16
RUN mkdir -p /pkgs/apk
COPY --from=alpine-apk-builder /pkgs/apk/ /pkgs/apk/
RUN apk add \
diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c
index 00763135e..0957c897e 100644
--- a/isisd/isis_adjacency.c
+++ b/isisd/isis_adjacency.c
@@ -212,6 +212,36 @@ static const char *adj_level2string(int level)
return NULL; /* not reached */
}
+static void isis_adj_route_switchover(struct isis_adjacency *adj)
+{
+ union g_addr ip = {};
+ ifindex_t ifindex;
+ unsigned int i;
+
+ if (!adj->circuit || !adj->circuit->interface)
+ return;
+
+ ifindex = adj->circuit->interface->ifindex;
+
+ for (i = 0; i < adj->ipv4_address_count; i++) {
+ ip.ipv4 = adj->ipv4_addresses[i];
+ isis_circuit_switchover_routes(adj->circuit, AF_INET, &ip,
+ ifindex);
+ }
+
+ for (i = 0; i < adj->ll_ipv6_count; i++) {
+ ip.ipv6 = adj->ll_ipv6_addrs[i];
+ isis_circuit_switchover_routes(adj->circuit, AF_INET6, &ip,
+ ifindex);
+ }
+
+ for (i = 0; i < adj->global_ipv6_count; i++) {
+ ip.ipv6 = adj->global_ipv6_addrs[i];
+ isis_circuit_switchover_routes(adj->circuit, AF_INET6, &ip,
+ ifindex);
+ }
+}
+
void isis_adj_process_threeway(struct isis_adjacency *adj,
struct isis_threeway_adj *tw_adj,
enum isis_adj_usage adj_usage)
@@ -298,6 +328,16 @@ void isis_adj_state_change(struct isis_adjacency **padj,
if (new_state == old_state)
return;
+ if (old_state == ISIS_ADJ_UP &&
+ !CHECK_FLAG(adj->circuit->flags, ISIS_CIRCUIT_IF_DOWN_FROM_Z)) {
+ if (IS_DEBUG_EVENTS)
+ zlog_debug(
+ "ISIS-Adj (%s): Starting fast-reroute on state change %d->%d: %s",
+ circuit->area->area_tag, old_state, new_state,
+ reason ? reason : "unspecified");
+ isis_adj_route_switchover(adj);
+ }
+
adj->adj_state = new_state;
send_hello_sched(circuit, adj->level, TRIGGERED_IIH_DELAY);
diff --git a/isisd/isis_adjacency.h b/isisd/isis_adjacency.h
index 7467a619c..49adc89ae 100644
--- a/isisd/isis_adjacency.h
+++ b/isisd/isis_adjacency.h
@@ -151,5 +151,4 @@ void isis_adj_build_up_list(struct list *adjdb, struct list *list);
int isis_adj_usage2levels(enum isis_adj_usage usage);
void isis_bfd_startup_timer(struct thread *thread);
const char *isis_adj_name(const struct isis_adjacency *adj);
-
#endif /* ISIS_ADJACENCY_H */
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index dcc4ed6e4..fa0f2c998 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -598,6 +598,32 @@ size_t isis_circuit_pdu_size(struct isis_circuit *circuit)
return ISO_MTU(circuit);
}
+static bool isis_circuit_lfa_enabled(struct isis_circuit *circuit, int level)
+{
+ return (circuit->lfa_protection[level - 1] ||
+ circuit->rlfa_protection[level - 1] ||
+ circuit->tilfa_protection[level - 1]);
+}
+
+void isis_circuit_switchover_routes(struct isis_circuit *circuit, int family,
+ union g_addr *nexthop_ip, ifindex_t ifindex)
+{
+ char is_type;
+
+ if (!circuit->area)
+ return;
+
+ is_type = circuit->area->is_type;
+ if ((is_type == IS_LEVEL_1 || is_type == IS_LEVEL_1_AND_2) &&
+ isis_circuit_lfa_enabled(circuit, IS_LEVEL_1))
+ isis_area_switchover_routes(circuit->area, family, nexthop_ip,
+ ifindex, IS_LEVEL_1);
+ if ((is_type == IS_LEVEL_2 || is_type == IS_LEVEL_1_AND_2) &&
+ isis_circuit_lfa_enabled(circuit, IS_LEVEL_2))
+ isis_area_switchover_routes(circuit->area, family, nexthop_ip,
+ ifindex, IS_LEVEL_2);
+}
+
void isis_circuit_stream(struct isis_circuit *circuit, struct stream **stream)
{
size_t stream_size = isis_circuit_pdu_size(circuit);
@@ -1602,17 +1628,26 @@ static int isis_ifp_up(struct interface *ifp)
{
struct isis_circuit *circuit = ifp->info;
- if (circuit)
+ if (circuit) {
+ UNSET_FLAG(circuit->flags, ISIS_CIRCUIT_IF_DOWN_FROM_Z);
isis_csm_state_change(IF_UP_FROM_Z, circuit, ifp);
+ }
return 0;
}
static int isis_ifp_down(struct interface *ifp)
{
+ afi_t afi;
struct isis_circuit *circuit = ifp->info;
- if (circuit) {
+ if (circuit &&
+ !CHECK_FLAG(circuit->flags, ISIS_CIRCUIT_IF_DOWN_FROM_Z)) {
+ SET_FLAG(circuit->flags, ISIS_CIRCUIT_IF_DOWN_FROM_Z);
+ for (afi = AFI_IP; afi <= AFI_IP6; afi++)
+ isis_circuit_switchover_routes(
+ circuit, afi == AFI_IP ? AF_INET : AF_INET6,
+ NULL, ifp->ifindex);
isis_csm_state_change(IF_DOWN_FROM_Z, circuit, ifp);
SET_FLAG(circuit->flags, ISIS_CIRCUIT_FLAPPED_AFTER_SPF);
diff --git a/isisd/isis_circuit.h b/isisd/isis_circuit.h
index 5ff0390c2..b3ad3f7ff 100644
--- a/isisd/isis_circuit.h
+++ b/isisd/isis_circuit.h
@@ -28,6 +28,7 @@
#include "qobj.h"
#include "prefix.h"
#include "ferr.h"
+#include "nexthop.h"
#include "isis_constants.h"
#include "isis_common.h"
@@ -141,6 +142,7 @@ struct isis_circuit {
struct list *ipv6_non_link; /* our non-link local IPv6 addresses */
uint16_t upadjcount[ISIS_LEVELS];
#define ISIS_CIRCUIT_FLAPPED_AFTER_SPF 0x01
+#define ISIS_CIRCUIT_IF_DOWN_FROM_Z 0x02
uint8_t flags;
bool disable_threeway_adj;
struct {
@@ -209,6 +211,9 @@ void isis_circuit_print_vty(struct isis_circuit *circuit, struct vty *vty,
void isis_circuit_print_json(struct isis_circuit *circuit,
struct json_object *json, char detail);
size_t isis_circuit_pdu_size(struct isis_circuit *circuit);
+void isis_circuit_switchover_routes(struct isis_circuit *circuit, int family,
+ union g_addr *nexthop_ip,
+ ifindex_t ifindex);
void isis_circuit_stream(struct isis_circuit *circuit, struct stream **stream);
void isis_circuit_af_set(struct isis_circuit *circuit, bool ip_router,
diff --git a/isisd/isis_lfa.c b/isisd/isis_lfa.c
index c4fadcba0..2ec6dafd3 100644
--- a/isisd/isis_lfa.c
+++ b/isisd/isis_lfa.c
@@ -1836,7 +1836,7 @@ static bool clfa_loop_free_check(struct isis_spftree *spftree,
struct isis_vertex *vertex_S_D,
struct isis_spf_adj *sadj_primary,
struct isis_spf_adj *sadj_N,
- uint32_t *lfa_metric)
+ uint32_t *path_metric)
{
struct isis_spf_node *node_N;
uint32_t dist_N_D;
@@ -1882,7 +1882,7 @@ static bool clfa_loop_free_check(struct isis_spftree *spftree,
dist_N_S, dist_S_D);
if (dist_N_D < (dist_N_S + dist_S_D)) {
- *lfa_metric = sadj_N->metric + dist_N_D;
+ *path_metric = sadj_N->metric + dist_N_D;
return true;
}
@@ -2082,7 +2082,7 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
struct isis_spftree *spftree,
struct lfa_protected_resource *resource)
{
- struct isis_vertex *vertex;
+ struct isis_vertex *vertex, *parent_vertex;
struct listnode *vnode, *snode;
int level = spftree->level;
@@ -2099,7 +2099,7 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
struct isis_vertex_adj *vadj_primary;
struct isis_spf_adj *sadj_primary;
bool allow_ecmp;
- uint32_t best_metric = UINT32_MAX;
+ uint32_t prefix_metric, best_metric = UINT32_MAX;
char buf[VID2STR_BUFFER];
if (!VTYPE_IP(vertex->type))
@@ -2133,6 +2133,9 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
vadj_primary = listnode_head(vertex->Adj_N);
sadj_primary = vadj_primary->sadj;
+ parent_vertex = listnode_head(vertex->parents);
+ prefix_metric = vertex->d_N - parent_vertex->d_N;
+
/*
* Loop over list of SPF adjacencies and compute a list of
* preliminary LFAs.
@@ -2140,7 +2143,7 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
lfa_list = list_new();
lfa_list->del = isis_vertex_adj_free;
for (ALL_LIST_ELEMENTS_RO(spftree->sadj_list, snode, sadj_N)) {
- uint32_t lfa_metric;
+ uint32_t lfa_metric, path_metric;
struct isis_vertex_adj *lfa;
struct isis_prefix_sid *psid = NULL;
bool last_hop = false;
@@ -2190,7 +2193,7 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
/* Check loop-free criterion. */
if (!clfa_loop_free_check(spftree, vertex, sadj_primary,
- sadj_N, &lfa_metric)) {
+ sadj_N, &path_metric)) {
if (IS_DEBUG_LFA)
zlog_debug(
"ISIS-LFA: LFA condition not met for %s",
@@ -2198,6 +2201,7 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
continue;
}
+ lfa_metric = path_metric + prefix_metric;
if (lfa_metric < best_metric)
best_metric = lfa_metric;
@@ -2208,7 +2212,7 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit,
if (vertex->N.ip.sr.present) {
psid = &vertex->N.ip.sr.sid;
- if (lfa_metric == sadj_N->metric)
+ if (path_metric == sadj_N->metric)
last_hop = true;
}
lfa = isis_vertex_adj_add(spftree, vertex, lfa_list,
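
The net effect of the renaming above: clfa_loop_free_check() still reports the pure path cost through the alternate (path_metric), the last-hop test keeps comparing that against sadj_N->metric, and the metric attached to the LFA route now also carries the prefix's own cost, taken as the delta between the prefix vertex and its parent router vertex. A worked example with invented numbers:

/* Invented numbers, illustration only.  Prefix vertex at d_N = 40,
 * parent router vertex at d_N = 30, so the prefix itself costs 10;
 * the alternate's loop-free path to that router costs 50. */
uint32_t prefix_metric = 40 - 30;	/* vertex->d_N - parent_vertex->d_N */
uint32_t path_metric = 50;		/* from clfa_loop_free_check() */
uint32_t lfa_metric = path_metric + prefix_metric; /* 60; previously 50 */
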
diff --git a/isisd/isis_route.c b/isisd/isis_route.c
index 9f8f639e5..4fdb11b21 100644
--- a/isisd/isis_route.c
+++ b/isisd/isis_route.c
@@ -91,11 +91,18 @@ static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
struct isis_nexthop *nh;
for (ALL_LIST_ELEMENTS_RO(nexthops, node, nh)) {
- if (nh->family != family)
- continue;
if (nh->ifindex != ifindex)
continue;
+ /* if the IP is unspecified, return the first nexthop found on
+ * the interface
+ */
+ if (!ip)
+ return nh;
+
+ if (nh->family != family)
+ continue;
+
switch (family) {
case AF_INET:
if (IPV4_ADDR_CMP(&nh->ip.ipv4, &ip->ipv4))
@@ -459,6 +466,21 @@ void isis_route_delete(struct isis_area *area, struct route_node *rode,
route_unlock_node(rode);
}
+static void isis_route_remove_previous_sid(struct isis_area *area,
+ struct prefix *prefix,
+ struct isis_route_info *route_info)
+{
+ /*
+ * Explicitly uninstall previous Prefix-SID label if it has
+ * changed or was removed.
+ */
+ if (route_info->sr_previous.present &&
+ (!route_info->sr.present ||
+ route_info->sr_previous.label != route_info->sr.label))
+ isis_zebra_prefix_sid_uninstall(area, prefix, route_info,
+ &route_info->sr_previous);
+}
+
static void isis_route_update(struct isis_area *area, struct prefix *prefix,
struct prefix_ipv6 *src_p,
struct isis_route_info *route_info)
@@ -467,21 +489,12 @@ static void isis_route_update(struct isis_area *area, struct prefix *prefix,
if (CHECK_FLAG(route_info->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED))
return;
- /*
- * Explicitly uninstall previous Prefix-SID label if it has
- * changed or was removed.
- */
- if (route_info->sr_previous.present
- && (!route_info->sr.present
- || route_info->sr_previous.label
- != route_info->sr.label))
- isis_zebra_prefix_sid_uninstall(
- area, prefix, route_info,
- &route_info->sr_previous);
+ isis_route_remove_previous_sid(area, prefix, route_info);
/* Install route. */
- isis_zebra_route_add_route(area->isis, prefix, src_p,
- route_info);
+ if (area)
+ isis_zebra_route_add_route(area->isis, prefix, src_p,
+ route_info);
/* Install/reinstall Prefix-SID label. */
if (route_info->sr.present)
isis_zebra_prefix_sid_install(area, prefix, route_info,
@@ -496,8 +509,9 @@ static void isis_route_update(struct isis_area *area, struct prefix *prefix,
isis_zebra_prefix_sid_uninstall(
area, prefix, route_info, &route_info->sr);
/* Uninstall route. */
- isis_zebra_route_del_route(area->isis, prefix, src_p,
- route_info);
+ if (area)
+ isis_zebra_route_del_route(area->isis, prefix, src_p,
+ route_info);
hook_call(isis_route_update_hook, area, prefix, route_info);
UNSET_FLAG(route_info->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED);
@@ -724,3 +738,54 @@ void isis_route_invalidate_table(struct isis_area *area,
UNSET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ACTIVE);
}
}
+
+void isis_route_switchover_nexthop(struct isis_area *area,
+ struct route_table *table, int family,
+ union g_addr *nexthop_addr,
+ ifindex_t ifindex)
+{
+ const char *ifname = NULL, *vrfname = NULL;
+ struct isis_route_info *rinfo;
+ struct prefix_ipv6 *src_p;
+ struct route_node *rnode;
+ vrf_id_t vrf_id;
+ struct prefix *prefix;
+
+ if (IS_DEBUG_EVENTS) {
+ if (area && area->isis) {
+ vrf_id = area->isis->vrf_id;
+ vrfname = vrf_id_to_name(vrf_id);
+ ifname = ifindex2ifname(ifindex, vrf_id);
+ }
+ zlog_debug("%s: initiating fast-reroute %s on VRF %s iface %s",
+ __func__, family2str(family), vrfname ? vrfname : "",
+ ifname ? ifname : "");
+ }
+
+ for (rnode = route_top(table); rnode;
+ rnode = srcdest_route_next(rnode)) {
+ if (!rnode->info)
+ continue;
+ rinfo = rnode->info;
+
+ if (!rinfo->backup)
+ continue;
+
+ if (!nexthoplookup(rinfo->nexthops, family, nexthop_addr,
+ ifindex))
+ continue;
+
+ srcdest_rnode_prefixes(rnode, (const struct prefix **)&prefix,
+ (const struct prefix **)&src_p);
+
+ /* Switchover route. */
+ isis_route_remove_previous_sid(area, prefix, rinfo);
+ UNSET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED);
+ isis_route_update(area, prefix, src_p, rinfo->backup);
+
+ isis_route_info_delete(rinfo);
+
+ rnode->info = NULL;
+ route_unlock_node(rnode);
+ }
+}
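
The relaxed nexthoplookup() above (a NULL address now matches any nexthop on the given ifindex) is what lets the two fast-reroute triggers added earlier share one switchover path. Condensed from the isis_circuit.c and isis_adjacency.c hunks (sketch, not patch code):

union g_addr ip = {};

/* Interface went down (zebra): every route whose primary nexthop sits
 * on this ifindex is moved onto its precomputed backup. */
isis_circuit_switchover_routes(circuit, AF_INET, NULL, ifp->ifindex);

/* A single adjacency dropped: only routes using that neighbor's
 * address on the circuit's interface are switched over. */
ip.ipv4 = adj->ipv4_addresses[0];
isis_circuit_switchover_routes(adj->circuit, AF_INET, &ip,
			       adj->circuit->interface->ifindex);
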
diff --git a/isisd/isis_route.h b/isisd/isis_route.h
index 0e206d08f..a0e0500ae 100644
--- a/isisd/isis_route.h
+++ b/isisd/isis_route.h
@@ -86,4 +86,9 @@ void isis_route_invalidate_table(struct isis_area *area,
void isis_route_node_cleanup(struct route_table *table,
struct route_node *node);
+void isis_route_switchover_nexthop(struct isis_area *area,
+ struct route_table *table, int family,
+ union g_addr *nexthop_addr,
+ ifindex_t ifindex);
+
#endif /* _ZEBRA_ISIS_ROUTE_H */
diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c
index bdd323e1a..0d1a5db0d 100644
--- a/isisd/isis_spf.c
+++ b/isisd/isis_spf.c
@@ -1851,6 +1851,15 @@ void isis_spf_invalidate_routes(struct isis_spftree *tree)
tree->route_table_backup->cleanup = isis_route_node_cleanup;
}
+void isis_spf_switchover_routes(struct isis_area *area,
+ struct isis_spftree **trees, int family,
+ union g_addr *nexthop_ip, ifindex_t ifindex,
+ int level)
+{
+ isis_route_switchover_nexthop(area, trees[level - 1]->route_table,
+ family, nexthop_ip, ifindex);
+}
+
static void isis_run_spf_cb(struct thread *thread)
{
struct isis_spf_run *run = THREAD_ARG(thread);
@@ -1922,9 +1931,19 @@ void isis_spf_timer_free(void *run)
int _isis_spf_schedule(struct isis_area *area, int level,
const char *func, const char *file, int line)
{
- struct isis_spftree *spftree = area->spftree[SPFTREE_IPV4][level - 1];
- time_t now = monotime(NULL);
- int diff = now - spftree->last_run_monotime;
+ struct isis_spftree *spftree;
+ time_t now;
+ long tree_diff, diff;
+ int tree;
+
+ now = monotime(NULL);
+ diff = 0;
+ for (tree = SPFTREE_IPV4; tree < SPFTREE_COUNT; tree++) {
+ spftree = area->spftree[tree][level - 1];
+ tree_diff = difftime(now - spftree->last_run_monotime, 0);
+ if (tree_diff != now && (diff == 0 || tree_diff < diff))
+ diff = tree_diff;
+ }
if (CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
return 0;
@@ -1934,7 +1953,7 @@ int _isis_spf_schedule(struct isis_area *area, int level,
if (IS_DEBUG_SPF_EVENTS) {
zlog_debug(
- "ISIS-SPF (%s) L%d SPF schedule called, lastrun %d sec ago Caller: %s %s:%d",
+ "ISIS-SPF (%s) L%d SPF schedule called, lastrun %ld sec ago Caller: %s %s:%d",
area->area_tag, level, diff, func, file, line);
}
diff --git a/isisd/isis_spf.h b/isisd/isis_spf.h
index 815db7b22..3fa5182ba 100644
--- a/isisd/isis_spf.h
+++ b/isisd/isis_spf.h
@@ -60,6 +60,10 @@ struct isis_vertex *isis_spf_prefix_sid_lookup(struct isis_spftree *spftree,
void isis_spf_invalidate_routes(struct isis_spftree *tree);
void isis_spf_verify_routes(struct isis_area *area,
struct isis_spftree **trees);
+void isis_spf_switchover_routes(struct isis_area *area,
+ struct isis_spftree **trees, int family,
+ union g_addr *nexthop_ip, ifindex_t ifindex,
+ int level);
void isis_spftree_del(struct isis_spftree *spftree);
void spftree_area_init(struct isis_area *area);
void spftree_area_del(struct isis_area *area);
diff --git a/isisd/isisd.c b/isisd/isisd.c
index 17f4b2073..cd0852547 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -3090,6 +3090,25 @@ void isis_area_verify_routes(struct isis_area *area)
isis_spf_verify_routes(area, area->spftree[tree]);
}
+void isis_area_switchover_routes(struct isis_area *area, int family,
+ union g_addr *nexthop_ip, ifindex_t ifindex,
+ int level)
+{
+ int tree;
+
+ /* TODO SPFTREE_DSTSRC */
+ if (family == AF_INET)
+ tree = SPFTREE_IPV4;
+ else if (family == AF_INET6)
+ tree = SPFTREE_IPV6;
+ else
+ return;
+
+ isis_spf_switchover_routes(area, area->spftree[tree], family,
+ nexthop_ip, ifindex, level);
+}
+
+
static void area_resign_level(struct isis_area *area, int level)
{
isis_area_invalidate_routes(area, level);
diff --git a/isisd/isisd.h b/isisd/isisd.h
index a9c1d6043..38f20b211 100644
--- a/isisd/isisd.h
+++ b/isisd/isisd.h
@@ -291,6 +291,9 @@ struct isis_lsp *lsp_for_sysid(struct lspdb_head *head, const char *sysid_str,
void isis_area_invalidate_routes(struct isis_area *area, int levels);
void isis_area_verify_routes(struct isis_area *area);
+void isis_area_switchover_routes(struct isis_area *area, int family,
+ union g_addr *nexthop_ip, ifindex_t ifindex,
+ int level);
void isis_area_overload_bit_set(struct isis_area *area, bool overload_bit);
void isis_area_overload_on_startup_set(struct isis_area *area,
diff --git a/lib/frrstr.c b/lib/frrstr.c
index 1b98b224c..d66c6f8c1 100644
--- a/lib/frrstr.c
+++ b/lib/frrstr.c
@@ -23,11 +23,16 @@
#include <string.h>
#include <ctype.h>
#include <sys/types.h>
-#ifdef HAVE_LIBPCREPOSIX
+#ifdef HAVE_LIBPCRE2_POSIX
+#ifndef _FRR_PCRE2_POSIX
+#define _FRR_PCRE2_POSIX
+#include <pcre2posix.h>
+#endif /* _FRR_PCRE2_POSIX */
+#elif defined(HAVE_LIBPCREPOSIX)
#include <pcreposix.h>
#else
#include <regex.h>
-#endif /* HAVE_LIBPCREPOSIX */
+#endif /* HAVE_LIBPCRE2_POSIX */
#include "frrstr.h"
#include "memory.h"
diff --git a/lib/frrstr.h b/lib/frrstr.h
index d52d6a448..f0066d0fc 100644
--- a/lib/frrstr.h
+++ b/lib/frrstr.h
@@ -23,11 +23,16 @@
#include <sys/types.h>
#include <sys/types.h>
-#ifdef HAVE_LIBPCREPOSIX
+#ifdef HAVE_LIBPCRE2_POSIX
+#ifndef _FRR_PCRE2_POSIX
+#define _FRR_PCRE2_POSIX
+#include <pcre2posix.h>
+#endif /* _FRR_PCRE2_POSIX */
+#elif defined(HAVE_LIBPCREPOSIX)
#include <pcreposix.h>
#else
#include <regex.h>
-#endif /* HAVE_LIBPCREPOSIX */
+#endif /* HAVE_LIBPCRE2_POSIX */
#include <stdbool.h>
#include "vector.h"
diff --git a/lib/srv6.c b/lib/srv6.c
index 1c2c8913d..5cd82080f 100644
--- a/lib/srv6.c
+++ b/lib/srv6.c
@@ -241,6 +241,10 @@ json_object *srv6_locator_json(const struct srv6_locator *loc)
json_object_int_add(jo_root, "argumentBitsLength",
loc->argument_bits_length);
+ /* set true if the locator is a Micro-segment (uSID) locator */
+ if (CHECK_FLAG(loc->flags, SRV6_LOCATOR_USID))
+ json_object_string_add(jo_root, "behavior", "usid");
+
/* set status_up */
json_object_boolean_add(jo_root, "statusUp",
loc->status_up);
@@ -286,6 +290,10 @@ json_object *srv6_locator_detailed_json(const struct srv6_locator *loc)
json_object_int_add(jo_root, "argumentBitsLength",
loc->argument_bits_length);
+ /* set true if the locator is a Micro-segment (uSID) locator */
+ if (CHECK_FLAG(loc->flags, SRV6_LOCATOR_USID))
+ json_object_string_add(jo_root, "behavior", "usid");
+
/* set algonum */
json_object_int_add(jo_root, "algoNum", loc->algonum);
diff --git a/lib/srv6.h b/lib/srv6.h
index 18d5bdebc..acfb0631c 100644
--- a/lib/srv6.h
+++ b/lib/srv6.h
@@ -92,6 +92,9 @@ struct srv6_locator {
bool status_up;
struct list *chunks;
+ uint8_t flags;
+#define SRV6_LOCATOR_USID (1 << 0) /* The SRv6 Locator is a uSID Locator */
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(srv6_locator);
@@ -116,6 +119,23 @@ struct srv6_locator_chunk {
uint8_t proto;
uint16_t instance;
uint32_t session_id;
+
+ uint8_t flags;
+};
+
+/*
+ * SRv6 Endpoint Behavior codepoints, as defined by IANA in
+ * https://www.iana.org/assignments/segment-routing/segment-routing.xhtml
+ */
+enum srv6_endpoint_behavior_codepoint {
+ SRV6_ENDPOINT_BEHAVIOR_RESERVED = 0x0000,
+ SRV6_ENDPOINT_BEHAVIOR_END_DT6 = 0x0012,
+ SRV6_ENDPOINT_BEHAVIOR_END_DT4 = 0x0013,
+ SRV6_ENDPOINT_BEHAVIOR_END_DT46 = 0x0014,
+ SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID = 0x003E,
+ SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID = 0x003F,
+ SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID = 0x0040,
+ SRV6_ENDPOINT_BEHAVIOR_OPAQUE = 0xFFFF,
};
struct nexthop_srv6 {
diff --git a/lib/vty.c b/lib/vty.c
index d524ae53c..5fe8d8247 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -24,11 +24,16 @@
#include <lib/version.h>
#include <sys/types.h>
#include <sys/types.h>
-#ifdef HAVE_LIBPCREPOSIX
+#ifdef HAVE_LIBPCRE2_POSIX
+#ifndef _FRR_PCRE2_POSIX
+#define _FRR_PCRE2_POSIX
+#include <pcre2posix.h>
+#endif /* _FRR_PCRE2_POSIX */
+#elif defined(HAVE_LIBPCREPOSIX)
#include <pcreposix.h>
#else
#include <regex.h>
-#endif /* HAVE_LIBPCREPOSIX */
+#endif /* HAVE_LIBPCRE2_POSIX */
#include <stdio.h>
#include "linklist.h"
diff --git a/lib/vty.h b/lib/vty.h
index 430579c5a..0b3fd2443 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -22,11 +22,16 @@
#define _ZEBRA_VTY_H
#include <sys/types.h>
-#ifdef HAVE_LIBPCREPOSIX
+#ifdef HAVE_LIBPCRE2_POSIX
+#ifndef _FRR_PCRE2_POSIX
+#define _FRR_PCRE2_POSIX
+#include <pcre2posix.h>
+#endif /* _FRR_PCRE2_POSIX */
+#elif defined(HAVE_LIBPCREPOSIX)
#include <pcreposix.h>
#else
#include <regex.h>
-#endif /* HAVE_LIBPCREPOSIX */
+#endif /* HAVE_LIBPCRE2_POSIX */
#include "thread.h"
#include "log.h"
diff --git a/lib/zclient.c b/lib/zclient.c
index 2517773dc..fd6eb7db0 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -1088,6 +1088,7 @@ int zapi_srv6_locator_chunk_encode(struct stream *s,
stream_putc(s, c->node_bits_length);
stream_putc(s, c->function_bits_length);
stream_putc(s, c->argument_bits_length);
+ stream_putc(s, c->flags);
return 0;
}
@@ -1109,6 +1110,7 @@ int zapi_srv6_locator_chunk_decode(struct stream *s,
STREAM_GETC(s, c->node_bits_length);
STREAM_GETC(s, c->function_bits_length);
STREAM_GETC(s, c->argument_bits_length);
+ STREAM_GETC(s, c->flags);
return 0;
stream_failure:
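
The extra byte carries the locator flags (notably SRV6_LOCATOR_USID) from zebra to the protocol daemons, presumably set by the new `behavior usid` command (see the zebra.rst hunk above and the zebra_srv6_vty.c entry in the diffstat). Since the chunk message grows by one byte, encode and decode have to move together, which they do here. On the consumer side the decoded flag drives the SID behavior selection, as bgp_mplsvpn.c does earlier; in sketch form:

/* Consumer-side sketch (not patch code): c is the decoded chunk. */
enum srv6_endpoint_behavior_codepoint behavior;

behavior = CHECK_FLAG(c->flags, SRV6_LOCATOR_USID)
		   ? SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID
		   : SRV6_ENDPOINT_BEHAVIOR_END_DT46;
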
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index 155374d3f..ed228f46a 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -37,6 +37,7 @@
#include "ospf6_route.h"
#include "ospf6_area.h"
#include "ospf6_abr.h"
+#include "ospf6_nssa.h"
#include "ospf6_interface.h"
#include "ospf6_neighbor.h"
#include "ospf6_intra.h"
@@ -1116,14 +1117,21 @@ static int ospf6_interface_show(struct vty *vty, struct interface *ifp,
oi->dead_interval);
json_object_int_add(json_obj, "timerIntervalsConfigRetransmit",
oi->rxmt_interval);
+ json_object_boolean_add(
+ json_obj, "timerPassiveIface",
+ !!CHECK_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE));
} else {
vty_out(vty, " State %s, Transmit Delay %d sec, Priority %d\n",
ospf6_interface_state_str[oi->state], oi->transdelay,
oi->priority);
vty_out(vty, " Timer intervals configured:\n");
- vty_out(vty, " Hello %d(%pTHd), Dead %d, Retransmit %d\n",
- oi->hello_interval, oi->thread_send_hello,
- oi->dead_interval, oi->rxmt_interval);
+ if (!CHECK_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE))
+ vty_out(vty,
+ " Hello %d(%pTHd), Dead %d, Retransmit %d\n",
+ oi->hello_interval, oi->thread_send_hello,
+ oi->dead_interval, oi->rxmt_interval);
+ else
+ vty_out(vty, " No Hellos (Passive interface)\n");
}
inet_ntop(AF_INET, &oi->drouter, drouter, sizeof(drouter));
@@ -1736,8 +1744,10 @@ void ospf6_interface_start(struct ospf6_interface *oi)
ospf6_interface_enable(oi);
/* If the router is ABR, originate summary routes */
- if (ospf6_check_and_set_router_abr(ospf6))
+ if (ospf6_check_and_set_router_abr(ospf6)) {
ospf6_abr_enable_area(oa);
+ ospf6_schedule_abr_task(ospf6);
+ }
}
void ospf6_interface_stop(struct ospf6_interface *oi)
diff --git a/ospf6d/ospf6_nssa.h b/ospf6d/ospf6_nssa.h
index 02234cc8b..3cc45d900 100644
--- a/ospf6d/ospf6_nssa.h
+++ b/ospf6d/ospf6_nssa.h
@@ -45,7 +45,7 @@ extern unsigned char config_debug_ospf6_nssa;
#define OSPF6_LSA_APPROVED 0x08
#define OSPF6_LSA_LOCAL_XLT 0x40
-#define OSPF6_ABR_TASK_DELAY 7
+#define OSPF6_ABR_TASK_DELAY 5
int ospf6_area_nssa_no_summary_set(struct ospf6 *ospf6, struct in_addr area_id);
int ospf6_area_nssa_unset(struct ospf6 *ospf6, struct ospf6_area *area);
diff --git a/ospfd/ospf_abr.h b/ospfd/ospf_abr.h
index e15f4a6bf..c523638fb 100644
--- a/ospfd/ospf_abr.h
+++ b/ospfd/ospf_abr.h
@@ -22,7 +22,7 @@
#ifndef _ZEBRA_OSPF_ABR_H
#define _ZEBRA_OSPF_ABR_H
-#define OSPF_ABR_TASK_DELAY 7
+#define OSPF_ABR_TASK_DELAY 5
#define OSPF_AREA_RANGE_ADVERTISE (1 << 0)
#define OSPF_AREA_RANGE_SUBSTITUTE (1 << 1)
diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c
index 4fb3a3356..a67b6c6c1 100644
--- a/ospfd/ospf_lsa.c
+++ b/ospfd/ospf_lsa.c
@@ -2205,7 +2205,7 @@ struct ospf_lsa *ospf_external_lsa_originate(struct ospf *ospf,
*/
if (ospf->router_id.s_addr == INADDR_ANY) {
- if (IS_DEBUG_OSPF_EVENT)
+ if (ei && IS_DEBUG_OSPF_EVENT)
zlog_debug(
"LSA[Type5:%pI4]: deferring AS-external-LSA origination, router ID is zero",
&ei->p.prefix);
@@ -2214,7 +2214,7 @@ struct ospf_lsa *ospf_external_lsa_originate(struct ospf *ospf,
/* Create new AS-external-LSA instance. */
if ((new = ospf_external_lsa_new(ospf, ei, NULL)) == NULL) {
- if (IS_DEBUG_OSPF_EVENT)
+ if (ei && IS_DEBUG_OSPF_EVENT)
zlog_debug(
"LSA[Type5:%pI4]: Could not originate AS-external-LSA",
&ei->p.prefix);
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 6f272f008..db9156b04 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -689,7 +689,7 @@ static void pim_if_addr_del_pim(struct connected *ifc)
{
struct pim_interface *pim_ifp = ifc->ifp->info;
- if (ifc->address->family != AF_INET) {
+ if (ifc->address->family != PIM_AF) {
/* non-IPv4 address */
return;
}
@@ -843,7 +843,7 @@ void pim_if_addr_del_all(struct interface *ifp)
for (ALL_LIST_ELEMENTS(ifp->connected, node, nextnode, ifc)) {
struct prefix *p = ifc->address;
- if (p->family != AF_INET)
+ if (p->family != PIM_AF)
continue;
pim_if_addr_del(ifc, 1 /* force_prim_as_any=true */);
diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c
index 1a90b46de..259c34c81 100644
--- a/pimd/pim_igmp_mtrace.c
+++ b/pimd/pim_igmp_mtrace.c
@@ -365,19 +365,9 @@ static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr,
if (ip_hdr->ip_ttl-- <= 1)
return -1;
- ip_hdr->ip_sum = in_cksum(ip_hdr, ip_hdr->ip_hl * 4);
-
- fd = pim_socket_raw(IPPROTO_RAW);
-
- if (fd < 0)
- return -1;
-
- pim_socket_ip_hdr(fd);
-
if (interface == NULL) {
memset(&nexthop, 0, sizeof(nexthop));
if (!pim_nexthop_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) {
- close(fd);
if (PIM_DEBUG_MTRACE)
zlog_debug(
"Dropping mtrace packet, no route to destination");
@@ -389,6 +379,15 @@ static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr,
if_out = interface;
}
+ ip_hdr->ip_sum = in_cksum(ip_hdr, ip_hdr->ip_hl * 4);
+
+ fd = pim_socket_raw(IPPROTO_RAW);
+
+ if (fd < 0)
+ return -1;
+
+ pim_socket_ip_hdr(fd);
+
ret = pim_socket_bind(fd, if_out);
if (ret < 0) {
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 12f8ffedf..c4ff912cd 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -24,6 +24,7 @@
#include "lib/northbound_cli.h"
#include "pim_igmpv3.h"
#include "pim_neighbor.h"
+#include "pim_nht.h"
#include "pim_pim.h"
#include "pim_mlag.h"
#include "pim_bfd.h"
@@ -146,6 +147,7 @@ static int pim_cmd_interface_add(struct interface *ifp)
pim_ifp->pim_enable = true;
pim_if_addr_add_all(ifp);
+ pim_upstream_nh_if_update(pim_ifp->pim, ifp);
pim_if_membership_refresh(ifp);
pim_if_create_pimreg(pim_ifp->pim);
@@ -171,6 +173,7 @@ static int pim_cmd_interface_delete(struct interface *ifp)
if (!pim_ifp->gm_enable) {
pim_if_addr_del_all(ifp);
+ pim_upstream_nh_if_update(pim_ifp->pim, ifp);
pim_if_delete(ifp);
}
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index f9a9aeb1b..a33da6456 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -469,6 +469,40 @@ static int pim_update_upstream_nh(struct pim_instance *pim,
return 0;
}
+static int pim_upstream_nh_if_update_helper(struct hash_bucket *bucket,
+ void *arg)
+{
+ struct pim_nexthop_cache *pnc = bucket->data;
+ struct pnc_hash_walk_data *pwd = arg;
+ struct pim_instance *pim = pwd->pim;
+ struct interface *ifp = pwd->ifp;
+ struct nexthop *nh_node = NULL;
+ ifindex_t first_ifindex;
+
+ for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+ first_ifindex = nh_node->ifindex;
+ if (ifp != if_lookup_by_index(first_ifindex, pim->vrf->vrf_id))
+ continue;
+
+ if (pnc->upstream_hash->count) {
+ pim_update_upstream_nh(pim, pnc);
+ break;
+ }
+ }
+
+ return HASHWALK_CONTINUE;
+}
+
+void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp)
+{
+ struct pnc_hash_walk_data pwd;
+
+ pwd.pim = pim;
+ pwd.ifp = ifp;
+
+ hash_walk(pim->rpf_hash, pim_upstream_nh_if_update_helper, &pwd);
+}
+
uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
{
uint32_t hash_val;
@@ -495,6 +529,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
uint32_t hash_val = 0, mod_val = 0;
uint8_t nh_iter = 0, found = 0;
uint32_t i, num_nbrs = 0;
+ struct pim_interface *pim_ifp;
if (!pnc || !pnc->nexthop_num || !nexthop)
return 0;
@@ -611,10 +646,13 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
nh_iter++;
continue;
}
- if (!ifp->info) {
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
- "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
+ "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
__func__, ifp->name, pim->vrf->name,
first_ifindex, &src);
if (nh_iter == mod_val)
@@ -882,6 +920,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
uint8_t i = 0;
uint32_t hash_val = 0, mod_val = 0;
uint32_t num_nbrs = 0;
+ struct pim_interface *pim_ifp;
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
@@ -964,10 +1003,12 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
continue;
}
- if (!ifp->info) {
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
- "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
+ "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
__func__, ifp->name, pim->vrf->name,
first_ifindex, &src);
if (i == mod_val)
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index 240e61d98..f487a21ba 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -53,6 +53,11 @@ struct pim_nexthop_cache {
uint32_t bsr_count;
};
+struct pnc_hash_walk_data {
+ struct pim_instance *pim;
+ struct interface *ifp;
+};
+
int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
struct pim_upstream *up, struct rp_info *rp,
@@ -77,5 +82,5 @@ void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
/* RPF(bsr_addr) == src_ip%src_ifp? */
bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct interface *src_ifp, pim_addr src_ip);
-
+void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp);
#endif
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index a28278c58..d237a7312 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -61,6 +61,7 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
ifindex_t first_ifindex = 0;
int found = 0;
int i = 0;
+ struct pim_interface *pim_ifp;
#if PIM_IPV == 4
/*
@@ -97,9 +98,10 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
addr, PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex < 1) {
- zlog_warn(
- "%s %s: could not find nexthop ifindex for address %pPAs",
- __FILE__, __func__, &addr);
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug(
+ "%s %s: could not find nexthop ifindex for address %pPAs",
+ __FILE__, __func__, &addr);
return false;
}
@@ -117,15 +119,16 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
continue;
}
- if (!ifp->info) {
+ pim_ifp = ifp->info;
+ if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_ZEBRA)
zlog_debug(
- "%s: multicast not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
+ "%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
__func__, ifp->name, first_ifindex,
&addr);
i++;
- } else if (neighbor_needed
- && !pim_if_connected_to_source(ifp, addr)) {
+ } else if (neighbor_needed &&
+ !pim_if_connected_to_source(ifp, addr)) {
nbr = pim_neighbor_find(ifp,
nexthop_tab[i].nexthop_addr);
if (PIM_DEBUG_PIM_TRACE_DETAIL)
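
The pim_nht.c and pim_rpf.c hunks above all tighten the same guard: an interface is usable for RPF only if ifp->info is present and its pim_enable flag is set, i.e. PIM is actually configured on it rather than the interface merely being known to pimd (for example, IGMP-only). A minimal sketch of the predicate follows, using stand-in types and a hypothetical helper name; the patch itself open-codes the check at each call site.

#include <stdbool.h>
#include <stddef.h>

/* Hedged sketch with stand-in types; in pimd, ifp->info points at a
 * struct pim_interface whose pim_enable flag tracks "ip pim" config. */
struct pim_interface_sketch {
	bool pim_enable;
};

struct interface_sketch {
	struct pim_interface_sketch *info;
};

/* Hypothetical helper, not part of pimd. */
static bool rpf_iface_usable(const struct interface_sketch *ifp)
{
	const struct pim_interface_sketch *pim_ifp = ifp->info;

	/* info alone only means pimd tracks the interface; pim_enable
	 * distinguishes interfaces actually running PIM. */
	return pim_ifp != NULL && pim_ifp->pim_enable;
}

int main(void)
{
	struct pim_interface_sketch igmp_only = { .pim_enable = false };
	struct interface_sketch ifp = { .info = &igmp_only };

	return rpf_iface_usable(&ifp) ? 1 : 0;   /* 0: rejected for RPF */
}
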
diff --git a/redhat/frr.pam b/redhat/frr.pam
index 5cef5d9d7..17a62f199 100644
--- a/redhat/frr.pam
+++ b/redhat/frr.pam
@@ -5,6 +5,7 @@
# Only allow root (and possibly wheel) to use this because enable access
# is unrestricted.
auth sufficient pam_rootok.so
+account sufficient pam_rootok.so
# Uncomment the following line to implicitly trust users in the "wheel" group.
#auth sufficient pam_wheel.so trust use_uid
diff --git a/tests/isisd/test_isis_spf.refout b/tests/isisd/test_isis_spf.refout
index bdd5b2e43..23d41b9e5 100644
--- a/tests/isisd/test_isis_spf.refout
+++ b/tests/isisd/test_isis_spf.refout
@@ -823,7 +823,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
----------------------------------------------------------
- 10.0.255.2/32 40 - rt2 implicit-null
+ 10.0.255.2/32 50 - rt2 implicit-null
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -859,7 +859,7 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
------------------------------------------------------------
- 2001:db8::2/128 40 - rt2 implicit-null
+ 2001:db8::2/128 50 - rt2 implicit-null
test# test isis topology 2 root rt4 lfa system-id rt6
IS-IS paths to level-1 routers that speak IP
@@ -896,7 +896,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.6/32 20 - rt5 16060
+ 10.0.255.6/32 30 - rt5 16060
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -932,7 +932,7 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------
- 2001:db8::6/128 20 - rt5 16061
+ 2001:db8::6/128 30 - rt5 16061
test# test isis topology 3 root rt1 lfa system-id rt2
IS-IS paths to level-1 routers that speak IP
@@ -967,10 +967,10 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.2/32 20 - rt3 16020
- 10.0.255.4/32 30 - rt3 16040
- 10.0.255.5/32 40 - rt3 16050
- 10.0.255.6/32 40 - rt3 16060
+ 10.0.255.2/32 30 - rt3 16020
+ 10.0.255.4/32 40 - rt3 16040
+ 10.0.255.5/32 50 - rt3 16050
+ 10.0.255.6/32 50 - rt3 16060
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1017,7 +1017,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.3/32 20 - rt2 16030
+ 10.0.255.3/32 30 - rt2 16030
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1085,17 +1085,17 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------------
- 10.0.255.2/32 40 - rt2 implicit-null
- 10.0.255.3/32 50 - rt2 16030
- 10.0.255.4/32 60 - rt2 16040
- 10.0.255.5/32 50 - rt2 16050
- 10.0.255.6/32 60 - rt2 16060
- 10.0.255.7/32 70 - rt2 16070
- 10.0.255.8/32 60 - rt2 16080
- 10.0.255.9/32 70 - rt2 16090
- 10.0.255.10/32 80 - rt2 16100
- 10.0.255.11/32 70 - rt2 16110
- 10.0.255.12/32 80 - rt2 16120
+ 10.0.255.2/32 50 - rt2 implicit-null
+ 10.0.255.3/32 60 - rt2 16030
+ 10.0.255.4/32 70 - rt2 16040
+ 10.0.255.5/32 60 - rt2 16050
+ 10.0.255.6/32 70 - rt2 16060
+ 10.0.255.7/32 80 - rt2 16070
+ 10.0.255.8/32 70 - rt2 16080
+ 10.0.255.9/32 80 - rt2 16090
+ 10.0.255.10/32 90 - rt2 16100
+ 10.0.255.11/32 80 - rt2 16110
+ 10.0.255.12/32 90 - rt2 16120
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1173,10 +1173,10 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
------------------------------------------------------
- 10.0.255.8/32 40 - rt10 16080
- 10.0.255.9/32 50 - rt10 16090
- 10.0.255.11/32 30 - rt10 16110
- 10.0.255.12/32 40 - rt10 16120
+ 10.0.255.8/32 50 - rt10 16080
+ 10.0.255.9/32 60 - rt10 16090
+ 10.0.255.11/32 40 - rt10 16110
+ 10.0.255.12/32 50 - rt10 16120
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1252,7 +1252,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
------------------------------------------------------
- 10.0.255.10/32 30 - rt7 16100
+ 10.0.255.10/32 40 - rt7 16100
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1313,14 +1313,14 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
----------------------------------------------------------
- 10.0.255.1/32 120 - rt4 16010
- 10.0.255.2/32 110 - rt4 16020
- 10.0.255.4/32 100 - rt4 implicit-null
- 10.0.255.5/32 110 - rt4 16050
- 10.0.255.6/32 130 - rt4 16060
- 10.0.255.7/32 130 - rt4 16070
- 10.0.255.8/32 130 - rt4 16080
- 10.0.255.9/32 120 - rt4 16090
+ 10.0.255.1/32 130 - rt4 16010
+ 10.0.255.2/32 120 - rt4 16020
+ 10.0.255.4/32 110 - rt4 implicit-null
+ 10.0.255.5/32 120 - rt4 16050
+ 10.0.255.6/32 140 - rt4 16060
+ 10.0.255.7/32 140 - rt4 16070
+ 10.0.255.8/32 140 - rt4 16080
+ 10.0.255.9/32 130 - rt4 16090
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1366,14 +1366,14 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
------------------------------------------------------------
- 2001:db8::1/128 120 - rt4 16011
- 2001:db8::2/128 110 - rt4 16021
- 2001:db8::4/128 100 - rt4 implicit-null
- 2001:db8::5/128 110 - rt4 16051
- 2001:db8::6/128 130 - rt4 16061
- 2001:db8::7/128 130 - rt4 16071
- 2001:db8::8/128 130 - rt4 16081
- 2001:db8::9/128 120 - rt4 16091
+ 2001:db8::1/128 130 - rt4 16011
+ 2001:db8::2/128 120 - rt4 16021
+ 2001:db8::4/128 110 - rt4 implicit-null
+ 2001:db8::5/128 120 - rt4 16051
+ 2001:db8::6/128 140 - rt4 16061
+ 2001:db8::7/128 140 - rt4 16071
+ 2001:db8::8/128 140 - rt4 16081
+ 2001:db8::9/128 130 - rt4 16091
test# test isis topology 10 root rt8 lfa system-id rt5
IS-IS paths to level-1 routers that speak IP
@@ -1414,15 +1414,15 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.1/32 80 - rt6 16010
+ 10.0.255.1/32 90 - rt6 16010
- rt7 16010
- 10.0.255.2/32 90 - rt6 16020
+ 10.0.255.2/32 100 - rt6 16020
- rt7 16020
- 10.0.255.3/32 60 - rt6 16030
+ 10.0.255.3/32 70 - rt6 16030
- rt7 16030
- 10.0.255.4/32 60 - rt6 16040
+ 10.0.255.4/32 70 - rt6 16040
- rt7 16040
- 10.0.255.5/32 100 - rt6 16050
+ 10.0.255.5/32 110 - rt6 16050
- rt7 16050
IS-IS paths to level-1 routers that speak IPv6
@@ -1463,15 +1463,15 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------
- 2001:db8::1/128 80 - rt6 16011
+ 2001:db8::1/128 90 - rt6 16011
- rt7 16011
- 2001:db8::2/128 90 - rt6 16021
+ 2001:db8::2/128 100 - rt6 16021
- rt7 16021
- 2001:db8::3/128 60 - rt6 16031
+ 2001:db8::3/128 70 - rt6 16031
- rt7 16031
- 2001:db8::4/128 60 - rt6 16041
+ 2001:db8::4/128 70 - rt6 16041
- rt7 16041
- 2001:db8::5/128 100 - rt6 16051
+ 2001:db8::5/128 110 - rt6 16051
- rt7 16051
test# test isis topology 11 root rt3 lfa system-id rt5
@@ -1511,8 +1511,8 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.5/32 30 - rt2 16050
- 10.0.255.6/32 30 - rt2 16060
+ 10.0.255.5/32 40 - rt2 16050
+ 10.0.255.6/32 40 - rt2 16060
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1550,8 +1550,8 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------
- 2001:db8::5/128 30 - rt2 16051
- 2001:db8::6/128 30 - rt2 16061
+ 2001:db8::5/128 40 - rt2 16051
+ 2001:db8::6/128 40 - rt2 16061
test# test isis topology 13 root rt4 lfa system-id rt3
IS-IS paths to level-1 routers that speak IP
@@ -1593,10 +1593,10 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
----------------------------------------------------------
- 10.0.255.3/32 110 - rt5 16030
- 10.0.255.5/32 100 - rt5 implicit-null
- 10.0.255.6/32 120 - rt5 16060
- 10.0.255.7/32 110 - rt5 16070
+ 10.0.255.3/32 120 - rt5 16030
+ 10.0.255.5/32 110 - rt5 implicit-null
+ 10.0.255.6/32 130 - rt5 16060
+ 10.0.255.7/32 120 - rt5 16070
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1699,7 +1699,7 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.2/32 20 - rt3 -
+ 10.0.255.2/32 30 - rt3 -
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1731,7 +1731,7 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------
- 2001:db8::2/128 20 - rt3 -
+ 2001:db8::2/128 30 - rt3 -
test# test isis topology 14 root rt5 lfa system-id rt4
IS-IS paths to level-1 routers that speak IP
@@ -1765,10 +1765,10 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.1/32 60 - rt3 -
- 10.0.255.2/32 60 - rt3 -
- 10.0.255.3/32 50 - rt3 -
- 10.0.255.4/32 60 - rt3 -
+ 10.0.255.1/32 70 - rt3 -
+ 10.0.255.2/32 70 - rt3 -
+ 10.0.255.3/32 60 - rt3 -
+ 10.0.255.4/32 70 - rt3 -
IS-IS paths to level-1 routers that speak IPv6
Vertex Type Metric Next-Hop Interface Parent
@@ -1801,10 +1801,10 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
-------------------------------------------------------
- 2001:db8::1/128 60 - rt3 -
- 2001:db8::2/128 60 - rt3 -
- 2001:db8::3/128 50 - rt3 -
- 2001:db8::4/128 60 - rt3 -
+ 2001:db8::1/128 70 - rt3 -
+ 2001:db8::2/128 70 - rt3 -
+ 2001:db8::3/128 60 - rt3 -
+ 2001:db8::4/128 70 - rt3 -
test#
test# test isis topology 1 root rt1 remote-lfa system-id rt2
@@ -2174,11 +2174,11 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
-----------------------------------------------------
- 10.0.255.1/32 40 - rt3 16010
+ 10.0.255.1/32 50 - rt3 16010
- rt6 16010
- 10.0.255.2/32 30 - rt3 16020
+ 10.0.255.2/32 40 - rt3 16020
- rt6 16020
- 10.0.255.4/32 20 - rt3 16040
+ 10.0.255.4/32 30 - rt3 16040
- rt6 16040
test# test isis topology 3 root rt5 remote-lfa system-id rt3 ipv4-only
@@ -2535,13 +2535,13 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
--------------------------------------------------------
- 10.0.255.1/32 50 - rt10 16010
+ 10.0.255.1/32 60 - rt10 16010
10.0.255.2/32 60 - rt12 50900/16020
10.0.255.3/32 70 - rt12 50900/16030
- 10.0.255.4/32 40 - rt10 16040
+ 10.0.255.4/32 50 - rt10 16040
10.0.255.5/32 50 - rt12 50900/16050
10.0.255.6/32 60 - rt12 50900/16060
- 10.0.255.7/32 30 - rt10 16070
+ 10.0.255.7/32 40 - rt10 16070
10.0.255.8/32 40 - rt12 50900/16080
test# test isis topology 7 root rt6 remote-lfa system-id rt5 ipv4-only
@@ -2671,13 +2671,13 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
------------------------------------------------------
- 10.0.255.1/32 70 - rt9 16010
- 10.0.255.4/32 60 - rt9 16040
- 10.0.255.5/32 50 - rt9 16050
- 10.0.255.7/32 50 - rt9 16070
- 10.0.255.8/32 40 - rt9 16080
- 10.0.255.10/32 60 - rt9 16100
- 10.0.255.11/32 50 - rt9 16110
+ 10.0.255.1/32 80 - rt9 16010
+ 10.0.255.4/32 70 - rt9 16040
+ 10.0.255.5/32 60 - rt9 16050
+ 10.0.255.7/32 60 - rt9 16070
+ 10.0.255.8/32 50 - rt9 16080
+ 10.0.255.10/32 70 - rt9 16100
+ 10.0.255.11/32 60 - rt9 16110
test# test isis topology 8 root rt2 remote-lfa system-id rt5 ipv4-only
P-space (self):
@@ -2863,14 +2863,14 @@ IS-IS L1 IPv4 routing table:
Prefix Metric Interface Nexthop Label(s)
----------------------------------------------------------
- 10.0.255.1/32 50 - rt1 implicit-null
+ 10.0.255.1/32 60 - rt1 implicit-null
- rt3 16010
- 10.0.255.3/32 50 - rt1 16030
+ 10.0.255.3/32 60 - rt1 16030
- rt3 implicit-null
10.0.255.4/32 80 - rt3 50500/16040
- 10.0.255.5/32 60 - rt1 16050
+ 10.0.255.5/32 70 - rt1 16050
- rt3 16050
- 10.0.255.6/32 70 - rt3 16060
+ 10.0.255.6/32 80 - rt3 16060
P-space (self):
@@ -2941,14 +2941,14 @@ IS-IS L1 IPv6 routing table:
Prefix Metric Interface Nexthop Label(s)
------------------------------------------------------------
- 2001:db8::1/128 50 - rt1 implicit-null
+ 2001:db8::1/128 60 - rt1 implicit-null
- rt3 16011
- 2001:db8::3/128 50 - rt1 16031
+ 2001:db8::3/128 60 - rt1 16031
- rt3 implicit-null
2001:db8::4/128 80 - rt3 50500/16041
- 2001:db8::5/128 60 - rt1 16051
+ 2001:db8::5/128 70 - rt1 16051
- rt3 16051
- 2001:db8::6/128 70 - rt3 16061
+ 2001:db8::6/128 80 - rt3 16061
test# test isis topology 13 root rt1 remote-lfa system-id rt3 ipv4-only
P-space (self):
diff --git a/tests/topotests/isis_lfa_topo1/rt1/bfdd.conf b/tests/topotests/isis_lfa_topo1/rt1/bfdd.conf
new file mode 100644
index 000000000..86cf68dd8
--- /dev/null
+++ b/tests/topotests/isis_lfa_topo1/rt1/bfdd.conf
@@ -0,0 +1,6 @@
+hostname rt1
+!
+bfd
+ peer 2001:db8:1000::2 multihop local-address 2001:db8:1000::1
+ !
+!
diff --git a/tests/topotests/isis_lfa_topo1/rt1/step14/show_ipv6_route.ref.diff b/tests/topotests/isis_lfa_topo1/rt1/step14/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_lfa_topo1/rt1/step14/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_lfa_topo1/rt1/step15/show_ipv6_route.ref.diff b/tests/topotests/isis_lfa_topo1/rt1/step15/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..a00d2d3b6
--- /dev/null
+++ b/tests/topotests/isis_lfa_topo1/rt1/step15/show_ipv6_route.ref.diff
@@ -0,0 +1,50 @@
+--- a/rt1/step14/show_ipv6_route.ref
++++ b/rt1/step15/show_ipv6_route.ref
+@@ -6,22 +6,12 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":25,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+- "interfaceName":"eth-rt2",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+ "active":true
+ }
+@@ -151,22 +141,12 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":25,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+- "interfaceName":"eth-rt2",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+ "interfaceName":"eth-rt6",
+ "active":true
+ }
diff --git a/tests/topotests/isis_lfa_topo1/rt1/step16/show_ipv6_route.ref.diff b/tests/topotests/isis_lfa_topo1/rt1/step16/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..5e48511ba
--- /dev/null
+++ b/tests/topotests/isis_lfa_topo1/rt1/step16/show_ipv6_route.ref.diff
@@ -0,0 +1,53 @@
+--- a/rt1/step15/show_ipv6_route.ref
++++ b/rt1/step16/show_ipv6_route.ref
+@@ -32,16 +32,6 @@
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt3",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+@@ -90,16 +80,6 @@
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
+@@ -119,16 +99,6 @@
+ "fib":true,
+ "afi":"ipv6",
+ "interfaceName":"eth-rt6",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt2",
+ "active":true
+ }
+ ]
diff --git a/tests/topotests/isis_lfa_topo1/rt2/bfdd.conf b/tests/topotests/isis_lfa_topo1/rt2/bfdd.conf
new file mode 100644
index 000000000..40357a4d0
--- /dev/null
+++ b/tests/topotests/isis_lfa_topo1/rt2/bfdd.conf
@@ -0,0 +1,6 @@
+hostname rt2
+!
+bfd
+ peer 2001:db8:1000::1 multihop local-address 2001:db8:1000::2
+ !
+!
diff --git a/tests/topotests/isis_lfa_topo1/test_isis_lfa_topo1.py b/tests/topotests/isis_lfa_topo1/test_isis_lfa_topo1.py
index 7e902213e..f72942b60 100755
--- a/tests/topotests/isis_lfa_topo1/test_isis_lfa_topo1.py
+++ b/tests/topotests/isis_lfa_topo1/test_isis_lfa_topo1.py
@@ -55,6 +55,7 @@ import os
import sys
import pytest
import json
+import time
import tempfile
from functools import partial
@@ -128,7 +129,7 @@ def build_topo(tgen):
files = ["show_ipv6_route.ref", "show_yang_interface_isis_adjacencies.ref"]
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
outputs[rname] = {}
- for step in range(1, 13 + 1):
+ for step in range(1, 16 + 1):
outputs[rname][step] = {}
for file in files:
if step == 1:
@@ -174,6 +175,9 @@ def setup_module(mod):
router.load_config(
TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)
+ router.load_config(
+            TopoRouter.RD_BFD, os.path.join(CWD, "/dev/null")
+ )
tgen.start_router()
@@ -186,7 +190,7 @@ def teardown_module(mod):
tgen.stop_topology()
-def router_compare_json_output(rname, command, reference):
+def router_compare_json_output(rname, command, reference, wait=0.5, count=120):
"Compare router JSON output"
logger.info('Comparing router "%s" "%s" output', rname, command)
@@ -196,7 +200,7 @@ def router_compare_json_output(rname, command, reference):
    # Run test function until we get a result. Wait at most 60 seconds.
test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
- _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
+ _, diff = topotest.run_and_expect(test_func, None, count=count, wait=wait)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
@@ -616,6 +620,408 @@ def test_rib_ipv6_step13():
)
+#
+# Step 14
+#
+# Action(s):
+# - Setting spf-delay-ietf init-delay of 15s
+#
+# Expected changes:
+# - No routing table change
+# - At the end of test, SPF reacts to a failure in 15s
+#
+def test_rib_ipv6_step14():
+ logger.info("Test (step 14): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Setting spf-delay-ietf init-delay of 15s")
+ tgen.net["rt1"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "spf-delay-ietf init-delay 15000 short-delay 0 long-delay 0 holddown 0 time-to-learn 0"'
+ )
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][14]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 15
+#
+# Action(s):
+# - shut the eth-rt2 interface on rt1
+#
+# Expected changes:
+# - Route switchover of routes via eth-rt2
+#
+def test_rib_ipv6_step15():
+ logger.info("Test (step 15): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Shut the interface to rt2 from the switch side and check fast-reroute")
+ tgen.net.cmd_raises("ip link set %s down" % tgen.net["s1"].intfs[0])
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][15]["show_ipv6_route.ref"],
+ count=2,
+ wait=0.05,
+ )
+
+
+#
+# Step 16
+#
+# Action(s): wait for the convergence and SPF computation on rt1
+#
+# Expected changes:
+# - convergence of IPv6 RIB
+#
+def test_rib_ipv6_step16():
+ logger.info("Test (step 16): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Check SPF convergence")
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][16]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 17
+#
+# Action(s):
+# - Unshut the interface to rt2 from the switch side
+#
+# Expected changes:
+# - The routing table converges
+#
+def test_rib_ipv6_step17():
+ logger.info("Test (step 17): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ rname = "rt1"
+
+ logger.info("Unsetting spf-delay-ietf init-delay of 15s")
+ tgen.net[rname].cmd('vtysh -c "conf t" -c "router isis 1" -c "no spf-delay-ietf"')
+
+ logger.info(
+ "Unshut the interface to rt2 from the switch side and check fast-reroute"
+ )
+ tgen.net.cmd_raises("ip link set %s up" % tgen.net["s1"].intfs[0])
+
+ logger.info("Setting spf-delay-ietf init-delay of 15s")
+ tgen.net[rname].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "spf-delay-ietf init-delay 15000 short-delay 0 long-delay 0 holddown 0 time-to-learn 0"'
+ )
+
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][14]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 18
+#
+# Action(s):
+# - drop traffic between rt1 and rt2 by shutting down the bridge between
+# the routers. Interfaces on rt1 and rt2 stay up.
+#
+#
+# Expected changes:
+# - Route switchover of routes via eth-rt2
+#
+def test_rib_ipv6_step18():
+ logger.info("Test (step 18): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Drop traffic between rt1 and rt2")
+ tgen.net.cmd_raises("ip link set s1 down")
+
+ rname = "rt1"
+
+ retry = 200 + 1
+
+ while retry:
+ retry -= 1
+ output = tgen.gears[rname].vtysh_cmd("show isis neighbor json")
+ output_json = json.loads(output)
+ found = False
+ for neighbor in output_json["areas"][0]["circuits"]:
+ if "adj" in neighbor and neighbor["adj"] == "rt2":
+ found = True
+ break
+ if not found:
+ break
+ time.sleep(0.05)
+
+ assert not found, "rt2 neighbor is still present"
+
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][15]["show_ipv6_route.ref"],
+ count=2,
+ wait=0.05,
+ )
+
+
+#
+# Step 19
+#
+# Action(s): wait for the convergence and SPF computation on rt1
+#
+# Expected changes:
+# - convergence of IPv6 RIB
+#
+def test_rib_ipv6_step19():
+ logger.info("Test (step 19): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Check SPF convergence")
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][16]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 20
+#
+# Action(s):
+# - Unshut the switch from rt1 to rt2
+#
+# Expected changes:
+# - The routing table goes back to the nominal state
+#
+def test_rib_ipv6_step20():
+ logger.info("Test (step 20): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ rname = "rt1"
+
+ logger.info("Unsetting spf-delay-ietf init-delay of 15s")
+ tgen.net[rname].cmd('vtysh -c "conf t" -c "router isis 1" -c "no spf-delay-ietf"')
+
+ logger.info(
+ "Unshut the interface to rt2 from the switch side and check fast-reroute"
+ )
+ tgen.net.cmd_raises("ip link set s1 up")
+
+ logger.info("Setting spf-delay-ietf init-delay of 15s")
+ tgen.net[rname].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "spf-delay-ietf init-delay 15000 short-delay 0 long-delay 0 holddown 0 time-to-learn 0"'
+ )
+
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][14]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 21
+#
+# Action(s):
+# - clear the rt2 ISIS neighbor on rt1
+#
+# Expected changes:
+# - Route switchover of routes via eth-rt2
+#
+def test_rib_ipv6_step21():
+ logger.info("Test (step 21): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ rname = "rt1"
+
+ logger.info("Clear the rt2 ISIS neighbor on rt1 and check fast-reroute")
+ tgen.gears[rname].vtysh_cmd("clear isis neighbor rt2")
+
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][15]["show_ipv6_route.ref"],
+ count=2,
+ wait=0.05,
+ )
+
+
+#
+# Step 22
+#
+# Action(s): wait for the convergence and SPF computation on rt1
+#
+# Expected changes:
+# - convergence of IPv6 RIB
+#
+def test_rib_ipv6_step22():
+ logger.info("Test (step 22): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Check SPF convergence")
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][16]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 23
+#
+# Action(s):
+# - Setting BFD
+#
+# Expected changes:
+# - No routing table change
+# - BFD comes up
+#
+def test_rib_ipv6_step23():
+ logger.info("Test (step 23): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Setup BFD on rt1 and rt2")
+ for rname in ["rt1", "rt2"]:
+ conf_file = os.path.join(CWD, "{}/bfdd.conf".format(rname))
+ tgen.net[rname].cmd("vtysh -f {}".format(conf_file))
+
+ rname = "rt1"
+ expect = '[{"multihop":true,"peer":"2001:db8:1000::2","local":"2001:db8:1000::1","status":"up"}]'
+ router_compare_json_output(rname, "show bfd peers json", expect)
+
+ logger.info("Set ISIS BFD")
+ tgen.net["rt1"].cmd('vtysh -c "conf t" -c "int eth-rt2" -c "isis bfd"')
+ tgen.net["rt2"].cmd('vtysh -c "conf t" -c "int eth-rt1" -c "isis bfd"')
+
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][14]["show_ipv6_route.ref"],
+ )
+
+
+#
+# Step 24
+#
+# Action(s):
+# - drop traffic between rt1 and rt2 by shutting down the bridge between
+# the routers. Interfaces on rt1 and rt2 stay up.
+#
+# Expected changes:
+# - BFD comes down before IS-IS
+# - Route switchover of routes via eth-rt2
+#
+def test_rib_ipv6_step24():
+ logger.info("Test (step 24): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Shut the interface to rt2 from the switch side and check fast-reroute")
+ tgen.net.cmd_raises("ip link set s1 down")
+
+ rname = "rt1"
+ expect = '[{"multihop":true,"peer":"2001:db8:1000::2","local":"2001:db8:1000::1","status":"down"}]'
+ router_compare_json_output(
+ rname,
+ "show bfd peers json",
+ expect,
+ count=40,
+ wait=0.05,
+ )
+
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][15]["show_ipv6_route.ref"],
+ count=4,
+ )
+
+
+#
+# Step 25
+#
+# Action(s): wait for the convergence and SPF computation on rt1
+#
+# Expected changes:
+# - convergence of IPv6 RIB
+#
+def test_rib_ipv6_step25():
+ logger.info("Test (step 25): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Check SPF convergence")
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][16]["show_ipv6_route.ref"],
+ )
+
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step10/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step10/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step10/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step10/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step10/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step10/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step10/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step10/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step10/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step11/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step11/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step11/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step11/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step11/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step11/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step11/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step11/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step11/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step12/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step12/show_ip_route.ref.diff
new file mode 100644
index 000000000..a8d6e6c65
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step12/show_ip_route.ref.diff
@@ -0,0 +1,19 @@
+--- a/rt1/step11/show_ip_route.ref
++++ b/rt1/step12/show_ip_route.ref
+@@ -110,16 +110,6 @@
+ "labels":[
+ 16060
+ ]
+- },
+- {
+- "fib":true,
+- "ip":"10.0.1.3",
+- "afi":"ipv4",
+- "interfaceName":"eth-sw1",
+- "active":true,
+- "labels":[
+- 16060
+- ]
+ }
+ ]
+ }
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step12/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step12/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..637c59f6e
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step12/show_ipv6_route.ref.diff
@@ -0,0 +1,18 @@
+--- a/rt1/step11/show_ipv6_route.ref
++++ b/rt1/step12/show_ipv6_route.ref
+@@ -105,15 +105,6 @@
+ "labels":[
+ 16061
+ ]
+- },
+- {
+- "fib":true,
+- "afi":"ipv6",
+- "interfaceName":"eth-sw1",
+- "active":true,
+- "labels":[
+- 16061
+- ]
+ }
+ ]
+ }
diff --git a/tests/topotests/isis_tilfa_topo1/rt1/step12/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt1/step12/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e110bf48e
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt1/step12/show_mpls_table.ref.diff
@@ -0,0 +1,28 @@
+--- a/rt1/step11/show_mpls_table.ref
++++ b/rt1/step12/show_mpls_table.ref
+@@ -79,12 +79,6 @@
+ "type":"SR (IS-IS)",
+ "outLabel":16060,
+ "installed":true,
+- "nexthop":"10.0.1.3"
+- },
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":16060,
+- "installed":true,
+ "nexthop":"10.0.1.2"
+ }
+ ]
+@@ -96,12 +90,6 @@
+ {
+ "type":"SR (IS-IS)",
+ "outLabel":16061,
+- "installed":true,
+- "interface":"eth-sw1"
+- },
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":16061,
+ "installed":true,
+ "interface":"eth-sw1"
+ }
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step10/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step10/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step10/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step10/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step10/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step10/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step10/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step10/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step10/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step11/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step11/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step11/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step11/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step11/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step11/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step11/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step11/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step11/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step12/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step12/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step12/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step12/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step12/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step12/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt2/step12/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt2/step12/show_mpls_table.ref.diff
new file mode 100644
index 000000000..84a36442d
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt2/step12/show_mpls_table.ref.diff
@@ -0,0 +1,20 @@
+--- a/rt2/step11/show_mpls_table.ref
++++ b/rt2/step12/show_mpls_table.ref
+@@ -199,7 +199,7 @@
+ "backupNexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":16060,
++ "outLabel":16500,
+ "nexthop":"10.0.1.3"
+ }
+ ]
+@@ -230,7 +230,7 @@
+ "backupNexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":16061,
++ "outLabel":16501,
+ "interface":"eth-sw1"
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step10/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step10/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step10/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step10/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step10/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step10/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step10/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step10/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step10/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step11/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step11/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step11/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step11/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step11/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step11/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step11/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step11/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step11/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step12/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step12/show_ip_route.ref.diff
new file mode 100644
index 000000000..8695cf848
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step12/show_ip_route.ref.diff
@@ -0,0 +1,58 @@
+--- a/rt3/step11/show_ip_route.ref
++++ b/rt3/step12/show_ip_route.ref
+@@ -198,44 +198,37 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":30,
++ "metric":40,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+- "ip":"10.0.4.5",
++ "ip":"10.0.1.2",
+ "afi":"ipv4",
+- "interfaceName":"eth-rt5-1",
++ "interfaceName":"eth-sw1",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+- 30060
++ 16060
+ ]
+ },
+ {
+ "fib":true,
+- "ip":"10.0.5.5",
++ "ip":"10.0.4.5",
+ "afi":"ipv4",
+- "interfaceName":"eth-rt5-2",
++ "interfaceName":"eth-rt5-1",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 30060
+ ]
+- }
+- ],
+- "backupNexthops":[
++ },
+ {
+- "ip":"10.0.1.2",
++ "fib":true,
++ "ip":"10.0.5.5",
+ "afi":"ipv4",
+- "interfaceName":"eth-sw1",
++ "interfaceName":"eth-rt5-2",
+ "active":true,
+ "labels":[
+- 16060
++ 30060
+ ]
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step12/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step12/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..661d0fe75
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step12/show_ipv6_route.ref.diff
@@ -0,0 +1,45 @@
+--- a/rt3/step11/show_ipv6_route.ref
++++ b/rt3/step12/show_ipv6_route.ref
+@@ -186,7 +186,7 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":30,
++ "metric":40,
+ "installed":true,
+ "nexthops":[
+ {
+@@ -194,9 +194,6 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5-1",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 30061
+ ]
+@@ -206,23 +203,10 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5-2",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 30061
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-sw1",
+- "active":true,
+- "labels":[
+- 16061
+- ]
+- }
+ ]
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt3/step12/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt3/step12/show_mpls_table.ref.diff
new file mode 100644
index 000000000..30941b398
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt3/step12/show_mpls_table.ref.diff
@@ -0,0 +1,60 @@
+--- a/rt3/step11/show_mpls_table.ref
++++ b/rt3/step12/show_mpls_table.ref
+@@ -165,27 +165,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":30060,
+- "installed":true,
+- "nexthop":"10.0.5.5",
+- "backupIndex":[
+- 0
+- ]
+- },
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30060,
+- "installed":true,
+- "nexthop":"10.0.4.5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16060,
++ "installed":true,
+ "nexthop":"10.0.1.2"
+ }
+ ]
+@@ -196,27 +177,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":30061,
+- "installed":true,
+- "interface":"eth-rt5-2",
+- "backupIndex":[
+- 0
+- ]
+- },
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30061,
+- "installed":true,
+- "interface":"eth-rt5-1",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16061,
++ "installed":true,
+ "interface":"eth-sw1"
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step10/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step10/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step10/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step10/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step10/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step10/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step10/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step10/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step10/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step11/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step11/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step11/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step11/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step11/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step11/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step11/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step11/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step11/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step12/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step12/show_ip_route.ref.diff
new file mode 100644
index 000000000..2645c5945
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step12/show_ip_route.ref.diff
@@ -0,0 +1,144 @@
+--- a/rt4/step11/show_ip_route.ref
++++ b/rt4/step12/show_ip_route.ref
+@@ -160,23 +160,13 @@
+ "interfaceName":"eth-rt5",
+ "active":true,
+ "backupIndex":[
+- 0
++ 0,
++ 1
+ ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.7.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true,
+- "labels":[
+- 16500
+- ]
+- }
+ ]
+ }
+ ],
+@@ -196,24 +186,10 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt6",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.6.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30060
+- ]
+- }
+ ]
+ }
+ ],
+@@ -352,19 +328,12 @@
+ "active":true,
+ "backupIndex":[
+ 0,
+- 1,
+- 2
++ 1
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+- "ip":"10.0.7.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- },
+- {
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2-1",
+@@ -397,19 +366,12 @@
+ "active":true,
+ "backupIndex":[
+ 0,
+- 1,
+- 2
++ 1
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+- "ip":"10.0.7.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- },
+- {
+ "ip":"10.0.2.2",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt2-1",
+@@ -439,14 +401,6 @@
+ 0
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.7.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- }
+ ]
+ }
+ ],
+@@ -460,18 +414,7 @@
+ {
+ "ip":"10.0.7.6",
+ "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.6.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true
++ "interfaceName":"eth-rt6"
+ }
+ ]
+ }
+@@ -492,13 +435,6 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt5",
+ "active":true
+- },
+- {
+- "fib":true,
+- "ip":"10.0.7.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+ }
+ ]
+ }
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step12/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step12/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..37e3185ae
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step12/show_ipv6_route.ref.diff
@@ -0,0 +1,50 @@
+--- a/rt4/step11/show_ipv6_route.ref
++++ b/rt4/step12/show_ipv6_route.ref
+@@ -149,23 +149,10 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt5",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt6",
+- "active":true,
+- "labels":[
+- 16501
+- ]
+- }
+ ]
+ }
+ ],
+@@ -184,23 +171,10 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt6",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30061
+- ]
+- }
+ ]
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt4/step12/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt4/step12/show_mpls_table.ref.diff
new file mode 100644
index 000000000..186291ada
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt4/step12/show_mpls_table.ref.diff
@@ -0,0 +1,78 @@
+--- a/rt4/step11/show_mpls_table.ref
++++ b/rt4/step12/show_mpls_table.ref
+@@ -179,17 +179,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "nexthop":"10.0.7.6",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30060,
+- "nexthop":"10.0.6.5"
++ "nexthop":"10.0.7.6"
+ }
+ ]
+ },
+@@ -201,17 +191,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "interface":"eth-rt6",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30061,
+- "interface":"eth-rt5"
++ "interface":"eth-rt6"
+ }
+ ]
+ },
+@@ -223,17 +203,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "nexthop":"10.0.6.5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":16500,
+- "nexthop":"10.0.7.6"
++ "nexthop":"10.0.6.5"
+ }
+ ]
+ },
+@@ -245,17 +215,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "interface":"eth-rt5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":16501,
+- "interface":"eth-rt6"
++ "interface":"eth-rt5"
+ }
+ ]
+ }
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/bfdd.conf b/tests/topotests/isis_tilfa_topo1/rt5/bfdd.conf
new file mode 100644
index 000000000..d27625ff3
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/bfdd.conf
@@ -0,0 +1,14 @@
+hostname rt5
+!
+#debug bfd network
+#debug bfd peer
+#debug bfd zebra
+!
+bfd
+ peer 10.0.8.6 interface eth-rt6
+ detect-multiplier 3
+ receive-interval 300
+ transmit-interval 300
+ no shutdown
+ !
+!
\ No newline at end of file
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step10/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step10/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step10/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step10/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step10/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step10/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step10/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step10/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step10/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step11/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step11/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step11/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step11/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step11/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step11/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step11/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step11/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step11/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step12/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step12/show_ip_route.ref.diff
new file mode 100644
index 000000000..3d21c0429
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step12/show_ip_route.ref.diff
@@ -0,0 +1,151 @@
+--- a/rt5/step11/show_ip_route.ref
++++ b/rt5/step12/show_ip_route.ref
+@@ -159,24 +159,10 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true,
+- "labels":[
+- 16040
+- ]
+- }
+ ]
+ }
+ ],
+@@ -187,25 +173,11 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true,
+- "backupIndex":[
+- 0
+- ],
+- "labels":[
+- 3
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+ "ip":"10.0.6.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+@@ -276,19 +248,12 @@
+ "active":true,
+ "backupIndex":[
+ 0,
+- 1,
+- 2
++ 1
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- },
+- {
+ "ip":"10.0.4.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3-1",
+@@ -321,19 +286,12 @@
+ "active":true,
+ "backupIndex":[
+ 0,
+- 1,
+- 2
++ 1
+ ]
+ }
+ ],
+ "backupNexthops":[
+ {
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- },
+- {
+ "ip":"10.0.4.3",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt3-1",
+@@ -439,14 +397,6 @@
+ 0
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- }
+ ]
+ }
+ ],
+@@ -465,39 +415,6 @@
+ "ip":"10.0.6.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+- "active":true
+- },
+- {
+- "fib":true,
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "active":true
+- }
+- ]
+- }
+- ],
+- "10.0.8.0\/24":[
+- {
+- "prefix":"10.0.8.0\/24",
+- "protocol":"isis",
+- "distance":115,
+- "metric":20,
+- "nexthops":[
+- {
+- "ip":"10.0.8.6",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt6",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.6.4",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt4",
+ "active":true
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step12/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step12/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..66a9dace8
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step12/show_ipv6_route.ref.diff
@@ -0,0 +1,53 @@
+--- a/rt5/step11/show_ipv6_route.ref
++++ b/rt5/step12/show_ipv6_route.ref
+@@ -149,23 +149,10 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt6",
+- "active":true,
+- "labels":[
+- 16041
+- ]
+- }
+ ]
+ }
+ ],
+@@ -176,25 +163,12 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+- "interfaceName":"eth-rt6",
+- "active":true,
+- "backupIndex":[
+- 0
+- ],
+- "labels":[
+- 3
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true,
+ "labels":[
diff --git a/tests/topotests/isis_tilfa_topo1/rt5/step12/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt5/step12/show_mpls_table.ref.diff
new file mode 100644
index 000000000..cdfc407f9
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt5/step12/show_mpls_table.ref.diff
@@ -0,0 +1,80 @@
+--- a/rt5/step11/show_mpls_table.ref
++++ b/rt5/step12/show_mpls_table.ref
+@@ -179,17 +179,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "nexthop":"10.0.6.4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":16040,
+- "nexthop":"10.0.8.6"
++ "nexthop":"10.0.6.4"
+ }
+ ]
+ },
+@@ -201,17 +191,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "interface":"eth-rt4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":16041,
+- "interface":"eth-rt6"
++ "interface":"eth-rt4"
+ }
+ ]
+ },
+@@ -221,18 +201,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":3,
+- "installed":true,
+- "nexthop":"10.0.8.6",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16060,
++ "installed":true,
+ "nexthop":"10.0.6.4"
+ }
+ ]
+@@ -243,18 +213,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":3,
+- "installed":true,
+- "interface":"eth-rt6",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16061,
++ "installed":true,
+ "interface":"eth-rt4"
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/bfdd.conf b/tests/topotests/isis_tilfa_topo1/rt6/bfdd.conf
new file mode 100644
index 000000000..0c8ba7268
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/bfdd.conf
@@ -0,0 +1,14 @@
+hostname rt6
+!
+#debug bfd network
+#debug bfd peer
+#debug bfd zebra
+!
+bfd
+ peer 10.0.8.5 interface eth-rt5
+ detect-multiplier 3
+ receive-interval 300
+ transmit-interval 300
+ no shutdown
+ !
+!
\ No newline at end of file
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step10/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step10/show_ip_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step10/show_ip_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step10/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step10/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step10/show_ipv6_route.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step10/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step10/show_mpls_table.ref.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step10/show_mpls_table.ref.diff
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step11/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step11/show_ip_route.ref.diff
new file mode 100644
index 000000000..e477e87d1
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step11/show_ip_route.ref.diff
@@ -0,0 +1,125 @@
+--- a/rt6/step10/show_ip_route.ref
++++ b/rt6/step11/show_ip_route.ref
+@@ -76,25 +76,11 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":30,
++ "metric":40,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ],
+- "labels":[
+- 30030
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+@@ -150,25 +136,11 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ],
+- "labels":[
+- 3
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+@@ -276,22 +248,11 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+@@ -307,22 +268,11 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+@@ -389,19 +339,9 @@
+ "prefix":"10.0.8.0\/24",
+ "protocol":"isis",
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "nexthops":[
+ {
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step11/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step11/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..12e0b591d
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step11/show_ipv6_route.ref.diff
@@ -0,0 +1,56 @@
+--- a/rt6/step10/show_ipv6_route.ref
++++ b/rt6/step11/show_ipv6_route.ref
+@@ -72,25 +72,12 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":30,
++ "metric":40,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ],
+- "labels":[
+- 30031
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true,
+ "labels":[
+@@ -142,25 +129,12 @@
+ "selected":true,
+ "destSelected":true,
+ "distance":115,
+- "metric":20,
++ "metric":30,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "afi":"ipv6",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "backupIndex":[
+- 0
+- ],
+- "labels":[
+- 3
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true,
+ "labels":[
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step11/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step11/show_mpls_table.ref.diff
new file mode 100644
index 000000000..387dcca3f
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step11/show_mpls_table.ref.diff
@@ -0,0 +1,106 @@
+--- a/rt6/step10/show_mpls_table.ref
++++ b/rt6/step11/show_mpls_table.ref
+@@ -8,12 +8,6 @@
+ "outLabel":16010,
+ "installed":true,
+ "nexthop":"10.0.7.4"
+- },
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30010,
+- "installed":true,
+- "nexthop":"10.0.8.5"
+ }
+ ]
+ },
+@@ -26,12 +20,6 @@
+ "outLabel":16011,
+ "installed":true,
+ "interface":"eth-rt4"
+- },
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30011,
+- "installed":true,
+- "interface":"eth-rt5"
+ }
+ ]
+ },
+@@ -85,18 +73,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":30030,
+- "installed":true,
+- "nexthop":"10.0.8.5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16030,
++ "installed":true,
+ "nexthop":"10.0.7.4"
+ }
+ ]
+@@ -107,17 +85,6 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":30031,
+- "installed":true,
+- "interface":"eth-rt5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16031,
+ "interface":"eth-rt4"
+ }
+@@ -173,18 +140,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":3,
+- "installed":true,
+- "nexthop":"10.0.8.5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16500,
++ "installed":true,
+ "nexthop":"10.0.7.4"
+ }
+ ]
+@@ -195,18 +152,8 @@
+ "nexthops":[
+ {
+ "type":"SR (IS-IS)",
+- "outLabel":3,
+- "installed":true,
+- "interface":"eth-rt5",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+ "outLabel":16501,
++ "installed":true,
+ "interface":"eth-rt4"
+ }
+ ]
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step12/show_ip_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step12/show_ip_route.ref.diff
new file mode 100644
index 000000000..1086b6e70
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step12/show_ip_route.ref.diff
@@ -0,0 +1,153 @@
+--- a/rt6/step12/show_ip_route.ref
++++ b/rt6/step12/show_ip_route.ref
+@@ -18,16 +18,6 @@
+ "labels":[
+ 16010
+ ]
+- },
+- {
+- "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30010
+- ]
+ }
+ ]
+ }
+@@ -48,24 +38,10 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 16020
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30020
+- ]
+- }
+ ]
+ }
+ ],
+@@ -108,24 +84,10 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30040
+- ]
+- }
+ ]
+ }
+ ],
+@@ -168,13 +130,6 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+- },
+- {
+- "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true
+ }
+ ]
+ }
+@@ -194,17 +149,6 @@
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+@@ -225,17 +169,6 @@
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+- "active":true,
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+ "active":true
+ }
+ ]
+@@ -297,13 +230,6 @@
+ "afi":"ipv4",
+ "interfaceName":"eth-rt4",
+ "active":true
+- },
+- {
+- "fib":true,
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true
+ }
+ ]
+ }
+@@ -318,18 +244,7 @@
+ {
+ "ip":"10.0.7.4",
+ "afi":"ipv4",
+- "interfaceName":"eth-rt4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "ip":"10.0.8.5",
+- "afi":"ipv4",
+- "interfaceName":"eth-rt5",
+- "active":true
++ "interfaceName":"eth-rt4"
+ }
+ ]
+ }
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step12/show_ipv6_route.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step12/show_ipv6_route.ref.diff
new file mode 100644
index 000000000..571c66fb6
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step12/show_ipv6_route.ref.diff
@@ -0,0 +1,66 @@
+--- a/rt6/step12/show_ipv6_route.ref
++++ b/rt6/step12/show_ipv6_route.ref
+@@ -17,15 +17,6 @@
+ "labels":[
+ 16011
+ ]
+- },
+- {
+- "fib":true,
+- "afi":"ipv6",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30011
+- ]
+ }
+ ]
+ }
+@@ -45,23 +36,10 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 16021
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30021
+- ]
+- }
+ ]
+ }
+ ],
+@@ -102,23 +80,10 @@
+ "afi":"ipv6",
+ "interfaceName":"eth-rt4",
+ "active":true,
+- "backupIndex":[
+- 0
+- ],
+ "labels":[
+ 3
+ ]
+ }
+- ],
+- "backupNexthops":[
+- {
+- "afi":"ipv6",
+- "interfaceName":"eth-rt5",
+- "active":true,
+- "labels":[
+- 30041
+- ]
+- }
+ ]
+ }
+ ],
diff --git a/tests/topotests/isis_tilfa_topo1/rt6/step12/show_mpls_table.ref.diff b/tests/topotests/isis_tilfa_topo1/rt6/step12/show_mpls_table.ref.diff
new file mode 100644
index 000000000..18322f18a
--- /dev/null
+++ b/tests/topotests/isis_tilfa_topo1/rt6/step12/show_mpls_table.ref.diff
@@ -0,0 +1,78 @@
+--- a/rt6/step12/show_mpls_table.ref
++++ b/rt6/step12/show_mpls_table.ref
+@@ -31,17 +31,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":16020,
+ "installed":true,
+- "nexthop":"10.0.7.4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30020,
+- "nexthop":"10.0.8.5"
++ "nexthop":"10.0.7.4"
+ }
+ ]
+ },
+@@ -53,17 +43,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":16021,
+ "installed":true,
+- "interface":"eth-rt4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30021,
+- "interface":"eth-rt5"
++ "interface":"eth-rt4"
+ }
+ ]
+ },
+@@ -98,17 +78,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "nexthop":"10.0.7.4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30040,
+- "nexthop":"10.0.8.5"
++ "nexthop":"10.0.7.4"
+ }
+ ]
+ },
+@@ -120,17 +90,7 @@
+ "type":"SR (IS-IS)",
+ "outLabel":3,
+ "installed":true,
+- "interface":"eth-rt4",
+- "backupIndex":[
+- 0
+- ]
+- }
+- ],
+- "backupNexthops":[
+- {
+- "type":"SR (IS-IS)",
+- "outLabel":30041,
+- "interface":"eth-rt5"
++ "interface":"eth-rt4"
+ }
+ ]
+ }, \ No newline at end of file
diff --git a/tests/topotests/isis_tilfa_topo1/test_isis_tilfa_topo1.py b/tests/topotests/isis_tilfa_topo1/test_isis_tilfa_topo1.py
index 07e91f1a4..5a5b9c59d 100755
--- a/tests/topotests/isis_tilfa_topo1/test_isis_tilfa_topo1.py
+++ b/tests/topotests/isis_tilfa_topo1/test_isis_tilfa_topo1.py
@@ -144,7 +144,7 @@ def build_topo(tgen):
]
for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
outputs[rname] = {}
- for step in range(1, 9 + 1):
+ for step in range(1, 12 + 1):
outputs[rname][step] = {}
for file in files:
if step == 1:
@@ -188,6 +188,9 @@ def setup_module(mod):
router.load_config(
TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)
+ router.load_config(
+ TopoRouter.RD_BFD, os.path.join(CWD, "/dev/null")
+ )
tgen.start_router()
@@ -200,7 +203,7 @@ def teardown_module(mod):
tgen.stop_topology()
-def router_compare_json_output(rname, command, reference):
+def router_compare_json_output(rname, command, reference, count=120, wait=0.5):
"Compare router JSON output"
logger.info('Comparing router "%s" "%s" output', rname, command)
@@ -210,7 +213,7 @@ def router_compare_json_output(rname, command, reference):
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
- _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
+ _, diff = topotest.run_and_expect(test_func, None, count=count, wait=wait)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
@@ -740,6 +743,364 @@ def test_mpls_lib_step9():
)
+#
+# Step 10
+#
+# Action(s):
+# - Set the spf-delay-ietf init-delay to 15s
+#
+# Expected changes:
+# - No routing table change
+# - At the end of the test, SPF reacts to a failure within 15s
+#
+def test_rib_ipv4_step10():
+ logger.info("Test (step 10): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Setting spf-delay-ietf init-delay of 15s")
+ tgen.net["rt6"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "spf-delay-ietf init-delay 15000 short-delay 0 long-delay 0 holddown 0 time-to-learn 0"'
+ )
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname, "show ip route isis json", outputs[rname][10]["show_ip_route.ref"]
+ )
+
+
+def test_rib_ipv6_step10():
+ logger.info("Test (step 10): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][10]["show_ipv6_route.ref"],
+ )
+
+
+def test_mpls_lib_step10():
+ logger.info("Test (step 10): verify MPLS LIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname, "show mpls table json", outputs[rname][10]["show_mpls_table.ref"]
+ )
+
+
+#
+# Step 11
+#
+# Action(s):
+# - shut the eth-rt5 interface on rt6
+#
+# Expected changes:
+# - Switchover of the routes that were going via eth-rt5
+#
+def test_rt6_step11():
+ logger.info(
+ "Test (step 11): Check IPv4/6 RIB and MPLS table after a LFA switchover"
+ )
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info(
+ "Shut a rt6 interface to rt5 from the switch side and check fast-reroute"
+ )
+ tgen.net.cmd_raises("ip link set %s down" % tgen.net["s8"].intfs[1])
+
+ rname = "rt6"
+ router_compare_json_output(
+ rname,
+ "show ip route isis json",
+ outputs[rname][11]["show_ip_route.ref"],
+ count=1,
+ )
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][11]["show_ipv6_route.ref"],
+ count=1,
+ )
+ router_compare_json_output(
+ rname,
+ "show mpls table json",
+ outputs[rname][11]["show_mpls_table.ref"],
+ count=1,
+ )
+
+
+#
+# Step 12
+#
+# Action(s): wait for the convergence and SPF computation on rt6
+#
+# Expected changes:
+# - convergence of IPv4/6 RIB and MPLS table
+#
+def test_rib_ipv4_step12():
+ logger.info("Test (step 12): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Check SPF convergence")
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show ip route isis json",
+ outputs[rname][12]["show_ip_route.ref"],
+ )
+
+
+def test_rib_ipv6_step12():
+ logger.info("Test (step 12): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][12]["show_ipv6_route.ref"],
+ )
+
+
+def test_mpls_lib_step12():
+ logger.info("Test (step 12): verify MPLS LIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show mpls table json",
+ outputs[rname][12]["show_mpls_table.ref"],
+ )
+
+
+#
+# Step 13
+#
+# Action(s):
+# - unshut the rt6 to rt5 interface
+# - Set up BFD
+#
+# Expected changes:
+# - All route tables go back to their previous state
+# - At the end of the test, the next SPF is scheduled in approximately 15s
+#
+def test_rib_ipv4_step13():
+ logger.info("Test (step 13): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Unsetting spf-delay-ietf init-delay of 15s")
+ tgen.net["rt6"].cmd('vtysh -c "conf t" -c "router isis 1" -c "no spf-delay-ietf"')
+
+ logger.info(
+ "Unshut the rt6 interface to rt5 from the switch side and check fast-reroute"
+ )
+ tgen.net.cmd_raises("ip link set %s up" % tgen.net["s8"].intfs[1])
+
+ logger.info("Setup BFD on rt5 and rt6")
+ for rname in ["rt5", "rt6"]:
+ conf_file = os.path.join(CWD, "{}/bfdd.conf".format(rname))
+ tgen.net[rname].cmd("vtysh -f {}".format(conf_file))
+
+ expect = (
+ '[{"multihop":false,"peer":"10.0.8.5","interface":"eth-rt5","status":"up"}]'
+ )
+ router_compare_json_output("rt6", "show bfd peers json", expect)
+
+ # Unset link detection. We want zebra to consider a link down as operationally up
+ # so that BFD triggers LFA instead of the interface-down event
+
+ # re-apply the spf-delay-ietf init-delay
+ logger.info("Set spf-delay-ietf init-delay to 15s")
+ tgen.net["rt6"].cmd(
+ 'vtysh -c "conf t" -c "router isis 1" -c "spf-delay-ietf init-delay 15000 short-delay 0 long-delay 0 holddown 0 time-to-learn 0"'
+ )
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname, "show ip route isis json", outputs[rname][10]["show_ip_route.ref"]
+ )
+
+ logger.info("Set ISIS BFD")
+ tgen.net["rt5"].cmd('vtysh -c "conf t" -c "int eth-rt6" -c "isis bfd"')
+ tgen.net["rt6"].cmd('vtysh -c "conf t" -c "int eth-rt5" -c "isis bfd"')
+
+
+def test_rib_ipv6_step13():
+ logger.info("Test (step 13): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][10]["show_ipv6_route.ref"],
+ )
+
+
+def test_mpls_lib_step13():
+ logger.info("Test (step 13): verify MPLS LIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname, "show mpls table json", outputs[rname][10]["show_mpls_table.ref"]
+ )
+
+
+#
+# Step 14
+#
+# Action(s):
+# - drop traffic between rt5 and rt6 by shutting down the bridge between
+# the routers. Interfaces on rt5 and rt6 stay up.
+#
+# Expected changes:
+# - Switchover of the routes that were going via eth-rt5
+#
+def test_rt6_step14():
+ logger.info("Test (step 14): verify IPv4/6 RIB and MPLS table")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Drop traffic between rt5 and rt6")
+ tgen.net.cmd_raises("ip link set s8 down")
+
+ rname = "rt6"
+
+ expect = (
+ '[{"multihop":false,"peer":"10.0.8.5","interface":"eth-rt5","status":"down"}]'
+ )
+ router_compare_json_output(
+ rname,
+ "show bfd peers json",
+ expect,
+ count=40,
+ wait=0.05,
+ )
+
+ router_compare_json_output(
+ rname,
+ "show ip route isis json",
+ outputs[rname][11]["show_ip_route.ref"],
+ count=4,
+ )
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][11]["show_ipv6_route.ref"],
+ count=4,
+ )
+ router_compare_json_output(
+ rname,
+ "show mpls table json",
+ outputs[rname][11]["show_mpls_table.ref"],
+ count=4,
+ )
+
+
+#
+# Step 15
+#
+# Action(s): wait for the convergence and SPF computation on rt6
+#
+# Expected changes:
+# - convergence of IPv4/6 RIB and MPLS table
+#
+def test_rib_ipv4_step15():
+ logger.info("Test (step 15): verify IPv4 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Check SPF convergence")
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show ip route isis json",
+ outputs[rname][12]["show_ip_route.ref"],
+ )
+
+
+def test_rib_ipv6_step15():
+ logger.info("Test (step 15): verify IPv6 RIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route isis json",
+ outputs[rname][12]["show_ipv6_route.ref"],
+ )
+
+
+def test_mpls_lib_step15():
+ logger.info("Test (step 15): verify MPLS LIB")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ router_compare_json_output(
+ rname,
+ "show mpls table json",
+ outputs[rname][12]["show_mpls_table.ref"],
+ )
+
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 5f4c28071..737226c7f 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -3244,33 +3244,29 @@ def configure_interface_mac(tgen, input_dict):
return True
-def socat_send_igmp_join_traffic(
+def socat_send_mld_join(
tgen,
server,
protocol_option,
- igmp_groups,
+ mld_groups,
send_from_intf,
send_from_intf_ip=None,
port=12345,
reuseaddr=True,
- join=False,
- traffic=False,
):
"""
- API to send IGMP join using SOCAT tool
+ API to send MLD join using SOCAT tool
Parameters:
-----------
* `tgen` : Topogen object
* `server`: iperf server, from where IGMP join would be sent
* `protocol_option`: Protocol options, ex: UDP6-RECV
- * `igmp_groups`: IGMP group for which join has to be sent
+ * `mld_groups`: MLD group(s) for which the join has to be sent
* `send_from_intf`: Interface from which join would be sent
* `send_from_intf_ip`: Interface IP, default is None
* `port`: Port to be used, default is 12345
* `reuseaddr`: True|False, bydefault True
- * `join`: If join needs to be sent
- * `traffic`: If traffic needs to be sent
returns:
--------
@@ -3280,36 +3276,32 @@ def socat_send_igmp_join_traffic(
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
rnode = tgen.routers()[server]
- socat_cmd = "socat -u "
+ socat_args = "socat -u "
- # UDP4/TCP4/UDP6/UDP6-RECV
+ # UDP4/TCP4/UDP6/UDP6-RECV/UDP6-SEND
if protocol_option:
- socat_cmd += "{}".format(protocol_option)
+ socat_args += "{}".format(protocol_option)
if port:
- socat_cmd += ":{},".format(port)
+ socat_args += ":{},".format(port)
if reuseaddr:
- socat_cmd += "{},".format("reuseaddr")
+ socat_args += "{},".format("reuseaddr")
# Group address range to cover
- if igmp_groups:
- if not isinstance(igmp_groups, list):
- igmp_groups = [igmp_groups]
+ if mld_groups:
+ if not isinstance(mld_groups, list):
+ mld_groups = [mld_groups]
- for igmp_group in igmp_groups:
- if join:
- join_traffic_option = "ipv6-join-group"
- elif traffic:
- join_traffic_option = "ipv6-join-group-source"
+ for mld_group in mld_groups:
+ socat_cmd = socat_args
+ join_option = "ipv6-join-group"
if send_from_intf and not send_from_intf_ip:
- socat_cmd += "{}='[{}]:{}'".format(
- join_traffic_option, igmp_group, send_from_intf
- )
+ socat_cmd += "{}='[{}]:{}'".format(join_option, mld_group, send_from_intf)
else:
socat_cmd += "{}='[{}]:{}:[{}]'".format(
- join_traffic_option, igmp_group, send_from_intf, send_from_intf_ip
+ join_option, mld_group, send_from_intf, send_from_intf_ip
)
socat_cmd += " STDOUT"
@@ -3324,6 +3316,124 @@ def socat_send_igmp_join_traffic(
return True
+def socat_send_pim6_traffic(
+ tgen,
+ server,
+ protocol_option,
+ mld_groups,
+ send_from_intf,
+ port=12345,
+ multicast_hops=True,
+):
+ """
+ API to send PIM6 data traffic using the SOCAT tool
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `server`: iperf server, from where PIM6 traffic would be sent
+ * `protocol_option`: Protocol options, ex: UDP6-SEND
+ * `mld_groups`: MLD group(s) to which traffic has to be sent
+ * `send_from_intf`: Interface from which traffic would be sent
+ * `port`: Port to be used, default is 12345
+ * `multicast_hops`: if True, set multicast-hops to 255 (default is True)
+
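+ Usage
+ -----
+ # Illustrative sketch; the server, group and interface names are placeholders
+ socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", "ffaa::1", "i2-r1-eth0")
+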
+ returns:
+ --------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ rnode = tgen.routers()[server]
+ socat_args = "socat -u STDIO "
+
+ # UDP4/TCP4/UDP6/UDP6-RECV/UDP6-SEND
+ if protocol_option:
+ socat_args += "'{}".format(protocol_option)
+
+ # Group address range to cover
+ if mld_groups:
+ if not isinstance(mld_groups, list):
+ mld_groups = [mld_groups]
+
+ for mld_group in mld_groups:
+ socat_cmd = socat_args
+ if port:
+ socat_cmd += ":[{}]:{},".format(mld_group, port)
+
+ if send_from_intf:
+ socat_cmd += "interface={0},so-bindtodevice={0},".format(send_from_intf)
+
+ if multicast_hops:
+ socat_cmd += "multicast-hops=255'"
+
+ socat_cmd += " &>{}/socat.logs &".format(tgen.logdir)
+
+ # Run socat command to send pim6 traffic
+ logger.info(
+ "[DUT: {}]: Running command: [set +m; ( while sleep 1; do date; done ) | {}]".format(
+ server, socat_cmd
+ )
+ )
+
+ # Open a shell script file and write data to it, which will be
+ # used to send pim6 traffic continuously
+ traffic_shell_script = "{}/{}/traffic.sh".format(tgen.logdir, server)
+ with open("{}".format(traffic_shell_script), "w") as traffic_sh:
+ traffic_sh.write(
+ "#!/usr/bin/env bash\n( while sleep 1; do date; done ) | {}\n".format(
+ socat_cmd
+ )
+ )
+
+ rnode.run("chmod 755 {}".format(traffic_shell_script))
+ output = rnode.run("{} &> /dev/null".format(traffic_shell_script))
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+def kill_socat(tgen, dut=None, action=None):
+ """
+ Killing socat process if running for any router in topology
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `dut` : router/host on which the socat process should be killed, default all
+ * `action`: "remove_mld_join" to kill the MLD join socat process,
+ "remove_mld_traffic" to kill the MLD traffic socat process
+
+ Usage:
+ ------
+ kill_socat(tgen, dut ="i6", action="remove_mld_join")
+
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router, rnode in router_list.items():
+ if dut is not None and router != dut:
+ continue
+
+ if action == "remove_mld_join":
+ cmd = "ps -ef | grep socat | grep UDP6-RECV | grep {}".format(router)
+ elif action == "remove_mld_traffic":
+ cmd = "ps -ef | grep socat | grep UDP6-SEND | grep {}".format(router)
+ else:
+ cmd = "ps -ef | grep socat".format(router)
+
+ awk_cmd = "awk -F' ' '{print $2}' | xargs kill -9 &>/dev/null &"
+ cmd = "{} | {}".format(cmd, awk_cmd)
+
+ logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd))
+ rnode.run(cmd)
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+
#############################################
# Verification APIs
#############################################
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 03ab02460..7a57af7db 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -37,6 +37,7 @@ from lib.common_config import (
retry,
run_frr_cmd,
validate_ip_address,
+ get_frr_ipv6_linklocal,
)
from lib.micronet import get_exec_path
from lib.topolog import logger
@@ -48,7 +49,7 @@ CWD = os.path.dirname(os.path.realpath(__file__))
def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True):
"""
- API to configure pim/pimv6 on router
+ API to configure pim/pim6 on router
Parameters
----------
@@ -149,7 +150,7 @@ def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
if "rp" in input_dict[router]["pim"]:
rp_data += pim_data["rp"]
- # PIMv6
+ # pim6
pim6_data = None
if "pim6" in input_dict[router]:
pim6_data = input_dict[router]["pim6"]
@@ -370,7 +371,7 @@ def create_igmp_config(tgen, topo, input_dict=None, build=False):
def create_mld_config(tgen, topo, input_dict=None, build=False):
"""
- API to configure mld for PIMv6 on router
+ API to configure mld for pim6 on router
Parameters
----------
@@ -515,6 +516,19 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
config_data.append(cmd)
config_data.append("ip pim")
+ if "pim" in input_dict[router]:
+ if "disable" in input_dict[router]["pim"]:
+ enable_flag = False
+ interfaces = input_dict[router]["pim"]["disable"]
+
+ if type(interfaces) is not list:
+ interfaces = [interfaces]
+
+ for interface in interfaces:
+ cmd = "interface {}".format(interface)
+ config_data.append(cmd)
+ config_data.append("no ip pim")
+
if "pim6" in data and data["pim6"] == "enable":
# Loopback interfaces
if "type" in data and data["type"] == "loopback":
@@ -526,6 +540,19 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
config_data.append(cmd)
config_data.append("ipv6 pim")
+ if "pim6" in input_dict[router]:
+ if "disable" in input_dict[router]["pim6"]:
+ enable_flag = False
+ interfaces = input_dict[router]["pim6"]["disable"]
+
+ if type(interfaces) is not list:
+ interfaces = [interfaces]
+
+ for interface in interfaces:
+ cmd = "interface {}".format(interface)
+ config_data.append(cmd)
+ config_data.append("no ipv6 pim")
+
# pim global config
if "pim" in input_dict[router]:
pim_data = input_dict[router]["pim"]
@@ -797,6 +824,134 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected
return True
+@retry(retry_timeout=12)
+def verify_pim6_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected=True):
+ """
+ Verify all pim6 neighbors are up and running, config is verified
+ using "show ipv6 pim neighbor" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : dut info
+ * `iface` : link for which the PIM6 neighbor needs to be checked
+ * `nbr_ip` : neighbor ip of interface
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ result = verify_pim6_neighbors(tgen, topo, dut, iface="ens192", nbr_ip="20.1.1.2")
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if dut is not None and dut != router:
+ continue
+
+ rnode = tgen.routers()[router]
+ show_ip_pim_neighbor_json = rnode.vtysh_cmd(
+ "show ipv6 pim neighbor json", isjson=True
+ )
+
+ for destLink, data in topo["routers"][router]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if iface is not None and iface != data["interface"]:
+ continue
+
+ if "pim6" not in data:
+ continue
+
+ if "pim6" in data and data["pim6"] == "disable":
+ continue
+
+ if "pim6" in data and data["pim6"] == "enable":
+ local_interface = data["interface"]
+
+ if "-" in destLink:
+ # Splitting and storing destRouterLink data in tempList
+ tempList = destLink.split("-")
+
+ # destRouter
+ destLink = tempList.pop(0)
+
+ # Current Router Link
+ tempList.insert(0, router)
+ curRouter = "-".join(tempList)
+ else:
+ curRouter = router
+ if destLink not in topo["routers"]:
+ continue
+ data = topo["routers"][destLink]["links"][curRouter]
+ peer_interface = data["interface"]
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if "pim6" not in data:
+ continue
+
+ logger.info("[DUT: %s]: Verifying PIM neighbor status:", router)
+
+ if "pim6" in data and data["pim6"] == "enable":
+ pim_nh_intf_ip = get_frr_ipv6_linklocal(tgen, destLink, peer_interface)
+
+ # Verifying PIM neighbor
+ if local_interface in show_ip_pim_neighbor_json:
+ if show_ip_pim_neighbor_json[local_interface]:
+ if (
+ show_ip_pim_neighbor_json[local_interface][pim_nh_intf_ip][
+ "neighbor"
+ ]
+ != pim_nh_intf_ip
+ ):
+ errormsg = (
+ "[DUT %s]: Local interface: %s, PIM6"
+ " neighbor check failed "
+ "Expected neighbor: %s, Found neighbor:"
+ " %s"
+ % (
+ router,
+ local_interface,
+ pim_nh_intf_ip,
+ show_ip_pim_neighbor_json[local_interface][
+ pim_nh_intf_ip
+ ]["neighbor"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Local interface: %s, Found"
+ " expected PIM6 neighbor %s",
+ router,
+ local_interface,
+ pim_nh_intf_ip,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Local interface: %s, and"
+ "interface ip: %s is not found in "
+ "PIM6 neighbor " % (router, local_interface, pim_nh_intf_ip)
+ )
+ return errormsg
+ else:
+ errormsg = (
+ "[DUT %s]: Local interface: %s, is not "
+ "present in PIM6 neighbor " % (router, local_interface)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
@retry(retry_timeout=40, diag_pct=0)
def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):
"""
@@ -871,7 +1026,7 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):
return True
-@retry(retry_timeout=60, diag_pct=0)
+@retry(retry_timeout=60, diag_pct=2)
def verify_upstream_iif(
tgen,
dut,
@@ -879,7 +1034,9 @@ def verify_upstream_iif(
src_address,
group_addresses,
joinState=None,
+ regState=None,
refCount=1,
+ addr_type="ipv4",
expected=True,
):
"""
@@ -910,7 +1067,6 @@ def verify_upstream_iif(
-------
errormsg(str) or True
"""
-
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if dut not in tgen.routers():
@@ -919,7 +1075,8 @@ def verify_upstream_iif(
rnode = tgen.routers()[dut]
logger.info(
- "[DUT: %s]: Verifying upstream Inbound Interface" " for IGMP groups received:",
+ "[DUT: %s]: Verifying upstream Inbound Interface"
+ " for IGMP/MLD groups received:",
dut,
)
@@ -1010,14 +1167,33 @@ def verify_upstream_iif(
)
return errormsg
+ if regState:
+ if group_addr_json[src_address]["regState"] != regState:
+ errormsg = (
+ "[DUT %s]: Verifying iif "
+ "(Inbound Interface) for (%s,%s) and"
+ " rejstate :%s [FAILED]!! "
+ " Expected: %s, Found: %s"
+ % (
+ dut,
+ src_address,
+ grp_addr,
+ group_addr_json[src_address]["regState"],
+ in_interface,
+ group_addr_json[src_address]["inboundInterface"],
+ )
+ )
+ return errormsg
+
logger.info(
"[DUT %s]: Verifying iif(Inbound Interface)"
- " for (%s,%s) and joinState is %s [PASSED]!! "
+ " for (%s,%s) and joinState is %s regstate is %s [PASSED]!! "
" Found Expected: (%s)",
dut,
src_address,
grp_addr,
group_addr_json[src_address]["joinState"],
+ group_addr_json[src_address]["regState"],
group_addr_json[src_address]["inboundInterface"],
)
if not found:
@@ -1042,7 +1218,7 @@ def verify_upstream_iif(
@retry(retry_timeout=12)
def verify_join_state_and_timer(
- tgen, dut, iif, src_address, group_addresses, expected=True
+ tgen, dut, iif, src_address, group_addresses, addr_type="ipv4", expected=True
):
"""
Verify join state is updated correctly and join timer is
@@ -1178,6 +1354,7 @@ def verify_mroutes(
oil,
return_uptime=False,
mwait=0,
+ addr_type="ipv4",
expected=True,
):
"""
@@ -1393,6 +1570,7 @@ def verify_pim_rp_info(
rp=None,
source=None,
iamrp=None,
+ addr_type="ipv4",
expected=True,
):
"""
@@ -1578,6 +1756,7 @@ def verify_pim_state(
group_addresses,
src_address=None,
installed_fl=None,
+ addr_type="ipv4",
expected=True,
):
"""
@@ -1697,7 +1876,7 @@ def verify_pim_state(
def get_pim_interface_traffic(tgen, input_dict):
"""
- get ip pim interface traffice by running
+ get ip pim interface traffic by running
"show ip pim interface traffic" cli
Parameters
@@ -1768,9 +1947,82 @@ def get_pim_interface_traffic(tgen, input_dict):
return output_dict
+def get_pim6_interface_traffic(tgen, input_dict):
+ """
+ get ipv6 pim interface traffic by running
+ "show ipv6 pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict(dict)`: defines DUT, what and from which interfaces
+ traffic needs to be retrieved
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "r1-r0-eth0": {
+ "helloRx": 0,
+ "helloTx": 1,
+ "joinRx": 0,
+ "joinTx": 0
+ }
+ }
+ }
+
+ result = get_pim_interface_traffic(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ output_dict = {}
+ for dut in input_dict.keys():
+ if dut not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
+
+ def show_pim_intf_traffic(rnode, dut, input_dict, output_dict):
+ show_pim_intf_traffic_json = run_frr_cmd(
+ rnode, "show ipv6 pim interface traffic json", isjson=True
+ )
+
+ output_dict[dut] = {}
+ for intf, data in input_dict[dut].items():
+ interface_json = show_pim_intf_traffic_json[intf]
+ for state in data:
+
+ # Verify Tx/Rx
+ if state in interface_json:
+ output_dict[dut][state] = interface_json[state]
+ else:
+ errormsg = (
+ "[DUT %s]: %s is not present"
+ "for interface %s [FAILED]!! " % (dut, state, intf)
+ )
+ return errormsg
+ return None
+
+ test_func = functools.partial(
+ show_pim_intf_traffic, rnode, dut, input_dict, output_dict
+ )
+ (result, out) = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ if not result:
+ return out
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return output_dict
+
+
@retry(retry_timeout=40, diag_pct=0)
def verify_pim_interface(
- tgen, topo, dut, interface=None, interface_ip=None, expected=True
+ tgen, topo, dut, interface=None, interface_ip=None, addr_type="ipv4", expected=True
):
"""
Verify all PIM interface are up and running, config is verified
@@ -1803,29 +2055,48 @@ def verify_pim_interface(
logger.info("[DUT: %s]: Verifying PIM interface status:", dut)
rnode = tgen.routers()[dut]
- show_ip_pim_interface_json = rnode.vtysh_cmd(
- "show ip pim interface json", isjson=True
+
+ if addr_type == "ipv4":
+ addr_cmd = "ip"
+ pim_cmd = "pim"
+ elif addr_type == "ipv6":
+ addr_cmd = "ipv6"
+ pim_cmd = "pim6"
+ show_pim_interface_json = rnode.vtysh_cmd(
+ "show {} pim interface json".format(addr_cmd), isjson=True
)
- logger.info("show_ip_pim_interface_json: \n %s", show_ip_pim_interface_json)
+ logger.info("show_pim_interface_json: \n %s", show_pim_interface_json)
if interface_ip:
- if interface in show_ip_pim_interface_json:
- pim_intf_json = show_ip_pim_interface_json[interface]
+ if interface in show_pim_interface_json:
+ pim_intf_json = show_pim_interface_json[interface]
if pim_intf_json["address"] != interface_ip:
errormsg = (
- "[DUT %s]: PIM interface "
- "ip is not correct "
+ "[DUT %s]: %s interface "
+ "%s is not correct "
"[FAILED]!! Expected : %s, Found : %s"
- % (dut, pim_intf_json["address"], interface_ip)
+ % (
+ dut,
+ pim_cmd,
+ addr_cmd,
+ pim_intf_json["address"],
+ interface_ip,
+ )
)
return errormsg
else:
logger.info(
- "[DUT %s]: PIM interface "
- "ip is correct "
+ "[DUT %s]: %s interface "
+ "%s is correct "
"[Passed]!! Expected : %s, Found : %s"
- % (dut, pim_intf_json["address"], interface_ip)
+ % (
+ dut,
+ pim_cmd,
+ addr_cmd,
+ pim_intf_json["address"],
+ interface_ip,
+ )
)
return True
else:
@@ -1833,17 +2104,17 @@ def verify_pim_interface(
if "type" in data and data["type"] == "loopback":
continue
- if "pim" in data and data["pim"] == "enable":
+ if pim_cmd in data and data[pim_cmd] == "enable":
pim_interface = data["interface"]
- pim_intf_ip = data["ipv4"].split("/")[0]
+ pim_intf_ip = data[addr_type].split("/")[0]
- if pim_interface in show_ip_pim_interface_json:
- pim_intf_json = show_ip_pim_interface_json[pim_interface]
+ if pim_interface in show_pim_interface_json:
+ pim_intf_json = show_pim_interface_json[pim_interface]
else:
errormsg = (
- "[DUT %s]: PIM interface: %s "
- "PIM interface ip: %s, not Found"
- % (dut, pim_interface, pim_intf_ip)
+ "[DUT %s]: %s interface: %s "
+ "PIM interface %s: %s, not Found"
+ % (dut, pim_cmd, pim_interface, addr_cmd, pim_intf_ip)
)
return errormsg
@@ -1853,12 +2124,14 @@ def verify_pim_interface(
and pim_intf_json["state"] != "up"
):
errormsg = (
- "[DUT %s]: PIM interface: %s "
- "PIM interface ip: %s, status check "
+ "[DUT %s]: %s interface: %s "
+ "PIM interface %s: %s, status check "
"[FAILED]!! Expected : %s, Found : %s"
% (
dut,
+ pim_cmd,
pim_interface,
+ addr_cmd,
pim_intf_ip,
pim_interface,
pim_intf_json["state"],
@@ -1867,11 +2140,13 @@ def verify_pim_interface(
return errormsg
logger.info(
- "[DUT %s]: PIM interface: %s, "
- "interface ip: %s, status: %s"
+ "[DUT %s]: %s interface: %s, "
+ "interface %s: %s, status: %s"
" [PASSED]!!",
dut,
+ pim_cmd,
pim_interface,
+ addr_cmd,
pim_intf_ip,
pim_intf_json["state"],
)
@@ -1882,8 +2157,8 @@ def verify_pim_interface(
def clear_pim_interface_traffic(tgen, topo):
"""
- Clear ip/ipv6 pim interface traffice by running
- "clear ip/ipv6 pim interface traffic" cli
+ Clear ip pim interface traffic by running
+ "clear ip pim interface traffic" cli
Parameters
----------
@@ -1914,6 +2189,74 @@ def clear_pim_interface_traffic(tgen, topo):
return True
+def clear_pim6_interface_traffic(tgen, topo):
+ """
+ Clear ipv6 pim interface traffic by running
+ "clear ipv6 pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ Usage
+ -----
+
+ result = clear_pim6_interface_traffic(tgen, topo)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in tgen.routers():
+ if "pim" not in topo["routers"][dut]:
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Clearing pim6 interface traffic", dut)
+ result = run_frr_cmd(rnode, "clear ipv6 pim interface traffic")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
+def clear_pim6_interfaces(tgen, topo):
+ """
+ Clear ipv6 pim interface by running
+ "clear ipv6 pim interface" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ Usage
+ -----
+
+ result = clear_pim6_interfaces(tgen, topo)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in tgen.routers():
+ if "pim" not in topo["routers"][dut]:
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Clearing pim6 interfaces", dut)
+ result = run_frr_cmd(rnode, "clear ipv6 pim interface")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
def clear_pim_interfaces(tgen, dut):
"""
Clear ip/ipv6 pim interface by running
@@ -1961,8 +2304,8 @@ def clear_pim_interfaces(tgen, dut):
# Waiting for maximum 60 sec
fail_intf = []
for retry in range(1, 13):
- logger.info("[DUT: %s]: Waiting for 5 sec for PIM neighbors" " to come up", dut)
sleep(5)
+ logger.info("[DUT: %s]: Waiting for 5 sec for PIM neighbors" " to come up", dut)
run_json_after = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True)
found = True
for pim_intf in nh_before_clear.keys():
@@ -2212,6 +2555,35 @@ def clear_mroute(tgen, dut=None):
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+def clear_pim6_mroute(tgen, dut=None):
+ """
+ Clear ipv6 mroute by running "clear ipv6 mroute" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test, default None
+
+ Usage
+ -----
+ clear_mroute(tgen, dut)
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ router_list = tgen.routers()
+ for router, rnode in router_list.items():
+ if dut is not None and router != dut:
+ continue
+
+ logger.debug("[DUT: %s]: Clearing ipv6 mroute", router)
+ rnode.vtysh_cmd("clear ipv6 mroute")
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+
+ return True
+
+
def reconfig_interfaces(tgen, topo, senderRouter, receiverRouter, packet=None):
"""
Configure interface ip for sender and receiver routers
@@ -2812,7 +3184,14 @@ def enable_disable_pim_bsm(tgen, router, intf, enable=True):
@retry(retry_timeout=60, diag_pct=0)
def verify_pim_join(
- tgen, topo, dut, interface, group_addresses, src_address=None, expected=True
+ tgen,
+ topo,
+ dut,
+ interface,
+ group_addresses,
+ src_address=None,
+ addr_type="ipv4",
+ expected=True,
):
"""
Verify ip/ipv6 pim join by running "show ip/ipv6 pim join" cli
@@ -2846,11 +3225,22 @@ def verify_pim_join(
rnode = tgen.routers()[dut]
logger.info("[DUT: %s]: Verifying pim join", dut)
- show_pim_join_json = run_frr_cmd(rnode, "show ip pim join json", isjson=True)
if type(group_addresses) is not list:
group_addresses = [group_addresses]
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ show_pim_join_json = run_frr_cmd(
+ rnode, "show {} pim join json".format(ip_cmd), isjson=True
+ )
+
for grp_addr in group_addresses:
# Verify if IGMP is enabled in DUT
if "igmp" not in topo["routers"][dut]:
@@ -3660,7 +4050,7 @@ def verify_multicast_flag_state(
@retry(retry_timeout=40, diag_pct=0)
-def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip, expected=True):
+def verify_igmp_interface(tgen, dut, igmp_iface, interface_ip, expected=True):
"""
Verify all IGMP interface are up and running, config is verified
using "show ip igmp interface" cli
@@ -3884,7 +4274,7 @@ def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type="ipv4"):
"""
- Verify ip pim interface traffice by running
+ Verify ip pim interface traffic by running
"show ip pim interface traffic" cli
Parameters
@@ -3950,6 +4340,661 @@ def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type=
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True if return_stats == False else output_dict
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_mld_groups(tgen, dut, interface, group_addresses, expected=True):
+ """
+ Verify IGMP groups are received from an intended interface
+ by running "show ip mld groups" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `interface`: interface, from which MLD groups would be received
+ * `group_addresses`: MLD group address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "ffaa::1"
+ result = verify_mld_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying mld groups received:", dut)
+ show_mld_json = run_frr_cmd(rnode, "show ipv6 mld groups json", isjson=True)
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface in show_mld_json:
+ show_mld_json = show_mld_json[interface]["groups"]
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying MLD group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ found = False
+ for grp_addr in group_addresses:
+ for index in show_mld_json:
+ if index["group"] == grp_addr:
+ found = True
+ break
+ if found is not True:
+ errormsg = (
+ "[DUT %s]: Verifying MLD group received"
+ " from interface %s [FAILED]!! "
+ " Expected not found: %s" % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying MLD group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=40, diag_pct=0)
+def verify_mld_interface(tgen, dut, mld_iface, interface_ip, expected=True):
+ """
+ Verify all MLD interfaces are up and running; config is verified
+ using the "show ipv6 mld interface" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut` : device under test
+ * `mld_iface` : interface name
+ * `interface_ip` : interface ip address
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ result = verify_mld_interface(tgen, dut, mld_iface, interface_ip)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for router in tgen.routers():
+ if router != dut:
+ continue
+
+ logger.info("[DUT: %s]: Verifying MLD interface status:", dut)
+
+ rnode = tgen.routers()[dut]
+ show_mld_interface_json = run_frr_cmd(
+ rnode, "show ipv6 mld interface json", isjson=True
+ )
+
+ if mld_iface in show_mld_interface_json:
+ mld_intf_json = show_mld_interface_json[mld_iface]
+ # Verifying MLD interface
+ if mld_intf_json["address"] != interface_ip:
+ errormsg = (
+ "[DUT %s]: igmp interface ip is not correct "
+ "[FAILED]!! Expected : %s, Found : %s"
+ % (dut, mld_intf_json["address"], interface_ip)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: igmp interface: %s, " "interface ip: %s" " [PASSED]!!",
+ dut,
+ mld_iface,
+ interface_ip,
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: igmp interface: %s "
+ "igmp interface ip: %s, is not present "
+ % (dut, mld_iface, interface_ip)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_mld_config(tgen, input_dict, stats_return=False, expected=True):
+ """
+ Verify mld interface details, verifying following configs:
+ timerQueryInterval
+ timerQueryResponseIntervalMsec
+ lastMemberQueryCount
+ timerLastMemberQueryMsec
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict` : Input dict data, required to verify
+ timer
+ * `stats_return`: If user wants API to return statistics
+ * `expected` : expected results from API, by-default True
+
+ Usage
+ -----
+ input_dict ={
+ "l1": {
+ "mld": {
+ "interfaces": {
+ "l1-i1-eth1": {
+ "mld": {
+ "query": {
+ "query-interval" : 200,
+ "query-max-response-time" : 100
+ },
+ "statistics": {
+ "queryV2" : 2,
+ "reportV2" : 1
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = verify_mld_config(tgen, input_dict, stats_return)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ for dut in input_dict.keys():
+ rnode = tgen.routers()[dut]
+
+ for interface, data in input_dict[dut]["igmp"]["interfaces"].items():
+
+ statistics = False
+ report = False
+ if "statistics" in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"]:
+ statistics = True
+ cmd = "show ipv6 mld statistics"
+ else:
+ cmd = "show ipv6 mld"
+
+ logger.info("[DUT: %s]: Verifying MLD interface %s detail:", dut, interface)
+
+ if statistics:
+ if (
+ "report"
+ in input_dict[dut]["mld"]["interfaces"][interface]["mld"][
+ "statistics"
+ ]
+ ):
+ report = True
+
+ if statistics and report:
+ show_ipv6_mld_intf_json = run_frr_cmd(
+ rnode, "{} json".format(cmd), isjson=True
+ )
+ intf_detail_json = show_ipv6_mld_intf_json["global"]
+ else:
+ show_ipv6_mld_intf_json = run_frr_cmd(
+ rnode, "{} interface {} json".format(cmd, interface), isjson=True
+ )
+
+ if not report:
+ if interface not in show_ipv6_mld_intf_json:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " is not present in CLI output "
+ "[FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ else:
+ intf_detail_json = show_ipv6_mld_intf_json[interface]
+
+ if stats_return:
+ mld_stats = {}
+
+ if "statistics" in data["mld"]:
+ if stats_return:
+ mld_stats["statistics"] = {}
+ for query, value in data["mld"]["statistics"].items():
+ if query == "queryV1":
+ # Verifying MLD interface queryV1 statistics
+ if stats_return:
+ mld_stats["statistics"][query] = intf_detail_json["queryV1"]
+
+ else:
+ if intf_detail_json["queryV1"] != value:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " queryV1 statistics verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["queryV1"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "queryV1 statistics is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if query == "reportV1":
+ # Verifying MLD interface reportV1 statistics
+ if stats_return:
+ mld_stats["statistics"][query] = intf_detail_json[
+ "reportV1"
+ ]
+
+ else:
+ if intf_detail_json["reportV1"] <= value:
+ errormsg = (
+ "[DUT %s]: MLD reportV1 "
+ "statistics verification "
+ "[FAILED]!! Expected : %s "
+ "or more, Found : %s"
+ % (
+ dut,
+ value,
+ intf_detail_json["reportV1"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD reportV1 " "statistics is %s",
+ dut,
+ intf_detail_json["reportV1"],
+ )
+
+ if "query" in data["mld"]:
+ for query, value in data["mld"]["query"].items():
+ if query == "query-interval":
+ # Verifying MLD interface query interval timer
+ if intf_detail_json["timerQueryInterval"] != value:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " query-interval verification "
+ "[FAILED]!! Expected : %s,"
+ " Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["timerQueryInterval"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s " "query-interval is %s",
+ dut,
+ interface,
+ value,
+ )
+
+ if query == "query-max-response-time":
+ # Verifying MLD interface query max response timer
+ if (
+ intf_detail_json["timerQueryResponseIntervalMsec"]
+ != value * 100
+ ):
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ "query-max-response-time "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value * 100,
+ intf_detail_json["timerQueryResponseIntervalMsec"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "query-max-response-time is %s ms",
+ dut,
+ interface,
+ value * 100,
+ )
+
+ if query == "last-member-query-count":
+ # Verifying MLD interface last member query count
+ if intf_detail_json["lastMemberQueryCount"] != value:
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-count "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value,
+ intf_detail_json["lastMemberQueryCount"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-count is %s ms",
+ dut,
+ interface,
+ value * 1000,
+ )
+
+ if query == "last-member-query-interval":
+ # Verifying MLD interface last member query interval
+ if (
+ intf_detail_json["timerLastMemberQueryMsec"]
+ != value * 100 * intf_detail_json["lastMemberQueryCount"]
+ ):
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-interval "
+ "verification [FAILED]!!"
+ " Expected : %s, Found : %s"
+ % (
+ dut,
+ interface,
+ value * 1000,
+ intf_detail_json["timerLastMemberQueryMsec"],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s "
+ "last-member-query-interval is %s ms",
+ dut,
+ interface,
+ value * intf_detail_json["lastMemberQueryCount"] * 100,
+ )
+
+ if "version" in data["mld"]:
+ # Verifying MLD interface state is up
+ if intf_detail_json["state"] != "up":
+ errormsg = (
+ "[DUT %s]: MLD interface: %s "
+ " state: %s verification "
+ "[FAILED]!!" % (dut, interface, intf_detail_json["state"])
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: MLD interface: %s " "state: %s",
+ dut,
+ interface,
+ intf_detail_json["state"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True if stats_return == False else mld_stats
+
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_nexthop(tgen, topo, dut, nexthop, addr_type):
+ """
+ Verify all PIM nexthop details using "show ip/ipv6 pim neighbor" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `topo` : json file data
+ * `dut` : dut info
+ * `nexthop` : nexthop ip/ipv6 address
+
+ Usage
+ -----
+ result = verify_pim_nexthop(tgen, topo, dut, nexthop)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ rnode = tgen.routers()[dut]
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ cmd = "show {} pim nexthop".format(addr_type)
+ pim_nexthop = rnode.vtysh_cmd(cmd)
+
+ if nexthop in pim_nexthop:
+ logger.info("[DUT %s]: Expected nexthop %s is present", dut, nexthop)
+ result = True
+ else:
+ result = "[DUT %s]: Nexthop not found: %s" % (dut, nexthop)
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
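+# Illustrative usage only (hedged sketch, not taken from any testcase): the
+# @retry decorator above re-runs verify_pim_nexthop() until it returns True or
+# the retry timeout (60s here) expires. The router/link keys below are
+# placeholder examples.
+#
+#     nh = topo["routers"]["r2"]["links"]["r1"]["ipv6"].split("/")[0]
+#     result = verify_pim_nexthop(tgen, topo, "r1", nh, "ipv6")
+#     assert result is True, "PIM6 nexthop {} not found: {}".format(nh, result)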
+
+@retry(retry_timeout=60, diag_pct=0)
+def verify_mroute_summary(
+ tgen, dut, sg_mroute=None, starg_mroute=None, total_mroute=None, addr_type="ipv4"
+):
+ """
+ Verify that the ip mroute summary has the correct (*,G), (S,G) and total
+ mroute counts by running the "show ip mroute summary json" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `sg_mroute`: Number of installed (S,G) mroutes
+ * `starg_mroute`: Number of installed (*,G) mroutes
+ * `total_mroute`: Total number of installed mroutes
+ * `addr_type`: ip type, ipv4 or ipv6
+
+ Usage
+ -----
+ dut = "r1"
+ sg_mroute = "4000"
+ starg_mroute= "2000"
+ total_mroute = "6000"
+ addr_type=IPv4 or IPv6
+ result = verify_mroute_summary(tgen, dut, sg_mroute=None, starg_mroute=None,
+ total_mroute= None)
+ Returns
+ -------
+ errormsg or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying mroute summary", dut)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ cmd = "show {} mroute summary json".format(ip_cmd)
+ show_mroute_summary_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if starg_mroute is not None:
+ if show_mroute_summary_json["wildcardGroup"]["installed"] != starg_mroute:
+ logger.error(
+ "Number of installed starg are: %s but expected: %s",
+ show_mroute_summary_json["wildcardGroup"]["installed"],
+ starg_mroute,
+ )
+ return False
+ logger.info(
+ "Number of installed starg routes are %s",
+ show_mroute_summary_json["wildcardGroup"]["installed"],
+ )
+
+ if sg_mroute is not None:
+ if show_mroute_summary_json["sourceGroup"]["installed"] != sg_mroute:
+ logger.error(
+ "Number of installed SG routes are: %s but expected: %s",
+ show_mroute_summary_json["sourceGroup"]["installed"],
+ sg_mroute,
+ )
+ return False
+ logger.info(
+ "Number of installed SG routes are %s",
+ show_mroute_summary_json["sourceGroup"]["installed"],
+ )
+
+ if total_mroute is not None:
+ if show_mroute_summary_json["totalNumOfInstalledMroutes"] != total_mroute:
+ logger.error(
+ "Total number of installed mroutes are: %s but expected: %s",
+ show_mroute_summary_json["totalNumOfInstalledMroutes"],
+ total_mroute,
+ )
+ return False
+ logger.info(
+ "Number of installed Total mroute are %s",
+ show_mroute_summary_json["totalNumOfInstalledMroutes"],
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
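+# Illustrative usage only (hedged sketch): the expected counts below are
+# arbitrary example values, not taken from any testcase in this suite.
+#
+#     result = verify_mroute_summary(
+#         tgen, "r1", sg_mroute=5, starg_mroute=5, total_mroute=10,
+#         addr_type="ipv6"
+#     )
+#     assert result is True, "mroute summary mismatch: {}".format(result)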
+
+def verify_sg_traffic(tgen, dut, groups, src, addr_type="ipv4"):
+ """
+ Verify multicast traffic by running
+ "show ip mroute count json" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `groups`: igmp or mld groups where traffic needs to be verified
+ * `src`: source address sending traffic to the groups
+ * `addr_type`: ip type, ipv4 or ipv6
+
+ Usage
+ -----
+ result = verify_sg_traffic(tgen, "r1", igmp_groups, srcaddress)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying multicast " "SG traffic", dut)
+
+ if addr_type == "ipv4":
+ cmd = "show ip mroute count json"
+ elif addr_type == "ipv6":
+ cmd = "show ipv6 mroute count json"
+ show_mroute_sg_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if not show_mroute_sg_traffic_json:
+ errormsg = "[DUT %s]: Json output is empty" % (dut)
+ return errormsg
+
+ before_traffic = {}
+ after_traffic = {}
+
+ for grp in groups:
+ if grp not in show_mroute_sg_traffic_json:
+ errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+ dut,
+ src,
+ grp,
+ )
+ if src not in show_mroute_sg_traffic_json[grp]:
+ errormsg = (
+ "[DUT %s]: Verifying source is not present in "
+ " %s [FAILED]!! " % (dut, src)
+ )
+ return errormsg
+
+ before_traffic[grp] = show_mroute_sg_traffic_json[grp][src]["packets"]
+
+ logger.info("Waiting for 10sec traffic to increament")
+ sleep(10)
+
+ show_mroute_sg_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ for grp in groups:
+ if grp not in show_mroute_sg_traffic_json:
+ errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
+ dut,
+ src,
+ grp,
+ )
+ if src not in show_mroute_sg_traffic_json[grp]:
+ errormsg = (
+ "[DUT %s]: Verifying source is not present in "
+ " %s [FAILED]!! " % (dut, src)
+ )
+ return errormsg
+
+ after_traffic[grp] = show_mroute_sg_traffic_json[grp][src]["packets"]
+
+ for grp in groups:
+ if after_traffic[grp] < before_traffic[grp]:
+ errormsg = (
+ "[DUT %s]: Verifying igmp group %s source %s not increamenting traffic"
+ " [FAILED]!! " % (dut, grp, src)
+ )
+ return errormsg
+ else:
+ logger.info(
+ "[DUT %s]:igmp group %s source %s receiving traffic"
+ " [PASSED]!! " % (dut, grp, src)
+ )
+ result = True
+
+ return result
+
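+# Illustrative usage only (hedged sketch): verify_sg_traffic() samples the
+# per-(S,G) "packets" counters twice, roughly 10 seconds apart, so traffic
+# (socat in these suites) must already be flowing to the groups passed in.
+# GROUP_ADDRESS_1 and SOURCE_ADDRESS are placeholder names from the calling
+# testcase, not defined in this library.
+#
+#     result = verify_sg_traffic(tgen, "r1", [GROUP_ADDRESS_1], SOURCE_ADDRESS,
+#                                addr_type="ipv6")
+#     assert result is True, "SG traffic did not increment: {}".format(result)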
# def cleanup(self):
# super(McastTesterHelper, self).cleanup()
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/__init__.py b/tests/topotests/multicast_pim6_static_rp_topo1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/__init__.py
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json b/tests/topotests/multicast_pim6_static_rp_topo1/multicast_pim6_static_rp.json
index 9edfae4a2..9edfae4a2 100644
--- a/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/multicast_pim6_static_rp.json
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
new file mode 100755
index 000000000..dd8818e92
--- /dev/null
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
@@ -0,0 +1,1321 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+1. Verify upstream interfaces(IIF) and join state are updated
+ properly after adding and deleting the static RP
+2. Verify IIF and OIL in "show ipv6 PIM6 state" updated properly when
+ RP becomes unreachable
+3. Verify RP becomes reachable after MLD join received, PIM6 join
+ towards RP is sent immediately
+4. Verify (*,G) and (S,G) populated correctly when SPT and RPT
+ share the same path
+5. Verify OIF and RPF for (*,G) and (S,G) when the static RP is configured
+ on the LHR router
+6. Verify OIF and RPF for (*,G) and (S,G) when the static RP is configured
+ on the FHR router
+7. Verify (*,G) and (S,G) populated correctly when the RPT and SPT paths
+ are different
+8. Verify PIM6 join is sent towards the higher preferred RP
+9. Verify PIM6 prune is sent towards the lower preferred RP
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ step,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router_daemons,
+ create_static_routes,
+ check_router_status,
+ socat_send_mld_join,
+ socat_send_pim6_traffic,
+ kill_socat,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ verify_upstream_iif,
+ verify_join_state_and_timer,
+ verify_mroutes,
+ verify_pim_neighbors,
+ verify_pim_interface_traffic,
+ verify_pim_rp_info,
+ verify_pim_state,
+ clear_pim6_interface_traffic,
+ clear_pim6_mroute,
+ verify_pim6_neighbors,
+ get_pim6_interface_traffic,
+ clear_pim6_interfaces,
+ verify_mld_groups,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Global variables
+GROUP_RANGE_1 = "ff08::/64"
+GROUP_ADDRESS_1 = "ff08::1"
+GROUP_RANGE_3 = "ffaa::/64"
+GROUP_ADDRESS_3 = "ffaa::1"
+GROUP_RANGE_4 = "ff00::/8"
+GROUP_ADDRESS_4 = "ff00::1"
+STAR = "*"
+SOURCE = "Static"
+ASSERT_MSG = "Testcase {} : Failed Error: {}"
+
+pytestmark = [pytest.mark.pim6d]
+
+
+def build_topo(tgen):
+ """Build function"""
+
+ # Building topology from json file
+ build_topo_from_json(tgen, TOPO)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: %s", testsuite_run_time)
+ logger.info("=" * 40)
+
+ topology = """
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+ """
+ logger.info("Master Topology: \n %s", topology)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/multicast_pim6_static_rp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global TOPO
+ TOPO = tgen.json_topo
+
+ # ... and here it calls Mininet initialization functions.
+
+ # Get the list of daemons that need to be started for this suite.
+ daemons = topo_daemons(tgen, TOPO)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, TOPO)
+
+ # Verify PIM6 neighbors
+ result = verify_pim6_neighbors(tgen, TOPO)
+ assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local API
+#
+#####################################################
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
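+# Illustrative usage only (hedged sketch): verify_state_incremented() compares
+# two snapshots returned by get_pim6_interface_traffic() /
+# verify_pim_interface_traffic(); the interface and counter names below are
+# placeholder examples.
+#
+#     state_dict = {"r1": {"r1-r2-eth1": ["joinTx", "pruneTx"]}}
+#     before = get_pim6_interface_traffic(tgen, state_dict)
+#     # ... trigger the event under test ...
+#     after = get_pim6_interface_traffic(tgen, state_dict)
+#     assert verify_state_incremented(before, after) is True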
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_pim6_add_delete_static_RP_p0(request):
+ """
+ Verify upstream interfaces(IIF) and join state are updated
+ properly after adding and deleting the static RP
+ Verify IIF and OIL in "show ipv6 PIM6 state" updated properly when
+ RP becomes unreachable
+ Verify RP becomes reachable after MLD join received, PIM6 join
+ towards RP is sent immediately
+
+ TOPOlogy used:
+ r0------r1-----r2
+ iperf DUT RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Shut link b/w R1 and R3 and R1 and R4 as per testcase topology")
+ intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ for intf in [intf_r1_r3, intf_r1_r4]:
+ shutdown_bringup_interface(tgen, "r1", intf, ifaceaction=False)
+
+ step("Enable PIM6 between r1 and r2")
+ step("Enable MLD on r1 interface and send MLD " "join {} to r1".\
+ format(GROUP_RANGE_1))
+ step("Configure r2 loopback interface as RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify show ipv6 mld group without any MLD join")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r1: mld group present without any MLD join \n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("Verify show ipv6 PIM6 interface traffic without any mld join")
+ state_dict = {
+ "r1": {TOPO["routers"]["r1"]["links"]["r2"]["interface"]: ["pruneTx"]}
+ }
+
+ state_before = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ step("send mld join {} to R1".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ oif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ iif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, oif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, oif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify PIM6 state")
+ result = verify_pim_state(tgen, dut, oif, iif, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, oif, iif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Delete RP configuration")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_1, oif, rp_address, SOURCE, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n " "RP: {} info is still present \n Error: {}".format(
+ tc_name, rp_address, result
+ )
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, oif, STAR, GROUP_ADDRESS_1, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "Upstream ({}, {}) is still in join state \n Error: {}".format(
+ tc_name, STAR, GROUP_ADDRESS_1, result
+ )
+ )
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, oif, STAR, GROUP_ADDRESS_1, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "Upstream ({}, {}) timer is still running \n Error: {}".format(
+ tc_name, STAR, GROUP_ADDRESS_1, result
+ )
+ )
+
+ step("r1: Verify PIM6 state")
+ result = verify_pim_state(tgen, dut, oif, iif, GROUP_ADDRESS_1, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "PIM state for group: {} is still Active \n Error: {}".format(
+ tc_name, GROUP_ADDRESS_1, result
+ )
+ )
+
+ step("r1: Verify ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, oif, iif, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "mroute ({}, {}) is still present \n Error: {}".format(
+ tc_name, STAR, GROUP_ADDRESS_1, result
+ )
+ )
+
+ step("r1: Verify show ipv6 PIM6 interface traffic without any MLD join")
+ state_after = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_SPT_RPT_path_same_p1(request):
+ """
+ Verify (*,G) and (S,G) populated correctly when SPT and RPT
+ share the same path
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1 r3-----r5
+
+ r1 : LHR
+ r2 : RP
+ r3 : FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Shut link b/w R1->R3, R1->R4 and R3->R1, R3->R4 as per " "testcase topology")
+ intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ intf_r3_r1 = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ intf_r3_r4 = TOPO["routers"]["r3"]["links"]["r4"]["interface"]
+ for intf in [intf_r1_r3, intf_r1_r4]:
+ shutdown_bringup_interface(tgen, "r1", intf, ifaceaction=False)
+
+ for intf in [intf_r3_r1, intf_r3_r4]:
+ shutdown_bringup_interface(tgen, "r3", intf, ifaceaction=False)
+
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("Configure RP on r2 (loopback interface) for the group range {}".\
+ format(GROUP_RANGE_1))
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Enable MLD on r1 interface and send MLD join {} to R1".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("Send multicast traffic from R5")
+ intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r2: Verify RP info")
+ dut = "r2"
+ oif = "lo"
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r2"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S, G) upstream join state is up and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r2"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_RP_configured_as_LHR_p1(request):
+ """
+ Verify OIF and RPF for (*,G) and (S,G) when the static RP is configured
+ on the LHR router
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1 : LHR/RP
+ r3 : FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+
+ step("r1: Configure r1(LHR) as RP")
+ input_dict = {
+ "r1": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r1"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Shut not Shut loopback interface")
+ shutdown_bringup_interface(tgen, "r1", "lo", False)
+ shutdown_bringup_interface(tgen, "r1", "lo", True)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ iif = "lo"
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ rp_address = TOPO["routers"]["r1"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_RANGE_1, iif, rp_address, SOURCE)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("send mld join {} to R1".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S, G) upstream join state is joined and join"
+ " timer is running \n Error: {}".format(tc_name, result)
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_RP_configured_as_FHR_p1(request):
+ """
+ Verify OIF and RPF for (*,G) and (S,G) when the static RP is configured
+ on the FHR router
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1 : LHR
+ r3 : FHR/RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("r3: Configure r3(FHR) as RP")
+ input_dict = {
+ "r3": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r3"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ rp_address = TOPO["routers"]["r3"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_RANGE_1, iif, rp_address, SOURCE)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("send mld join {} to R1".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_SPT_RPT_path_different_p1(request):
+ """
+ Verify (*,G) and (S,G) populated correctly when RPT and SPT path
+ are different
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1: LHR
+ r2: RP
+ r3: FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("r2: Configure r2 as RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r2: Verify RP info")
+ dut = "r2"
+ iif = "lo"
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_ADDRESS_1, iif, rp_address, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("send mld join {} to R1".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
+ intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ dut = "r2"
+ iif = TOPO["routers"]["r2"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1, joinState="NotJoined"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_1, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r2: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_send_join_on_higher_preffered_rp_p1(request):
+ """
+ Verify PIM6 join is sent towards the higher preferred RP
+ Verify PIM6 prune is sent towards the lower preferred RP
+
+ Topology used:
+ _______r2
+ |
+ iperf |
+ r0-----r1
+ |
+ |_______r4
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM66 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("Configure RP on r2 (loopback interface) for the group range {}".\
+ format(GROUP_RANGE_4))
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_4,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r3 : Make all interface not reachable")
+ intf_r3_r1 = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ intf_r3_r2 = TOPO["routers"]["r3"]["links"]["r2"]["interface"]
+ intf_r3_r4 = TOPO["routers"]["r3"]["links"]["r4"]["interface"]
+ intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ intf_r2_r3 = TOPO["routers"]["r2"]["links"]["r3"]["interface"]
+ intf_r4_r3 = TOPO["routers"]["r4"]["links"]["r3"]["interface"]
+
+ for dut, intf in zip(["r1", "r2", "r4"], [intf_r1_r3, intf_r2_r3, intf_r4_r3]):
+ shutdown_bringup_interface(tgen, dut, intf, ifaceaction=False)
+
+ for intf in [intf_r3_r1, intf_r3_r2, intf_r3_r4]:
+ shutdown_bringup_interface(tgen, "r3", intf, ifaceaction=False)
+
+ step("Verify show ipv6 PIM6 interface traffic without any mld join")
+ state_dict = {"r1": {TOPO["routers"]["r1"]["links"]["r4"]["interface"]: ["joinTx"]}}
+
+ state_before = get_pim6_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ step("r0: send mld join {} to R1".format(GROUP_ADDRESS_3))
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_3, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_3)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("Configure RP on r4 (loopback interface) for the group range " "ffaa::/128")
+ input_dict = {
+ "r4": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1 : Verify RP info for group {}".format(GROUP_ADDRESS_4))
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ rp_address_1 = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_ADDRESS_4, iif, rp_address_1, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1 : Verify RP info for group {}".format(GROUP_ADDRESS_3))
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ rp_address_2 = TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_ADDRESS_3, iif, rp_address_2, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1 : Verify join is sent to higher preferred RP")
+ step("r1 : Verify prune is sent to lower preferred RP")
+ state_after = get_pim6_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ step("r1 : Verify ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_3, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1 : Verify PIM6 state")
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS_3)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1 : Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_3)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1 : Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_3)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ clear_pim6_interface_traffic(tgen, TOPO)
+
+ step("r1 : Verify joinTx, pruneTx count before RP gets deleted")
+ state_dict = {
+ "r1": {
+ TOPO["routers"]["r1"]["links"]["r2"]["interface"]: ["joinTx"],
+ TOPO["routers"]["r1"]["links"]["r4"]["interface"]: ["pruneTx"],
+ }
+ }
+ state_before = get_pim6_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ step("r1 : Delete RP configuration for {}".format(GROUP_RANGE_3))
+ input_dict = {
+ "r4": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1 : Verify rp-info for group {}".format(GROUP_RANGE_3))
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_3, iif, rp_address_1, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1 : Verify rp-info for group {}".format(GROUP_RANGE_4))
+ iif = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_4, iif, rp_address_2, SOURCE, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r1: rp-info is present for group {} \n Error: {}".format(tc_name,
+ GROUP_RANGE_4,
+ result)
+ )
+
+ step(
+ "r1 : Verify RPF interface updated in mroute when higher preferred"
+ "RP gets deleted"
+ )
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_3, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+ logger.info("Expected behavior: %s", result)
+
+ step(
+ "r1 : Verify IIF and OIL in show ipv6 PIM6 state updated when higher"
+ "preferred overlapping RP is deleted"
+ )
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS_3)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step(
+ "r1 : Verify upstream IIF updated when higher preferred overlapping"
+ "RP deleted"
+ )
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_3)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step(
+ "r1 : Verify upstream join state and join timer updated when higher"
+ "preferred overlapping RP deleted"
+ )
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_3, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step(
+ "r1 : Verify join is sent to lower preferred RP, when higher"
+ "preferred RP gets deleted"
+ )
+ step(
+ "r1 : Verify prune is sent to higher preferred RP when higher"
+ " preferred RP gets deleted"
+ )
+ state_after = get_pim6_interface_traffic(tgen, state_dict)
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
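+
+# Hedged note: when run standalone, this suite is typically invoked through
+# pytest with root privileges (exact flags depend on the local topotest
+# environment), e.g.:
+#
+#     sudo -E pytest -s -v test_multicast_pim6_static_rp1.py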
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
new file mode 100755
index 000000000..f366708ec
--- /dev/null
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
@@ -0,0 +1,1324 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+1. Configure multiple groups (10 groups) with the same RP address
+2. Verify IIF and OIL are updated in the mroute when the upstream interface
+ is configured as the RP
+3. Verify RP info and (*,G) mroute after deleting the RP and doing shut /
+ no shut of the RPF interface.
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ step,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router_daemons,
+ create_static_routes,
+ check_router_status,
+ socat_send_mld_join,
+ socat_send_pim6_traffic,
+ kill_socat,
+ topo_daemons,
+)
+from lib.pim import (
+ create_pim_config,
+ verify_upstream_iif,
+ verify_join_state_and_timer,
+ verify_mroutes,
+ verify_pim_neighbors,
+ verify_pim_interface_traffic,
+ verify_pim_rp_info,
+ verify_pim_state,
+ clear_pim6_interface_traffic,
+ clear_pim6_mroute,
+ verify_pim6_neighbors,
+ get_pim6_interface_traffic,
+ clear_pim6_interfaces,
+ verify_mld_groups,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Global variables
+GROUP_RANGE_1 = "ff08::/64"
+GROUP_ADDRESS_1 = "ff08::1"
+GROUP_RANGE_3 = "ffaa::/64"
+GROUP_ADDRESS_3 = "ffaa::1"
+GROUP_RANGE_LIST_1 = [
+ "ffaa::1/128",
+ "ffaa::2/128",
+ "ffaa::3/128",
+ "ffaa::4/128",
+ "ffaa::5/128",
+]
+GROUP_RANGE_LIST_2 = [
+ "ffaa::6/128",
+ "ffaa::7/128",
+ "ffaa::8/128",
+ "ffaa::9/128",
+ "ffaa::10/128",
+]
+GROUP_ADDRESS_LIST_1 = ["ffaa::1", "ffaa::2", "ffaa::3", "ffaa::4", "ffaa::5"]
+GROUP_ADDRESS_LIST_2 = ["ffaa::6", "ffaa::7", "ffaa::8", "ffaa::9", "ffaa::10"]
+STAR = "*"
+SOURCE = "Static"
+ASSERT_MSG = "Testcase {} : Failed Error: {}"
+
+pytestmark = [pytest.mark.pim6d]
+
+
+def build_topo(tgen):
+ """Build function"""
+
+ # Building topology from json file
+ build_topo_from_json(tgen, TOPO)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: %s", testsuite_run_time)
+ logger.info("=" * 40)
+
+ topology = """
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+ """
+ logger.info("Master Topology: \n %s", topology)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/multicast_pim6_static_rp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global TOPO
+ TOPO = tgen.json_topo
+
+ # ... and here it calls Mininet initialization functions.
+
+ # Get the list of daemons that need to be started for this suite.
+ daemons = topo_daemons(tgen, TOPO)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, TOPO)
+
+ # Verify PIM6 neighbors
+ result = verify_pim6_neighbors(tgen, TOPO)
+ assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local API
+#
+#####################################################
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+def test_pim6_multiple_groups_same_RP_address_p2(request):
+ """
+ Configure multiple groups (10 grps) with same RP address
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+
+ r1 : LHR
+ r2 : RP
+ r3 : FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("r2: Configure r2 as RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r2: verify rp-info")
+ dut = "r2"
+ oif = "lo"
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_RANGE_3, oif, rp_address, SOURCE)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
+ step("r0: Send MLD join for 10 groups")
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r5: Send multicast traffic for group {}".format(group_address_list))
+ intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ group_address_list,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ dut = "r2"
+ iif = TOPO["routers"]["r2"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, joinState="NotJoined"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ group_address_list,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r2: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Delete RP configuration")
+ input_dict = {
+ "r1": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
+ dut = "r1"
+ intf = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No Shut the interface r1-r2-eth1 from R1 to R2")
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Configure RP")
+ input_dict = {
+ "r1": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_3,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Shut the interface r1-r0-eth0 from R1 to R2")
+ intf = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: No Shut the interface r1-r0-eth0 from R1 to R2")
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ group_address_list,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_multiple_groups_different_RP_address_p2(request):
+ """
+ Verify that IIF and OIL are updated in the mroute when the upstream
+ interface is configured as RP
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+ r1 : LHR
+ r2 & r4 : RP
+ r3 : FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("r2: Configure r2 as RP")
+ step("r4: Configure r4 as RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_LIST_1,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_LIST_2,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r2: Verify RP info")
+ dut = "r2"
+ oif = "lo"
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_LIST_1, oif, rp_address, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify RP info")
+ dut = "r4"
+ rp_address = TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_LIST_2, oif, rp_address, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
+ step("r0: Send MLD join for 10 groups")
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r5: Send multicast traffic for group {}".format(group_address_list))
+ intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
+ SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
+ result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, group_address_list)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, group_address_list, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r2"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_1,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r2: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_1,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream IIF interface")
+ dut = "r4"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r4"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r4"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_2,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r4: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r4: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_2,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("Delete RP configuration")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_LIST_1,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_LIST_2,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1, r2, r3, r4: Re-configure RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_LIST_1,
+ }
+ ]
+ }
+ },
+ "r4": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r4"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_LIST_2,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Shut/No Shut the interfacesfrom R1 to R2, R4 and R0")
+ dut = "r1"
+ intf1 = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ intf2 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ intf3 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ for intf in [intf1, intf2, intf3]:
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream IIF interface")
+ dut = "r2"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r2"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_1,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r2: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_1,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream IIF interface")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (S, G) ip mroutes")
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream IIF interface")
+ dut = "r4"
+ iif = "lo"
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (*, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2, addr_type="ipv6"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (*, G) ip mroutes")
+ oif = TOPO["routers"]["r4"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream IIF interface")
+ iif = TOPO["routers"]["r4"]["links"]["r3"]["interface"]
+ result = verify_upstream_iif(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined"
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r4: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_2,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r4: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r4: Verify (S, G) ip mroutes")
+ oif = "none"
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream IIF interface")
+ dut = "r3"
+ iif = TOPO["routers"]["r3"]["links"]["r5"]["interface"]
+ result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r3: Verify (S, G) upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen,
+ dut,
+ iif,
+ SOURCE_ADDRESS,
+ GROUP_ADDRESS_LIST_2,
+ addr_type="ipv6",
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r3: Verify (S, G) ip mroutes")
+ oif = TOPO["routers"]["r3"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request):
+ """
+ Verify RP info and (*,G) mroute after deleting the RP and performing
+ shut / no shut on the RPF interface.
+
+ Topology used:
+ ________r2_____
+ | |
+ iperf | |
+ r0-----r1-------------r3
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Creating configuration from JSON")
+ kill_socat(tgen)
+ clear_pim6_mroute(tgen)
+ clear_pim6_interface_traffic(tgen, TOPO)
+ reset_config_on_routers(tgen)
+
+ step("Enable MLD on r1 interface")
+ step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
+ step("r2: Configure r2 as RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r2: verify rp-info")
+ dut = "r2"
+ oif = "lo"
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_ADDRESS_1, oif, rp_address, SOURCE
+ )
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r0: Send MLD join")
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_mld_join(
+ tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify MLD groups")
+ dut = "r1"
+ intf_r1_r0 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mld_groups(tgen, dut, intf_r1_r0, GROUP_ADDRESS_1)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Verify (*, G) ip mroutes created")
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r2: Verify (*, G) ip mroutes created")
+ dut = "r2"
+ iif = "lo"
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif)
+ assert result is True, ASSERT_MSG.format(tc_name, result)
+
+ step("r1: Delete RP configuration")
+ input_dict = {
+ "r1": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Shut/No Shut the interface r1-r2-eth1/r1-r0-eth0 from R1 to R2")
+ dut = "r1"
+ intf1 = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ intf2 = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ for intf in [intf1, intf2]:
+ shutdown_bringup_interface(tgen, dut, intf, False)
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step("r2: Shut the RP interface lo")
+ dut = "r2"
+ intf = "lo"
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: Shut the interface r1-r2-eth1/r1-r3-eth2 towards RP")
+ intf3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ for intf in [intf1, intf3]:
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("r1: Verify (*, G) ip mroutes cleared")
+ dut = "r1"
+ iif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ oif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r1: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ step("r2: Verify (*, G) ip mroutes cleared")
+ dut = "r2"
+ iif = "lo"
+ oif = TOPO["routers"]["r2"]["links"]["r1"]["interface"]
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_1, iif, oif, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "r2: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
+ tc_name, result
+ )
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py
deleted file mode 100755
index bd5473a51..000000000
--- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py
+++ /dev/null
@@ -1,414 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Copyright (c) 2022 by VMware, Inc. ("VMware")
-# Used Copyright (c) 2018 by Network Device Education Foundation,
-# Inc. ("NetDEF") in this file.
-#
-# Permission to use, copy, modify, and/or distribute this software
-# for any purpose with or without fee is hereby granted, provided
-# that the above copyright notice and this permission notice appear
-# in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-#
-
-"""
-Following tests are covered to test Multicast basic functionality:
-
-Topology:
-
- _______r2_____
- | |
- iperf | | iperf
- r0-----r1-------------r3-----r5
- | |
- |_____________|
- r4
-
-Test steps
-- Create topology (setup module)
-- Bring up topology
-
-TC_1 : Verify upstream interfaces(IIF) and join state are updated properly
- after adding and deleting the static RP
-TC_2 : Verify IIF and OIL in "show ip pim state" updated properly after
- adding and deleting the static RP
-TC_3: (*, G) Mroute entry are cleared when static RP gets deleted
-TC_4: Verify (*,G) prune is send towards the RP after deleting the static RP
-TC_24 : Verify (*,G) and (S,G) populated correctly when SPT and RPT share the
- same path
-"""
-
-import os
-import sys
-import json
-import time
-import pytest
-from time import sleep
-import datetime
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
-
-from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
- reset_config_on_routers,
- step,
- shutdown_bringup_interface,
- kill_router_daemons,
- start_router_daemons,
- create_static_routes,
- check_router_status,
- socat_send_igmp_join_traffic,
- topo_daemons
-)
-from lib.pim import (
- create_pim_config,
- verify_igmp_groups,
- verify_upstream_iif,
- verify_join_state_and_timer,
- verify_mroutes,
- verify_pim_neighbors,
- verify_pim_interface_traffic,
- verify_pim_rp_info,
- verify_pim_state,
- clear_pim_interface_traffic,
- clear_igmp_interfaces,
- clear_pim_interfaces,
- clear_mroute,
- clear_mroute_verify,
-)
-from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-# Global variables
-GROUP_RANGE_V6 = "ff08::/64"
-IGMP_JOIN_V6 = "ff08::1"
-STAR = "*"
-SOURCE = "Static"
-
-pytestmark = [pytest.mark.pimd]
-
-
-def build_topo(tgen):
- """Build function"""
-
- # Building topology from json file
- build_topo_from_json(tgen, TOPO)
-
-
-def setup_module(mod):
- """
- Sets up the pytest environment
-
- * `mod`: module name
- """
-
- testsuite_run_time = time.asctime(time.localtime(time.time()))
- logger.info("Testsuite start time: %s", testsuite_run_time)
- logger.info("=" * 40)
-
- topology = """
-
- _______r2_____
- | |
- iperf | | iperf
- r0-----r1-------------r3-----r5
- | |
- |_____________|
- r4
-
- """
- logger.info("Master Topology: \n %s", topology)
-
- logger.info("Running setup_module to create topology")
-
- # This function initiates the topology build with Topogen...
- json_file = "{}/multicast_pimv6_static_rp.json".format(CWD)
- tgen = Topogen(json_file, mod.__name__)
- global TOPO
- TOPO = tgen.json_topo
-
- # ... and here it calls Mininet initialization functions.
-
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, TOPO)
-
- # Starting topology, create tmp files which are loaded to routers
- # to start daemons and then start routers
- start_topology(tgen, daemons)
-
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
- # Creating configuration from JSON
- build_config_from_json(tgen, TOPO)
-
- # Verify PIM neighbors
- result = verify_pim_neighbors(tgen, TOPO)
- assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
-
- logger.info("Running setup_module() done")
-
-
-def teardown_module():
- """Teardown the pytest environment"""
-
- logger.info("Running teardown_module to delete topology")
- tgen = get_topogen()
-
- # Stop toplogy and Remove tmp files
- tgen.stop_topology()
-
- logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
- logger.info("=" * 40)
-
-
-#####################################################
-#
-# Testcases
-#
-#####################################################
-
-
-def verify_state_incremented(state_before, state_after):
- """
- API to compare interface traffic state incrementing
-
- Parameters
- ----------
- * `state_before` : State dictionary for any particular instance
- * `state_after` : State dictionary for any particular instance
- """
-
- for router, state_data in state_before.items():
- for state, value in state_data.items():
- if state_before[router][state] >= state_after[router][state]:
- errormsg = (
- "[DUT: %s]: state %s value has not"
- " incremented, Initial value: %s, "
- "Current value: %s [FAILED!!]"
- % (
- router,
- state,
- state_before[router][state],
- state_after[router][state],
- )
- )
- return errormsg
-
- logger.info(
- "[DUT: %s]: State %s value is "
- "incremented, Initial value: %s, Current value: %s"
- " [PASSED!!]",
- router,
- state,
- state_before[router][state],
- state_after[router][state],
- )
-
- return True
-
-
-#####################################################
-
-def test_pimv6_add_delete_static_RP_p0(request):
- """
- TC_1: Verify upstream interfaces(IIF) and join state are updated
- properly after adding and deleting the static RP
- TC_2: Verify IIF and OIL in "show ip pim state" updated properly
- after adding and deleting the static RP
- TC_3: (*, G) Mroute entry are cleared when static RP gets deleted
- TC_4: Verify (*,G) prune is send towards the RP after deleting the
- static RP
-
- TOPOlogy used:
- r0------r1-----r2
- iperf DUT RP
- """
-
- tgen = get_topogen()
- tc_name = request.node.name
- write_test_header(tc_name)
-
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- check_router_status(tgen)
-
- step("Shut link b/w R1 and R3 and R1 and R4 as per tescase topology")
- intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
- intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
- for intf in [intf_r1_r3, intf_r1_r4]:
- shutdown_bringup_interface(tgen, "r1", intf, ifaceaction=False)
-
- step("Enable PIM between r1 and r2")
- step("Enable MLD on r1 interface and send IGMP " "join (FF08::1) to r1")
- step("Configure r2 loopback interface as RP")
- input_dict = {
- "r2": {
- "pim6": {
- "rp": [
- {
- "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
- "/"
- )[0],
- "group_addr_range": GROUP_RANGE_V6,
- }
- ]
- }
- }
- }
-
- result = create_pim_config(tgen, TOPO, input_dict)
- assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
-
- step("Verify show ip pim interface traffic without any mld join")
- state_dict = {
- "r1": {TOPO["routers"]["r1"]["links"]["r2"]["interface"]: ["pruneTx"]}
- }
-
- state_before = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
- assert isinstance(
- state_before, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
- tc_name, result
- )
-
- step("send mld join (FF08::1) to R1")
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_igmp_join_traffic(
- tgen, "r0", "UDP6-RECV", IGMP_JOIN_V6, intf, intf_ip, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- step("r1: Verify RP info")
- dut = "r1"
- oif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
- iif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
- rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
- result = verify_pim_rp_info(
- tgen, TOPO, dut, GROUP_RANGE_V6, oif, rp_address, SOURCE
- )
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("r1: Verify upstream IIF interface")
- result = verify_upstream_iif(tgen, dut, oif, STAR, IGMP_JOIN_V6)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, oif, STAR, IGMP_JOIN_V6)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("r1: Verify PIM state")
- result = verify_pim_state(tgen, dut, oif, iif, IGMP_JOIN_V6)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("r1: Verify ip mroutes")
- result = verify_mroutes(tgen, dut, STAR, IGMP_JOIN_V6, oif, iif)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("r1: Delete RP configuration")
- input_dict = {
- "r2": {
- "pim6": {
- "rp": [
- {
- "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
- "/"
- )[0],
- "group_addr_range": GROUP_RANGE_V6,
- "delete": True,
- }
- ]
- }
- }
- }
-
- result = create_pim_config(tgen, TOPO, input_dict)
- assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
-
- step("r1: Verify RP info")
- result = verify_pim_rp_info(
- tgen, TOPO, dut, GROUP_RANGE_V6, oif, rp_address, SOURCE, expected=False
- )
- assert (
- result is not True
- ), "Testcase {} :Failed \n " "RP: {} info is still present \n Error: {}".format(
- tc_name, rp_address, result
- )
-
- step("r1: Verify upstream IIF interface")
- result = verify_upstream_iif(tgen, dut, oif, STAR, IGMP_JOIN_V6, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n "
- "Upstream ({}, {}) is still in join state \n Error: {}".format(
- tc_name, STAR, IGMP_JOIN_V6, result
- )
- )
-
- step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(
- tgen, dut, oif, STAR, IGMP_JOIN_V6, expected=False
- )
- assert result is not True, (
- "Testcase {} :Failed \n "
- "Upstream ({}, {}) timer is still running \n Error: {}".format(
- tc_name, STAR, IGMP_JOIN_V6, result
- )
- )
-
- step("r1: Verify PIM state")
- result = verify_pim_state(tgen, dut, oif, iif, IGMP_JOIN_V6, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n "
- "PIM state for group: {} is still Active \n Error: {}".format(
- tc_name, IGMP_JOIN_V6, result
- )
- )
-
- step("r1: Verify ip mroutes")
- result = verify_mroutes(tgen, dut, STAR, IGMP_JOIN_V6, oif, iif, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n "
- "mroute ({}, {}) is still present \n Error: {}".format(
- tc_name, STAR, IGMP_JOIN_V6, result
- )
- )
-
- step("r1: Verify show ip pim interface traffic without any IGMP join")
- state_after = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
- assert isinstance(
- state_after, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
- tc_name, result
- )
-
- result = verify_state_incremented(state_before, state_after)
- assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
-
- write_test_footer(tc_name)
-
-
-if __name__ == "__main__":
- args = ["-s"] + sys.argv[1:]
- sys.exit(pytest.main(args))
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index 7dd13935b..6986e3051 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -46,6 +46,7 @@ markers =
pathd: Tests that run against PATHD
pbrd: Tests that run against PBRD
pimd: Tests that run against PIMD
+ pim6d: Tests that run against PIM6D
ripd: Tests that run against RIPD
ripngd: Tests that run against RIPNGD
sharpd: Tests that run against SHARPD
diff --git a/tests/topotests/srv6_locator_usid/__init__.py b/tests/topotests/srv6_locator_usid/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/__init__.py
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_1.json b/tests/topotests/srv6_locator_usid/expected_chunks_1.json
new file mode 100644
index 000000000..fe51488c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_1.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_2.json b/tests/topotests/srv6_locator_usid/expected_chunks_2.json
new file mode 100644
index 000000000..304d73807
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_2.json
@@ -0,0 +1,8 @@
+[
+ {
+ "name": "loc1",
+ "chunks": [
+ "fc00:0:1::/48"
+ ]
+ }
+]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_3.json b/tests/topotests/srv6_locator_usid/expected_chunks_3.json
new file mode 100644
index 000000000..fe51488c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_3.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_4.json b/tests/topotests/srv6_locator_usid/expected_chunks_4.json
new file mode 100644
index 000000000..fe51488c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_4.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_5.json b/tests/topotests/srv6_locator_usid/expected_chunks_5.json
new file mode 100644
index 000000000..0d4f101c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_5.json
@@ -0,0 +1,2 @@
+[
+]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_6.json b/tests/topotests/srv6_locator_usid/expected_chunks_6.json
new file mode 100644
index 000000000..0d4f101c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_6.json
@@ -0,0 +1,2 @@
+[
+]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_7.json b/tests/topotests/srv6_locator_usid/expected_chunks_7.json
new file mode 100644
index 000000000..0d4f101c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_7.json
@@ -0,0 +1,2 @@
+[
+]
diff --git a/tests/topotests/srv6_locator_usid/expected_chunks_8.json b/tests/topotests/srv6_locator_usid/expected_chunks_8.json
new file mode 100644
index 000000000..0d4f101c7
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_chunks_8.json
@@ -0,0 +1,2 @@
+[
+]
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_1.json b/tests/topotests/srv6_locator_usid/expected_locators_1.json
new file mode 100644
index 000000000..c0eeacc09
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_1.json
@@ -0,0 +1,20 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "fc00:0:1::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:1::/48",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_2.json b/tests/topotests/srv6_locator_usid/expected_locators_2.json
new file mode 100644
index 000000000..38a6739d6
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_2.json
@@ -0,0 +1,20 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "fc00:0:1::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:1::/48",
+ "proto": "sharp"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_3.json b/tests/topotests/srv6_locator_usid/expected_locators_3.json
new file mode 100644
index 000000000..c0eeacc09
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_3.json
@@ -0,0 +1,20 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "fc00:0:1::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:1::/48",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_4.json b/tests/topotests/srv6_locator_usid/expected_locators_4.json
new file mode 100644
index 000000000..b1528ff11
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_4.json
@@ -0,0 +1,35 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "fc00:0:1::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:1::/48",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "fc00:0:2::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:2::/48",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_5.json b/tests/topotests/srv6_locator_usid/expected_locators_5.json
new file mode 100644
index 000000000..b6acc238a
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_5.json
@@ -0,0 +1,36 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "fc00:0:1::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:1::/48",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "fc00:0:2::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:2::/48",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_6.json b/tests/topotests/srv6_locator_usid/expected_locators_6.json
new file mode 100644
index 000000000..b1528ff11
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_6.json
@@ -0,0 +1,35 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "fc00:0:1::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "behavior": "usid",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:1::/48",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "fc00:0:2::/48",
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "fc00:0:2::/48",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_7.json b/tests/topotests/srv6_locator_usid/expected_locators_7.json
new file mode 100644
index 000000000..e965e0217
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_7.json
@@ -0,0 +1,19 @@
+{
+ "locators":[
+ {
+ "name": "loc2",
+ "prefix": "fc00:0:2::/48",
+ "statusUp": true,
+ "blockBitsLength": 32,
+ "nodeBitsLength": 16,
+ "functionBitsLength": 16,
+ "argumentBitsLength": 0,
+ "chunks":[
+ {
+ "prefix": "fc00:0:2::/48",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/expected_locators_8.json b/tests/topotests/srv6_locator_usid/expected_locators_8.json
new file mode 100644
index 000000000..6e1b993ca
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/expected_locators_8.json
@@ -0,0 +1,4 @@
+{
+ "locators":[
+ ]
+}
diff --git a/tests/topotests/srv6_locator_usid/r1/setup.sh b/tests/topotests/srv6_locator_usid/r1/setup.sh
new file mode 100644
index 000000000..36ed713f2
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/r1/setup.sh
@@ -0,0 +1,2 @@
+ip link add dummy0 type dummy
+ip link set dummy0 up
diff --git a/tests/topotests/srv6_locator_usid/r1/sharpd.conf b/tests/topotests/srv6_locator_usid/r1/sharpd.conf
new file mode 100644
index 000000000..d46085935
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/r1/sharpd.conf
@@ -0,0 +1,7 @@
+hostname r1
+!
+log stdout notifications
+log monitor notifications
+log commands
+log file sharpd.log debugging
+!
diff --git a/tests/topotests/srv6_locator_usid/r1/zebra.conf b/tests/topotests/srv6_locator_usid/r1/zebra.conf
new file mode 100644
index 000000000..78ef1e9d4
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/r1/zebra.conf
@@ -0,0 +1,20 @@
+hostname r1
+!
+! debug zebra events
+! debug zebra rib detailed
+!
+log stdout notifications
+log monitor notifications
+log commands
+log file zebra.log debugging
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix fc00:0:1::/48 func-bits 16 block-len 32 node-len 16
+ behavior usid
+ !
+ !
+ !
+!
diff --git a/tests/topotests/srv6_locator_usid/test_srv6_locator_usid.py b/tests/topotests/srv6_locator_usid/test_srv6_locator_usid.py
new file mode 100755
index 000000000..37fd736d2
--- /dev/null
+++ b/tests/topotests/srv6_locator_usid/test_srv6_locator_usid.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2022, University of Rome Tor Vergata
+# Authored by Carmine Scarpitta <carmine.scarpitta@uniroma2.it>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_srv6_locator_usid.py:
+Test for SRv6 Locator uSID on zebra
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd]
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def setup_module(mod):
+ tgen = Topogen({None: "r1"}, mod.__name__)
+ tgen.start_topology()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(
+ CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(
+ CWD, "{}/sharpd.conf".format(rname))
+ )
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def _check_srv6_locator(router, expected_locator_file):
+ logger.info("checking zebra locator status")
+ output = json.loads(
+ router.vtysh_cmd("show segment-routing srv6 locator json")
+ )
+ expected = open_json_file("{}/{}".format(CWD, expected_locator_file))
+ return topotest.json_cmp(output, expected)
+
+
+def _check_sharpd_chunk(router, expected_chunk_file):
+ logger.info("checking sharpd locator chunk status")
+ output = json.loads(
+ router.vtysh_cmd("show sharp segment-routing srv6 json")
+ )
+ expected = open_json_file("{}/{}".format(CWD, expected_chunk_file))
+ return topotest.json_cmp(output, expected)
+
+
+def check_srv6_locator(router, expected_file):
+ func = functools.partial(_check_srv6_locator, router, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=5, wait=3)
+ assert result is None, "Unexpected SRv6 locator state on zebra: {}".format(result)
+
+
+def check_sharpd_chunk(router, expected_file):
+ func = functools.partial(_check_sharpd_chunk, router, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=5, wait=3)
+ assert result is None, "Unexpected SRv6 chunk state on sharpd: {}".format(result)
+
+
+def test_srv6_usid_locator_configuration():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info("Verify SRv6 Locators instantiated from config file")
+ check_srv6_locator(router, "expected_locators_1.json")
+ check_sharpd_chunk(router, "expected_chunks_1.json")
+
+
+def test_srv6_usid_locator_get_chunk():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info("Get chunk for the locator loc1")
+ router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc1")
+ check_srv6_locator(router, "expected_locators_2.json")
+ check_sharpd_chunk(router, "expected_chunks_2.json")
+
+
+def test_srv6_usid_locator_release_chunk():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info("Release chunk for the locator loc1")
+ router.vtysh_cmd("sharp srv6-manager release-locator-chunk loc1")
+ check_srv6_locator(router, "expected_locators_3.json")
+ check_sharpd_chunk(router, "expected_chunks_3.json")
+
+
+def test_srv6_usid_locator_create_locator():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info("Create an additional SRv6 Locator")
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ locators
+ locator loc2
+ prefix fc00:0:2::/48 func-bits 16 block-len 32 node-len 16
+ """
+ )
+ check_srv6_locator(router, "expected_locators_4.json")
+ check_sharpd_chunk(router, "expected_chunks_4.json")
+
+
+def test_srv6_usid_locator_set_behavior_usid():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info(
+ "Specify the SRv6 Locator loc2 as a Micro-segment (uSID) Locator"
+ )
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ locators
+ locator loc2
+ behavior usid
+ """
+ )
+ check_srv6_locator(router, "expected_locators_5.json")
+ check_sharpd_chunk(router, "expected_chunks_5.json")
+
+
+def test_srv6_usid_locator_unset_behavior_usid():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info("Clear Micro-segment (uSID) Locator flag for loc2")
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ locators
+ locator loc2
+ no behavior usid
+ """
+ )
+ check_srv6_locator(router, "expected_locators_6.json")
+ check_sharpd_chunk(router, "expected_chunks_6.json")
+
+
+def test_srv6_usid_locator_delete():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info(
+ "Delete locator loc1 and verify that the chunk is released automatically"
+ )
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ locators
+ no locator loc1
+ """
+ )
+ check_srv6_locator(router, "expected_locators_7.json")
+ check_sharpd_chunk(router, "expected_chunks_7.json")
+
+
+def test_srv6_usid_locator_delete_all():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ # FOR DEVELOPER:
+ # To pause the test at this point and inspect the routers interactively,
+ # call tgen.mininet_cli() here.
+
+ logger.info("Delete all the SRv6 configuration")
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ no srv6
+ """
+ )
+ check_srv6_locator(router, "expected_locators_8.json")
+ check_sharpd_chunk(router, "expected_chunks_8.json")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf
index ff2c633cc..914363157 100644
--- a/tools/etc/frr/support_bundle_commands.conf
+++ b/tools/etc/frr/support_bundle_commands.conf
@@ -208,3 +208,34 @@ show ipv6 ospf6 vrf all spf tree
show ipv6 ospf6 vrf all summary-address detail
show ipv6 ospf6 zebra
CMD_LIST_END
+
+#PIMv6 Support Bundle Command List
+PROC_NAME:pim6
+CMD_LIST_START
+show ipv6 pim channel
+show ipv6 pim interface
+show ipv6 pim interface traffic
+show ipv6 pim join
+show ipv6 pim jp-agg
+show ipv6 pim nexthop
+show ipv6 pim nexthop-lookup
+show ipv6 pim neighbor
+show ipv6 pim local-membership
+show ipv6 pim rp-info
+show ipv6 pim rpf
+show ipv6 pim secondary
+show ipv6 pim state
+show ipv6 pim statistics
+show ipv6 pim upstream
+show ipv6 pim upstream-join-desired
+show ipv6 pim upstream-rpf
+show ipv6 mld interface
+show ipv6 mld statistics
+show ipv6 mld joins
+show ipv6 mld groups
+show ipv6 multicast
+show ipv6 mroute
+show ipv6 pim bsr
+show ipv6 pim bsrp-info
+show ipv6 pim bsm-databases
+CMD_LIST_END
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index 61f1abb37..3c16c27c6 100755
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -335,7 +335,7 @@ if [ -z "$FRR_PATHSPACE" ]; then
load_old_config "/etc/sysconfig/frr"
fi
-if { declare -p watchfrr_options 2>/dev/null || true; } | grep -q '^declare \-a'; then
+if { declare -p watchfrr_options 2>/dev/null || true; } | grep -q '^declare -a'; then
log_warning_msg "watchfrr_options contains a bash array value." \
"The configured value is intentionally ignored since it is likely wrong." \
"Please remove or fix the setting."
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index d07c4c633..c5e1c113c 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -98,6 +98,7 @@ struct fpm_nl_ctx {
struct thread *t_read;
struct thread *t_write;
struct thread *t_event;
+ struct thread *t_nhg;
struct thread *t_dequeue;
/* zebra events. */
@@ -271,7 +272,7 @@ DEFUN(fpm_use_nhg, fpm_use_nhg_cmd,
return CMD_SUCCESS;
thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_TOGGLE_NHG, &gfnc->t_event);
+ FNE_TOGGLE_NHG, &gfnc->t_nhg);
return CMD_SUCCESS;
}
@@ -287,7 +288,7 @@ DEFUN(no_fpm_use_nhg, no_fpm_use_nhg_cmd,
return CMD_SUCCESS;
thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_TOGGLE_NHG, &gfnc->t_event);
+ FNE_TOGGLE_NHG, &gfnc->t_nhg);
return CMD_SUCCESS;
}
@@ -1275,7 +1276,7 @@ static void fpm_process_queue(struct thread *t)
static void fpm_process_event(struct thread *t)
{
struct fpm_nl_ctx *fnc = THREAD_ARG(t);
- int event = THREAD_VAL(t);
+ enum fpm_nl_events event = THREAD_VAL(t);
switch (event) {
case FNE_DISABLE:
@@ -1328,11 +1329,6 @@ static void fpm_process_event(struct thread *t)
if (IS_ZEBRA_DEBUG_FPM)
zlog_debug("%s: LSP walk finished", __func__);
break;
-
- default:
- if (IS_ZEBRA_DEBUG_FPM)
- zlog_debug("%s: unhandled event %d", __func__, event);
- break;
}
}
@@ -1372,6 +1368,8 @@ static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
THREAD_OFF(fnc->t_ribwalk);
THREAD_OFF(fnc->t_rmacreset);
THREAD_OFF(fnc->t_rmacwalk);
+ THREAD_OFF(fnc->t_event);
+ THREAD_OFF(fnc->t_nhg);
thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
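
The hunk above gives the FNE_TOGGLE_NHG event its own thread pointer (t_nhg) instead of reusing t_event, and types the event variable as enum fpm_nl_events while dropping the default case, so the switch over the enum stays exhaustive and the compiler (via -Wswitch) flags any future event value that is not handled. A small standalone sketch of that switch pattern, with illustrative names rather than FRR's:

#include <stdio.h>

enum demo_event { EV_DISABLE, EV_RECONNECT, EV_TOGGLE_NHG };

static void process(enum demo_event ev)
{
	switch (ev) {
	case EV_DISABLE:
		printf("disable\n");
		break;
	case EV_RECONNECT:
		printf("reconnect\n");
		break;
	case EV_TOGGLE_NHG:
		printf("toggle next-hop groups\n");
		break;
	/* no default: adding a new enumerator without handling it here
	 * triggers a -Wswitch warning at build time. */
	}
}

int main(void)
{
	process(EV_TOGGLE_NHG);
	return 0;
}
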
diff --git a/zebra/netconf_netlink.c b/zebra/netconf_netlink.c
index 56f56bfe6..4c30544e5 100644
--- a/zebra/netconf_netlink.c
+++ b/zebra/netconf_netlink.c
@@ -106,9 +106,11 @@ int netlink_netconf_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
* to do a good job of not sending data that is mixed/matched
* across families
*/
+#ifdef AF_MPLS
if (ncm->ncm_family == AF_MPLS)
afi = AFI_IP;
else
+#endif /* AF_MPLS */
afi = family2afi(ncm->ncm_family);
netlink_parse_rtattr(tb, NETCONFA_MAX, netconf_rta(ncm), len);
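
The #ifdef added above keeps the AF_MPLS special case from breaking the build on platforms whose socket headers do not define AF_MPLS. A standalone sketch of the same guard pattern (names are illustrative, not FRR's):

#include <stdio.h>
#include <sys/socket.h>

static int family_to_slot(int family)
{
#ifdef AF_MPLS
	/* Platforms that know AF_MPLS fold it into the IPv4 slot. */
	if (family == AF_MPLS)
		return 0;
#endif /* AF_MPLS */
	return family == AF_INET6 ? 1 : 0;
}

int main(void)
{
	printf("AF_INET6 -> slot %d\n", family_to_slot(AF_INET6));
	return 0;
}
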
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index a3db53f29..85eb6b345 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2710,6 +2710,7 @@ int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
chunk.keep = 0;
chunk.proto = client->proto;
chunk.instance = client->instance;
+ chunk.flags = loc->flags;
zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK, vrf_id);
zapi_srv6_locator_chunk_encode(s, &chunk);
diff --git a/zebra/zebra_evpn_mh.c b/zebra/zebra_evpn_mh.c
index 98120accf..01ea9c5b9 100644
--- a/zebra/zebra_evpn_mh.c
+++ b/zebra/zebra_evpn_mh.c
@@ -2759,6 +2759,12 @@ bool zebra_evpn_is_if_es_capable(struct zebra_if *zif)
if (zif->zif_type == ZEBRA_IF_BOND)
return true;
+ /* Relax the checks to allow the config to be applied in zebra
+ * before the interface has been received from the kernel.
+ */
+ if (zif->ifp->ifindex == IFINDEX_INTERNAL)
+ return true;
+
/* XXX: allow swpX i.e. a regular ethernet port to be an ES link too */
return false;
}
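
The early return added above lets an Ethernet Segment be configured on an interface that zebra has not yet learned from the kernel; until the netlink dump supplies the real index, such interfaces carry the sentinel ifindex IFINDEX_INTERNAL. A standalone sketch of that pre-provisioning check, using simplified stand-in types rather than FRR's:

#include <stdbool.h>
#include <stdio.h>

#define IFINDEX_INTERNAL 0 /* sentinel: interface not (yet) known from the kernel */

struct demo_if {
	const char *name;
	int ifindex;  /* IFINDEX_INTERNAL until learned via netlink */
	bool is_bond;
};

static bool es_capable(const struct demo_if *ifp)
{
	if (ifp->is_bond)
		return true;
	/* Accept config early; capability is re-evaluated once the
	 * interface is learned from the kernel. */
	if (ifp->ifindex == IFINDEX_INTERNAL)
		return true;
	return false;
}

int main(void)
{
	struct demo_if pending = { "bond7", IFINDEX_INTERNAL, false };
	printf("pre-provisioned interface accepted: %d\n", es_capable(&pending));
	return 0;
}
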
diff --git a/zebra/zebra_srv6.c b/zebra/zebra_srv6.c
index 36506cacc..d61e4f804 100644
--- a/zebra/zebra_srv6.c
+++ b/zebra/zebra_srv6.c
@@ -177,6 +177,58 @@ struct srv6_locator *zebra_srv6_locator_lookup(const char *name)
return NULL;
}
+void zebra_notify_srv6_locator_add(struct srv6_locator *locator)
+{
+ struct listnode *node;
+ struct zserv *client;
+
+ /*
+ * Notify new locator info to zclients.
+ *
+ * The SRv6 locators and their prefixes are managed by zserv (zebra),
+ * while the actual configuration of SRv6 SIDs within a locator is done
+ * by the zclients (bgpd, isisd, etc.). Locator allocation in zserv and
+ * SID configuration in the zclients are therefore asynchronous: a
+ * zclient has to receive an event via ZAPI whenever an SRv6 locator is
+ * added on zebra.
+ * In SRv6, locators are added and removed far less frequently than RIB
+ * entries, so broadcasting this event to all zclients does not degrade
+ * the overall performance of FRRouting.
+ */
+ for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client))
+ zsend_zebra_srv6_locator_add(client, locator);
+}
+
+void zebra_notify_srv6_locator_delete(struct srv6_locator *locator)
+{
+ struct listnode *n;
+ struct srv6_locator_chunk *c;
+ struct zserv *client;
+
+ /*
+ * Notify deleted locator info to zclients if needed.
+ *
+ * A zclient (bgpd, isisd, etc.) allocates SIDs from an SRv6 locator
+ * chunk and uses them for its own purposes; for example, in the BGP
+ * L3VPN case a SID is assigned to the VPN unicast RIB.
+ * When the locator is deleted by zserv (zebra), those SIDs have to be
+ * withdrawn. The zclient initiates the withdrawal when it receives
+ * ZEBRA_SRV6_LOCATOR_DELETE, so this notification is sent to the
+ * owner of each chunk.
+ */
+ for (ALL_LIST_ELEMENTS_RO((struct list *)locator->chunks, n, c)) {
+ if (c->proto == ZEBRA_ROUTE_SYSTEM)
+ continue;
+ client = zserv_find_client(c->proto, c->instance);
+ if (!client) {
+ zlog_warn("Not found zclient(proto=%u, instance=%u).",
+ c->proto, c->instance);
+ continue;
+ }
+ zsend_zebra_srv6_locator_delete(client, locator);
+ }
+}
+
struct zebra_srv6 *zebra_srv6_get_default(void)
{
static struct zebra_srv6 srv6;
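
zebra_notify_srv6_locator_delete() above walks the locator's chunks and notifies only the daemon owning each chunk, skipping chunks held by "system". A standalone sketch of that ownership walk, using a plain array in place of FRR's list and zserv types:

#include <stdio.h>

#define OWNER_SYSTEM 0

struct chunk {
	const char *prefix;
	int owner; /* 0 = system, otherwise a daemon id */
};

static void notify_delete(const struct chunk *chunks, int n)
{
	for (int i = 0; i < n; i++) {
		if (chunks[i].owner == OWNER_SYSTEM)
			continue; /* no daemon to withdraw SIDs for */
		printf("notify daemon %d: locator chunk %s deleted\n",
		       chunks[i].owner, chunks[i].prefix);
	}
}

int main(void)
{
	struct chunk chunks[] = {
		{ "fc00:0:1::/48", OWNER_SYSTEM },
		{ "fc00:0:1::/48", 42 /* e.g. a sharpd-like client */ },
	};
	notify_delete(chunks, 2);
	return 0;
}
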
diff --git a/zebra/zebra_srv6.h b/zebra/zebra_srv6.h
index 84fcc305b..f320b9ca0 100644
--- a/zebra/zebra_srv6.h
+++ b/zebra/zebra_srv6.h
@@ -61,6 +61,9 @@ extern void zebra_srv6_locator_add(struct srv6_locator *locator);
extern void zebra_srv6_locator_delete(struct srv6_locator *locator);
extern struct srv6_locator *zebra_srv6_locator_lookup(const char *name);
+void zebra_notify_srv6_locator_add(struct srv6_locator *locator);
+void zebra_notify_srv6_locator_delete(struct srv6_locator *locator);
+
extern void zebra_srv6_init(void);
extern struct zebra_srv6 *zebra_srv6_get_default(void);
extern bool zebra_srv6_is_enable(void);
diff --git a/zebra/zebra_srv6_vty.c b/zebra/zebra_srv6_vty.c
index e6810bdc5..1221365d4 100644
--- a/zebra/zebra_srv6_vty.c
+++ b/zebra/zebra_srv6_vty.c
@@ -172,6 +172,9 @@ DEFUN (show_srv6_locator_detail,
vty_out(vty, "Argument-Bit-Len: %u\n",
locator->argument_bits_length);
+ if (CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID))
+ vty_out(vty, "Behavior: uSID\n");
+
vty_out(vty, "Chunks:\n");
for (ALL_LIST_ELEMENTS_RO((struct list *)locator->chunks, node,
chunk)) {
@@ -369,6 +372,38 @@ DEFPY (locator_prefix,
return CMD_SUCCESS;
}
+DEFPY (locator_behavior,
+ locator_behavior_cmd,
+ "[no] behavior usid",
+ NO_STR
+ "Configure SRv6 behavior\n"
+ "Specify SRv6 behavior uSID\n")
+{
+ VTY_DECLVAR_CONTEXT(srv6_locator, locator);
+
+ if (no && !CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID))
+ /* SRv6 locator uSID flag already unset, nothing to do */
+ return CMD_SUCCESS;
+
+ if (!no && CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID))
+ /* SRv6 locator uSID flag already set, nothing to do */
+ return CMD_SUCCESS;
+
+ /* Remove old locator from zclients */
+ zebra_notify_srv6_locator_delete(locator);
+
+ /* Set/Unset the SRV6_LOCATOR_USID */
+ if (no)
+ UNSET_FLAG(locator->flags, SRV6_LOCATOR_USID);
+ else
+ SET_FLAG(locator->flags, SRV6_LOCATOR_USID);
+
+ /* Notify the new locator to zclients */
+ zebra_notify_srv6_locator_add(locator);
+
+ return CMD_SUCCESS;
+}
+
static int zebra_sr_config(struct vty *vty)
{
struct zebra_srv6 *srv6 = zebra_srv6_get_default();
@@ -399,6 +434,8 @@ static int zebra_sr_config(struct vty *vty)
if (locator->argument_bits_length)
vty_out(vty, " arg-len %u",
locator->argument_bits_length);
+ if (CHECK_FLAG(locator->flags, SRV6_LOCATOR_USID))
+ vty_out(vty, " behavior usid");
vty_out(vty, "\n");
vty_out(vty, " exit\n");
vty_out(vty, " !\n");
@@ -435,6 +472,7 @@ void zebra_srv6_vty_init(void)
/* Command for configuration */
install_element(SRV6_LOC_NODE, &locator_prefix_cmd);
+ install_element(SRV6_LOC_NODE, &locator_behavior_cmd);
/* Command for operation */
install_element(VIEW_NODE, &show_srv6_locator_cmd);
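
The locator_behavior DEFPY above is a no-op when the uSID flag already matches the requested state; otherwise it withdraws and re-announces the locator so zclients pick up the new flags (which zapi_msg.c now copies into each chunk handed out to clients). A standalone sketch of the flag handling, with FRR's CHECK_FLAG/SET_FLAG/UNSET_FLAG macros re-declared locally so it builds on its own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHECK_FLAG(V, F) ((V) & (F))
#define SET_FLAG(V, F)   ((V) |= (F))
#define UNSET_FLAG(V, F) ((V) &= ~(F))

#define LOC_USID (1 << 0) /* stand-in for SRV6_LOCATOR_USID */

/* Returns true if the flag actually changed (and the locator would be
 * withdrawn and re-announced to zclients). */
static bool set_usid(uint8_t *flags, bool no)
{
	if (no && !CHECK_FLAG(*flags, LOC_USID))
		return false; /* already unset, nothing to do */
	if (!no && CHECK_FLAG(*flags, LOC_USID))
		return false; /* already set, nothing to do */

	if (no)
		UNSET_FLAG(*flags, LOC_USID);
	else
		SET_FLAG(*flags, LOC_USID);
	return true;
}

int main(void)
{
	uint8_t flags = 0;
	printf("behavior usid: changed=%d\n", set_usid(&flags, false));
	printf("behavior usid again: changed=%d\n", set_usid(&flags, false));
	return 0;
}
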