Diffstat (limited to 'tests')
74 files changed, 5553 insertions, 263 deletions
diff --git a/tests/isisd/test_common.c b/tests/isisd/test_common.c
index 5b2028ffd..ade357353 100644
--- a/tests/isisd/test_common.c
+++ b/tests/isisd/test_common.c
@@ -309,7 +309,8 @@ static int topology_load_node(const struct isis_topology *topology,
 {
 	int ret;
 
-	isis_dynhn_insert(tnode->sysid, tnode->hostname, tnode->level);
+	isis_dynhn_insert(area->isis, tnode->sysid, tnode->hostname,
+			  tnode->level);
 
 	for (int level = IS_LEVEL_1; level <= IS_LEVEL_2; level++) {
 		if ((tnode->level & level) == 0)
diff --git a/tests/isisd/test_isis_spf.c b/tests/isisd/test_isis_spf.c
index b89a5a008..a30f33cca 100644
--- a/tests/isisd/test_isis_spf.c
+++ b/tests/isisd/test_isis_spf.c
@@ -269,7 +269,7 @@ static int test_run(struct vty *vty, const struct isis_topology *topology,
 		if (sysid2buff(fail_id, fail_sysid_str) == 0) {
 			struct isis_dynhn *dynhn;
 
-			dynhn = dynhn_find_by_name(fail_sysid_str);
+			dynhn = dynhn_find_by_name(area->isis, fail_sysid_str);
 			if (dynhn == NULL) {
 				vty_out(vty, "Invalid system id %s\n",
 					fail_sysid_str);
@@ -339,9 +339,6 @@ static int test_run(struct vty *vty, const struct isis_topology *topology,
 	/* Cleanup IS-IS area. */
 	isis_area_destroy(area);
 
-	/* Cleanup hostnames. */
-	dyn_cache_cleanup_all();
-
 	return CMD_SUCCESS;
 }
 
diff --git a/tests/lib/test_grpc.cpp b/tests/lib/test_grpc.cpp
new file mode 100644
index 000000000..491796802
--- /dev/null
+++ b/tests/lib/test_grpc.cpp
@@ -0,0 +1,979 @@
+/*
+ * May 16 2021, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2021, LabN Consulting, L.L.C
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <time.h> +#include <unistd.h> +#include <zebra.h> + +#include "filter.h" +#include "frr_pthread.h" +#include "libfrr.h" +#include "routing_nb.h" +#include "northbound_cli.h" +#include "thread.h" +#include "vrf.h" +#include "vty.h" + +#include "staticd/static_debug.h" +#include "staticd/static_nb.h" +#include "staticd/static_vrf.h" +#include "staticd/static_vty.h" +#include "staticd/static_zebra.h" + +// GRPC C++ includes +#include <string> +#include <sstream> +#include <grpc/grpc.h> +#include <grpcpp/channel.h> +#include <grpcpp/client_context.h> +#include <grpcpp/create_channel.h> +#include <grpcpp/security/credentials.h> +#include "grpc/frr-northbound.grpc.pb.h" + +DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm)); +DEFINE_KOOH(frr_fini, (), ()); + +struct vty *vty; + +bool mpls_enabled; +struct thread_master *master; +struct zebra_privs_t static_privs = {0}; +struct frrmod_runtime *grpc_module; +char binpath[2 * MAXPATHLEN + 1]; + +extern const char *json_expect1; +extern const char *json_expect2; +extern const char *json_expect3; +extern const char *json_loadconf1; + +int test_dbg = 1; + +void inline test_debug(const std::string &s) +{ + if (test_dbg) + std::cout << s << std::endl; +} + +// static struct option_chain modules[] = {{ .arg = "grpc:50051" }] +// static struct option_chain **modnext = modules->next; + +static const struct frr_yang_module_info *const staticd_yang_modules[] = { + &frr_interface_info, &frr_filter_info, &frr_routing_info, + &frr_staticd_info, &frr_vrf_info, +}; + +static int grpc_thread_stop(struct thread *thread); + +static void static_startup(void) +{ + // struct frrmod_runtime module; + // static struct option_chain *oc; + char moderr[256] = {}; + cmd_init(1); + + zlog_aux_init("NONE: ", LOG_DEBUG); + zprivs_preinit(&static_privs); + zprivs_init(&static_privs); + + /* Load the server side module -- check libtool path first */ + std::string modpath = std::string(binpath) + std::string("../../../lib/.libs"); + grpc_module = frrmod_load("grpc:50051", modpath.c_str(), moderr, sizeof(moderr)); + if (!grpc_module) { + modpath = std::string(binpath) + std::string("../../lib"); + grpc_module = frrmod_load("grpc:50051", modpath.c_str(), moderr, + sizeof(moderr)); + } + if (!grpc_module) { + std::cout << "Failed to load grpc module:" << moderr + << std::endl; + exit(1); + } + + static_debug_init(); + + master = thread_master_create(NULL); + nb_init(master, staticd_yang_modules, array_size(staticd_yang_modules), + false); + + static_zebra_init(); + vty_init(master, true); + static_vrf_init(); + static_vty_init(); + + hook_register(routing_conf_event, + routing_control_plane_protocols_name_validate); + + routing_control_plane_protocols_register_vrf_dependency(); + + // Add a route + vty = vty_new(); + vty->type = vty::VTY_TERM; + vty_config_enter(vty, true, false); + + auto ret = cmd_execute(vty, "ip route 11.0.0.0/8 Null0", NULL, 0); + assert(!ret); + + ret = cmd_execute(vty, "end", NULL, 0); + assert(!ret); + + nb_cli_pending_commit_check(vty); + + frr_pthread_init(); + + // frr_config_fork(); + hook_call(frr_late_init, master); +} + +static void static_shutdown(void) +{ + hook_call(frr_fini); + vty_close(vty); + vrf_terminate(); + vty_terminate(); + cmd_terminate(); + nb_terminate(); + yang_terminate(); + 
thread_master_free(master); + master = NULL; +} + +using frr::Northbound; +using grpc::Channel; +using grpc::ClientAsyncResponseReader; +using grpc::ClientContext; +using grpc::CompletionQueue; +using grpc::Status; + +class NorthboundClient +{ + public: + NorthboundClient(std::shared_ptr<Channel> channel) + : stub_(frr::Northbound::NewStub(channel)) + { + } + + void Commit(uint32_t candidate_id) + { + frr::CommitRequest request; + frr::CommitResponse reply; + ClientContext context; + Status status; + + request.set_candidate_id(candidate_id); + + request.set_phase(frr::CommitRequest::ALL); + status = stub_->Commit(&context, request, &reply); + _throw_if_not_ok(status); +#if 0 + request.set_phase(frr::CommitRequest::VALIDATE); + status = stub_->Commit(&context, request, &reply); + _throw_if_not_ok(status); + + request.set_phase(frr::CommitRequest::PREPARE); + status = stub_->Commit(&context, request, &reply); + _throw_if_not_ok(status); + + request.set_phase(frr::CommitRequest::APPLY); + status = stub_->Commit(&context, request, &reply); + _throw_if_not_ok(status); +#endif + } + + uint32_t CreateCandidate() + { + frr::CreateCandidateRequest request; + frr::CreateCandidateResponse reply; + ClientContext context; + Status status; + + status = stub_->CreateCandidate(&context, request, &reply); + _throw_if_not_ok(status); + return reply.candidate_id(); + } + + void DeleteCandidate(uint32_t candidate_id) + { + frr::DeleteCandidateRequest request; + frr::DeleteCandidateResponse reply; + ClientContext context; + Status status; + + request.set_candidate_id(candidate_id); + status = stub_->DeleteCandidate(&context, request, &reply); + _throw_if_not_ok(status); + } + + void EditCandidate(uint32_t candidate_id, const std::string &path, + const std::string &value) + { + frr::EditCandidateRequest request; + frr::EditCandidateResponse reply; + ClientContext context; + + request.set_candidate_id(candidate_id); + frr::PathValue *pv = request.add_update(); + pv->set_path(path); + pv->set_value(value); + + Status status = stub_->EditCandidate(&context, request, &reply); + _throw_if_not_ok(status); + } + + std::string Get(const std::string &path, + frr::GetRequest::DataType dtype, frr::Encoding enc, + bool with_defaults) + { + frr::GetRequest request; + frr::GetResponse reply; + ClientContext context; + std::ostringstream ss; + + request.set_type(dtype); + request.set_encoding(enc); + request.set_with_defaults(with_defaults); + request.add_path(path); + + auto stream = stub_->Get(&context, request); + while (stream->Read(&reply)) { + ss << reply.data().data() << std::endl; + } + auto status = stream->Finish(); + _throw_if_not_ok(status); + return ss.str(); + } + + std::string GetCapabilities() + { + frr::GetCapabilitiesRequest request; + frr::GetCapabilitiesResponse reply; + ClientContext context; + + Status status = + stub_->GetCapabilities(&context, request, &reply); + _throw_if_not_ok(status); + + std::ostringstream ss; + ss << "Capabilities:" << std::endl + << "\tVersion: " << reply.frr_version() << std::endl + << "\tRollback Support: " << reply.rollback_support() + << std::endl + << "\tSupported Modules:"; + + for (int i = 0; i < reply.supported_modules_size(); i++) { + auto sm = reply.supported_modules(i); + ss << std::endl + << "\t\tName: \"" << sm.name() + << "\" Revision: " << sm.revision() << " Org: \"" + << sm.organization() << "\""; + } + + ss << std::endl << "\tSupported Encodings:"; + + for (int i = 0; i < reply.supported_encodings_size(); i++) { + auto se = reply.supported_encodings(i); + auto 
desc = + google::protobuf::GetEnumDescriptor<decltype( + se)>(); + ss << std::endl + << "\t\t" << desc->FindValueByNumber(se)->name(); + } + + ss << std::endl; + + return ss.str(); + } + + void LoadToCandidate(uint32_t candidate_id, bool is_replace, + bool is_json, const std::string &data) + { + frr::LoadToCandidateRequest request; + frr::LoadToCandidateResponse reply; + frr::DataTree *dt = new frr::DataTree; + ClientContext context; + + request.set_candidate_id(candidate_id); + request.set_type(is_replace + ? frr::LoadToCandidateRequest::REPLACE + : frr::LoadToCandidateRequest::MERGE); + dt->set_encoding(is_json ? frr::JSON : frr::XML); + dt->set_data(data); + request.set_allocated_config(dt); + + Status status = + stub_->LoadToCandidate(&context, request, &reply); + _throw_if_not_ok(status); + } + + std::string ListTransactions() + { + frr::ListTransactionsRequest request; + frr::ListTransactionsResponse reply; + ClientContext context; + std::ostringstream ss; + + auto stream = stub_->ListTransactions(&context, request); + + while (stream->Read(&reply)) { + ss << "Tx ID: " << reply.id() + << " client: " << reply.client() + << " date: " << reply.date() + << " comment: " << reply.comment() << std::endl; + } + + auto status = stream->Finish(); + _throw_if_not_ok(status); + return ss.str(); + } + + private: + std::unique_ptr<frr::Northbound::Stub> stub_; + + void _throw_if_not_ok(Status &status) + { + if (!status.ok()) + throw std::runtime_error( + std::to_string(status.error_code()) + ": " + + status.error_message()); + } +}; + + +bool stop = false; + +int grpc_client_test_stop(struct frr_pthread *fpt, void **result) +{ + test_debug("client: STOP pthread"); + + assert(fpt->running); + atomic_store_explicit(&fpt->running, false, memory_order_relaxed); + + test_debug("client: joining pthread"); + pthread_join(fpt->thread, result); + + test_debug("client: joined pthread"); + return 0; +} + +int find_first_diff(const std::string &s1, const std::string &s2) +{ + int s1len = s1.length(); + int s2len = s2.length(); + int mlen = std::min(s1len, s2len); + + for (int i = 0; i < mlen; i++) + if (s1[i] != s2[i]) + return i; + return s1len == s2len ? 
-1 : mlen; +} + +void assert_no_diff(const std::string &s1, const std::string &s2) +{ + int pos = find_first_diff(s1, s2); + if (pos == -1) + return; + std::cout << "not ok" << std::endl; + std::cout << "Same: " << s1.substr(0, pos) << std::endl; + std::cout << "Diff s1: " << s1.substr(pos) << std::endl; + std::cout << "Diff s2: " << s2.substr(pos) << std::endl; + assert(false); +} + +void assert_config_same(NorthboundClient &client, const std::string &compare) +{ + std::string confs = client.Get("/frr-routing:routing", + frr::GetRequest::ALL, frr::JSON, true); + assert_no_diff(confs, compare); + std::cout << "ok" << std::endl; +} + +void grpc_client_run_test(void) +{ + NorthboundClient client(grpc::CreateChannel( + "localhost:50051", grpc::InsecureChannelCredentials())); + + std::string reply = client.GetCapabilities(); + + uint32_t cid; + cid = client.CreateCandidate(); + std::cout << "CreateCandidate -> " << cid << std::endl; + assert(cid == 1); + client.DeleteCandidate(cid); + std::cout << "DeleteCandidate(" << cid << ")" << std::endl; + cid = client.CreateCandidate(); + assert(cid == 2); + std::cout << "CreateCandidate -> " << cid << std::endl; + + /* + * Get initial configuration + */ + std::cout << "Comparing initial config..."; + assert_config_same(client, json_expect1); + + /* + * Add config using EditCandidate + */ + + char xpath_buf[1024]; + strlcpy(xpath_buf, + "/frr-routing:routing/control-plane-protocols/" + "control-plane-protocol[type='frr-staticd:staticd']" + "[name='staticd'][vrf='default']/frr-staticd:staticd/route-list", + sizeof(xpath_buf)); + int slen = strlen(xpath_buf); + for (int i = 0; i < 4; i++) { + snprintf(xpath_buf + slen, sizeof(xpath_buf) - slen, + "[prefix='13.0.%d.0/24']" + "[afi-safi='frr-routing:ipv4-unicast']/" + "path-list[table-id='0'][distance='1']/" + "frr-nexthops/nexthop[nh-type='blackhole']" + "[vrf='default'][gateway=''][interface='(null)']", + i); + client.EditCandidate(cid, xpath_buf, ""); + } + client.Commit(cid); + std::cout << "Comparing EditCandidate config..."; + assert_config_same(client, json_expect2); + + client.DeleteCandidate(cid); + std::cout << "DeleteCandidate(" << cid << ")" << std::endl; + + /* + * Add config using LoadToCandidate + */ + + cid = client.CreateCandidate(); + std::cout << "CreateCandidate -> " << cid << std::endl; + + client.LoadToCandidate(cid, false, true, json_loadconf1); + client.Commit(cid); + + std::cout << "Comparing LoadToCandidate config..."; + assert_config_same(client, json_expect3); + + client.DeleteCandidate(cid); + std::cout << "DeleteCandidate(" << cid << ")" << std::endl; + + std::string ltxreply = client.ListTransactions(); + // std::cout << "client: pthread received: " << ltxreply << std::endl; +} + +void *grpc_client_test_start(void *arg) +{ + struct frr_pthread *fpt = (struct frr_pthread *)arg; + fpt->master->owner = pthread_self(); + frr_pthread_set_name(fpt); + frr_pthread_notify_running(fpt); + + try { + grpc_client_run_test(); + std::cout << "TEST PASSED" << std::endl; + } catch (std::exception &e) { + std::cout << "Exception in test: " << e.what() << std::endl; + } + + // Signal FRR event loop to stop + test_debug("client: pthread: adding event to stop us"); + thread_add_event(master, grpc_thread_stop, NULL, 0, NULL); + + test_debug("client: pthread: DONE (returning)"); + + return NULL; +} + +static int grpc_thread_start(struct thread *thread) +{ + struct frr_pthread_attr client = { + .start = grpc_client_test_start, + .stop = grpc_client_test_stop, + }; + + auto pth = 
frr_pthread_new(&client, "GRPC Client thread", "grpc"); + frr_pthread_run(pth, NULL); + frr_pthread_wait_running(pth); + + return 0; +} + +static int grpc_thread_stop(struct thread *thread) +{ + std::cout << __func__ << ": frr_pthread_stop_all" << std::endl; + frr_pthread_stop_all(); + std::cout << __func__ << ": static_shutdown" << std::endl; + static_shutdown(); + std::cout << __func__ << ": exit cleanly" << std::endl; + exit(0); +} + +/* + * return abs path to this binary with trailing `/`. Does not parse path + * environment to find in path, which should not matter for unit testing. + */ +static int get_binpath(const char *argv0, char cwd[2 * MAXPATHLEN + 1]) +{ + const char *rch; + if (argv0[0] == '/') { + *cwd = 0; + rch = strrchr(argv0, '/'); + strlcpy(cwd, argv0, MIN(rch - argv0 + 2, 2 * MAXPATHLEN + 1)); + return 0; + } + if (!(rch = strrchr(argv0, '/'))) { + /* Does not handle using PATH, shouldn't matter for test */ + errno = EINVAL; + return -1; + } + if (!getcwd(cwd, MAXPATHLEN)) + return -1; + int len = strlen(cwd); + cwd[len++] = '/'; + strlcpy(cwd + len, argv0, MIN(rch - argv0 + 2, 2 * MAXPATHLEN + 1)); + return 0; +} + +int main(int argc, char **argv) +{ + assert(argc >= 1); + if (get_binpath(argv[0], binpath) < 0) + exit(1); + + static_startup(); + + thread_add_event(master, grpc_thread_start, NULL, 0, NULL); + + /* Event Loop */ + struct thread thread; + while (thread_fetch(master, &thread)) + thread_call(&thread); + return 0; +} + +// clang-format off + +const char *json_expect1 = R"NONCE({ + "frr-routing:routing": { + "control-plane-protocols": { + "control-plane-protocol": [ + { + "type": "frr-staticd:staticd", + "name": "staticd", + "vrf": "default", + "frr-staticd:staticd": { + "route-list": [ + { + "prefix": "11.0.0.0/8", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + } + ] + } + } + ] + } + }, + "frr-vrf:lib": { + "vrf": [ + { + "name": "default", + "state": { + "active": false + } + } + ] + } +} + +)NONCE"; + +const char *json_loadconf1 = R"NONCE( +{ + "frr-routing:routing": { + "control-plane-protocols": { + "control-plane-protocol": [ + { + "type": "frr-staticd:staticd", + "name": "staticd", + "vrf": "default", + "frr-staticd:staticd": { + "route-list": [ + { + "prefix": "10.0.0.0/13", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)" + } + ] + } + } + ] + } + ] + } + } + ] + } + }, + "frr-vrf:lib": { + "vrf": [ + { + "name": "default" + } + ] + } +})NONCE"; + +const char *json_expect2 = R"NONCE({ + "frr-routing:routing": { + "control-plane-protocols": { + "control-plane-protocol": [ + { + "type": "frr-staticd:staticd", + "name": "staticd", + "vrf": "default", + "frr-staticd:staticd": { + "route-list": [ + { + "prefix": "11.0.0.0/8", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.0.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + 
"distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.1.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.2.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.3.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + } + ] + } + } + ] + } + }, + "frr-vrf:lib": { + "vrf": [ + { + "name": "default", + "state": { + "active": false + } + } + ] + } +} + +)NONCE"; + +const char *json_expect3 = R"NONCE({ + "frr-routing:routing": { + "control-plane-protocols": { + "control-plane-protocol": [ + { + "type": "frr-staticd:staticd", + "name": "staticd", + "vrf": "default", + "frr-staticd:staticd": { + "route-list": [ + { + "prefix": "11.0.0.0/8", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.0.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.1.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.2.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "13.0.3.0/24", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + "onlink": false + } + ] + } + } + ] + }, + { + "prefix": "10.0.0.0/13", + "afi-safi": "frr-routing:ipv4-unicast", + "path-list": [ + { + "table-id": 0, + "distance": 1, + "tag": 0, + "frr-nexthops": { + "nexthop": [ + { + "nh-type": "blackhole", + "vrf": "default", + "gateway": "", + "interface": "(null)", + "bh-type": "null", + 
"onlink": false + } + ] + } + } + ] + } + ] + } + } + ] + } + }, + "frr-vrf:lib": { + "vrf": [ + { + "name": "default", + "state": { + "active": false + } + } + ] + } +} + +)NONCE"; diff --git a/tests/lib/test_grpc.py b/tests/lib/test_grpc.py new file mode 100644 index 000000000..06ae6c05d --- /dev/null +++ b/tests/lib/test_grpc.py @@ -0,0 +1,23 @@ +import inspect +import os +import subprocess +import pytest +import frrtest + +class TestGRPC(object): + program = "./test_grpc" + + @pytest.mark.skipif( + 'S["GRPC_TRUE"]=""\n' not in open("../config.status").readlines(), + reason="GRPC not enabled", + ) + def test_exits_cleanly(self): + basedir = os.path.dirname(inspect.getsourcefile(type(self))) + program = os.path.join(basedir, self.program) + proc = subprocess.Popen( + [frrtest.binpath(program)], stdin=subprocess.PIPE, stdout=subprocess.PIPE + ) + output, _ = proc.communicate() + self.exitcode = proc.wait() + if self.exitcode != 0: + raise frrtest.TestExitNonzero(self) diff --git a/tests/subdir.am b/tests/subdir.am index 399669977..ca477851e 100644 --- a/tests/subdir.am +++ b/tests/subdir.am @@ -106,6 +106,12 @@ check_PROGRAMS = \ $(TESTS_ZEBRA) \ # end +if GRPC +check_PROGRAMS += \ + tests/lib/test_grpc \ + #end +endif + if ZEROMQ check_PROGRAMS += \ tests/lib/test_zmq \ @@ -156,9 +162,19 @@ TESTS_CFLAGS = \ # end # note no -Werror +TESTS_CXXFLAGS = \ + $(AC_CXXFLAGS) \ + $(LIBYANG_CFLAGS) \ + $(SAN_FLAGS) \ + # end +# note no -Werror + ALL_TESTS_LDADD = lib/libfrr.la $(LIBCAP) BGP_TEST_LDADD = bgpd/libbgp.a $(RFPLDADD) $(ALL_TESTS_LDADD) $(LIBYANG_LIBS) -lm ISISD_TEST_LDADD = isisd/libisis.a $(ALL_TESTS_LDADD) +if GRPC +GRPC_TESTS_LDADD = staticd/libstatic.a grpc/libfrrgrpc_pb.la -lgrpc++ -lprotobuf $(ALL_TESTS_LDADD) $(LIBYANG_LIBS) -lm +endif OSPFD_TEST_LDADD = ospfd/libfrrospf.a $(ALL_TESTS_LDADD) OSPF6_TEST_LDADD = ospf6d/libospf6.a $(ALL_TESTS_LDADD) ZEBRA_TEST_LDADD = zebra/label_manager.o $(ALL_TESTS_LDADD) @@ -251,6 +267,12 @@ tests_lib_northbound_test_oper_data_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_northbound_test_oper_data_LDADD = $(ALL_TESTS_LDADD) tests_lib_northbound_test_oper_data_SOURCES = tests/lib/northbound/test_oper_data.c nodist_tests_lib_northbound_test_oper_data_SOURCES = yang/frr-test-module.yang.c +if GRPC +tests_lib_test_grpc_CXXFLAGS = $(WERROR) $(TESTS_CXXFLAGS) +tests_lib_test_grpc_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_lib_test_grpc_LDADD = $(GRPC_TESTS_LDADD) +tests_lib_test_grpc_SOURCES = tests/lib/test_grpc.cpp +endif tests_lib_test_assert_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_assert_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_assert_LDADD = $(ALL_TESTS_LDADD) diff --git a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py index 597e23069..2d75428f1 100644 --- a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py +++ b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py @@ -906,17 +906,25 @@ def test_bgp_summary(): # Read expected result from file expected_original = open(refTableFile).read().rstrip() - for filter in ["", "remote-as internal", "remote-as external", + for arguments in ["", "remote-as internal", "remote-as external", "remote-as 100", "remote-as 123", "neighbor 192.168.7.10", "neighbor 192.168.7.10", "neighbor fc00:0:0:8::1000", - "neighbor 10.0.0.1"]: + "neighbor 10.0.0.1", + "terse", + "remote-as internal terse", + "remote-as external terse", + "remote-as 100 terse", "remote-as 123 terse", + "neighbor 192.168.7.10 terse", "neighbor 
192.168.7.10 terse", + "neighbor fc00:0:0:8::1000 terse", + "neighbor 10.0.0.1 terse"]: # Actual output from router actual = ( net["r%s" % i] - .cmd('vtysh -c "show ip bgp summary ' + filter + '" 2> /dev/null') + .cmd('vtysh -c "show ip bgp summary ' + arguments + '" 2> /dev/null') .rstrip() ) + # Mask out "using XXiXX bytes" portion. They are random... actual = re.sub(r"using [0-9]+ bytes", "using XXXX bytes", actual) # Mask out "using XiXXX KiB" portion. They are random... @@ -928,48 +936,55 @@ def test_bgp_summary(): actual = re.sub(r"Total number.*", "", actual) actual = re.sub(r"Displayed.*", "", actual) # Remove IPv4 Unicast Summary (Title only) - actual = re.sub(r"IPv4 Unicast Summary:", "", actual) + actual = re.sub(r"IPv4 Unicast Summary \(VRF default\):", "", actual) # Remove IPv4 Multicast Summary (all of it) - actual = re.sub(r"IPv4 Multicast Summary:", "", actual) + actual = re.sub(r"IPv4 Multicast Summary \(VRF default\):", "", actual) actual = re.sub(r"No IPv4 Multicast neighbor is configured", "", actual) # Remove IPv4 VPN Summary (all of it) - actual = re.sub(r"IPv4 VPN Summary:", "", actual) + actual = re.sub(r"IPv4 VPN Summary \(VRF default\):", "", actual) actual = re.sub(r"No IPv4 VPN neighbor is configured", "", actual) # Remove IPv4 Encap Summary (all of it) - actual = re.sub(r"IPv4 Encap Summary:", "", actual) + actual = re.sub(r"IPv4 Encap Summary \(VRF default\):", "", actual) actual = re.sub(r"No IPv4 Encap neighbor is configured", "", actual) # Remove Unknown Summary (all of it) - actual = re.sub(r"Unknown Summary:", "", actual) + actual = re.sub(r"Unknown Summary \(VRF default\):", "", actual) actual = re.sub(r"No Unknown neighbor is configured", "", actual) - actual = re.sub(r"IPv4 labeled-unicast Summary:", "", actual) + actual = re.sub(r"IPv4 labeled-unicast Summary \(VRF default\):", "", actual) actual = re.sub( r"No IPv4 labeled-unicast neighbor is configured", "", actual ) expected = expected_original - # apply filters on expected output - if "internal" in filter or "remote-as 100" in filter: + # apply argumentss on expected output + if "internal" in arguments or "remote-as 100" in arguments: expected = re.sub(r".+\s+200\s+.+", "", expected) - elif "external" in filter: + elif "external" in arguments: expected = re.sub(r".+\s+100\s+.+Active.+", "", expected) - elif "remote-as 123" in filter: + elif "remote-as 123" in arguments: expected = re.sub( r"(192.168.7.(1|2)0|fc00:0:0:8::(1|2)000).+Active.+", "", expected ) - elif "192.168.7.10" in filter: + expected = re.sub(r"\nNeighbor.+Desc", "", expected) + expected = expected + "% No matching neighbor\n" + elif "192.168.7.10" in arguments: expected = re.sub( r"(192.168.7.20|fc00:0:0:8::(1|2)000).+Active.+", "", expected ) - elif "fc00:0:0:8::1000" in filter: + elif "fc00:0:0:8::1000" in arguments: expected = re.sub( r"(192.168.7.(1|2)0|fc00:0:0:8::2000).+Active.+", "", expected ) - elif "10.0.0.1" in filter: - expected = "No such neighbor in this view/vrf" + elif "10.0.0.1" in arguments: + expected = "No such neighbor in VRF default" + + if "terse" in arguments: + expected = re.sub(r"BGP table version .+", "", expected) + expected = re.sub(r"RIB entries .+", "", expected) + expected = re.sub(r"Peers [0-9]+, using .+", "", expected) # Strip empty lines actual = actual.lstrip().rstrip() @@ -978,13 +993,17 @@ def test_bgp_summary(): expected = re.sub(r"\n+", "\n", expected) # reapply initial formatting - actual = re.sub(r"KiB of memory\n", "KiB of memory\n\n", actual) - expected = re.sub(r"KiB of memory\n", "KiB 
of memory\n\n", expected) + if "terse" in arguments: + actual = re.sub(r" vrf-id 0\n", " vrf-id 0\n\n", actual) + expected = re.sub(r" vrf-id 0\n", " vrf-id 0\n\n", expected) + else: + actual = re.sub(r"KiB of memory\n", "KiB of memory\n\n", actual) + expected = re.sub(r"KiB of memory\n", "KiB of memory\n\n", expected) # realign expected neighbor columns if needed try: - idx_actual = re.search(r"\n(Neighbor\s+V\s+)", actual).group(1).find("V") - idx_expected = re.search(r"\n(Neighbor\s+V\s+)", expected).group(1).find("V") + idx_actual = re.search(r"(Neighbor\s+V\s+)", actual).group(1).find("V") + idx_expected = re.search(r"(Neighbor\s+V\s+)", expected).group(1).find("V") idx_diff = idx_expected - idx_actual if idx_diff > 0: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd @@ -1002,8 +1021,8 @@ def test_bgp_summary(): diff = topotest.get_textdiff( actual, expected, - title1="actual SHOW IP BGP SUMMARY " + filter.upper() , - title2="expected SHOW IP BGP SUMMARY " + filter.upper(), + title1="actual SHOW IP BGP SUMMARY " + arguments.upper() , + title2="expected SHOW IP BGP SUMMARY " + arguments.upper(), ) # Empty string if it matches, otherwise diff contains unified diff @@ -1020,13 +1039,6 @@ def test_bgp_summary(): diff, ) - # Actual output from router - actual = ( - net["r%s" % i] - .cmd('vtysh -c "show ip bgp summary" 2> /dev/null') - .rstrip() - ) - # Make sure that all daemons are running for i in range(1, 2): fatal_error = net["r%s" % i].checkRouterRunning() @@ -1074,22 +1086,22 @@ def test_bgp_ipv6_summary(): actual = re.sub(r"Total number.*", "", actual) actual = re.sub(r"Displayed.*", "", actual) # Remove IPv4 Unicast Summary (Title only) - actual = re.sub(r"IPv6 Unicast Summary:", "", actual) + actual = re.sub(r"IPv6 Unicast Summary \(VRF default\):", "", actual) # Remove IPv4 Multicast Summary (all of it) - actual = re.sub(r"IPv6 Multicast Summary:", "", actual) + actual = re.sub(r"IPv6 Multicast Summary \(VRF default\):", "", actual) actual = re.sub(r"No IPv6 Multicast neighbor is configured", "", actual) # Remove IPv4 VPN Summary (all of it) - actual = re.sub(r"IPv6 VPN Summary:", "", actual) + actual = re.sub(r"IPv6 VPN Summary \(VRF default\):", "", actual) actual = re.sub(r"No IPv6 VPN neighbor is configured", "", actual) # Remove IPv4 Encap Summary (all of it) - actual = re.sub(r"IPv6 Encap Summary:", "", actual) + actual = re.sub(r"IPv6 Encap Summary \(VRF default\):", "", actual) actual = re.sub(r"No IPv6 Encap neighbor is configured", "", actual) # Remove Unknown Summary (all of it) - actual = re.sub(r"Unknown Summary:", "", actual) + actual = re.sub(r"Unknown Summary \(VRF default\):", "", actual) actual = re.sub(r"No Unknown neighbor is configured", "", actual) # Remove Labeled Unicast Summary (all of it) - actual = re.sub(r"IPv6 labeled-unicast Summary:", "", actual) + actual = re.sub(r"IPv6 labeled-unicast Summary \(VRF default\):", "", actual) actual = re.sub( r"No IPv6 labeled-unicast neighbor is configured", "", actual ) diff --git a/tests/topotests/bfd_topo2/r4/ipv6_routes.json b/tests/topotests/bfd_topo2/r4/ipv6_routes.json index c828575c8..eb571d5d1 100644 --- a/tests/topotests/bfd_topo2/r4/ipv6_routes.json +++ b/tests/topotests/bfd_topo2/r4/ipv6_routes.json @@ -35,7 +35,7 @@ { "distance": 110, "protocol": "ospf6", - "metric": 10, + "metric": 20, "selected": true, "installed": true, "prefix": "2001:db8:1::/64", diff --git a/tests/topotests/config_timing/r1/staticd.conf b/tests/topotests/config_timing/r1/staticd.conf new file mode 100644 index 
000000000..0f9f97ca1 --- /dev/null +++ b/tests/topotests/config_timing/r1/staticd.conf @@ -0,0 +1 @@ +log timestamp precision 3 diff --git a/tests/topotests/config_timing/r1/zebra.conf b/tests/topotests/config_timing/r1/zebra.conf new file mode 100644 index 000000000..46fd96503 --- /dev/null +++ b/tests/topotests/config_timing/r1/zebra.conf @@ -0,0 +1,18 @@ +log timestamp precision 3 + +ip prefix-list ANY permit 0.0.0.0/0 le 32 +ipv6 prefix-list ANY seq 10 permit any + +route-map RM-NONE4 deny 10 +exit-route-map + +route-map RM-NONE6 deny 10 +exit-route-map + +interface r1-eth0 + ip address 100.0.0.1/24 + ipv6 address 2102::1/64 +exit + +ip protocol static route-map RM-NONE4 +ipv6 protocol static route-map RM-NONE6 diff --git a/tests/topotests/config_timing/test_config_timing.py b/tests/topotests/config_timing/test_config_timing.py new file mode 100644 index 000000000..db8baa860 --- /dev/null +++ b/tests/topotests/config_timing/test_config_timing.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +# +# June 2 2021, Christian Hopps <chopps@labn.net> +# +# Copyright (c) 2021, LabN Consulting, L.L.C. +# Copyright (c) 2019-2020 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test the timing of config operations. + +The initial add of 10k routes is used as a baseline for timing and all future +operations are expected to complete in under 2 times that baseline. This is a +lot of slop; however, the pre-batching code some of these operations (e.g., +adding the same set of 10k routes) would take 100 times longer, so the intention +is to catch those types of regressions. 
+""" + +import datetime +import ipaddress +import math +import os +import sys +import pytest + + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + +pytestmark = [pytest.mark.staticd] + +class TimingTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + tgen.add_router("r1") + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + + +def setup_module(mod): + tgen = Topogen(TimingTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)), + ) + router.load_config( + TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + +def get_ip_networks(super_prefix, count): + count_log2 = math.log(count, 2) + if count_log2 != int(count_log2): + count_log2 = int(count_log2) + 1 + else: + count_log2 = int(count_log2) + network = ipaddress.ip_network(super_prefix) + return tuple(network.subnets(count_log2))[0:count] + +def test_static_timing(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def do_config( + count, bad_indices, base_delta, d_multiplier, add=True, do_ipv6=False, super_prefix=None, en_dbg=False + ): + router_list = tgen.routers() + tot_delta = float(0) + + optype = "adding" if add else "removing" + iptype = "IPv6" if do_ipv6 else "IPv4" + if super_prefix is None: + super_prefix = u"2001::/48" if do_ipv6 else u"10.0.0.0/8" + via = u"lo" + optyped = "added" if add else "removed" + + for rname, router in router_list.items(): + router.logger.info("{} {} static {} routes".format( + optype, count, iptype) + ) + + # Generate config file. + config_file = os.path.join( + router.logdir, rname, "{}-routes-{}.conf".format( + iptype.lower(), optype + ) + ) + with open(config_file, "w") as f: + for i, net in enumerate(get_ip_networks(super_prefix, count)): + if i in bad_indices: + if add: + f.write("ip route {} {} bad_input\n".format(net, via)) + else: + f.write("no ip route {} {} bad_input\n".format(net, via)) + elif add: + f.write("ip route {} {}\n".format(net, via)) + else: + f.write("no ip route {} {}\n".format(net, via)) + + # Enable debug + if en_dbg: + router.vtysh_cmd("debug northbound callbacks configuration") + + # Load config file. 
+ load_command = 'vtysh -f "{}"'.format(config_file) + tstamp = datetime.datetime.now() + output = router.run(load_command) + delta = (datetime.datetime.now() - tstamp).total_seconds() + tot_delta += delta + + router.logger.info( + "\nvtysh command => {}\nvtysh output <= {}\nin {}s".format( + load_command, output, delta + ) + ) + + limit_delta = base_delta * d_multiplier + logger.info( + "{} {} {} static routes under {} in {}s (limit: {}s)".format( + optyped, count, iptype.lower(), super_prefix, tot_delta, limit_delta + ) + ) + if limit_delta: + assert tot_delta <= limit_delta + + return tot_delta + + # Number of static routes + prefix_count = 10000 + prefix_base = [[u"10.0.0.0/8", u"11.0.0.0/8"], + [u"2100:1111:2220::/44", u"2100:3333:4440::/44"]] + + bad_indices = [] + for ipv6 in [False, True]: + base_delta = do_config(prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0]) + + # Another set of same number of prefixes + do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][1]) + + # Duplicate config + do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0]) + + # Remove 1/2 of duplicate + do_config(prefix_count / 2, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0]) + + # Add all back in so 1/2 replicate 1/2 new + do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0]) + + # remove all + delta = do_config(prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0]) + delta += do_config(prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][1]) + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index de5c584e9..e57db7471 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -2,8 +2,10 @@ Topotest conftest.py file. 
""" +import glob import os import pdb +import re import pytest from lib.topogen import get_topogen, diagnose_env @@ -11,6 +13,12 @@ from lib.topotest import json_cmp_result from lib.topotest import g_extra_config as topotest_extra_config from lib.topolog import logger +try: + from _pytest._code.code import ExceptionInfo + leak_check_ok = True +except ImportError: + leak_check_ok = False + def pytest_addoption(parser): """ @@ -67,6 +75,18 @@ def pytest_addoption(parser): ) parser.addoption( + "--valgrind-extra", + action="store_true", + help="Generate suppression file, and enable more precise (slower) valgrind checks", + ) + + parser.addoption( + "--valgrind-memleaks", + action="store_true", + help="Run all daemons under valgrind for memleak detection", + ) + + parser.addoption( "--vtysh", metavar="ROUTER[,ROUTER...]", help="Comma-separated list of routers to spawn vtysh on, or 'all'", @@ -79,6 +99,37 @@ def pytest_addoption(parser): ) +def check_for_memleaks(): + if not topotest_extra_config["valgrind_memleaks"]: + return + + leaks = [] + tgen = get_topogen() + latest = [] + existing = [] + if tgen is not None: + logdir = "/tmp/topotests/{}".format(tgen.modname) + if hasattr(tgen, "valgrind_existing_files"): + existing = tgen.valgrind_existing_files + latest = glob.glob(os.path.join(logdir, "*.valgrind.*")) + + for vfile in latest: + if vfile in existing: + continue + with open(vfile) as vf: + vfcontent = vf.read() + match = re.search(r"ERROR SUMMARY: (\d+) errors", vfcontent) + if match and match.group(1) != "0": + emsg = '{} in {}'.format(match.group(1), vfile) + leaks.append(emsg) + + if leaks: + if leak_check_ok: + pytest.fail("Memleaks found:\n\t" + "\n\t".join(leaks)) + else: + logger.error("Memleaks found:\n\t" + "\n\t".join(leaks)) + + def pytest_runtest_call(): """ This function must be run after setup_module(), it does standarized post @@ -139,6 +190,9 @@ def pytest_configure(config): shell_on_error = config.getoption("--shell-on-error") topotest_extra_config["shell_on_error"] = shell_on_error + topotest_extra_config["valgrind_extra"] = config.getoption("--valgrind-extra") + topotest_extra_config["valgrind_memleaks"] = config.getoption("--valgrind-memleaks") + vtysh = config.getoption("--vtysh") topotest_extra_config["vtysh"] = vtysh.split(",") if vtysh else [] @@ -159,6 +213,12 @@ def pytest_runtest_makereport(item, call): else: pause = False + if call.excinfo is None and call.when == "call": + try: + check_for_memleaks() + except: + call.excinfo = ExceptionInfo() + if call.excinfo is None: error = False else: diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index 50cb586ac..db7b3586f 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -990,7 +990,7 @@ def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict): # Verification APIs ############################################# @retry(attempts=4, wait=2, return_is_str=True) -def verify_router_id(tgen, topo, input_dict): +def verify_router_id(tgen, topo, input_dict, expected=True): """ Running command "show ip bgp json" for DUT and reading router-id from input_dict and verifying with command output. 
@@ -1006,6 +1006,8 @@ def verify_router_id(tgen, topo, input_dict): * `topo`: input json file data * `input_dict`: input dictionary, have details of Device Under Test, for which user wants to test the data + * `expected` : expected results from API, by-default True + Usage ----- # Verify if router-id for r1 is 12.12.12.12 @@ -1060,7 +1062,7 @@ def verify_router_id(tgen, topo, input_dict): @retry(attempts=50, wait=3, return_is_str=True) -def verify_bgp_convergence(tgen, topo, dut=None): +def verify_bgp_convergence(tgen, topo, dut=None, expected=True): """ API will verify if BGP is converged with in the given time frame. Running "show bgp summary json" command and verify bgp neighbor @@ -1070,6 +1072,8 @@ def verify_bgp_convergence(tgen, topo, dut=None): * `tgen`: topogen object * `topo`: input json file data * `dut`: device under test + * `expected` : expected results from API, by-default True + Usage ----- # To veriry is BGP is converged for all the routers used in @@ -1264,7 +1268,7 @@ def verify_bgp_convergence(tgen, topo, dut=None): @retry(attempts=4, wait=4, return_is_str=True) def verify_bgp_community( - tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False + tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False, expected=True ): """ API to veiryf BGP large community is attached in route for any given @@ -1280,6 +1284,7 @@ def verify_bgp_community( values needs to be verified * `vrf`: VRF name * `bestpath`: To check best path cli + * `expected` : expected results from API, by-default True Usage ----- @@ -1423,7 +1428,7 @@ def modify_as_number(tgen, topo, input_dict): @retry(attempts=4, wait=2, return_is_str=True) -def verify_as_numbers(tgen, topo, input_dict): +def verify_as_numbers(tgen, topo, input_dict, expected=True): """ This API is to verify AS numbers for given DUT by running "show ip bgp neighbor json" command. Local AS and Remote AS @@ -1435,6 +1440,7 @@ def verify_as_numbers(tgen, topo, input_dict): * `topo`: input json file data * `addr_type` : ip type, ipv4/ipv6 * `input_dict`: defines - for which router, AS numbers needs to be verified + * `expected` : expected results from API, by-default True Usage ----- @@ -1522,7 +1528,7 @@ def verify_as_numbers(tgen, topo, input_dict): @retry(attempts=50, wait=3, return_is_str=True) -def verify_bgp_convergence_from_running_config(tgen, dut=None): +def verify_bgp_convergence_from_running_config(tgen, dut=None, expected=True): """ API to verify BGP convergence b/w loopback and physical interface. 
This API would be used when routers have BGP neighborship is loopback @@ -1532,6 +1538,7 @@ def verify_bgp_convergence_from_running_config(tgen, dut=None): ---------- * `tgen`: topogen object * `dut`: device under test + * `expected` : expected results from API, by-default True Usage ----- @@ -2086,6 +2093,7 @@ def verify_bgp_attributes( input_dict=None, seq_id=None, nexthop=None, + expected=True ): """ API will verify BGP attributes set by Route-map for given prefix and @@ -2101,6 +2109,7 @@ def verify_bgp_attributes( * `rmap_name`: route map name for which set criteria needs to be verified * `input_dict`: defines for which router, AS numbers needs * `seq_id`: sequence number of rmap, default is None + * `expected` : expected results from API, by-default True Usage ----- @@ -2216,7 +2225,7 @@ def verify_bgp_attributes( @retry(attempts=4, wait=2, return_is_str=True) def verify_best_path_as_per_bgp_attribute( - tgen, addr_type, router, input_dict, attribute + tgen, addr_type, router, input_dict, attribute, expected=True ): """ API is to verify best path according to BGP attributes for given routes. @@ -2231,6 +2240,8 @@ def verify_best_path_as_per_bgp_attribute( * `attribute` : calculate best path using this attribute * `input_dict`: defines different routes to calculate for which route best path is selected + * `expected` : expected results from API, by-default True + Usage ----- # To verify best path for routes 200.50.2.0/32 and 200.60.2.0/32 from @@ -2420,7 +2431,7 @@ def verify_best_path_as_per_bgp_attribute( @retry(attempts=5, wait=2, return_is_str=True) def verify_best_path_as_per_admin_distance( - tgen, addr_type, router, input_dict, attribute + tgen, addr_type, router, input_dict, attribute, expected=True ): """ API is to verify best path according to admin distance for given @@ -2435,6 +2446,8 @@ def verify_best_path_as_per_admin_distance( * `attribute` : calculate best path using admin distance * `input_dict`: defines different routes with different admin distance to calculate for which route best path is selected + * `expected` : expected results from API, by-default True + Usage ----- # To verify best path for route 200.50.2.0/32 from router r2 to @@ -2532,7 +2545,7 @@ def verify_best_path_as_per_admin_distance( @retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) def verify_bgp_rib( - tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None + tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None, expected=True ): """ This API is to verify whether bgp rib has any @@ -2547,6 +2560,7 @@ def verify_bgp_rib( * `next_hop`[optional]: next_hop which needs to be verified, default = static * 'aspath'[optional]: aspath which needs to be verified + * `expected` : expected results from API, by-default True Usage ----- @@ -2833,7 +2847,7 @@ def verify_bgp_rib( @retry(attempts=5, wait=2, return_is_str=True) -def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer): +def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer, expected=True): """ This API is to verify verify_graceful_restart configuration of DUT and cross verify the same from the peer bgp routerrouter. 
@@ -2847,6 +2861,7 @@ def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer): which user wants to test the data * `dut`: input dut router name * `peer`: input peer router name + * `expected` : expected results from API, by-default True Usage ----- @@ -3082,7 +3097,7 @@ def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer): @retry(attempts=5, wait=2, return_is_str=True) -def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer): +def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True): """ This API is to verify r_bit in the BGP gr capability advertised by the neighbor router @@ -3096,6 +3111,8 @@ def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer): which user wants to test the data * `dut`: input dut router name * `peer`: peer name + * `expected` : expected results from API, by-default True + Usage ----- input_dict = { @@ -3200,7 +3217,7 @@ def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer): @retry(attempts=5, wait=2, return_is_str=True) -def verify_eor(tgen, topo, addr_type, input_dict, dut, peer): +def verify_eor(tgen, topo, addr_type, input_dict, dut, peer, expected=True): """ This API is to verify EOR @@ -3363,7 +3380,7 @@ def verify_eor(tgen, topo, addr_type, input_dict, dut, peer): @retry(attempts=4, wait=2, return_is_str=True) -def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer): +def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True): """ This API is to verify f_bit in the BGP gr capability advertised by the neighbor router @@ -3377,6 +3394,7 @@ def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer): which user wants to test the data * `dut`: input dut router name * `peer`: peer name + * `expected` : expected results from API, by-default True Usage ----- @@ -3516,6 +3534,8 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer) for which user wants to test the data * `dut`: input dut router name * `peer`: peer name + * `expected` : expected results from API, by-default True + Usage ----- # Configure graceful-restart @@ -3629,7 +3649,7 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer) @retry(attempts=4, wait=2, return_is_str=True) -def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut): +def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut, expected=True): """ This API is to verify gr_address_family in the BGP gr capability advertised by the neighbor router @@ -3641,6 +3661,7 @@ def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut): * `addr_type` : ip type ipv4/ipv6 * `addr_type` : ip type IPV4 Unicast/IPV6 Unicast * `dut`: input dut router name + * `expected` : expected results from API, by-default True Usage ----- @@ -3730,6 +3751,7 @@ def verify_attributes_for_evpn_routes( ipLen=None, rd_peer=None, rt_peer=None, + expected=True ): """ API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1" @@ -3747,6 +3769,8 @@ def verify_attributes_for_evpn_routes( * `ipLen` : IP prefix length * `rd_peer` : Peer name from which RD will be auto-generated * `rt_peer` : Peer name from which RT will be auto-generated + * `expected` : expected results from API, by-default True + Usage ----- input_dict_1 = { @@ -4117,7 +4141,7 @@ def verify_attributes_for_evpn_routes( @retry(attempts=5, wait=2, return_is_str=True) def verify_evpn_routes( - tgen, topo, dut, input_dict, routeType=5, EthTag=0, next_hop=None + tgen, topo, dut, input_dict, routeType=5, EthTag=0, 
next_hop=None, expected=True ): """ API to verify evpn routes using "sh bgp l2vpn evpn" @@ -4132,6 +4156,8 @@ def verify_evpn_routes( * `route_type` : Route type 5 is supported as of now * `EthTag` : Ethernet tag, by-default is 0 * `next_hop` : Prefered nexthop for the evpn routes + * `expected` : expected results from API, by-default True + Usage ----- input_dict_1 = { diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index ee7cd6a7a..3f78f020b 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -123,6 +123,17 @@ DEBUG_LOGS = { "debug ospf te", "debug ospf zebra", ], + "ospf6": [ + "debug ospf6 event", + "debug ospf6 ism", + "debug ospf6 lsa", + "debug ospf6 nsm", + "debug ospf6 nssa", + "debug ospf6 packet all", + "debug ospf6 sr", + "debug ospf6 te", + "debug ospf6 zebra", + ], } if config.has_option("topogen", "verbosity"): @@ -422,7 +433,10 @@ def check_router_status(tgen): daemons.append("zebra") if "pimd" in result: daemons.append("pimd") - + if "ospfd" in result: + daemons.append("ospfd") + if "ospf6d" in result: + daemons.append("ospf6d") rnode.startDaemons(daemons) except Exception as e: @@ -890,6 +904,10 @@ def topo_daemons(tgen, topo): for val in topo["routers"][rtr]["links"].values(): if "pim" in val and "pimd" not in daemon_list: daemon_list.append("pimd") + if "ospf" in val and "ospfd" not in daemon_list: + daemon_list.append("ospfd") + if "ospf6" in val and "ospf6d" not in daemon_list: + daemon_list.append("ospf6d") break return daemon_list diff --git a/tests/topotests/lib/mcast-tester.py b/tests/topotests/lib/mcast-tester.py new file mode 100755 index 000000000..07e4ab877 --- /dev/null +++ b/tests/topotests/lib/mcast-tester.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. + +""" +Subscribe to a multicast group so that the kernel sends an IGMP JOIN +for the multicast group we subscribed to. +""" + +import argparse +import os +import json +import socket +import subprocess +import struct +import sys +import time + +# +# Functions +# +def interface_name_to_index(name): + "Gets the interface index using its name. Returns None on failure." + interfaces = json.loads( + subprocess.check_output('ip -j link show', shell=True)) + + for interface in interfaces: + if interface['ifname'] == name: + return interface['ifindex'] + + return None + + +def multicast_join(sock, ifindex, group, port): + "Joins a multicast group." + mreq = struct.pack( + "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex + ) + + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind((group, port)) + sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) + + +# +# Main code. 
+# +parser = argparse.ArgumentParser(description="Multicast RX utility") +parser.add_argument('socket', help='Point to topotest UNIX socket') +parser.add_argument('group', help='Multicast IP') +parser.add_argument('interface', help='Interface name') +parser.add_argument( + '--send', + help='Transmit instead of join with interval (defaults to 0.7 sec)', + type=float, default=0) +args = parser.parse_args() + +ttl = 16 +port = 1000 + +# Get interface index/validate. +ifindex = interface_name_to_index(args.interface) +if ifindex is None: + sys.stderr.write('Interface {} does not exists\n'.format(args.interface)) + sys.exit(1) + +# We need root privileges to set up multicast. +if os.geteuid() != 0: + sys.stderr.write("ERROR: You must have root privileges\n") + sys.exit(1) + +# Wait for topotest to synchronize with us. +toposock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) +while True: + try: + toposock.connect(args.socket) + break + except ConnectionRefusedError: + time.sleep(1) + continue + +msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) +if args.send > 0: + # Prepare multicast bit in that interface. + msock.setsockopt( + socket.SOL_SOCKET, 25, + struct.pack("%ds" % len(args.interface), + args.interface.encode('utf-8'))) + # Set packets TTL. + msock.setsockopt( + socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl)) + # Block to ensure packet send. + msock.setblocking(True) + # Set topotest socket non blocking so we can multiplex the main loop. + toposock.setblocking(False) +else: + multicast_join(msock, ifindex, args.group, port) + +counter = 0 +while True: + if args.send > 0: + msock.sendto(b"test %d" % counter, (args.group, port)) + counter += 1 + time.sleep(args.send) + + try: + data = toposock.recv(1) + if data == b'': + print(' -> Connection closed') + break + except BlockingIOError: + continue + +msock.close() + +sys.exit(0) diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py index 7ad64de4a..3f39b93d8 100644 --- a/tests/topotests/lib/ospf.py +++ b/tests/topotests/lib/ospf.py @@ -18,13 +18,16 @@ # OF THIS SOFTWARE. 
# -from copy import deepcopy import traceback +import ipaddr +import ipaddress +import sys + +from copy import deepcopy from time import sleep from lib.topolog import logger -import ipaddr from lib.topotest import frr_unicode - +from ipaddress import IPv6Address # Import common_config to use commomnly used APIs from lib.common_config import ( create_common_configuration, @@ -86,10 +89,21 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru logger.debug("Router %s: 'ospf' not present in input_dict", router) continue - result = __create_ospf_global(tgen, input_dict, router, build, load_config) + result = __create_ospf_global( + tgen, input_dict, router, build, load_config) if result is True: ospf_data = input_dict[router]["ospf"] + for router in input_dict.keys(): + if "ospf6" not in input_dict[router]: + logger.debug("Router %s: 'ospf6' not present in input_dict", router) + continue + + result = __create_ospf_global( + tgen, input_dict, router, build, load_config, ospf='ospf6') + if result is True: + ospf_data = input_dict[router]["ospf6"] + logger.debug("Exiting lib API: create_router_ospf()") return result @@ -158,6 +172,7 @@ def __create_ospf_global( config_data.append(cmd) + # router id router_id = ospf_data.setdefault("router_id", None) del_router_id = ospf_data.setdefault("del_router_id", False) @@ -166,6 +181,33 @@ def __create_ospf_global( if router_id: config_data.append("{} router-id {}".format(ospf, router_id)) + # log-adjacency-changes + log_adj_changes = ospf_data.setdefault("log_adj_changes", None) + del_log_adj_changes = ospf_data.setdefault("del_log_adj_changes", False) + if del_log_adj_changes: + config_data.append("no log-adjacency-changes detail") + if log_adj_changes: + config_data.append("log-adjacency-changes {}".format( + log_adj_changes)) + + # aggregation timer + aggr_timer = ospf_data.setdefault("aggr_timer", None) + del_aggr_timer = ospf_data.setdefault("del_aggr_timer", False) + if del_aggr_timer: + config_data.append("no aggregation timer") + if aggr_timer: + config_data.append("aggregation timer {}".format( + aggr_timer)) + + # maximum path information + ecmp_data = ospf_data.setdefault("maximum-paths", {}) + if ecmp_data: + cmd = "maximum-paths {}".format(ecmp_data) + del_action = ospf_data.setdefault("del_max_path", False) + if del_action: + cmd = "no maximum-paths" + config_data.append(cmd) + # redistribute command redistribute_data = ospf_data.setdefault("redistribute", {}) if redistribute_data: @@ -203,6 +245,34 @@ def __create_ospf_global( cmd = "no {}".format(cmd) config_data.append(cmd) + #def route information + def_rte_data = ospf_data.setdefault("default-information", {}) + if def_rte_data: + if "originate" not in def_rte_data: + logger.debug("Router %s: 'originate key' not present in " + "input_dict", router) + else: + cmd = "default-information originate" + + if "always" in def_rte_data: + cmd = cmd + " always" + + if "metric" in def_rte_data: + cmd = cmd + " metric {}".format(def_rte_data["metric"]) + + if "metric-type" in def_rte_data: + cmd = cmd + " metric-type {}".format(def_rte_data[ + "metric-type"]) + + if "route-map" in def_rte_data: + cmd = cmd + " route-map {}".format(def_rte_data[ + "route-map"]) + + del_action = def_rte_data.setdefault("delete", False) + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + # area interface information for ospf6d only if ospf == "ospf6": area_iface = ospf_data.setdefault("neighbors", {}) @@ -217,6 +287,21 @@ def __create_ospf_global( cmd = "no 
{}".format(cmd) config_data.append(cmd) + try: + if "area" in input_dict[router]['links'][neighbor][ + 'ospf6']: + iface = input_dict[router]["links"][neighbor]["interface"] + cmd = "interface {} area {}".format( + iface, input_dict[router]['links'][neighbor][ + 'ospf6']['area']) + if input_dict[router]['links'][neighbor].setdefault( + "delete", False): + cmd = "no {}".format(cmd) + config_data.append(cmd) + except KeyError: + pass + + # summary information summary_data = ospf_data.setdefault("summary-address", {}) if summary_data: @@ -427,11 +512,11 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= result = create_common_configuration( tgen, router, config_data, "interface_config", build=build ) - logger.debug("Exiting lib API: create_igmp_config()") + logger.debug("Exiting lib API: config_ospf_interface()") return result -def clear_ospf(tgen, router): +def clear_ospf(tgen, router, ospf=None): """ This API is to clear ospf neighborship by running clear ip ospf interface * command, @@ -451,11 +536,16 @@ def clear_ospf(tgen, router): return False rnode = tgen.routers()[router] - # Clearing OSPF - logger.info("Clearing ospf process for router %s..", router) + if ospf: + version = "ipv6" + else: + version = "ip" - run_frr_cmd(rnode, "clear ip ospf interface ") + cmd = "clear {} ospf interface".format(version) + logger.info( + "Clearing ospf process on router %s.. using command '%s'", router, cmd) + run_frr_cmd(rnode, cmd) logger.debug("Exiting lib API: clear_ospf()") @@ -490,7 +580,7 @@ def redistribute_ospf(tgen, topo, dut, route_type, **kwargs): # Verification procs ################################ @retry(attempts=40, wait=2, return_is_str=True) -def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): +def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expected=True): """ This API is to verify ospf neighborship by running show ip ospf neighbour command, @@ -502,6 +592,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): * `dut`: device under test * `input_dict` : Input dict data, required when configuring from testcase * `lan` : verify neighbors in lan topology + * `expected` : expected results from API, by-default True Usage ----- @@ -683,70 +774,194 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): ################################ # Verification procs ################################ -@retry(attempts=40, wait=2, return_is_str=True) -def verify_ospf6_neighbor(tgen, topo): +@retry(attempts=10, wait=2, return_is_str=True) +def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): """ This API is to verify ospf neighborship by running - show ip ospf neighbour command, + show ipv6 ospf neighbour command, Parameters ---------- * `tgen` : Topogen object * `topo` : json file data + * `dut`: device under test + * `input_dict` : Input dict data, required when configuring from testcase + * `lan` : verify neighbors in lan topology Usage ----- - Check FULL neighbors. - verify_ospf_neighbor(tgen, topo) + 1. To check FULL neighbors. + verify_ospf_neighbor(tgen, topo, dut=dut) - result = verify_ospf_neighbor(tgen, topo) + 2. To check neighbors with their roles. 
+ input_dict = { + "r0": { + "ospf6": { + "neighbors": { + "r1": { + "state": "Full", + "role": "DR" + }, + "r2": { + "state": "Full", + "role": "DROther" + }, + "r3": { + "state": "Full", + "role": "DROther" + } + } + } + } + } + result = verify_ospf6_neighbor(tgen, topo, dut, input_dict, lan=True) Returns ------- True or False (Error Message) """ - - logger.debug("Entering lib API: verify_ospf6_neighbor()") + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) result = False - for router, rnode in tgen.routers().items(): - if "ospf6" not in topo["routers"][router]: - continue - logger.info("Verifying OSPF6 neighborship on router %s:", router) - show_ospf_json = run_frr_cmd( - rnode, "show ipv6 ospf6 neighbor json", isjson=True - ) + if input_dict: + for router, rnode in tgen.routers().items(): + if 'ospf6' not in topo['routers'][router]: + continue - if not show_ospf_json: - return "OSPF6 is not running" - - ospf_nbr_list = topo["routers"][router]["ospf6"]["neighbors"] - no_of_peer = 0 - for ospf_nbr in ospf_nbr_list: - ospf_nbr_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"] - for neighbor in show_ospf_json["neighbors"]: - if neighbor["neighborId"] == ospf_nbr_rid: - nh_state = neighbor["state"] - break - else: - return "[DUT: {}] OSPF6 peer {} missing".format(router, ospf_nbr_rid) + if dut is not None and dut != router: + continue + + logger.info("Verifying OSPF neighborship on router %s:", router) + show_ospf_json = run_frr_cmd(rnode, + "show ipv6 ospf neighbor json", isjson=True) + # Verifying output dictionary show_ospf_json is empty or not + if not bool(show_ospf_json): + errormsg = "OSPF6 is not running" + return errormsg + + ospf_data_list = input_dict[router]["ospf6"] + ospf_nbr_list = ospf_data_list['neighbors'] + + for ospf_nbr, nbr_data in ospf_nbr_list.items(): + data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id'] + if ospf_nbr in data_ip: + nbr_details = nbr_data[ospf_nbr] + elif lan: + for switch in topo['switches']: + if 'ospf6' in topo['switches'][switch]['links'][router]: + neighbor_ip = data_ip + else: + continue + else: + neighbor_ip = data_ip[router]['ipv6'].split("/")[0] - if nh_state == "Full": - no_of_peer += 1 + nh_state = None + neighbor_ip = neighbor_ip.lower() + nbr_rid = data_rid + get_index_val = dict((d['neighborId'], dict( \ + d, index=index)) for (index, d) in enumerate( \ + show_ospf_json['neighbors'])) + try: + nh_state = get_index_val.get(neighbor_ip)['state'] + intf_state = get_index_val.get(neighbor_ip)['ifState'] + except TypeError: + errormsg = "[DUT: {}] OSPF peer {} missing,from "\ + "{} ".format(router, + nbr_rid, ospf_nbr) + return errormsg - if no_of_peer == len(ospf_nbr_list): - logger.info("[DUT: {}] OSPF6 is Converged".format(router)) - result = True - else: - return "[DUT: {}] OSPF6 is not Converged".format(router) + nbr_state = nbr_data.setdefault("state",None) + nbr_role = nbr_data.setdefault("role",None) - logger.debug("Exiting API: verify_ospf6_neighbor()") + if nbr_state: + if nbr_state == nh_state: + logger.info("[DUT: {}] OSPF6 Nbr is {}:{} State {}".format + (router, ospf_nbr, nbr_rid, nh_state)) + result = True + else: + errormsg = ("[DUT: {}] OSPF6 is not Converged, neighbor" + " state is {} , Expected state is {}".format(router, + nh_state, nbr_state)) + return errormsg + if nbr_role: + if nbr_role == intf_state: + logger.info("[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format( + router, ospf_nbr, nbr_rid, nbr_role)) + else: + errormsg = ("[DUT: {}] OSPF6 is not Converged with rid" + 
"{}, role is {}, Expected role is {}".format(router, + nbr_rid, intf_state, nbr_role)) + return errormsg + continue + else: + + for router, rnode in tgen.routers().items(): + if 'ospf6' not in topo['routers'][router]: + continue + + if dut is not None and dut != router: + continue + + logger.info("Verifying OSPF6 neighborship on router %s:", router) + show_ospf_json = run_frr_cmd(rnode, + "show ipv6 ospf neighbor json", isjson=True) + # Verifying output dictionary show_ospf_json is empty or not + if not bool(show_ospf_json): + errormsg = "OSPF6 is not running" + return errormsg + + ospf_data_list = topo["routers"][router]["ospf6"] + ospf_neighbors = ospf_data_list['neighbors'] + total_peer = 0 + total_peer = len(ospf_neighbors.keys()) + no_of_ospf_nbr = 0 + ospf_nbr_list = ospf_data_list['neighbors'] + no_of_peer = 0 + for ospf_nbr, nbr_data in ospf_nbr_list.items(): + data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id'] + if ospf_nbr in data_ip: + nbr_details = nbr_data[ospf_nbr] + elif lan: + for switch in topo['switches']: + if 'ospf6' in topo['switches'][switch]['links'][router]: + neighbor_ip = data_ip + else: + continue + else: + neighbor_ip = data_ip + + nh_state = None + neighbor_ip = neighbor_ip.lower() + nbr_rid = data_rid + get_index_val = dict((d['neighborId'], dict( \ + d, index=index)) for (index, d) in enumerate( \ + show_ospf_json['neighbors'])) + try: + nh_state = get_index_val.get(neighbor_ip)['state'] + intf_state = get_index_val.get(neighbor_ip)['ifState'] + except TypeError: + errormsg = "[DUT: {}] OSPF peer {} missing,from "\ + "{} ".format(router, + nbr_rid, ospf_nbr) + return errormsg + + if nh_state == 'Full': + no_of_peer += 1 + + if no_of_peer == total_peer: + logger.info("[DUT: {}] OSPF6 is Converged".format(router)) + result = True + else: + errormsg = ("[DUT: {}] OSPF6 is not Converged".format(router)) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return result @retry(attempts=21, wait=2, return_is_str=True) def verify_ospf_rib( - tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None + tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None, expected=True ): """ This API is to verify ospf routes by running @@ -761,6 +976,7 @@ def verify_ospf_rib( * `tag` : tag to be verified * `metric` : metric to be verified * `fib` : True if the route is installed in FIB. + * `expected` : expected results from API, by-default True Usage ----- @@ -1021,7 +1237,7 @@ def verify_ospf_rib( @retry(attempts=10, wait=2, return_is_str=True) -def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None): +def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None, expected=True): """ This API is to verify ospf routes by running show ip ospf interface command. @@ -1033,6 +1249,7 @@ def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None): * `dut`: device under test * `lan`: if set to true this interface belongs to LAN. * `input_dict` : Input dict data, required when configuring from testcase + * `expected` : expected results from API, by-default True Usage ----- @@ -1110,7 +1327,7 @@ def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None): @retry(attempts=11, wait=2, return_is_str=True) -def verify_ospf_database(tgen, topo, dut, input_dict): +def verify_ospf_database(tgen, topo, dut, input_dict, expected=True): """ This API is to verify ospf lsa's by running show ip ospf database command. 
@@ -1121,6 +1338,7 @@ def verify_ospf_database(tgen, topo, dut, input_dict): * `dut`: device under test * `input_dict` : Input dict data, required when configuring from testcase * `topo` : next to be verified + * `expected` : expected results from API, by-default True Usage ----- @@ -1273,7 +1491,7 @@ def verify_ospf_database(tgen, topo, dut, input_dict): @retry(attempts=10, wait=2, return_is_str=True) -def verify_ospf_summary(tgen, topo, dut, input_dict): +def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True): """ This API is to verify ospf routes by running show ip ospf interface command. @@ -1284,6 +1502,7 @@ def verify_ospf_summary(tgen, topo, dut, input_dict): * `topo` : topology descriptions * `dut`: device under test * `input_dict` : Input dict data, required when configuring from testcase + * `expected` : expected results from API, by-default True Usage ----- @@ -1349,3 +1568,667 @@ def verify_ospf_summary(tgen, topo, dut, input_dict): logger.debug("Exiting API: verify_ospf_summary()") return result + + + +@retry(attempts=10, wait=3, return_is_str=True) +def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None, + tag=None, metric=None, fib=None): + """ + This API is to verify ospf routes by running + show ip ospf route command. + + Parameters + ---------- + * `tgen` : Topogen object + * `dut`: device under test + * `input_dict` : Input dict data, required when configuring from testcase + * `next_hop` : next to be verified + * `tag` : tag to be verified + * `metric` : metric to be verified + * `fib` : True if the route is installed in FIB. + + Usage + ----- + input_dict = { + "r1": { + "static_routes": [ + { + "network": ip_net, + "no_of_ip": 1, + "routeType": "N" + } + ] + } + } + + result = verify_ospf6_rib(tgen, dut, input_dict,next_hop=nh) + + Returns + ------- + True or False (Error Message) + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + router_list = tgen.routers() + additional_nexthops_in_required_nhs = [] + found_hops = [] + for routerInput in input_dict.keys(): + for router, rnode in router_list.iteritems(): + if router != dut: + continue + + logger.info("Checking router %s RIB:", router) + + # Verifying RIB routes + command = "show ipv6 ospf route" + + found_routes = [] + missing_routes = [] + + if "static_routes" in input_dict[routerInput] or \ + "prefix" in input_dict[routerInput]: + if "prefix" in input_dict[routerInput]: + static_routes = input_dict[routerInput]["prefix"] + else: + static_routes = input_dict[routerInput]["static_routes"] + + + for static_route in static_routes: + cmd = "{}".format(command) + + cmd = "{} json".format(cmd) + + ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True) + + # Fix for PR 2644182 + try: + ospf_rib_json = ospf_rib_json['routes'] + except KeyError: + pass + + # Verifying output dictionary ospf_rib_json is not empty + if bool(ospf_rib_json) is False: + errormsg = "[DUT: {}] No routes found in OSPF6 route " \ + "table".format(router) + return errormsg + + network = static_route["network"] + no_of_ip = static_route.setdefault("no_of_ip", 1) + _tag = static_route.setdefault("tag", None) + _rtype = static_route.setdefault("routeType", None) + + + # Generating IPs for verification + ip_list = generate_ips(network, no_of_ip) + st_found = False + nh_found = False + for st_rt in ip_list: + st_rt = str(ipaddress.ip_network(frr_unicode(st_rt))) + + _addr_type = validate_ip_address(st_rt) + if _addr_type != 'ipv6': + continue + + if st_rt in ospf_rib_json: + + st_found = 
True + found_routes.append(st_rt) + + if fib and next_hop: + if type(next_hop) is not list: + next_hop = [next_hop] + + for mnh in range(0, len(ospf_rib_json[st_rt])): + if 'fib' in ospf_rib_json[st_rt][ + mnh]["nextHops"][0]: + found_hops.append([rib_r[ + "ip"] for rib_r in ospf_rib_json[ + st_rt][mnh]["nextHops"]]) + + if found_hops[0]: + missing_list_of_nexthops = \ + set(found_hops[0]).difference(next_hop) + additional_nexthops_in_required_nhs = \ + set(next_hop).difference(found_hops[0]) + + if additional_nexthops_in_required_nhs: + logger.info( + "Nexthop " + "%s is not active for route %s in " + "RIB of router %s\n", + additional_nexthops_in_required_nhs, + st_rt, dut) + errormsg = ( + "Nexthop {} is not active" + " for route {} in RIB of router" + " {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, dut)) + return errormsg + else: + nh_found = True + + elif next_hop and fib is None: + if type(next_hop) is not list: + next_hop = [next_hop] + found_hops = [rib_r['nextHop'] for rib_r in + ospf_rib_json[st_rt][ + "nextHops"]] + + if found_hops: + missing_list_of_nexthops = \ + set(found_hops).difference(next_hop) + additional_nexthops_in_required_nhs = \ + set(next_hop).difference(found_hops) + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route"\ + " %s in RIB of router %s\n", \ + additional_nexthops_in_required_nhs, \ + st_rt, dut) + errormsg=("Nexthop {} is Missing for "\ + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, dut)) + return errormsg + else: + nh_found = True + if _rtype: + if "destinationType" not in ospf_rib_json[ + st_rt]: + errormsg = ("[DUT: {}]: destinationType missing" + "for route {} in OSPF RIB \n".\ + format(dut, st_rt)) + return errormsg + elif _rtype != ospf_rib_json[st_rt][ + "destinationType"]: + errormsg = ("[DUT: {}]: destinationType mismatch" + "for route {} in OSPF RIB \n".\ + format(dut, st_rt)) + return errormsg + else: + logger.info("DUT: {}]: Found destinationType {}" + "for route {}".\ + format(dut, _rtype, st_rt)) + if tag: + if "tag" not in ospf_rib_json[ + st_rt]: + errormsg = ("[DUT: {}]: tag is not" + " present for" + " route {} in RIB \n".\ + format(dut, st_rt + )) + return errormsg + + if _tag != ospf_rib_json[ + st_rt]["tag"]: + errormsg = ("[DUT: {}]: tag value {}" + " is not matched for" + " route {} in RIB \n".\ + format(dut, _tag, st_rt, + )) + return errormsg + + if metric is not None: + if "type2cost" not in ospf_rib_json[ + st_rt]: + errormsg = ("[DUT: {}]: metric is" + " not present for" + " route {} in RIB \n".\ + format(dut, st_rt)) + return errormsg + + if metric != ospf_rib_json[ + st_rt]["type2cost"]: + errormsg = ("[DUT: {}]: metric value " + "{} is not matched for " + "route {} in RIB \n".\ + format(dut, metric, st_rt, + )) + return errormsg + + else: + missing_routes.append(st_rt) + + if nh_found: + logger.info("[DUT: {}]: Found next_hop {} for all OSPF" + " routes in RIB".format(router, next_hop)) + + if len(missing_routes) > 0: + errormsg = ("[DUT: {}]: Missing route in RIB, " + "routes: {}".\ + format(dut, missing_routes)) + return errormsg + + if found_routes: + logger.info("[DUT: %s]: Verified routes in RIB, found" + " routes are: %s\n", dut, found_routes) + result = True + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +@retry(attempts=3, wait=2, return_is_str=True) +def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None): + """ + This API is to verify ospf routes 
by running + show ip ospf interface command. + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : topology descriptions + * `dut`: device under test + * `lan`: if set to true this interface belongs to LAN. + * `input_dict` : Input dict data, required when configuring from testcase + + Usage + ----- + input_dict= { + 'r0': { + 'links':{ + 's1': { + 'ospf6':{ + 'priority':98, + 'timerDeadSecs': 4, + 'area': '0.0.0.3', + 'mcastMemberOspfDesignatedRouters': True, + 'mcastMemberOspfAllRouters': True, + 'ospfEnabled': True, + + } + } + } + } + } + result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) + + Returns + ------- + True or False (Error Message) + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + + for router, rnode in tgen.routers().iteritems(): + if 'ospf6' not in topo['routers'][router]: + continue + + if dut is not None and dut != router: + continue + + logger.info("Verifying OSPF interface on router %s:", router) + show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf interface json", + isjson=True) + + # Verifying output dictionary show_ospf_json is empty or not + if not bool(show_ospf_json): + errormsg = "OSPF6 is not running" + return errormsg + + # To find neighbor ip type + ospf_intf_data = input_dict[router]["links"] + for ospf_intf, intf_data in ospf_intf_data.items(): + intf = topo['routers'][router]['links'][ospf_intf]['interface'] + if intf in show_ospf_json: + for intf_attribute in intf_data['ospf6']: + if intf_data['ospf6'][intf_attribute] is not list: + if intf_data['ospf6'][intf_attribute] == show_ospf_json[ + intf][intf_attribute]: + logger.info("[DUT: %s] OSPF6 interface %s: %s is %s", + router, intf, intf_attribute, intf_data['ospf6'][ + intf_attribute]) + elif intf_data['ospf6'][intf_attribute] is list: + for addr_list in len(show_ospf_json[intf][intf_attribute]): + if show_ospf_json[intf][intf_attribute][addr_list][ + 'address'].split('/')[0] == intf_data['ospf6'][ + 'internetAddress'][0]['address']: + break + else: + errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \ + Expected is {}".format(router, intf, intf_attribute, + intf_data['ospf6'][intf_attribute], intf_data['ospf6'][ + intf_attribute]) + return errormsg + else: + errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \ + Expected is {}".format(router, intf, intf_attribute, + intf_data['ospf6'][intf_attribute], intf_data['ospf6'][ + intf_attribute]) + return errormsg + result = True + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +@retry(attempts=11, wait=2, return_is_str=True) +def verify_ospf6_database(tgen, topo, dut, input_dict): + """ + This API is to verify ospf lsa's by running + show ip ospf database command. 
+ + Parameters + ---------- + * `tgen` : Topogen object + * `dut`: device under test + * `input_dict` : Input dict data, required when configuring from testcase + * `topo` : next to be verified + + Usage + ----- + input_dict = { + "areas": { + "0.0.0.0": { + "routerLinkStates": { + "100.1.1.0-100.1.1.0": { + "LSID": "100.1.1.0", + "Advertised router": "100.1.1.0", + "LSA Age": 130, + "Sequence Number": "80000006", + "Checksum": "a703", + "Router links": 3 + } + }, + "networkLinkStates": { + "10.0.0.2-100.1.1.1": { + "LSID": "10.0.0.2", + "Advertised router": "100.1.1.1", + "LSA Age": 137, + "Sequence Number": "80000001", + "Checksum": "9583" + } + }, + }, + } + } + result = verify_ospf_database(tgen, topo, dut, input_dict) + + Returns + ------- + True or False (Error Message) + """ + + result = False + router = dut + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if 'ospf' not in topo['routers'][dut]: + errormsg = "[DUT: {}] OSPF is not configured on the router.".format( + dut) + return errormsg + + rnode = tgen.routers()[dut] + + logger.info("Verifying OSPF interface on router %s:", dut) + show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", + isjson=True) + # Verifying output dictionary show_ospf_json is empty or not + if not bool(show_ospf_json): + errormsg = "OSPF is not running" + return errormsg + + # for inter and inter lsa's + ospf_db_data = input_dict.setdefault("areas", None) + ospf_external_lsa = input_dict.setdefault( + 'asExternalLinkStates', None) + + if ospf_db_data: + for ospf_area, area_lsa in ospf_db_data.items(): + if ospf_area in show_ospf_json['areas']: + if 'routerLinkStates' in area_lsa: + for lsa in area_lsa['routerLinkStates']: + for rtrlsa in show_ospf_json['areas'][ospf_area][ + 'routerLinkStates']: + if lsa['lsaId'] == rtrlsa['lsaId'] and \ + lsa['advertisedRouter'] == rtrlsa[ + 'advertisedRouter']: + result = True + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Router " + "LSA %s", router, ospf_area, lsa) + break + else: + errormsg = \ + "[DUT: {}] OSPF LSDB area {}: expected" \ + " Router LSA is {}".format(router, ospf_area, lsa) + return errormsg + + if 'networkLinkStates' in area_lsa: + for lsa in area_lsa['networkLinkStates']: + for netlsa in show_ospf_json['areas'][ospf_area][ + 'networkLinkStates']: + if lsa in show_ospf_json['areas'][ospf_area][ + 'networkLinkStates']: + if lsa['lsaId'] == netlsa['lsaId'] and \ + lsa['advertisedRouter'] == netlsa[ + 'advertisedRouter']: + result = True + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Network " + "LSA %s", router, ospf_area, lsa) + break + else: + errormsg = \ + "[DUT: {}] OSPF LSDB area {}: expected" \ + " Network LSA is {}".format(router, ospf_area, lsa) + return errormsg + + if 'summaryLinkStates' in area_lsa: + for lsa in area_lsa['summaryLinkStates']: + for t3lsa in show_ospf_json['areas'][ospf_area][ + 'summaryLinkStates']: + if lsa['lsaId'] == t3lsa['lsaId'] and \ + lsa['advertisedRouter'] == t3lsa[ + 'advertisedRouter']: + result = True + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Summary " + "LSA %s", router, ospf_area, lsa) + break + else: + errormsg = \ + "[DUT: {}] OSPF LSDB area {}: expected" \ + " Summary LSA is {}".format(router, ospf_area, lsa) + return errormsg + + if 'nssaExternalLinkStates' in area_lsa: + for lsa in area_lsa['nssaExternalLinkStates']: + for t7lsa in show_ospf_json['areas'][ospf_area][ + 'nssaExternalLinkStates']: + if lsa['lsaId'] == t7lsa['lsaId'] and \ + 
lsa['advertisedRouter'] == t7lsa[ + 'advertisedRouter']: + result = True + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:Type7 " + "LSA %s", router, ospf_area, lsa) + break + else: + errormsg = \ + "[DUT: {}] OSPF LSDB area {}: expected" \ + " Type7 LSA is {}".format(router, ospf_area, lsa) + return errormsg + + if 'asbrSummaryLinkStates' in area_lsa: + for lsa in area_lsa['asbrSummaryLinkStates']: + for t4lsa in show_ospf_json['areas'][ospf_area][ + 'asbrSummaryLinkStates']: + if lsa['lsaId'] == t4lsa['lsaId'] and \ + lsa['advertisedRouter'] == t4lsa[ + 'advertisedRouter']: + result = True + break + if result: + logger.info( + "[DUT: %s] OSPF LSDB area %s:ASBR Summary " + "LSA %s", router, ospf_area, lsa) + result = True + else: + errormsg = \ + "[DUT: {}] OSPF LSDB area {}: expected" \ + " ASBR Summary LSA is {}".format( + router, ospf_area, lsa) + return errormsg + + if 'linkLocalOpaqueLsa' in area_lsa: + for lsa in area_lsa['linkLocalOpaqueLsa']: + try: + for lnklsa in show_ospf_json['areas'][ospf_area][ + 'linkLocalOpaqueLsa']: + if lsa['lsaId'] in lnklsa['lsaId'] and \ + 'linkLocalOpaqueLsa' in show_ospf_json[ + 'areas'][ospf_area]: + logger.info(( + "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA" + "%s", ospf_area, lsa)) + result = True + else: + errormsg = ("[DUT: FRR] OSPF LSDB area: {} " + "expected Opaque-LSA is {}, Found is {}".format( + ospf_area, lsa, show_ospf_json)) + raise ValueError (errormsg) + return errormsg + except KeyError: + errormsg = ("[DUT: FRR] linkLocalOpaqueLsa Not " + "present") + return errormsg + + if ospf_external_lsa: + for lsa in ospf_external_lsa: + try: + for t5lsa in show_ospf_json['asExternalLinkStates']: + if lsa['lsaId'] == t5lsa['lsaId'] and \ + lsa['advertisedRouter'] == t5lsa[ + 'advertisedRouter']: + result = True + break + except KeyError: + result = False + if result: + logger.info( + "[DUT: %s] OSPF LSDB:External LSA %s", + router, lsa) + result = True + else: + errormsg = \ + "[DUT: {}] OSPF LSDB : expected" \ + " External LSA is {}".format(router, lsa) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + + +def config_ospf6_interface (tgen, topo, input_dict=None, build=False, + load_config=True): + """ + API to configure ospf on router. + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `build` : Only for initial setup phase this is set as True. + * `load_config` : Loading the config to router this is set as True. 
+ + Usage + ----- + r1_ospf_auth = { + "r1": { + "links": { + "r2": { + "ospf": { + "authentication": 'message-digest', + "authentication-key": "ospf", + "message-digest-key": "10" + } + } + } + } + } + result = config_ospf6_interface(tgen, topo, r1_ospf_auth) + + Returns + ------- + True or False + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + if not input_dict: + input_dict = deepcopy(topo) + else: + input_dict = deepcopy(input_dict) + for router in input_dict.keys(): + config_data = [] + for lnk in input_dict[router]['links'].keys(): + if "ospf6" not in input_dict[router]['links'][lnk]: + logger.debug("Router %s: ospf6 configs is not present in" + "input_dict, passed input_dict", router, + input_dict) + continue + ospf_data = input_dict[router]['links'][lnk]['ospf6'] + data_ospf_area = ospf_data.setdefault("area", None) + data_ospf_auth = ospf_data.setdefault("authentication", None) + data_ospf_dr_priority = ospf_data.setdefault("priority", None) + data_ospf_cost = ospf_data.setdefault("cost", None) + data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None) + + try: + intf = topo['routers'][router]['links'][lnk]['interface'] + except KeyError: + intf = topo['switches'][router]['links'][lnk]['interface'] + + # interface + cmd = "interface {}".format(intf) + + config_data.append(cmd) + # interface area config + if data_ospf_area: + cmd = "ipv6 ospf area {}".format(data_ospf_area) + config_data.append(cmd) + + # interface ospf dr priority + if data_ospf_dr_priority: + cmd = "ipv6 ospf priority {}".format( + ospf_data["priority"]) + if 'del_action' in ospf_data: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + # interface ospf cost + if data_ospf_cost: + cmd = "ipv6 ospf cost {}".format( + ospf_data["cost"]) + if 'del_action' in ospf_data: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + # interface ospf mtu + if data_ospf_mtu: + cmd = "ipv6 ospf mtu-ignore" + if 'del_action' in ospf_data: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if build: + return config_data + else: + result = create_common_configuration(tgen, router, config_data, + "interface_config", + build=build) + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py index 61a5705a5..ce90717fa 100644 --- a/tests/topotests/lib/pim.py +++ b/tests/topotests/lib/pim.py @@ -496,7 +496,7 @@ def configure_pim_force_expire(tgen, topo, input_dict, build=False): # Verification APIs ############################################# @retry(attempts=6, wait=2, return_is_str=True) -def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None): +def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected=True): """ Verify all PIM neighbors are up and running, config is verified using "show ip pim neighbor" cli @@ -508,6 +508,7 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None): * `dut` : dut info * `iface` : link for which PIM nbr need to check * `nbr_ip` : neighbor ip of interface + * `expected` : expected results from API, by-default True Usage ----- @@ -619,7 +620,7 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None): @retry(attempts=21, wait=2, return_is_str=True) -def verify_igmp_groups(tgen, dut, interface, group_addresses): +def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True): """ Verify IGMP groups are received from an intended interface by running 
"show ip igmp groups" command @@ -630,6 +631,7 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses): * `dut`: device under test * `interface`: interface, from which IGMP groups would be received * `group_addresses`: IGMP group address + * `expected` : expected results from API, by-default True Usage ----- @@ -693,7 +695,7 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses): @retry(attempts=31, wait=2, return_is_str=True) def verify_upstream_iif( - tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1 + tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1, expected=True ): """ Verify upstream inbound interface is updated correctly @@ -708,6 +710,7 @@ def verify_upstream_iif( * `group_addresses`: IGMP group address * `joinState`: upstream join state * `refCount`: refCount value + * `expected` : expected results from API, by-default True Usage ----- @@ -845,7 +848,7 @@ def verify_upstream_iif( @retry(attempts=6, wait=2, return_is_str=True) -def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses): +def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, expected=True): """ Verify join state is updated correctly and join timer is running with the help of "show ip pim upstream" cli @@ -857,6 +860,7 @@ def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses): * `iif`: inbound interface * `src_address`: source address * `group_addresses`: IGMP group address + * `expected` : expected results from API, by-default True Usage ----- @@ -964,7 +968,7 @@ def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses): @retry(attempts=41, wait=2, return_is_dict=True) def verify_ip_mroutes( - tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0 + tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0, expected=True ): """ Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes @@ -980,7 +984,7 @@ def verify_ip_mroutes( * `oil`: Outgoing interface * `return_uptime`: If True, return uptime dict, default is False * `mwait`: Wait time, default is 0 - + * `expected` : expected results from API, by-default True Usage ----- @@ -1161,7 +1165,7 @@ def verify_ip_mroutes( @retry(attempts=31, wait=2, return_is_str=True) def verify_pim_rp_info( - tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None + tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None, expected=True ): """ Verify pim rp info by running "show ip pim rp-info" cli @@ -1176,6 +1180,7 @@ def verify_pim_rp_info( * `rp`: RP address * `source`: Source of RP * `iamrp`: User defined RP + * `expected` : expected results from API, by-default True Usage ----- @@ -1317,7 +1322,7 @@ def verify_pim_rp_info( @retry(attempts=31, wait=2, return_is_str=True) def verify_pim_state( - tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None + tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None, expected=True ): """ Verify pim state by running "show ip pim state" cli @@ -1331,6 +1336,7 @@ def verify_pim_state( * `group_addresses`: IGMP group address * `src_address`: source address, default = None * installed_fl` : Installed flag + * `expected` : expected results from API, by-default True Usage ----- @@ -1485,7 +1491,7 @@ def verify_pim_interface_traffic(tgen, input_dict): @retry(attempts=21, wait=2, return_is_str=True) -def verify_pim_interface(tgen, topo, dut, interface=None, 
interface_ip=None): +def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None, expected=True): """ Verify all PIM interface are up and running, config is verified using "show ip pim interface" cli @@ -1497,6 +1503,7 @@ def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None): * `dut` : device under test * `interface` : interface name * `interface_ip` : interface ip address + * `expected` : expected results from API, by-default True Usage ----- @@ -1791,7 +1798,7 @@ def clear_ip_igmp_interfaces(tgen, dut): @retry(attempts=10, wait=2, return_is_str=True) -def clear_ip_mroute_verify(tgen, dut): +def clear_ip_mroute_verify(tgen, dut, expected=True): """ Clear ip mroute by running "clear ip mroute" cli and verify mroutes are up again after mroute clear @@ -1800,6 +1807,8 @@ def clear_ip_mroute_verify(tgen, dut): ---------- * `tgen`: topogen object * `dut`: Device Under Test + * `expected` : expected results from API, by-default True + Usage ----- @@ -2165,7 +2174,7 @@ def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None): @retry(attempts=6, wait=2, return_is_str=True) -def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None): +def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True): """ Verify pim rp info by running "show ip pim rp-info" cli @@ -2177,6 +2186,7 @@ def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None): * `grp_addr`: IGMP group address * 'rp_source': source from which rp installed * 'rpadd': rp address + * `expected` : expected results from API, by-default True Usage ----- @@ -2267,7 +2277,7 @@ def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None): @retry(attempts=31, wait=2, return_is_str=True) -def verify_pim_bsr(tgen, topo, dut, bsr_ip): +def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True): """ Verify all PIM interface are up and running, config is verified using "show ip pim interface" cli @@ -2278,6 +2288,7 @@ def verify_pim_bsr(tgen, topo, dut, bsr_ip): * `topo` : json file data * `dut` : device under test * 'bsr' : bsr ip to be verified + * `expected` : expected results from API, by-default True Usage ----- @@ -2322,7 +2333,7 @@ def verify_pim_bsr(tgen, topo, dut, bsr_ip): @retry(attempts=31, wait=2, return_is_str=True) -def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None): +def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None, expected=True): """ Verify IP PIM upstream rpf, config is verified using "show ip pim neighbor" cli @@ -2336,6 +2347,7 @@ def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=N * `group_addresses` : list of group address for which upstream info needs to be checked * `rp` : RP address + * `expected` : expected results from API, by-default True Usage ----- @@ -2519,7 +2531,7 @@ def enable_disable_pim_bsm(tgen, router, intf, enable=True): @retry(attempts=31, wait=2, return_is_str=True) -def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None): +def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None, expected=True): """ Verify ip pim join by running "show ip pim join" cli @@ -2531,6 +2543,7 @@ def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address= * `interface`: interface name, from which PIM join would come * `group_addresses`: IGMP group address * `src_address`: Source address + * `expected` : expected results from API, 
by-default True Usage ----- @@ -2609,7 +2622,7 @@ def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address= @retry(attempts=31, wait=2, return_is_dict=True) -def verify_igmp_config(tgen, input_dict, stats_return=False): +def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True): """ Verify igmp interface details, verifying following configs: timerQueryInterval @@ -2623,6 +2636,7 @@ def verify_igmp_config(tgen, input_dict, stats_return=False): * `input_dict` : Input dict data, required to verify timer * `stats_return`: If user wants API to return statistics + * `expected` : expected results from API, by-default True Usage ----- @@ -2898,7 +2912,7 @@ def verify_igmp_config(tgen, input_dict, stats_return=False): @retry(attempts=31, wait=2, return_is_str=True) -def verify_pim_config(tgen, input_dict): +def verify_pim_config(tgen, input_dict, expected=True): """ Verify pim interface details, verifying following configs: drPriority @@ -2912,6 +2926,7 @@ def verify_pim_config(tgen, input_dict): * `tgen`: topogen object * `input_dict` : Input dict data, required to verify timer + * `expected` : expected results from API, by-default True Usage ----- @@ -3023,7 +3038,7 @@ def verify_pim_config(tgen, input_dict): @retry(attempts=21, wait=2, return_is_dict=True) -def verify_multicast_traffic(tgen, input_dict, return_traffic=False): +def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=True): """ Verify multicast traffic by running "show multicast traffic count json" cli @@ -3034,6 +3049,8 @@ def verify_multicast_traffic(tgen, input_dict, return_traffic=False): * `input_dict(dict)`: defines DUT, what and for which interfaces traffic needs to be verified * `return_traffic`: returns traffic stats + * `expected` : expected results from API, by-default True + Usage ----- input_dict = { @@ -3264,7 +3281,7 @@ def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses): @retry(attempts=21, wait=2, return_is_str=True) -def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag): +def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag, expected=True): """ Verify flag state for mroutes and make sure (*, G)/(S, G) are having coorect flags by running "show ip mroute" cli @@ -3276,6 +3293,7 @@ def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag): * `src_address`: source address * `group_addresses`: IGMP group address * `flag`: flag state, needs to be verified + * `expected` : expected results from API, by-default True Usage ----- @@ -3358,7 +3376,7 @@ def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag): @retry(attempts=21, wait=2, return_is_str=True) -def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip): +def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip, expected=True): """ Verify all IGMP interface are up and running, config is verified using "show ip igmp interface" cli @@ -3370,6 +3388,7 @@ def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip): * `dut` : device under test * `igmp_iface` : interface name * `interface_ip` : interface ip address + * `expected` : expected results from API, by-default True Usage ----- diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 553f2bc6c..ade593350 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -222,6 +222,22 @@ class Topogen(object): self.peern += 1 return self.gears[name] + def 
add_host(self, name, ip, defaultRoute): + """ + Adds a new host to the topology. This function has the following + parameters: + * `ip`: the peer address (e.g. '1.2.3.4/24') + * `defaultRoute`: the peer default route (e.g. 'via 1.2.3.1') + """ + if name is None: + name = "host{}".format(self.peern) + if name in self.gears: + raise KeyError("host already exists") + + self.gears[name] = TopoHost(self, name, ip=ip, defaultRoute=defaultRoute) + self.peern += 1 + return self.gears[name] + def add_link(self, node1, node2, ifname1=None, ifname2=None): """ Creates a connection between node1 and node2. The nodes can be the @@ -641,6 +657,8 @@ class TopoRouter(TopoGear): # Try to find relevant old logfiles in /tmp and delete them map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) + # Remove old valgrind files + map(os.remove, glob.glob("{}/{}.valgrind.*".format(self.logdir, self.name))) # Remove old core files map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 2a5bd1736..d1f60bfe0 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -1454,6 +1454,8 @@ class Router(Node): gdb_breakpoints = g_extra_config["gdb_breakpoints"] gdb_daemons = g_extra_config["gdb_daemons"] gdb_routers = g_extra_config["gdb_routers"] + valgrind_extra = g_extra_config["valgrind_extra"] + valgrind_memleaks = g_extra_config["valgrind_memleaks"] bundle_data = "" @@ -1503,7 +1505,14 @@ class Router(Node): ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype) else: binary = os.path.join(self.daemondir, daemon) + cmdenv = "ASAN_OPTIONS=log_path={0}.asan".format(daemon) + if valgrind_memleaks: + this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) + supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp")) + cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file) + if valgrind_extra: + cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes" cmdopt = "{} --log file:{}.log --log-level debug".format( daemon_opts, daemon ) diff --git a/tests/topotests/msdp_mesh_topo1/__init__.py b/tests/topotests/msdp_mesh_topo1/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/__init__.py diff --git a/tests/topotests/msdp_mesh_topo1/r1/bgpd.conf b/tests/topotests/msdp_mesh_topo1/r1/bgpd.conf new file mode 100644 index 000000000..953d90aa0 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r1/bgpd.conf @@ -0,0 +1,7 @@ +router bgp 65000 + neighbor 10.254.254.2 remote-as 65000 + neighbor 10.254.254.2 update-source 10.254.254.1 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/msdp_mesh_topo1/r1/ospfd.conf b/tests/topotests/msdp_mesh_topo1/r1/ospfd.conf new file mode 100644 index 000000000..c1adbd544 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r1/ospfd.conf @@ -0,0 +1,8 @@ +interface r1-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 +! +router ospf + network 192.168.1.0/24 area 0.0.0.0 + redistribute connected +! 
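The new Topogen.add_host() helper mirrors add_router() and add_switch(): it registers a TopoHost gear with an address and default route and reuses the peer counter for automatic naming. A short sketch of how a topology build() can use it, following the call made by the new MSDP mesh-group test below (names and addresses are illustrative):

    # Assumes this runs inside a Topo.build() where get_topogen(self) is
    # valid and router "r1" has already been added.
    tgen = get_topogen(self)
    tgen.add_host("h1", "192.168.10.2/24", "192.168.10.1")
    switch = tgen.add_switch("s3")
    switch.add_link(tgen.gears["r1"])
    switch.add_link(tgen.gears["h1"])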
diff --git a/tests/topotests/msdp_mesh_topo1/r1/pimd.conf b/tests/topotests/msdp_mesh_topo1/r1/pimd.conf new file mode 100644 index 000000000..49341efa5 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r1/pimd.conf @@ -0,0 +1,15 @@ +interface lo + ip pim + ip pim use-source 10.254.254.1 +! +interface r1-eth0 + ip pim +! +interface r1-eth1 + ip pim + ip igmp +! +ip pim rp 10.254.254.1 +ip msdp mesh-group mg-1 source 10.254.254.1 +ip msdp mesh-group mg-1 member 10.254.254.2 +ip msdp mesh-group mg-1 member 10.254.254.3 diff --git a/tests/topotests/msdp_mesh_topo1/r1/zebra.conf b/tests/topotests/msdp_mesh_topo1/r1/zebra.conf new file mode 100644 index 000000000..42c850f00 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r1/zebra.conf @@ -0,0 +1,11 @@ +ip forwarding +! +interface lo + ip address 10.254.254.1/32 +! +interface r1-eth0 + ip address 192.168.1.2/24 +! +interface r1-eth1 + ip address 192.168.10.1/24 +! diff --git a/tests/topotests/msdp_mesh_topo1/r2/bgpd.conf b/tests/topotests/msdp_mesh_topo1/r2/bgpd.conf new file mode 100644 index 000000000..f442efc60 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r2/bgpd.conf @@ -0,0 +1,10 @@ +router bgp 65000 + neighbor pg-1 peer-group + neighbor pg-1 update-source 10.254.254.1 + neighbor pg-1 remote-as 65000 + neighbor 10.254.254.1 peer-group pg-1 + neighbor 10.254.254.3 peer-group pg-1 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/msdp_mesh_topo1/r2/ospfd.conf b/tests/topotests/msdp_mesh_topo1/r2/ospfd.conf new file mode 100644 index 000000000..9e9ac5fb2 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r2/ospfd.conf @@ -0,0 +1,13 @@ +interface r2-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 +! +interface r2-eth1 + ip ospf hello-interval 2 + ip ospf dead-interval 10 +! +router ospf + network 192.168.1.0/24 area 0.0.0.0 + network 192.168.2.0/24 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/msdp_mesh_topo1/r2/pimd.conf b/tests/topotests/msdp_mesh_topo1/r2/pimd.conf new file mode 100644 index 000000000..9005263ed --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r2/pimd.conf @@ -0,0 +1,14 @@ +interface lo + ip pim + ip pim use-source 10.254.254.2 +! +interface r2-eth0 + ip pim +! +interface r2-eth1 + ip pim +! +ip pim rp 10.254.254.2 +ip msdp mesh-group mg-1 source 10.254.254.2 +ip msdp mesh-group mg-1 member 10.254.254.1 +ip msdp mesh-group mg-1 member 10.254.254.3 diff --git a/tests/topotests/msdp_mesh_topo1/r2/zebra.conf b/tests/topotests/msdp_mesh_topo1/r2/zebra.conf new file mode 100644 index 000000000..6b2619421 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r2/zebra.conf @@ -0,0 +1,11 @@ +ip forwarding +! +interface lo + ip address 10.254.254.2/32 +! +interface r2-eth0 + ip address 192.168.1.1/24 +! +interface r2-eth1 + ip address 192.168.2.1/24 +! diff --git a/tests/topotests/msdp_mesh_topo1/r3/bgpd.conf b/tests/topotests/msdp_mesh_topo1/r3/bgpd.conf new file mode 100644 index 000000000..6c3f89ad9 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r3/bgpd.conf @@ -0,0 +1,7 @@ +router bgp 65000 + neighbor 192.168.2.1 remote-as 65000 + neighbor 192.168.2.1 update-source 10.254.254.3 + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
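Each router's pimd.conf sources mesh-group mg-1 from its own loopback (the same address it gives to "ip pim use-source") and lists the other two loopbacks as members, so r1, r2 and r3 form a full MSDP mesh across 10.254.254.1, .2 and .3. A hedged sketch of how a test can poll for the resulting peerings, reusing the router_json_cmp/run_and_expect pattern the accompanying test file adopts (tgen is assumed to come from get_topogen()):

    from functools import partial
    from lib import topotest

    def expect_msdp_established(router, peer):
        # Poll "show ip msdp peer json" until the peer reports established.
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[router],
            "show ip msdp peer json",
            {peer: {"state": "established"}},
        )
        _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
        assert result is None, "{} never established MSDP peering with {}".format(
            router, peer)

    expect_msdp_established("r1", "10.254.254.2")
    expect_msdp_established("r1", "10.254.254.3")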
diff --git a/tests/topotests/msdp_mesh_topo1/r3/ospfd.conf b/tests/topotests/msdp_mesh_topo1/r3/ospfd.conf new file mode 100644 index 000000000..7b7b1abe6 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r3/ospfd.conf @@ -0,0 +1,8 @@ +interface r3-eth0 + ip ospf hello-interval 2 + ip ospf dead-interval 10 +! +router ospf + network 192.168.2.0/24 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/msdp_mesh_topo1/r3/pimd.conf b/tests/topotests/msdp_mesh_topo1/r3/pimd.conf new file mode 100644 index 000000000..30e114856 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r3/pimd.conf @@ -0,0 +1,15 @@ +interface lo + ip pim + ip pim use-source 10.254.254.3 +! +interface r3-eth0 + ip pim +! +interface r3-eth1 + ip pim + ip igmp +! +ip pim rp 10.254.254.3 +ip msdp mesh-group mg-1 source 10.254.254.3 +ip msdp mesh-group mg-1 member 10.254.254.1 +ip msdp mesh-group mg-1 member 10.254.254.2 diff --git a/tests/topotests/msdp_mesh_topo1/r3/zebra.conf b/tests/topotests/msdp_mesh_topo1/r3/zebra.conf new file mode 100644 index 000000000..a8a15f3c0 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/r3/zebra.conf @@ -0,0 +1,11 @@ +ip forwarding +! +interface lo + ip address 10.254.254.3/32 +! +interface r3-eth0 + ip address 192.168.2.2/24 +! +interface r3-eth1 + ip address 192.168.30.1/24 +! diff --git a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.dot b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.dot new file mode 100644 index 000000000..8792e2c7b --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.dot @@ -0,0 +1,88 @@ +## Color coding: +######################### +## Main FRR: #f08080 red +## Switches: #d0e0d0 gray +## RIP: #19e3d9 Cyan +## RIPng: #fcb314 dark yellow +## OSPFv2: #32b835 Green +## OSPFv3: #19e3d9 Cyan +## ISIS IPv4 #fcb314 dark yellow +## ISIS IPv6 #9a81ec purple +## BGP IPv4 #eee3d3 beige +## BGP IPv6 #fdff00 yellow +##### Colors (see http://www.color-hex.com/) + +graph template { + label="msdp_mesh_topo1"; + + # Routers + r1 [ + shape=doubleoctagon, + label="r1", + fillcolor="#f08080", + style=filled, + ]; + r2 [ + shape=doubleoctagon + label="r2", + fillcolor="#f08080", + style=filled, + ]; + r3 [ + shape=doubleoctagon + label="r3", + fillcolor="#f08080", + style=filled, + ]; + h1 [ + shape=doubleoctagon + label="h1", + fillcolor="#4f4f4f", + style=filled, + ]; + h2 [ + shape=doubleoctagon + label="h2", + fillcolor="#4f4f4f", + style=filled, + ]; + + # Switches + s1 [ + shape=oval, + label="sw1\n192.168.1.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s2 [ + shape=oval, + label="sw2\n192.168.2.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s3 [ + shape=oval, + label="sw3\n192.168.10.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + s4 [ + shape=oval, + label="sw3\n192.168.30.0/24", + fillcolor="#d0e0d0", + style=filled, + ]; + + # Connections + r1 -- s1 [label="eth0\n.2"]; + r2 -- s1 [label="eth0\n.1"]; + + r2 -- s2 [label="eth1\n.1"]; + r3 -- s2 [label="eth0\n.2"]; + + r1 -- s3 [label="eth1\n.1"]; + h1 -- s3 [label="eth0\n.2"]; + + r3 -- s4 [label="eth1\n.1"]; + h2 -- s4 [label="eth0\n.2"]; +} diff --git a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.png b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.png Binary files differnew file mode 100644 index 000000000..9a15b8b08 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.png diff --git a/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py new file mode 100644 
index 000000000..719ead091 --- /dev/null +++ b/tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python + +# +# test_msdp_mesh_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (C) 2021 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_msdp_mesh_topo1.py: Test the FRR PIM MSDP mesh groups. +""" + +import os +import sys +import json +from functools import partial +import pytest +import socket + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd, pytest.mark.pimd] + +# +# Test global variables: +# They are used to handle communicating with external application. +# +APP_SOCK_PATH = '/tmp/topotests/apps.sock' +HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py") +app_listener = None +app_clients = {} + +def listen_to_applications(): + "Start listening socket to connect with applications." + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + sock.bind(APP_SOCK_PATH) + sock.listen(10) + global app_listener + app_listener = sock + +def accept_host(host): + "Accept connection from application running in hosts." + global app_listener, app_clients + conn = app_listener.accept() + app_clients[host] = { + 'fd': conn[0], + 'address': conn[1] + } + +def close_applications(): + "Signal applications to stop and close all sockets." + global app_listener, app_clients + + # Close listening socket. + app_listener.close() + + # Remove old socket. + try: + os.unlink(APP_SOCK_PATH) + except OSError: + pass + + # Close all host connections. + for host in ["h1", "h2"]: + if app_clients.get(host) is None: + continue + app_clients["h1"]["fd"].close() + + +class MSDPMeshTopo1(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # Create 3 routers + for routern in range(1, 4): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + # Create stub networks for multicast traffic. 
+ tgen.add_host("h1", "192.168.10.2/24", "192.168.10.1") + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["h1"]) + + tgen.add_host("h2", "192.168.30.2/24", "192.168.30.1") + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["h2"]) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(MSDPMeshTopo1, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.items(): + daemon_file = "{}/{}/zebra.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_ZEBRA, daemon_file) + + daemon_file = "{}/{}/bgpd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_BGP, daemon_file) + + daemon_file = "{}/{}/ospfd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_OSPF, daemon_file) + + daemon_file = "{}/{}/pimd.conf".format(CWD, rname) + if os.path.isfile(daemon_file): + router.load_config(TopoRouter.RD_PIM, daemon_file) + + # Initialize all routers. + tgen.start_router() + + # Start applications socket. + listen_to_applications() + + +def test_wait_ospf_convergence(): + "Wait for OSPF to converge" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("waiting for protocols to converge") + + def expect_loopback_route(router, iptype, route, proto): + "Wait until route is present on RIB for protocol." + logger.info("waiting route {} in {}".format(route, router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show {} route json".format(iptype), + {route: [{"protocol": proto}]} + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=1) + assertmsg = '"{}" OSPF convergence failure'.format(router) + assert result is None, assertmsg + + # Wait for R1 <-> R2 convergence. + expect_loopback_route("r1", "ip", "10.254.254.2/32", "ospf") + # Wait for R1 <-> R3 convergence. + expect_loopback_route("r1", "ip", "10.254.254.3/32", "ospf") + + # Wait for R2 <-> R1 convergence. + expect_loopback_route("r2", "ip", "10.254.254.1/32", "ospf") + # Wait for R2 <-> R3 convergence. + expect_loopback_route("r2", "ip", "10.254.254.3/32", "ospf") + + # Wait for R3 <-> R1 convergence. + expect_loopback_route("r3", "ip", "10.254.254.1/32", "ospf") + # Wait for R3 <-> R2 convergence. + expect_loopback_route("r3", "ip", "10.254.254.2/32", "ospf") + + +def test_wait_msdp_convergence(): + "Wait for MSDP to converge" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("test MSDP convergence") + + tgen.gears["h1"].run("{} --send='0.7' '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, '229.0.1.10', 'h1-eth0')) + accept_host("h1") + + tgen.gears["h2"].run("{} '{}' '{}' '{}' &".format( + HELPER_APP_PATH, APP_SOCK_PATH, '229.0.1.10', 'h2-eth0')) + accept_host("h2") + + def expect_msdp_peer(router, peer, sa_count=0): + "Expect MSDP peer connection to be established with SA amount." + logger.info("waiting MSDP connection from peer {} on router {}".format(peer, router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show ip msdp peer json", + {peer: {"state": "established", "saCount": sa_count}} + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = '"{}" MSDP connection failure'.format(router) + assert result is None, assertmsg + + # R1 peers. 
+ expect_msdp_peer("r1", "10.254.254.2") + expect_msdp_peer("r1", "10.254.254.3") + + # R2 peers. + expect_msdp_peer("r2", "10.254.254.1", 1) + expect_msdp_peer("r2", "10.254.254.3") + + # R3 peers. + expect_msdp_peer("r3", "10.254.254.1", 1) + expect_msdp_peer("r3", "10.254.254.2") + + +def test_msdp_sa_configuration(): + "Expect the multicast traffic SA to be created" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("test MSDP SA") + + def expect_msdp_sa(router, source, group, local, rp, spt_setup): + "Expect MSDP SA." + logger.info("waiting MSDP SA on router {}".format(router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show ip msdp sa json", + {group: {source: {"local": local, "rp": rp, "sptSetup": spt_setup}}} + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = '"{}" MSDP SA failure'.format(router) + assert result is None, assertmsg + + source = "192.168.10.2" + group = "229.0.1.10" + rp = "10.254.254.1" + + # R1 SA. + expect_msdp_sa("r1", source, group, "yes", "-", "-") + + # R2 SA. + expect_msdp_sa("r2", source, group, "no", rp, "no") + + # R3 peers. + expect_msdp_sa("r3", source, group, "no", rp, "yes") + + +def teardown_module(_mod): + "Teardown the pytest environment" + tgen = get_topogen() + close_applications() + tgen.stop_topology() + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf6_topo1/r1/show_ipv6_route.ref b/tests/topotests/ospf6_topo1/r1/show_ipv6_route.ref index a2ddf7c5a..96489b075 100644 --- a/tests/topotests/ospf6_topo1/r1/show_ipv6_route.ref +++ b/tests/topotests/ospf6_topo1/r1/show_ipv6_route.ref @@ -4,6 +4,6 @@ O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, weight 1, XX:XX:XX O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1/r2/show_ipv6_route.ref b/tests/topotests/ospf6_topo1/r2/show_ipv6_route.ref index 1f642b1b2..78c1ad883 100644 --- a/tests/topotests/ospf6_topo1/r2/show_ipv6_route.ref +++ b/tests/topotests/ospf6_topo1/r2/show_ipv6_route.ref @@ -4,7 +4,7 @@ O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, weight 1, XX:XX:XX O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via 
fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1/r3/show_ipv6_route.ref b/tests/topotests/ospf6_topo1/r3/show_ipv6_route.ref index 8e3afa583..dc0acbe0c 100644 --- a/tests/topotests/ospf6_topo1/r3/show_ipv6_route.ref +++ b/tests/topotests/ospf6_topo1/r3/show_ipv6_route.ref @@ -4,7 +4,7 @@ O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, weight 1, XX:XX: O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, weight 1, XX:XX:XX O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, weight 1, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1/r4/show_ipv6_route.ref b/tests/topotests/ospf6_topo1/r4/show_ipv6_route.ref index 0df652ffb..730fd9f2d 100644 --- a/tests/topotests/ospf6_topo1/r4/show_ipv6_route.ref +++ b/tests/topotests/ospf6_topo1/r4/show_ipv6_route.ref @@ -6,4 +6,4 @@ O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, weight 1, XX:XX:XX O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1/test_ospf6_topo1.py b/tests/topotests/ospf6_topo1/test_ospf6_topo1.py index f8c3476e1..bbd18a57f 100644 --- a/tests/topotests/ospf6_topo1/test_ospf6_topo1.py +++ b/tests/topotests/ospf6_topo1/test_ospf6_topo1.py @@ -360,6 +360,36 @@ def test_linux_ipv6_kernel_routingTable(): ) +def test_ospfv3_routingTable_write_multiplier(): + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # For debugging, uncomment the next line + # tgen.mininet_cli() + + # Modify R1 write muliplier and reset the interfaces + r1 = tgen.gears["r1"] + + r1.vtysh_cmd("conf t\nrouter ospf6\n write-multiplier 100") + r1.vtysh_cmd("clear ipv6 ospf interface r1-stubnet") + r1.vtysh_cmd("clear ipv6 ospf interface r1-sw5") + + # Verify OSPFv3 Routing Table + for router, rnode in tgen.routers().items(): + logger.info('Waiting for router "%s" convergence', router) + + # Load expected results from the command + reffile = os.path.join(CWD, "{}/show_ipv6_route.ref".format(router)) + expected = open(reffile).read() + + # Run test function until we get an result. Wait at most 60 seconds. 
+ test_func = partial(compare_show_ipv6, router, expected) + result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5) + assert result, "OSPFv3 did not converge on {}:\n{}".format(router, diff) + + def test_shutdown_check_stderr(): tgen = get_topogen() diff --git a/tests/topotests/ospf6_topo1_vrf/README.md b/tests/topotests/ospf6_topo1_vrf/README.md index 3ed0b8fbe..18bca5c54 100644 --- a/tests/topotests/ospf6_topo1_vrf/README.md +++ b/tests/topotests/ospf6_topo1_vrf/README.md @@ -54,10 +54,12 @@ Simplified `R1` config (R1 is similar) hostname r1 ! interface r1-stubnet vrf r1-cust1 + ipv6 ospf6 area 0.0.0.0 ipv6 address fc00:1:1:1::1/64 ipv6 ospf6 network broadcast ! interface r1-sw5 vrf r1-cust1 + ipv6 ospf6 area 0.0.0.0 ipv6 address fc00:a:a:a::1/64 ipv6 ospf6 network broadcast ! @@ -65,8 +67,6 @@ Simplified `R1` config (R1 is similar) router-id 10.0.0.1 log-adjacency-changes detail redistribute static - interface r1-stubnet area 0.0.0.0 - interface r1-sw5 area 0.0.0.0 ! ipv6 route fc00:1111:1111:1111::/64 fc00:1:1:1::1234 vrf r1-cust1 @@ -75,14 +75,17 @@ Simplified `R3` config hostname r3 ! interface r3-stubnet vrf r3-cust1 + ipv6 ospf6 area 0.0.0.0 ipv6 address fc00:3:3:3::3/64 ipv6 ospf6 network broadcast ! interface r3-sw5 vrf r3-cust1 + ipv6 ospf6 area 0.0.0.0 ipv6 address fc00:a:a:a::3/64 ipv6 ospf6 network broadcast ! interface r3-sw6 vrf r3-cust1 + ipv6 ospf6 area 0.0.0.1 ipv6 address fc00:b:b:b::3/64 ipv6 ospf6 network broadcast ! @@ -90,9 +93,6 @@ Simplified `R3` config router-id 10.0.0.3 log-adjacency-changes detail redistribute static - interface r3-stubnet area 0.0.0.0 - interface r3-sw5 area 0.0.0.0 - interface r3-sw6 area 0.0.0.1 ! ipv6 route fc00:3333:3333:3333::/64 fc00:3:3:3::1234 vrf r3-cust1 diff --git a/tests/topotests/ospf6_topo1_vrf/r1/ospf6d.conf b/tests/topotests/ospf6_topo1_vrf/r1/ospf6d.conf index ed480354e..83bdfb7c8 100644 --- a/tests/topotests/ospf6_topo1_vrf/r1/ospf6d.conf +++ b/tests/topotests/ospf6_topo1_vrf/r1/ospf6d.conf @@ -9,12 +9,14 @@ debug ospf6 neighbor debug ospf6 route table debug ospf6 flooding ! -interface r1-stubnet vrf r1-cust1 +interface r1-stubnet + ipv6 ospf6 area 0.0.0.0 ipv6 ospf6 network broadcast ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 ! -interface r1-sw5 vrf r1-cust1 +interface r1-sw5 + ipv6 ospf6 area 0.0.0.0 ipv6 ospf6 network broadcast ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 @@ -23,8 +25,6 @@ router ospf6 vrf r1-cust1 ospf6 router-id 10.0.0.1 log-adjacency-changes detail redistribute static - interface r1-stubnet area 0.0.0.0 - interface r1-sw5 area 0.0.0.0 ! 
line vty exec-timeout 0 0 diff --git a/tests/topotests/ospf6_topo1_vrf/r1/show_ipv6_vrf_route.ref b/tests/topotests/ospf6_topo1_vrf/r1/show_ipv6_vrf_route.ref index a2ddf7c5a..96489b075 100644 --- a/tests/topotests/ospf6_topo1_vrf/r1/show_ipv6_vrf_route.ref +++ b/tests/topotests/ospf6_topo1_vrf/r1/show_ipv6_vrf_route.ref @@ -4,6 +4,6 @@ O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, weight 1, XX:XX:XX O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1_vrf/r2/ospf6d.conf b/tests/topotests/ospf6_topo1_vrf/r2/ospf6d.conf index 485771e7d..7fd01aa0c 100644 --- a/tests/topotests/ospf6_topo1_vrf/r2/ospf6d.conf +++ b/tests/topotests/ospf6_topo1_vrf/r2/ospf6d.conf @@ -9,12 +9,14 @@ debug ospf6 neighbor debug ospf6 route table debug ospf6 flooding ! -interface r2-stubnet vrf r2-cust1 +interface r2-stubnet + ipv6 ospf6 area 0.0.0.0 ipv6 ospf6 network broadcast ipv6 ospf6 dead-interval 10 ipv6 ospf6 hello-interval 2 ! -interface r2-sw5 vrf r2-cust1 +interface r2-sw5 + ipv6 ospf6 area 0.0.0.0 ipv6 ospf6 network broadcast ipv6 ospf6 dead-interval 10 ipv6 ospf6 hello-interval 2 @@ -23,8 +25,6 @@ router ospf6 vrf r2-cust1 ospf6 router-id 10.0.0.2 log-adjacency-changes detail redistribute static - interface r2-stubnet area 0.0.0.0 - interface r2-sw5 area 0.0.0.0 ! line vty exec-timeout 0 0 diff --git a/tests/topotests/ospf6_topo1_vrf/r2/show_ipv6_vrf_route.ref b/tests/topotests/ospf6_topo1_vrf/r2/show_ipv6_vrf_route.ref index 328961941..4c390f7cd 100644 --- a/tests/topotests/ospf6_topo1_vrf/r2/show_ipv6_vrf_route.ref +++ b/tests/topotests/ospf6_topo1_vrf/r2/show_ipv6_vrf_route.ref @@ -4,6 +4,6 @@ O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, weight 1, XX:XX:XX O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1_vrf/r3/ospf6d.conf b/tests/topotests/ospf6_topo1_vrf/r3/ospf6d.conf index f5837bf6f..df5aed3a6 100644 --- a/tests/topotests/ospf6_topo1_vrf/r3/ospf6d.conf +++ b/tests/topotests/ospf6_topo1_vrf/r3/ospf6d.conf @@ -9,17 +9,20 @@ debug ospf6 neighbor debug ospf6 route table debug ospf6 flooding ! 
-interface r3-stubnet vrf r3-cust1 +interface r3-stubnet + ipv6 ospf6 area 0.0.0.0 ipv6 ospf6 network broadcast ipv6 ospf6 dead-interval 10 ipv6 ospf6 hello-interval 2 ! -interface r3-sw5 vrf r3-cust1 +interface r3-sw5 + ipv6 ospf6 area 0.0.0.0 ipv6 ospf6 network broadcast ipv6 ospf6 dead-interval 10 ipv6 ospf6 hello-interval 2 ! -interface r3-sw6 vrf r3-cust1 +interface r3-sw6 + ipv6 ospf6 area 0.0.0.1 ipv6 ospf6 network broadcast ipv6 ospf6 dead-interval 10 ipv6 ospf6 hello-interval 2 @@ -28,9 +31,6 @@ router ospf6 vrf r3-cust1 ospf6 router-id 10.0.0.3 log-adjacency-changes detail redistribute static - interface r3-stubnet area 0.0.0.0 - interface r3-sw5 area 0.0.0.0 - interface r3-sw6 area 0.0.0.1 ! line vty exec-timeout 0 0 diff --git a/tests/topotests/ospf6_topo1_vrf/r3/show_ipv6_vrf_route.ref b/tests/topotests/ospf6_topo1_vrf/r3/show_ipv6_vrf_route.ref index ac713190f..989213f96 100644 --- a/tests/topotests/ospf6_topo1_vrf/r3/show_ipv6_vrf_route.ref +++ b/tests/topotests/ospf6_topo1_vrf/r3/show_ipv6_vrf_route.ref @@ -4,6 +4,6 @@ O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, weight 1, XX:XX: O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, weight 1, XX:XX:XX O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, weight 1, XX:XX:XX -O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX -O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX -O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX +O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX +O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1_vrf/r4/ospf6d.conf b/tests/topotests/ospf6_topo1_vrf/r4/ospf6d.conf index ab67d06ff..465defb40 100644 --- a/tests/topotests/ospf6_topo1_vrf/r4/ospf6d.conf +++ b/tests/topotests/ospf6_topo1_vrf/r4/ospf6d.conf @@ -9,12 +9,14 @@ debug ospf6 neighbor debug ospf6 route table debug ospf6 flooding ! -interface r4-stubnet vrf r4-cust1 +interface r4-stubnet + ipv6 ospf6 area 0.0.0.1 ipv6 ospf6 network broadcast ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 ! -interface r4-sw6 vrf r4-cust1 +interface r4-sw6 + ipv6 ospf6 area 0.0.0.1 ipv6 ospf6 network broadcast ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 @@ -23,8 +25,6 @@ router ospf6 vrf r4-cust1 ospf6 router-id 10.0.0.4 log-adjacency-changes detail redistribute static - interface r4-stubnet area 0.0.0.1 - interface r4-sw6 area 0.0.0.1 ! 
line vty exec-timeout 0 0 diff --git a/tests/topotests/ospf6_topo1_vrf/r4/show_ipv6_vrf_route.ref b/tests/topotests/ospf6_topo1_vrf/r4/show_ipv6_vrf_route.ref index 0df652ffb..730fd9f2d 100644 --- a/tests/topotests/ospf6_topo1_vrf/r4/show_ipv6_vrf_route.ref +++ b/tests/topotests/ospf6_topo1_vrf/r4/show_ipv6_vrf_route.ref @@ -6,4 +6,4 @@ O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, weight 1, XX:XX:XX O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX -O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX +O>* fc00:3333:3333:3333::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py index e1857abc4..b158099d9 100755 --- a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py +++ b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py @@ -5,7 +5,7 @@ # Part of NetDEF Topology Tests # # Copyright (c) 2021 by Niral Networks, Inc. ("Niral Networks") -# Used Copyright (c) 2016 by Network Device Education Foundation, +# Used Copyright (c) 2016 by Network Device Education Foundation, # Inc. ("NetDEF") in this file. # # Permission to use, copy, modify, and/or distribute this software @@ -179,13 +179,9 @@ def setup_module(mod): "ip link set {0}-stubnet master {0}-cust1", ] - cmds1 = [ - "ip link set {0}-sw5 master {0}-cust1", - ] + cmds1 = ["ip link set {0}-sw5 master {0}-cust1"] - cmds2 = [ - "ip link set {0}-sw6 master {0}-cust1", - ] + cmds2 = ["ip link set {0}-sw6 master {0}-cust1"] # For all registered routers, load the zebra configuration file for rname, router in tgen.routers().items(): @@ -219,6 +215,7 @@ def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_wait_protocol_convergence(): "Wait for OSPFv3 to converge" tgen = get_topogen() @@ -261,7 +258,7 @@ def compare_show_ipv6_vrf(rname, expected): # Use the vtysh output, with some masking to make comparison easy vrf_name = "{0}-cust1".format(rname) current = topotest.ip6_route_zebra(tgen.gears[rname], vrf_name) - + # Use just the 'O'spf lines of the output linearr = [] for line in current.splitlines(): @@ -331,7 +328,11 @@ def test_linux_ipv6_kernel_routingTable(): for i in range(1, 5): # Actual output from router - actual = tgen.gears["r{}".format(i)].run("ip -6 route show vrf r{}-cust1".format(i)).rstrip() + actual = ( + tgen.gears["r{}".format(i)] + .run("ip -6 route show vrf r{}-cust1".format(i)) + .rstrip() + ) if "nhid" in actual: refTableFile = os.path.join(CWD, "r{}/ip_6_address.nhg.ref".format(i)) else: @@ -362,9 +363,9 @@ def test_linux_ipv6_kernel_routingTable(): "unreachable fe80::/64 " ): continue - if 'anycast' in line: + if "anycast" in line: continue - if 'multicast' in line: + if "multicast" in line: continue filtered_lines.append(line) actual = "\n".join(filtered_lines).splitlines(1) @@ -398,6 +399,35 @@ def test_linux_ipv6_kernel_routingTable(): ) +def test_ospfv3_routingTable_write_multiplier(): + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # For debugging, uncomment the next line + # tgen.mininet_cli() + # Modify R1 write muliplier and reset the interfaces + r1 = tgen.gears["r1"] + + r1.vtysh_cmd("conf 
t\nrouter ospf6 vrf r1-cust1 \n write-multiplier 100") + r1.vtysh_cmd("clear ipv6 ospf interface r1-stubnet") + r1.vtysh_cmd("clear ipv6 ospf interface r1-sw5") + + # Verify OSPFv3 Routing Table + for router, rnode in tgen.routers().items(): + logger.info('Waiting for router "%s" convergence', router) + + # Load expected results from the command + reffile = os.path.join(CWD, "{}/show_ipv6_vrf_route.ref".format(router)) + expected = open(reffile).read() + + # Run test function until we get a result. Wait at most 60 seconds. + test_func = partial(compare_show_ipv6_vrf, router, expected) + result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5) + assert result, "OSPFv3 did not converge on {}:\n{}".format(router, diff) + + def test_shutdown_check_stderr(): tgen = get_topogen() diff --git a/tests/topotests/ospf6_topo2/r2/ospf6d.conf b/tests/topotests/ospf6_topo2/r2/ospf6d.conf index d4bb0e2a4..e88e965c7 100644 --- a/tests/topotests/ospf6_topo2/r2/ospf6d.conf +++ b/tests/topotests/ospf6_topo2/r2/ospf6d.conf @@ -6,12 +6,18 @@ interface r2-eth1 ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 10 ! +interface r2-eth2 + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 10 +! router ospf6 ospf6 router-id 10.254.254.2 redistribute connected redistribute static default-information originate always metric 123 area 0.0.0.1 stub + area 0.0.0.2 nssa interface r2-eth0 area 0.0.0.1 interface r2-eth1 area 0.0.0.0 + interface r2-eth2 area 0.0.0.2 ! diff --git a/tests/topotests/ospf6_topo2/r2/zebra.conf b/tests/topotests/ospf6_topo2/r2/zebra.conf index 891945a4e..559f502b0 100644 --- a/tests/topotests/ospf6_topo2/r2/zebra.conf +++ b/tests/topotests/ospf6_topo2/r2/zebra.conf @@ -6,3 +6,6 @@ interface r2-eth0 interface r2-eth1 ipv6 address 2001:db8:2::2/64 ! +interface r2-eth2 + ipv6 address 2001:db8:3::1/64 +! diff --git a/tests/topotests/ospf6_topo2/r4/ospf6d.conf b/tests/topotests/ospf6_topo2/r4/ospf6d.conf new file mode 100644 index 000000000..813c0abff --- /dev/null +++ b/tests/topotests/ospf6_topo2/r4/ospf6d.conf @@ -0,0 +1,9 @@ +interface r4-eth0 + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 10 +! +router ospf6 + ospf6 router-id 10.254.254.4 + area 0.0.0.2 nssa + interface r4-eth0 area 0.0.0.2 +! diff --git a/tests/topotests/ospf6_topo2/r4/zebra.conf b/tests/topotests/ospf6_topo2/r4/zebra.conf new file mode 100644 index 000000000..86cb972a9 --- /dev/null +++ b/tests/topotests/ospf6_topo2/r4/zebra.conf @@ -0,0 +1,5 @@ +ipv6 forwarding +! +interface r4-eth0 + ipv6 address 2001:db8:3::2/64 +!
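The two new r4 files above place r4 into NSSA area 0.0.0.2 behind r2 (r2-eth2 and r4-eth0 share 2001:db8:3::/64). As a minimal hand-run sketch of how the same topotest helpers used throughout this change could verify that setup, the snippet below polls r4 until the backbone prefix 2001:db8:2::/64 (configured on r2-eth1 above) appears as an OSPFv3 route. It reuses topotest.router_json_cmp and topotest.run_and_expect exactly as the new tests do, but the helper name wait_r4_interarea_route and the "pathType": "IA" value are illustrative assumptions, not part of this commit.

    from functools import partial
    from lib import topotest
    from lib.topogen import get_topogen

    def wait_r4_interarea_route(wait=10):
        "Poll r4 until 2001:db8:2::/64 shows up as an OSPFv3 route."
        tgen = get_topogen()
        # "IA" (inter-area) is an assumed pathType value; adjust to the
        # actual JSON output of "show ipv6 ospf6 route json" if it differs.
        expected = {"routes": {"2001:db8:2::/64": {"pathType": "IA"}}}
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears["r4"],
            "show ipv6 ospf6 route json",
            expected,
        )
        _, result = topotest.run_and_expect(test_func, None, count=wait, wait=1)
        assert result is None, '"r4" did not learn 2001:db8:2::/64 via OSPFv3'

The committed tests use the same polling pattern in expect_ospfv3_routes and test_ospfv3_expected_route_types, so this is only a hand-run variant of what the suite already checks.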
diff --git a/tests/topotests/ospf6_topo2/test_ospf6_topo2.dot b/tests/topotests/ospf6_topo2/test_ospf6_topo2.dot index ba7a36f2b..238ec7a5a 100644 --- a/tests/topotests/ospf6_topo2/test_ospf6_topo2.dot +++ b/tests/topotests/ospf6_topo2/test_ospf6_topo2.dot @@ -34,6 +34,12 @@ graph template { fillcolor="#f08080", style=filled, ]; + r4 [ + shape=doubleoctagon + label="r4", + fillcolor="#f08080", + style=filled, + ]; # Switches sw1 [ @@ -62,10 +68,16 @@ graph template { } subgraph cluster1 { + label="area 0.0.0.2"; + r4 -- sw3 [label="eth0\n.2"]; + } + + subgraph cluster2 { label="area 0.0.0.0"; r2 -- sw1 [label="eth0\n.1"]; r2 -- sw2 [label="eth1\n.2"]; + r2 -- sw3 [label="eth2\n.1"]; + r3 -- sw2 [label="eth0\n.1"]; - r3 -- sw3 [label="eth1\n.2"]; } } diff --git a/tests/topotests/ospf6_topo2/test_ospf6_topo2.png b/tests/topotests/ospf6_topo2/test_ospf6_topo2.png Binary files differindex ee1de6073..4e79559a6 100644 --- a/tests/topotests/ospf6_topo2/test_ospf6_topo2.png +++ b/tests/topotests/ospf6_topo2/test_ospf6_topo2.png diff --git a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py index efc8565bb..0fe5228ce 100644 --- a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py +++ b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py @@ -47,6 +47,49 @@ from mininet.topo import Topo pytestmark = [pytest.mark.ospf6d] +def expect_lsas(router, area, lsas, wait=5, extra_params=""): + """ + Run the OSPFv3 show LSA database command and expect the supplied LSAs. + + Optional parameters: + * `wait`: amount of seconds to wait. + * `extra_params`: extra LSA database parameters. + * `inverse`: assert the inverse of the expected. + """ + tgen = get_topogen() + + command = "show ipv6 ospf6 database {} json".format(extra_params) + + logger.info("waiting OSPFv3 router '{}' LSA".format(router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + command, + {"areaScopedLinkStateDb": [{"areaId": area, "lsa": lsas}]}, + ) + _, result = topotest.run_and_expect(test_func, None, count=wait, wait=1) + assertmsg = '"{}" convergence failure'.format(router) + + assert result is None, assertmsg + + +def expect_ospfv3_routes(router, routes, wait=5): + "Run command `ipv6 ospf6 route` and expect route with type." 
+ tgen = get_topogen() + + logger.info("waiting OSPFv3 router '{}' route".format(router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show ipv6 ospf6 route json", + {"routes": routes} + ) + _, result = topotest.run_and_expect(test_func, None, count=wait, wait=1) + assertmsg = '"{}" convergence failure'.format(router) + + assert result is None, assertmsg + + class OSPFv3Topo2(Topo): "Test topology builder" @@ -54,8 +97,8 @@ class OSPFv3Topo2(Topo): "Build function" tgen = get_topogen(self) - # Create 3 routers - for routern in range(1, 4): + # Create 4 routers + for routern in range(1, 5): tgen.add_router("r{}".format(routern)) switch = tgen.add_switch("s1") @@ -66,6 +109,10 @@ class OSPFv3Topo2(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r3"]) + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) + def setup_module(mod): "Sets up the pytest environment" @@ -110,7 +157,52 @@ def test_wait_protocol_convergence(): expect_neighbor_full("r1", "10.254.254.2") expect_neighbor_full("r2", "10.254.254.1") expect_neighbor_full("r2", "10.254.254.3") + expect_neighbor_full("r2", "10.254.254.4") expect_neighbor_full("r3", "10.254.254.2") + expect_neighbor_full("r4", "10.254.254.2") + + +def test_ospfv3_expected_route_types(): + "Test routers route type to determine if NSSA/Stub is working as expected." + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("waiting for protocols to converge") + + def expect_ospf6_route_types(router, expected_summary): + "Expect the correct route types." + logger.info("waiting OSPFv3 router '{}'".format(router)) + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + "show ipv6 ospf6 route summary json", + expected_summary, + ) + _, result = topotest.run_and_expect(test_func, None, count=10, wait=1) + assertmsg = '"{}" convergence failure'.format(router) + assert result is None, assertmsg + + # Stub router: no external routes. + expect_ospf6_route_types( + "r1", + { + "numberOfIntraAreaRoutes": 1, + "numberOfInterAreaRoutes": 3, + "numberOfExternal1Routes": 0, + "numberOfExternal2Routes": 0, + }, + ) + # NSSA router: no external routes. + expect_ospf6_route_types( + "r4", + { + "numberOfIntraAreaRoutes": 1, + "numberOfInterAreaRoutes": 2, + "numberOfExternal1Routes": 0, + "numberOfExternal2Routes": 0, + }, + ) def test_ospf6_default_route(): @@ -134,36 +226,96 @@ def test_ospf6_default_route(): assertmsg = '"{}" convergence failure'.format(router) assert result is None, assertmsg - def expect_lsa(router, area, prefix, metric): - "Test OSPF6 LSA existence." 
- logger.info("waiting OSPFv3 router '{}' LSA".format(router)) - test_func = partial( - topotest.router_json_cmp, - tgen.gears[router], - "show ipv6 ospf6 database inter-prefix detail json", - { - "areaScopedLinkStateDb": [ - { - "areaId": area, - "lsa": [ - { - "prefix": prefix, - "metric": metric, - } - ], - } - ] - }, - ) - _, result = topotest.run_and_expect(test_func, None, count=4, wait=1) - assertmsg = '"{}" convergence failure'.format(router) - assert result is None, assertmsg - metric = 123 - expect_lsa("r1", "0.0.0.1", "::/0", metric) + expect_lsas( + "r1", + "0.0.0.1", + [{"prefix": "::/0", "metric": metric}], + extra_params="inter-prefix detail", + ) expect_route("r1", "::/0", metric + 10) +def test_nssa_lsa_type7(): + """ + Test that a static route gets announced as an external route when redistributed + and gets removed when the redistribution stops. + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # + # Add new static route and check if it gets announced as LSA Type-7. + # + config = """ + configure terminal + ipv6 route 2001:db8:100::/64 Null0 + """ + tgen.gears["r2"].vtysh_cmd(config) + + lsas = [ + { + "type": "NSSA", + "advertisingRouter": "10.254.254.2", + "prefix": "2001:db8:100::/64", + "forwardingAddress": "2001:db8:3::1", + } + ] + route = { + "2001:db8:100::/64": { + "pathType": "E1", + "nextHops": [ + {"nextHop": "::", "interfaceName": "r4-eth0"} + ] + } + } + + logger.info("Expecting LSA type-7 and OSPFv3 route 2001:db8:100::/64 to show up") + expect_lsas("r4", "0.0.0.2", lsas, wait=30, extra_params="type-7 detail") + expect_ospfv3_routes("r4", route, wait=30) + + # + # Remove static route and check for LSA Type-7 removal. + # + config = """ + configure terminal + no ipv6 route 2001:db8:100::/64 Null0 + """ + tgen.gears["r2"].vtysh_cmd(config) + + def dont_expect_lsa(unexpected_lsa): + "Specialized test function to expect the LSA to go missing" + output = tgen.gears["r4"].vtysh_cmd("show ipv6 ospf6 database type-7 detail json", isjson=True) + for lsa in output["areaScopedLinkStateDb"][0]["lsa"]: + if lsa["prefix"] == unexpected_lsa["prefix"]: + if lsa["forwardingAddress"] == unexpected_lsa["forwardingAddress"]: + return lsa + return None + + def dont_expect_route(unexpected_route): + "Specialized test function to expect the route to go missing" + output = tgen.gears["r4"].vtysh_cmd("show ipv6 ospf6 route json", isjson=True) + if unexpected_route in output["routes"]: + return output["routes"][unexpected_route] + return None + + + logger.info("Expecting LSA type-7 and OSPFv3 route 2001:db8:100::/64 to go away") + + # Test that LSA doesn't exist. + test_func = partial(dont_expect_lsa, lsas[0]) + _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) + assertmsg = '"{}" LSA still exists'.format("r4") + assert result is None, assertmsg + + # Test that route doesn't exist.
+ test_func = partial(dont_expect_route, "2001:db8:100::/64") + _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) + assertmsg = '"{}" route still exists'.format("r4") + assert result is None, assertmsg + + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() diff --git a/tests/topotests/ospf_basic_functionality/ospf_asbr_summary_topo1.json b/tests/topotests/ospf_basic_functionality/ospf_asbr_summary_topo1.json new file mode 100644 index 000000000..1ddccb81b --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/ospf_asbr_summary_topo1.json @@ -0,0 +1,195 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 24 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32 + }, + "routers": { + "r0": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r3-link0": { + "ipv4": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r2": {}, + "r3": {} + } + } + }, + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3-link0": { + "ipv4": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf": { + "router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r0-link0": { + "ipv4": "auto", + "description": "DummyIntftoR0" + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link0": { + "ipv4": "auto", + "description": "DummyIntftoR1", + "ospf": { + "area": "0.0.0.0" + } + } + }, + "ospf": { + "router_id": "100.1.1.3", + "neighbors": { + "r0": {}, + "r1": {}, + "r2": {} + } + } + } + } +} diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py new file mode 100644 index 000000000..88b549732 --- /dev/null +++ 
b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py @@ -0,0 +1,779 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Summarisation Functionality Automation.""" +import os +import sys +import time +import pytest +import json + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress +from time import sleep + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + kill_router_daemons, + write_test_footer, + reset_config_on_routers, + stop_router, + start_router, + verify_rib, + create_static_routes, + step, + start_router_daemons, + create_route_maps, + shutdown_bringup_interface, + topo_daemons, + create_prefix_lists, + create_route_maps, + create_interfaces_cfg, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.ospf import ( + verify_ospf_neighbor, + clear_ospf, + verify_ospf_rib, + create_router_ospf, + verify_ospf_summary, +) + +# Global variables +topo = None +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospf_asbr_summary_topo1.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + ] +} +NETWORK_11 = {"ipv4": ["11.0.20.6/32", "11.0.20.7/32"]} + +NETWORK2 = { + "ipv4": [ + "12.0.20.1/32", + "12.0.20.2/32", + "12.0.20.3/32", + "12.0.20.4/32", + "12.0.20.5/32", + ] +} +SUMMARY = {"ipv4": ["11.0.0.0/8", "12.0.0.0/8", "11.0.0.0/24"]} +""" +TOPOOLOGY = + Please view in a fixed-width font such as Courier. + +---+ A0 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A0 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A0 +---+ + +TESTCASES = +1. OSPF summarisation functionality. +2. OSPF summarisation with metric type 2. +3. OSPF summarisation with Tag option +4. OSPF summarisation with advertise and no advertise option +5. OSPF summarisation Chaos. +6. OSPF summarisation with route map filtering. +7. OSPF summarisation with route map modification of metric type. +8. 
OSPF CLI Show verify ospf ASBR summary config and show commands behaviours. +""" + + +class CreateTopo(Topo): + """ + Test topology builder. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def red_static(dut, config=True): + """ + Local 'def' for Redstribute static routes inside ospf. + + Parameters + ---------- + * `dut` : DUT on which configs have to be made. + * `config` : True or False, True by default for configure, set False for + unconfiguration. + """ + global topo + tgen = get_topogen() + if config: + ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}} + else: + ospf_red = { + dut: {"ospf": {"redistribute": [{"redist_type": "static", "delete": True}]}} + } + result = create_router_ospf(tgen, topo, ospf_red) + assert result is True, "Testcase : Failed \n Error: {}".format(result) + + +def red_connected(dut, config=True): + """ + Local 'def' for Redstribute connected routes inside ospf + + Parameters + ---------- + * `dut` : DUT on which configs have to be made. + * `config` : True or False, True by default for configure, set False for + unconfiguration. + """ + global topo + tgen = get_topogen() + if config: + ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}} + else: + ospf_red = { + dut: { + "ospf": {"redistribute": [{"redist_type": "connected", "delete": True}]} + } + } + result = create_router_ospf(tgen, topo, ospf_red) + assert result is True, "Testcase: Failed \n Error: {}".format(result) + + +# ################################## +# Test cases start here. +# ################################## + + +def test_ospf_type5_summary_tc43_p0(request): + """OSPF summarisation with metric type 2.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + protocol = "ospf" + + step( + "Configure 5 static routes from the same network on R0" + "5 static routes from different networks and redistribute in R0" + ) + input_dict_static_rtes = { + "r0": { + "static_routes": [ + {"network": NETWORK["ipv4"], "next_hop": "blackhole"}, + {"network": NETWORK2["ipv4"], "next_hop": "blackhole"}, + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r0" + red_static(dut) + + step("Verify that routes are learnt on R1.") + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step( + "Configure External Route summary in R0 to summarise 5" " routes to one route." + ) + ospf_summ_r1 = { + "r0": { + "ospf": { + "summary-address": [ + {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"} + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1." + ) + input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv4"][0]: { + "Summary address": SUMMARY["ipv4"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step("Change the summary address mask to lower match (ex - 16 to 8)") + ospf_summ_r1 = { + "r0": { + "ospf": { + "summary-address": [ + {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "16"} + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "11.0.0.0/16": { + "Summary address": "11.0.0.0/16", + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with newly configured mask." 
+ ) + + input_dict_summary = {"r0": {"static_routes": [{"network": "11.0.0.0/16"}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step("Change the summary address mask to higher match (ex - 8 to 24)") + ospf_summ_r1 = { + "r0": { + "ospf": { + "summary-address": [ + {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "24"} + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "11.0.0.0/16": { + "Summary address": "11.0.0.0/24", + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 0, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with newly configured mask." + ) + step("Configure 2 summary address with different mask of same network.") + step( + "Verify that external routes(static / connected) are summarised " + "to configured summary address with highest match." + ) + + input_dict_summary = {"r0": {"static_routes": [{"network": "11.0.0.0/16"}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step(" Un configure one of the summary address.") + ospf_summ_r1 = { + "r0": { + "ospf": { + "summary-address": [ + { + "prefix": SUMMARY["ipv4"][0].split("/")[0], + "mask": "24", + "delete": True, + } + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with newly configured mask." + ) + + input_dict_summary = {"r0": {"static_routes": [{"network": "11.0.0.0/16"}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + ospf_summ_r1 = { + "r0": { + "ospf": { + "summary-address": [ + {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "24"} + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that external routes(static / connected) are summarised " + "to configured summary address with highest match." 
+ ) + input_dict_summary = {"r0": {"static_routes": [{"network": "11.0.0.0/16"}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_ospf_type5_summary_tc48_p0(request): + """OSPF summarisation with route map modification of metric type.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + protocol = "ospf" + + step( + "Configure 5 static routes from the same network on R0" + "5 static routes from different networks and redistribute in R0" + ) + input_dict_static_rtes = { + "r0": { + "static_routes": [ + {"network": NETWORK["ipv4"], "next_hop": "blackhole"}, + {"network": NETWORK2["ipv4"], "next_hop": "blackhole"}, + ] + } + } + result = create_static_routes(tgen, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + dut = "r0" + red_static(dut) + + step("Verify that routes are learnt on R1.") + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_static_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step( + "Configure External Route summary in R0 to summarise 5" " routes to one route." + ) + + ospf_summ_r1 = { + "r0": { + "ospf": { + "summary-address": [ + {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"} + ] + } + } + } + result = create_router_ospf(tgen, topo, ospf_summ_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that external routes are summarised to configured summary " + "address on R0 after 5 secs of delay timer expiry and only one " + "route is sent to R1." 
+ ) + input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step("Verify that show ip ospf summary should show the summaries.") + input_dict = { + SUMMARY["ipv4"][0]: { + "Summary address": SUMMARY["ipv4"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step("Verify that originally advertised routes are withdraw from there" " peer.") + input_dict = { + "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]} + } + dut = "r1" + result = verify_ospf_rib(tgen, dut, input_dict, expected=False) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result is not True + ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name) + + step( + "Configure route map and & rule to permit configured summary address," + " redistribute static & connected routes with the route map." + ) + step("Configure prefixlist to permit the static routes, add to route map.") + # Create ip prefix list + pfx_list = { + "r0": { + "prefix_lists": { + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } + } + } + } + result = create_prefix_lists(tgen, pfx_list) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv4": [ + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + } + ] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + ospf_red_r1 = { + "r0": { + "ospf": { + "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that external routes are summarised to configured" + "summary address on R0 and only one route is sent to R1. Verify that " + "show ip ospf summary should show the configure summaries." 
+ ) + + input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}} + dut = "r1" + + result = verify_ospf_rib(tgen, dut, input_dict_summary) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + input_dict = { + SUMMARY["ipv4"][0]: { + "Summary address": SUMMARY["ipv4"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step("Configure metric type as 1 in route map.") + + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv4": [{"action": "permit", "set": {"metric-type": "type-1"}}] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with metric type 2." + ) + input_dict = { + SUMMARY["ipv4"][0]: { + "Summary address": SUMMARY["ipv4"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step("Un configure metric type from route map.") + + routemaps = { + "r0": { + "route_maps": { + "rmap_ipv4": [ + { + "action": "permit", + "set": {"metric-type": "type-1"}, + "delete": True, + } + ] + } + } + } + result = create_route_maps(tgen, routemaps) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that external routes(static / connected) are summarised" + " to configured summary address with metric type 2." 
+ ) + input_dict = { + SUMMARY["ipv4"][0]: { + "Summary address": SUMMARY["ipv4"][0], + "Metric-type": "E2", + "Metric": 20, + "Tag": 0, + "External route count": 5, + } + } + dut = "r0" + result = verify_ospf_summary(tgen, topo, dut, input_dict) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name) + + step("Change rule from permit to deny in prefix list.") + pfx_list = { + "r0": { + "prefix_lists": { + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "deny"} + ] + } + } + } + } + result = create_prefix_lists(tgen, pfx_list) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf_topo1/r1/ospf6route.txt b/tests/topotests/ospf_topo1/r1/ospf6route.txt index 1bfd6942e..d01511c0e 100644 --- a/tests/topotests/ospf_topo1/r1/ospf6route.txt +++ b/tests/topotests/ospf_topo1/r1/ospf6route.txt @@ -1,13 +1,13 @@ *N IA 2001:db8:1::/64 :: r1-eth0 00:02:11 *N IA 2001:db8:2::/64 fe80::b038:bcff:fe27:e2d6 r1-eth1 00:02:06 - N E1 2001:db8:2::/64 fe80::b038:bcff:fe27:e2d6 r1-eth1 00:02:06 + N E2 2001:db8:2::/64 fe80::b038:bcff:fe27:e2d6 r1-eth1 00:02:06 *N IA 2001:db8:3::/64 :: r1-eth1 00:02:11 - N E1 2001:db8:3::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 - N E1 2001:db8:3::/64 fe80::b038:bcff:fe27:e2d6 r1-eth1 00:02:06 + N E2 2001:db8:3::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 + N E2 2001:db8:3::/64 fe80::b038:bcff:fe27:e2d6 r1-eth1 00:02:06 *N IA 2001:db8:100::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 - N E1 2001:db8:100::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 + N E2 2001:db8:100::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 *N IE 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 - N E1 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 - N E1 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:04 + N E2 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:06 + N E2 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:04 *N IE 2001:db8:300::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:04 - N E1 2001:db8:300::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:04 + N E2 2001:db8:300::/64 fe80::50b7:d8ff:fe5f:8ff0 r1-eth1 00:02:04 diff --git a/tests/topotests/ospf_topo1/r1/ospf6route_down.txt b/tests/topotests/ospf_topo1/r1/ospf6route_down.txt index 1ce96c86c..57113d049 100644 --- a/tests/topotests/ospf_topo1/r1/ospf6route_down.txt +++ b/tests/topotests/ospf_topo1/r1/ospf6route_down.txt @@ -1,5 +1,5 @@ *N IA 2001:db8:1::/64 :: r1-eth0 00:01:51 *N IA 2001:db8:2::/64 fe80::281a:23ff:fe22:8a40 r1-eth1 00:00:52 - N E1 2001:db8:2::/64 fe80::281a:23ff:fe22:8a40 r1-eth1 00:00:52 + N E2 2001:db8:2::/64 fe80::281a:23ff:fe22:8a40 r1-eth1 00:00:52 *N IA 2001:db8:3::/64 :: r1-eth1 00:00:52 - N E1 2001:db8:3::/64 fe80::281a:23ff:fe22:8a40 r1-eth1 00:00:52 + N E2 2001:db8:3::/64 fe80::281a:23ff:fe22:8a40 r1-eth1 00:00:52 diff --git a/tests/topotests/ospf_topo1/r1/ospf6route_ecmp.txt b/tests/topotests/ospf_topo1/r1/ospf6route_ecmp.txt index 4df6e5ec0..48e9209a0 100644 --- a/tests/topotests/ospf_topo1/r1/ospf6route_ecmp.txt +++ b/tests/topotests/ospf_topo1/r1/ospf6route_ecmp.txt @@ -1,13 +1,13 @@ *N IA 2001:db8:1::/64 :: r1-eth0 00:06:13 *N IA 2001:db8:2::/64 fe80::e8bb:62ff:fee8:7022 r1-eth1 00:06:08 - N E1 2001:db8:2::/64 fe80::e8bb:62ff:fee8:7022 r1-eth1 00:06:08 + N E2 2001:db8:2::/64 
fe80::e8bb:62ff:fee8:7022 r1-eth1 00:06:08 *N IA 2001:db8:3::/64 :: r1-eth1 00:06:13 - N E1 2001:db8:3::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 + N E2 2001:db8:3::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 fe80::e8bb:62ff:fee8:7022 r1-eth1 *N IA 2001:db8:100::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 - N E1 2001:db8:100::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 + N E2 2001:db8:100::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 *N IE 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 - N E1 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 - N E1 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:07 + N E2 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:08 + N E2 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:07 *N IE 2001:db8:300::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:07 - N E1 2001:db8:300::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:07 + N E2 2001:db8:300::/64 fe80::400f:dff:fe35:a1e7 r1-eth1 00:06:07 diff --git a/tests/topotests/ospf_topo1/r2/ospf6route.txt b/tests/topotests/ospf_topo1/r2/ospf6route.txt index 7d3ce5b20..71c84d2eb 100644 --- a/tests/topotests/ospf_topo1/r2/ospf6route.txt +++ b/tests/topotests/ospf_topo1/r2/ospf6route.txt @@ -1,13 +1,13 @@ *N IA 2001:db8:1::/64 fe80::b49b:4cff:fe80:4e87 r2-eth1 00:03:34 - N E1 2001:db8:1::/64 fe80::b49b:4cff:fe80:4e87 r2-eth1 00:03:34 + N E2 2001:db8:1::/64 fe80::b49b:4cff:fe80:4e87 r2-eth1 00:03:34 *N IA 2001:db8:2::/64 :: r2-eth0 00:03:39 *N IA 2001:db8:3::/64 :: r2-eth1 00:03:34 - N E1 2001:db8:3::/64 fe80::b49b:4cff:fe80:4e87 r2-eth1 00:03:34 - N E1 2001:db8:3::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 + N E2 2001:db8:3::/64 fe80::b49b:4cff:fe80:4e87 r2-eth1 00:03:34 + N E2 2001:db8:3::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 *N IA 2001:db8:100::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 - N E1 2001:db8:100::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 + N E2 2001:db8:100::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 *N IE 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 - N E1 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 - N E1 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:32 + N E2 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:34 + N E2 2001:db8:200::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:32 *N IE 2001:db8:300::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:32 - N E1 2001:db8:300::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:32 + N E2 2001:db8:300::/64 fe80::50b7:d8ff:fe5f:8ff0 r2-eth1 00:03:32 diff --git a/tests/topotests/ospf_topo1/r2/ospf6route_down.txt b/tests/topotests/ospf_topo1/r2/ospf6route_down.txt index acfffc9f1..a1f041218 100644 --- a/tests/topotests/ospf_topo1/r2/ospf6route_down.txt +++ b/tests/topotests/ospf_topo1/r2/ospf6route_down.txt @@ -1,5 +1,5 @@ *N IA 2001:db8:1::/64 fe80::fc0b:daff:fe31:6791 r2-eth1 00:06:19 - N E1 2001:db8:1::/64 fe80::fc0b:daff:fe31:6791 r2-eth1 00:06:19 + N E2 2001:db8:1::/64 fe80::fc0b:daff:fe31:6791 r2-eth1 00:06:19 *N IA 2001:db8:2::/64 :: r2-eth0 00:07:17 *N IA 2001:db8:3::/64 :: r2-eth1 00:06:27 - N E1 2001:db8:3::/64 fe80::fc0b:daff:fe31:6791 r2-eth1 00:06:19 + N E2 2001:db8:3::/64 fe80::fc0b:daff:fe31:6791 r2-eth1 00:06:19 diff --git a/tests/topotests/ospf_topo1/r2/ospf6route_ecmp.txt b/tests/topotests/ospf_topo1/r2/ospf6route_ecmp.txt index f58b501e3..0c06d234c 100644 --- a/tests/topotests/ospf_topo1/r2/ospf6route_ecmp.txt +++ b/tests/topotests/ospf_topo1/r2/ospf6route_ecmp.txt @@ -1,13 +1,13 @@ *N IA 2001:db8:1::/64 fe80::98cd:28ff:fe5e:3d93 
r2-eth1 00:07:04 - N E1 2001:db8:1::/64 fe80::98cd:28ff:fe5e:3d93 r2-eth1 00:07:04 + N E2 2001:db8:1::/64 fe80::98cd:28ff:fe5e:3d93 r2-eth1 00:07:04 *N IA 2001:db8:2::/64 :: r2-eth0 00:07:09 *N IA 2001:db8:3::/64 :: r2-eth1 00:07:04 - N E1 2001:db8:3::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 + N E2 2001:db8:3::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 fe80::98cd:28ff:fe5e:3d93 r2-eth1 *N IA 2001:db8:100::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 - N E1 2001:db8:100::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 + N E2 2001:db8:100::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 *N IE 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 - N E1 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 - N E1 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:03 + N E2 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:04 + N E2 2001:db8:200::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:03 *N IE 2001:db8:300::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:03 - N E1 2001:db8:300::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:03 + N E2 2001:db8:300::/64 fe80::400f:dff:fe35:a1e7 r2-eth1 00:07:03 diff --git a/tests/topotests/ospf_topo1/r3/ospf6route.txt b/tests/topotests/ospf_topo1/r3/ospf6route.txt index b123c4265..69c99b4fd 100644 --- a/tests/topotests/ospf_topo1/r3/ospf6route.txt +++ b/tests/topotests/ospf_topo1/r3/ospf6route.txt @@ -1,12 +1,12 @@ *N IA 2001:db8:1::/64 fe80::b49b:4cff:fe80:4e87 r3-eth0 00:04:03 - N E1 2001:db8:1::/64 fe80::b49b:4cff:fe80:4e87 r3-eth0 00:04:03 + N E2 2001:db8:1::/64 fe80::b49b:4cff:fe80:4e87 r3-eth0 00:04:03 *N IA 2001:db8:2::/64 fe80::b038:bcff:fe27:e2d6 r3-eth0 00:04:03 - N E1 2001:db8:2::/64 fe80::b038:bcff:fe27:e2d6 r3-eth0 00:04:03 + N E2 2001:db8:2::/64 fe80::b038:bcff:fe27:e2d6 r3-eth0 00:04:03 *N IA 2001:db8:3::/64 :: r3-eth0 00:04:08 - N E1 2001:db8:3::/64 fe80::b49b:4cff:fe80:4e87 r3-eth0 00:04:03 - N E1 2001:db8:3::/64 fe80::b038:bcff:fe27:e2d6 r3-eth0 00:04:03 + N E2 2001:db8:3::/64 fe80::b49b:4cff:fe80:4e87 r3-eth0 00:04:03 + N E2 2001:db8:3::/64 fe80::b038:bcff:fe27:e2d6 r3-eth0 00:04:03 *N IA 2001:db8:100::/64 :: r3-eth1 00:04:08 *N IA 2001:db8:200::/64 :: r3-eth2 00:04:05 - N E1 2001:db8:200::/64 fe80::78e0:deff:feb1:ec0 r3-eth2 00:04:00 + N E2 2001:db8:200::/64 fe80::78e0:deff:feb1:ec0 r3-eth2 00:04:00 *N IA 2001:db8:300::/64 fe80::78e0:deff:feb1:ec0 r3-eth2 00:04:00 - N E1 2001:db8:300::/64 fe80::78e0:deff:feb1:ec0 r3-eth2 00:04:00 + N E2 2001:db8:300::/64 fe80::78e0:deff:feb1:ec0 r3-eth2 00:04:00 diff --git a/tests/topotests/ospf_topo1/r3/ospf6route_down.txt b/tests/topotests/ospf_topo1/r3/ospf6route_down.txt index ed69a8376..645ee0bfc 100644 --- a/tests/topotests/ospf_topo1/r3/ospf6route_down.txt +++ b/tests/topotests/ospf_topo1/r3/ospf6route_down.txt @@ -1,5 +1,5 @@ *N IA 2001:db8:100::/64 :: r3-eth1 00:08:06 *N IA 2001:db8:200::/64 :: r3-eth2 00:08:04 - N E1 2001:db8:200::/64 fe80::80a6:c3ff:fea9:88be r3-eth2 00:07:59 + N E2 2001:db8:200::/64 fe80::80a6:c3ff:fea9:88be r3-eth2 00:07:59 *N IA 2001:db8:300::/64 fe80::80a6:c3ff:fea9:88be r3-eth2 00:07:59 - N E1 2001:db8:300::/64 fe80::80a6:c3ff:fea9:88be r3-eth2 00:07:59 + N E2 2001:db8:300::/64 fe80::80a6:c3ff:fea9:88be r3-eth2 00:07:59 diff --git a/tests/topotests/ospf_topo1/r3/ospf6route_ecmp.txt b/tests/topotests/ospf_topo1/r3/ospf6route_ecmp.txt index 54e575adc..ecd51be4e 100644 --- a/tests/topotests/ospf_topo1/r3/ospf6route_ecmp.txt +++ b/tests/topotests/ospf_topo1/r3/ospf6route_ecmp.txt @@ -1,12 +1,12 @@ *N IA 2001:db8:1::/64 fe80::98cd:28ff:fe5e:3d93 r3-eth0 
00:08:58 - N E1 2001:db8:1::/64 fe80::98cd:28ff:fe5e:3d93 r3-eth0 00:08:58 + N E2 2001:db8:1::/64 fe80::98cd:28ff:fe5e:3d93 r3-eth0 00:08:58 *N IA 2001:db8:2::/64 fe80::e8bb:62ff:fee8:7022 r3-eth0 00:08:58 - N E1 2001:db8:2::/64 fe80::e8bb:62ff:fee8:7022 r3-eth0 00:08:58 + N E2 2001:db8:2::/64 fe80::e8bb:62ff:fee8:7022 r3-eth0 00:08:58 *N IA 2001:db8:3::/64 :: r3-eth0 00:09:03 - N E1 2001:db8:3::/64 fe80::98cd:28ff:fe5e:3d93 r3-eth0 00:08:58 + N E2 2001:db8:3::/64 fe80::98cd:28ff:fe5e:3d93 r3-eth0 00:08:58 fe80::e8bb:62ff:fee8:7022 r3-eth0 *N IA 2001:db8:100::/64 :: r3-eth1 00:09:03 *N IA 2001:db8:200::/64 :: r3-eth2 00:09:02 - N E1 2001:db8:200::/64 fe80::d0dc:aff:fec5:5973 r3-eth2 00:08:57 + N E2 2001:db8:200::/64 fe80::d0dc:aff:fec5:5973 r3-eth2 00:08:57 *N IA 2001:db8:300::/64 fe80::d0dc:aff:fec5:5973 r3-eth2 00:08:57 - N E1 2001:db8:300::/64 fe80::d0dc:aff:fec5:5973 r3-eth2 00:08:57 + N E2 2001:db8:300::/64 fe80::d0dc:aff:fec5:5973 r3-eth2 00:08:57 diff --git a/tests/topotests/ospf_topo1/r4/ospf6route.txt b/tests/topotests/ospf_topo1/r4/ospf6route.txt index ceeee2cac..3a4f5efdf 100644 --- a/tests/topotests/ospf_topo1/r4/ospf6route.txt +++ b/tests/topotests/ospf_topo1/r4/ospf6route.txt @@ -1,13 +1,13 @@ *N IE 2001:db8:1::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 - N E1 2001:db8:1::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:1::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 *N IE 2001:db8:2::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 - N E1 2001:db8:2::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:2::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 *N IE 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 - N E1 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 - N E1 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 - N E1 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:3::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 *N IE 2001:db8:100::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 - N E1 2001:db8:100::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:100::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 *N IA 2001:db8:200::/64 :: r4-eth0 00:04:30 - N E1 2001:db8:200::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 + N E2 2001:db8:200::/64 fe80::987b:baff:fe8a:c864 r4-eth0 00:04:25 *N IA 2001:db8:300::/64 :: r4-eth1 00:04:30 diff --git a/tests/topotests/ospf_topo1/r4/ospf6route_down.txt b/tests/topotests/ospf_topo1/r4/ospf6route_down.txt index 4ad636dd9..165f8dbdf 100644 --- a/tests/topotests/ospf_topo1/r4/ospf6route_down.txt +++ b/tests/topotests/ospf_topo1/r4/ospf6route_down.txt @@ -1,5 +1,5 @@ *N IE 2001:db8:100::/64 fe80::b44b:a1ff:fe48:3d69 r4-eth0 00:01:45 - N E1 2001:db8:100::/64 fe80::b44b:a1ff:fe48:3d69 r4-eth0 00:01:45 + N E2 2001:db8:100::/64 fe80::b44b:a1ff:fe48:3d69 r4-eth0 00:01:45 *N IA 2001:db8:200::/64 :: r4-eth0 00:01:50 - N E1 2001:db8:200::/64 fe80::b44b:a1ff:fe48:3d69 r4-eth0 00:01:45 + N E2 2001:db8:200::/64 fe80::b44b:a1ff:fe48:3d69 r4-eth0 00:01:45 *N IA 2001:db8:300::/64 :: r4-eth1 00:01:50 diff --git a/tests/topotests/ospf_topo1/r4/ospf6route_ecmp.txt b/tests/topotests/ospf_topo1/r4/ospf6route_ecmp.txt index b5cb10b72..d0d72a876 100644 --- a/tests/topotests/ospf_topo1/r4/ospf6route_ecmp.txt +++ b/tests/topotests/ospf_topo1/r4/ospf6route_ecmp.txt @@ -1,12 +1,12 @@ *N IE 2001:db8:1::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 
00:09:13 - N E1 2001:db8:1::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 + N E2 2001:db8:1::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 *N IE 2001:db8:2::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 - N E1 2001:db8:2::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 + N E2 2001:db8:2::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 *N IE 2001:db8:3::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 - N E1 2001:db8:3::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 - N E1 2001:db8:3::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 + N E2 2001:db8:3::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 + N E2 2001:db8:3::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 *N IE 2001:db8:100::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 - N E1 2001:db8:100::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 + N E2 2001:db8:100::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 *N IA 2001:db8:200::/64 :: r4-eth0 00:09:17 - N E1 2001:db8:200::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 + N E2 2001:db8:200::/64 fe80::78fe:fcff:fe51:9afc r4-eth0 00:09:13 *N IA 2001:db8:300::/64 :: r4-eth1 00:09:18 diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json new file mode 100644 index 000000000..3669b3a55 --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json @@ -0,0 +1,173 @@ +{ + "feature": [ + "bgp" + ], + "address_types": [ + "ipv6" + ], + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r0": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r2": {} + } + } + }, + "r1": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto" + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + 
} + }, + "ospf6": { + "router_id": "100.1.1.3", + "neighbors": { + "r1": {}, + "r2": {} + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json new file mode 100644 index 000000000..d93eb1f21 --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_single_area.json @@ -0,0 +1,190 @@ +{ + "address_types": [ + "ipv6" + ], + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r0": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf6": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r2": {}, + "r3": {} + } + } + }, + "r1": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3-link0": { + "ipv6": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf6": { + "router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf6": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "lo": { + "ipv6": "auto", + "type": "loopback" + }, + "r0": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r1": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv6": "auto", + "ospf6": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1-link0": { + "ipv6": "auto", + "description": "DummyIntftoR1", + "ospf6": { + "area": "0.0.0.0" + } + } + }, + "ospf6": { + "router_id": "1.0.4.17", + "neighbors": { + "r0": {}, + "r1": {}, + "r2": {} + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py new file mode 100644 index 000000000..4aa71bfb1 --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +import json +from copy import deepcopy +from ipaddress import IPv4Address +from lib.topotest import frr_unicode + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress +from lib.bgp import verify_bgp_convergence, create_router_bgp + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + step, + create_route_maps, + shutdown_bringup_interface, + create_interfaces_cfg, + topo_daemons, + get_frr_ipv6_linklocal, +) + +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +from lib.ospf import ( + verify_ospf6_neighbor, + config_ospf_interface, + clear_ospf, + verify_ospf6_rib, + create_router_ospf, + verify_ospf6_interface, + verify_ospf6_database, + config_ospf6_interface, +) + +from ipaddress import IPv6Address + +# Global variables +topo = None + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospfv3_rte_calc.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +NETWORK = { + "ipv6": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + ], + "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"], +} +TOPOOLOGY = """ + Please view in a fixed-width font such as Courier. + +---+ A1 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A2 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A3 +---+ +""" + +TESTCASES = """ +1. OSPF Cost - verifying ospf interface cost functionality +""" + + +class CreateTopo(Topo): + """ + Test topology builder. 
+ + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def get_llip(onrouter, intf): + """ + API to get the link local ipv6 address of a perticular interface + + Parameters + ---------- + * `fromnode`: Source node + * `tonode` : interface for which link local ip needs to be returned. + + Usage + ----- + result = get_llip('r1', 'r2-link0') + + Returns + ------- + 1) link local ipv6 address from the interface. + 2) errormsg - when link local ip not found. + """ + tgen = get_topogen() + intf = topo["routers"][onrouter]["links"][intf]["interface"] + llip = get_frr_ipv6_linklocal(tgen, onrouter, intf) + if llip: + logger.info("llip ipv6 address to be set as NH is %s", llip) + return llip + return None + + +def get_glipv6(onrouter, intf): + """ + API to get the global ipv6 address of a perticular interface + + Parameters + ---------- + * `onrouter`: Source node + * `intf` : interface for which link local ip needs to be returned. + + Usage + ----- + result = get_glipv6('r1', 'r2-link0') + + Returns + ------- + 1) global ipv6 address from the interface. + 2) errormsg - when link local ip not found. 
+    """
+    glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+    if glipv6:
+        logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+        return glipv6
+    return None
+
+
+def red_static(dut, config=True):
+    """Local helper to redistribute static routes inside ospf."""
+    global topo
+    tgen = get_topogen()
+    if config:
+        ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+    else:
+        ospf_red = {
+            dut: {
+                "ospf6": {
+                    "redistribute": [{"redist_type": "static", "del_action": True}]
+                }
+            }
+        }
+    result = create_router_ospf(tgen, topo, ospf_red)
+    assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+    """Local helper to redistribute connected routes inside ospf."""
+    global topo
+    tgen = get_topogen()
+    if config:
+        ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+    else:
+        ospf_red = {
+            dut: {
+                "ospf6": {
+                    "redistribute": [{"redist_type": "connected", "del_action": True}]
+                }
+            }
+        }
+    result = create_router_ospf(tgen, topo, ospf_red)
+    assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+def test_ospfv3_cost_tc52_p0(request):
+    """OSPF Cost - verifying ospf interface cost functionality"""
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    global topo
+    step("Bring up the base config.")
+    reset_config_on_routers(tgen)
+
+    step(
+        "Configure ospf cost as 20 on the interface between R0 and R1. "
+        "Configure ospf cost as 30 on the interface between R0 and R2."
+    )
+
+    r0_ospf_cost = {
+        "r0": {"links": {"r1": {"ospf6": {"cost": 20}}, "r2": {"ospf6": {"cost": 30}}}}
+    }
+    result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that cost is updated in the ospf interface between"
+        " r0 and r1 as 20 and r0 and r2 as 30"
+    )
+    dut = "r0"
+    result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=r0_ospf_cost)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Swap the costs between interfaces on r0, between r0 and r1 to 30"
+        ", r0 and r2 to 20"
+    )
+
+    r0_ospf_cost = {
+        "r0": {"links": {"r1": {"ospf6": {"cost": 30}}, "r2": {"ospf6": {"cost": 20}}}}
+    }
+    result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that cost is updated in the ospf interface between r0 "
+        "and r1 as 30 and r0 and r2 as 20."
+    )
+    result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=r0_ospf_cost)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Unconfigure cost from the interface r0 - r1.")
+
+    r0_ospf_cost = {
+        "r0": {"links": {"r1": {"ospf6": {"cost": 30, "del_action": True}}}}
+    }
+    result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r0": {"links": {"r1": {"ospf6": {"cost": 10}}, "r2": {"ospf6": {"cost": 20}}}}
+    }
+    step(
+        "Verify that cost is updated in the ospf interface between r0"
+        " and r1 as 10 and r0 and r2 as 20."
+ ) + + result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step(" Un configure cost from the interface r0 - r2.") + + r0_ospf_cost = { + "r0": {"links": {"r2": {"ospf6": {"cost": 20, "del_action": True}}}} + } + result = config_ospf6_interface(tgen, topo, r0_ospf_cost) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that cost is updated in the ospf interface between r0" + "and r1 as 10 and r0 and r2 as 10" + ) + + input_dict = { + "r0": {"links": {"r1": {"ospf6": {"cost": 10}}, "r2": {"ospf6": {"cost": 10}}}} + } + result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py new file mode 100644 index 000000000..a84f1a1eb --- /dev/null +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py @@ -0,0 +1,417 @@ +#!/usr/bin/python + +# +# Copyright (c) 2021 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +import json +from copy import deepcopy +from ipaddress import IPv4Address +from lib.topotest import frr_unicode + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + step, + create_route_maps, + shutdown_bringup_interface, + create_interfaces_cfg, + topo_daemons, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +from lib.ospf import ( + verify_ospf6_neighbor, + config_ospf_interface, + clear_ospf, + verify_ospf6_rib, + create_router_ospf, + verify_ospf6_interface, + verify_ospf6_database, + config_ospf6_interface, +) + +from ipaddress import IPv6Address + +# Global variables +topo = None + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospfv3_single_area.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +""" +TOPOOLOGY = + Please view in a fixed-width font such as Courier. + +---+ A1 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A2 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A3 +---+ + +TESTCASES = +1. OSPF IFSM -Verify state change events on p2p network. +2. OSPF Timers - Verify OSPF interface timer hello interval functionality +3. OSPF Timers - Verify OSPF interface timer dead interval functionality +4. Verify ospf show commands with json output. +5. Verify NFSM events when ospf nbr changes with different MTU values. + """ + + +class CreateTopo(Topo): + """ + Test topology builder. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + ospf_covergence = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. 
+ + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +# ################################## +# Test cases start here. +# ################################## + + +def test_ospfv3_p2p_tc3_p0(request): + """OSPF IFSM -Verify state change events on p2p network.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + step( + "Verify that OSPF is subscribed to multi cast services " + "(All SPF, all DR Routers)." + ) + step("Verify that interface is enabled in ospf.") + step("Verify that config is successful.") + dut = "r0" + input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}} + result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Delete the ip address") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"], + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Change the ip on the R0 interface") + + topo_modify_change_ip = deepcopy(topo) + intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] + topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] = str( + IPv6Address(frr_unicode(intf_ip.split("/")[0])) + 3 + ) + "/{}".format(intf_ip.split("/")[1]) + + build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) + step("Verify that interface is enabled in ospf.") + dut = "r0" + input_dict = { + "r0": { + "links": { + "r3": { + "ospf6": { + "internetAddress": [ + { + "type": "inet6", + "address": topo_modify_change_ip["routers"]["r0"][ + "links" + ]["r3"]["ipv6"].split("/")[0], + } + ], + } + } + } + } + } + result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict) + + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Modify the mask on the R0 interface") + ip_addr = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] + mask = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] + step("Delete the ip address") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv6": ip_addr, + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Change the ip on the R0 interface") + + topo_modify_change_ip = deepcopy(topo) + intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] + topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv6"] = str( + IPv6Address(frr_unicode(intf_ip.split("/")[0])) + 3 + ) + "/{}".format(int(intf_ip.split("/")[1]) + 1) + + build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) + step("Verify that interface is enabled in ospf.") + dut = "r0" + input_dict = { + "r0": { + "links": { + "r3": { + "ospf6": 
{
+                        "internetAddress": [
+                            {
+                                "type": "inet6",
+                                "address": topo_modify_change_ip["routers"]["r0"][
+                                    "links"
+                                ]["r3"]["ipv6"].split("/")[0],
+                            }
+                        ],
+                    }
+                }
+            }
+        }
+    }
+    result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    topo1 = {
+        "r0": {
+            "links": {
+                "r3": {
+                    "ipv6": topo_modify_change_ip["routers"]["r0"]["links"]["r3"][
+                        "ipv6"
+                    ],
+                    "interface": topo_modify_change_ip["routers"]["r0"]["links"]["r3"][
+                        "interface"
+                    ],
+                    "delete": True,
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, topo1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    build_config_from_json(tgen, topo, save_bkup=False)
+
+    step("Change the area id on the interface")
+    input_dict = {
+        "r0": {
+            "links": {
+                "r3": {
+                    "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+                    "ospf6": {"area": "0.0.0.0"},
+                    "delete": True,
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r0": {
+            "links": {
+                "r3": {
+                    "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+                    "ospf6": {"area": "0.0.0.1"},
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+    step("Verify that interface is enabled in ospf.")
+    dut = "r0"
+    input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+    result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r0": {
+            "links": {
+                "r3": {
+                    "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+                    "ospf6": {"area": "0.0.0.1"},
+                    "delete": True,
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r0": {
+            "links": {
+                "r3": {
+                    "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+                    "ospf6": {"area": "0.0.0.0"},
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify that interface is enabled in ospf.")
+    dut = "r0"
+    input_dict = {"r0": {"links": {"r3": {"ospf6": {"ospf6Enabled": True}}}}}
+    result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify that all neighbors are up after clearing the process.")
+    for rtr in topo["routers"]:
+        clear_ospf(tgen, rtr, ospf="ospf6")
+
+    ospf_convergence = verify_ospf6_neighbor(tgen, topo)
+    assert ospf_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+        tc_name, ospf_convergence
+    )
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
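Note on the per-link "ospf6" stanzas used by the new JSON topologies above (area, hello_interval, dead_interval, network): the topotest library consumes them when it builds each router's configuration. The snippet below is only an illustrative sketch of that mapping, not the actual lib/topojson or lib/ospf code; the helper name ospf6_intf_config and the interface name r0-r3-eth3 are invented for the example, and the exact interface-level FRR CLI forms should be checked against the FRR version under test.

def ospf6_intf_config(ifname, ospf6):
    """Render one JSON link stanza as interface-level 'ipv6 ospf6 ...' commands."""
    lines = ["interface {}".format(ifname)]
    if "area" in ospf6:
        lines.append(" ipv6 ospf6 area {}".format(ospf6["area"]))
    if "hello_interval" in ospf6:
        lines.append(" ipv6 ospf6 hello-interval {}".format(ospf6["hello_interval"]))
    if "dead_interval" in ospf6:
        lines.append(" ipv6 ospf6 dead-interval {}".format(ospf6["dead_interval"]))
    if "network" in ospf6:
        lines.append(" ipv6 ospf6 network {}".format(ospf6["network"]))
    return "\n".join(lines)

# Example stanza copied from the r0<->r3 link in ospfv3_single_area.json:
stanza = {"area": "0.0.0.0", "hello_interval": 1, "dead_interval": 4, "network": "point-to-point"}
print(ospf6_intf_config("r0-r3-eth3", stanza))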