-rw-r--r--  doc/user/zebra.rst                          |  15
-rw-r--r--  tests/topotests/bgp_flowspec/r1/zebra.conf  |   1
-rw-r--r--  zebra/table_manager.c                       | 190
-rw-r--r--  zebra/table_manager.h                       |  13
-rw-r--r--  zebra/zapi_msg.c                            |  15
-rw-r--r--  zebra/zebra_ns.c                            |   5
-rw-r--r--  zebra/zebra_vrf.c                           |  19
-rw-r--r--  zebra/zebra_vrf.h                           |   2
-rw-r--r--  zebra/zebra_vty.c                           |  29
9 files changed, 235 insertions, 54 deletions
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 79036320b..1ab4dcce2 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -473,6 +473,21 @@ be updated with the new name. To illustrate, if you want to recompile with
./configure --with-defaultvrfname=global
+.. _zebra-table-allocation:
+
+Table Allocation
+================
+
+Some services, such as BGP flowspec, allocate routing tables to perform
+policy routing based on netfilter criteria and IP rules. To avoid conflicts
+between the routing tables allocated to VRFs and those used by such
+services, Zebra can reserve a dedicated range of routing tables for them.
+
+The reservation is configured as below, giving the range of routing tables
+that the requesting service may use.
+
+.. clicmd:: ip table range <STARTTABLENO> <ENDTABLENO>
+
.. _zebra-ecmp:
ECMP
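
As an illustration of what the new documentation section describes, the sketch
below shows how a service inside zebra could obtain and return a chunk out of a
configured range through the per-VRF table manager API introduced by this patch
(see zebra/table_manager.h further down). It is a sketch only: the
proto/instance values and the chunk size are placeholders, not taken from this
change.

/* Illustrative sketch, not part of this patch: request and release a
 * chunk of routing tables for a zebra_vrf configured with
 * "ip table range 500 600". Proto/instance/size values are placeholders.
 */
#include "zebra/table_manager.h"
#include "zebra/zebra_vrf.h"

static void example_use_table_chunk(struct zebra_vrf *zvrf)
{
	struct table_manager_chunk *tmc;

	/* Ask for 10 contiguous table IDs; with the range above and no
	 * prior allocation, the chunk handed back is [500;509].
	 */
	tmc = assign_table_chunk(ZEBRA_ROUTE_BGP, 0, 10, zvrf);
	if (!tmc)
		return; /* no zvrf, or the configured range is exhausted */

	/* ... program PBR/netfilter state using tmc->start .. tmc->end ... */

	/* Hand the chunk back once the service no longer needs it. */
	release_table_chunk(ZEBRA_ROUTE_BGP, 0, tmc->start, tmc->end, zvrf);
}
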
diff --git a/tests/topotests/bgp_flowspec/r1/zebra.conf b/tests/topotests/bgp_flowspec/r1/zebra.conf
index e4d5a2119..4b103cb39 100644
--- a/tests/topotests/bgp_flowspec/r1/zebra.conf
+++ b/tests/topotests/bgp_flowspec/r1/zebra.conf
@@ -1,6 +1,7 @@
!
hostname r1
password zebra
+ip table range 500 600
interface r1-eth0
ip address 10.0.1.1/24
ipv6 address 1001::1/112
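
With "ip table range 500 600" in r1's zebra.conf, bgpd's flowspec code is
expected to receive its table IDs from inside that window. A rough client-side
sketch follows; it assumes the tm_table_manager_connect()/tm_get_table_chunk()
helpers in lib/zclient.c, which are not touched by this patch, keep their usual
signatures.

/* Client-side sketch (assumption: tm_table_manager_connect() and
 * tm_get_table_chunk() from lib/zclient.c, with these signatures;
 * neither is modified by this patch).
 */
#include "zclient.h"

static int example_request_tables(struct zclient *zclient)
{
	uint32_t start, end;

	if (tm_table_manager_connect(zclient) != 0)
		return -1;

	/* With "ip table range 500 600" configured on the zebra side,
	 * a one-table request should land inside [500;600].
	 */
	if (tm_get_table_chunk(zclient, 1, &start, &end) != 0)
		return -1;

	/* start/end now delimit the tables this daemon may program. */
	return 0;
}
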
diff --git a/zebra/table_manager.c b/zebra/table_manager.c
index bb060588d..9f3b44f94 100644
--- a/zebra/table_manager.c
+++ b/zebra/table_manager.c
@@ -55,10 +55,9 @@
#define RT_TABLE_ID_UNRESERVED_MIN 1
#define RT_TABLE_ID_UNRESERVED_MAX 0xffffffff
-struct table_manager tbl_mgr;
-
DEFINE_MGROUP(TABLE_MGR, "Table Manager");
DEFINE_MTYPE_STATIC(TABLE_MGR, TM_CHUNK, "Table Manager Chunk");
+DEFINE_MTYPE_STATIC(TABLE_MGR, TM_TABLE, "Table Manager Context");
static void delete_table_chunk(void *val)
{
@@ -68,12 +67,21 @@ static void delete_table_chunk(void *val)
/**
* Init table manager
*/
-void table_manager_enable(ns_id_t ns_id)
+void table_manager_enable(struct zebra_vrf *zvrf)
{
- if (ns_id != NS_DEFAULT)
+
+ if (zvrf->tbl_mgr)
return;
- tbl_mgr.lc_list = list_new();
- tbl_mgr.lc_list->del = delete_table_chunk;
+ if (!vrf_is_backend_netns() && zvrf_id(zvrf) != VRF_DEFAULT) {
+ struct zebra_vrf *def = zebra_vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (def)
+ zvrf->tbl_mgr = def->tbl_mgr;
+ return;
+ }
+ zvrf->tbl_mgr = XCALLOC(MTYPE_TM_TABLE, sizeof(struct table_manager));
+ zvrf->tbl_mgr->lc_list = list_new();
+ zvrf->tbl_mgr->lc_list->del = delete_table_chunk;
hook_register(zserv_client_close, release_daemon_table_chunks);
}
@@ -89,14 +97,19 @@ void table_manager_enable(ns_id_t ns_id)
* @return Pointer to the assigned table chunk
*/
struct table_manager_chunk *assign_table_chunk(uint8_t proto, uint16_t instance,
- uint32_t size)
+ uint32_t size,
+ struct zebra_vrf *zvrf)
{
struct table_manager_chunk *tmc;
struct listnode *node;
uint32_t start;
+ bool manual_conf = false;
+
+ if (!zvrf)
+ return NULL;
/* first check if there's one available */
- for (ALL_LIST_ELEMENTS_RO(tbl_mgr.lc_list, node, tmc)) {
+ for (ALL_LIST_ELEMENTS_RO(zvrf->tbl_mgr->lc_list, node, tmc)) {
if (tmc->proto == NO_PROTO
&& tmc->end - tmc->start + 1 == size) {
tmc->proto = proto;
@@ -109,17 +122,26 @@ struct table_manager_chunk *assign_table_chunk(uint8_t proto, uint16_t instance,
if (!tmc)
return NULL;
+ if (zvrf->tbl_mgr->start || zvrf->tbl_mgr->end)
+ manual_conf = true;
/* table RT IDs range are [1;252] and [256;0xffffffff]
* - check if the requested range can be within the first range,
* otherwise elect second one
* - TODO : vrf-lites have their own table identifier.
* In that case, table_id should be removed from the table range.
*/
- if (list_isempty(tbl_mgr.lc_list))
- start = RT_TABLE_ID_UNRESERVED_MIN;
- else
+ if (list_isempty(zvrf->tbl_mgr->lc_list)) {
+ if (!manual_conf)
+ start = RT_TABLE_ID_UNRESERVED_MIN;
+ else
+ start = zvrf->tbl_mgr->start;
+ } else
start = ((struct table_manager_chunk *)listgetdata(
- listtail(tbl_mgr.lc_list)))->end + 1;
+ listtail(zvrf->tbl_mgr->lc_list)))
+ ->end
+ + 1;
+
+ if (!manual_conf) {
#if !defined(GNU_LINUX)
/* BSD systems
@@ -127,25 +149,35 @@ struct table_manager_chunk *assign_table_chunk(uint8_t proto, uint16_t instance,
#else
/* Linux Systems
*/
- /* if not enough room space between MIN and COMPAT,
- * then begin after LOCAL
- */
- if (start < RT_TABLE_ID_COMPAT && (size >
- RT_TABLE_ID_COMPAT
- - RT_TABLE_ID_UNRESERVED_MIN))
- start = RT_TABLE_ID_LOCAL + 1;
+ /* if not enough room space between MIN and COMPAT,
+ * then begin after LOCAL
+ */
+ if (start < RT_TABLE_ID_COMPAT
+ && (size > RT_TABLE_ID_COMPAT - RT_TABLE_ID_UNRESERVED_MIN))
+ start = RT_TABLE_ID_LOCAL + 1;
#endif /* !def(GNU_LINUX) */
- tmc->start = start;
- if (RT_TABLE_ID_UNRESERVED_MAX - size + 1 < start) {
- flog_err(EC_ZEBRA_TM_EXHAUSTED_IDS,
- "Reached max table id. Start/Size %u/%u", start, size);
- XFREE(MTYPE_TM_CHUNK, tmc);
- return NULL;
+ tmc->start = start;
+ if (RT_TABLE_ID_UNRESERVED_MAX - size + 1 < start) {
+ flog_err(EC_ZEBRA_TM_EXHAUSTED_IDS,
+ "Reached max table id. Start/Size %u/%u",
+ start, size);
+ XFREE(MTYPE_TM_CHUNK, tmc);
+ return NULL;
+ }
+ } else {
+ tmc->start = start;
+ if (zvrf->tbl_mgr->end - size + 1 < start) {
+ flog_err(EC_ZEBRA_TM_EXHAUSTED_IDS,
+ "Reached max table id. Start/Size %u/%u",
+ start, size);
+ XFREE(MTYPE_TM_CHUNK, tmc);
+ return NULL;
+ }
}
tmc->end = tmc->start + size - 1;
tmc->proto = proto;
tmc->instance = instance;
- listnode_add(tbl_mgr.lc_list, tmc);
+ listnode_add(zvrf->tbl_mgr->lc_list, tmc);
return tmc;
}
@@ -160,16 +192,23 @@ struct table_manager_chunk *assign_table_chunk(uint8_t proto, uint16_t instance,
* @return 0 on success, -1 otherwise
*/
int release_table_chunk(uint8_t proto, uint16_t instance, uint32_t start,
- uint32_t end)
+ uint32_t end, struct zebra_vrf *zvrf)
{
struct listnode *node;
struct table_manager_chunk *tmc;
int ret = -1;
+ struct table_manager *tbl_mgr;
+
+ if (!zvrf)
+ return -1;
+ tbl_mgr = zvrf->tbl_mgr;
+ if (!tbl_mgr)
+ return ret;
/* check that size matches */
zlog_debug("Releasing table chunk: %u - %u", start, end);
/* find chunk and disown */
- for (ALL_LIST_ELEMENTS_RO(tbl_mgr.lc_list, node, tmc)) {
+ for (ALL_LIST_ELEMENTS_RO(tbl_mgr->lc_list, node, tmc)) {
if (tmc->start != start)
continue;
if (tmc->end != end)
@@ -208,24 +247,99 @@ int release_daemon_table_chunks(struct zserv *client)
struct table_manager_chunk *tmc;
int count = 0;
int ret;
+ struct vrf *vrf;
+ struct zebra_vrf *zvrf;
- for (ALL_LIST_ELEMENTS_RO(tbl_mgr.lc_list, node, tmc)) {
- if (tmc->proto == proto && tmc->instance == instance) {
- ret = release_table_chunk(tmc->proto, tmc->instance,
- tmc->start, tmc->end);
- if (ret == 0)
- count++;
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ zvrf = vrf->info;
+
+ if (!zvrf)
+ continue;
+ if (!vrf_is_backend_netns() && vrf->vrf_id != VRF_DEFAULT)
+ continue;
+ for (ALL_LIST_ELEMENTS_RO(zvrf->tbl_mgr->lc_list, node, tmc)) {
+ if (tmc->proto == proto && tmc->instance == instance) {
+ ret = release_table_chunk(
+ tmc->proto, tmc->instance, tmc->start,
+ tmc->end, zvrf);
+ if (ret == 0)
+ count++;
+ }
}
}
-
zlog_debug("%s: Released %d table chunks", __func__, count);
return count;
}
-void table_manager_disable(ns_id_t ns_id)
+static void table_range_add(struct zebra_vrf *zvrf, uint32_t start,
+ uint32_t end)
+{
+ if (!zvrf->tbl_mgr)
+ return;
+ zvrf->tbl_mgr->start = start;
+ zvrf->tbl_mgr->end = end;
+}
+
+void table_manager_disable(struct zebra_vrf *zvrf)
{
- if (ns_id != NS_DEFAULT)
+ if (!zvrf->tbl_mgr)
+ return;
+ if (!vrf_is_backend_netns() && zvrf_id(zvrf) != VRF_DEFAULT) {
+ zvrf->tbl_mgr = NULL;
return;
- list_delete(&tbl_mgr.lc_list);
+ }
+ list_delete(&zvrf->tbl_mgr->lc_list);
+ XFREE(MTYPE_TM_TABLE, zvrf->tbl_mgr);
+ zvrf->tbl_mgr = NULL;
+}
+
+int table_manager_range(struct vty *vty, bool add, struct zebra_vrf *zvrf,
+ const char *start_table_str, const char *end_table_str)
+{
+ uint32_t start;
+ uint32_t end;
+
+ if (add) {
+ if (!start_table_str || !end_table_str) {
+ vty_out(vty, "%% Labels not specified\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ start = atoi(start_table_str);
+ end = atoi(end_table_str);
+ if (end < start) {
+ vty_out(vty, "%% End table is less than Start table\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+#if !defined(GNU_LINUX)
+/* BSD systems
+ */
+#else
+ /* Linux Systems
+ */
+ if ((start >= RT_TABLE_ID_COMPAT && start <= RT_TABLE_ID_LOCAL)
+ || (end >= RT_TABLE_ID_COMPAT
+ && end <= RT_TABLE_ID_LOCAL)) {
+ vty_out(vty, "%% Values forbidden in range [%u;%u]\n",
+ RT_TABLE_ID_COMPAT, RT_TABLE_ID_LOCAL);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ if (start < RT_TABLE_ID_COMPAT && end > RT_TABLE_ID_LOCAL) {
+ vty_out(vty,
+ "%% Range overlaps range [%u;%u] forbidden\n",
+ RT_TABLE_ID_COMPAT, RT_TABLE_ID_LOCAL);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+#endif
+ if (zvrf->tbl_mgr
+ && ((zvrf->tbl_mgr->start && zvrf->tbl_mgr->start != start)
+ || (zvrf->tbl_mgr->end && zvrf->tbl_mgr->end != end))) {
+ vty_out(vty,
+ "%% New range will be taken into account at restart\n");
+ }
+ table_range_add(zvrf, start, end);
+ } else
+ table_range_add(zvrf, 0, 0);
+ return CMD_SUCCESS;
}
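
To make the ownership rule of the rewritten table_manager_enable() and
table_manager_disable() explicit: with the default l3mdev VRF backend, every
VRF shares the default VRF's manager (they all live in the same kernel table
space), while with the netns backend each VRF carries its own. The helper below
is a condensed restatement of that rule, for illustration only.

/* Condensed restatement of the ownership rule implemented in
 * table_manager_enable(); illustrative only, not part of the patch.
 */
static struct table_manager *table_manager_of(struct zebra_vrf *zvrf)
{
	if (!vrf_is_backend_netns() && zvrf_id(zvrf) != VRF_DEFAULT) {
		/* l3mdev VRFs borrow the default VRF's allocator. */
		struct zebra_vrf *def = zebra_vrf_lookup_by_id(VRF_DEFAULT);

		return def ? def->tbl_mgr : NULL;
	}

	/* The default VRF, and every netns-backed VRF, owns its manager;
	 * start == end == 0 means no "ip table range" was configured.
	 */
	return zvrf->tbl_mgr;
}
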
diff --git a/zebra/table_manager.h b/zebra/table_manager.h
index 4f78f5097..fa1366842 100644
--- a/zebra/table_manager.h
+++ b/zebra/table_manager.h
@@ -57,15 +57,20 @@ struct table_manager_chunk {
*/
struct table_manager {
struct list *lc_list;
+ uint32_t start;
+ uint32_t end;
};
-void table_manager_enable(ns_id_t ns_id);
+void table_manager_enable(struct zebra_vrf *zvrf);
struct table_manager_chunk *assign_table_chunk(uint8_t proto, uint16_t instance,
- uint32_t size);
+ uint32_t size,
+ struct zebra_vrf *zvrf);
int release_table_chunk(uint8_t proto, uint16_t instance, uint32_t start,
- uint32_t end);
+ uint32_t end, struct zebra_vrf *zvrf);
int release_daemon_table_chunks(struct zserv *client);
-void table_manager_disable(ns_id_t ns_id);
+void table_manager_disable(struct zebra_vrf *zvrf);
+int table_manager_range(struct vty *vty, bool add, struct zebra_vrf *zvrf,
+ const char *min, const char *max);
#ifdef __cplusplus
}
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 72c707150..0aef1965d 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2827,7 +2827,7 @@ static void zread_label_manager_request(ZAPI_HANDLER_ARGS)
}
static void zread_get_table_chunk(struct zserv *client, struct stream *msg,
- vrf_id_t vrf_id)
+ struct zebra_vrf *zvrf)
{
struct stream *s;
uint32_t size;
@@ -2839,7 +2839,7 @@ static void zread_get_table_chunk(struct zserv *client, struct stream *msg,
/* Get data. */
STREAM_GETL(s, size);
- tmc = assign_table_chunk(client->proto, client->instance, size);
+ tmc = assign_table_chunk(client->proto, client->instance, size, zvrf);
if (!tmc)
flog_err(EC_ZEBRA_TM_CANNOT_ASSIGN_CHUNK,
"%s: Unable to assign Table Chunk of size %u",
@@ -2848,13 +2848,14 @@ static void zread_get_table_chunk(struct zserv *client, struct stream *msg,
zlog_debug("Assigned Table Chunk %u - %u", tmc->start,
tmc->end);
/* send response back */
- zsend_assign_table_chunk_response(client, vrf_id, tmc);
+ zsend_assign_table_chunk_response(client, zvrf_id(zvrf), tmc);
stream_failure:
return;
}
-static void zread_release_table_chunk(struct zserv *client, struct stream *msg)
+static void zread_release_table_chunk(struct zserv *client, struct stream *msg,
+ struct zebra_vrf *zvrf)
{
struct stream *s;
uint32_t start, end;
@@ -2866,7 +2867,7 @@ static void zread_release_table_chunk(struct zserv *client, struct stream *msg)
STREAM_GETL(s, start);
STREAM_GETL(s, end);
- release_table_chunk(client->proto, client->instance, start, end);
+ release_table_chunk(client->proto, client->instance, start, end, zvrf);
stream_failure:
return;
@@ -2886,9 +2887,9 @@ static void zread_table_manager_request(ZAPI_HANDLER_ARGS)
return;
}
if (hdr->command == ZEBRA_GET_TABLE_CHUNK)
- zread_get_table_chunk(client, msg, zvrf_id(zvrf));
+ zread_get_table_chunk(client, msg, zvrf);
else if (hdr->command == ZEBRA_RELEASE_TABLE_CHUNK)
- zread_release_table_chunk(client, msg);
+ zread_release_table_chunk(client, msg, zvrf);
}
}
diff --git a/zebra/zebra_ns.c b/zebra/zebra_ns.c
index 27b8a3ea4..0f53de8c3 100644
--- a/zebra/zebra_ns.c
+++ b/zebra/zebra_ns.c
@@ -127,9 +127,6 @@ int zebra_ns_enable(ns_id_t ns_id, void **info)
route_read(zns);
kernel_read_pbr_rules(zns);
- /* Initiate Table Manager per ZNS */
- table_manager_enable(ns_id);
-
return 0;
}
@@ -142,8 +139,6 @@ static int zebra_ns_disable_internal(struct zebra_ns *zns, bool complete)
kernel_terminate(zns, complete);
- table_manager_disable(zns->ns_id);
-
zns->ns_id = NS_DEFAULT;
return 0;
diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c
index 4fbcc6f59..e16234772 100644
--- a/zebra/zebra_vrf.c
+++ b/zebra/zebra_vrf.c
@@ -44,6 +44,7 @@
#ifndef VTYSH_EXTRACT_PL
#include "zebra/zebra_vrf_clippy.c"
#endif
+#include "zebra/table_manager.h"
static void zebra_vrf_table_create(struct zebra_vrf *zvrf, afi_t afi,
safi_t safi);
@@ -113,6 +114,10 @@ static int zebra_vrf_new(struct vrf *vrf)
otable_init(&zvrf->other_tables);
router_id_init(zvrf);
+
+ /* Initiate Table Manager per ZNS */
+ table_manager_enable(zvrf);
+
return 0;
}
@@ -176,6 +181,8 @@ static int zebra_vrf_disable(struct vrf *vrf)
zlog_debug("VRF %s id %u is now inactive", zvrf_name(zvrf),
zvrf_id(zvrf));
+ table_manager_disable(zvrf);
+
/* Stop any VxLAN-EVPN processing. */
zebra_vxlan_vrf_disable(zvrf);
@@ -503,6 +510,12 @@ static int vrf_config_write(struct vty *vty)
if (zvrf->zebra_rnh_ipv6_default_route)
vty_out(vty, "ipv6 nht resolve-via-default\n");
+
+ if (zvrf->tbl_mgr
+ && (zvrf->tbl_mgr->start || zvrf->tbl_mgr->end))
+ vty_out(vty, "ip table range %u %u\n",
+ zvrf->tbl_mgr->start,
+ zvrf->tbl_mgr->end);
} else {
vty_frame(vty, "vrf %s\n", zvrf_name(zvrf));
if (zvrf->l3vni)
@@ -517,6 +530,12 @@ static int vrf_config_write(struct vty *vty)
if (zvrf->zebra_rnh_ipv6_default_route)
vty_out(vty, " ipv6 nht resolve-via-default\n");
+
+ if (zvrf->tbl_mgr && vrf_is_backend_netns()
+ && (zvrf->tbl_mgr->start || zvrf->tbl_mgr->end))
+ vty_out(vty, " ip table range %u %u\n",
+ zvrf->tbl_mgr->start,
+ zvrf->tbl_mgr->end);
}
diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h
index f32f09850..27342908c 100644
--- a/zebra/zebra_vrf.h
+++ b/zebra/zebra_vrf.h
@@ -177,6 +177,8 @@ struct zebra_vrf {
uint64_t lsp_installs;
uint64_t lsp_removals;
+ struct table_manager *tbl_mgr;
+
#if defined(HAVE_RTADV)
struct rtadv rtadv;
#endif /* HAVE_RTADV */
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index b204b30ca..b787c7c34 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -59,6 +59,7 @@
#include "northbound_cli.h"
#include "zebra/zebra_nb.h"
#include "zebra/kernel_netlink.h"
+#include "zebra/table_manager.h"
extern int allow_delete;
@@ -4298,6 +4299,31 @@ DEFUN_HIDDEN(no_zebra_kernel_netlink_batch_tx_buf,
#endif /* HAVE_NETLINK */
+DEFUN(ip_table_range, ip_table_range_cmd,
+ "[no] ip table range (1-4294967295) (1-4294967295)",
+ NO_STR IP_STR
+ "table configuration\n"
+ "Configure table range\n"
+ "Start Routing Table\n"
+ "End Routing Table\n")
+{
+ ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+
+ if (!zvrf)
+ return CMD_WARNING;
+
+ if (zvrf_id(zvrf) != VRF_DEFAULT && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "VRF subcommand does not make any sense in l3mdev based vrf's\n");
+ return CMD_WARNING;
+ }
+
+ if (strmatch(argv[0]->text, "no"))
+ return table_manager_range(vty, false, zvrf, NULL, NULL);
+
+ return table_manager_range(vty, true, zvrf, argv[3]->arg, argv[4]->arg);
+}
+
/* IP node for static routes. */
static int zebra_ip_config(struct vty *vty);
static struct cmd_node ip_node = {
@@ -4446,6 +4472,9 @@ void zebra_vty_init(void)
install_element(CONFIG_NODE, &zebra_dplane_queue_limit_cmd);
install_element(CONFIG_NODE, &no_zebra_dplane_queue_limit_cmd);
+ install_element(CONFIG_NODE, &ip_table_range_cmd);
+ install_element(VRF_NODE, &ip_table_range_cmd);
+
#ifdef HAVE_NETLINK
install_element(CONFIG_NODE, &zebra_kernel_netlink_batch_tx_buf_cmd);
install_element(CONFIG_NODE, &no_zebra_kernel_netlink_batch_tx_buf_cmd);
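
The validation performed by table_manager_range() on Linux can be restated as
below; the numeric bounds are the usual Linux reservations (RT_TABLE_ID_COMPAT
= 252 through RT_TABLE_ID_LOCAL = 255) and are an assumption here, since the
defines sit outside the hunks shown above. Note also that, per the vty warning
in table_manager_range(), moving an already configured range only takes effect
after a restart.

/* Standalone restatement of the Linux range checks in
 * table_manager_range(); the 252/255 bounds are assumed values for
 * RT_TABLE_ID_COMPAT/RT_TABLE_ID_LOCAL, not taken from this patch.
 */
#include <stdbool.h>
#include <stdint.h>

static bool table_range_is_valid(uint32_t start, uint32_t end)
{
	const uint32_t compat = 252, local = 255;

	if (end < start)
		return false;
	/* Neither endpoint may fall inside the reserved window... */
	if ((start >= compat && start <= local) ||
	    (end >= compat && end <= local))
		return false;
	/* ...and the range may not straddle it either. */
	if (start < compat && end > local)
		return false;
	return true;
}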