summaryrefslogtreecommitdiffstats
path: root/zebra
diff options
context:
space:
mode:
authorDonald Sharp <sharpd@cumulusnetworks.com>2019-01-03 17:56:35 +0100
committerDonald Sharp <sharpd@cumulusnetworks.com>2019-01-11 17:48:14 +0100
commitf52ed6779688e918bd53edd18574569313d7e023 (patch)
treec6af94e3e202ebd07e6012f963def88cfda550c7 /zebra
parentMerge pull request #3599 from donaldsharp/vtysh_sharp (diff)
downloadfrr-f52ed6779688e918bd53edd18574569313d7e023.tar.xz
frr-f52ed6779688e918bd53edd18574569313d7e023.zip
zebra: Limit meta_queue insertion to one time.
Modify the meta_queue insertion such that we only enqueue the route_node into one meta_queue instead of several. Suppose we have multiple route_entries associated with a particular node from rip, bgp, staticd. If we receive a route update from rip, we would enqueue the route_node into the 1, 2, 3 meta-nodes. Which means that we would run the entire process of figuring out a route 3 times, while nothing would change the second two times. Modify the code to choose the lowest meta-queue and install it into that one for processing. Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Diffstat (limited to 'zebra')
-rw-r--r--zebra/zebra_rib.c73
1 files changed, 48 insertions, 25 deletions
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index b4b19693e..ed3367edb 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -2144,43 +2144,66 @@ static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
return mq->size ? WQ_REQUEUE : WQ_SUCCESS;
}
-/* Look into the RN and queue it into one or more priority queues,
- * increasing the size for each data push done.
+
+/*
+ * Look into the RN and queue it into the highest priority queue
+ * at this point in time for processing.
+ *
+ * We will enqueue a route node only once per invocation.
+ *
+ * There are two possibilities here that should be kept in mind.
+ * If the original invocation has not been pulled off for processing
+ * yet, a subsequent invocation can have a route entry with a better
+ * meta queue index value and we can have a situation where
+ * we might have the same node enqueued twice. Not necessarily
+ * an optimal situation but it should be ok.
+ *
+ * The other possibility is that the original invocation has not
+ * been pulled off for processing yet, a subsequent invocation
+ * doesn't have a route_entry with a better meta-queue and the
+ * original metaqueue index value will win and we'll end up with
+ * the route node enqueued once.
*/
static void rib_meta_queue_add(struct meta_queue *mq, struct route_node *rn)
{
- struct route_entry *re;
+ struct route_entry *re = NULL, *curr_re = NULL;
+ uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+ struct zebra_vrf *zvrf;
- RNODE_FOREACH_RE (rn, re) {
- uint8_t qindex = route_info[re->type].meta_q_map;
- struct zebra_vrf *zvrf;
+ RNODE_FOREACH_RE (rn, curr_re) {
+ curr_qindex = route_info[curr_re->type].meta_q_map;
- /* Invariant: at this point we always have rn->info set. */
- if (CHECK_FLAG(rib_dest_from_rnode(rn)->flags,
- RIB_ROUTE_QUEUED(qindex))) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- rnode_debug(
- rn, re->vrf_id,
- "rn %p is already queued in sub-queue %u",
- (void *)rn, qindex);
- continue;
+ if (curr_qindex <= qindex) {
+ re = curr_re;
+ qindex = curr_qindex;
}
+ }
- SET_FLAG(rib_dest_from_rnode(rn)->flags,
- RIB_ROUTE_QUEUED(qindex));
- listnode_add(mq->subq[qindex], rn);
- route_lock_node(rn);
- mq->size++;
+ if (!re)
+ return;
+ /* Invariant: at this point we always have rn->info set. */
+ if (CHECK_FLAG(rib_dest_from_rnode(rn)->flags,
+ RIB_ROUTE_QUEUED(qindex))) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
rnode_debug(rn, re->vrf_id,
- "queued rn %p into sub-queue %u",
+ "rn %p is already queued in sub-queue %u",
(void *)rn, qindex);
-
- zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
- if (zvrf)
- zvrf->flags |= ZEBRA_VRF_RIB_SCHEDULED;
+ return;
}
+
+ SET_FLAG(rib_dest_from_rnode(rn)->flags, RIB_ROUTE_QUEUED(qindex));
+ listnode_add(mq->subq[qindex], rn);
+ route_lock_node(rn);
+ mq->size++;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %u",
+ (void *)rn, qindex);
+
+ zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
+ if (zvrf)
+ zvrf->flags |= ZEBRA_VRF_RIB_SCHEDULED;
}
/* Add route_node to work queue and schedule processing */