author     Tonghao Zhang <xiangxia.m.yue@gmail.com>  2019-11-01 15:23:49 +0100
committer  David S. Miller <davem@davemloft.net>     2019-11-04 02:18:03 +0100
commit     57f7d7b9164426c496300d254fd5167fbbf205ea (patch)
tree       b5605cfd801ce05951f89196c937485bf76913c2 /net/openvswitch
parent     net: openvswitch: optimize flow mask cache hash collision (diff)
download   linux-57f7d7b9164426c496300d254fd5167fbbf205ea.tar.xz
           linux-57f7d7b9164426c496300d254fd5167fbbf205ea.zip
net: openvswitch: optimize flow-mask lookup
A full lookup on the flow table traverses the whole mask array. If the mask array grows too large, the number of invalid flow-mask slots increases and performance drops. One bad case, for example, where M means a valid flow-mask and NULL means a deleted one:

+-------------------------------------------+
| M | NULL | ...                 | NULL | M |
+-------------------------------------------+

In that case, without this patch, openvswitch traverses the whole mask array, because there is still one valid flow-mask at the tail. This patch changes how flow-masks are inserted and deleted, so that the mask array is kept packed as below, with no NULL holes. In the fast path, flow_lookup() can then "break" out of the "for" loop (instead of "continue") as soon as it hits a NULL flow-mask:

        "break"
           v
+-------------------------------------------+
| M | M | NULL | ...            | NULL | NULL|
+-------------------------------------------+

This patch does not optimize the slow or control path, which still uses ma->max to traverse:

* tbl_mask_array_realloc
* ovs_flow_tbl_lookup_exact
* flow_mask_find

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Tested-by: Greg Rose <gvrose8192@gmail.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
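The core idea is that deletion moves the last valid mask pointer into the freed slot, so valid entries stay packed at the front and the fast-path scan can stop at the first NULL. Below is a minimal userspace sketch of that invariant, not the kernel code: struct mask, mask_array_del() and mask_array_find() are illustrative stand-ins, and the real functions additionally use RCU accessors (rcu_assign_pointer, READ_ONCE/WRITE_ONCE, kfree_rcu) as shown in the diff.

/*
 * Simplified sketch of a packed mask array: unused slots are NULL and
 * only appear after the last valid entry (array assumed zero-initialized).
 */
#include <stdbool.h>
#include <stddef.h>

#define MA_MAX 16

struct mask;                            /* stand-in for struct sw_flow_mask */

struct mask_array {
	int count;                      /* valid entries, packed at the front */
	struct mask *masks[MA_MAX];
};

/* Delete 'mask' without leaving a NULL hole behind. */
static void mask_array_del(struct mask_array *ma, struct mask *mask)
{
	int i;

	for (i = 0; i < ma->count; i++) {
		if (ma->masks[i] != mask)
			continue;
		/* Swap in the last valid entry, then shrink the count. */
		ma->masks[i] = ma->masks[ma->count - 1];
		ma->masks[ma->count - 1] = NULL;
		ma->count--;
		return;
	}
}

/* Fast-path scan: the first NULL marks the end of all valid masks. */
static struct mask *mask_array_find(struct mask_array *ma,
				    bool (*match)(const struct mask *))
{
	int i;

	for (i = 0; i < MA_MAX; i++) {
		if (!ma->masks[i])
			break;          /* nothing valid past this point */
		if (match(ma->masks[i]))
			return ma->masks[i];
	}
	return NULL;
}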
Diffstat (limited to 'net/openvswitch')
-rw-r--r--  net/openvswitch/flow_table.c  104
1 file changed, 53 insertions, 51 deletions
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c7ba43524c2a..a10d421773f8 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -518,8 +518,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
u32 *n_mask_hit,
u32 *index)
{
- struct sw_flow_mask *mask;
struct sw_flow *flow;
+ struct sw_flow_mask *mask;
int i;
if (*index < ma->max) {
@@ -538,7 +538,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
mask = rcu_dereference_ovsl(ma->masks[i]);
if (!mask)
- continue;
+ break;
flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
if (flow) { /* Found */
@@ -695,8 +695,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
-
- return ma->count;
+ return READ_ONCE(ma->count);
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
@@ -705,21 +704,33 @@ static struct table_instance *table_instance_expand(struct table_instance *ti,
return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
-static void tbl_mask_array_delete_mask(struct mask_array *ma,
- struct sw_flow_mask *mask)
+static void tbl_mask_array_del_mask(struct flow_table *tbl,
+ struct sw_flow_mask *mask)
{
- int i;
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int i, ma_count = READ_ONCE(ma->count);
/* Remove the deleted mask pointers from the array */
- for (i = 0; i < ma->max; i++) {
- if (mask == ovsl_dereference(ma->masks[i])) {
- RCU_INIT_POINTER(ma->masks[i], NULL);
- ma->count--;
- kfree_rcu(mask, rcu);
- return;
- }
+ for (i = 0; i < ma_count; i++) {
+ if (mask == ovsl_dereference(ma->masks[i]))
+ goto found;
}
+
BUG();
+ return;
+
+found:
+ WRITE_ONCE(ma->count, ma_count -1);
+
+ rcu_assign_pointer(ma->masks[i], ma->masks[ma_count -1]);
+ RCU_INIT_POINTER(ma->masks[ma_count -1], NULL);
+
+ kfree_rcu(mask, rcu);
+
+ /* Shrink the mask array if necessary. */
+ if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
+ ma_count <= (ma->max / 3))
+ tbl_mask_array_realloc(tbl, ma->max / 2);
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
@@ -733,17 +744,8 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
BUG_ON(!mask->ref_count);
mask->ref_count--;
- if (!mask->ref_count) {
- struct mask_array *ma;
-
- ma = ovsl_dereference(tbl->mask_array);
- tbl_mask_array_delete_mask(ma, mask);
-
- /* Shrink the mask array if necessary. */
- if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
- ma->count <= (ma->max / 3))
- tbl_mask_array_realloc(tbl, ma->max / 2);
- }
+ if (!mask->ref_count)
+ tbl_mask_array_del_mask(tbl, mask);
}
}
@@ -807,6 +809,29 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
return NULL;
}
+static int tbl_mask_array_add_mask(struct flow_table *tbl,
+ struct sw_flow_mask *new)
+{
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int err, ma_count = READ_ONCE(ma->count);
+
+ if (ma_count >= ma->max) {
+ err = tbl_mask_array_realloc(tbl, ma->max +
+ MASK_ARRAY_SIZE_MIN);
+ if (err)
+ return err;
+
+ ma = ovsl_dereference(tbl->mask_array);
+ }
+
+ BUG_ON(ovsl_dereference(ma->masks[ma_count]));
+
+ rcu_assign_pointer(ma->masks[ma_count], new);
+ WRITE_ONCE(ma->count, ma_count +1);
+
+ return 0;
+}
+
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
const struct sw_flow_mask *new)
@@ -815,9 +840,6 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
mask = flow_mask_find(tbl, new);
if (!mask) {
- struct mask_array *ma;
- int i;
-
/* Allocate a new mask if none exsits. */
mask = mask_alloc();
if (!mask)
@@ -826,29 +848,9 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
mask->range = new->range;
/* Add mask to mask-list. */
- ma = ovsl_dereference(tbl->mask_array);
- if (ma->count >= ma->max) {
- int err;
-
- err = tbl_mask_array_realloc(tbl, ma->max +
- MASK_ARRAY_SIZE_MIN);
- if (err) {
- kfree(mask);
- return err;
- }
-
- ma = ovsl_dereference(tbl->mask_array);
- }
-
- for (i = 0; i < ma->max; i++) {
- const struct sw_flow_mask *t;
-
- t = ovsl_dereference(ma->masks[i]);
- if (!t) {
- rcu_assign_pointer(ma->masks[i], mask);
- ma->count++;
- break;
- }
+ if (tbl_mask_array_add_mask(tbl, mask)) {
+ kfree(mask);
+ return -ENOMEM;
}
} else {
BUG_ON(!mask->ref_count);
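For the insert side handled by tbl_mask_array_add_mask() above, a new mask is appended at index ma->count (after growing the array by MASK_ARRAY_SIZE_MIN slots if it is full), and the count is only bumped after the pointer is published, so readers never see an unpopulated slot. A hedged companion sketch using the same simplified types as the earlier sketch (illustrative only, without RCU or reallocation):

#include <errno.h>
#include <stddef.h>

#define MA_MAX 16

struct mask;                            /* stand-in, as in the sketch above */

struct mask_array {
	int count;                      /* valid entries, packed at the front */
	struct mask *masks[MA_MAX];
};

/* Append the new mask at index 'count' so the packed invariant holds. */
static int mask_array_add(struct mask_array *ma, struct mask *new_mask)
{
	if (ma->count >= MA_MAX)
		return -ENOSPC;         /* the kernel grows the array instead */

	ma->masks[ma->count] = new_mask;
	ma->count++;                    /* bumped only after the slot is set */
	return 0;
}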