path: root/drivers/net/vxlan/vxlan_mdb.c
author	Ido Schimmel <idosch@nvidia.com>	2023-10-25 14:30:17 +0200
committer	David S. Miller <davem@davemloft.net>	2023-10-27 11:51:42 +0200
commit	32d9673e96dc636cbfca2381b2c93b7a15dc3369 (patch)
tree	16d7c7bdfe6ed253dfe210b45ffb1b33058c7ab3 /drivers/net/vxlan/vxlan_mdb.c
parent	bridge: mcast: Add MDB get support (diff)
vxlan: mdb: Add MDB get support
Implement support for MDB get operation by looking up a matching MDB
entry, allocating the skb according to the entry's size and then filling
in the response.

Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
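For orientation, the request side this handler expects is an RTM_GETMDB
message carrying a struct br_mdb_entry in MDBA_GET_ENTRY, plus an optional
MDBA_GET_ENTRY_ATTRS nest (iproute2 exposes the operation as "bridge mdb get"
in matching versions). Below is a minimal, hypothetical userspace sketch
built on libmnl; send_mdb_get() is an illustrative helper that is not part of
this patch or of iproute2, and the attribute layout follows the uapi headers.

/* Hypothetical sketch, not part of this patch: build and send the
 * RTM_GETMDB request that vxlan_mdb_get_parse() parses on the kernel
 * side. Assumes the caller opened and bound an NETLINK_ROUTE socket.
 */
#include <stdint.h>
#include <time.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/if_bridge.h>
#include <libmnl/libmnl.h>

static int send_mdb_get(struct mnl_socket *nl, const char *dev,
			__be32 group, uint32_t src_vni)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct br_mdb_entry entry = {};
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_GETMDB;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	nlh->nlmsg_seq = time(NULL);

	bpm = mnl_nlmsg_put_extra_header(nlh, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = if_nametoindex(dev);	/* the VXLAN netdev */

	/* Group to look up; an IPv4 group in this example. */
	entry.addr.u.ip4 = group;
	entry.addr.proto = htons(ETH_P_IP);
	mnl_attr_put(nlh, MDBA_GET_ENTRY, sizeof(entry), &entry);

	/* Optional attributes, e.g. a non-default source VNI. */
	nest = mnl_attr_nest_start(nlh, MDBA_GET_ENTRY_ATTRS);
	mnl_attr_put_u32(nlh, MDBE_ATTR_SRC_VNI, src_vni);
	mnl_attr_nest_end(nlh, nest);

	return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
}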
Diffstat (limited to 'drivers/net/vxlan/vxlan_mdb.c')
-rw-r--r--	drivers/net/vxlan/vxlan_mdb.c	150
1 file changed, 150 insertions, 0 deletions
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index b1af22fea90c..eb4c580b5cee 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -1306,6 +1306,156 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
 	return err;
 }
 
+static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
+	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
+					      sizeof(struct in_addr),
+					      sizeof(struct in6_addr)),
+	[MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+};
+
+static int vxlan_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
+			       struct vxlan_mdb_entry_key *group,
+			       struct netlink_ext_ack *extack)
+{
+	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
+	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	int err;
+
+	memset(group, 0, sizeof(*group));
+	group->vni = vxlan->default_dst.remote_vni;
+
+	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
+		vxlan_mdb_group_set(group, entry, NULL);
+		return 0;
+	}
+
+	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+			       tb[MDBA_GET_ENTRY_ATTRS],
+			       vxlan_mdbe_attrs_get_pol, extack);
+	if (err)
+		return err;
+
+	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
+	    !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE],
+				       entry->addr.proto, extack))
+		return -EINVAL;
+
+	vxlan_mdb_group_set(group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
+
+	if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
+		group->vni =
+			cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
+
+	return 0;
+}
+
+static struct sk_buff *
+vxlan_mdb_get_reply_alloc(const struct vxlan_dev *vxlan,
+			  const struct vxlan_mdb_entry *mdb_entry)
+{
+	struct vxlan_mdb_remote *remote;
+	size_t nlmsg_size;
+
+	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+		     /* MDBA_MDB */
+		     nla_total_size(0) +
+		     /* MDBA_MDB_ENTRY */
+		     nla_total_size(0);
+
+	list_for_each_entry(remote, &mdb_entry->remotes, list)
+		nlmsg_size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry,
+							  remote);
+
+	return nlmsg_new(nlmsg_size, GFP_KERNEL);
+}
+
+static int
+vxlan_mdb_get_reply_fill(const struct vxlan_dev *vxlan,
+			 struct sk_buff *skb,
+			 const struct vxlan_mdb_entry *mdb_entry,
+			 u32 portid, u32 seq)
+{
+	struct nlattr *mdb_nest, *mdb_entry_nest;
+	struct vxlan_mdb_remote *remote;
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = vxlan->dev->ifindex;
+	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
+	if (!mdb_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
+	if (!mdb_entry_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+
+	list_for_each_entry(remote, &mdb_entry->remotes, list) {
+		err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote);
+		if (err)
+			goto cancel;
+	}
+
+	nla_nest_end(skb, mdb_entry_nest);
+	nla_nest_end(skb, mdb_nest);
+	nlmsg_end(skb, nlh);
+
+	return 0;
+
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return err;
+}
+
+int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
+		  u32 seq, struct netlink_ext_ack *extack)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_mdb_entry *mdb_entry;
+	struct vxlan_mdb_entry_key group;
+	struct sk_buff *skb;
+	int err;
+
+	ASSERT_RTNL();
+
+	err = vxlan_mdb_get_parse(dev, tb, &group, extack);
+	if (err)
+		return err;
+
+	mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group);
+	if (!mdb_entry) {
+		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
+		return -ENOENT;
+	}
+
+	skb = vxlan_mdb_get_reply_alloc(vxlan, mdb_entry);
+	if (!skb)
+		return -ENOMEM;
+
+	err = vxlan_mdb_get_reply_fill(vxlan, skb, mdb_entry, portid, seq);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
+		goto free;
+	}
+
+	return rtnl_unicast(skb, dev_net(dev), portid);
+
+free:
+	kfree_skb(skb);
+	return err;
+}
+
 struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
 						struct sk_buff *skb,
 						__be32 src_vni)
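On success, vxlan_mdb_get() unicasts a single RTM_NEWMDB message back to the
requester, pre-sized per remote by vxlan_mdb_get_reply_alloc(); a failed
lookup comes back as -ENOENT with the "MDB entry not found" extack string.
Continuing the hypothetical libmnl sketch above (again, not part of this
patch; recv_mdb_get_reply() is an illustrative helper), the reply side
reduces to one receive and one callback run:

/* Hypothetical sketch: receive the unicast RTM_NEWMDB reply built by
 * vxlan_mdb_get_reply_fill(). A real tool would pass a callback that
 * walks the MDBA_MDB and MDBA_MDB_ENTRY nests instead of NULL.
 */
static int recv_mdb_get_reply(struct mnl_socket *nl, unsigned int seq)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	int ret;

	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret < 0)
		return -1;

	/* Returns MNL_CB_ERROR (and sets errno) if the kernel answered
	 * with an error message such as -ENOENT. */
	return mnl_cb_run(buf, ret, seq, mnl_socket_get_portid(nl),
			  NULL, NULL);
}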