author		Dmitry Bogdanov <dbogdanov@marvell.com>	2020-03-25 13:52:37 +0100
committer	David S. Miller <davem@davemloft.net>	2020-03-27 04:17:36 +0100
commit		b62c3624500a7e1cc081e75973299c1f7901a438 (patch)
tree		792e2de3a835e3e004537d47d5890e8a242bb298 /drivers/net/macsec.c
parent		net: macsec: support multicast/broadcast when offloading (diff)
net: macsec: add support for getting offloaded stats
When HW offloading is enabled, the offloaded stats should be reported,
because in this case the s/w stats are incomplete and out of sync with the HW.
Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
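The core-side change is mechanical: each copy_*_stats() helper is split into
a get_*_stats() half that fills a caller-provided sum (from the device when
offloading is active, otherwise from the per-CPU software counters) and a
copy_*_stats() half that only serializes the sum to netlink. Offloading
drivers are expected to implement the new mdo_get_*_stats callbacks. Below is
a minimal sketch of one such callback for a hypothetical "foo" driver;
foo_hw_from_secy(), foo_read_cnt() and the FOO_* register names are invented
for illustration, while struct macsec_context, ctx->stats.tx_sc_stats and the
macsec_tx_sc_stats counter fields are the ones this patch uses:

/* Hypothetical driver-side callback, not part of this patch. */
static int foo_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	/* Invented helpers: look up the device behind this SecY and read
	 * an accumulated 64-bit counter from it.
	 */
	struct foo_hw *hw = foo_hw_from_secy(ctx->secy);
	struct macsec_tx_sc_stats *sum = ctx->stats.tx_sc_stats;

	sum->OutPktsProtected   = foo_read_cnt(hw, FOO_TX_PKTS_PROTECTED);
	sum->OutPktsEncrypted   = foo_read_cnt(hw, FOO_TX_PKTS_ENCRYPTED);
	sum->OutOctetsProtected = foo_read_cnt(hw, FOO_TX_OCTETS_PROTECTED);
	sum->OutOctetsEncrypted = foo_read_cnt(hw, FOO_TX_OCTETS_ENCRYPTED);

	return 0;
}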
Diffstat (limited to 'drivers/net/macsec.c')
-rw-r--r--	drivers/net/macsec.c	321
1 file changed, 213 insertions(+), 108 deletions(-)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c7ad7c6f1d1e..b00a078d13ff 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -88,17 +88,6 @@ struct gcm_iv {
 	__be32 pn;
 };
 
-struct macsec_dev_stats {
-	__u64 OutPktsUntagged;
-	__u64 InPktsUntagged;
-	__u64 OutPktsTooLong;
-	__u64 InPktsNoTag;
-	__u64 InPktsBadTag;
-	__u64 InPktsUnknownSCI;
-	__u64 InPktsNoSCI;
-	__u64 InPktsOverrun;
-};
-
 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
 
 struct pcpu_secy_stats {
@@ -2653,207 +2642,309 @@ rollback:
 	return ret;
 }
 
-static int copy_tx_sa_stats(struct sk_buff *skb,
-			    struct macsec_tx_sa_stats __percpu *pstats)
+static void get_tx_sa_stats(struct net_device *dev, int an,
+			    struct macsec_tx_sa *tx_sa,
+			    struct macsec_tx_sa_stats *sum)
 {
-	struct macsec_tx_sa_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.sa.assoc_num = an;
+			ctx.sa.tx_sa = tx_sa;
+			ctx.stats.tx_sa_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
-		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+		const struct macsec_tx_sa_stats *stats =
+			per_cpu_ptr(tx_sa->stats, cpu);
 
-		sum.OutPktsProtected += stats->OutPktsProtected;
-		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
+		sum->OutPktsProtected += stats->OutPktsProtected;
+		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
 	}
+}
 
-	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
+static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
+{
+	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
+			sum->OutPktsProtected) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+			sum->OutPktsEncrypted))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_rx_sa_stats(struct sk_buff *skb,
-		 struct macsec_rx_sa_stats __percpu *pstats)
+static void get_rx_sa_stats(struct net_device *dev,
+			    struct macsec_rx_sc *rx_sc, int an,
+			    struct macsec_rx_sa *rx_sa,
+			    struct macsec_rx_sa_stats *sum)
 {
-	struct macsec_rx_sa_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.sa.assoc_num = an;
+			ctx.sa.rx_sa = rx_sa;
+			ctx.stats.rx_sa_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			ctx.rx_sc = rx_sc;
+			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
-		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+		const struct macsec_rx_sa_stats *stats =
+			per_cpu_ptr(rx_sa->stats, cpu);
 
-		sum.InPktsOK += stats->InPktsOK;
-		sum.InPktsInvalid += stats->InPktsInvalid;
-		sum.InPktsNotValid += stats->InPktsNotValid;
-		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
-		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
+		sum->InPktsOK += stats->InPktsOK;
+		sum->InPktsInvalid += stats->InPktsInvalid;
+		sum->InPktsNotValid += stats->InPktsNotValid;
+		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
+		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
 	}
+}
 
-	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+static int copy_rx_sa_stats(struct sk_buff *skb,
+			    struct macsec_rx_sa_stats *sum)
+{
+	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
+			sum->InPktsInvalid) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
+			sum->InPktsNotValid) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+			sum->InPktsNotUsingSA) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
+			sum->InPktsUnusedSA))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
+static void get_rx_sc_stats(struct net_device *dev,
+			    struct macsec_rx_sc *rx_sc,
+			    struct macsec_rx_sc_stats *sum)
 {
-	struct macsec_rx_sc_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.stats.rx_sc_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			ctx.rx_sc = rx_sc;
+			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const struct pcpu_rx_sc_stats *stats;
 		struct macsec_rx_sc_stats tmp;
 		unsigned int start;
 
-		stats = per_cpu_ptr(pstats, cpu);
+		stats = per_cpu_ptr(rx_sc->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			memcpy(&tmp, &stats->stats, sizeof(tmp));
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		sum.InOctetsValidated += tmp.InOctetsValidated;
-		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
-		sum.InPktsUnchecked += tmp.InPktsUnchecked;
-		sum.InPktsDelayed += tmp.InPktsDelayed;
-		sum.InPktsOK += tmp.InPktsOK;
-		sum.InPktsInvalid += tmp.InPktsInvalid;
-		sum.InPktsLate += tmp.InPktsLate;
-		sum.InPktsNotValid += tmp.InPktsNotValid;
-		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
-		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
+		sum->InOctetsValidated += tmp.InOctetsValidated;
+		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
+		sum->InPktsUnchecked += tmp.InPktsUnchecked;
+		sum->InPktsDelayed += tmp.InPktsDelayed;
+		sum->InPktsOK += tmp.InPktsOK;
+		sum->InPktsInvalid += tmp.InPktsInvalid;
+		sum->InPktsLate += tmp.InPktsLate;
+		sum->InPktsNotValid += tmp.InPktsNotValid;
+		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
+		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
 	}
+}
 
+static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
+{
 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
-			      sum.InOctetsValidated,
+			      sum->InOctetsValidated,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
-			      sum.InOctetsDecrypted,
+			      sum->InOctetsDecrypted,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
-			      sum.InPktsUnchecked,
+			      sum->InPktsUnchecked,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
-			      sum.InPktsDelayed,
+			      sum->InPktsDelayed,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
-			      sum.InPktsOK,
+			      sum->InPktsOK,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
-			      sum.InPktsInvalid,
+			      sum->InPktsInvalid,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
-			      sum.InPktsLate,
+			      sum->InPktsLate,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
-			      sum.InPktsNotValid,
+			      sum->InPktsNotValid,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
-			      sum.InPktsNotUsingSA,
+			      sum->InPktsNotUsingSA,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
-			      sum.InPktsUnusedSA,
+			      sum->InPktsUnusedSA,
 			      MACSEC_RXSC_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
+static void get_tx_sc_stats(struct net_device *dev,
+			    struct macsec_tx_sc_stats *sum)
 {
-	struct macsec_tx_sc_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.stats.tx_sc_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const struct pcpu_tx_sc_stats *stats;
 		struct macsec_tx_sc_stats tmp;
 		unsigned int start;
 
-		stats = per_cpu_ptr(pstats, cpu);
+		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			memcpy(&tmp, &stats->stats, sizeof(tmp));
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		sum.OutPktsProtected += tmp.OutPktsProtected;
-		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
-		sum.OutOctetsProtected += tmp.OutOctetsProtected;
-		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
+		sum->OutPktsProtected += tmp.OutPktsProtected;
+		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
+		sum->OutOctetsProtected += tmp.OutOctetsProtected;
+		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
 	}
+}
 
+static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
+{
 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
-			      sum.OutPktsProtected,
+			      sum->OutPktsProtected,
 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
-			      sum.OutPktsEncrypted,
+			      sum->OutPktsEncrypted,
 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
-			      sum.OutOctetsProtected,
+			      sum->OutOctetsProtected,
 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
-			      sum.OutOctetsEncrypted,
+			      sum->OutOctetsEncrypted,
 			      MACSEC_TXSC_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
+static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
 {
-	struct macsec_dev_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.stats.dev_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			macsec_offload(ops->mdo_get_dev_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const struct pcpu_secy_stats *stats;
 		struct macsec_dev_stats tmp;
 		unsigned int start;
 
-		stats = per_cpu_ptr(pstats, cpu);
+		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			memcpy(&tmp, &stats->stats, sizeof(tmp));
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		sum.OutPktsUntagged += tmp.OutPktsUntagged;
-		sum.InPktsUntagged += tmp.InPktsUntagged;
-		sum.OutPktsTooLong += tmp.OutPktsTooLong;
-		sum.InPktsNoTag += tmp.InPktsNoTag;
-		sum.InPktsBadTag += tmp.InPktsBadTag;
-		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
-		sum.InPktsNoSCI += tmp.InPktsNoSCI;
-		sum.InPktsOverrun += tmp.InPktsOverrun;
+		sum->OutPktsUntagged += tmp.OutPktsUntagged;
+		sum->InPktsUntagged += tmp.InPktsUntagged;
+		sum->OutPktsTooLong += tmp.OutPktsTooLong;
+		sum->InPktsNoTag += tmp.InPktsNoTag;
+		sum->InPktsBadTag += tmp.InPktsBadTag;
+		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
+		sum->InPktsNoSCI += tmp.InPktsNoSCI;
+		sum->InPktsOverrun += tmp.InPktsOverrun;
 	}
+}
 
+static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
+{
 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
-			      sum.OutPktsUntagged,
+			      sum->OutPktsUntagged,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
-			      sum.InPktsUntagged,
+			      sum->InPktsUntagged,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
-			      sum.OutPktsTooLong,
+			      sum->OutPktsTooLong,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
-			      sum.InPktsNoTag,
+			      sum->InPktsNoTag,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
-			      sum.InPktsBadTag,
+			      sum->InPktsBadTag,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
-			      sum.InPktsUnknownSCI,
+			      sum->InPktsUnknownSCI,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
-			      sum.InPktsNoSCI,
+			      sum->InPktsNoSCI,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
-			      sum.InPktsOverrun,
+			      sum->InPktsOverrun,
 			      MACSEC_SECY_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
@@ -2914,7 +3005,12 @@ static noinline_for_stack int
 dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	  struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct macsec_tx_sc_stats tx_sc_stats = {0, };
+	struct macsec_tx_sa_stats tx_sa_stats = {0, };
+	struct macsec_rx_sc_stats rx_sc_stats = {0, };
+	struct macsec_rx_sa_stats rx_sa_stats = {0, };
 	struct macsec_dev *macsec = netdev_priv(dev);
+	struct macsec_dev_stats dev_stats = {0, };
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	struct nlattr *txsa_list, *rxsc_list;
 	struct macsec_rx_sc *rx_sc;
@@ -2945,7 +3041,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
 	if (!attr)
 		goto nla_put_failure;
-	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
+
+	get_tx_sc_stats(dev, &tx_sc_stats);
+	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
 		nla_nest_cancel(skb, attr);
 		goto nla_put_failure;
 	}
@@ -2954,7 +3052,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
 	if (!attr)
 		goto nla_put_failure;
-	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
+	get_secy_stats(dev, &dev_stats);
+	if (copy_secy_stats(skb, &dev_stats)) {
 		nla_nest_cancel(skb, attr);
 		goto nla_put_failure;
 	}
@@ -2978,6 +3077,22 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 			goto nla_put_failure;
 		}
 
+		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
+		if (!attr) {
+			nla_nest_cancel(skb, txsa_nest);
+			nla_nest_cancel(skb, txsa_list);
+			goto nla_put_failure;
+		}
+		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
+		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
+		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
+			nla_nest_cancel(skb, attr);
+			nla_nest_cancel(skb, txsa_nest);
+			nla_nest_cancel(skb, txsa_list);
+			goto nla_put_failure;
+		}
+		nla_nest_end(skb, attr);
+
 		if (secy->xpn) {
 			pn = tx_sa->next_pn;
 			pn_len = MACSEC_XPN_PN_LEN;
@@ -2996,20 +3111,6 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 			goto nla_put_failure;
 		}
 
-		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
-		if (!attr) {
-			nla_nest_cancel(skb, txsa_nest);
-			nla_nest_cancel(skb, txsa_list);
-			goto nla_put_failure;
-		}
-		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
-			nla_nest_cancel(skb, attr);
-			nla_nest_cancel(skb, txsa_nest);
-			nla_nest_cancel(skb, txsa_list);
-			goto nla_put_failure;
-		}
-		nla_nest_end(skb, attr);
-
 		nla_nest_end(skb, txsa_nest);
 	}
 	nla_nest_end(skb, txsa_list);
@@ -3043,7 +3144,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 			nla_nest_cancel(skb, rxsc_list);
 			goto nla_put_failure;
 		}
-		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
+		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
+		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
+		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
 			nla_nest_cancel(skb, attr);
 			nla_nest_cancel(skb, rxsc_nest);
 			nla_nest_cancel(skb, rxsc_list);
@@ -3084,7 +3187,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 				nla_nest_cancel(skb, rxsc_list);
 				goto nla_put_failure;
 			}
-			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
+			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
+			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
+			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
 				nla_nest_cancel(skb, attr);
 				nla_nest_cancel(skb, rxsa_list);
 				nla_nest_cancel(skb, rxsc_nest);
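The netlink ABI is unchanged by this patch: the counters still travel in the
MACSEC_ATTR_TXSC_STATS / MACSEC_ATTR_SECY_STATS / MACSEC_SA_ATTR_STATS nests
of a MACSEC_CMD_GET_TXSC dump, only their origin differs, so existing
consumers such as `ip -s macsec show` pick up the device counters
transparently. A rough userspace sketch of such a dump using libnl-3 (error
handling omitted; the command and attribute names come from the
<linux/if_macsec.h> uapi header):

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/if_macsec.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Print OutPktsEncrypted from each SecY's TXSC stats nest. */
static int dump_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[MACSEC_ATTR_MAX + 1];
	struct nlattr *stats[MACSEC_TXSC_STATS_ATTR_MAX + 1];

	genlmsg_parse(nlmsg_hdr(msg), 0, attrs, MACSEC_ATTR_MAX, NULL);
	if (!attrs[MACSEC_ATTR_TXSC_STATS])
		return NL_OK;

	nla_parse_nested(stats, MACSEC_TXSC_STATS_ATTR_MAX,
			 attrs[MACSEC_ATTR_TXSC_STATS], NULL);
	if (stats[MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED])
		printf("OutPktsEncrypted: %llu\n",
		       (unsigned long long)nla_get_u64(
				stats[MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int fam;

	genl_connect(sk);
	fam = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, dump_cb, NULL);
	genl_send_simple(sk, fam, MACSEC_CMD_GET_TXSC, MACSEC_GENL_VERSION,
			 NLM_F_DUMP);
	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}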