Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c')
 -rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 103
 1 file changed, 83 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2ba49e959c3f..63b1ecc18c26 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -435,7 +435,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(&ch->napi, skb);
+ list_add_tail(&skb->list, ch->rx_list);
return;
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
*/
queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
+
+ fd_len = dpaa2_fd_get_len(&fd);
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
+
+ /* Everything that happens after this enqueues might race with
+ * the Tx confirmation callback for this frame
+ */
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, fq, &fd, false);
+ netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += fd_len;
-
- nq = netdev_get_tx_queue(net_dev, queue_mapping);
- netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
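Note the ordering change in this hunk: netdev_tx_sent_queue() now runs before the frame is handed to hardware, because once the enqueue succeeds the Tx confirmation path may complete the frame (and call netdev_tx_completed_queue()) at any moment; if the enqueue ultimately fails, the accounting is undone alongside the skb cleanup. A minimal sketch of this byte-queue-limits (BQL) pairing, with my_hw_enqueue() as a hypothetical stand-in for the device enqueue:

/* Sketch of BQL accounting around an enqueue that races with Tx completion.
 * my_hw_enqueue() is hypothetical; netdev_tx_sent_queue() and
 * netdev_tx_completed_queue() are the real BQL helpers.
 */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;
	u16 q = skb_get_queue_mapping(skb);
	struct netdev_queue *nq = netdev_get_tx_queue(dev, q);
	int err;

	/* Account the bytes before the enqueue: once the frame reaches
	 * hardware, the Tx confirmation path may complete it at any time.
	 */
	netdev_tx_sent_queue(nq, len);

	err = my_hw_enqueue(dev, skb, q);	/* hypothetical */
	if (err) {
		/* Enqueue failed: roll back the BQL accounting and drop */
		netdev_tx_completed_queue(nq, 1, len);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}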
@@ -1108,12 +1113,16 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ struct list_head rx_list;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
ch->xdp.res = 0;
priv = ch->priv;
+ INIT_LIST_HEAD(&rx_list);
+ ch->rx_list = &rx_list;
+
do {
err = pull_channel(ch);
if (unlikely(err))
@@ -1157,6 +1166,8 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
work_done = max(rx_cleaned, 1);
out:
+ netif_receive_skb_list(ch->rx_list);
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
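With the Rx hook above chaining skbs instead of delivering them one by one through napi_gro_receive(), the poll routine flushes the whole batch once per cycle via netif_receive_skb_list(), amortizing the per-packet cost of entering the stack. A minimal sketch of the pattern in a generic NAPI driver, where my_pull_frames() is hypothetical:

/* Sketch of list-batched Rx delivery in a NAPI poll handler.
 * list_add_tail() on skb->list and netif_receive_skb_list() are the real
 * kernel primitives; the my_* helpers are hypothetical.
 */
static void my_rx_frame(struct sk_buff *skb, struct list_head *rx_list)
{
	/* Defer delivery: queue the skb on the per-poll list */
	list_add_tail(&skb->list, rx_list);
}

static int my_poll(struct napi_struct *napi, int budget)
{
	LIST_HEAD(rx_list);
	int work_done;

	/* Dequeue up to 'budget' frames, chaining each via my_rx_frame() */
	work_done = my_pull_frames(napi, budget, &rx_list);	/* hypothetical */

	/* One batched hand-off to the network stack per poll cycle */
	netif_receive_skb_list(&rx_list);

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}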
@@ -1817,7 +1828,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
dpaa2_fd_set_format(&fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
- fq = &priv->fq[smp_processor_id()];
+ fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
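The index into priv->fq[] is now taken modulo the number of configured queues; on systems with more online CPUs than Tx frame queues, a raw smp_processor_id() could otherwise run past the end of the array. In generic form, with my_num_queues as a hypothetical count:

/* Hedged sketch: keep the per-CPU queue index within bounds when CPUs
 * outnumber queues (my_num_queues is hypothetical).
 */
unsigned int cpu = smp_processor_id();
unsigned int idx = cpu % my_num_queues;	/* always in [0, my_num_queues) */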
@@ -2560,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L2DA,
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
.size = 6,
}, {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
.size = 6,
}, {
/* This is the last ethertype field parsed:
@@ -2572,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
*/
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
.size = 2,
}, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
.cls_prot = NET_PROT_VLAN,
.cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
.size = 2,
}, {
/* IP header */
.rxnfc_field = RXH_IP_SRC,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
.size = 4,
}, {
.rxnfc_field = RXH_IP_DST,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
.size = 4,
}, {
.rxnfc_field = RXH_L3_PROTO,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
.size = 1,
}, {
/* Using UDP ports, this is functionally equivalent to raw
@@ -2602,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L4_B_0_1,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
.size = 2,
}, {
.rxnfc_field = RXH_L4_B_2_3,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
.size = 2,
},
};
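Every entry in dist_fields now carries an .id bit so callers can name an arbitrary subset of fields through a u64 mask. The flag definitions themselves are not shown in this diff; assuming they follow the usual one-bit-per-field scheme in dpaa2-eth.h, they would look roughly like:

/* Hypothetical illustration only; the authoritative values live in
 * dpaa2-eth.h and are not part of this diff.
 */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)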
@@ -2672,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
{
int i, size = 0;
- for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (!(fields & dist_fields[i].id))
+ continue;
size += dist_fields[i].size;
+ }
return size;
}
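dpaa2_eth_cls_key_size() now sums only the fields selected by the mask instead of the full supported set. For example, using the sizes listed in dist_fields above, a key covering the IPv4 addresses plus the L4 destination port occupies 4 + 4 + 2 = 10 bytes:

/* Hedged usage example, reusing the field sizes from dist_fields */
u64 fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST |
	     DPAA2_ETH_DIST_L4DST;
int key_size = dpaa2_eth_cls_key_size(fields);	/* 4 + 4 + 2 = 10 bytes */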
@@ -2698,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
return 0;
}
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+ int off = 0, new_off = 0;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ size = dist_fields[i].size;
+ if (dist_fields[i].id & fields) {
+ memcpy(key_mem + new_off, key_mem + off, size);
+ new_off += size;
+ }
+ off += size;
+ }
+}
+
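dpaa2_eth_cls_trim_rule() compacts a key buffer laid out for every supported field down to just the selected ones, keeping their relative order. With the layout from dist_fields above, selecting only DPAA2_ETH_DIST_IPSRC and DPAA2_ETH_DIST_L4DST copies the 4 bytes at offset 16 (IP source) and the 2 bytes at offset 27 (UDP destination port) to offsets 0 and 4, leaving a 6-byte key:

/* Hedged usage example; key_mem is a buffer previously filled using the
 * full (all-fields) key layout.
 */
u64 fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_L4DST;

dpaa2_eth_cls_trim_rule(key_mem, fields);	/* key_mem now holds 4 + 2 bytes */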
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
@@ -2719,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
- /* For Rx hashing key we set only the selected fields.
- * For Rx flow classification key we set all supported fields
+ /* For both Rx hashing and classification keys
+ * we set only the selected fields.
*/
- if (type == DPAA2_ETH_RX_DIST_HASH) {
- if (!(flags & dist_fields[i].rxnfc_field))
- continue;
+ if (!(flags & dist_fields[i].id))
+ continue;
+ if (type == DPAA2_ETH_RX_DIST_HASH)
rx_hash_fields |= dist_fields[i].rxnfc_field;
- }
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2781,16 +2821,28 @@ free_key:
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 key = 0;
+ int i;
if (!dpaa2_eth_hash_enabled(priv))
return -EOPNOTSUPP;
- return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ if (dist_fields[i].rxnfc_field & flags)
+ key |= dist_fields[i].id;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}
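dpaa2_eth_set_hash() keeps its RXH_* interface towards ethtool but now translates those flags into the driver's internal field ids before building the distribution key, for example:

/* Hedged usage example: hash Rx traffic on the IPv4 3-tuple */
int err;

err = dpaa2_eth_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST | RXH_L3_PROTO);
/* internally selects DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST |
 * DPAA2_ETH_DIST_IPPROTO
 */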
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
+ int err;
/* Check if we actually support Rx flow classification */
if (dpaa2_eth_has_legacy_dist(priv)) {
@@ -2798,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
- if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
- !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ if (!dpaa2_eth_fs_enabled(priv)) {
dev_dbg(dev, "Rx cls disabled in DPNI options\n");
return -EOPNOTSUPP;
}
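The open-coded DPNI option test is folded into dpaa2_eth_fs_enabled(), and dpaa2_eth_fs_mask_enabled() below distinguishes hardware that supports key masking. Their definitions are not part of this diff; judging from the check they replace, they presumably wrap the same two option bits, roughly:

/* Hedged sketch; the authoritative helpers live in dpaa2-eth.h */
#define dpaa2_eth_fs_enabled(priv)	\
	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))

#define dpaa2_eth_fs_mask_enabled(priv)	\
	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)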
@@ -2809,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
+
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
+ return err;
+
+out:
priv->rx_cls_enabled = 1;
- return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+ return 0;
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
@@ -2846,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
/* Configure the flow classification key; it includes all
* supported header fields and cannot be modified at runtime
*/
- err = dpaa2_eth_set_cls(priv);
+ err = dpaa2_eth_set_default_cls(priv);
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure Rx classification key\n");