author     Tom Zhao <tzhao@solarflare.com>          2020-03-05 12:38:45 +0100
committer  David S. Miller <davem@davemloft.net>    2020-03-05 23:56:57 +0100
commit     3b4f06c715d0d3ecd6497275e3c85fe91462d0ee (patch)
tree       ce62bc533650ccd16ca01c9dd325a96b62999c37 /drivers/net/ethernet/sfc/tx.c
parent     net: hns3: fix a not link up issue when fibre port supports autoneg (diff)
sfc: complete the next packet when we receive a timestamp
We now ignore the "completion" event when using TX queue timestamping,
and only pay attention to the two (high and low) timestamp events. The
NIC will send a pair of timestamp events for every packet transmitted.
The current firmware may merge the completion events, and it is possible
that future versions may reorder the completion and timestamp events.
As such, the completion event is not useful.
Without this patch in place, a merged completion event on a queue with
timestamping enabled causes a "spurious TX completion" error. This
affects SFN8000-series adapters.
Signed-off-by: Tom Zhao <tzhao@solarflare.com>
Acked-by: Martin Habets <mhabets@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
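
The hunk below adds the new completion path, efx_xmit_done_single(); the
event-handling code that decides when to call it lives outside this file and
is not shown here. As a rough sketch of the dispatch described above (the
event-decoding helpers tx_ev_is_completion(), tx_ev_is_timestamp_hi() and
tx_ev_desc_ptr() are hypothetical stand-ins, not the driver's actual API):

/* Illustrative sketch only -- not part of this patch.  The event-decoding
 * helpers are hypothetical stand-ins, used here just to make the dispatch
 * described in the commit message concrete.
 */
static void example_handle_tx_event(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *event)
{
	if (tx_queue->timestamping) {
		/* Completion events may be merged (or, in future firmware,
		 * reordered), so they are ignored on timestamping queues...
		 */
		if (tx_ev_is_completion(event))
			return;

		/* ...and the packet is completed when the second (high)
		 * timestamp event of the pair arrives.
		 */
		if (tx_ev_is_timestamp_hi(event))
			efx_xmit_done_single(tx_queue);
	} else {
		/* Queues without timestamping keep the normal path. */
		efx_xmit_done(tx_queue, tx_ev_desc_ptr(event));
	}
}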
Diffstat (limited to 'drivers/net/ethernet/sfc/tx.c')
-rw-r--r--	drivers/net/ethernet/sfc/tx.c	38
1 file changed, 38 insertions, 0 deletions
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 04d7f41d7ed9..8aafc54a4684 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -535,6 +535,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	unsigned int read_ptr;
+	bool finished = false;
+
+	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+	while (!finished) {
+		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+		if (!efx_tx_buffer_in_use(buffer)) {
+			struct efx_nic *efx = tx_queue->efx;
+
+			netif_err(efx, hw, efx->net_dev,
+				  "TX queue %d spurious single TX completion\n",
+				  tx_queue->queue);
+			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+			return;
+		}
+
+		/* Need to check the flag before dequeueing. */
+		if (buffer->flags & EFX_TX_BUF_SKB)
+			finished = true;
+		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+		++tx_queue->read_count;
+		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+	}
+
+	tx_queue->pkts_compl += pkts_compl;
+	tx_queue->bytes_compl += bytes_compl;
+
+	EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+	efx_xmit_done_check_empty(tx_queue);
+}
+
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
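
A note on the check at the top of the loop: efx_tx_buffer_in_use() is not part
of this hunk. In the sfc driver it is a small inline helper, roughly as
sketched below (reproduced as an assumption for context, not taken from this
patch):

/* Sketch of the helper the loop relies on: a buffer is "in use" if it still
 * has a length or any flags set, i.e. it has not yet been completed.
 */
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
	return buffer->len || buffer->flags;
}

Walking onto an unused buffer means the completion state has diverged from the
descriptor ring, so the function reports a spurious completion and schedules a
reset rather than pressing on. The EFX_WARN_ON_PARANOID(pkts_compl != 1) check
encodes the matching expectation that each call, i.e. each timestamp pair,
completes exactly one skb.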