author | Ben Hutchings <bhutchings@solarflare.com> | 2010-09-10 08:41:47 +0200 |
committer | David S. Miller <davem@davemloft.net> | 2010-09-10 21:27:32 +0200 |
commit | f7d12cdcbb28207b3bdcf4affbf3935e4c015d03 (patch) | |
tree | ab94c3e81e355c8df47102ede2d5d0aa02738945 /drivers/net/sfc/nic.c | |
parent | sfc: Abstract channel and index lookup for RX queues (diff) | |
download | linux-f7d12cdcbb28207b3bdcf4affbf3935e4c015d03.tar.xz linux-f7d12cdcbb28207b3bdcf4affbf3935e4c015d03.zip |
sfc: Refactor channel and queue lookup and iteration
In preparation for changes to the way channels and queue structures
are allocated, revise the macros and functions used to look up and
iterate over them; a short sketch of the resulting call-site pattern
follows below.
- Replace efx_for_each_tx_queue() with iteration over channels then TX
queues
- Replace efx_for_each_rx_queue() with iteration over channels then RX
queues (with one exception, shortly to be removed)
- Introduce efx_get_{channel,rx_queue,tx_queue}() functions to look up
channels and queues by index
- Introduce efx_channel_get_{rx,tx}_queue() functions to look up a
channel's queues
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
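A self-contained toy model of the lookup and iteration pattern described above. The struct layouts, the TOY_* constants and the loop macros here are simplified stand-ins, not the real driver definitions; only the function and macro names follow the commit message, and the queue numbering is purely illustrative.

/* Toy model: channels own their queues; callers iterate channels first,
 * then each channel's queues, and use lookup helpers instead of indexing
 * per-NIC arrays directly. */
#include <stdio.h>

#define TOY_CHANNELS  4
#define TOY_TXQ_TYPES 2        /* stand-in for EFX_TXQ_TYPES */

struct efx_tx_queue { int queue; };
struct efx_rx_queue { int index; };

struct efx_channel {
        int channel;
        struct efx_tx_queue tx_queue[TOY_TXQ_TYPES];
        struct efx_rx_queue rx_queue;
};

struct efx_nic { struct efx_channel channel[TOY_CHANNELS]; };

/* Lookup helpers in the spirit of efx_get_channel()/efx_get_tx_queue()/
 * efx_get_rx_queue()/efx_channel_get_rx_queue(). */
static struct efx_channel *efx_get_channel(struct efx_nic *efx, int i)
{ return &efx->channel[i]; }

static struct efx_tx_queue *efx_get_tx_queue(struct efx_nic *efx, int ch, int type)
{ return &efx->channel[ch].tx_queue[type]; }

static struct efx_rx_queue *efx_channel_get_rx_queue(struct efx_channel *channel)
{ return &channel->rx_queue; }

/* Simplified iteration macros: channels first, then per-channel queues. */
#define efx_for_each_channel(ch, efx) \
        for ((ch) = &(efx)->channel[0]; (ch) < &(efx)->channel[TOY_CHANNELS]; (ch)++)
#define efx_for_each_channel_tx_queue(txq, ch) \
        for ((txq) = &(ch)->tx_queue[0]; (txq) < &(ch)->tx_queue[TOY_TXQ_TYPES]; (txq)++)

int main(void)
{
        struct efx_nic efx = { 0 };
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        int i;

        for (i = 0; i < TOY_CHANNELS; i++) {
                efx.channel[i].channel = i;
                efx.channel[i].rx_queue.index = i;
                efx.channel[i].tx_queue[0].queue = i * TOY_TXQ_TYPES;
                efx.channel[i].tx_queue[1].queue = i * TOY_TXQ_TYPES + 1;
        }

        /* New-style traversal: channels, then that channel's TX queues. */
        efx_for_each_channel(channel, &efx)
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        printf("channel %d owns tx queue %d\n",
                               channel->channel, tx_queue->queue);

        /* Lookups by index instead of direct array addressing. */
        printf("tx queue (1,1) -> %d, rx queue of channel 2 -> %d\n",
               efx_get_tx_queue(&efx, 1, 1)->queue,
               efx_channel_get_rx_queue(efx_get_channel(&efx, 2))->index);
        return 0;
}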
Diffstat (limited to 'drivers/net/sfc/nic.c')
-rw-r--r-- | drivers/net/sfc/nic.c | 85 |
1 files changed, 49 insertions, 36 deletions
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index be4d5524054f..9e3563348eb7 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -682,7 +682,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
                 /* Transmit completion */
                 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-                tx_queue = &efx->tx_queue[tx_ev_q_label];
+                tx_queue = efx_channel_get_tx_queue(
+                        channel, tx_ev_q_label % EFX_TXQ_TYPES);
                 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
                               EFX_TXQ_MASK);
                 channel->irq_mod_score += tx_packets;
@@ -690,7 +691,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
         } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
                 /* Rewrite the FIFO write pointer */
                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-                tx_queue = &efx->tx_queue[tx_ev_q_label];
+                tx_queue = efx_channel_get_tx_queue(
+                        channel, tx_ev_q_label % EFX_TXQ_TYPES);
 
                 if (efx_dev_registered(efx))
                         netif_tx_lock(efx->net_dev);
@@ -830,7 +832,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
         WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
                 channel->channel);
 
-        rx_queue = &efx->rx_queue[channel->channel];
+        rx_queue = efx_channel_get_rx_queue(channel);
 
         rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
         expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
@@ -882,7 +884,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
                 /* The queue must be empty, so we won't receive any rx
                  * events, so efx_process_channel() won't refill the
                  * queue. Refill it here */
-                efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+                efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
         else
                 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
                           "generated event "EFX_QWORD_FMT"\n",
@@ -1166,7 +1168,7 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
 
 static void efx_poll_flush_events(struct efx_nic *efx)
 {
-        struct efx_channel *channel = &efx->channel[0];
+        struct efx_channel *channel = efx_get_channel(efx, 0);
         struct efx_tx_queue *tx_queue;
         struct efx_rx_queue *rx_queue;
         unsigned int read_ptr = channel->eventq_read_ptr;
@@ -1188,7 +1190,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
                         ev_queue = EFX_QWORD_FIELD(*event,
                                                    FSF_AZ_DRIVER_EV_SUBDATA);
                         if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
-                                tx_queue = efx->tx_queue + ev_queue;
+                                tx_queue = efx_get_tx_queue(
+                                        efx, ev_queue / EFX_TXQ_TYPES,
+                                        ev_queue % EFX_TXQ_TYPES);
                                 tx_queue->flushed = FLUSH_DONE;
                         }
                 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1198,7 +1202,7 @@
                         ev_failed = EFX_QWORD_FIELD(
                                 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
                         if (ev_queue < efx->n_rx_channels) {
-                                rx_queue = efx->rx_queue + ev_queue;
+                                rx_queue = efx_get_rx_queue(efx, ev_queue);
                                 rx_queue->flushed = ev_failed ?
                                         FLUSH_FAILED : FLUSH_DONE;
                         }
@@ -1219,6 +1223,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
  * serialise them */
 int efx_nic_flush_queues(struct efx_nic *efx)
 {
+        struct efx_channel *channel;
         struct efx_rx_queue *rx_queue;
         struct efx_tx_queue *tx_queue;
         int i, tx_pending, rx_pending;
@@ -1227,29 +1232,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
         efx->type->prepare_flush(efx);
 
         /* Flush all tx queues in parallel */
-        efx_for_each_tx_queue(tx_queue, efx)
-                efx_flush_tx_queue(tx_queue);
+        efx_for_each_channel(channel, efx) {
+                efx_for_each_channel_tx_queue(tx_queue, channel)
+                        efx_flush_tx_queue(tx_queue);
+        }
 
         /* The hardware supports four concurrent rx flushes, each of which may
          * need to be retried if there is an outstanding descriptor fetch */
         for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
                 rx_pending = tx_pending = 0;
-                efx_for_each_rx_queue(rx_queue, efx) {
-                        if (rx_queue->flushed == FLUSH_PENDING)
-                                ++rx_pending;
-                }
-                efx_for_each_rx_queue(rx_queue, efx) {
-                        if (rx_pending == EFX_RX_FLUSH_COUNT)
-                                break;
-                        if (rx_queue->flushed == FLUSH_FAILED ||
-                            rx_queue->flushed == FLUSH_NONE) {
-                                efx_flush_rx_queue(rx_queue);
-                                ++rx_pending;
+                efx_for_each_channel(channel, efx) {
+                        efx_for_each_channel_rx_queue(rx_queue, channel) {
+                                if (rx_queue->flushed == FLUSH_PENDING)
+                                        ++rx_pending;
                         }
                 }
-                efx_for_each_tx_queue(tx_queue, efx) {
-                        if (tx_queue->flushed != FLUSH_DONE)
-                                ++tx_pending;
+                efx_for_each_channel(channel, efx) {
+                        efx_for_each_channel_rx_queue(rx_queue, channel) {
+                                if (rx_pending == EFX_RX_FLUSH_COUNT)
+                                        break;
+                                if (rx_queue->flushed == FLUSH_FAILED ||
+                                    rx_queue->flushed == FLUSH_NONE) {
+                                        efx_flush_rx_queue(rx_queue);
+                                        ++rx_pending;
+                                }
+                        }
+                        efx_for_each_channel_tx_queue(tx_queue, channel) {
+                                if (tx_queue->flushed != FLUSH_DONE)
+                                        ++tx_pending;
+                        }
                 }
 
                 if (rx_pending == 0 && tx_pending == 0)
@@ -1261,19 +1272,21 @@
 
         /* Mark the queues as all flushed. We're going to return failure
          * leading to a reset, or fake up success anyway */
-        efx_for_each_tx_queue(tx_queue, efx) {
-                if (tx_queue->flushed != FLUSH_DONE)
-                        netif_err(efx, hw, efx->net_dev,
-                                  "tx queue %d flush command timed out\n",
-                                  tx_queue->queue);
-                tx_queue->flushed = FLUSH_DONE;
-        }
-        efx_for_each_rx_queue(rx_queue, efx) {
-                if (rx_queue->flushed != FLUSH_DONE)
-                        netif_err(efx, hw, efx->net_dev,
-                                  "rx queue %d flush command timed out\n",
-                                  efx_rx_queue_index(rx_queue));
-                rx_queue->flushed = FLUSH_DONE;
+        efx_for_each_channel(channel, efx) {
+                efx_for_each_channel_tx_queue(tx_queue, channel) {
+                        if (tx_queue->flushed != FLUSH_DONE)
+                                netif_err(efx, hw, efx->net_dev,
+                                          "tx queue %d flush command timed out\n",
+                                          tx_queue->queue);
+                        tx_queue->flushed = FLUSH_DONE;
+                }
+                efx_for_each_channel_rx_queue(rx_queue, channel) {
+                        if (rx_queue->flushed != FLUSH_DONE)
+                                netif_err(efx, hw, efx->net_dev,
+                                          "rx queue %d flush command timed out\n",
+                                          efx_rx_queue_index(rx_queue));
+                        rx_queue->flushed = FLUSH_DONE;
+                }
         }
 
         return -ETIMEDOUT;
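In the flush-event hunk above, the hardware reports a flat TX queue number and the patch decomposes it into a channel index and a per-channel queue type via ev_queue / EFX_TXQ_TYPES and ev_queue % EFX_TXQ_TYPES. The standalone sketch below shows only that arithmetic; the value 2 for EFX_TXQ_TYPES is an assumption for illustration, not taken from the driver headers.

#include <stdio.h>

/* Illustrative only: the driver defines EFX_TXQ_TYPES itself. */
#define EFX_TXQ_TYPES 2

int main(void)
{
        unsigned int ev_queue;

        /* A flat queue number ev_queue maps to channel ev_queue / EFX_TXQ_TYPES
         * and per-channel queue type ev_queue % EFX_TXQ_TYPES, mirroring the
         * efx_get_tx_queue() call in efx_poll_flush_events() above. */
        for (ev_queue = 0; ev_queue < 8; ev_queue++)
                printf("queue %u -> channel %u, type %u\n", ev_queue,
                       ev_queue / EFX_TXQ_TYPES, ev_queue % EFX_TXQ_TYPES);
        return 0;
}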