author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 03:40:54 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 03:40:54 +0200
commit	35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (patch)
tree	15b4b33206818886d9cff371fd2163e073b70568	/drivers/net/wireless/iwlwifi/pcie/tx.c
parent	Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff)
parent	Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

  1) By far the biggest accomplishment, thanks to a large range of
     contributors, is the addition of multi-send for transmit.  This is
     the result of discussions back in Chicago, and the hard work of
     several individuals.

     Now, when the ->ndo_start_xmit() method of a driver sees
     skb->xmit_more as true, it can choose to defer the doorbell
     telling the driver to start processing the new TX queue entries.

     skb->xmit_more means that the generic networking is guaranteed to
     call the driver immediately with another SKB to send.

     There is logic added to the qdisc layer to dequeue multiple
     packets at a time, and the handling of mis-predicted offloads in
     software is now done with no locks held.

     Finally, pktgen is extended to have a "burst" parameter that can
     be used to test a multi-send implementation.

     Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4,
     virtio_net.  Adding support is almost trivial, so expect more
     drivers to support this optimization soon.  (A sketch of the
     driver-side pattern follows this message.)

     I want to thank, in no particular or implied order, Jesper
     Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert,
     Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel
     Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell.

  2) PTP and timestamping support in bnx2x, from Michal Kalderon.

  3) Allow adjusting the rx_copybreak threshold for a driver via
     ethtool, and add rx_copybreak support to the enic driver.  From
     Govindarajulu Varadarajan.

  4) Significant enhancements to the generic PHY layer and the bcm7xxx
     driver in particular (EEE support, auto power down, etc.) from
     Florian Fainelli.

  5) Allow raw buffers to be used for flow dissection, allowing drivers
     to determine the optimal "linear pull" size for devices that DMA
     into pools of pages.  The objective is to get exactly the
     necessary amount of headers into the linear SKB area pre-pulled,
     but no more.  The new interface drivers use is eth_get_headlen()
     (a usage sketch also follows this message).  From WANG Cong, with
     driver conversions (several had their own by-hand duplicated
     implementations) by Alexander Duyck and Eric Dumazet.

  6) Support checksumming more smoothly and efficiently for
     encapsulations, and add a "foo over UDP" facility.  From Tom
     Herbert.

  7) Add Broadcom SF2 switch driver to the DSA layer, from Florian
     Fainelli.

  8) eBPF can now load programs via a system call and has an extensive
     testsuite.  From Alexei Starovoitov and Daniel Borkmann.

  9) Major overhaul of the packet scheduler to use RCU in several major
     areas such as the classifiers and rate estimators.  From John
     Fastabend.

 10) Add driver for the Intel FM10000 Ethernet Switch, from Alexander
     Duyck.

 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric
     Dumazet.

 12) Add Datacenter TCP congestion control algorithm support, from
     Florian Westphal.

 13) Reorganize sk_buff so that __copy_skb_header() is significantly
     faster.  From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169:add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
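A minimal sketch of the xmit_more pattern from point 1 above, assuming an
invented foo_* driver (struct foo_priv, foo_post_tx() and the doorbell
register are hypothetical); skb->xmit_more, netif_xmit_stopped() and
netdev_get_tx_queue() are the real interfaces this cycle relies on:

	#include <linux/io.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* hypothetical ring state and descriptor-posting helper */
	struct foo_priv {
		void __iomem *doorbell;
	};
	static void foo_post_tx(struct foo_priv *priv, struct sk_buff *skb);

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		foo_post_tx(priv, skb);	/* place descriptor(s) on the ring */

		/*
		 * skb->xmit_more == true is the stack's promise that another
		 * skb follows immediately, so the MMIO doorbell write can be
		 * deferred to the last packet of the burst.  The queue-stopped
		 * check avoids stalling when no further xmit call will arrive.
		 */
		if (!skb->xmit_more ||
		    netif_xmit_stopped(netdev_get_tx_queue(dev,
							   skb->queue_mapping)))
			writel(1, priv->doorbell);

		return NETDEV_TX_OK;
	}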
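And a sketch of the eth_get_headlen() pull pattern from point 5, under
stated assumptions: the foo_* names and the FOO_RX_HDR_LEN budget are
invented, half-page RX buffers are assumed for the truesize, and the
signature used is the one added this cycle, eth_get_headlen(void *data,
unsigned int len):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/mm.h>

	#define FOO_RX_HDR_LEN 256	/* assumed linear-area budget */

	static struct sk_buff *foo_build_rx_skb(struct net_device *dev,
						struct page *page,
						unsigned int offset,
						unsigned int size)
	{
		void *va = page_address(page) + offset;
		unsigned int pull_len;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, FOO_RX_HDR_LEN);
		if (unlikely(!skb))
			return NULL;

		/* pull exactly the protocol headers into the linear area... */
		pull_len = eth_get_headlen(va, FOO_RX_HDR_LEN);
		memcpy(__skb_put(skb, pull_len), va, pull_len);

		/* ...and hand the payload remainder over as a page fragment */
		if (size > pull_len)
			skb_add_rx_frag(skb, 0, page, offset + pull_len,
					size - pull_len, PAGE_SIZE / 2);
		return skb;
	}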
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/tx.c	145
1 file changed, 74 insertions(+), 71 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6acccb19c4f3..eb8e2984c5e9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -34,6 +35,7 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
+#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
@@ -618,8 +620,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < txq->q.n_window; i++) {
- kfree(txq->entries[i].cmd);
- kfree(txq->entries[i].free_buf);
+ kzfree(txq->entries[i].cmd);
+ kzfree(txq->entries[i].free_buf);
}
/* De-alloc circular buffer of TFDs */
@@ -644,17 +646,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
memset(txq, 0, sizeof(*txq));
}
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- */
-static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
-{
- struct iwl_trans_pcie __maybe_unused *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_write_prph(trans, SCD_TXFACT, mask);
-}
-
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -692,7 +683,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
trans_pcie->cmd_fifo);
/* Activate all Tx DMA/FIFO channels */
- iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));
+ iwl_scd_activate_fifos(trans);
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
@@ -745,7 +736,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
/* Turn off all Tx DMA fifos */
spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_txq_set_sched(trans, 0);
+ iwl_scd_deactivate_fifos(trans);
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
@@ -886,7 +877,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
spin_lock(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
- iwl_write_prph(trans, SCD_TXFACT, 0);
+ iwl_scd_deactivate_fifos(trans);
/* Tell NIC where to find the "keep warm" buffer */
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
@@ -1072,55 +1063,53 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
return 0;
}
-static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(trans,
- SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
/* Receiver address (actually, Rx station's index into station table),
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn)
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int fifo = -1;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
- /* Stop this Tx queue before configuring it */
- iwl_pcie_txq_set_inactive(trans, txq_id);
+ if (cfg) {
+ fifo = cfg->fifo;
- /* Set this queue as a chain-building queue unless it is CMD queue */
- if (txq_id != trans_pcie->cmd_queue)
- iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+ /* Disable the scheduler prior configuring the cmd queue */
+ if (txq_id == trans_pcie->cmd_queue &&
+ trans_pcie->scd_set_active)
+ iwl_scd_enable_set_active(trans, 0);
- /* If this queue is mapped to a certain station: it is an AGG queue */
- if (sta_id >= 0) {
- u16 ra_tid = BUILD_RAxTID(sta_id, tid);
+ /* Stop this Tx queue before configuring it */
+ iwl_scd_txq_set_inactive(trans, txq_id);
- /* Map receiver-address / traffic-ID to this queue */
- iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
+ /* Set this queue as a chain-building queue unless it is CMD */
+ if (txq_id != trans_pcie->cmd_queue)
+ iwl_scd_txq_set_chain(trans, txq_id);
- /* enable aggregations for the queue */
- iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
- trans_pcie->txq[txq_id].ampdu = true;
- } else {
- /*
- * disable aggregations for the queue, this will also make the
- * ra_tid mapping configuration irrelevant since it is now a
- * non-AGG queue.
- */
- iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ if (cfg->aggregate) {
+ u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+ /* enable aggregations for the queue */
+ iwl_scd_txq_enable_agg(trans, txq_id);
+ trans_pcie->txq[txq_id].ampdu = true;
+ } else {
+ /*
+ * disable aggregations for the queue, this will also
+ * make the ra_tid mapping configuration irrelevant
+ * since it is now a non-AGG queue.
+ */
+ iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = trans_pcie->txq[txq_id].q.read_ptr;
+ ssn = trans_pcie->txq[txq_id].q.read_ptr;
+ }
}
/* Place first TFD at index corresponding to start sequence number.
@@ -1128,32 +1117,44 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- (ssn & 0xff) | (txq_id << 8));
- iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
+ if (cfg) {
+ u8 frame_limit = cfg->frame_limit;
+
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (ssn & 0xff) | (txq_id << 8));
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
- /* Set up Tx window size and frame limit for this queue */
- iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
- iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+ iwl_trans_write_mem32(trans,
+ trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
- (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
- SCD_QUEUE_STTS_REG_MSK);
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+ (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+
+ /* enable the scheduler for this queue (only) */
+ if (txq_id == trans_pcie->cmd_queue &&
+ trans_pcie->scd_set_active)
+ iwl_scd_enable_set_active(trans, BIT(txq_id));
+ }
+
trans_pcie->txq[txq_id].active = true;
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
txq_id, fifo, ssn & 0xff);
}
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
+ bool configure_scd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 stts_addr = trans_pcie->scd_base_addr +
@@ -1172,10 +1173,12 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
return;
}
- iwl_pcie_txq_set_inactive(trans, txq_id);
+ if (configure_scd) {
+ iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
- ARRAY_SIZE(zero_val));
+ iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ ARRAY_SIZE(zero_val));
+ }
iwl_pcie_txq_unmap(trans, txq_id);
trans_pcie->txq[txq_id].ampdu = false;
@@ -1406,7 +1409,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
- kfree(txq->entries[idx].free_buf);
+ kzfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
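
A note on the kfree() -> kzfree() conversions above: host-command buffers
can carry key material, and kzfree() (declared in <linux/slab.h>) zeroes
the entire allocation before freeing it.  A minimal sketch with an
invented structure:

	#include <linux/types.h>
	#include <linux/slab.h>

	/* hypothetical command wrapper, for illustration only */
	struct foo_hcmd {
		u8 key[16];		/* sensitive material */
		u8 payload[64];
	};

	static void foo_free_hcmd(struct foo_hcmd *cmd)
	{
		/*
		 * kzfree() memsets the full ksize() of the object to zero
		 * before kfree(), so key bytes do not linger in the slab for
		 * the next allocation to read back.
		 */
		kzfree(cmd);
	}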
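The new iwl-scd.h helpers replace open-coded PRPH writes.  Their bodies
are not part of this hunk, but reconstructing them from the code being
removed, they are presumably thin inlines along these lines (a sketch,
not the actual header):

	#include "iwl-io.h"
	#include "iwl-prph.h"

	static inline void iwl_scd_activate_fifos(struct iwl_trans *trans)
	{
		/* the write the removed iwl_pcie_txq_set_sched() issued with
		 * IWL_MASK(0, 7): enable all Tx DMA/FIFO channels */
		iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7));
	}

	static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans)
	{
		iwl_write_prph(trans, SCD_TXFACT, 0);	/* mask 0: all off */
	}

	static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
						    u16 txq_id)
	{
		/* stop the queue without changing its configuration;
		 * SCD_ACT_EN is the write-enable mask for the ACTIVE bit
		 * (see the removed iwl_pcie_txq_set_inactive() above) */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
	}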
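Finally, the reworked queue API: iwl_trans_pcie_txq_enable() now takes a
const struct iwl_trans_txq_scd_cfg *cfg (a NULL cfg only resets the ring
pointers and skips all scheduler programming), and
iwl_trans_pcie_txq_disable() gained a configure_scd flag with the matching
meaning.  A hedged caller sketch; the struct layout below is reconstructed
from the cfg-> accesses in the hunks, not copied from iwl-trans.h, and the
field values are illustrative:

	#include <linux/types.h>

	struct iwl_trans;

	/* layout assumed from the cfg->fifo/sta_id/tid/aggregate/frame_limit
	 * accesses in this patch; the authoritative definition is in
	 * iwl-trans.h */
	struct iwl_trans_txq_scd_cfg {
		u8 fifo;
		s8 sta_id;
		u8 tid;
		bool aggregate;
		int frame_limit;
	};

	/* prototypes as changed by this patch */
	void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
				       u16 ssn,
				       const struct iwl_trans_txq_scd_cfg *cfg);
	void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
					bool configure_scd);

	static void foo_open_agg_queue(struct iwl_trans *trans, int txq_id,
				       u16 ssn)
	{
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = 1,		/* illustrative values */
			.sta_id = 0,
			.tid = 0,
			.aggregate = true,
			.frame_limit = 64,
		};

		/* full scheduler configuration, as the old 7-argument
		 * call did */
		iwl_trans_pcie_txq_enable(trans, txq_id, ssn, &cfg);
	}

	static void foo_close_queue(struct iwl_trans *trans, int txq_id)
	{
		/* configure_scd == false: unmap the queue but leave the
		 * scheduler alone, e.g. when the firmware already
		 * deconfigured it */
		iwl_trans_pcie_txq_disable(trans, txq_id, false);
	}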