Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
 drivers/net/wireless/b43/dma.c | 312 +++++++++++++++++---------
 1 file changed, 226 insertions(+), 86 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index de4e804bedf0..027be275e035 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,44 +383,160 @@ static inline
}
}
+/* Check if a DMA region fits the device constraints.
+ * Returns true if the region is OK for use with this device. */
+static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
+ dma_addr_t addr, size_t size)
+{
+ switch (ring->type) {
+ case B43_DMA_30BIT:
+ if ((u64)addr + size > (1ULL << 30))
+ return 0;
+ break;
+ case B43_DMA_32BIT:
+ if ((u64)addr + size > (1ULL << 32))
+ return 0;
+ break;
+ case B43_DMA_64BIT:
+ /* Currently we can't have addresses beyond
+ * 64bit in the kernel. */
+ break;
+ }
+ return 1;
+}
+
+#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
+#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
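
A standalone sketch (not part of this patch) of what the constraint check and the alignment masks above compute, assuming a 30-bit device and a buffer sitting just below the 1GB boundary:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addr = 0x3FFFF000ULL;	/* 4k below the 1GB (30-bit) limit */

		assert(addr + 0x1000 <= (1ULL << 30));	/* a 4k buffer still fits 30-bit DMA */
		assert(addr + 0x2000 >  (1ULL << 30));	/* an 8k buffer would exceed it */
		assert((addr & 0x0FFFULL) == 0);	/* 4k aligned */
		assert((addr & 0x1FFFULL) != 0);	/* but not 8k aligned */
		return 0;
	}
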
+
+static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
+ dma_addr_t dmaaddr, size_t size)
+{
+ ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
+ free_pages((unsigned long)base, get_order(size));
+}
+
+static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
+ dma_addr_t *dmaaddr, size_t size,
+ gfp_t gfp_flags)
+{
+ void *base;
+
+ base = (void *)__get_free_pages(gfp_flags, get_order(size));
+ if (!base)
+ return NULL;
+ memset(base, 0, size);
+ *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
+ DMA_TO_DEVICE);
+ if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
+ free_pages((unsigned long)base, get_order(size));
+ return NULL;
+ }
+
+ return base;
+}
+
+static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
+ dma_addr_t *dmaaddr, size_t size)
+{
+ void *base;
+
+ base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+ GFP_KERNEL);
+ if (!base) {
+ b43err(ring->dev->wl, "Failed to allocate or map pages "
+ "for DMA ringmemory\n");
+ return NULL;
+ }
+ if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+ /* The memory does not fit our device constraints.
+ * Retry with GFP_DMA set to get lower memory. */
+ b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+ base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+ GFP_KERNEL | GFP_DMA);
+ if (!base) {
+ b43err(ring->dev->wl, "Failed to allocate or map pages "
+ "in the GFP_DMA region for DMA ringmemory\n");
+ return NULL;
+ }
+ if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+ b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+ b43err(ring->dev->wl, "Failed to allocate DMA "
+ "ringmemory that fits device constraints\n");
+ return NULL;
+ }
+ }
+ /* We expect the memory to be 4k aligned, at least. */
+ if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
+ b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+ return NULL;
+ }
+
+ return base;
+}
+
static int alloc_ringmemory(struct b43_dmaring *ring)
{
- gfp_t flags = GFP_KERNEL;
-
- /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
- * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
- * has shown that 4K is sufficient for the latter as long as the buffer
- * does not cross an 8K boundary.
- *
- * For unknown reasons - possibly a hardware error - the BCM4311 rev
- * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
- * which accounts for the GFP_DMA flag below.
- *
- * The flags here must match the flags in free_ringmemory below!
+ unsigned int required;
+ void *base;
+ dma_addr_t dmaaddr;
+
+ /* There are several requirements on the descriptor ring memory:
+ * - The memory region needs to fit the address constraints for the
+ * device (same as for frame buffers).
+ * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
+ * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
*/
+
if (ring->type == B43_DMA_64BIT)
- flags |= GFP_DMA;
- ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
- B43_DMA_RINGMEMSIZE,
- &(ring->dmabase), flags);
- if (!ring->descbase) {
- b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+ required = ring->nr_slots * sizeof(struct b43_dmadesc64);
+ else
+ required = ring->nr_slots * sizeof(struct b43_dmadesc32);
+ if (B43_WARN_ON(required > 0x1000))
+ return -ENOMEM;
+
+ ring->alloc_descsize = 0x1000;
+ base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+ if (!base)
+ return -ENOMEM;
+ ring->alloc_descbase = base;
+ ring->alloc_dmabase = dmaaddr;
+
+ if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
+ /* We're on <=32bit DMA, or we already got 8k aligned memory.
+ * That's all we need, so we're fine. */
+ ring->descbase = base;
+ ring->dmabase = dmaaddr;
+ return 0;
+ }
+ b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
+
+ /* OK, we failed the 8k alignment requirement.
+ * Try to force-align the memory region now. */
+ ring->alloc_descsize = 0x2000;
+ base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+ if (!base)
return -ENOMEM;
+ ring->alloc_descbase = base;
+ ring->alloc_dmabase = dmaaddr;
+
+ if (is_8k_aligned(dmaaddr)) {
+ /* We're already 8k aligned. That's OK, too. */
+ ring->descbase = base;
+ ring->dmabase = dmaaddr;
+ return 0;
}
- memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+ /* Force-align it to 8k */
+ ring->descbase = (void *)((u8 *)base + 0x1000);
+ ring->dmabase = dmaaddr + 0x1000;
+ B43_WARN_ON(!is_8k_aligned(ring->dmabase));
return 0;
}
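
The force-align trick above relies on the allocator returning 4k-aligned memory: a 0x2000-byte region with a 4k-aligned start is 8k aligned either at its start or at start + 0x1000, and the ring needs at most 0x1000 bytes, so the offset variant always stays inside the allocation. A standalone sketch (not part of this patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addr;

		for (addr = 0; addr < 0x100000; addr += 0x1000) {	/* all 4k-aligned starts */
			uint64_t ring = (addr & 0x1FFF) ? addr + 0x1000 : addr;

			assert((ring & 0x1FFF) == 0);		/* 8k aligned */
			assert(ring + 0x1000 <= addr + 0x2000);	/* fits the 0x2000 region */
		}
		return 0;
	}
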
static void free_ringmemory(struct b43_dmaring *ring)
{
- gfp_t flags = GFP_KERNEL;
-
- if (ring->type == B43_DMA_64BIT)
- flags |= GFP_DMA;
-
- ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
- ring->descbase, ring->dmabase, flags);
+ b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
+ ring->alloc_dmabase, ring->alloc_descsize);
}
/* Reset the RX DMA channel */
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
return 1;
- switch (ring->type) {
- case B43_DMA_30BIT:
- if ((u64)addr + buffersize > (1ULL << 30))
- goto address_error;
- break;
- case B43_DMA_32BIT:
- if ((u64)addr + buffersize > (1ULL << 32))
- goto address_error;
- break;
- case B43_DMA_64BIT:
- /* Currently we can't have addresses beyond
- * 64bit in the kernel. */
- break;
+ if (!b43_dma_address_ok(ring, addr, buffersize)) {
+ /* We can't support this address. Unmap it again. */
+ unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+ return 1;
}
/* The address is OK. */
return 0;
-
-address_error:
- /* We can't support this address. Unmap it again. */
- unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-
- return 1;
}
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
meta->dmaaddr = dmaaddr;
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
+ ssb_dma_sync_single_for_device(ring->dev->dev,
+ ring->alloc_dmabase,
+ ring->alloc_descsize, DMA_TO_DEVICE);
return 0;
}
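
Because the ring is now a streaming DMA mapping (map_single) rather than a coherent allocation, CPU writes to a descriptor must be explicitly handed back to the device before the hardware may fetch it; that is what the sync call added above does. A minimal sketch of the publish step using the generic DMA API instead of the ssb wrappers (illustrative, not part of this patch):

	#include <linux/dma-mapping.h>

	/* Flush CPU-written descriptors so the device sees them. */
	static void publish_ring(struct device *dev, dma_addr_t ring_dma,
				 size_t ring_size)
	{
		wmb();	/* order the descriptor writes before the hand-off */
		dma_sync_single_for_device(dev, ring_dma, ring_size,
					   DMA_TO_DEVICE);
	}
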
@@ -770,7 +874,7 @@ static void free_all_descbuffers(struct b43_dmaring *ring)
for (i = 0; i < ring->nr_slots; i++) {
desc = ring->ops->idx2desc(ring, i, &meta);
- if (!meta->skb) {
+ if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
B43_WARN_ON(!ring->tx);
continue;
}
@@ -822,7 +926,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
enum b43_dmatype type)
{
struct b43_dmaring *ring;
- int err;
+ int i, err;
dma_addr_t dma_test;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -837,6 +941,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
GFP_KERNEL);
if (!ring->meta)
goto err_kfree_ring;
+ for (i = 0; i < ring->nr_slots; i++)
+ ring->meta[i].skb = B43_DMA_PTR_POISON;
ring->type = type;
ring->dev = dev;
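
The poison helpers used here are defined in the driver's DMA header; a plausible sketch of the pattern, assuming an ERR_PTR-style sentinel that can never alias a valid skb pointer (the exact definition is not part of this hunk):

	#include <linux/err.h>
	#include <linux/compiler.h>

	#define B43_DMA_PTR_POISON		((void *)ERR_PTR(-ENOMEM))
	#define b43_dma_ptr_is_poisoned(ptr)	(unlikely((ptr) == B43_DMA_PTR_POISON))
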
@@ -1147,28 +1253,29 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
case 0x5000:
ring = dma->tx_ring_mcast;
break;
- default:
- B43_WARN_ON(1);
}
*slot = (cookie & 0x0FFF);
- B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
+ if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
+ b43dbg(dev->wl, "TX-status contains "
+ "invalid cookie: 0x%04X\n", cookie);
+ return NULL;
+ }
return ring;
}
static int dma_tx_fragment(struct b43_dmaring *ring,
- struct sk_buff **in_skb)
+ struct sk_buff *skb)
{
- struct sk_buff *skb = *in_skb;
const struct b43_dma_ops *ops = ring->ops;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
u8 *header;
int slot, old_top_slot, old_used_slots;
int err;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
struct b43_dmadesc_meta *meta_hdr;
- struct sk_buff *bounce_skb;
u16 cookie;
size_t hdrsize = b43_txhdr_size(ring->dev);
@@ -1212,34 +1319,28 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta->skb = skb;
meta->is_last_fragment = 1;
+ priv_info->bouncebuffer = NULL;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
- if (!bounce_skb) {
+ priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+ if (!priv_info->bouncebuffer) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}
+ memcpy(priv_info->bouncebuffer, skb->data, skb->len);
- memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
- memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
- bounce_skb->dev = skb->dev;
- skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
- info = IEEE80211_SKB_CB(bounce_skb);
-
- dev_kfree_skb_any(skb);
- skb = bounce_skb;
- *in_skb = bounce_skb;
- meta->skb = skb;
- meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+ meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
+ kfree(priv_info->bouncebuffer);
+ priv_info->bouncebuffer = NULL;
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO;
- goto out_free_bounce;
+ goto out_unmap_hdr;
}
}
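
The fallback above is the classic bounce-buffer pattern: when the skb's own data cannot be mapped within the device's address limits, a low-memory copy is substituted, and the pointer is kept in the per-frame private TX info so the TX status handler can free it later. A sketch of the copy step with an illustrative helper name (not part of this patch):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Copy frame data into a buffer the device is guaranteed to reach. */
	static void *bounce_to_low_memory(const void *data, size_t len)
	{
		void *bounce = kmalloc(len, GFP_ATOMIC | GFP_DMA);

		if (bounce)
			memcpy(bounce, data, len);
		return bounce;
	}
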
@@ -1253,11 +1354,12 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
}
/* Now transfer the whole frame. */
wmb();
+ ssb_dma_sync_single_for_device(ring->dev->dev,
+ ring->alloc_dmabase,
+ ring->alloc_descsize, DMA_TO_DEVICE);
ops->poke_tx(ring, next_slot(ring, slot));
return 0;
-out_free_bounce:
- dev_kfree_skb_any(skb);
out_unmap_hdr:
unmap_descbuffer(ring, meta_hdr->dmaaddr,
hdrsize, 1);
@@ -1362,11 +1464,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
* static, so we don't need to store it per frame. */
ring->queue_prio = skb_get_queue_mapping(skb);
- /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
- * into the skb data or cb now. */
- hdr = NULL;
- info = NULL;
- err = dma_tx_fragment(ring, &skb);
+ err = dma_tx_fragment(ring, skb);
if (unlikely(err == -ENOKEY)) {
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
@@ -1400,30 +1498,63 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
struct b43_dmaring *ring;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
- int slot;
+ int slot, firstused;
bool frame_succeed;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
return;
-
B43_WARN_ON(!ring->tx);
+
+ /* Sanity check: TX packets are processed in-order on one ring.
+ * Check if the slot deduced from the cookie really is the first
+ * used slot. */
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
+ if (unlikely(slot != firstused)) {
+ /* This is possibly a firmware bug and will result in
+ * malfunction, memory leaks and/or a stall of DMA functionality. */
+ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+ "Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ return;
+ }
+
ops = ring->ops;
while (1) {
- B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
+ B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
desc = ops->idx2desc(ring, slot, &meta);
- if (meta->skb)
- unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
- 1);
- else
+ if (b43_dma_ptr_is_poisoned(meta->skb)) {
+ b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
+ "on ring %d\n",
+ slot, firstused, ring->index);
+ break;
+ }
+ if (meta->skb) {
+ struct b43_private_tx_info *priv_info =
+ b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+
+ unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+ kfree(priv_info->bouncebuffer);
+ priv_info->bouncebuffer = NULL;
+ } else {
unmap_descbuffer(ring, meta->dmaaddr,
b43_txhdr_size(dev), 1);
+ }
if (meta->is_last_fragment) {
struct ieee80211_tx_info *info;
- BUG_ON(!meta->skb);
+ if (unlikely(!meta->skb)) {
+ /* This is the last scatter-gather fragment of the frame,
+ * so the skb pointer must not be NULL. */
+ b43dbg(dev->wl, "TX status unexpected NULL skb "
+ "at slot %d (first=%d) on ring %d\n",
+ slot, firstused, ring->index);
+ break;
+ }
info = IEEE80211_SKB_CB(meta->skb);
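
The firstused computation earlier in this hunk is plain ring-buffer arithmetic: the oldest in-flight slot trails the newest by used_slots - 1, modulo the ring size. A standalone sketch (not part of this patch):

	#include <assert.h>

	static int first_used_slot(int current_slot, int used_slots, int nr_slots)
	{
		int first = current_slot - used_slots + 1;

		if (first < 0)
			first += nr_slots;	/* wrap around the ring */
		return first;
	}

	int main(void)
	{
		assert(first_used_slot(5, 3, 64) == 3);		/* slots 3,4,5 in flight */
		assert(first_used_slot(1, 4, 64) == 62);	/* wraps: 62,63,0,1 */
		return 0;
	}
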
@@ -1441,20 +1572,29 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
#endif /* DEBUG */
ieee80211_tx_status(dev->wl->hw, meta->skb);
- /* skb is freed by ieee80211_tx_status() */
- meta->skb = NULL;
+ /* skb will be freed by ieee80211_tx_status().
+ * Poison our pointer. */
+ meta->skb = B43_DMA_PTR_POISON;
} else {
/* No need to call free_descriptor_buffer here, as
* this is only the txhdr, which is not allocated.
*/
- B43_WARN_ON(meta->skb);
+ if (unlikely(meta->skb)) {
+ b43dbg(dev->wl, "TX status unexpected non-NULL skb "
+ "at slot %d (first=%d) on ring %d\n",
+ slot, firstused, ring->index);
+ break;
+ }
}
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
- if (meta->is_last_fragment)
+ if (meta->is_last_fragment) {
+ /* This is the last scatter-gather
+ * fragment of the frame. We are done. */
break;
+ }
slot = next_slot(ring, slot);
}
if (ring->stopped) {