author     Tom Herbert <therbert@google.com>        2010-05-05 16:02:49 +0200
committer  David S. Miller <davem@davemloft.net>    2010-05-06 06:11:25 +0200
commit     50849d792b97c546c45a6652a16ba9be7d635c69 (patch)
tree       16f7ec1cb98909e8f9ac7e9c85065cc1c69e236d /drivers
parent     e1000e: save skb counts in TX to avoid cache misses (diff)
e1000e: reduce writes of RX producer ptr
Reduce number of writes to RX producer pointer. When alloc'ing RX buffers,
only write the RX producer pointer once every E1000_RX_BUFFER_WRITE (16)
buffers created.

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
 drivers/net/e1000e/netdev.c | 57 +++++++++++++++++++++++----------------------------------
 1 file changed, 23 insertions(+), 34 deletions(-)
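The change is easier to see outside the diff. Below is a minimal, self-contained sketch of the batching pattern the patch applies in e1000_alloc_rx_buffers(): the producer (tail) register is posted only when the ring index crosses an E1000_RX_BUFFER_WRITE (16) boundary, instead of once at the end of every refill pass. The ring structure, REFILL_BATCH constant, and mmio_write32() helper are illustrative stand-ins, not the driver's actual types or API.

/*
 * Illustrative sketch (not driver code): batch the RX producer-pointer
 * update so the device tail register is written only once every
 * REFILL_BATCH buffers instead of after every refill pass.
 */
#include <stdint.h>

#define REFILL_BATCH 16U /* stands in for E1000_RX_BUFFER_WRITE */

struct fake_rx_ring {
	unsigned int count;        /* descriptors in the ring (power of two) */
	unsigned int next_to_use;  /* software producer index */
	volatile uint32_t *tail;   /* stand-in for the device tail register */
};

/* Stand-in for writel(): post a 32-bit value to a device register. */
static void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

static void refill_rx_buffers(struct fake_rx_ring *ring, unsigned int to_fill)
{
	unsigned int i = ring->next_to_use;

	while (to_fill--) {
		/* ... allocate, map and describe buffer i here ... */

		/*
		 * Batched update: tell the hardware about new descriptors
		 * only when i is a multiple of REFILL_BATCH.  In the real
		 * driver a wmb() precedes the register write so the
		 * descriptor writes are visible before the tail moves.
		 */
		if (!(i & (REFILL_BATCH - 1)))
			mmio_write32(ring->tail, i);

		if (++i == ring->count)
			i = 0;
	}

	ring->next_to_use = i;
}

Compared with the removed code, the write no longer has to back the index up by one before posting it, and next_to_use is simply updated once at the end of the loop.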
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8de64ed8762c..b049d1a3c861 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -548,26 +548,23 @@ map_skb:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -649,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ }
+
i++;
if (i == rx_ring->count)
i = 0;
@@ -656,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
-
- if (!(i--))
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- /*
- * Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
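One detail carried over from the removed comment in e1000_alloc_rx_buffers_ps(): the packet-split path still posts i<<1 rather than i, because the hardware steps the tail in 16-byte units while packet-split descriptors are 32 bytes. A small illustrative calculation follows; the constant names are made up for the example.

/*
 * Illustrative only: why the packet-split refill path writes (i << 1).
 * The tail register advances in 16-byte steps, but each packet-split
 * descriptor is 32 bytes, so the software index is doubled before it is
 * posted.
 */
#include <assert.h>

#define TAIL_STEP_BYTES  16U  /* granularity the hardware counts tail in */
#define PS_DESC_BYTES    32U  /* size of one packet-split descriptor */

static unsigned int ps_tail_value(unsigned int desc_index)
{
	/* 32 / 16 == 2, so this is equivalent to desc_index << 1 */
	return desc_index * (PS_DESC_BYTES / TAIL_STEP_BYTES);
}

int main(void)
{
	/* e.g. after filling descriptor 16, post 32 to the tail register */
	assert(ps_tail_value(16) == (16U << 1));
	return 0;
}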