author     Linus Torvalds <torvalds@linux-foundation.org>  2012-04-11 00:30:16 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-04-11 00:30:16 +0200
commit     94fb175c0414902ad9dbd956addf3a5feafbc85b
tree       5d3c37abe78f072e92072f2079a98303c92cf16e  /drivers/dma/ioat/dma_v3.c
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
parent     netdma: adding alignment check for NETDMA ops
Merge tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine fixes from Dan Williams:
1/ regression fix for Xen as it now trips over a broken assumption
   about the dma address size on 32-bit builds (see the truncation sketch
   after the shortlog below)
2/ new quirk for netdma to ignore dma channels that cannot meet
netdma alignment requirements
3/ fixes for two long-standing issues in ioatdma (ring size overflow)
and iop-adma (potential stack corruption)
* tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
netdma: adding alignment check for NETDMA ops
ioatdma: DMA copy alignment needed to address IOAT DMA silicon errata
ioat: ring size variables need to be 32bit to avoid overflow
iop-adma: Corrected array overflow in RAID6 Xscale(R) test.
ioat: fix size of 'completion' for Xen
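
For context on fix 1/: under Xen (and other configurations where a 32-bit kernel addresses more than 4 GiB of bus space), dma_addr_t is 64-bit while unsigned long is only 32-bit, so keeping a completion address in an unsigned long silently drops the high bits. That is exactly what the phys_complete type changes in the diff below correct. The following is a minimal user-space sketch of the truncation, not kernel code; uint64_t/uint32_t stand in for dma_addr_t and a 32-bit unsigned long, and the address value is made up:

/*
 * Hypothetical illustration of fix 1/: a 64-bit bus address (dma_addr_t on
 * a 32-bit Xen/PAE build) stored in a 32-bit 'unsigned long' loses its
 * high bits.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys_complete = 0x1bfff2000ULL;      /* made-up bus address above 4 GiB */
	uint32_t truncated = (uint32_t)phys_complete; /* what a 32-bit unsigned long keeps */

	printf("dma_addr_t    : 0x%" PRIx64 "\n", phys_complete);
	printf("unsigned long : 0x%" PRIx32 " (high bits lost)\n", truncated);
	return 0;
}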
Diffstat (limited to 'drivers/dma/ioat/dma_v3.c')
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 49
1 file changed, 45 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c0e405..f7f1dc62c15c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+		dma->copy_align = 6;
+
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
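
The copy_align = 6 quirk at the end of the diff expresses the IOAT silicon erratum as a power-of-two exponent: the affected JSF/SNB channels require 2^6 = 64-byte alignment for memcpy offloads. Clients such as the netdma code in fix 2/ are expected to fall back to a CPU copy when a channel's alignment cannot be met. Below is a small stand-alone sketch, not the kernel source, of the kind of test that dmaengine's is_dma_copy_aligned()/dmaengine_check_align() helpers perform against copy_align:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the dmaengine alignment test: copy_align is an exponent, so the
 * source offset, destination offset and length must all be multiples of
 * (1 << copy_align); 0 means no constraint. */
static bool copy_is_aligned(unsigned char copy_align,
			    size_t src_off, size_t dst_off, size_t len)
{
	size_t mask;

	if (!copy_align)
		return true;
	mask = ((size_t)1 << copy_align) - 1;
	return !((src_off | dst_off | len) & mask);
}

int main(void)
{
	/* copy_align = 6 -> 64-byte alignment, as set for JSF/SNB IOAT above */
	printf("aligned 4096-byte copy  : %d\n", copy_is_aligned(6, 0, 0, 4096));
	printf("unaligned 1500-byte copy: %d\n", copy_is_aligned(6, 2, 0, 1500));
	return 0;
}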