author | Dave Jiang <dave.jiang@intel.com> | 2015-08-11 17:48:27 +0200
---|---|---
committer | Vinod Koul <vinod.koul@intel.com> | 2015-08-17 10:07:30 +0200
commit | 55f878ec47e3ab560a046c9030a97b1048b74e8b (patch) |
tree | 15cccf9aa8e4adeb90507bc9c7d6efa5446ce65c /drivers/dma/ioat |
parent | dmaengine: ioatdma: clean up local dma channel data structure (diff) |
download | linux-55f878ec47e3ab560a046c9030a97b1048b74e8b.tar.xz, linux-55f878ec47e3ab560a046c9030a97b1048b74e8b.zip |
dmaengine: ioatdma: fixup ioatdma_device namings
Change the variable names for ioatdma_device to be consistently named
ioat_dma instead of device/dma, to avoid confusion and to keep it
distinct from struct device. This clearly indicates that the variable
is an ioatdma_device. It also makes the naming consistent: the dma
device is ioat_dma and all the channels are ioat_chan.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r-- | drivers/dma/ioat/dma.c | 156
-rw-r--r-- | drivers/dma/ioat/dma.h | 50
-rw-r--r-- | drivers/dma/ioat/dma_v2.c | 48
-rw-r--r-- | drivers/dma/ioat/dma_v2.h | 6
-rw-r--r-- | drivers/dma/ioat/dma_v3.c | 126
5 files changed, 195 insertions, 191 deletions
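Before the diff itself, a condensed sketch (not part of the patch) of how the renamed pieces relate after this change. Field lists are abbreviated to the members visible in the diff below; see drivers/dma/ioat/dma.h for the full definitions.

	/* Condensed sketch, not part of the patch; unrelated members elided. */
	struct ioatdma_device {
		struct pci_dev *pdev;
		void __iomem *reg_base;
		struct dma_device dma_dev;	/* embedded dmaengine device, was "common" */
		struct ioatdma_chan *idx[4];	/* per channel data */
		/* ... */
	};

	struct ioatdma_chan {
		struct dma_chan dma_chan;
		struct ioatdma_device *ioat_dma;	/* back-pointer, was "device" */
		void __iomem *reg_base;
		/* ... */
	};

	/* Recovering the ioatdma_device from the embedded struct dma_device
	 * now names the renamed field dma_dev:
	 */
	#define to_ioatdma_device(dev) \
		container_of(dev, struct ioatdma_device, dma_dev)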
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 60aa04d95a0b..3cf2639fb06a 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -93,30 +93,30 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 
 /* common channel initialization */
 void
-ioat_init_channel(struct ioatdma_device *device, struct ioatdma_chan *ioat_chan,
-		  int idx)
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	struct dma_chan *c = &ioat_chan->dma_chan;
 	unsigned long data = (unsigned long) c;
 
-	ioat_chan->device = device;
-	ioat_chan->reg_base = device->reg_base + (0x80 * (idx + 1));
+	ioat_chan->ioat_dma = ioat_dma;
+	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
 	spin_lock_init(&ioat_chan->cleanup_lock);
 	ioat_chan->dma_chan.device = dma;
 	dma_cookie_init(&ioat_chan->dma_chan);
 	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
-	device->idx[idx] = ioat_chan;
+	ioat_dma->idx[idx] = ioat_chan;
 	init_timer(&ioat_chan->timer);
-	ioat_chan->timer.function = device->timer_fn;
+	ioat_chan->timer.function = ioat_dma->timer_fn;
 	ioat_chan->timer.data = data;
-	tasklet_init(&ioat_chan->cleanup_task, device->cleanup_fn, data);
+	tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data);
 }
 
 void ioat_stop(struct ioatdma_chan *ioat_chan)
 {
-	struct ioatdma_device *device = ioat_chan->device;
-	struct pci_dev *pdev = device->pdev;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	int chan_id = chan_num(ioat_chan);
 	struct msix_entry *msix;
 
@@ -126,9 +126,9 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
 	clear_bit(IOAT_RUN, &ioat_chan->state);
 
 	/* flush inflight interrupts */
-	switch (device->irq_mode) {
+	switch (ioat_dma->irq_mode) {
 	case IOAT_MSIX:
-		msix = &device->msix_entries[chan_id];
+		msix = &ioat_dma->msix_entries[chan_id];
 		synchronize_irq(msix->vector);
 		break;
 	case IOAT_MSI:
@@ -146,7 +146,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
 	tasklet_kill(&ioat_chan->cleanup_task);
 
 	/* final cleanup now that everything is quiesced and can't re-arm */
-	device->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
+	ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
 }
 
 dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
@@ -189,14 +189,14 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 		   struct dma_tx_state *txstate)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	enum dma_status ret;
 
 	ret = dma_cookie_status(c, cookie, txstate);
 	if (ret == DMA_COMPLETE)
 		return ret;
 
-	device->cleanup_fn((unsigned long) c);
+	ioat_dma->cleanup_fn((unsigned long) c);
 
 	return dma_cookie_status(c, cookie, txstate);
 }
@@ -215,15 +215,15 @@ static void ioat_dma_test_callback(void *dma_async_param)
 
 /**
  * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @device: device to be tested
+ * @ioat_dma: dma device to be tested
  */
-int ioat_dma_self_test(struct ioatdma_device *device)
+int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 {
 	int i;
 	u8 *src;
 	u8 *dest;
-	struct dma_device *dma = &device->common;
-	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct device *dev = &ioat_dma->pdev->dev;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
@@ -266,8 +266,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 		goto unmap_src;
 	}
 	flags = DMA_PREP_INTERRUPT;
-	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, flags);
+	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
+						      dma_src, IOAT_TEST_SIZE,
+						      flags);
 	if (!tx) {
 		dev_err(dev, "Self-test prep failed, disabling\n");
 		err = -ENODEV;
@@ -321,12 +322,12 @@ MODULE_PARM_DESC(ioat_interrupt_style,
 
 /**
  * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
+ * @ioat_dma: ioat dma device
 */
-int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	struct pci_dev *pdev = device->pdev;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	struct device *dev = &pdev->dev;
 	struct msix_entry *msix;
 	int i, j, msixcnt;
@@ -344,31 +345,31 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 
 msix:
 	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = device->common.chancnt;
+	msixcnt = ioat_dma->dma_dev.chancnt;
 	for (i = 0; i < msixcnt; i++)
-		device->msix_entries[i].entry = i;
+		ioat_dma->msix_entries[i].entry = i;
 
-	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
 	if (err)
 		goto msi;
 
 	for (i = 0; i < msixcnt; i++) {
-		msix = &device->msix_entries[i];
-		ioat_chan = ioat_chan_by_index(device, i);
+		msix = &ioat_dma->msix_entries[i];
+		ioat_chan = ioat_chan_by_index(ioat_dma, i);
 		err = devm_request_irq(dev, msix->vector,
 				       ioat_dma_do_interrupt_msix, 0,
 				       "ioat-msix", ioat_chan);
 		if (err) {
 			for (j = 0; j < i; j++) {
-				msix = &device->msix_entries[j];
-				ioat_chan = ioat_chan_by_index(device, j);
+				msix = &ioat_dma->msix_entries[j];
+				ioat_chan = ioat_chan_by_index(ioat_dma, j);
 				devm_free_irq(dev, msix->vector, ioat_chan);
 			}
 			goto msi;
 		}
 	}
 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = IOAT_MSIX;
+	ioat_dma->irq_mode = IOAT_MSIX;
 	goto done;
 
 msi:
@@ -377,69 +378,70 @@ msi:
 		goto intx;
 
 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
-			       "ioat-msi", device);
+			       "ioat-msi", ioat_dma);
 	if (err) {
 		pci_disable_msi(pdev);
 		goto intx;
 	}
-	device->irq_mode = IOAT_MSI;
+	ioat_dma->irq_mode = IOAT_MSI;
 	goto done;
 
 intx:
 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
-			       IRQF_SHARED, "ioat-intx", device);
+			       IRQF_SHARED, "ioat-intx", ioat_dma);
 	if (err)
 		goto err_no_irq;
 
-	device->irq_mode = IOAT_INTX;
+	ioat_dma->irq_mode = IOAT_INTX;
 done:
-	if (device->intr_quirk)
-		device->intr_quirk(device);
+	if (ioat_dma->intr_quirk)
+		ioat_dma->intr_quirk(ioat_dma);
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 	return 0;
 
 err_no_irq:
 	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	device->irq_mode = IOAT_NOIRQ;
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	ioat_dma->irq_mode = IOAT_NOIRQ;
 	dev_err(dev, "no usable interrupts\n");
 	return err;
 }
 EXPORT_SYMBOL(ioat_dma_setup_interrupts);
 
-static void ioat_disable_interrupts(struct ioatdma_device *device)
+static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
 {
 	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
-int ioat_probe(struct ioatdma_device *device)
+int ioat_probe(struct ioatdma_device *ioat_dma)
 {
 	int err = -ENODEV;
-	struct dma_device *dma = &device->common;
-	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	struct device *dev = &pdev->dev;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
-	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					   sizeof(struct ioat_dma_descriptor),
-					   64, 0);
-	if (!device->dma_pool) {
+	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+					     sizeof(struct ioat_dma_descriptor),
+					     64, 0);
+	if (!ioat_dma->dma_pool) {
 		err = -ENOMEM;
 		goto err_dma_pool;
 	}
 
-	device->completion_pool = pci_pool_create("completion_pool", pdev,
-						  sizeof(u64), SMP_CACHE_BYTES,
-						  SMP_CACHE_BYTES);
+	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);
 
-	if (!device->completion_pool) {
+	if (!ioat_dma->completion_pool) {
 		err = -ENOMEM;
 		goto err_completion_pool;
 	}
 
-	device->enumerate_channels(device);
+	ioat_dma->enumerate_channels(ioat_dma);
 
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->dev = &pdev->dev;
@@ -449,34 +451,34 @@ int ioat_probe(struct ioatdma_device *device)
 		goto err_setup_interrupts;
 	}
 
-	err = ioat_dma_setup_interrupts(device);
+	err = ioat_dma_setup_interrupts(ioat_dma);
 	if (err)
 		goto err_setup_interrupts;
 
-	err = device->self_test(device);
+	err = ioat_dma->self_test(ioat_dma);
 	if (err)
 		goto err_self_test;
 
 	return 0;
 
 err_self_test:
-	ioat_disable_interrupts(device);
+	ioat_disable_interrupts(ioat_dma);
 err_setup_interrupts:
-	pci_pool_destroy(device->completion_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
 err_completion_pool:
-	pci_pool_destroy(device->dma_pool);
+	pci_pool_destroy(ioat_dma->dma_pool);
 err_dma_pool:
 	return err;
 }
 
-int ioat_register(struct ioatdma_device *device)
+int ioat_register(struct ioatdma_device *ioat_dma)
 {
-	int err = dma_async_device_register(&device->common);
+	int err = dma_async_device_register(&ioat_dma->dma_dev);
 
 	if (err) {
-		ioat_disable_interrupts(device);
-		pci_pool_destroy(device->completion_pool);
-		pci_pool_destroy(device->dma_pool);
+		ioat_disable_interrupts(ioat_dma);
+		pci_pool_destroy(ioat_dma->completion_pool);
+		pci_pool_destroy(ioat_dma->dma_pool);
	}
 
 	return err;
@@ -499,10 +501,10 @@ struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
 
 static ssize_t version_show(struct dma_chan *c, char *page)
 {
 	struct dma_device *dma = c->device;
-	struct ioatdma_device *device = to_ioatdma_device(dma);
+	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
 
 	return sprintf(page, "%d.%d\n",
-		       device->version >> 4, device->version & 0xf);
+		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
 }
 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
 
@@ -524,9 +526,9 @@ const struct sysfs_ops ioat_sysfs_ops = {
 	.show	= ioat_attr_show,
 };
 
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	struct dma_chan *c;
 
 	list_for_each_entry(c, &dma->channels, device_node) {
@@ -545,9 +547,9 @@ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
 	}
 }
 
-void ioat_kobject_del(struct ioatdma_device *device)
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	struct dma_chan *c;
 
 	list_for_each_entry(c, &dma->channels, device_node) {
@@ -560,18 +562,18 @@ void ioat_kobject_del(struct ioatdma_device *device)
 	}
 }
 
-void ioat_dma_remove(struct ioatdma_device *device)
+void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 
-	ioat_disable_interrupts(device);
+	ioat_disable_interrupts(ioat_dma);
 
-	ioat_kobject_del(device);
+	ioat_kobject_del(ioat_dma);
 
 	dma_async_device_unregister(dma);
 
-	pci_pool_destroy(device->dma_pool);
-	pci_pool_destroy(device->completion_pool);
+	pci_pool_destroy(ioat_dma->dma_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
 
 	INIT_LIST_HEAD(&dma->channels);
 }
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 43290d1c88ed..11bbcf27f86f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -30,11 +30,11 @@
 
 #define IOAT_DMA_DCA_ANY_CPU		~0
 
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
-#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
+#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
 
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
 
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
@@ -54,7 +54,7 @@ enum ioat_irq_mode {
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
  * @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
+ * @dma_dev: embedded struct dma_device
  * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
@@ -75,19 +75,19 @@ struct ioatdma_device {
 	struct pci_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
-	struct dma_device common;
+	struct dma_device dma_dev;
 	u8 version;
 	struct msix_entry msix_entries[4];
 	struct ioatdma_chan *idx[4];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
-	void (*intr_quirk)(struct ioatdma_device *device);
-	int (*enumerate_channels)(struct ioatdma_device *device);
+	void (*intr_quirk)(struct ioatdma_device *ioat_dma);
+	int (*enumerate_channels)(struct ioatdma_device *ioat_dma);
 	int (*reset_hw)(struct ioatdma_chan *ioat_chan);
 	void (*cleanup_fn)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
-	int (*self_test)(struct ioatdma_device *device);
+	int (*self_test)(struct ioatdma_device *ioat_dma);
 };
 
 struct ioatdma_chan {
@@ -107,7 +107,7 @@ struct ioatdma_chan {
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
 	#define RESET_DELAY msecs_to_jiffies(100)
-	struct ioatdma_device *device;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t completion_dma;
 	u64 *completion;
 	struct tasklet_struct cleanup_task;
@@ -188,14 +188,14 @@ __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
 	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
 
 static inline struct ioatdma_chan *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
+ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
 {
-	return device->idx[index];
+	return ioat_dma->idx[index];
 }
 
 static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 	u32 status_lo;
 
@@ -214,7 +214,7 @@ static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 
 static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 
 	/* With IOAT v3.3 the status register is 64bit.  */
@@ -242,7 +242,7 @@ static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
 
 static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
 	writeb(IOAT_CHANCMD_SUSPEND,
 	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
@@ -250,7 +250,7 @@ static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 
 static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
 	writeb(IOAT_CHANCMD_RESET,
 	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
@@ -258,7 +258,7 @@ static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 
 static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u8 cmd;
 
 	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
@@ -291,20 +291,20 @@ static inline bool is_ioat_bug(unsigned long err)
 	return !!err;
 }
 
-int ioat_probe(struct ioatdma_device *device);
-int ioat_register(struct ioatdma_device *device);
-int ioat_dma_self_test(struct ioatdma_device *device);
-void ioat_dma_remove(struct ioatdma_device *device);
+int ioat_probe(struct ioatdma_device *ioat_dma);
+int ioat_register(struct ioatdma_device *ioat_dma);
+int ioat_dma_self_test(struct ioatdma_device *ioat_dma);
+void ioat_dma_remove(struct ioatdma_device *ioat_dma);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-void ioat_init_channel(struct ioatdma_device *device,
+void ioat_init_channel(struct ioatdma_device *ioat_dma,
 		       struct ioatdma_chan *ioat_chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 				   struct dma_tx_state *txstate);
 bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
 			   dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *device);
-int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
 void ioat_stop(struct ioatdma_chan *ioat_chan);
 extern const struct sysfs_ops ioat_sysfs_ops;
 extern struct ioat_sysfs_entry ioat_version_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 0f4b2435e707..020c1fe31ca1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -187,25 +187,25 @@ int ioat2_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
 
 /**
  * ioat2_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
+ * @ioat_dma: the ioat dma device to be enumerated
  */
-int ioat2_enumerate_channels(struct ioatdma_device *device)
+int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	u8 xfercap_log;
 	int i;
 
 	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
 	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
 		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
+			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
 	}
-	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap_log &= 0x1f; /* bits [4:0] valid */
 	if (xfercap_log == 0)
 		return 0;
@@ -216,10 +216,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat_chan)
 			break;
 
-		ioat_init_channel(device, ioat_chan, i);
+		ioat_init_channel(ioat_dma, ioat_chan, i);
 		ioat_chan->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat_chan->prep_lock);
-		if (device->reset_hw(ioat_chan)) {
+		if (ioat_dma->reset_hw(ioat_chan)) {
 			i = 0;
 			break;
 		}
@@ -258,18 +258,18 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
 {
 	struct ioat_dma_descriptor *hw;
 	struct ioat_ring_ent *desc;
-	struct ioatdma_device *dma;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t phys;
 
-	dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
+	ioat_dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
 	if (!hw)
 		return NULL;
 	memset(hw, 0, sizeof(*hw));
 
 	desc = kmem_cache_zalloc(ioat2_cache, flags);
 	if (!desc) {
-		pci_pool_free(dma->dma_pool, hw, phys);
+		pci_pool_free(ioat_dma->dma_pool, hw, phys);
 		return NULL;
 	}
 
@@ -282,10 +282,10 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
 
 static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
 {
-	struct ioatdma_device *dma;
+	struct ioatdma_device *ioat_dma;
 
-	dma = to_ioatdma_device(chan->device);
-	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
+	ioat_dma = to_ioatdma_device(chan->device);
+	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
 	kmem_cache_free(ioat2_cache, desc);
 }
 
@@ -348,7 +348,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->device->completion_pool,
+		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
 			       GFP_KERNEL, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
@@ -554,10 +554,10 @@ int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
 	 */
 	if (time_is_before_jiffies(ioat_chan->timer.expires)
 	    && timer_pending(&ioat_chan->timer)) {
-		struct ioatdma_device *device = ioat_chan->device;
+		struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 
 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-		device->timer_fn((unsigned long)ioat_chan);
+		ioat_dma->timer_fn((unsigned long)ioat_chan);
 	}
 	return -ENOMEM;
 
@@ -617,7 +617,7 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 void ioat2_free_chan_resources(struct dma_chan *c)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent *desc;
 	const int total_descs = 1 << ioat_chan->alloc_order;
 	int descs;
@@ -630,7 +630,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 		return;
 
 	ioat_stop(ioat_chan);
-	device->reset_hw(ioat_chan);
+	ioat_dma->reset_hw(ioat_chan);
 
 	spin_lock_bh(&ioat_chan->cleanup_lock);
 	spin_lock_bh(&ioat_chan->prep_lock);
@@ -654,7 +654,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	kfree(ioat_chan->ring);
 	ioat_chan->ring = NULL;
 	ioat_chan->alloc_order = 0;
-	pci_pool_free(device->completion_pool, ioat_chan->completion,
+	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
 		      ioat_chan->completion_dma);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index d3b73c8819cd..7d69ed3edab4 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -121,11 +121,11 @@ ioat2_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
 	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 }
 
-int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat2_dma_probe(struct ioatdma_device *ioat_dma, int dca);
+int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *device);
+int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma);
 struct dma_async_tx_descriptor *
 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 			   dma_addr_t dma_src, size_t len, unsigned long flags);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9fb9b450c154..8ad4b07e7b85 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -254,7 +254,7 @@ static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
 }
 
 static struct ioat_sed_ent *
-ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
+ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
 {
 	struct ioat_sed_ent *sed;
 	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
@@ -264,7 +264,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
 		return NULL;
 
 	sed->hw_pool = hw_pool;
-	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
+	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
 				 flags, &sed->dma);
 	if (!sed->hw) {
 		kmem_cache_free(ioat3_sed_cache, sed);
@@ -274,12 +274,13 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
 	return sed;
 }
 
-static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
+static void
+ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
 {
 	if (!sed)
 		return;
 
-	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
 	kmem_cache_free(ioat3_sed_cache, sed);
 }
 
@@ -370,7 +371,7 @@ desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
  */
 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 {
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent *desc;
 	bool seen_current = false;
 	int idx = ioat_chan->tail, i;
@@ -399,7 +400,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 		dump_desc_dbg(ioat_chan, desc);
 
 		/* set err stat if we are using dwbes */
-		if (device->cap & IOAT_CAP_DWBES)
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
 			desc_get_errstat(ioat_chan, desc);
 
 		tx = &desc->txd;
@@ -423,7 +424,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 
 		/* cleanup super extended descriptors */
 		if (desc->sed) {
-			ioat3_free_sed(device, desc->sed);
+			ioat3_free_sed(ioat_dma, desc->sed);
 			desc->sed = NULL;
 		}
 	}
@@ -440,7 +441,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 	}
 	/* 5 microsecond delay per pending descriptor */
 	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
-	       ioat_chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
+	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
 }
 
 static void ioat3_cleanup(struct ioatdma_chan *ioat_chan)
@@ -834,7 +835,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 		     size_t len, unsigned long flags)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent *compl_desc;
 	struct ioat_ring_ent *desc;
 	struct ioat_ring_ent *ext;
@@ -845,7 +846,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	u32 offset = 0;
 	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
 	int i, s, idx, with_ext, num_descs;
-	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
+	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;
 
 	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
 	/* the engine requires at least two sources (we provide
@@ -911,7 +912,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 		pq->ctl = 0;
 		pq->ctl_f.op = op;
 		/* we turn on descriptor write back error status */
-		if (device->cap & IOAT_CAP_DWBES)
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
 			pq->ctl_f.wb_en = result ? 1 : 0;
 		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
 		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
@@ -958,7 +959,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 		       size_t len, unsigned long flags)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent *desc;
 	size_t total_len = len;
 	struct ioat_pq_descriptor *pq;
@@ -994,7 +995,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 
 		descs[0] = (struct ioat_raw_descriptor *) pq;
 
-		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
+		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
 		if (!desc->sed) {
 			dev_err(to_dev(ioat_chan),
 				"%s: no free sed entries\n", __func__);
@@ -1026,7 +1027,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 		pq->ctl_f.op = op;
 		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
 		/* we turn on descriptor write back error status */
-		if (device->cap & IOAT_CAP_DWBES)
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
 			pq->ctl_f.wb_en = result ? 1 : 0;
 		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
 		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
@@ -1208,7 +1209,7 @@ static void ioat3_dma_test_callback(void *dma_async_param)
 }
 
 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int ioat_xor_val_self_test(struct ioatdma_device *device)
+static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 {
 	int i, src_idx;
 	struct page *dest;
@@ -1225,8 +1226,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	int err = 0;
 	struct completion cmp;
 	unsigned long tmo;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	u8 op = 0;
 
 	dev_dbg(dev, "%s\n", __func__);
@@ -1473,35 +1474,35 @@ out:
 	return err;
 }
 
-static int ioat3_dma_self_test(struct ioatdma_device *device)
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
 {
-	int rc = ioat_dma_self_test(device);
+	int rc = ioat_dma_self_test(ioat_dma);
 
 	if (rc)
 		return rc;
 
-	rc = ioat_xor_val_self_test(device);
+	rc = ioat_xor_val_self_test(ioat_dma);
 	if (rc)
 		return rc;
 
 	return 0;
 }
 
-static int ioat3_irq_reinit(struct ioatdma_device *device)
+static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma)
 {
-	struct pci_dev *pdev = device->pdev;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	int irq = pdev->irq, i;
 
 	if (!is_bwd_ioat(pdev))
 		return 0;
 
-	switch (device->irq_mode) {
+	switch (ioat_dma->irq_mode) {
 	case IOAT_MSIX:
-		for (i = 0; i < device->common.chancnt; i++) {
-			struct msix_entry *msix = &device->msix_entries[i];
+		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
+			struct msix_entry *msix = &ioat_dma->msix_entries[i];
 			struct ioatdma_chan *ioat_chan;
 
-			ioat_chan = ioat_chan_by_index(device, i);
+			ioat_chan = ioat_chan_by_index(ioat_dma, i);
 			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
 		}
 
@@ -1511,14 +1512,14 @@ static int ioat3_irq_reinit(struct ioatdma_device *device)
 		pci_disable_msi(pdev);
 		/* fall through */
 	case IOAT_INTX:
-		devm_free_irq(&pdev->dev, irq, device);
+		devm_free_irq(&pdev->dev, irq, ioat_dma);
 		break;
 	default:
 		return 0;
 	}
 
-	device->irq_mode = IOAT_NOIRQ;
+	ioat_dma->irq_mode = IOAT_NOIRQ;
 
-	return ioat_dma_setup_interrupts(device);
+	return ioat_dma_setup_interrupts(ioat_dma);
 }
 
 static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
@@ -1526,8 +1527,8 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
 	/* throw away whatever the channel was doing and get it
 	 * initialized, with ioat3 specific workarounds
 	 */
-	struct ioatdma_device *device = ioat_chan->device;
-	struct pci_dev *pdev = device->pdev;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	u32 chanerr;
 	u16 dev_id;
 	int err;
@@ -1537,7 +1538,7 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	if (device->version < IOAT_VER_3_3) {
+	if (ioat_dma->version < IOAT_VER_3_3) {
 		/* clear any pending errors */
 		err = pci_read_config_dword(pdev,
 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
@@ -1562,7 +1563,7 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
 
 	err = ioat2_reset_sync(ioat_chan, msecs_to_jiffies(200));
 	if (!err)
-		err = ioat3_irq_reinit(device);
+		err = ioat3_irq_reinit(ioat_dma);
 
 	if (err)
 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
@@ -1570,20 +1571,20 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan)
 	return err;
 }
 
-static void ioat3_intr_quirk(struct ioatdma_device *device)
+static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma)
 {
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioatdma_chan *ioat_chan;
 	u32 errmask;
 
-	dma = &device->common;
+	dma = &ioat_dma->dma_dev;
 
 	/*
 	 * if we have descriptor write back error status, we mask the
 	 * error interrupts
 	 */
-	if (device->cap & IOAT_CAP_DWBES) {
+	if (ioat_dma->cap & IOAT_CAP_DWBES) {
 		list_for_each_entry(c, &dma->channels, device_node) {
 			ioat_chan = to_ioat_chan(c);
 			errmask = readl(ioat_chan->reg_base +
@@ -1596,9 +1597,9 @@ static void ioat3_intr_quirk(struct ioatdma_device *device)
 	}
 }
 
-int ioat3_dma_probe(struct ioatdma_device *device, int dca)
+int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 {
-	struct pci_dev *pdev = device->pdev;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	int dca_en = system_has_dca_enabled(pdev);
 	struct dma_device *dma;
 	struct dma_chan *c;
@@ -1606,11 +1607,11 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	bool is_raid_device = false;
 	int err;
 
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat3_reset_hw;
-	device->self_test = ioat3_dma_self_test;
-	device->intr_quirk = ioat3_intr_quirk;
-	dma = &device->common;
+	ioat_dma->enumerate_channels = ioat2_enumerate_channels;
+	ioat_dma->reset_hw = ioat3_reset_hw;
+	ioat_dma->self_test = ioat3_dma_self_test;
+	ioat_dma->intr_quirk = ioat3_intr_quirk;
+	dma = &ioat_dma->dma_dev;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
@@ -1619,16 +1620,17 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
-	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
 
 	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
-		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
+		ioat_dma->cap &=
+			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
-	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
-		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
 
-	if (device->cap & IOAT_CAP_XOR) {
+	if (ioat_dma->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 
 		dma->max_xor = 8;
@@ -1639,7 +1641,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
 	}
 
-	if (device->cap & IOAT_CAP_PQ) {
+	if (ioat_dma->cap & IOAT_CAP_PQ) {
 		is_raid_device = true;
 
 		dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1647,19 +1649,19 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		dma_cap_set(DMA_PQ, dma->cap_mask);
 		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
 
-		if (device->cap & IOAT_CAP_RAID16SS) {
+		if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
 			dma_set_maxpq(dma, 16, 0);
 		} else {
 			dma_set_maxpq(dma, 8, 0);
 		}
 
-		if (!(device->cap & IOAT_CAP_XOR)) {
+		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
 			dma->device_prep_dma_xor = ioat3_prep_pqxor;
 			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
 			dma_cap_set(DMA_XOR, dma->cap_mask);
 			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
 
-			if (device->cap & IOAT_CAP_RAID16SS) {
+			if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
 				dma->max_xor = 16;
 			} else {
 				dma->max_xor = 8;
@@ -1668,11 +1670,11 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	}
 
 	dma->device_tx_status = ioat3_tx_status;
-	device->cleanup_fn = ioat3_cleanup_event;
-	device->timer_fn = ioat3_timer_event;
+	ioat_dma->cleanup_fn = ioat3_cleanup_event;
+	ioat_dma->timer_fn = ioat3_timer_event;
 
 	/* starting with CB3.3 super extended descriptors are supported */
-	if (device->cap & IOAT_CAP_RAID16SS) {
+	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
 		char pool_name[14];
 		int i;
 
@@ -1680,19 +1682,19 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
 
 			/* allocate SED DMA pool */
-			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
+			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
 					&pdev->dev,
 					SED_SIZE * (i + 1), 64, 0);
-			if (!device->sed_hw_pool[i])
+			if (!ioat_dma->sed_hw_pool[i])
 				return -ENOMEM;
 
 		}
 	}
 
-	if (!(device->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
+	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
 		dma_cap_set(DMA_PRIVATE, dma->cap_mask);
 
-	err = ioat_probe(device);
+	err = ioat_probe(ioat_dma);
 	if (err)
 		return err;
 
@@ -1702,14 +1704,14 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 	}
 
-	err = ioat_register(device);
+	err = ioat_register(ioat_dma);
 	if (err)
 		return err;
 
-	ioat_kobject_add(device, &ioat2_ktype);
+	ioat_kobject_add(ioat_dma, &ioat2_ktype);
 
 	if (dca)
-		device->dca = ioat3_dca_init(pdev, device->reg_base);
+		ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base);
 
 	return 0;
 }