Diffstat (limited to 'drivers')
191 files changed, 12318 insertions, 3028 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index cd4de7e038ea..c6bcb8c719d8 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -33,6 +33,7 @@ #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-aspm.h> +#include <linux/dmar.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/dmi.h> @@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device, struct acpi_pci_root *root; acpi_handle handle = device->handle; int no_aspm = 0, clear_aspm = 0; + bool hotadd = system_state != SYSTEM_BOOTING; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device, strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); device->driver_data = root; + if (hotadd && dmar_device_add(handle)) { + result = -ENXIO; + goto end; + } + pr_info(PREFIX "%s [%s] (domain %04x %pR)\n", acpi_device_name(device), acpi_device_bid(device), root->segment, &root->secondary); @@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device, root->segment, (unsigned int)root->secondary.start); device->driver_data = NULL; result = -ENODEV; - goto end; + goto remove_dmar; } if (clear_aspm) { @@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device, if (device->wakeup.flags.run_wake) device_set_run_wake(root->bus->bridge, true); - if (system_state != SYSTEM_BOOTING) { + if (hotadd) { pcibios_resource_survey_bus(root->bus); pci_assign_unassigned_root_bus_resources(root->bus); } @@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device, pci_unlock_rescan_remove(); return 1; +remove_dmar: + if (hotadd) + dmar_device_remove(handle); end: kfree(root); return result; @@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device) pci_remove_root_bus(root->bus); + dmar_device_remove(device->handle); + pci_unlock_rescan_remove(); kfree(root); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 973a3332a85f..80f4de729a86 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -95,8 +95,12 @@ static int amba_pm_runtime_suspend(struct device *dev) struct amba_device *pcdev = to_amba_device(dev); int ret = pm_generic_runtime_suspend(dev); - if (ret == 0 && dev->driver) - clk_disable_unprepare(pcdev->pclk); + if (ret == 0 && dev->driver) { + if (pm_runtime_is_irq_safe(dev)) + clk_disable(pcdev->pclk); + else + clk_disable_unprepare(pcdev->pclk); + } return ret; } @@ -107,7 +111,10 @@ static int amba_pm_runtime_resume(struct device *dev) int ret; if (dev->driver) { - ret = clk_prepare_enable(pcdev->pclk); + if (pm_runtime_is_irq_safe(dev)) + ret = clk_enable(pcdev->pclk); + else + ret = clk_prepare_enable(pcdev->pclk); /* Failure is probably fatal to the system, but... 
*/ if (ret) return ret; @@ -115,7 +122,7 @@ static int amba_pm_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -#endif +#endif /* CONFIG_PM */ static const struct dev_pm_ops amba_pm = { .suspend = pm_generic_suspend, diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c index e44d675a30ec..b5aedca5ea3c 100644 --- a/drivers/ata/ahci_sunxi.c +++ b/drivers/ata/ahci_sunxi.c @@ -27,6 +27,12 @@ #include <linux/regulator/consumer.h> #include "ahci.h" +/* Insmod parameters */ +static bool enable_pmp; +module_param(enable_pmp, bool, 0); +MODULE_PARM_DESC(enable_pmp, + "Enable support for sata port multipliers, only use if you use a pmp!"); + #define AHCI_BISTAFR 0x00a0 #define AHCI_BISTCR 0x00a4 #define AHCI_BISTFCTR 0x00a8 @@ -184,7 +190,15 @@ static int ahci_sunxi_probe(struct platform_device *pdev) goto disable_resources; hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI | - AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ; + AHCI_HFLAG_YES_NCQ; + + /* + * The sunxi sata controller seems to be unable to successfully do a + * soft reset if no pmp is attached, so disable pmp use unless + * requested, otherwise directly attached disks do not work. + */ + if (!enable_pmp) + hpriv->flags |= AHCI_HFLAG_NO_PMP; rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info); if (rc) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c5ba15af87d3..5c84fb5c3372 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1043,8 +1043,8 @@ const char *sata_spd_string(unsigned int spd) * None. * * RETURNS: - * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or - * %ATA_DEV_UNKNOWN the event of failure. + * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP, + * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure. */ unsigned int ata_dev_classify(const struct ata_taskfile *tf) { @@ -1089,6 +1089,11 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) return ATA_DEV_SEMB; } + if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) { + DPRINTK("found ZAC device by sig\n"); + return ATA_DEV_ZAC; + } + DPRINTK("unknown device\n"); return ATA_DEV_UNKNOWN; } @@ -1329,7 +1334,7 @@ static int ata_hpa_resize(struct ata_device *dev) int rc; /* do we need to do it? 
*/ - if (dev->class != ATA_DEV_ATA || + if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) || !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) return 0; @@ -1889,6 +1894,7 @@ retry: case ATA_DEV_SEMB: class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ case ATA_DEV_ATA: + case ATA_DEV_ZAC: tf.command = ATA_CMD_ID_ATA; break; case ATA_DEV_ATAPI: @@ -1980,7 +1986,7 @@ retry: rc = -EINVAL; reason = "device reports invalid type"; - if (class == ATA_DEV_ATA) { + if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) { if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) goto err_out; if (ap->host->flags & ATA_HOST_IGNORE_ATA && @@ -2015,7 +2021,8 @@ retry: goto retry; } - if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { + if ((flags & ATA_READID_POSTRESET) && + (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) { /* * The exact sequence expected by certain pre-ATA4 drives is: * SRST RESET @@ -2280,7 +2287,7 @@ int ata_dev_configure(struct ata_device *dev) sizeof(modelbuf)); /* ATA-specific feature tests */ - if (dev->class == ATA_DEV_ATA) { + if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { if (ata_id_is_cfa(id)) { /* CPRM may make this media unusable */ if (id[ATA_ID_CFA_KEY_MGMT] & 1) @@ -4033,6 +4040,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, if (ata_class_enabled(new_class) && new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI && + new_class != ATA_DEV_ZAC && new_class != ATA_DEV_SEMB) { ata_dev_info(dev, "class mismatch %u != %u\n", dev->class, new_class); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index dad83df555c4..3dbec8954c86 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1809,6 +1809,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, switch (qc->dev->class) { case ATA_DEV_ATA: + case ATA_DEV_ZAC: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; if (err & (ATA_UNC | ATA_AMNF)) @@ -3792,7 +3793,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, struct ata_eh_context *ehc = &link->eh_context; unsigned long tmp; - if (dev->class != ATA_DEV_ATA) + if (dev->class != ATA_DEV_ATA && + dev->class != ATA_DEV_ZAC) continue; if (!(ehc->i.dev_action[dev->devno] & ATA_EH_PARK)) @@ -3873,7 +3875,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, /* retry flush if necessary */ ata_for_each_dev(dev, link, ALL) { - if (dev->class != ATA_DEV_ATA) + if (dev->class != ATA_DEV_ATA && + dev->class != ATA_DEV_ZAC) continue; rc = ata_eh_maybe_retry_flush(dev); if (rc) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index dd45c6a03e5d..e364e86e84d7 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -235,7 +235,8 @@ static ssize_t ata_scsi_park_store(struct device *device, rc = -ENODEV; goto unlock; } - if (dev->class != ATA_DEV_ATA) { + if (dev->class != ATA_DEV_ATA && + dev->class != ATA_DEV_ZAC) { rc = -EOPNOTSUPP; goto unlock; } @@ -1961,6 +1962,7 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args, static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) { const u8 versions[] = { + 0x00, 0x60, /* SAM-3 (no version claimed) */ 0x03, @@ -1969,6 +1971,20 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 0x02, 0x60 /* SPC-3 (no version claimed) */ }; + const u8 versions_zbc[] = { + 0x00, + 0xA0, /* SAM-5 (no version claimed) */ + + 0x04, + 0xC0, /* SBC-3 (no version claimed) */ + + 
0x04, + 0x60, /* SPC-4 (no version claimed) */ + + 0x60, + 0x20, /* ZBC (no version claimed) */ + }; + u8 hdr[] = { TYPE_DISK, 0, @@ -1983,6 +1999,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) if (ata_id_removeable(args->id)) hdr[1] |= (1 << 7); + if (args->dev->class == ATA_DEV_ZAC) { + hdr[0] = TYPE_ZBC; + hdr[2] = 0x6; /* ZBC is defined in SPC-4 */ + } + memcpy(rbuf, hdr, sizeof(hdr)); memcpy(&rbuf[8], "ATA ", 8); ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); @@ -1995,7 +2016,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) if (rbuf[32] == 0 || rbuf[32] == ' ') memcpy(&rbuf[32], "n/a ", 4); - memcpy(rbuf + 59, versions, sizeof(versions)); + if (args->dev->class == ATA_DEV_ZAC) + memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc)); + else + memcpy(rbuf + 58, versions, sizeof(versions)); return 0; } @@ -2564,7 +2588,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) DPRINTK("ATAPI request sense\n"); - /* FIXME: is this needed? */ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); #ifdef CONFIG_ATA_SFF @@ -3405,7 +3428,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, ata_xlat_func_t xlat_func; int rc = 0; - if (dev->class == ATA_DEV_ATA) { + if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) goto bad_cdb_len;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index e37413228228..3227b7c8a05f 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -143,6 +143,7 @@ static struct { { ATA_DEV_PMP_UNSUP, "pmp" }, { ATA_DEV_SEMB, "semb" }, { ATA_DEV_SEMB_UNSUP, "semb" }, + { ATA_DEV_ZAC, "zac" }, { ATA_DEV_NONE, "none" } }; ata_bitfield_name_search(class, ata_class_names)
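
The libata hunks above all hang off one new device class: ata_dev_classify() now returns ATA_DEV_ZAC when it sees the zoned-device signature, and the rest of the core (HPA resize, revalidation, EH park/flush, SCSI translation) accepts that class wherever it accepted ATA_DEV_ATA. A minimal sketch of the signature convention being extended, using the byte pairs libata tests in the task-file LBA-mid/LBA-high registers (the helper name is illustrative, not from the patch):

	static unsigned int classify_by_sig(u8 lbam, u8 lbah)
	{
		if (lbam == 0x00 && lbah == 0x00)
			return ATA_DEV_ATA;	/* plain ATA device */
		if (lbam == 0x14 && lbah == 0xeb)
			return ATA_DEV_ATAPI;	/* ATAPI device */
		if (lbam == 0x69 && lbah == 0x96)
			return ATA_DEV_PMP;	/* port multiplier */
		if (lbam == 0x3c && lbah == 0xc3)
			return ATA_DEV_SEMB;	/* SEMB device */
		if (lbam == 0xcd && lbah == 0xab)
			return ATA_DEV_ZAC;	/* zoned device, new in this series */
		return ATA_DEV_UNKNOWN;
	}

On the SCSI side, ata_scsiop_inq_std() then reports such devices as TYPE_ZBC with the SPC-4/ZBC version descriptors, as the hunks above show.
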
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 5ec69c3d409d..04faf6df959f 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -20,6 +20,9 @@ #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/time.h> +#ifdef CONFIG_BCM47XX +#include <bcm47xx_nvram.h> +#endif enum bcma_boot_dev { BCMA_BOOT_DEV_UNK = 0, @@ -316,10 +319,16 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) switch (boot_dev) { case BCMA_BOOT_DEV_PARALLEL: case BCMA_BOOT_DEV_SERIAL: - /* TODO: Init NVRAM using BCMA_SOC_FLASH2 window */ +#ifdef CONFIG_BCM47XX + bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH2, + BCMA_SOC_FLASH2_SZ); +#endif break; case BCMA_BOOT_DEV_NAND: - /* TODO: Init NVRAM using BCMA_SOC_FLASH1 window */ +#ifdef CONFIG_BCM47XX + bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH1, + BCMA_SOC_FLASH1_SZ); +#endif break; default: break;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 0ebadf93b6c5..4b911ed96ea3 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -23,8 +23,8 @@ #define DRV_MODULE_NAME "sunvdc" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.1" -#define DRV_MODULE_RELDATE "February 13, 2013" +#define DRV_MODULE_VERSION "1.2" +#define DRV_MODULE_RELDATE "November 24, 2014" static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define WAITING_FOR_GEN_CMD 0x04 #define WAITING_FOR_ANY -1 +static struct workqueue_struct *sunvdc_wq; + struct vdc_req_entry { struct request *req; }; @@ -60,6 +62,10 @@ struct vdc_port { u64 max_xfer_size; u32 vdisk_block_size; + u64 ldc_timeout; + struct timer_list ldc_reset_timer; + struct work_struct ldc_reset_work; + /* The server fills these in for us in the disk attribute * ACK packet. */ @@ -71,6 +77,10 @@ struct vdc_port { char disk_name[32]; }; +static void vdc_ldc_reset(struct vdc_port *port); +static void vdc_ldc_reset_work(struct work_struct *work); +static void vdc_ldc_reset_timer(unsigned long _arg); + static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) { return container_of(vio, struct vdc_port, vio); @@ -150,6 +160,21 @@ static const struct block_device_operations vdc_fops = { .ioctl = vdc_ioctl, }; +static void vdc_blk_queue_start(struct vdc_port *port) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + /* restart blk queue when ring is half emptied. also called after + * handshake completes, so check for initial handshake before we've + * allocated a disk. + */ + if (port->disk && blk_queue_stopped(port->disk->queue) && + vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) { + blk_start_queue(port->disk->queue); + } + +} + static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) { if (vio->cmp && @@ -163,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) static void vdc_handshake_complete(struct vio_driver_state *vio) { + struct vdc_port *port = to_vdc_port(vio); + + del_timer(&port->ldc_reset_timer); vdc_finish(vio, 0, WAITING_FOR_LINK_UP); + vdc_blk_queue_start(port); } static int vdc_handle_unknown(struct vdc_port *port, void *arg) @@ -269,7 +298,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies); desc->hdr.state = VIO_DESC_FREE; - dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1); + dr->cons = vio_dring_next(dr, index); req = rqe->req; if (req == NULL) { @@ -281,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, __blk_end_request(req, (desc->status ?
-EIO : 0), desc->size); - /* restart blk queue when ring is half emptied */ - if (blk_queue_stopped(port->disk->queue) && - vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) - blk_start_queue(port->disk->queue); + vdc_blk_queue_start(port); } static int vdc_ack(struct vdc_port *port, void *msgbuf) @@ -317,17 +343,20 @@ static void vdc_event(void *arg, int event) spin_lock_irqsave(&vio->lock, flags); - if (unlikely(event == LDC_EVENT_RESET || - event == LDC_EVENT_UP)) { + if (unlikely(event == LDC_EVENT_RESET)) { vio_link_state_change(vio, event); - spin_unlock_irqrestore(&vio->lock, flags); - return; + queue_work(sunvdc_wq, &port->ldc_reset_work); + goto out; + } + + if (unlikely(event == LDC_EVENT_UP)) { + vio_link_state_change(vio, event); + goto out; } if (unlikely(event != LDC_EVENT_DATA_READY)) { - printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); - spin_unlock_irqrestore(&vio->lock, flags); - return; + pr_warn(PFX "Unexpected LDC event %d\n", event); + goto out; } err = 0; @@ -371,6 +400,7 @@ static void vdc_event(void *arg, int event) } if (err < 0) vdc_finish(&port->vio, err, WAITING_FOR_ANY); +out: spin_unlock_irqrestore(&vio->lock, flags); } @@ -403,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port) delay = 128; } while (err == -EAGAIN); + if (err == -ENOTCONN) + vdc_ldc_reset(port); return err; } @@ -472,7 +504,7 @@ static int __send_request(struct request *req) printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err); } else { port->req_id++; - dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); + dr->prod = vio_dring_next(dr, dr->prod); } return err; @@ -626,7 +658,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) err = __vdc_tx_trigger(port); if (err >= 0) { port->req_id++; - dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); + dr->prod = vio_dring_next(dr, dr->prod); spin_unlock_irqrestore(&port->vio.lock, flags); wait_for_completion(&comp.com); @@ -690,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port) } } -static int probe_disk(struct vdc_port *port) +static int vdc_port_up(struct vdc_port *port) { struct vio_completion comp; - struct request_queue *q; - struct gendisk *g; - int err; init_completion(&comp.com); comp.err = 0; @@ -703,10 +732,27 @@ static int probe_disk(struct vdc_port *port) port->vio.cmp = &comp; vio_port_up(&port->vio); - wait_for_completion(&comp.com); - if (comp.err) - return comp.err; + return comp.err; +} + +static void vdc_port_down(struct vdc_port *port) +{ + ldc_disconnect(port->vio.lp); + ldc_unbind(port->vio.lp); + vdc_free_tx_ring(port); + vio_ldc_free(&port->vio); +} + +static int probe_disk(struct vdc_port *port) +{ + struct request_queue *q; + struct gendisk *g; + int err; + + err = vdc_port_up(port); + if (err) + return err; if (vdc_version_supported(port, 1, 1)) { /* vdisk_size should be set during the handshake, if it wasn't @@ -819,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) struct mdesc_handle *hp; struct vdc_port *port; int err; + const u64 *ldc_timeout; print_version(); @@ -848,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); port->vdisk_size = -1; + /* Actual wall time may be double due to do_generic_file_read() doing + * a readahead I/O first, and once that fails it will try to read a + * single page. + */ + ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL); + port->ldc_timeout = ldc_timeout ?
*ldc_timeout : 0; + setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer, + (unsigned long)port); + INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work); + err = vio_driver_init(&port->vio, vdev, VDEV_DISK, vdc_versions, ARRAY_SIZE(vdc_versions), &vdc_vio_ops, port->disk_name); @@ -896,8 +953,21 @@ static int vdc_port_remove(struct vio_dev *vdev) struct vdc_port *port = dev_get_drvdata(&vdev->dev); if (port) { + unsigned long flags; + + spin_lock_irqsave(&port->vio.lock, flags); + blk_stop_queue(port->disk->queue); + spin_unlock_irqrestore(&port->vio.lock, flags); + + flush_work(&port->ldc_reset_work); + del_timer_sync(&port->ldc_reset_timer); del_timer_sync(&port->vio.timer); + del_gendisk(port->disk); + blk_cleanup_queue(port->disk->queue); + put_disk(port->disk); + port->disk = NULL; + vdc_free_tx_ring(port); vio_ldc_free(&port->vio); @@ -908,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev) return 0; } +static void vdc_requeue_inflight(struct vdc_port *port) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + u32 idx; + + for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) { + struct vio_disk_desc *desc = vio_dring_entry(dr, idx); + struct vdc_req_entry *rqe = &port->rq_arr[idx]; + struct request *req; + + ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies); + desc->hdr.state = VIO_DESC_FREE; + dr->cons = vio_dring_next(dr, idx); + + req = rqe->req; + if (req == NULL) { + vdc_end_special(port, desc); + continue; + } + + rqe->req = NULL; + blk_requeue_request(port->disk->queue, req); + } +} + +static void vdc_queue_drain(struct vdc_port *port) +{ + struct request *req; + + while ((req = blk_fetch_request(port->disk->queue)) != NULL) + __blk_end_request_all(req, -EIO); +} + +static void vdc_ldc_reset_timer(unsigned long _arg) +{ + struct vdc_port *port = (struct vdc_port *) _arg; + struct vio_driver_state *vio = &port->vio; + unsigned long flags; + + spin_lock_irqsave(&vio->lock, flags); + if (!(port->vio.hs_state & VIO_HS_COMPLETE)) { + pr_warn(PFX "%s ldc down %llu seconds, draining queue\n", + port->disk_name, port->ldc_timeout); + vdc_queue_drain(port); + vdc_blk_queue_start(port); + } + spin_unlock_irqrestore(&vio->lock, flags); +} + +static void vdc_ldc_reset_work(struct work_struct *work) +{ + struct vdc_port *port; + struct vio_driver_state *vio; + unsigned long flags; + + port = container_of(work, struct vdc_port, ldc_reset_work); + vio = &port->vio; + + spin_lock_irqsave(&vio->lock, flags); + vdc_ldc_reset(port); + spin_unlock_irqrestore(&vio->lock, flags); +} + +static void vdc_ldc_reset(struct vdc_port *port) +{ + int err; + + assert_spin_locked(&port->vio.lock); + + pr_warn(PFX "%s ldc link reset\n", port->disk_name); + blk_stop_queue(port->disk->queue); + vdc_requeue_inflight(port); + vdc_port_down(port); + + err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port); + if (err) { + pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err); + return; + } + + err = vdc_alloc_tx_ring(port); + if (err) { + pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err); + goto err_free_ldc; + } + + if (port->ldc_timeout) + mod_timer(&port->ldc_reset_timer, + round_jiffies(jiffies + HZ * port->ldc_timeout)); + mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ)); + return; + +err_free_ldc: + vio_ldc_free(&port->vio); +} + static const struct vio_device_id vdc_port_match[] = { { .type = "vdc-port", @@ -927,9 +1093,13 @@ static int __init vdc_init(void) { int err; + sunvdc_wq = alloc_workqueue("sunvdc", 0, 0); + if 
(!sunvdc_wq) + return -ENOMEM; + err = register_blkdev(0, VDCBLK_NAME); if (err < 0) - goto out_err; + goto out_free_wq; vdc_major = err; @@ -943,7 +1113,8 @@ out_unregister_blkdev: unregister_blkdev(vdc_major, VDCBLK_NAME); vdc_major = 0; -out_err: +out_free_wq: + destroy_workqueue(sunvdc_wq); return err; } @@ -951,6 +1122,7 @@ static void __exit vdc_exit(void) { vio_unregister_driver(&vdc_port_driver); unregister_blkdev(vdc_major, VDCBLK_NAME); + destroy_workqueue(sunvdc_wq); } module_init(vdc_init);
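
The sunvdc conversion replaces the driver's open-coded descriptor-ring arithmetic with vio_dring_next() and restarts the stopped block queue from one place, vdc_blk_queue_start(), once at least half the ring is free. For a power-of-two ring such as VDC_TX_RING_SIZE, the idiom being factored out is a masked increment; the sketch below illustrates the equivalence and is not the kernel's definition of vio_dring_next():

	static inline u32 ring_advance(u32 idx, u32 ring_size)
	{
		/* assumes ring_size is a power of two */
		return (idx + 1) & (ring_size - 1);
	}

This is exactly the "(dr->prod + 1) & (VDC_TX_RING_SIZE - 1)" expression removed above, and the same stepping drives the new vdc_requeue_inflight() walk from dr->cons to dr->prod after an LDC reset.
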
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index db1c9b7adaa6..6ed9e9fe5233 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -62,6 +62,20 @@ config IPMI_SI_PROBE_DEFAULTS only be available on older systems if the "ipmi_si_intf.trydefaults=1" boot argument is passed. +config IPMI_SSIF + tristate 'IPMI SMBus handler (SSIF)' + select I2C + help + Provides a driver for a SMBus interface to a BMC, meaning that you + have a driver that must be accessed over an I2C bus instead of a + standard interface. This module requires I2C support. + +config IPMI_POWERNV + depends on PPC_POWERNV + tristate 'POWERNV (OPAL firmware) IPMI interface' + help + Provides a driver for OPAL firmware-based IPMI interfaces. + config IPMI_WATCHDOG tristate 'IPMI Watchdog Timer' help
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 16a93648d54e..f3ffde1f5f1f 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -7,5 +7,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o obj-$(CONFIG_IPMI_SI) += ipmi_si.o +obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o +obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index f816211f062f..5fa83f751378 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void); static void smi_recv_tasklet(unsigned long); static void handle_new_recv_msgs(ipmi_smi_t intf); static void need_waiter(ipmi_smi_t intf); +static int handle_one_recv_msg(ipmi_smi_t intf, + struct ipmi_smi_msg *msg); static int initialized; @@ -191,12 +193,12 @@ struct ipmi_proc_entry { #endif struct bmc_device { - struct platform_device *dev; + struct platform_device pdev; struct ipmi_device_id id; unsigned char guid[16]; int guid_set; - - struct kref refcount; + char name[16]; + struct kref usecount; /* bmc device attributes */ struct device_attribute device_id_attr; @@ -210,6 +212,7 @@ struct bmc_device { struct device_attribute guid_attr; struct device_attribute aux_firmware_rev_attr; }; +#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) /* * Various statistics for IPMI, these index stats[] in the ipmi_smi @@ -323,6 +326,9 @@ struct ipmi_smi { struct kref refcount; + /* Set when the interface is being unregistered. */ + bool in_shutdown; + /* Used for a list of interfaces. */ struct list_head link; @@ -341,7 +347,6 @@ struct ipmi_smi { struct bmc_device *bmc; char *my_dev_name; - char *sysfs_name; /* * This is the lower-layer's sender routine. Note that you @@ -377,11 +382,16 @@ struct ipmi_smi { * periodic timer interrupt. The tasklet is for handling received * messages directly from the handler. */ - spinlock_t waiting_msgs_lock; - struct list_head waiting_msgs; + spinlock_t waiting_rcv_msgs_lock; + struct list_head waiting_rcv_msgs; atomic_t watchdog_pretimeouts_to_deliver; struct tasklet_struct recv_tasklet; + spinlock_t xmit_msgs_lock; + struct list_head xmit_msgs; + struct ipmi_smi_msg *curr_msg; + struct list_head hp_xmit_msgs; + /* * The list of command receivers that are registered for commands * on this interface. @@ -474,6 +484,18 @@ static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_get_stat(intf, stat) \ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) +static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI", + "ACPI", "SMBIOS", "PCI", + "device-tree", "default" }; + +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) +{ + if (src > SI_DEFAULT) + src = 0; /* Invalid */ + return addr_src_to_str[src]; +} +EXPORT_SYMBOL(ipmi_addr_src_to_str); + static int is_lan_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_LAN_ADDR_TYPE; @@ -517,7 +539,7 @@ static void clean_up_interface_data(ipmi_smi_t intf) tasklet_kill(&intf->recv_tasklet); - free_smi_msg_list(&intf->waiting_msgs); + free_smi_msg_list(&intf->waiting_rcv_msgs); free_recv_msg_list(&intf->waiting_events); /* @@ -1473,6 +1495,30 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, smi_msg->msgid = msgid; } +static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, + struct ipmi_smi_msg *smi_msg, int priority) +{ + int run_to_completion = intf->run_to_completion; + unsigned long flags; + + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (intf->curr_msg) { + if (priority > 0) + list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); + else + list_add_tail(&smi_msg->link, &intf->xmit_msgs); + smi_msg = NULL; + } else { + intf->curr_msg = smi_msg; + } + if (!run_to_completion) + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + + if (smi_msg) + handlers->sender(intf->send_info, smi_msg); +} + /* * Separate from ipmi_request so that the user does not have to be * supplied in certain circumstances (mainly at panic time).
If @@ -1497,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user, struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; unsigned long flags; - struct ipmi_smi_handlers *handlers; if (supplied_recv) @@ -1520,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user, } rcu_read_lock(); - handlers = intf->handlers; - if (!handlers) { + if (intf->in_shutdown) { rv = -ENODEV; goto out_err; } @@ -1856,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user, } #endif - handlers->sender(intf->send_info, smi_msg, priority); + smi_send(intf, intf->handlers, smi_msg, priority); rcu_read_unlock(); return 0; @@ -2153,7 +2197,7 @@ static void remove_proc_entries(ipmi_smi_t smi) static int __find_bmc_guid(struct device *dev, void *data) { unsigned char *id = data; - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return memcmp(bmc->guid, id, 16) == 0; } @@ -2164,7 +2208,7 @@ static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); if (dev) - return dev_get_drvdata(dev); + return to_bmc_device(dev); else return NULL; } @@ -2177,7 +2221,7 @@ struct prod_dev_id { static int __find_bmc_prod_dev_id(struct device *dev, void *data) { struct prod_dev_id *id = data; - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return (bmc->id.product_id == id->product_id && bmc->id.device_id == id->device_id); @@ -2195,7 +2239,7 @@ static struct bmc_device *ipmi_find_bmc_prod_dev_id( dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); if (dev) - return dev_get_drvdata(dev); + return to_bmc_device(dev); else return NULL; } @@ -2204,84 +2248,92 @@ static ssize_t device_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "%u\n", bmc->id.device_id); } +DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL); -static ssize_t provides_dev_sdrs_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t provides_device_sdrs_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "%u\n", (bmc->id.device_revision & 0x80) >> 7); } +DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL); static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "%u\n", bmc->id.device_revision & 0x0F); } +DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL); -static ssize_t firmware_rev_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t firmware_revision_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, bmc->id.firmware_revision_2); } +DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL); static ssize_t ipmi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "%u.%u\n", ipmi_version_major(&bmc->id), 
ipmi_version_minor(&bmc->id)); } +DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL); static ssize_t add_dev_support_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "0x%02x\n", bmc->id.additional_device_support); } +DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL); static ssize_t manufacturer_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); } +DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL); static ssize_t product_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); } +DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL); static ssize_t aux_firmware_rev_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", bmc->id.aux_firmware_revision[3], @@ -2289,174 +2341,96 @@ static ssize_t aux_firmware_rev_show(struct device *dev, bmc->id.aux_firmware_revision[1], bmc->id.aux_firmware_revision[0]); } +DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 100, "%Lx%Lx\n", (long long) bmc->guid[0], (long long) bmc->guid[8]); } +DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL); + +static struct attribute *bmc_dev_attrs[] = { + &dev_attr_device_id.attr, + &dev_attr_provides_device_sdrs.attr, + &dev_attr_revision.attr, + &dev_attr_firmware_revision.attr, + &dev_attr_ipmi_version.attr, + &dev_attr_additional_device_support.attr, + &dev_attr_manufacturer_id.attr, + &dev_attr_product_id.attr, + NULL +}; -static void remove_files(struct bmc_device *bmc) -{ - if (!bmc->dev) - return; +static struct attribute_group bmc_dev_attr_group = { + .attrs = bmc_dev_attrs, +}; - device_remove_file(&bmc->dev->dev, - &bmc->device_id_attr); - device_remove_file(&bmc->dev->dev, - &bmc->provides_dev_sdrs_attr); - device_remove_file(&bmc->dev->dev, - &bmc->revision_attr); - device_remove_file(&bmc->dev->dev, - &bmc->firmware_rev_attr); - device_remove_file(&bmc->dev->dev, - &bmc->version_attr); - device_remove_file(&bmc->dev->dev, - &bmc->add_dev_support_attr); - device_remove_file(&bmc->dev->dev, - &bmc->manufacturer_id_attr); - device_remove_file(&bmc->dev->dev, - &bmc->product_id_attr); +static const struct attribute_group *bmc_dev_attr_groups[] = { + &bmc_dev_attr_group, + NULL +}; - if (bmc->id.aux_firmware_revision_set) - device_remove_file(&bmc->dev->dev, - &bmc->aux_firmware_rev_attr); - if (bmc->guid_set) - device_remove_file(&bmc->dev->dev, - &bmc->guid_attr); +static struct device_type bmc_device_type = { + .groups = bmc_dev_attr_groups, +}; + +static void +release_bmc_device(struct device *dev) +{ + kfree(to_bmc_device(dev)); } static void cleanup_bmc_device(struct kref *ref) { - struct bmc_device *bmc; + struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); - bmc = 
container_of(ref, struct bmc_device, refcount); + if (bmc->id.aux_firmware_revision_set) + device_remove_file(&bmc->pdev.dev, + &bmc->aux_firmware_rev_attr); + if (bmc->guid_set) + device_remove_file(&bmc->pdev.dev, + &bmc->guid_attr); - remove_files(bmc); - platform_device_unregister(bmc->dev); - kfree(bmc); + platform_device_unregister(&bmc->pdev); } static void ipmi_bmc_unregister(ipmi_smi_t intf) { struct bmc_device *bmc = intf->bmc; - if (intf->sysfs_name) { - sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name); - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; - } + sysfs_remove_link(&intf->si_dev->kobj, "bmc"); if (intf->my_dev_name) { - sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); + sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); kfree(intf->my_dev_name); intf->my_dev_name = NULL; } mutex_lock(&ipmidriver_mutex); - kref_put(&bmc->refcount, cleanup_bmc_device); + kref_put(&bmc->usecount, cleanup_bmc_device); intf->bmc = NULL; mutex_unlock(&ipmidriver_mutex); } -static int create_files(struct bmc_device *bmc) +static int create_bmc_files(struct bmc_device *bmc) { int err; - bmc->device_id_attr.attr.name = "device_id"; - bmc->device_id_attr.attr.mode = S_IRUGO; - bmc->device_id_attr.show = device_id_show; - sysfs_attr_init(&bmc->device_id_attr.attr); - - bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; - bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; - bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; - sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr); - - bmc->revision_attr.attr.name = "revision"; - bmc->revision_attr.attr.mode = S_IRUGO; - bmc->revision_attr.show = revision_show; - sysfs_attr_init(&bmc->revision_attr.attr); - - bmc->firmware_rev_attr.attr.name = "firmware_revision"; - bmc->firmware_rev_attr.attr.mode = S_IRUGO; - bmc->firmware_rev_attr.show = firmware_rev_show; - sysfs_attr_init(&bmc->firmware_rev_attr.attr); - - bmc->version_attr.attr.name = "ipmi_version"; - bmc->version_attr.attr.mode = S_IRUGO; - bmc->version_attr.show = ipmi_version_show; - sysfs_attr_init(&bmc->version_attr.attr); - - bmc->add_dev_support_attr.attr.name = "additional_device_support"; - bmc->add_dev_support_attr.attr.mode = S_IRUGO; - bmc->add_dev_support_attr.show = add_dev_support_show; - sysfs_attr_init(&bmc->add_dev_support_attr.attr); - - bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; - bmc->manufacturer_id_attr.attr.mode = S_IRUGO; - bmc->manufacturer_id_attr.show = manufacturer_id_show; - sysfs_attr_init(&bmc->manufacturer_id_attr.attr); - - bmc->product_id_attr.attr.name = "product_id"; - bmc->product_id_attr.attr.mode = S_IRUGO; - bmc->product_id_attr.show = product_id_show; - sysfs_attr_init(&bmc->product_id_attr.attr); - - bmc->guid_attr.attr.name = "guid"; - bmc->guid_attr.attr.mode = S_IRUGO; - bmc->guid_attr.show = guid_show; - sysfs_attr_init(&bmc->guid_attr.attr); - - bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; - bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; - bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; - sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr); - - err = device_create_file(&bmc->dev->dev, - &bmc->device_id_attr); - if (err) - goto out; - err = device_create_file(&bmc->dev->dev, - &bmc->provides_dev_sdrs_attr); - if (err) - goto out_devid; - err = device_create_file(&bmc->dev->dev, - &bmc->revision_attr); - if (err) - goto out_sdrs; - err = device_create_file(&bmc->dev->dev, - &bmc->firmware_rev_attr); - if (err) - goto out_rev; - err = 
device_create_file(&bmc->dev->dev, - &bmc->version_attr); - if (err) - goto out_firm; - err = device_create_file(&bmc->dev->dev, - &bmc->add_dev_support_attr); - if (err) - goto out_version; - err = device_create_file(&bmc->dev->dev, - &bmc->manufacturer_id_attr); - if (err) - goto out_add_dev; - err = device_create_file(&bmc->dev->dev, - &bmc->product_id_attr); - if (err) - goto out_manu; if (bmc->id.aux_firmware_revision_set) { - err = device_create_file(&bmc->dev->dev, + bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; + err = device_create_file(&bmc->pdev.dev, &bmc->aux_firmware_rev_attr); if (err) - goto out_prod_id; + goto out; } if (bmc->guid_set) { - err = device_create_file(&bmc->dev->dev, + bmc->guid_attr.attr.name = "guid"; + err = device_create_file(&bmc->pdev.dev, &bmc->guid_attr); if (err) goto out_aux_firm; @@ -2466,44 +2440,17 @@ static int create_files(struct bmc_device *bmc) out_aux_firm: if (bmc->id.aux_firmware_revision_set) - device_remove_file(&bmc->dev->dev, + device_remove_file(&bmc->pdev.dev, &bmc->aux_firmware_rev_attr); -out_prod_id: - device_remove_file(&bmc->dev->dev, - &bmc->product_id_attr); -out_manu: - device_remove_file(&bmc->dev->dev, - &bmc->manufacturer_id_attr); -out_add_dev: - device_remove_file(&bmc->dev->dev, - &bmc->add_dev_support_attr); -out_version: - device_remove_file(&bmc->dev->dev, - &bmc->version_attr); -out_firm: - device_remove_file(&bmc->dev->dev, - &bmc->firmware_rev_attr); -out_rev: - device_remove_file(&bmc->dev->dev, - &bmc->revision_attr); -out_sdrs: - device_remove_file(&bmc->dev->dev, - &bmc->provides_dev_sdrs_attr); -out_devid: - device_remove_file(&bmc->dev->dev, - &bmc->device_id_attr); out: return err; } -static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, - const char *sysfs_name) +static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum) { int rv; struct bmc_device *bmc = intf->bmc; struct bmc_device *old_bmc; - int size; - char dummy[1]; mutex_lock(&ipmidriver_mutex); @@ -2527,7 +2474,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, intf->bmc = old_bmc; bmc = old_bmc; - kref_get(&bmc->refcount); + kref_get(&bmc->usecount); mutex_unlock(&ipmidriver_mutex); printk(KERN_INFO @@ -2537,12 +2484,12 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, bmc->id.product_id, bmc->id.device_id); } else { - char name[14]; unsigned char orig_dev_id = bmc->id.device_id; int warn_printed = 0; - snprintf(name, sizeof(name), + snprintf(bmc->name, sizeof(bmc->name), "ipmi_bmc.%4.4x", bmc->id.product_id); + bmc->pdev.name = bmc->name; while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, bmc->id.product_id, @@ -2566,23 +2513,16 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, } } - bmc->dev = platform_device_alloc(name, bmc->id.device_id); - if (!bmc->dev) { - mutex_unlock(&ipmidriver_mutex); - printk(KERN_ERR - "ipmi_msghandler:" - " Unable to allocate platform device\n"); - return -ENOMEM; - } - bmc->dev->dev.driver = &ipmidriver.driver; - dev_set_drvdata(&bmc->dev->dev, bmc); - kref_init(&bmc->refcount); + bmc->pdev.dev.driver = &ipmidriver.driver; + bmc->pdev.id = bmc->id.device_id; + bmc->pdev.dev.release = release_bmc_device; + bmc->pdev.dev.type = &bmc_device_type; + kref_init(&bmc->usecount); - rv = platform_device_add(bmc->dev); + rv = platform_device_register(&bmc->pdev); mutex_unlock(&ipmidriver_mutex); if (rv) { - platform_device_put(bmc->dev); - bmc->dev = NULL; + put_device(&bmc->pdev.dev); printk(KERN_ERR "ipmi_msghandler:" " Unable to register bmc device: %d\n", @@ -2594,10 
+2534,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, return rv; } - rv = create_files(bmc); + rv = create_bmc_files(bmc); if (rv) { mutex_lock(&ipmidriver_mutex); - platform_device_unregister(bmc->dev); + platform_device_unregister(&bmc->pdev); mutex_unlock(&ipmidriver_mutex); return rv; @@ -2614,44 +2554,26 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, * create symlink from system interface device to bmc device * and back. */ - intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL); - if (!intf->sysfs_name) { - rv = -ENOMEM; - printk(KERN_ERR - "ipmi_msghandler: allocate link to BMC: %d\n", - rv); - goto out_err; - } - - rv = sysfs_create_link(&intf->si_dev->kobj, - &bmc->dev->dev.kobj, intf->sysfs_name); + rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); if (rv) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; printk(KERN_ERR "ipmi_msghandler: Unable to create bmc symlink: %d\n", rv); goto out_err; } - size = snprintf(dummy, 0, "ipmi%d", ifnum); - intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); + intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum); if (!intf->my_dev_name) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; rv = -ENOMEM; printk(KERN_ERR "ipmi_msghandler: allocate link from BMC: %d\n", rv); goto out_err; } - snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum); - rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, + rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; kfree(intf->my_dev_name); intf->my_dev_name = NULL; printk(KERN_ERR @@ -2850,7 +2772,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *si_dev, - const char *sysfs_name, unsigned char slave_addr) { int i, j; @@ -2909,12 +2830,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, #ifdef CONFIG_PROC_FS mutex_init(&intf->proc_entry_lock); #endif - spin_lock_init(&intf->waiting_msgs_lock); - INIT_LIST_HEAD(&intf->waiting_msgs); + spin_lock_init(&intf->waiting_rcv_msgs_lock); + INIT_LIST_HEAD(&intf->waiting_rcv_msgs); tasklet_init(&intf->recv_tasklet, smi_recv_tasklet, (unsigned long) intf); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); + spin_lock_init(&intf->xmit_msgs_lock); + INIT_LIST_HEAD(&intf->xmit_msgs); + INIT_LIST_HEAD(&intf->hp_xmit_msgs); spin_lock_init(&intf->events_lock); atomic_set(&intf->event_waiters, 0); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; @@ -2984,7 +2908,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, if (rv == 0) rv = add_proc_entries(intf, i); - rv = ipmi_bmc_register(intf, i, sysfs_name); + rv = ipmi_bmc_register(intf, i); out: if (rv) { @@ -3014,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, } EXPORT_SYMBOL(ipmi_register_smi); +static void deliver_smi_err_response(ipmi_smi_t intf, + struct ipmi_smi_msg *msg, + unsigned char err) +{ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = err; + msg->rsp_size = 3; + /* It's an error, so it will never requeue, no need to check return. */ + handle_one_recv_msg(intf, msg); +} + static void cleanup_smi_msgs(ipmi_smi_t intf) { int i; struct seq_table *ent; + struct ipmi_smi_msg *msg; + struct list_head *entry; + struct list_head tmplist; + + /* Clear out our transmit queues and hold the messages. 
*/ + INIT_LIST_HEAD(&tmplist); + list_splice_tail(&intf->hp_xmit_msgs, &tmplist); + list_splice_tail(&intf->xmit_msgs, &tmplist); + + /* Current message first, to preserve order */ + while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { + /* Wait for the message to clear out. */ + schedule_timeout(1); + } /* No need for locks, the interface is down. */ + + /* + * Return errors for all pending messages in queue and in the + * tables waiting for remote responses. + */ + while (!list_empty(&tmplist)) { + entry = tmplist.next; + list_del(entry); + msg = list_entry(entry, struct ipmi_smi_msg, link); + deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); + } + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { ent = &(intf->seq_table[i]); if (!ent->inuse) @@ -3031,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf) int ipmi_unregister_smi(ipmi_smi_t intf) { struct ipmi_smi_watcher *w; - int intf_num = intf->intf_num; + int intf_num = intf->intf_num; + ipmi_user_t user; ipmi_bmc_unregister(intf); mutex_lock(&smi_watchers_mutex); mutex_lock(&ipmi_interfaces_mutex); intf->intf_num = -1; - intf->handlers = NULL; + intf->in_shutdown = true; list_del_rcu(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); synchronize_rcu(); cleanup_smi_msgs(intf); + /* Clean up the effects of users on the lower-level software. */ + mutex_lock(&ipmi_interfaces_mutex); + rcu_read_lock(); + list_for_each_entry_rcu(user, &intf->users, link) { + module_put(intf->handlers->owner); + if (intf->handlers->dec_usecount) + intf->handlers->dec_usecount(intf->send_info); + } + rcu_read_unlock(); + intf->handlers = NULL; + mutex_unlock(&ipmi_interfaces_mutex); + remove_proc_entries(intf); /* @@ -3134,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, ipmi_user_t user = NULL; struct ipmi_ipmb_addr *ipmb_addr; struct ipmi_recv_msg *recv_msg; - struct ipmi_smi_handlers *handlers; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3188,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, } #endif rcu_read_lock(); - handlers = intf->handlers; - if (handlers) { - handlers->sender(intf->send_info, msg, 0); + if (!intf->in_shutdown) { + smi_send(intf, intf->handlers, msg, 0); /* * We used the message, so return the value * that causes it to not be freed or @@ -3857,32 +3830,32 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) /* See if any waiting messages need to be processed. */ if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_msgs_lock, flags); - while (!list_empty(&intf->waiting_msgs)) { - smi_msg = list_entry(intf->waiting_msgs.next, + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + while (!list_empty(&intf->waiting_rcv_msgs)) { + smi_msg = list_entry(intf->waiting_rcv_msgs.next, struct ipmi_smi_msg, link); - list_del(&smi_msg->link); if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); rv = handle_one_recv_msg(intf, smi_msg); if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_msgs_lock, flags); - if (rv == 0) { - /* Message handled */ - ipmi_free_smi_msg(smi_msg); - } else if (rv < 0) { - /* Fatal error on the message, del but don't free. */ - } else { + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + if (rv > 0) { /* * To preserve message order, quit if we * can't handle a message. 
*/ - list_add(&smi_msg->link, &intf->waiting_msgs); break; + } else { + list_del(&smi_msg->link); + if (rv == 0) + /* Message handled */ + ipmi_free_smi_msg(smi_msg); + /* If rv < 0, fatal error, del but don't free. */ } } if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); /* * If the pretimout count is non-zero, decrement one from it and @@ -3903,7 +3876,41 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) static void smi_recv_tasklet(unsigned long val) { - handle_new_recv_msgs((ipmi_smi_t) val); + unsigned long flags = 0; /* keep us warning-free. */ + ipmi_smi_t intf = (ipmi_smi_t) val; + int run_to_completion = intf->run_to_completion; + struct ipmi_smi_msg *newmsg = NULL; + + /* + * Start the next message if available. + * + * Do this here, not in the actual receiver, because we may deadlock + * because the lower layer is allowed to hold locks while calling + * message delivery. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (intf->curr_msg == NULL && !intf->in_shutdown) { + struct list_head *entry = NULL; + + /* Pick the high priority queue first. */ + if (!list_empty(&intf->hp_xmit_msgs)) + entry = intf->hp_xmit_msgs.next; + else if (!list_empty(&intf->xmit_msgs)) + entry = intf->xmit_msgs.next; + + if (entry) { + list_del(entry); + newmsg = list_entry(entry, struct ipmi_smi_msg, link); + intf->curr_msg = newmsg; + } + } + if (!run_to_completion) + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + if (newmsg) + intf->handlers->sender(intf->send_info, newmsg); + + handle_new_recv_msgs(intf); } /* Handle a new message from the lower layer. */ @@ -3911,13 +3918,16 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { unsigned long flags = 0; /* keep us warning-free. */ - int run_to_completion; - + int run_to_completion = intf->run_to_completion; if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) && (msg->user_data == NULL)) { + + if (intf->in_shutdown) + goto free_msg; + /* * This is the local response to a command send, start * the timer for these. The user_data will not be @@ -3953,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, /* The message was sent, start the timer. */ intf_start_seq_timer(intf, msg->msgid); +free_msg: ipmi_free_smi_msg(msg); - goto out; + } else { + /* + * To preserve message order, we keep a queue and deliver from + * a tasklet. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + list_add_tail(&msg->link, &intf->waiting_rcv_msgs); + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); } - /* - * To preserve message order, if the list is not empty, we - * tack this message onto the end of the list. 
- */ - run_to_completion = intf->run_to_completion; if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_msgs_lock, flags); - list_add_tail(&msg->link, &intf->waiting_msgs); + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (msg == intf->curr_msg) + intf->curr_msg = NULL; if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); - tasklet_schedule(&intf->recv_tasklet); - out: - return; + if (run_to_completion) + smi_recv_tasklet((unsigned long) intf); + else + tasklet_schedule(&intf->recv_tasklet); } EXPORT_SYMBOL(ipmi_smi_msg_received); void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) { + if (intf->in_shutdown) + return; + atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); tasklet_schedule(&intf->recv_tasklet); } @@ -4017,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, struct ipmi_recv_msg *msg; struct ipmi_smi_handlers *handlers; - if (intf->intf_num == -1) + if (intf->in_shutdown) return; if (!ent->inuse) @@ -4082,8 +4103,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, ipmi_inc_stat(intf, retransmitted_ipmb_commands); - intf->handlers->sender(intf->send_info, - smi_msg, 0); + smi_send(intf, intf->handlers, smi_msg, 0); } else ipmi_free_smi_msg(smi_msg); @@ -4145,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) static void ipmi_request_event(ipmi_smi_t intf) { - struct ipmi_smi_handlers *handlers; - /* No event requests when in maintenance mode. */ if (intf->maintenance_mode_enable) return; - handlers = intf->handlers; - if (handlers) - handlers->request_events(intf->send_info); + if (!intf->in_shutdown) + intf->handlers->request_events(intf->send_info); } static struct timer_list ipmi_timer; @@ -4548,6 +4565,7 @@ static int ipmi_init_msghandler(void) proc_ipmi_root = proc_mkdir("ipmi", NULL); if (!proc_ipmi_root) { printk(KERN_ERR PFX "Unable to create IPMI proc dir"); + driver_unregister(&ipmidriver.driver); return -ENOMEM; } diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c new file mode 100644 index 000000000000..79524ed2a3cb --- /dev/null +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -0,0 +1,310 @@ +/* + * PowerNV OPAL IPMI driver + * + * Copyright 2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#define pr_fmt(fmt) "ipmi-powernv: " fmt + +#include <linux/ipmi_smi.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <asm/opal.h> + + +struct ipmi_smi_powernv { + u64 interface_id; + struct ipmi_device_id ipmi_id; + ipmi_smi_t intf; + u64 event; + struct notifier_block event_nb; + + /** + * We assume that there can only be one outstanding request, so + * keep the pending message in cur_msg. 
We protect this from concurrent + * updates through send & recv calls, (and consequently opal_msg, which + * is in-use when cur_msg is set) with msg_lock + */ + spinlock_t msg_lock; + struct ipmi_smi_msg *cur_msg; + struct opal_ipmi_msg *opal_msg; +}; + +static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf) +{ + struct ipmi_smi_powernv *smi = send_info; + + smi->intf = intf; + return 0; +} + +static void send_error_reply(struct ipmi_smi_powernv *smi, + struct ipmi_smi_msg *msg, u8 completion_code) +{ + msg->rsp[0] = msg->data[0] | 0x4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = completion_code; + msg->rsp_size = 3; + ipmi_smi_msg_received(smi->intf, msg); +} + +static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) +{ + struct ipmi_smi_powernv *smi = send_info; + struct opal_ipmi_msg *opal_msg; + unsigned long flags; + int comp, rc; + size_t size; + + /* ensure data_len will fit in the opal_ipmi_msg buffer... */ + if (msg->data_size > IPMI_MAX_MSG_LENGTH) { + comp = IPMI_REQ_LEN_EXCEEDED_ERR; + goto err; + } + + /* ... and that we at least have netfn and cmd bytes */ + if (msg->data_size < 2) { + comp = IPMI_REQ_LEN_INVALID_ERR; + goto err; + } + + spin_lock_irqsave(&smi->msg_lock, flags); + + if (smi->cur_msg) { + comp = IPMI_NODE_BUSY_ERR; + goto err_unlock; + } + + /* format our data for the OPAL API */ + opal_msg = smi->opal_msg; + opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1; + opal_msg->netfn = msg->data[0]; + opal_msg->cmd = msg->data[1]; + if (msg->data_size > 2) + memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2); + + /* data_size already includes the netfn and cmd bytes */ + size = sizeof(*opal_msg) + msg->data_size - 2; + + pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__, + smi->interface_id, opal_msg, size); + rc = opal_ipmi_send(smi->interface_id, opal_msg, size); + pr_devel("%s: -> %d\n", __func__, rc); + + if (!rc) { + smi->cur_msg = msg; + spin_unlock_irqrestore(&smi->msg_lock, flags); + return; + } + + comp = IPMI_ERR_UNSPECIFIED; +err_unlock: + spin_unlock_irqrestore(&smi->msg_lock, flags); +err: + send_error_reply(smi, msg, comp); +} + +static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) +{ + struct opal_ipmi_msg *opal_msg; + struct ipmi_smi_msg *msg; + unsigned long flags; + uint64_t size; + int rc; + + pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__, + smi->interface_id); + + spin_lock_irqsave(&smi->msg_lock, flags); + + if (!smi->cur_msg) { + pr_warn("no current message?\n"); + return 0; + } + + msg = smi->cur_msg; + opal_msg = smi->opal_msg; + + size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH); + + rc = opal_ipmi_recv(smi->interface_id, + opal_msg, + &size); + size = be64_to_cpu(size); + pr_devel("%s: -> %d (size %lld)\n", __func__, + rc, rc == 0 ? 
size : 0); + if (rc) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + ipmi_free_smi_msg(msg); + return 0; + } + + if (size < sizeof(*opal_msg)) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + pr_warn("unexpected IPMI message size %lld\n", size); + return 0; + } + + if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + pr_warn("unexpected IPMI message format (version %d)\n", + opal_msg->version); + return 0; + } + + msg->rsp[0] = opal_msg->netfn; + msg->rsp[1] = opal_msg->cmd; + if (size > sizeof(*opal_msg)) + memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg)); + msg->rsp_size = 2 + size - sizeof(*opal_msg); + + smi->cur_msg = NULL; + spin_unlock_irqrestore(&smi->msg_lock, flags); + ipmi_smi_msg_received(smi->intf, msg); + return 0; +} + +static void ipmi_powernv_request_events(void *send_info) +{ +} + +static void ipmi_powernv_set_run_to_completion(void *send_info, + bool run_to_completion) +{ +} + +static void ipmi_powernv_poll(void *send_info) +{ + struct ipmi_smi_powernv *smi = send_info; + + ipmi_powernv_recv(smi); +} + +static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = { + .owner = THIS_MODULE, + .start_processing = ipmi_powernv_start_processing, + .sender = ipmi_powernv_send, + .request_events = ipmi_powernv_request_events, + .set_run_to_completion = ipmi_powernv_set_run_to_completion, + .poll = ipmi_powernv_poll, +}; + +static int ipmi_opal_event(struct notifier_block *nb, + unsigned long events, void *change) +{ + struct ipmi_smi_powernv *smi = container_of(nb, + struct ipmi_smi_powernv, event_nb); + + if (events & smi->event) + ipmi_powernv_recv(smi); + return 0; +} + +static int ipmi_powernv_probe(struct platform_device *pdev) +{ + struct ipmi_smi_powernv *ipmi; + struct device *dev; + u32 prop; + int rc; + + if (!pdev || !pdev->dev.of_node) + return -ENODEV; + + dev = &pdev->dev; + + ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL); + if (!ipmi) + return -ENOMEM; + + spin_lock_init(&ipmi->msg_lock); + + rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id", + &prop); + if (rc) { + dev_warn(dev, "No interface ID property\n"); + goto err_free; + } + ipmi->interface_id = prop; + + rc = of_property_read_u32(dev->of_node, "interrupts", &prop); + if (rc) { + dev_warn(dev, "No interrupts property\n"); + goto err_free; + } + + ipmi->event = 1ull << prop; + ipmi->event_nb.notifier_call = ipmi_opal_event; + + rc = opal_notifier_register(&ipmi->event_nb); + if (rc) { + dev_warn(dev, "OPAL notifier registration failed (%d)\n", rc); + goto err_free; + } + + ipmi->opal_msg = devm_kmalloc(dev, + sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH, + GFP_KERNEL); + if (!ipmi->opal_msg) { + rc = -ENOMEM; + goto err_unregister; + } + + /* todo: query actual ipmi_device_id */ + rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, + &ipmi->ipmi_id, dev, 0); + if (rc) { + dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc); + goto err_free_msg; + } + + dev_set_drvdata(dev, ipmi); + return 0; + +err_free_msg: + devm_kfree(dev, ipmi->opal_msg); +err_unregister: + opal_notifier_unregister(&ipmi->event_nb); +err_free: + devm_kfree(dev, ipmi); + return rc; +} + +static int ipmi_powernv_remove(struct platform_device *pdev) +{ + struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev); + + ipmi_unregister_smi(smi->intf); + opal_notifier_unregister(&smi->event_nb); + return 0; +} + +static const struct of_device_id ipmi_powernv_match[] = { + { .compatible = "ibm,opal-ipmi" }, + { }, +}; + + +static 
struct platform_driver powernv_ipmi_driver = { + .driver = { + .name = "ipmi-powernv", + .owner = THIS_MODULE, + .of_match_table = ipmi_powernv_match, + }, + .probe = ipmi_powernv_probe, + .remove = ipmi_powernv_remove, +}; + + +module_platform_driver(powernv_ipmi_driver); + +MODULE_DEVICE_TABLE(of, ipmi_powernv_match); +MODULE_DESCRIPTION("powernv IPMI driver"); +MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5c4e1f625bbb..90c7fdf95419 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -92,12 +92,9 @@ enum si_intf_state { SI_GETTING_FLAGS, SI_GETTING_EVENTS, SI_CLEARING_FLAGS, - SI_CLEARING_FLAGS_THEN_SET_IRQ, SI_GETTING_MESSAGES, - SI_ENABLE_INTERRUPTS1, - SI_ENABLE_INTERRUPTS2, - SI_DISABLE_INTERRUPTS1, - SI_DISABLE_INTERRUPTS2 + SI_CHECKING_ENABLES, + SI_SETTING_ENABLES /* FIXME - add watchdog stuff. */ }; @@ -111,10 +108,6 @@ enum si_type { }; static char *si_to_str[] = { "kcs", "smic", "bt" }; -static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", - "ACPI", "SMBIOS", "PCI", - "device-tree", "default" }; - #define DEVICE_NAME "ipmi_si" static struct platform_driver ipmi_driver; @@ -174,8 +167,7 @@ struct smi_info { struct si_sm_handlers *handlers; enum si_type si_type; spinlock_t si_lock; - struct list_head xmit_msgs; - struct list_head hp_xmit_msgs; + struct ipmi_smi_msg *waiting_msg; struct ipmi_smi_msg *curr_msg; enum si_intf_state si_state; @@ -254,9 +246,6 @@ struct smi_info { /* The time (in jiffies) the last timeout occurred at. */ unsigned long last_timeout_jiffies; - /* Used to gracefully stop the timer without race conditions. */ - atomic_t stop_operation; - /* Are we waiting for the events, pretimeouts, received msgs? */ atomic_t need_watch; @@ -268,6 +257,16 @@ struct smi_info { */ bool interrupt_disabled; + /* + * Does the BMC support events? + */ + bool supports_event_msg_buff; + + /* + * Did we get an attention that we did not handle? + */ + bool got_attn; + /* From the get device id response... */ struct ipmi_device_id device_id; @@ -332,7 +331,10 @@ static void deliver_recv_msg(struct smi_info *smi_info, struct ipmi_smi_msg *msg) { /* Deliver the message to the upper layer. */ - ipmi_smi_msg_received(smi_info->intf, msg); + if (smi_info->intf) + ipmi_smi_msg_received(smi_info->intf, msg); + else + ipmi_free_smi_msg(msg); } static void return_hosed_msg(struct smi_info *smi_info, int cCode) @@ -356,28 +358,18 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode) static enum si_sm_result start_next_msg(struct smi_info *smi_info) { int rv; - struct list_head *entry = NULL; #ifdef DEBUG_TIMING struct timeval t; #endif - /* Pick the high priority queue first. 
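The ipmi_si change above replaces the two transmit lists (hp_xmit_msgs and xmit_msgs) with a single waiting_msg pointer: the upper IPMI message handler now serializes submissions, so at most one message can be outstanding per interface. A minimal userspace sketch of that single-slot contract (names are illustrative, not the kernel API):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct msg { int id; };

/* Single-slot "queue": holds at most one message, like smi_info->waiting_msg. */
static struct msg *waiting_msg;

static void model_sender(struct msg *m)
{
    /*
     * Mirrors the BUG_ON(smi_info->waiting_msg) in sender(): the upper
     * layer must never submit a second message before the first completes.
     */
    assert(waiting_msg == NULL);
    waiting_msg = m;
}

static struct msg *model_start_next_msg(void)
{
    struct msg *m = waiting_msg;    /* take the slot; NULL means idle */

    waiting_msg = NULL;
    return m;
}

int main(void)
{
    struct msg m = { .id = 1 };

    model_sender(&m);
    printf("started msg %d\n", model_start_next_msg()->id);
    return 0;
}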
*/ - if (!list_empty(&(smi_info->hp_xmit_msgs))) { - entry = smi_info->hp_xmit_msgs.next; - } else if (!list_empty(&(smi_info->xmit_msgs))) { - entry = smi_info->xmit_msgs.next; - } - - if (!entry) { + if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; rv = SI_SM_IDLE; } else { int err; - list_del(entry); - smi_info->curr_msg = list_entry(entry, - struct ipmi_smi_msg, - link); + smi_info->curr_msg = smi_info->waiting_msg; + smi_info->waiting_msg = NULL; #ifdef DEBUG_TIMING do_gettimeofday(&t); printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); @@ -401,30 +393,15 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) return rv; } -static void start_enable_irq(struct smi_info *smi_info) +static void start_check_enables(struct smi_info *smi_info) { unsigned char msg[2]; - /* - * If we are enabling interrupts, we have to tell the - * BMC to use them. - */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); - smi_info->si_state = SI_ENABLE_INTERRUPTS1; -} - -static void start_disable_irq(struct smi_info *smi_info) -{ - unsigned char msg[2]; - - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); - smi_info->si_state = SI_DISABLE_INTERRUPTS1; + smi_info->si_state = SI_CHECKING_ENABLES; } static void start_clear_flags(struct smi_info *smi_info) @@ -440,6 +417,32 @@ static void start_clear_flags(struct smi_info *smi_info) smi_info->si_state = SI_CLEARING_FLAGS; } +static void start_getting_msg_queue(struct smi_info *smi_info) +{ + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; + smi_info->curr_msg->data_size = 2; + + smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_MESSAGES; +} + +static void start_getting_events(struct smi_info *smi_info) +{ + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; + smi_info->curr_msg->data_size = 2; + + smi_info->handlers->start_transaction( + smi_info->si_sm, + smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_EVENTS; +} + static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { smi_info->last_timeout_jiffies = jiffies; @@ -453,22 +456,45 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) * polled until we can allocate some memory. Once we have some * memory, we will re-enable the interrupt. */ -static inline void disable_si_irq(struct smi_info *smi_info) +static inline bool disable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { - start_disable_irq(smi_info); smi_info->interrupt_disabled = true; - if (!atomic_read(&smi_info->stop_operation)) - smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); + start_check_enables(smi_info); + return true; } + return false; } -static inline void enable_si_irq(struct smi_info *smi_info) +static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (smi_info->interrupt_disabled)) { - start_enable_irq(smi_info); smi_info->interrupt_disabled = false; + start_check_enables(smi_info); + return true; } + return false; +} + +/* + * Allocate a message. 
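start_check_enables(), start_clear_flags(), start_getting_msg_queue() and start_getting_events() all build the same style of two-byte App-netfn request: byte 0 is (netfn << 2) | LUN and byte 1 is the command, and the matching response comes back with the netfn low bit set — which is why the error paths elsewhere synthesize replies as data[0] | 0x4. A standalone sketch of that framing, with the IPMI spec constants inlined for illustration:

#include <stdio.h>

#define NETFN_APP_REQUEST   0x06
#define GET_MSG_FLAGS_CMD   0x31    /* App netfn command, per the IPMI spec */

int main(void)
{
    unsigned char req[2];

    req[0] = NETFN_APP_REQUEST << 2;    /* netfn in bits 7..2, LUN 0 in bits 1..0 */
    req[1] = GET_MSG_FLAGS_CMD;

    /* A response flips the netfn odd bit: 0x06 -> 0x07, i.e. req[0] | 0x4. */
    printf("request hdr %02x, matching response hdr %02x\n",
           req[0], req[0] | 0x4);
    return 0;
}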
If unable to allocate, start the interrupt + * disable process and return NULL. If able to allocate but + * interrupts are disabled, free the message and return NULL after + * starting the interrupt enable process. + */ +static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + if (!disable_si_irq(smi_info)) + smi_info->si_state = SI_NORMAL; + } else if (enable_si_irq(smi_info)) { + ipmi_free_smi_msg(msg); + msg = NULL; + } + return msg; } static void handle_flags(struct smi_info *smi_info) @@ -480,45 +506,22 @@ static void handle_flags(struct smi_info *smi_info) start_clear_flags(smi_info); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; - ipmi_smi_watchdog_pretimeout(smi_info->intf); + if (smi_info->intf) + ipmi_smi_watchdog_pretimeout(smi_info->intf); } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { /* Messages available. */ - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) { - disable_si_irq(smi_info); - smi_info->si_state = SI_NORMAL; + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) return; - } - enable_si_irq(smi_info); - - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; - smi_info->curr_msg->data_size = 2; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_MESSAGES; + start_getting_msg_queue(smi_info); } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { /* Events available. */ - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) { - disable_si_irq(smi_info); - smi_info->si_state = SI_NORMAL; + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) return; - } - enable_si_irq(smi_info); - - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; - smi_info->curr_msg->data_size = 2; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_EVENTS; + start_getting_events(smi_info); } else if (smi_info->msg_flags & OEM_DATA_AVAIL && smi_info->oem_data_avail_handler) { if (smi_info->oem_data_avail_handler(smi_info)) @@ -527,6 +530,55 @@ static void handle_flags(struct smi_info *smi_info) smi_info->si_state = SI_NORMAL; } +/* + * Global enables we care about. 
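alloc_msg_handle_irq() centralizes the recovery policy its comment describes: an allocation failure starts the interrupt-disable sequence so the driver falls back to polling, while a successful allocation with interrupts still disabled re-enables them, drops the message, and lets a later pass retry. A userspace model of that decision table (names are made up for the sketch):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool irq_disabled;    /* models smi_info->interrupt_disabled */

static void *model_alloc_msg(bool alloc_fails)
{
    void *msg = alloc_fails ? NULL : malloc(16);

    if (!msg) {
        /* No memory: switch to polling until allocation works again. */
        irq_disabled = true;
    } else if (irq_disabled) {
        /* Memory is back: restart interrupts, retry on the next pass. */
        irq_disabled = false;
        free(msg);
        msg = NULL;
    }
    return msg;
}

int main(void)
{
    void *m;

    m = model_alloc_msg(true);
    printf("alloc fails    -> msg=%p irq_disabled=%d\n", m, irq_disabled);

    m = model_alloc_msg(false);
    printf("alloc recovers -> msg=%p irq_disabled=%d\n", m, irq_disabled);

    m = model_alloc_msg(false);
    printf("steady state   -> msg=%p irq_disabled=%d\n", m, irq_disabled);
    free(m);
    return 0;
}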
+ */ +#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ + IPMI_BMC_EVT_MSG_INTR) + +static u8 current_global_enables(struct smi_info *smi_info, u8 base, + bool *irq_on) +{ + u8 enables = 0; + + if (smi_info->supports_event_msg_buff) + enables |= IPMI_BMC_EVT_MSG_BUFF; + else + enables &= ~IPMI_BMC_EVT_MSG_BUFF; + + if (smi_info->irq && !smi_info->interrupt_disabled) + enables |= IPMI_BMC_RCV_MSG_INTR; + else + enables &= ~IPMI_BMC_RCV_MSG_INTR; + + if (smi_info->supports_event_msg_buff && + smi_info->irq && !smi_info->interrupt_disabled) + + enables |= IPMI_BMC_EVT_MSG_INTR; + else + enables &= ~IPMI_BMC_EVT_MSG_INTR; + + *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); + + return enables; +} + +static void check_bt_irq(struct smi_info *smi_info, bool irq_on) +{ + u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); + + irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT; + + if ((bool)irqstate == irq_on) + return; + + if (irq_on) + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, + IPMI_BT_INTMASK_ENABLE_IRQ_BIT); + else + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); +} + static void handle_transaction_done(struct smi_info *smi_info) { struct ipmi_smi_msg *msg; @@ -581,7 +633,6 @@ static void handle_transaction_done(struct smi_info *smi_info) } case SI_CLEARING_FLAGS: - case SI_CLEARING_FLAGS_THEN_SET_IRQ: { unsigned char msg[3]; @@ -592,10 +643,7 @@ static void handle_transaction_done(struct smi_info *smi_info) dev_warn(smi_info->dev, "Error clearing flags: %2.2x\n", msg[2]); } - if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) - start_enable_irq(smi_info); - else - smi_info->si_state = SI_NORMAL; + smi_info->si_state = SI_NORMAL; break; } @@ -675,9 +723,11 @@ static void handle_transaction_done(struct smi_info *smi_info) break; } - case SI_ENABLE_INTERRUPTS1: + case SI_CHECKING_ENABLES: { unsigned char msg[4]; + u8 enables; + bool irq_on; /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); @@ -687,70 +737,53 @@ static void handle_transaction_done(struct smi_info *smi_info) dev_warn(smi_info->dev, "Maybe ok, but ipmi might run very slowly.\n"); smi_info->si_state = SI_NORMAL; - } else { + break; + } + enables = current_global_enables(smi_info, 0, &irq_on); + if (smi_info->si_type == SI_BT) + /* BT has its own interrupt enable bit. */ + check_bt_irq(smi_info, irq_on); + if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { + /* Enables are not correct, fix them. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; - msg[2] = (msg[3] | - IPMI_BMC_RCV_MSG_INTR | - IPMI_BMC_EVT_MSG_INTR); + msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK); smi_info->handlers->start_transaction( smi_info->si_sm, msg, 3); - smi_info->si_state = SI_ENABLE_INTERRUPTS2; + smi_info->si_state = SI_SETTING_ENABLES; + } else if (smi_info->supports_event_msg_buff) { + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + smi_info->si_state = SI_NORMAL; + break; + } + start_getting_msg_queue(smi_info); + } else { + smi_info->si_state = SI_NORMAL; } break; } - case SI_ENABLE_INTERRUPTS2: + case SI_SETTING_ENABLES: { unsigned char msg[4]; - /* We got the flags from the SMI, now handle them. 
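current_global_enables() derives the enables the driver wants from two capabilities (event message buffer support and a working interrupt), and SI_CHECKING_ENABLES compares that against the bits the BMC reported in msg[3], masked to the bits the driver owns. A compact model of the read-modify-write, using the bit values from the IPMI spec:

#include <stdbool.h>
#include <stdio.h>

#define EVT_MSG_BUFF    0x08    /* IPMI_BMC_EVT_MSG_BUFF */
#define RCV_MSG_INTR    0x01    /* IPMI_BMC_RCV_MSG_INTR */
#define EVT_MSG_INTR    0x02    /* IPMI_BMC_EVT_MSG_INTR */
#define ENABLES_MASK    (EVT_MSG_BUFF | RCV_MSG_INTR | EVT_MSG_INTR)

static unsigned char wanted_enables(bool evt_buff, bool irq_ok)
{
    unsigned char e = 0;

    if (evt_buff)
        e |= EVT_MSG_BUFF;
    if (irq_ok)
        e |= RCV_MSG_INTR;
    if (evt_buff && irq_ok)
        e |= EVT_MSG_INTR;
    return e;
}

int main(void)
{
    unsigned char bmc = 0x30 | RCV_MSG_INTR;    /* BMC byte, with unrelated bits set */
    unsigned char want = wanted_enables(true, true);

    if (want != (bmc & ENABLES_MASK))
        /* Fix only our bits; preserve everything else the BMC set. */
        printf("set enables to %02x\n", want | (bmc & ~ENABLES_MASK));
    else
        printf("enables already correct\n");
    return 0;
}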
*/ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { + if (msg[2] != 0) dev_warn(smi_info->dev, - "Couldn't set irq info: %x.\n", msg[2]); - dev_warn(smi_info->dev, - "Maybe ok, but ipmi might run very slowly.\n"); - } else - smi_info->interrupt_disabled = false; - smi_info->si_state = SI_NORMAL; - break; - } - - case SI_DISABLE_INTERRUPTS1: - { - unsigned char msg[4]; + "Could not set the global enables: 0x%x.\n", + msg[2]); - /* We got the flags from the SMI, now handle them. */ - smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - dev_warn(smi_info->dev, "Could not disable interrupts" - ", failed get.\n"); - smi_info->si_state = SI_NORMAL; + if (smi_info->supports_event_msg_buff) { + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + smi_info->si_state = SI_NORMAL; + break; + } + start_getting_msg_queue(smi_info); } else { - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; - msg[2] = (msg[3] & - ~(IPMI_BMC_RCV_MSG_INTR | - IPMI_BMC_EVT_MSG_INTR)); - smi_info->handlers->start_transaction( - smi_info->si_sm, msg, 3); - smi_info->si_state = SI_DISABLE_INTERRUPTS2; - } - break; - } - - case SI_DISABLE_INTERRUPTS2: - { - unsigned char msg[4]; - - /* We got the flags from the SMI, now handle them. */ - smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - dev_warn(smi_info->dev, "Could not disable interrupts" - ", failed set.\n"); + smi_info->si_state = SI_NORMAL; } - smi_info->si_state = SI_NORMAL; break; } } @@ -808,25 +841,35 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, * We prefer handling attn over new messages. But don't do * this if there is not yet an upper layer to handle anything. */ - if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) { + if (likely(smi_info->intf) && + (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) { unsigned char msg[2]; - smi_inc_stat(smi_info, attentions); + if (smi_info->si_state != SI_NORMAL) { + /* + * We got an ATTN, but we are doing something else. + * Handle the ATTN later. + */ + smi_info->got_attn = true; + } else { + smi_info->got_attn = false; + smi_inc_stat(smi_info, attentions); - /* - * Got a attn, send down a get message flags to see - * what's causing it. It would be better to handle - * this in the upper layer, but due to the way - * interrupts work with the SMI, that's not really - * possible. - */ - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_GET_MSG_FLAGS_CMD; + /* + * Got a attn, send down a get message flags to see + * what's causing it. It would be better to handle + * this in the upper layer, but due to the way + * interrupts work with the SMI, that's not really + * possible. + */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; - smi_info->handlers->start_transaction( - smi_info->si_sm, msg, 2); - smi_info->si_state = SI_GETTING_FLAGS; - goto restart; + smi_info->handlers->start_transaction( + smi_info->si_sm, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; + goto restart; + } } /* If we are currently idle, try to start the next message. 
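The new got_attn flag lets smi_event_handler() remember an attention that arrives while another transaction is in flight and service it once the state machine returns to SI_NORMAL, rather than losing it. A tiny model of that deferral:

#include <stdbool.h>
#include <stdio.h>

enum state { NORMAL, BUSY };

static enum state st = BUSY;
static bool got_attn;

static void on_attention(void)
{
    if (st != NORMAL) {
        got_attn = true;    /* busy: remember the attention for later */
        return;
    }
    got_attn = false;
    printf("fetching message flags now\n");
}

int main(void)
{
    on_attention();             /* deferred: state machine busy */
    printf("deferred=%d\n", got_attn);

    st = NORMAL;                /* transaction done */
    if (got_attn)
        on_attention();         /* serviced on the next pass */
    return 0;
}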
*/ @@ -846,19 +889,21 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, */ atomic_set(&smi_info->req_events, 0); - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) - goto out; - - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; - smi_info->curr_msg->data_size = 2; + /* + * Take this opportunity to check the interrupt and + * message enable state for the BMC. The BMC can be + * asynchronously reset, and may thus get interrupts + * disable and messages disabled. + */ + if (smi_info->supports_event_msg_buff || smi_info->irq) { + start_check_enables(smi_info); + } else { + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) + goto out; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_EVENTS; + start_getting_events(smi_info); + } goto restart; } out: @@ -879,8 +924,7 @@ static void check_start_timer_thread(struct smi_info *smi_info) } static void sender(void *send_info, - struct ipmi_smi_msg *msg, - int priority) + struct ipmi_smi_msg *msg) { struct smi_info *smi_info = send_info; enum si_sm_result result; @@ -889,14 +933,8 @@ static void sender(void *send_info, struct timeval t; #endif - if (atomic_read(&smi_info->stop_operation)) { - msg->rsp[0] = msg->data[0] | 4; - msg->rsp[1] = msg->data[1]; - msg->rsp[2] = IPMI_ERR_UNSPECIFIED; - msg->rsp_size = 3; - deliver_recv_msg(smi_info, msg); - return; - } + BUG_ON(smi_info->waiting_msg); + smi_info->waiting_msg = msg; #ifdef DEBUG_TIMING do_gettimeofday(&t); @@ -905,16 +943,16 @@ static void sender(void *send_info, if (smi_info->run_to_completion) { /* - * If we are running to completion, then throw it in - * the list and run transactions until everything is - * clear. Priority doesn't matter here. + * If we are running to completion, start it and run + * transactions until everything is clear. */ + smi_info->curr_msg = smi_info->waiting_msg; + smi_info->waiting_msg = NULL; /* * Run to completion means we are single-threaded, no * need for locks. 
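In run-to-completion mode the driver is single-threaded by definition, so sender() now moves the message straight into curr_msg and cranks smi_event_handler() until the state machine goes idle, with no locking or list manipulation. A sketch of that drive-to-idle loop (the step count is arbitrary):

#include <stdio.h>

enum sm_result { SM_IDLE, SM_CALL_AGAIN };

static int steps_left = 3;    /* pretend the transaction needs a few turns */

static enum sm_result model_event_handler(void)
{
    return --steps_left > 0 ? SM_CALL_AGAIN : SM_IDLE;
}

int main(void)
{
    /* Single-threaded, so just crank the state machine to idle,
     * mirroring the while (result != SI_SM_IDLE) loop above. */
    while (model_event_handler() != SM_IDLE)
        printf("still working...\n");
    printf("idle, message complete\n");
    return 0;
}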
*/ - list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); result = smi_event_handler(smi_info, 0); while (result != SI_SM_IDLE) { @@ -926,11 +964,6 @@ static void sender(void *send_info, } spin_lock_irqsave(&smi_info->si_lock, flags); - if (priority > 0) - list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); - else - list_add_tail(&msg->link, &smi_info->xmit_msgs); - check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); } @@ -1068,8 +1101,7 @@ static void request_events(void *send_info) { struct smi_info *smi_info = send_info; - if (atomic_read(&smi_info->stop_operation) || - !smi_info->has_event_buffer) + if (!smi_info->has_event_buffer) return; atomic_set(&smi_info->req_events, 1); @@ -1697,7 +1729,7 @@ static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) } *s = '\0'; s++; - for (i = 0; hotmod_ops[i].name; i++) { + for (i = 0; v[i].name; i++) { if (strcmp(*curr, v[i].name) == 0) { *val = v[i].val; *curr = s; @@ -2133,6 +2165,9 @@ static int try_init_spmi(struct SPMITable *spmi) case 3: /* BT */ info->si_type = SI_BT; break; + case 4: /* SSIF, just ignore */ + kfree(info); + return -EIO; default: printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", spmi->InterfaceType); @@ -2250,6 +2285,8 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, case 3: info->si_type = SI_BT; break; + case 4: /* SSIF, just ignore */ + goto err_free; default: dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); goto err_free; @@ -2913,9 +2950,11 @@ static int try_enable_event_buffer(struct smi_info *smi_info) goto out; } - if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) + if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { /* buffer is already enabled, nothing to do. */ + smi_info->supports_event_msg_buff = true; goto out; + } msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; @@ -2948,6 +2987,9 @@ static int try_enable_event_buffer(struct smi_info *smi_info) * that the event buffer is not supported. */ rv = -ENOENT; + else + smi_info->supports_event_msg_buff = true; + out: kfree(resp); return rv; @@ -3188,15 +3230,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info) static inline void wait_for_timer_and_thread(struct smi_info *smi_info) { - if (smi_info->intf) { - /* - * The timer and thread are only running if the - * interface has been started up and registered. 
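The parse_str() change is a genuine bug fix: the loop scanned the global hotmod_ops[] table no matter which table the caller passed in v, so lookups against any other table read the wrong array. A distilled version of the corrected generic lookup (table contents invented for the example):

#include <stdio.h>
#include <string.h>

struct vals { const char *name; int val; };

static const struct vals hotmod_ops[] = { { "add", 1 }, { "remove", 2 }, { NULL, 0 } };
static const struct vals si_types[]   = { { "kcs", 1 }, { "smic", 2 }, { "bt", 3 }, { NULL, 0 } };

static int lookup(const struct vals *v, const char *s, int *val)
{
    for (int i = 0; v[i].name; i++) {    /* iterate the table we were given */
        if (strcmp(s, v[i].name) == 0) {
            *val = v[i].val;
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    int val;

    if (lookup(si_types, "smic", &val) == 0)
        printf("smic -> %d\n", val);    /* would have failed against hotmod_ops[] */
    return 0;
}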
- */ - if (smi_info->thread != NULL) - kthread_stop(smi_info->thread); + if (smi_info->thread != NULL) + kthread_stop(smi_info->thread); + if (smi_info->timer_running) del_timer_sync(&smi_info->si_timer); - } } static struct ipmi_default_vals @@ -3274,8 +3311,8 @@ static int add_smi(struct smi_info *new_smi) int rv = 0; printk(KERN_INFO PFX "Adding %s-specified %s state machine", - ipmi_addr_src_to_str[new_smi->addr_source], - si_to_str[new_smi->si_type]); + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type]); mutex_lock(&smi_infos_lock); if (!is_new_interface(new_smi)) { printk(KERN_CONT " duplicate interface\n"); @@ -3305,7 +3342,7 @@ static int try_smi_init(struct smi_info *new_smi) printk(KERN_INFO PFX "Trying %s-specified %s state" " machine at %s address 0x%lx, slave address 0x%x," " irq %d\n", - ipmi_addr_src_to_str[new_smi->addr_source], + ipmi_addr_src_to_str(new_smi->addr_source), si_to_str[new_smi->si_type], addr_space_to_str[new_smi->io.addr_type], new_smi->io.addr_data, @@ -3371,8 +3408,7 @@ static int try_smi_init(struct smi_info *new_smi) setup_oem_data_handler(new_smi); setup_xaction_handlers(new_smi); - INIT_LIST_HEAD(&(new_smi->xmit_msgs)); - INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); + new_smi->waiting_msg = NULL; new_smi->curr_msg = NULL; atomic_set(&new_smi->req_events, 0); new_smi->run_to_completion = false; @@ -3380,7 +3416,6 @@ static int try_smi_init(struct smi_info *new_smi) atomic_set(&new_smi->stats[i], 0); new_smi->interrupt_disabled = true; - atomic_set(&new_smi->stop_operation, 0); atomic_set(&new_smi->need_watch, 0); new_smi->intf_num = smi_num; smi_num++; @@ -3394,9 +3429,15 @@ static int try_smi_init(struct smi_info *new_smi) * timer to avoid racing with the timer. */ start_clear_flags(new_smi); - /* IRQ is defined to be set when non-zero. */ - if (new_smi->irq) - new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; + + /* + * IRQ is defined to be set when non-zero. req_events will + * cause a global flags check that will enable interrupts. + */ + if (new_smi->irq) { + new_smi->interrupt_disabled = false; + atomic_set(&new_smi->req_events, 1); + } if (!new_smi->dev) { /* @@ -3428,7 +3469,6 @@ static int try_smi_init(struct smi_info *new_smi) new_smi, &new_smi->device_id, new_smi->dev, - "bmc", new_smi->slave_addr); if (rv) { dev_err(new_smi->dev, "Unable to register device: error %d\n", @@ -3466,15 +3506,15 @@ static int try_smi_init(struct smi_info *new_smi) return 0; out_err_stop_timer: - atomic_inc(&new_smi->stop_operation); wait_for_timer_and_thread(new_smi); out_err: new_smi->interrupt_disabled = true; if (new_smi->intf) { - ipmi_unregister_smi(new_smi->intf); + ipmi_smi_t intf = new_smi->intf; new_smi->intf = NULL; + ipmi_unregister_smi(intf); } if (new_smi->irq_cleanup) { @@ -3653,60 +3693,49 @@ module_init(init_ipmi_si); static void cleanup_one_si(struct smi_info *to_clean) { int rv = 0; - unsigned long flags; if (!to_clean) return; + if (to_clean->intf) { + ipmi_smi_t intf = to_clean->intf; + + to_clean->intf = NULL; + rv = ipmi_unregister_smi(intf); + if (rv) { + pr_err(PFX "Unable to unregister device: errno=%d\n", + rv); + } + } + if (to_clean->dev) dev_set_drvdata(to_clean->dev, NULL); list_del(&to_clean->link); - /* Tell the driver that we are shutting down. */ - atomic_inc(&to_clean->stop_operation); - /* - * Make sure the timer and thread are stopped and will not run - * again. + * Make sure that interrupts, the timer and the thread are + * stopped and will not run again. 
*/ + if (to_clean->irq_cleanup) + to_clean->irq_cleanup(to_clean); wait_for_timer_and_thread(to_clean); /* * Timeouts are stopped, now make sure the interrupts are off - * for the device. A little tricky with locks to make sure - * there are no races. + * in the BMC. Note that timers and CPU interrupts are off, + * so no need for locks. */ - spin_lock_irqsave(&to_clean->si_lock, flags); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { - spin_unlock_irqrestore(&to_clean->si_lock, flags); poll(to_clean); schedule_timeout_uninterruptible(1); - spin_lock_irqsave(&to_clean->si_lock, flags); } disable_si_irq(to_clean); - spin_unlock_irqrestore(&to_clean->si_lock, flags); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); } - /* Clean up interrupts and make sure that everything is done. */ - if (to_clean->irq_cleanup) - to_clean->irq_cleanup(to_clean); - while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { - poll(to_clean); - schedule_timeout_uninterruptible(1); - } - - if (to_clean->intf) - rv = ipmi_unregister_smi(to_clean->intf); - - if (rv) { - printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", - rv); - } - if (to_clean->handlers) to_clean->handlers->cleanup(to_clean->si_sm); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c new file mode 100644 index 000000000000..e178ac27e73c --- /dev/null +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -0,0 +1,1870 @@ +/* + * ipmi_ssif.c + * + * The interface to the IPMI driver for SMBus access to a SMBus + * compliant device. Called SSIF by the IPMI spec. + * + * Author: Intel Corporation + * Todd Davis <todd.c.davis@intel.com> + * + * Rewritten by Corey Minyard <minyard@acm.org> to support the + * non-blocking I2C interface, add support for multi-part + * transactions, add PEC support, and general clenaup. + * + * Copyright 2003 Intel Corporation + * Copyright 2005 MontaVista Software + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* + * This file holds the "policy" for the interface to the SSIF state + * machine. It does the configuration, handles timers and interrupts, + * and drives the real SSIF state machine. + */ + +/* + * TODO: Figure out how to use SMB alerts. This will require a new + * interface into the I2C driver, I believe. 
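SSIF carries IPMI over SMBus block transactions: a single-part request is a block write to SMBus command 2 and the response a block read from command 3 (the SSIF_IPMI_REQUEST/SSIF_IPMI_RESPONSE constants defined just below), with the SMBus length byte framing the payload. A sketch of building the single-part write buffer — pure buffer math, no I2C access:

#include <stdio.h>
#include <string.h>

#define SSIF_IPMI_REQUEST    2    /* SMBus command code for a single-part write */

/* Build the block-write payload: [len][netfn<<2][cmd][data...]. */
static int ssif_frame_request(unsigned char *out, const unsigned char *ipmi_msg,
                              unsigned int len)
{
    if (len > 32)        /* single-part SSIF writes are limited to 32 bytes */
        return -1;
    out[0] = len;        /* SMBus block length byte */
    memcpy(out + 1, ipmi_msg, len);
    return len + 1;
}

int main(void)
{
    unsigned char msg[2] = { 0x06 << 2, 0x01 };    /* App request, Get Device ID */
    unsigned char buf[33];
    int n = ssif_frame_request(buf, msg, sizeof(msg));

    printf("SMBus cmd %d, %d bytes: len=%u netfn/lun=%02x cmd=%02x\n",
           SSIF_IPMI_REQUEST, n, buf[0], buf[1], buf[2]);
    return 0;
}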
+ */ + +#include <linux/version.h> +#if defined(MODVERSIONS) +#include <linux/modversions.h> +#endif + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/i2c.h> +#include <linux/ipmi_smi.h> +#include <linux/init.h> +#include <linux/dmi.h> +#include <linux/kthread.h> +#include <linux/acpi.h> + +#define PFX "ipmi_ssif: " +#define DEVICE_NAME "ipmi_ssif" + +#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57 + +#define SSIF_IPMI_REQUEST 2 +#define SSIF_IPMI_MULTI_PART_REQUEST_START 6 +#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7 +#define SSIF_IPMI_RESPONSE 3 +#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9 + +/* ssif_debug is a bit-field + * SSIF_DEBUG_MSG - commands and their responses + * SSIF_DEBUG_STATES - message states + * SSIF_DEBUG_TIMING - Measure times between events in the driver + */ +#define SSIF_DEBUG_TIMING 4 +#define SSIF_DEBUG_STATE 2 +#define SSIF_DEBUG_MSG 1 +#define SSIF_NODEBUG 0 +#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG) + +/* + * Timer values + */ +#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */ +#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */ + +/* How many times to we retry sending/receiving the message. */ +#define SSIF_SEND_RETRIES 5 +#define SSIF_RECV_RETRIES 250 + +#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000) +#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC) +#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC) + +enum ssif_intf_state { + SSIF_NORMAL, + SSIF_GETTING_FLAGS, + SSIF_GETTING_EVENTS, + SSIF_CLEARING_FLAGS, + SSIF_GETTING_MESSAGES, + /* FIXME - add watchdog stuff. */ +}; + +#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \ + && (ssif)->curr_msg == NULL) + +/* + * Indexes into stats[] in ssif_info below. + */ +enum ssif_stat_indexes { + /* Number of total messages sent. */ + SSIF_STAT_sent_messages = 0, + + /* + * Number of message parts sent. Messages may be broken into + * parts if they are long. + */ + SSIF_STAT_sent_messages_parts, + + /* + * Number of time a message was retried. + */ + SSIF_STAT_send_retries, + + /* + * Number of times the send of a message failed. + */ + SSIF_STAT_send_errors, + + /* + * Number of message responses received. + */ + SSIF_STAT_received_messages, + + /* + * Number of message fragments received. + */ + SSIF_STAT_received_message_parts, + + /* + * Number of times the receive of a message was retried. + */ + SSIF_STAT_receive_retries, + + /* + * Number of errors receiving messages. + */ + SSIF_STAT_receive_errors, + + /* + * Number of times a flag fetch was requested. + */ + SSIF_STAT_flag_fetches, + + /* + * Number of times the hardware didn't follow the state machine. + */ + SSIF_STAT_hosed, + + /* + * Number of received events. + */ + SSIF_STAT_events, + + /* Number of asyncronous messages received. */ + SSIF_STAT_incoming_messages, + + /* Number of watchdog pretimeouts. */ + SSIF_STAT_watchdog_pretimeouts, + + /* Always add statistics before this value, it must be last. 
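The timeout macros convert microseconds to jiffies by hand: SSIF_MSG_JIFFIES is (SSIF_MSG_USEC * 1000) / TICK_NSEC, i.e. the interval in nanoseconds divided by the tick period, which stays correct for any HZ. A userspace check of the arithmetic (HZ values chosen for illustration):

#include <stdio.h>

#define SSIF_MSG_USEC    20000    /* 20 ms between message tries */

int main(void)
{
    for (long hz = 100; hz <= 1000; hz *= 10) {
        long tick_nsec = 1000000000L / hz;              /* models TICK_NSEC */
        long jiffies = (SSIF_MSG_USEC * 1000L) / tick_nsec;

        printf("HZ=%4ld -> %ld jiffies per 20ms retry\n", hz, jiffies);
    }
    return 0;    /* HZ=100 -> 2 jiffies, HZ=1000 -> 20 jiffies */
}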
*/ + SSIF_NUM_STATS +}; + +struct ssif_addr_info { + unsigned short addr; + struct i2c_board_info binfo; + char *adapter_name; + int debug; + int slave_addr; + enum ipmi_addr_src addr_src; + union ipmi_smi_info_union addr_info; + + struct mutex clients_mutex; + struct list_head clients; + + struct list_head link; +}; + +struct ssif_info; + +typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len); + +struct ssif_info { + ipmi_smi_t intf; + int intf_num; + spinlock_t lock; + struct ipmi_smi_msg *waiting_msg; + struct ipmi_smi_msg *curr_msg; + enum ssif_intf_state ssif_state; + unsigned long ssif_debug; + + struct ipmi_smi_handlers handlers; + + enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ + union ipmi_smi_info_union addr_info; + + /* + * Flags from the last GET_MSG_FLAGS command, used when an ATTN + * is set to hold the flags until we are done handling everything + * from the flags. + */ +#define RECEIVE_MSG_AVAIL 0x01 +#define EVENT_MSG_BUFFER_FULL 0x02 +#define WDT_PRE_TIMEOUT_INT 0x08 + unsigned char msg_flags; + + bool has_event_buffer; + + /* + * If set to true, this will request events the next time the + * state machine is idle. + */ + bool req_events; + + /* + * If set to true, this will request flags the next time the + * state machine is idle. + */ + bool req_flags; + + /* + * Used to perform timer operations when run-to-completion + * mode is on. This is a countdown timer. + */ + int rtc_us_timer; + + /* Used for sending/receiving data. +1 for the length. */ + unsigned char data[IPMI_MAX_MSG_LENGTH + 1]; + unsigned int data_len; + + /* Temp receive buffer, gets copied into data. */ + unsigned char recv[I2C_SMBUS_BLOCK_MAX]; + + struct i2c_client *client; + ssif_i2c_done done_handler; + + /* Thread interface handling */ + struct task_struct *thread; + struct completion wake_thread; + bool stopping; + int i2c_read_write; + int i2c_command; + unsigned char *i2c_data; + unsigned int i2c_size; + + /* From the device id response. 
*/ + struct ipmi_device_id device_id; + + struct timer_list retry_timer; + int retries_left; + + /* Info from SSIF cmd */ + unsigned char max_xmit_msg_size; + unsigned char max_recv_msg_size; + unsigned int multi_support; + int supports_pec; + +#define SSIF_NO_MULTI 0 +#define SSIF_MULTI_2_PART 1 +#define SSIF_MULTI_n_PART 2 + unsigned char *multi_data; + unsigned int multi_len; + unsigned int multi_pos; + + atomic_t stats[SSIF_NUM_STATS]; +}; + +#define ssif_inc_stat(ssif, stat) \ + atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat]) +#define ssif_get_stat(ssif, stat) \ + ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat])) + +static bool initialized; + +static atomic_t next_intf = ATOMIC_INIT(0); + +static void return_hosed_msg(struct ssif_info *ssif_info, + struct ipmi_smi_msg *msg); +static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags); +static int start_send(struct ssif_info *ssif_info, + unsigned char *data, + unsigned int len); + +static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info, + unsigned long *flags) +{ + spin_lock_irqsave(&ssif_info->lock, *flags); + return flags; +} + +static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info, + unsigned long *flags) +{ + spin_unlock_irqrestore(&ssif_info->lock, *flags); +} + +static void deliver_recv_msg(struct ssif_info *ssif_info, + struct ipmi_smi_msg *msg) +{ + ipmi_smi_t intf = ssif_info->intf; + + if (!intf) { + ipmi_free_smi_msg(msg); + } else if (msg->rsp_size < 0) { + return_hosed_msg(ssif_info, msg); + pr_err(PFX + "Malformed message in deliver_recv_msg: rsp_size = %d\n", + msg->rsp_size); + } else { + ipmi_smi_msg_received(intf, msg); + } +} + +static void return_hosed_msg(struct ssif_info *ssif_info, + struct ipmi_smi_msg *msg) +{ + ssif_inc_stat(ssif_info, hosed); + + /* Make it a response */ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = 0xFF; /* Unknown error. */ + msg->rsp_size = 3; + + deliver_recv_msg(ssif_info, msg); +} + +/* + * Must be called with the message lock held. This will release the + * message lock. Note that the caller will check SSIF_IDLE and start a + * new operation, so there is no need to check for new messages to + * start in here. + */ +static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) +{ + unsigned char msg[3]; + + ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; + ssif_info->ssif_state = SSIF_CLEARING_FLAGS; + ipmi_ssif_unlock_cond(ssif_info, flags); + + /* Make sure the watchdog pre-timeout flag is not set at startup. */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; + msg[2] = WDT_PRE_TIMEOUT_INT; + + if (start_send(ssif_info, msg, 3) != 0) { + /* Error, just go to normal state. 
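The ssif_inc_stat()/ssif_get_stat() macros above use the preprocessor's ## paste operator so a caller can write ssif_inc_stat(info, send_errors) and have it expand to the SSIF_STAT_send_errors array index; the "always add statistics before SSIF_NUM_STATS" rule keeps the array sized in lockstep with the enum. A standalone demo of the idiom:

#include <stdio.h>

enum stat_indexes { STAT_sent_messages, STAT_send_errors, NUM_STATS };

static int stats[NUM_STATS];

/* Token pasting turns the bare name into the enum constant. */
#define inc_stat(stat)    (stats[STAT_ ## stat]++)
#define get_stat(stat)    (stats[STAT_ ## stat])

int main(void)
{
    inc_stat(sent_messages);
    inc_stat(send_errors);
    inc_stat(send_errors);
    printf("sent=%d errors=%d\n",
           get_stat(sent_messages), get_stat(send_errors));
    return 0;
}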
*/ + ssif_info->ssif_state = SSIF_NORMAL; + } +} + +static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) +{ + unsigned char mb[2]; + + ssif_info->req_flags = false; + ssif_info->ssif_state = SSIF_GETTING_FLAGS; + ipmi_ssif_unlock_cond(ssif_info, flags); + + mb[0] = (IPMI_NETFN_APP_REQUEST << 2); + mb[1] = IPMI_GET_MSG_FLAGS_CMD; + if (start_send(ssif_info, mb, 2) != 0) + ssif_info->ssif_state = SSIF_NORMAL; +} + +static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, + struct ipmi_smi_msg *msg) +{ + if (start_send(ssif_info, msg->data, msg->data_size) != 0) { + unsigned long oflags; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + ssif_info->curr_msg = NULL; + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + ipmi_free_smi_msg(msg); + } +} + +static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) +{ + struct ipmi_smi_msg *msg; + + ssif_info->req_events = false; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + ssif_info->ssif_state = SSIF_NORMAL; + return; + } + + ssif_info->curr_msg = msg; + ssif_info->ssif_state = SSIF_GETTING_EVENTS; + ipmi_ssif_unlock_cond(ssif_info, flags); + + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; + msg->data_size = 2; + + check_start_send(ssif_info, flags, msg); +} + +static void start_recv_msg_fetch(struct ssif_info *ssif_info, + unsigned long *flags) +{ + struct ipmi_smi_msg *msg; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + ssif_info->ssif_state = SSIF_NORMAL; + return; + } + + ssif_info->curr_msg = msg; + ssif_info->ssif_state = SSIF_GETTING_MESSAGES; + ipmi_ssif_unlock_cond(ssif_info, flags); + + msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg->data[1] = IPMI_GET_MSG_CMD; + msg->data_size = 2; + + check_start_send(ssif_info, flags, msg); +} + +/* + * Must be called with the message lock held. This will release the + * message lock. Note that the caller will check SSIF_IDLE and start a + * new operation, so there is no need to check for new messages to + * start in here. + */ +static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) +{ + if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) { + ipmi_smi_t intf = ssif_info->intf; + /* Watchdog pre-timeout */ + ssif_inc_stat(ssif_info, watchdog_pretimeouts); + start_clear_flags(ssif_info, flags); + if (intf) + ipmi_smi_watchdog_pretimeout(intf); + } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL) + /* Messages available. */ + start_recv_msg_fetch(ssif_info, flags); + else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL) + /* Events available. 
*/ + start_event_fetch(ssif_info, flags); + else { + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + } +} + +static int ipmi_ssif_thread(void *data) +{ + struct ssif_info *ssif_info = data; + + while (!kthread_should_stop()) { + int result; + + /* Wait for something to do */ + wait_for_completion(&ssif_info->wake_thread); + init_completion(&ssif_info->wake_thread); + + if (ssif_info->stopping) + break; + + if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { + result = i2c_smbus_write_block_data( + ssif_info->client, SSIF_IPMI_REQUEST, + ssif_info->i2c_data[0], + ssif_info->i2c_data + 1); + ssif_info->done_handler(ssif_info, result, NULL, 0); + } else { + result = i2c_smbus_read_block_data( + ssif_info->client, SSIF_IPMI_RESPONSE, + ssif_info->i2c_data); + if (result < 0) + ssif_info->done_handler(ssif_info, result, + NULL, 0); + else + ssif_info->done_handler(ssif_info, 0, + ssif_info->i2c_data, + result); + } + } + + return 0; +} + +static int ssif_i2c_send(struct ssif_info *ssif_info, + ssif_i2c_done handler, + int read_write, int command, + unsigned char *data, unsigned int size) +{ + ssif_info->done_handler = handler; + + ssif_info->i2c_read_write = read_write; + ssif_info->i2c_command = command; + ssif_info->i2c_data = data; + ssif_info->i2c_size = size; + complete(&ssif_info->wake_thread); + return 0; +} + + +static void msg_done_handler(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len); + +static void retry_timeout(unsigned long data) +{ + struct ssif_info *ssif_info = (void *) data; + int rv; + + if (ssif_info->stopping) + return; + + ssif_info->rtc_us_timer = 0; + + rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, + SSIF_IPMI_RESPONSE, + ssif_info->recv, I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + /* request failed, just return the error. */ + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error from i2c_non_blocking_op(5)\n"); + + msg_done_handler(ssif_info, -EIO, NULL, 0); + } +} + +static int start_resend(struct ssif_info *ssif_info); + +static void msg_done_handler(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len) +{ + struct ipmi_smi_msg *msg; + unsigned long oflags, *flags; + int rv; + + /* + * We are single-threaded here, so no need for a lock until we + * start messing with driver states or the queues. + */ + + if (result < 0) { + ssif_info->retries_left--; + if (ssif_info->retries_left > 0) { + ssif_inc_stat(ssif_info, receive_retries); + + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_JIFFIES); + ssif_info->rtc_us_timer = SSIF_MSG_USEC; + return; + } + + ssif_inc_stat(ssif_info, receive_errors); + + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error in msg_done_handler: %d\n", result); + len = 0; + goto continue_op; + } + + if ((len > 1) && (ssif_info->multi_pos == 0) + && (data[0] == 0x00) && (data[1] == 0x01)) { + /* Start of multi-part read. Start the next transaction. */ + int i; + + ssif_inc_stat(ssif_info, received_message_parts); + + /* Remove the multi-part read marker. 
*/ + for (i = 0; i < (len-2); i++) + ssif_info->data[i] = data[i+2]; + len -= 2; + ssif_info->multi_len = len; + ssif_info->multi_pos = 1; + + rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, + SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE, + ssif_info->recv, I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error from i2c_non_blocking_op(1)\n"); + + result = -EIO; + } else + return; + } else if (ssif_info->multi_pos) { + /* Middle of multi-part read. Start the next transaction. */ + int i; + unsigned char blocknum; + + if (len == 0) { + result = -EIO; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info(PFX "Middle message with no data\n"); + + goto continue_op; + } + + blocknum = data[ssif_info->multi_len]; + + if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) { + /* Received message too big, abort the operation. */ + result = -E2BIG; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Received message too big\n"); + + goto continue_op; + } + + /* Remove the blocknum from the data. */ + for (i = 0; i < (len-1); i++) + ssif_info->data[i+ssif_info->multi_len] = data[i+1]; + len--; + ssif_info->multi_len += len; + if (blocknum == 0xff) { + /* End of read */ + len = ssif_info->multi_len; + data = ssif_info->data; + } else if ((blocknum+1) != ssif_info->multi_pos) { + /* + * Out of sequence block, just abort. Block + * numbers start at zero for the second block, + * but multi_pos starts at one, so the +1. + */ + result = -EIO; + } else { + ssif_inc_stat(ssif_info, received_message_parts); + + ssif_info->multi_pos++; + + rv = ssif_i2c_send(ssif_info, msg_done_handler, + I2C_SMBUS_READ, + SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE, + ssif_info->recv, + I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info(PFX + "Error from i2c_non_blocking_op(2)\n"); + + result = -EIO; + } else + return; + } + } + + if (result < 0) { + ssif_inc_stat(ssif_info, receive_errors); + } else { + ssif_inc_stat(ssif_info, received_messages); + ssif_inc_stat(ssif_info, received_message_parts); + } + + + continue_op: + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) + pr_info(PFX "DONE 1: state = %d, result=%d.\n", + ssif_info->ssif_state, result); + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + msg = ssif_info->curr_msg; + if (msg) { + msg->rsp_size = len; + if (msg->rsp_size > IPMI_MAX_MSG_LENGTH) + msg->rsp_size = IPMI_MAX_MSG_LENGTH; + memcpy(msg->rsp, data, msg->rsp_size); + ssif_info->curr_msg = NULL; + } + + switch (ssif_info->ssif_state) { + case SSIF_NORMAL: + ipmi_ssif_unlock_cond(ssif_info, flags); + if (!msg) + break; + + if (result < 0) + return_hosed_msg(ssif_info, msg); + else + deliver_recv_msg(ssif_info, msg); + break; + + case SSIF_GETTING_FLAGS: + /* We got the flags from the SSIF, now handle them. */ + if ((result < 0) || (len < 4) || (data[2] != 0)) { + /* + * Error fetching flags, or invalid length, + * just give up for now. + */ + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + pr_warn(PFX "Error getting flags: %d %d, %x\n", + result, len, data[2]); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) { + pr_warn(PFX "Invalid response getting flags: %x %x\n", + data[0], data[1]); + } else { + ssif_inc_stat(ssif_info, flag_fetches); + ssif_info->msg_flags = data[3]; + handle_flags(ssif_info, flags); + } + break; + + case SSIF_CLEARING_FLAGS: + /* We cleared the flags. 
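msg_done_handler() stitches multi-part reads back together: the first part opens with the 0x00 0x01 marker, each middle part carries a block number in byte 0 (checked against multi_pos), and block number 0xff marks the final part. A simplified reassembly loop under those rules, with canned sample data and no retry handling:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Three SMBus block reads as SSIF would return them. */
    unsigned char part1[] = { 0x00, 0x01, 'H', 'e', 'l' };    /* start marker + data */
    unsigned char part2[] = { 0x00, 'l', 'o' };               /* block 0 + data */
    unsigned char part3[] = { 0xff, '!' };                    /* end marker + data */
    unsigned char *parts[] = { part1, part2, part3 };
    unsigned int lens[] = { 5, 3, 2 };

    unsigned char out[64];
    unsigned int pos, expect_block = 0;

    memcpy(out, parts[0] + 2, lens[0] - 2);    /* drop the 0x00 0x01 marker */
    pos = lens[0] - 2;

    for (int i = 1; i < 3; i++) {
        unsigned char blocknum = parts[i][0];

        if (blocknum != 0xff && blocknum != expect_block++)
            return 1;                  /* out-of-sequence block: abort */
        memcpy(out + pos, parts[i] + 1, lens[i] - 1);
        pos += lens[i] - 1;
        if (blocknum == 0xff)
            break;                     /* final part */
    }
    out[pos] = '\0';
    printf("reassembled %u bytes: %s\n", pos, out);
    return 0;
}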
*/ + if ((result < 0) || (len < 3) || (data[2] != 0)) { + /* Error clearing flags */ + pr_warn(PFX "Error clearing flags: %d %d, %x\n", + result, len, data[2]); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { + pr_warn(PFX "Invalid response clearing flags: %x %x\n", + data[0], data[1]); + } + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + break; + + case SSIF_GETTING_EVENTS: + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the event flag. */ + ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; + handle_flags(ssif_info, flags); + } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) { + pr_warn(PFX "Invalid response getting events: %x %x\n", + msg->rsp[0], msg->rsp[1]); + msg->done(msg); + /* Take off the event flag. */ + ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; + handle_flags(ssif_info, flags); + } else { + handle_flags(ssif_info, flags); + ssif_inc_stat(ssif_info, events); + deliver_recv_msg(ssif_info, msg); + } + break; + + case SSIF_GETTING_MESSAGES: + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { + /* Error getting event, probably done. */ + msg->done(msg); + + /* Take off the msg flag. */ + ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL; + handle_flags(ssif_info, flags); + } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || msg->rsp[1] != IPMI_GET_MSG_CMD) { + pr_warn(PFX "Invalid response clearing flags: %x %x\n", + msg->rsp[0], msg->rsp[1]); + msg->done(msg); + + /* Take off the msg flag. */ + ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL; + handle_flags(ssif_info, flags); + } else { + ssif_inc_stat(ssif_info, incoming_messages); + handle_flags(ssif_info, flags); + deliver_recv_msg(ssif_info, msg); + } + break; + } + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) { + if (ssif_info->req_events) + start_event_fetch(ssif_info, flags); + else if (ssif_info->req_flags) + start_flag_fetch(ssif_info, flags); + else + start_next_msg(ssif_info, flags); + } else + ipmi_ssif_unlock_cond(ssif_info, flags); + + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) + pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state); +} + +static void msg_written_handler(struct ssif_info *ssif_info, int result, + unsigned char *data, unsigned int len) +{ + int rv; + + /* We are single-threaded here, so no need for a lock. */ + if (result < 0) { + ssif_info->retries_left--; + if (ssif_info->retries_left > 0) { + if (!start_resend(ssif_info)) { + ssif_inc_stat(ssif_info, send_retries); + return; + } + /* request failed, just return the error. */ + ssif_inc_stat(ssif_info, send_errors); + + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info(PFX + "Out of retries in msg_written_handler\n"); + msg_done_handler(ssif_info, -EIO, NULL, 0); + return; + } + + ssif_inc_stat(ssif_info, send_errors); + + /* + * Got an error on transmit, let the done routine + * handle it. + */ + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error in msg_written_handler: %d\n", result); + + msg_done_handler(ssif_info, result, NULL, 0); + return; + } + + if (ssif_info->multi_data) { + /* In the middle of a multi-data write. */ + int left; + + ssif_inc_stat(ssif_info, sent_messages_parts); + + left = ssif_info->multi_len - ssif_info->multi_pos; + if (left > 32) + left = 32; + /* Length byte. 
*/ + ssif_info->multi_data[ssif_info->multi_pos] = left; + ssif_info->multi_pos += left; + if (left < 32) + /* + * Write is finished. Note that we must end + * with a write of less than 32 bytes to + * complete the transaction, even if it is + * zero bytes. + */ + ssif_info->multi_data = NULL; + + rv = ssif_i2c_send(ssif_info, msg_written_handler, + I2C_SMBUS_WRITE, + SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE, + ssif_info->multi_data + ssif_info->multi_pos, + I2C_SMBUS_BLOCK_DATA); + if (rv < 0) { + /* request failed, just return the error. */ + ssif_inc_stat(ssif_info, send_errors); + + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Error from i2c_non_blocking_op(3)\n"); + msg_done_handler(ssif_info, -EIO, NULL, 0); + } + } else { + ssif_inc_stat(ssif_info, sent_messages); + ssif_inc_stat(ssif_info, sent_messages_parts); + + /* Wait a jiffie then request the next message */ + ssif_info->retries_left = SSIF_RECV_RETRIES; + ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_PART_JIFFIES); + return; + } +} + +static int start_resend(struct ssif_info *ssif_info) +{ + int rv; + int command; + + if (ssif_info->data_len > 32) { + command = SSIF_IPMI_MULTI_PART_REQUEST_START; + ssif_info->multi_data = ssif_info->data; + ssif_info->multi_len = ssif_info->data_len; + /* + * Subtle thing, this is 32, not 33, because we will + * overwrite the thing at position 32 (which was just + * transmitted) with the new length. + */ + ssif_info->multi_pos = 32; + ssif_info->data[0] = 32; + } else { + ssif_info->multi_data = NULL; + command = SSIF_IPMI_REQUEST; + ssif_info->data[0] = ssif_info->data_len; + } + + rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE, + command, ssif_info->data, I2C_SMBUS_BLOCK_DATA); + if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG)) + pr_info("Error from i2c_non_blocking_op(4)\n"); + return rv; +} + +static int start_send(struct ssif_info *ssif_info, + unsigned char *data, + unsigned int len) +{ + if (len > IPMI_MAX_MSG_LENGTH) + return -E2BIG; + if (len > ssif_info->max_xmit_msg_size) + return -E2BIG; + + ssif_info->retries_left = SSIF_SEND_RETRIES; + memcpy(ssif_info->data+1, data, len); + ssif_info->data_len = len; + return start_resend(ssif_info); +} + +/* Must be called with the message lock held. 
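start_resend() and msg_written_handler() together implement the multi-part write side: the first transaction always carries 32 bytes, each middle transaction reuses the byte just before the remaining data as its SMBus length byte (the "32, not 33" subtlety above), and the transfer only terminates on a part shorter than 32 bytes, even if that part must be empty. A userspace model of the resulting chunk schedule:

#include <stdio.h>

int main(void)
{
    unsigned int data_len = 70;    /* whole IPMI message; needs three parts */
    unsigned int pos = 32;         /* first part always sends 32 bytes */

    printf("part 1: 32 bytes (START)\n");
    while (1) {
        unsigned int left = data_len - pos;

        if (left > 32)
            left = 32;
        /*
         * The byte at 'pos' is overwritten with this part's length,
         * which is why the first part leaves pos at 32, not 33.
         */
        printf("part n: %u bytes (MIDDLE)\n", left);
        pos += left;
        if (left < 32)
            break;    /* a short (possibly empty) part ends the transfer */
    }
    return 0;
}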
*/ +static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) +{ + struct ipmi_smi_msg *msg; + unsigned long oflags; + + restart: + if (!SSIF_IDLE(ssif_info)) { + ipmi_ssif_unlock_cond(ssif_info, flags); + return; + } + + if (!ssif_info->waiting_msg) { + ssif_info->curr_msg = NULL; + ipmi_ssif_unlock_cond(ssif_info, flags); + } else { + int rv; + + ssif_info->curr_msg = ssif_info->waiting_msg; + ssif_info->waiting_msg = NULL; + ipmi_ssif_unlock_cond(ssif_info, flags); + rv = start_send(ssif_info, + ssif_info->curr_msg->data, + ssif_info->curr_msg->data_size); + if (rv) { + msg = ssif_info->curr_msg; + ssif_info->curr_msg = NULL; + return_hosed_msg(ssif_info, msg); + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + goto restart; + } + } +} + +static void sender(void *send_info, + struct ipmi_smi_msg *msg) +{ + struct ssif_info *ssif_info = (struct ssif_info *) send_info; + unsigned long oflags, *flags; + + BUG_ON(ssif_info->waiting_msg); + ssif_info->waiting_msg = msg; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + start_next_msg(ssif_info, flags); + + if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) { + struct timeval t; + + do_gettimeofday(&t); + pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", + msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); + } +} + +static int get_smi_info(void *send_info, struct ipmi_smi_info *data) +{ + struct ssif_info *ssif_info = send_info; + + data->addr_src = ssif_info->addr_source; + data->dev = &ssif_info->client->dev; + data->addr_info = ssif_info->addr_info; + get_device(data->dev); + + return 0; +} + +/* + * Instead of having our own timer to periodically check the message + * flags, we let the message handler drive us. + */ +static void request_events(void *send_info) +{ + struct ssif_info *ssif_info = (struct ssif_info *) send_info; + unsigned long oflags, *flags; + + if (!ssif_info->has_event_buffer) + return; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + /* + * Request flags first, not events, because the lower layer + * doesn't have a way to send an attention. But make sure + * event checking still happens. + */ + ssif_info->req_events = true; + if (SSIF_IDLE(ssif_info)) + start_flag_fetch(ssif_info, flags); + else { + ssif_info->req_flags = true; + ipmi_ssif_unlock_cond(ssif_info, flags); + } +} + +static int inc_usecount(void *send_info) +{ + struct ssif_info *ssif_info = send_info; + + if (!i2c_get_adapter(ssif_info->client->adapter->nr)) + return -ENODEV; + + i2c_use_client(ssif_info->client); + return 0; +} + +static void dec_usecount(void *send_info) +{ + struct ssif_info *ssif_info = send_info; + + i2c_release_client(ssif_info->client); + i2c_put_adapter(ssif_info->client->adapter); +} + +static int ssif_start_processing(void *send_info, + ipmi_smi_t intf) +{ + struct ssif_info *ssif_info = send_info; + + ssif_info->intf = intf; + + return 0; +} + +#define MAX_SSIF_BMCS 4 + +static unsigned short addr[MAX_SSIF_BMCS]; +static int num_addrs; +module_param_array(addr, ushort, &num_addrs, 0); +MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs."); + +static char *adapter_name[MAX_SSIF_BMCS]; +static int num_adapter_names; +module_param_array(adapter_name, charp, &num_adapter_names, 0); +MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. 
By default all devices are scanned."); + +static int slave_addrs[MAX_SSIF_BMCS]; +static int num_slave_addrs; +module_param_array(slave_addrs, int, &num_slave_addrs, 0); +MODULE_PARM_DESC(slave_addrs, + "The default IPMB slave address for the controller."); + +/* + * Bit 0 enables message debugging, bit 1 enables state debugging, and + * bit 2 enables timing debugging. This is an array indexed by + * interface number" + */ +static int dbg[MAX_SSIF_BMCS]; +static int num_dbg; +module_param_array(dbg, int, &num_dbg, 0); +MODULE_PARM_DESC(dbg, "Turn on debugging."); + +static bool ssif_dbg_probe; +module_param_named(dbg_probe, ssif_dbg_probe, bool, 0); +MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters."); + +static int use_thread; +module_param(use_thread, int, 0); +MODULE_PARM_DESC(use_thread, "Use the thread interface."); + +static bool ssif_tryacpi = 1; +module_param_named(tryacpi, ssif_tryacpi, bool, 0); +MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI"); + +static bool ssif_trydmi = 1; +module_param_named(trydmi, ssif_trydmi, bool, 0); +MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)"); + +static DEFINE_MUTEX(ssif_infos_mutex); +static LIST_HEAD(ssif_infos); + +static int ssif_remove(struct i2c_client *client) +{ + struct ssif_info *ssif_info = i2c_get_clientdata(client); + int rv; + + if (!ssif_info) + return 0; + + i2c_set_clientdata(client, NULL); + + /* + * After this point, we won't deliver anything asychronously + * to the message handler. We can unregister ourself. + */ + rv = ipmi_unregister_smi(ssif_info->intf); + if (rv) { + pr_err(PFX "Unable to unregister device: errno=%d\n", rv); + return rv; + } + ssif_info->intf = NULL; + + /* make sure the driver is not looking for flags any more. */ + while (ssif_info->ssif_state != SSIF_NORMAL) + schedule_timeout(1); + + ssif_info->stopping = true; + del_timer_sync(&ssif_info->retry_timer); + if (ssif_info->thread) { + complete(&ssif_info->wake_thread); + kthread_stop(ssif_info->thread); + } + + /* + * No message can be outstanding now, we have removed the + * upper layer and it permitted us to do so. + */ + kfree(ssif_info); + return 0; +} + +static int do_cmd(struct i2c_client *client, int len, unsigned char *msg, + int *resp_len, unsigned char *resp) +{ + int retry_cnt; + int ret; + + retry_cnt = SSIF_SEND_RETRIES; + retry1: + ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg); + if (ret) { + retry_cnt--; + if (retry_cnt > 0) + goto retry1; + return -ENODEV; + } + + ret = -ENODEV; + retry_cnt = SSIF_RECV_RETRIES; + while (retry_cnt > 0) { + ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE, + resp); + if (ret > 0) + break; + msleep(SSIF_MSG_MSEC); + retry_cnt--; + if (retry_cnt <= 0) + break; + } + + if (ret > 0) { + /* Validate that the response is correct. */ + if (ret < 3 || + (resp[0] != (msg[0] | (1 << 2))) || + (resp[1] != msg[1])) + ret = -EINVAL; + else { + *resp_len = ret; + ret = 0; + } + } + + return ret; +} + +static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) +{ + unsigned char *resp; + unsigned char msg[3]; + int rv; + int len; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + /* Do a Get Device ID command, since it is required. 
*/ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_DEVICE_ID_CMD; + rv = do_cmd(client, 2, msg, &len, resp); + if (rv) + rv = -ENODEV; + else + strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE); + kfree(resp); + return rv; +} + +static int smi_type_proc_show(struct seq_file *m, void *v) +{ + return seq_puts(m, "ssif\n"); +} + +static int smi_type_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_type_proc_show, inode->i_private); +} + +static const struct file_operations smi_type_proc_ops = { + .open = smi_type_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smi_stats_proc_show(struct seq_file *m, void *v) +{ + struct ssif_info *ssif_info = m->private; + + seq_printf(m, "sent_messages: %u\n", + ssif_get_stat(ssif_info, sent_messages)); + seq_printf(m, "sent_messages_parts: %u\n", + ssif_get_stat(ssif_info, sent_messages_parts)); + seq_printf(m, "send_retries: %u\n", + ssif_get_stat(ssif_info, send_retries)); + seq_printf(m, "send_errors: %u\n", + ssif_get_stat(ssif_info, send_errors)); + seq_printf(m, "received_messages: %u\n", + ssif_get_stat(ssif_info, received_messages)); + seq_printf(m, "received_message_parts: %u\n", + ssif_get_stat(ssif_info, received_message_parts)); + seq_printf(m, "receive_retries: %u\n", + ssif_get_stat(ssif_info, receive_retries)); + seq_printf(m, "receive_errors: %u\n", + ssif_get_stat(ssif_info, receive_errors)); + seq_printf(m, "flag_fetches: %u\n", + ssif_get_stat(ssif_info, flag_fetches)); + seq_printf(m, "hosed: %u\n", + ssif_get_stat(ssif_info, hosed)); + seq_printf(m, "events: %u\n", + ssif_get_stat(ssif_info, events)); + seq_printf(m, "watchdog_pretimeouts: %u\n", + ssif_get_stat(ssif_info, watchdog_pretimeouts)); + return 0; +} + +static int smi_stats_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, smi_stats_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations smi_stats_proc_ops = { + .open = smi_stats_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct ssif_addr_info *ssif_info_find(unsigned short addr, + char *adapter_name, + bool match_null_name) +{ + struct ssif_addr_info *info, *found = NULL; + +restart: + list_for_each_entry(info, &ssif_infos, link) { + if (info->binfo.addr == addr) { + if (info->adapter_name || adapter_name) { + if (!info->adapter_name != !adapter_name) { + /* One is NULL and one is not */ + continue; + } + if (strcmp(info->adapter_name, adapter_name)) + /* Names to not match */ + continue; + } + found = info; + break; + } + } + + if (!found && match_null_name) { + /* Try to get an exact match first, then try with a NULL name */ + adapter_name = NULL; + match_null_name = false; + goto restart; + } + + return found; +} + +static bool check_acpi(struct ssif_info *ssif_info, struct device *dev) +{ +#ifdef CONFIG_ACPI + acpi_handle acpi_handle; + + acpi_handle = ACPI_HANDLE(dev); + if (acpi_handle) { + ssif_info->addr_source = SI_ACPI; + ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle; + return true; + } +#endif + return false; +} + +static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + unsigned char msg[3]; + unsigned char *resp; + struct ssif_info *ssif_info; + int rv = 0; + int len; + int i; + u8 slave_addr = 0; + struct ssif_addr_info *addr_info = NULL; + + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL); 
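ssif_info_find(), shown earlier in this hunk, does a two-pass match: an exact (address, adapter name) lookup first, then, when match_null_name is set, a retry with a NULL name so address-only registrations still match any adapter. A distilled version of that lookup (table contents invented for the example):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct info { unsigned short addr; const char *adapter_name; };

static struct info table[] = {
    { 0x42, "i2c-1" },
    { 0x42, NULL },        /* address-only registration */
};

static struct info *find(unsigned short addr, const char *name, int match_null)
{
restart:
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        struct info *e = &table[i];

        if (e->addr != addr)
            continue;
        if (e->adapter_name || name) {
            if (!e->adapter_name != !name)
                continue;    /* one NULL and one not */
            if (e->adapter_name && strcmp(e->adapter_name, name))
                continue;    /* names differ */
        }
        return e;
    }
    if (match_null && name) {    /* exact match failed: retry with a NULL name */
        name = NULL;
        goto restart;
    }
    return NULL;
}

int main(void)
{
    struct info *e = find(0x42, "i2c-7", 1);

    printf("matched entry with adapter %s\n",
           e && e->adapter_name ? e->adapter_name : "(any)");
    return 0;
}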
+ if (!ssif_info) { + kfree(resp); + return -ENOMEM; + } + + if (!check_acpi(ssif_info, &client->dev)) { + addr_info = ssif_info_find(client->addr, client->adapter->name, + true); + if (!addr_info) { + /* Must have come in through sysfs. */ + ssif_info->addr_source = SI_HOTMOD; + } else { + ssif_info->addr_source = addr_info->addr_src; + ssif_info->ssif_debug = addr_info->debug; + ssif_info->addr_info = addr_info->addr_info; + slave_addr = addr_info->slave_addr; + } + } + + pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n", + ipmi_addr_src_to_str(ssif_info->addr_source), + client->addr, client->adapter->name, slave_addr); + + /* + * Do a Get Device ID command, since it comes back with some + * useful info. + */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_DEVICE_ID_CMD; + rv = do_cmd(client, 2, msg, &len, resp); + if (rv) + goto out; + + rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id); + if (rv) + goto out; + + ssif_info->client = client; + i2c_set_clientdata(client, ssif_info); + + /* Now check for system interface capabilities */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; + msg[2] = 0; /* SSIF */ + rv = do_cmd(client, 3, msg, &len, resp); + if (!rv && (len >= 3) && (resp[2] == 0)) { + if (len < 7) { + if (ssif_dbg_probe) + pr_info(PFX "SSIF info too short: %d\n", len); + goto no_support; + } + + /* Got a good SSIF response, handle it. */ + ssif_info->max_xmit_msg_size = resp[5]; + ssif_info->max_recv_msg_size = resp[6]; + ssif_info->multi_support = (resp[4] >> 6) & 0x3; + ssif_info->supports_pec = (resp[4] >> 3) & 0x1; + + /* Sanitize the data */ + switch (ssif_info->multi_support) { + case SSIF_NO_MULTI: + if (ssif_info->max_xmit_msg_size > 32) + ssif_info->max_xmit_msg_size = 32; + if (ssif_info->max_recv_msg_size > 32) + ssif_info->max_recv_msg_size = 32; + break; + + case SSIF_MULTI_2_PART: + if (ssif_info->max_xmit_msg_size > 64) + ssif_info->max_xmit_msg_size = 64; + if (ssif_info->max_recv_msg_size > 62) + ssif_info->max_recv_msg_size = 62; + break; + + case SSIF_MULTI_n_PART: + break; + + default: + /* Data is not sane, just give up. */ + goto no_support; + } + } else { + no_support: + /* Assume no multi-part or PEC support */ + pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", + rv, len, resp[2]); + + ssif_info->max_xmit_msg_size = 32; + ssif_info->max_recv_msg_size = 32; + ssif_info->multi_support = SSIF_NO_MULTI; + ssif_info->supports_pec = 0; + } + + /* Make sure the NMI timeout is cleared. */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; + msg[2] = WDT_PRE_TIMEOUT_INT; + rv = do_cmd(client, 3, msg, &len, resp); + if (rv || (len < 3) || (resp[2] != 0)) + pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n", + rv, len, resp[2]); + + /* Attempt to enable the event buffer. */ + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; + rv = do_cmd(client, 2, msg, &len, resp); + if (rv || (len < 4) || (resp[2] != 0)) { + pr_warn(PFX "Error getting global enables: %d %d %2.2x\n", + rv, len, resp[2]); + rv = 0; /* Not fatal */ + goto found; + } + + if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { + ssif_info->has_event_buffer = true; + /* buffer is already enabled, nothing to do. 
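+ * Otherwise the enables are read-modify-written below; for
+ * example (illustrative values), if resp[3] reads back 0x08 the
+ * driver writes 0x08 | IPMI_BMC_EVT_MSG_BUFF so the other global
+ * enable bits are preserved.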
*/ + goto found; + } + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; + rv = do_cmd(client, 3, msg, &len, resp); + if (rv || (len < 2)) { + pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", + rv, len, resp[2]); + rv = 0; /* Not fatal */ + goto found; + } + + if (resp[2] == 0) + /* A successful return means the event buffer is supported. */ + ssif_info->has_event_buffer = true; + + found: + ssif_info->intf_num = atomic_inc_return(&next_intf); + + if (ssif_dbg_probe) { + pr_info("ssif_probe: i2c_probe found device at i2c address %x\n", + client->addr); + } + + spin_lock_init(&ssif_info->lock); + ssif_info->ssif_state = SSIF_NORMAL; + init_timer(&ssif_info->retry_timer); + ssif_info->retry_timer.data = (unsigned long) ssif_info; + ssif_info->retry_timer.function = retry_timeout; + + for (i = 0; i < SSIF_NUM_STATS; i++) + atomic_set(&ssif_info->stats[i], 0); + + if (ssif_info->supports_pec) + ssif_info->client->flags |= I2C_CLIENT_PEC; + + ssif_info->handlers.owner = THIS_MODULE; + ssif_info->handlers.start_processing = ssif_start_processing; + ssif_info->handlers.get_smi_info = get_smi_info; + ssif_info->handlers.sender = sender; + ssif_info->handlers.request_events = request_events; + ssif_info->handlers.inc_usecount = inc_usecount; + ssif_info->handlers.dec_usecount = dec_usecount; + + { + unsigned int thread_num; + + thread_num = ((ssif_info->client->adapter->nr << 8) | + ssif_info->client->addr); + init_completion(&ssif_info->wake_thread); + ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info, + "kssif%4.4x", thread_num); + if (IS_ERR(ssif_info->thread)) { + rv = PTR_ERR(ssif_info->thread); + dev_notice(&ssif_info->client->dev, + "Could not start kernel thread: error %d\n", + rv); + goto out; + } + } + + rv = ipmi_register_smi(&ssif_info->handlers, + ssif_info, + &ssif_info->device_id, + &ssif_info->client->dev, + slave_addr); + if (rv) { + pr_err(PFX "Unable to register device: error %d\n", rv); + goto out; + } + + rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type", + &smi_type_proc_ops, + ssif_info); + if (rv) { + pr_err(PFX "Unable to create proc entry: %d\n", rv); + goto out_err_unreg; + } + + rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats", + &smi_stats_proc_ops, + ssif_info); + if (rv) { + pr_err(PFX "Unable to create proc entry: %d\n", rv); + goto out_err_unreg; + } + + out: + if (rv) + kfree(ssif_info); + kfree(resp); + return rv; + + out_err_unreg: + ipmi_unregister_smi(ssif_info->intf); + goto out; +} + +static int ssif_adapter_handler(struct device *adev, void *opaque) +{ + struct ssif_addr_info *addr_info = opaque; + + if (adev->type != &i2c_adapter_type) + return 0; + + i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); + + if (!addr_info->adapter_name) + return 1; /* Only try the first I2C adapter by default.
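+ * Returning non-zero stops i2c_for_each_dev() from walking the
+ * remaining adapters, so an entry without an adapter name is
+ * instantiated only once.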
*/ + return 0; +} + +static int new_ssif_client(int addr, char *adapter_name, + int debug, int slave_addr, + enum ipmi_addr_src addr_src) +{ + struct ssif_addr_info *addr_info; + int rv = 0; + + mutex_lock(&ssif_infos_mutex); + if (ssif_info_find(addr, adapter_name, false)) { + rv = -EEXIST; + goto out_unlock; + } + + addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL); + if (!addr_info) { + rv = -ENOMEM; + goto out_unlock; + } + + if (adapter_name) { + addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL); + if (!addr_info->adapter_name) { + kfree(addr_info); + rv = -ENOMEM; + goto out_unlock; + } + } + + strncpy(addr_info->binfo.type, DEVICE_NAME, + sizeof(addr_info->binfo.type)); + addr_info->binfo.addr = addr; + addr_info->binfo.platform_data = addr_info; + addr_info->debug = debug; + addr_info->slave_addr = slave_addr; + addr_info->addr_src = addr_src; + + list_add_tail(&addr_info->link, &ssif_infos); + + if (initialized) + i2c_for_each_dev(addr_info, ssif_adapter_handler); + /* Otherwise address list will get it */ + +out_unlock: + mutex_unlock(&ssif_infos_mutex); + return rv; +} + +static void free_ssif_clients(void) +{ + struct ssif_addr_info *info, *tmp; + + mutex_lock(&ssif_infos_mutex); + list_for_each_entry_safe(info, tmp, &ssif_infos, link) { + list_del(&info->link); + kfree(info->adapter_name); + kfree(info); + } + mutex_unlock(&ssif_infos_mutex); +} + +static unsigned short *ssif_address_list(void) +{ + struct ssif_addr_info *info; + unsigned int count = 0, i; + unsigned short *address_list; + + list_for_each_entry(info, &ssif_infos, link) + count++; + + address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL); + if (!address_list) + return NULL; + + i = 0; + list_for_each_entry(info, &ssif_infos, link) { + unsigned short addr = info->binfo.addr; + int j; + + for (j = 0; j < i; j++) { + if (address_list[j] == addr) + goto skip_addr; + } + address_list[i] = addr; +skip_addr: + i++; + } + address_list[i] = I2C_CLIENT_END; + + return address_list; +} + +#ifdef CONFIG_ACPI +static struct acpi_device_id ssif_acpi_match[] = { + { "IPI0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, ssif_acpi_match); + +/* + * Once we get an ACPI failure, we don't try any more, because we go + * through the tables sequentially. Once we don't find a table, there + * are no more. + */ +static int acpi_failure; + +/* + * Defined in the IPMI 2.0 spec. + */ +struct SPMITable { + s8 Signature[4]; + u32 Length; + u8 Revision; + u8 Checksum; + s8 OEMID[6]; + s8 OEMTableID[8]; + s8 OEMRevision[4]; + s8 CreatorID[4]; + s8 CreatorRevision[4]; + u8 InterfaceType; + u8 IPMIlegacy; + s16 SpecificationRevision; + + /* + * Bit 0 - SCI interrupt supported + * Bit 1 - I/O APIC/SAPIC + */ + u8 InterruptType; + + /* + * If bit 0 of InterruptType is set, then this is the SCI + * interrupt in the GPEx_STS register. + */ + u8 GPE; + + s16 Reserved; + + /* + * If bit 1 of InterruptType is set, then this is the I/O + * APIC/SAPIC interrupt. + */ + u32 GlobalSystemInterrupt; + + /* The actual register address. */ + struct acpi_generic_address addr; + + u8 UID[4]; + + s8 spmi_id[1]; /* A '\0' terminated array starts here. 
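+ * (a variable-length identifier string; the table's Length field
+ * accounts for however many bytes it actually occupies)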
*/ +}; + +static int try_init_spmi(struct SPMITable *spmi) +{ + unsigned short myaddr; + + if (num_addrs >= MAX_SSIF_BMCS) + return -1; + + if (spmi->IPMIlegacy != 1) { + pr_warn("IPMI: Bad SPMI legacy: %d\n", spmi->IPMIlegacy); + return -ENODEV; + } + + if (spmi->InterfaceType != 4) + return -ENODEV; + + if (spmi->addr.space_id != ACPI_ADR_SPACE_SMBUS) { + pr_warn(PFX "Invalid ACPI SSIF I/O Address type: %d\n", + spmi->addr.space_id); + return -EIO; + } + + myaddr = spmi->addr.address >> 1; + + return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI); +} + +static void spmi_find_bmc(void) +{ + acpi_status status; + struct SPMITable *spmi; + int i; + + if (acpi_disabled) + return; + + if (acpi_failure) + return; + + for (i = 0; ; i++) { + status = acpi_get_table(ACPI_SIG_SPMI, i+1, + (struct acpi_table_header **)&spmi); + if (status != AE_OK) + return; + + try_init_spmi(spmi); + } +} +#else +static void spmi_find_bmc(void) { } +#endif + +#ifdef CONFIG_DMI +static int decode_dmi(const struct dmi_device *dmi_dev) +{ + struct dmi_header *dm = dmi_dev->device_data; + u8 *data = (u8 *) dm; + u8 len = dm->length; + unsigned short myaddr; + int slave_addr; + + if (num_addrs >= MAX_SSIF_BMCS) + return -1; + + if (len < 9) + return -1; + + if (data[0x04] != 4) /* Not SSIF */ + return -1; + + if ((data[8] >> 1) == 0) { + /* + * Some broken systems put the I2C address in + * the slave address field. We try to + * accommodate them here. + */ + myaddr = data[6] >> 1; + slave_addr = 0; + } else { + myaddr = data[8] >> 1; + slave_addr = data[6]; + } + + return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS); +} + +static void dmi_iterator(void) +{ + const struct dmi_device *dev = NULL; + + while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) + decode_dmi(dev); +} +#else +static void dmi_iterator(void) { } +#endif + +static const struct i2c_device_id ssif_id[] = { + { DEVICE_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ssif_id); + +static struct i2c_driver ssif_i2c_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .owner = THIS_MODULE, + .name = DEVICE_NAME + }, + .probe = ssif_probe, + .remove = ssif_remove, + .id_table = ssif_id, + .detect = ssif_detect +}; + +static int init_ipmi_ssif(void) +{ + int i; + int rv; + + if (initialized) + return 0; + + pr_info("IPMI SSIF Interface driver\n"); + + /* build list for i2c from addr list */ + for (i = 0; i < num_addrs; i++) { + rv = new_ssif_client(addr[i], adapter_name[i], + dbg[i], slave_addrs[i], + SI_HARDCODED); + if (rv) + pr_err(PFX + "Couldn't add hardcoded device at addr 0x%x\n", + addr[i]); + } + + if (ssif_tryacpi) + ssif_i2c_driver.driver.acpi_match_table = + ACPI_PTR(ssif_acpi_match); + if (ssif_trydmi) + dmi_iterator(); + if (ssif_tryacpi) + spmi_find_bmc(); + + ssif_i2c_driver.address_list = ssif_address_list(); + + rv = i2c_add_driver(&ssif_i2c_driver); + if (!rv) + initialized = true; + + return rv; +} +module_init(init_ipmi_ssif); + +static void cleanup_ipmi_ssif(void) +{ + if (!initialized) + return; + + initialized = false; + + i2c_del_driver(&ssif_i2c_driver); + + free_ssif_clients(); +} +module_exit(cleanup_ipmi_ssif); + +MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>"); +MODULE_DESCRIPTION("IPMI driver for management controllers on an SMBus"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 455fd17d938e..3f44f292d066 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -28,7 +28,7 @@ config COMMON_CLK_WM831X depends on MFD_WM831X ---help---
Supports the clocking subsystem of the WM831x/2x series of - PMICs from Wolfson Microlectronics. + PMICs from Wolfson Microelectronics. source "drivers/clk/versatile/Kconfig" diff --git a/drivers/clk/clk-ls1x.c b/drivers/clk/clk-ls1x.c index f20b750235f6..ca80103ac188 100644 --- a/drivers/clk/clk-ls1x.c +++ b/drivers/clk/clk-ls1x.c @@ -15,7 +15,8 @@ #include <loongson1.h> -#define OSC 33 +#define OSC (33 * 1000000) +#define DIV_APB 2 static DEFINE_SPINLOCK(_lock); @@ -29,13 +30,12 @@ static void ls1x_pll_clk_disable(struct clk_hw *hw) } static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) + unsigned long parent_rate) { u32 pll, rate; pll = __raw_readl(LS1X_CLK_PLL_FREQ); - rate = ((12 + (pll & 0x3f)) * 1000000) + - ((((pll >> 8) & 0x3ff) * 1000000) >> 10); + rate = 12 + (pll & 0x3f) + (((pll >> 8) & 0x3ff) >> 10); rate *= OSC; rate >>= 1; @@ -48,8 +48,10 @@ static const struct clk_ops ls1x_pll_clk_ops = { .recalc_rate = ls1x_pll_recalc_rate, }; -static struct clk * __init clk_register_pll(struct device *dev, - const char *name, const char *parent_name, unsigned long flags) +static struct clk *__init clk_register_pll(struct device *dev, + const char *name, + const char *parent_name, + unsigned long flags) { struct clk_hw *hw; struct clk *clk; @@ -78,34 +80,83 @@ static struct clk * __init clk_register_pll(struct device *dev, return clk; } +static const char *const cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", }; +static const char *const ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", }; +static const char *const dc_parents[] = { "dc_clk_div", "osc_33m_clk", }; + void __init ls1x_clk_init(void) { struct clk *clk; - clk = clk_register_pll(NULL, "pll_clk", NULL, CLK_IS_ROOT); - clk_prepare_enable(clk); - - clk = clk_register_divider(NULL, "cpu_clk", "pll_clk", - CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_CPU_SHIFT, - DIV_CPU_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); - clk_prepare_enable(clk); - clk_register_clkdev(clk, "cpu", NULL); - - clk = clk_register_divider(NULL, "dc_clk", "pll_clk", - CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT, - DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); - clk_prepare_enable(clk); - clk_register_clkdev(clk, "dc", NULL); - - clk = clk_register_divider(NULL, "ahb_clk", "pll_clk", - CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT, - DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); - clk_prepare_enable(clk); - clk_register_clkdev(clk, "ahb", NULL); + clk = clk_register_fixed_rate(NULL, "osc_33m_clk", NULL, CLK_IS_ROOT, + OSC); + clk_register_clkdev(clk, "osc_33m_clk", NULL); + + /* clock derived from 33 MHz OSC clk */ + clk = clk_register_pll(NULL, "pll_clk", "osc_33m_clk", 0); + clk_register_clkdev(clk, "pll_clk", NULL); + + /* clock derived from PLL clk */ + /* _____ + * _______________________| | + * OSC ___/ | MUX |___ CPU CLK + * \___ PLL ___ CPU DIV ___| | + * |_____| + */ + clk = clk_register_divider(NULL, "cpu_clk_div", "pll_clk", + CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV, + DIV_CPU_SHIFT, DIV_CPU_WIDTH, + CLK_DIVIDER_ONE_BASED | + CLK_DIVIDER_ROUND_CLOSEST, &_lock); + clk_register_clkdev(clk, "cpu_clk_div", NULL); + clk = clk_register_mux(NULL, "cpu_clk", cpu_parents, + ARRAY_SIZE(cpu_parents), + CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV, + BYPASS_CPU_SHIFT, BYPASS_CPU_WIDTH, 0, &_lock); + clk_register_clkdev(clk, "cpu_clk", NULL); + + /* _____ + * _______________________| | + * OSC ___/ | MUX |___ DC CLK + * \___ PLL ___ DC DIV ___| | + * |_____| + */ + clk = clk_register_divider(NULL, "dc_clk_div", "pll_clk", +
0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT, + DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); + clk_register_clkdev(clk, "dc_clk_div", NULL); + clk = clk_register_mux(NULL, "dc_clk", dc_parents, + ARRAY_SIZE(dc_parents), + CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV, + BYPASS_DC_SHIFT, BYPASS_DC_WIDTH, 0, &_lock); + clk_register_clkdev(clk, "dc_clk", NULL); + + /* _____ + * _______________________| | + * OSC ___/ | MUX |___ DDR CLK + * \___ PLL ___ DDR DIV ___| | + * |_____| + */ + clk = clk_register_divider(NULL, "ahb_clk_div", "pll_clk", + 0, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT, + DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED, + &_lock); + clk_register_clkdev(clk, "ahb_clk_div", NULL); + clk = clk_register_mux(NULL, "ahb_clk", ahb_parents, + ARRAY_SIZE(ahb_parents), + CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV, + BYPASS_DDR_SHIFT, BYPASS_DDR_WIDTH, 0, &_lock); + clk_register_clkdev(clk, "ahb_clk", NULL); clk_register_clkdev(clk, "stmmaceth", NULL); - clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1, 2); - clk_prepare_enable(clk); - clk_register_clkdev(clk, "apb", NULL); + /* clock derived from AHB clk */ + /* APB clk is always half of the AHB clk */ + clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1, + DIV_APB); + clk_register_clkdev(clk, "apb_clk", NULL); + clk_register_clkdev(clk, "ls1x_i2c", NULL); + clk_register_clkdev(clk, "ls1x_pwmtimer", NULL); + clk_register_clkdev(clk, "ls1x_spi", NULL); + clk_register_clkdev(clk, "ls1x_wdt", NULL); clk_register_clkdev(clk, "serial8250", NULL); } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index f657a48d20eb..fc01ec27d3c8 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -224,4 +224,9 @@ config CLKSRC_VERSATILE ARM Versatile, RealView and Versatile Express reference platforms. +config CLKSRC_MIPS_GIC + bool + depends on MIPS_GIC + select CLKSRC_OF + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index fae0435cc23d..94d90b24b56b 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -47,3 +47,4 @@ obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o +obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c new file mode 100644 index 000000000000..3bd31b1321f6 --- /dev/null +++ b/drivers/clocksource/mips-gic-timer.c @@ -0,0 +1,166 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + */ +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irqchip/mips-gic.h> +#include <linux/notifier.h> +#include <linux/of_irq.h> +#include <linux/percpu.h> +#include <linux/smp.h> +#include <linux/time.h> + +static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); +static int gic_timer_irq; +static unsigned int gic_frequency; + +static int gic_next_event(unsigned long delta, struct clock_event_device *evt) +{ + u64 cnt; + int res; + + cnt = gic_read_count(); + cnt += (u64)delta; + gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); + res = ((int)(gic_read_count() - cnt) >= 0) ? 
-ETIME : 0; + return res; +} + +static void gic_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* Nothing to do ... */ +} + +static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *cd = dev_id; + + gic_write_compare(gic_read_compare()); + cd->event_handler(cd); + return IRQ_HANDLED; +} + +struct irqaction gic_compare_irqaction = { + .handler = gic_compare_interrupt, + .percpu_dev_id = &gic_clockevent_device, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "timer", +}; + +static void gic_clockevent_cpu_init(struct clock_event_device *cd) +{ + unsigned int cpu = smp_processor_id(); + + cd->name = "MIPS GIC"; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; + + cd->rating = 350; + cd->irq = gic_timer_irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = gic_next_event; + cd->set_mode = gic_set_clock_mode; + + clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff); + + enable_percpu_irq(gic_timer_irq, IRQ_TYPE_NONE); +} + +static void gic_clockevent_cpu_exit(struct clock_event_device *cd) +{ + disable_percpu_irq(gic_timer_irq); +} + +static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); + break; + case CPU_DYING: + gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block gic_cpu_nb = { + .notifier_call = gic_cpu_notifier, +}; + +static int gic_clockevent_init(void) +{ + if (!cpu_has_counter || !gic_frequency) + return -ENXIO; + + setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction); + + register_cpu_notifier(&gic_cpu_nb); + + gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); + + return 0; +} + +static cycle_t gic_hpt_read(struct clocksource *cs) +{ + return gic_read_count(); +} + +static struct clocksource gic_clocksource = { + .name = "GIC", + .read = gic_hpt_read, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void __init __gic_clocksource_init(void) +{ + /* Set clocksource mask. */ + gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width()); + + /* Calculate a somewhat reasonable rating value. 
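+ * For example, a 50 MHz GIC counter gets 200 + 50000000 / 10000000
+ * = 205, so faster (finer-grained) counters rate slightly higher.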
*/ + gic_clocksource.rating = 200 + gic_frequency / 10000000; + + clocksource_register_hz(&gic_clocksource, gic_frequency); + + gic_clockevent_init(); +} + +void __init gic_clocksource_init(unsigned int frequency) +{ + gic_frequency = frequency; + gic_timer_irq = MIPS_GIC_IRQ_BASE + + GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE); + + __gic_clocksource_init(); +} + +static void __init gic_clocksource_of_init(struct device_node *node) +{ + if (WARN_ON(!gic_present || !node->parent || + !of_device_is_compatible(node->parent, "mti,gic"))) + return; + + if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) { + pr_err("GIC frequency not specified.\n"); + return; + } + gic_timer_irq = irq_of_parse_and_map(node, 0); + if (!gic_timer_irq) { + pr_err("GIC timer IRQ not specified.\n"); + return; + } + + __gic_clocksource_init(); +} +CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer", + gic_clocksource_of_init); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 9bc2720628a4..91ebe282b106 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -197,7 +197,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); if (ret) { - pr_err("%s: Failed to allocate resources\n: %d", __func__, ret); + pr_err("%s: Failed to allocate resources: %d\n", __func__, ret); return ret; } diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c index 7bb9d65d9a2c..e5541117b3e9 100644 --- a/drivers/dma-buf/fence.c +++ b/drivers/dma-buf/fence.c @@ -283,7 +283,7 @@ EXPORT_SYMBOL(fence_add_callback); * @cb: [in] the callback to remove * * Remove a previously queued callback from the fence. This function returns - * true if the callback is succesfully removed, or false if the fence has + * true if the callback is successfully removed, or false if the fence has * already been signaled. * * *WARNING*: diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index de469821bc1b..f2b2c4e87aef 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -107,6 +107,13 @@ config AT_HDMAC help Support the Atmel AHB DMA controller. +config AT_XDMAC + tristate "Atmel XDMA support" + depends on ARCH_AT91 + select DMA_ENGINE + help + Support the Atmel XDMA controller. + config FSL_DMA tristate "Freescale Elo series DMA support" depends on FSL_SOC @@ -395,12 +402,12 @@ config XILINX_VDMA config DMA_SUN6I tristate "Allwinner A31 SoCs DMA support" - depends on MACH_SUN6I || COMPILE_TEST + depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST depends on RESET_CONTROLLER select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Support for the DMA engine for Allwinner A31 SoCs. + Support for the DMA engine first found in Allwinner A31 SoCs. 
config NBPFAXI_DMA tristate "Renesas Type-AXI NBPF DMA support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index cb626c179911..2022b5451377 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o +obj-$(CONFIG_AT_XDMAC) += at_xdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_SH_DMAE_BASE) += sh/ diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e34024b000a4..1364d00881dd 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2164,7 +2164,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) __func__, ret); goto out_no_memcpy; } - pl08x->memcpy.chancnt = ret; /* Register slave channels */ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, @@ -2175,7 +2174,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) __func__, ret); goto out_no_slave; } - pl08x->slave.chancnt = ret; ret = dma_async_device_register(&pl08x->memcpy); if (ret) { diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c new file mode 100644 index 000000000000..b60d77a22df6 --- /dev/null +++ b/drivers/dma/at_xdmac.c @@ -0,0 +1,1524 @@ +/* + * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems) + * + * Copyright (C) 2014 Atmel Corporation + * + * Author: Ludovic Desroches <ludovic.desroches@atmel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <asm/barrier.h> +#include <dt-bindings/dma/at91.h> +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dmapool.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of_dma.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm.h> + +#include "dmaengine.h" + +/* Global registers */ +#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */ +#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */ +#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */ +#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */ +#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */ +#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */ +#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */ +#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */ +#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */ +#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */ +#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */ +#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */ +#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */ +#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */ +#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */ +#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */ +#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */ +#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */ +#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */ +#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */ +#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */ + +/* Channel relative registers offsets */ +#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */ +#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */ +#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */ +#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */ +#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */ +#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */ +#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */ +#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */ +#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */ +#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */ +#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */ +#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */ +#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */ +#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */ +#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */ +#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */ +#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */ +#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */ +#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */ +#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */ +#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */ +#define 
AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */ +#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */ +#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */ +#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */ +#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */ +#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */ +#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */ +#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */ +#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */ +#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */ +#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */ +#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */ +#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */ +#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */ +#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */ +#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */ +#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */ +#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ +#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ +#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ +#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ +#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ +#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ +#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */ +#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */ +#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */ +#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */ +#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */ +#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */ +#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */ +#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1) +#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1) +#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1) +#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1) +#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1) +#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */ +#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4) +#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4) +#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */ +#define AT_XDMAC_CC_PROT_SEC (0x0 << 5) +#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5) +#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */ +#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6) +#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6) +#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */ +#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7) +#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7) +#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */ +#define AT_XDMAC_CC_DWIDTH_OFFSET 11 +#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET) +#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */ +#define AT_XDMAC_CC_DWIDTH_BYTE 0x0 +#define 
AT_XDMAC_CC_DWIDTH_HALFWORD 0x1 +#define AT_XDMAC_CC_DWIDTH_WORD 0x2 +#define AT_XDMAC_CC_DWIDTH_DWORD 0x3 +#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */ +#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */ +#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */ +#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16) +#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16) +#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16) +#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16) +#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */ +#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18) +#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18) +#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18) +#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18) +#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */ +#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21) +#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21) +#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */ +#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22) +#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22) +#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ +#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) +#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) +#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */ +#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ +#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ +#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ + +#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */ + +/* Microblock control members */ +#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */ +#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */ +#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */ +#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */ +#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */ +#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */ +#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */ +#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ + +#define AT_XDMAC_MAX_CHAN 0x20 + +enum atc_status { + AT_XDMAC_CHAN_IS_CYCLIC = 0, + AT_XDMAC_CHAN_IS_PAUSED, +}; + +/* ----- Channels ----- */ +struct at_xdmac_chan { + struct dma_chan chan; + void __iomem *ch_regs; + u32 mask; /* Channel Mask */ + u32 cfg[3]; /* Channel Configuration Register */ + #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */ + #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predefined dev to mem channel conf */ + #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predefined mem to dev channel conf */ + u8 perid; /* Peripheral ID */ + u8 perif; /* Peripheral Interface */ + u8 memif; /* Memory Interface */ + u32 per_src_addr; + u32 per_dst_addr; + u32 save_cim; + u32 save_cnda; + u32 save_cndc; + unsigned long status; + struct tasklet_struct tasklet; + + spinlock_t lock; + + struct list_head xfers_list; + struct list_head free_descs_list; +}; + + +/* ----- Controller ----- */ +struct at_xdmac { + struct dma_device dma; + void __iomem *regs; + int irq; + struct clk *clk; + u32 save_gim; + u32 save_gs; + struct dma_pool *at_xdmac_desc_pool; + struct at_xdmac_chan chan[0]; +}; + + +/* ----- Descriptors ----- */ + +/* Linked List Descriptor */ +struct at_xdmac_lld {
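+ /*
+ * This mirrors the hardware descriptor "view 2" layout (NDA, UBC,
+ * SA, DA, CFG). Slave transfers make the hardware fetch only the
+ * view 1 subset; memcpy fetches mbr_cfg as well, matching the
+ * NDVIEW values programmed in AT_XDMAC_CNDC below.
+ */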
dma_addr_t mbr_nda; /* Next Descriptor Member */ + u32 mbr_ubc; /* Microblock Control Member */ + dma_addr_t mbr_sa; /* Source Address Member */ + dma_addr_t mbr_da; /* Destination Address Member */ + u32 mbr_cfg; /* Configuration Register */ +}; + + +struct at_xdmac_desc { + struct at_xdmac_lld lld; + enum dma_transfer_direction direction; + struct dma_async_tx_descriptor tx_dma_desc; + struct list_head desc_node; + /* Following members are only used by the first descriptor */ + bool active_xfer; + unsigned int xfer_size; + struct list_head descs_list; + struct list_head xfer_node; +}; + +static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) +{ + return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40); +} + +#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg)) +#define at_xdmac_write(atxdmac, reg, value) \ + writel_relaxed((value), (atxdmac)->regs + (reg)) + +#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg)) +#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg)) + +static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan) +{ + return container_of(dchan, struct at_xdmac_chan, chan); +} + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev) +{ + return container_of(ddev, struct at_xdmac, dma); +} + +static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct at_xdmac_desc, tx_dma_desc); +} + +static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan) +{ + return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); +} + +static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan) +{ + return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); +} + +static inline int at_xdmac_csize(u32 maxburst) +{ + int csize; + + csize = ffs(maxburst) - 1; + if (csize > 4) + csize = -EINVAL; + + return csize; +}; + +static inline u8 at_xdmac_get_dwidth(u32 cfg) +{ + return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET; +}; + +static unsigned int init_nr_desc_per_channel = 64; +module_param(init_nr_desc_per_channel, uint, 0644); +MODULE_PARM_DESC(init_nr_desc_per_channel, + "initial descriptors per channel (default: 64)"); + + +static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan) +{ + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + + /* GS is a global register, so read it via the controller base. */ + return at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask; +} + +static void at_xdmac_off(struct at_xdmac *atxdmac) +{ + at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); + + /* Wait until all channels are disabled. */ + while (at_xdmac_read(atxdmac, AT_XDMAC_GS)) + cpu_relax(); + + at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); +} + +/* Call with lock held. */ +static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, + struct at_xdmac_desc *first) +{ + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + u32 reg; + + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); + + if (at_xdmac_chan_is_enabled(atchan)) + return; + + /* Mark the transfer active so we don't try to start it again. */ + first->active_xfer = true; + + /* Tell xdmac where to get the first descriptor.
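+ * AT_XDMAC_CNDA_NDA() masks off the low two bits of the
+ * descriptor's bus address (descriptors are at least word
+ * aligned) and AT_XDMAC_CNDA_NDAIF() selects the interface the
+ * controller uses to fetch it.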
*/ + reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys) + | AT_XDMAC_CNDA_NDAIF(atchan->memif); + at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg); + + /* + * When doing memory to memory transfer we need to use the next + * descriptor view 2 since some fields of the configuration register + * depend on transfer size and src/dest addresses. + */ + if (is_slave_direction(first->direction)) { + reg = AT_XDMAC_CNDC_NDVIEW_NDV1; + if (first->direction == DMA_MEM_TO_DEV) + atchan->cfg[AT_XDMAC_CUR_CFG] = + atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + else + atchan->cfg[AT_XDMAC_CUR_CFG] = + atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; + at_xdmac_chan_write(atchan, AT_XDMAC_CC, + atchan->cfg[AT_XDMAC_CUR_CFG]); + } else { + /* + * No need to write AT_XDMAC_CC reg, it will be done when the + * descriptor is fetched. + */ + reg = AT_XDMAC_CNDC_NDVIEW_NDV2; + } + + reg |= AT_XDMAC_CNDC_NDDUP + | AT_XDMAC_CNDC_NDSUP + | AT_XDMAC_CNDC_NDE; + at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg); + + dev_vdbg(chan2dev(&atchan->chan), + "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", + __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC), + at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), + at_xdmac_chan_read(atchan, AT_XDMAC_CSA), + at_xdmac_chan_read(atchan, AT_XDMAC_CDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); + + at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff); + reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE; + /* + * There is no end of list when doing cyclic DMA, so we need an + * interrupt after each period. + */ + if (at_xdmac_chan_is_cyclic(atchan)) + at_xdmac_chan_write(atchan, AT_XDMAC_CIE, + reg | AT_XDMAC_CIE_BIE); + else + at_xdmac_chan_write(atchan, AT_XDMAC_CIE, + reg | AT_XDMAC_CIE_LIE); + at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); + dev_vdbg(chan2dev(&atchan->chan), + "%s: enable channel (0x%08x)\n", __func__, atchan->mask); + wmb(); + at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); + + dev_vdbg(chan2dev(&atchan->chan), + "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", + __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC), + at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), + at_xdmac_chan_read(atchan, AT_XDMAC_CSA), + at_xdmac_chan_read(atchan, AT_XDMAC_CDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); + +} + +static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct at_xdmac_desc *desc = txd_to_at_desc(tx); + struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&atchan->lock); + cookie = dma_cookie_assign(tx); + + dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", + __func__, atchan, desc); + list_add_tail(&desc->xfer_node, &atchan->xfers_list); + if (list_is_singular(&atchan->xfers_list)) + at_xdmac_start_xfer(atchan, desc); + + spin_unlock_bh(&atchan->lock); + return cookie; +} + +static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, + gfp_t gfp_flags) +{ + struct at_xdmac_desc *desc; + struct at_xdmac *atxdmac = to_at_xdmac(chan->device); + dma_addr_t phys; + + desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); + if (desc) { + memset(desc, 0, sizeof(*desc)); + INIT_LIST_HEAD(&desc->descs_list); + dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); + desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; + desc->tx_dma_desc.phys = phys; + } + + return desc; +} + +/* Call must be
protected by lock. */ +static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) +{ + struct at_xdmac_desc *desc; + + if (list_empty(&atchan->free_descs_list)) { + desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); + } else { + desc = list_first_entry(&atchan->free_descs_list, + struct at_xdmac_desc, desc_node); + list_del(&desc->desc_node); + desc->active_xfer = false; + } + + return desc; +} + +static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec, + struct of_dma *of_dma) +{ + struct at_xdmac *atxdmac = of_dma->of_dma_data; + struct at_xdmac_chan *atchan; + struct dma_chan *chan; + struct device *dev = atxdmac->dma.dev; + + if (dma_spec->args_count != 1) { + dev_err(dev, "dma phandle args: bad number of args\n"); + return NULL; + } + + chan = dma_get_any_slave_channel(&atxdmac->dma); + if (!chan) { + dev_err(dev, "can't get a dma channel\n"); + return NULL; + } + + atchan = to_at_xdmac_chan(chan); + atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); + atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); + atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); + dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n", + atchan->memif, atchan->perif, atchan->perid); + + return chan; +} + +static int at_xdmac_set_slave_config(struct dma_chan *chan, + struct dma_slave_config *sconfig) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + u8 dwidth; + int csize; + + atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_INCREMENTED_AM + | AT_XDMAC_CC_SAM_FIXED_AM + | AT_XDMAC_CC_DIF(atchan->memif) + | AT_XDMAC_CC_SIF(atchan->perif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_PER2MEM + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = at_xdmac_csize(sconfig->src_maxburst); + if (csize < 0) { + dev_err(chan2dev(chan), "invalid src maxburst value\n"); + return -EINVAL; + } + atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(sconfig->src_addr_width) - 1; + atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); + + + atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_FIXED_AM + | AT_XDMAC_CC_SAM_INCREMENTED_AM + | AT_XDMAC_CC_DIF(atchan->perif) + | AT_XDMAC_CC_SIF(atchan->memif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_MEM2PER + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = at_xdmac_csize(sconfig->dst_maxburst); + if (csize < 0) { + dev_err(chan2dev(chan), "invalid dst maxburst value\n"); + return -EINVAL; + } + atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(sconfig->dst_addr_width) - 1; + atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); + + /* Src and dst addr are needed to configure the link list descriptor.
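+ * They are latched here and copied into mbr_sa/mbr_da when
+ * prep_slave_sg() or prep_dma_cyclic() builds the descriptors.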
*/ + atchan->per_src_addr = sconfig->src_addr; + atchan->per_dst_addr = sconfig->dst_addr; + + dev_dbg(chan2dev(chan), + "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n", + __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG], + atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG], + atchan->per_src_addr, atchan->per_dst_addr); + + return 0; +} + +static struct dma_async_tx_descriptor * +at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *first = NULL, *prev = NULL; + struct scatterlist *sg; + int i; + u32 cfg; + unsigned int xfer_size = 0; + + if (!sgl) + return NULL; + + if (!is_slave_direction(direction)) { + dev_err(chan2dev(chan), "invalid DMA direction\n"); + return NULL; + } + + dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n", + __func__, sg_len, + direction == DMA_MEM_TO_DEV ? "to device" : "from device", + flags); + + /* Protect dma_sconfig field that can be modified by set_slave_conf. */ + spin_lock_bh(&atchan->lock); + + /* Prepare descriptors. */ + for_each_sg(sgl, sg, sg_len, i) { + struct at_xdmac_desc *desc = NULL; + u32 len, mem; + + len = sg_dma_len(sg); + mem = sg_dma_address(sg); + if (unlikely(!len)) { + dev_err(chan2dev(chan), "sg data length is zero\n"); + spin_unlock_bh(&atchan->lock); + return NULL; + } + dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", + __func__, i, len, mem); + + desc = at_xdmac_get_desc(atchan); + if (!desc) { + dev_err(chan2dev(chan), "can't get descriptor\n"); + if (first) + list_splice_init(&first->descs_list, &atchan->free_descs_list); + spin_unlock_bh(&atchan->lock); + return NULL; + } + + /* Linked list descriptor setup. */ + if (direction == DMA_DEV_TO_MEM) { + desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_da = mem; + cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; + } else { + desc->lld.mbr_sa = mem; + desc->lld.mbr_da = atchan->per_dst_addr; + cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + } + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */ + | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ + | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ + | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */ + | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */ + dev_dbg(chan2dev(chan), + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", + __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); + + /* Chain lld. 
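+ * Point the previous descriptor's mbr_nda at this one's bus
+ * address; the controller follows this chain when NDE is set
+ * in mbr_ubc.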
*/ + if (prev) { + prev->lld.mbr_nda = desc->tx_dma_desc.phys; + dev_dbg(chan2dev(chan), + "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", + __func__, prev, &prev->lld.mbr_nda); + } + + prev = desc; + if (!first) + first = desc; + + dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", + __func__, desc, first); + list_add_tail(&desc->desc_node, &first->descs_list); + xfer_size += len; + } + + spin_unlock_bh(&atchan->lock); + + first->tx_dma_desc.flags = flags; + first->xfer_size = xfer_size; + first->direction = direction; + + return &first->tx_dma_desc; +} + +static struct dma_async_tx_descriptor * +at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *first = NULL, *prev = NULL; + unsigned int periods = buf_len / period_len; + int i; + u32 cfg; + + dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", + __func__, &buf_addr, buf_len, period_len, + direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags); + + if (!is_slave_direction(direction)) { + dev_err(chan2dev(chan), "invalid DMA direction\n"); + return NULL; + } + + if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { + dev_err(chan2dev(chan), "channel currently used\n"); + return NULL; + } + + for (i = 0; i < periods; i++) { + struct at_xdmac_desc *desc = NULL; + + spin_lock_bh(&atchan->lock); + desc = at_xdmac_get_desc(atchan); + if (!desc) { + dev_err(chan2dev(chan), "can't get descriptor\n"); + if (first) + list_splice_init(&first->descs_list, &atchan->free_descs_list); + spin_unlock_bh(&atchan->lock); + return NULL; + } + spin_unlock_bh(&atchan->lock); + dev_dbg(chan2dev(chan), + "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", + __func__, desc, &desc->tx_dma_desc.phys); + + if (direction == DMA_DEV_TO_MEM) { + desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_da = buf_addr + i * period_len; + cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; + } else { + desc->lld.mbr_sa = buf_addr + i * period_len; + desc->lld.mbr_da = atchan->per_dst_addr; + cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + } + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 + | AT_XDMAC_MBR_UBC_NDEN + | AT_XDMAC_MBR_UBC_NSEN + | AT_XDMAC_MBR_UBC_NDE + | period_len >> at_xdmac_get_dwidth(cfg); + + dev_dbg(chan2dev(chan), + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", + __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); + + /* Chain lld. 
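+ * Chained as in the slave case; after the loop the tail's
+ * mbr_nda is pointed back at the first descriptor to close
+ * the ring.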
*/ + if (prev) { + prev->lld.mbr_nda = desc->tx_dma_desc.phys; + dev_dbg(chan2dev(chan), + "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", + __func__, prev, &prev->lld.mbr_nda); + } + + prev = desc; + if (!first) + first = desc; + + dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", + __func__, desc, first); + list_add_tail(&desc->desc_node, &first->descs_list); + } + + prev->lld.mbr_nda = first->tx_dma_desc.phys; + dev_dbg(chan2dev(chan), + "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", + __func__, prev, &prev->lld.mbr_nda); + first->tx_dma_desc.flags = flags; + first->xfer_size = buf_len; + first->direction = direction; + + return &first->tx_dma_desc; +} + +static struct dma_async_tx_descriptor * +at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *first = NULL, *prev = NULL; + size_t remaining_size = len, xfer_size = 0, ublen; + dma_addr_t src_addr = src, dst_addr = dest; + u32 dwidth; + /* + * WARNING: We don't know the direction, it involves we can't + * dynamically set the source and dest interface so we have to use the + * same one. Only interface 0 allows EBI access. Hopefully we can + * access DDR through both ports (at least on SAMA5D4x), so we can use + * the same interface for source and dest, that solves the fact we + * don't know the direction. + */ + u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM + | AT_XDMAC_CC_SAM_INCREMENTED_AM + | AT_XDMAC_CC_DIF(0) + | AT_XDMAC_CC_SIF(0) + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_MEM_TRAN; + + dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", + __func__, &src, &dest, len, flags); + + if (unlikely(!len)) + return NULL; + + /* + * Check address alignment to select the greater data width we can use. + * Some XDMAC implementations don't provide dword transfer, in this + * case selecting dword has the same behavior as selecting word transfers. + */ + if (!((src_addr | dst_addr) & 7)) { + dwidth = AT_XDMAC_CC_DWIDTH_DWORD; + dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__); + } else if (!((src_addr | dst_addr) & 3)) { + dwidth = AT_XDMAC_CC_DWIDTH_WORD; + dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__); + } else if (!((src_addr | dst_addr) & 1)) { + dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD; + dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__); + } else { + dwidth = AT_XDMAC_CC_DWIDTH_BYTE; + dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__); + } + + /* Prepare descriptors. */ + while (remaining_size) { + struct at_xdmac_desc *desc = NULL; + + dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); + + spin_lock_bh(&atchan->lock); + desc = at_xdmac_get_desc(atchan); + spin_unlock_bh(&atchan->lock); + if (!desc) { + dev_err(chan2dev(chan), "can't get descriptor\n"); + if (first) + list_splice_init(&first->descs_list, &atchan->free_descs_list); + return NULL; + } + + /* Update src and dest addresses. */ + src_addr += xfer_size; + dst_addr += xfer_size; + + if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth) + xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth; + else + xfer_size = remaining_size; + + dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size); + + /* Check remaining length and change data width if needed. 
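+ * For example, a final 6-byte chunk at 8-byte-aligned
+ * addresses fails both the dword and word tests (6 & 7 and
+ * 6 & 3 are non-zero) and drops to half-word width.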
*/ + if (!((src_addr | dst_addr | xfer_size) & 7)) { + dwidth = AT_XDMAC_CC_DWIDTH_DWORD; + dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__); + } else if (!((src_addr | dst_addr | xfer_size) & 3)) { + dwidth = AT_XDMAC_CC_DWIDTH_WORD; + dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__); + } else if (!((src_addr | dst_addr | xfer_size) & 1)) { + dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD; + dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__); + } else if ((src_addr | dst_addr | xfer_size) & 1) { + dwidth = AT_XDMAC_CC_DWIDTH_BYTE; + dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__); + } + chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); + + ublen = xfer_size >> dwidth; + remaining_size -= xfer_size; + + desc->lld.mbr_sa = src_addr; + desc->lld.mbr_da = dst_addr; + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 + | AT_XDMAC_MBR_UBC_NDEN + | AT_XDMAC_MBR_UBC_NSEN + | (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0) + | ublen; + desc->lld.mbr_cfg = chan_cc; + + dev_dbg(chan2dev(chan), + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); + + /* Chain lld. */ + if (prev) { + prev->lld.mbr_nda = desc->tx_dma_desc.phys; + dev_dbg(chan2dev(chan), + "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n", + __func__, prev, prev->lld.mbr_nda); + } + + prev = desc; + if (!first) + first = desc; + + dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", + __func__, desc, first); + list_add_tail(&desc->desc_node, &first->descs_list); + } + + first->tx_dma_desc.flags = flags; + first->xfer_size = len; + + return &first->tx_dma_desc; +} + +static enum dma_status +at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + struct at_xdmac_desc *desc, *_desc; + struct list_head *descs_list; + enum dma_status ret; + int residue; + u32 cur_nda, mask, value; + u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]); + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (!txstate) + return ret; + + spin_lock_bh(&atchan->lock); + + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); + + /* + * If the transfer has not been started yet, don't need to compute the + * residue, it's the transfer length. + */ + if (!desc->active_xfer) { + dma_set_residue(txstate, desc->xfer_size); + spin_unlock_bh(&atchan->lock); + return ret; + } + + residue = desc->xfer_size; + /* + * Flush FIFO: only relevant when the transfer is source peripheral + * synchronized. + */ + mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; + value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; + if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) { + at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); + while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) + cpu_relax(); + } + + cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + /* + * Remove size of all microblocks already transferred and the current + * one. Then add the remaining size to transfer of the current + * microblock. 
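+ * In effect: residue = xfer_size - sum(UBLEN << dwidth) over the
+ * descriptors up to and including the currently executing one
+ * (the one whose mbr_nda matches CNDA), plus CUBC << dwidth.
+ * UBLEN is the low 24 bits of mbr_ubc; CUBC counts the data not
+ * yet transferred for the current microblock.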
+ */ + descs_list = &desc->descs_list; + list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { + residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; + if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) + break; + } + residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; + + spin_unlock_bh(&atchan->lock); + + dma_set_residue(txstate, residue); + + dev_dbg(chan2dev(chan), + "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", + __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); + + return ret; +} + +/* Caller must hold the channel lock. */ +static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, + struct at_xdmac_desc *desc) +{ + dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); + + /* + * Remove the transfer from the transfer list then move the transfer + * descriptors into the free descriptors list. + */ + list_del(&desc->xfer_node); + list_splice_init(&desc->descs_list, &atchan->free_descs_list); +} + +static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) +{ + struct at_xdmac_desc *desc; + + spin_lock_bh(&atchan->lock); + + /* + * If the channel is enabled, do nothing: advance_work will be + * triggered after the interrupt. + */ + if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { + desc = list_first_entry(&atchan->xfers_list, + struct at_xdmac_desc, + xfer_node); + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); + if (!desc->active_xfer) + at_xdmac_start_xfer(atchan, desc); + } + + spin_unlock_bh(&atchan->lock); +} + +static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) +{ + struct at_xdmac_desc *desc; + struct dma_async_tx_descriptor *txd; + + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); + txd = &desc->tx_dma_desc; + + if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) + txd->callback(txd->callback_param); +} + +static void at_xdmac_tasklet(unsigned long data) +{ + struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data; + struct at_xdmac_desc *desc; + u32 error_mask; + + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", + __func__, atchan->status); + + error_mask = AT_XDMAC_CIS_RBEIS + | AT_XDMAC_CIS_WBEIS + | AT_XDMAC_CIS_ROIS; + + if (at_xdmac_chan_is_cyclic(atchan)) { + at_xdmac_handle_cyclic(atchan); + } else if ((atchan->status & AT_XDMAC_CIS_LIS) + || (atchan->status & error_mask)) { + struct dma_async_tx_descriptor *txd; + + if (atchan->status & AT_XDMAC_CIS_RBEIS) + dev_err(chan2dev(&atchan->chan), "read bus error!!!"); + if (atchan->status & AT_XDMAC_CIS_WBEIS) + dev_err(chan2dev(&atchan->chan), "write bus error!!!"); + if (atchan->status & AT_XDMAC_CIS_ROIS) + dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); + + spin_lock_bh(&atchan->lock); + desc = list_first_entry(&atchan->xfers_list, + struct at_xdmac_desc, + xfer_node); + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); + BUG_ON(!desc->active_xfer); + + txd = &desc->tx_dma_desc; + + at_xdmac_remove_xfer(atchan, desc); + spin_unlock_bh(&atchan->lock); + + if (!at_xdmac_chan_is_cyclic(atchan)) { + dma_cookie_complete(txd); + if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) + txd->callback(txd->callback_param); + } + + dma_run_dependencies(txd); + + at_xdmac_advance_work(atchan); + } +} + +static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) +{ + struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id; + struct at_xdmac_chan *atchan; + u32 imr, status, pending; + u32 chan_imr, 
chan_status; + int i, ret = IRQ_NONE; + + do { + imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM); + status = at_xdmac_read(atxdmac, AT_XDMAC_GIS); + pending = status & imr; + + dev_vdbg(atxdmac->dma.dev, + "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n", + __func__, status, imr, pending); + + if (!pending) + break; + + /* We have to find which channel has generated the interrupt. */ + for (i = 0; i < atxdmac->dma.chancnt; i++) { + if (!((1 << i) & pending)) + continue; + + atchan = &atxdmac->chan[i]; + chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); + chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); + atchan->status = chan_status & chan_imr; + dev_vdbg(atxdmac->dma.dev, + "%s: chan%d: imr=0x%x, status=0x%x\n", + __func__, i, chan_imr, chan_status); + dev_vdbg(chan2dev(&atchan->chan), + "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", + __func__, + at_xdmac_chan_read(atchan, AT_XDMAC_CC), + at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), + at_xdmac_chan_read(atchan, AT_XDMAC_CSA), + at_xdmac_chan_read(atchan, AT_XDMAC_CDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); + + if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); + + tasklet_schedule(&atchan->tasklet); + ret = IRQ_HANDLED; + } + + } while (pending); + + return ret; +} + +static void at_xdmac_issue_pending(struct dma_chan *chan) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + + dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); + + if (!at_xdmac_chan_is_cyclic(atchan)) + at_xdmac_advance_work(atchan); + + return; +} + +static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct at_xdmac_desc *desc, *_desc; + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + int ret = 0; + + dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd); + + spin_lock_bh(&atchan->lock); + + switch (cmd) { + case DMA_PAUSE: + at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); + set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); + break; + + case DMA_RESUME: + if (!at_xdmac_chan_is_paused(atchan)) + break; + + at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); + clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); + break; + + case DMA_TERMINATE_ALL: + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); + while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) + cpu_relax(); + + /* Cancel all pending transfers. 
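+ * The GD write above has already disabled the channel and the GS poll + * guaranteed it went idle, so the descriptors can be moved back to the + * free list without racing the hardware. 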
*/ + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) + at_xdmac_remove_xfer(atchan, desc); + + clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); + break; + + case DMA_SLAVE_CONFIG: + ret = at_xdmac_set_slave_config(chan, + (struct dma_slave_config *)arg); + break; + + default: + dev_err(chan2dev(chan), + "unmanaged or unknown dma control cmd: %d\n", cmd); + ret = -ENXIO; + } + + spin_unlock_bh(&atchan->lock); + + return ret; +} + +static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *desc; + int i; + + spin_lock_bh(&atchan->lock); + + if (at_xdmac_chan_is_enabled(atchan)) { + dev_err(chan2dev(chan), + "can't allocate channel resources (channel enabled)\n"); + i = -EIO; + goto spin_unlock; + } + + if (!list_empty(&atchan->free_descs_list)) { + dev_err(chan2dev(chan), + "can't allocate channel resources (channel not free from a previous use)\n"); + i = -EIO; + goto spin_unlock; + } + + for (i = 0; i < init_nr_desc_per_channel; i++) { + desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC); + if (!desc) { + dev_warn(chan2dev(chan), + "only %d descriptors have been allocated\n", i); + break; + } + list_add_tail(&desc->desc_node, &atchan->free_descs_list); + } + + dma_cookie_init(chan); + + dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); + +spin_unlock: + spin_unlock_bh(&atchan->lock); + return i; +} + +static void at_xdmac_free_chan_resources(struct dma_chan *chan) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(chan->device); + struct at_xdmac_desc *desc, *_desc; + + list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { + dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc); + list_del(&desc->desc_node); + dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); + } + + return; +} + +#define AT_XDMAC_DMA_BUSWIDTHS\ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int at_xdmac_device_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + + caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; + caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = true; + caps->cmd_terminate = true; + caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + return 0; +} + +#ifdef CONFIG_PM +static int atmel_xdmac_prepare(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct at_xdmac *atxdmac = platform_get_drvdata(pdev); + struct dma_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + + /* Wait for transfer completion, except in cyclic case. 
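+ * Returning -EAGAIN aborts this suspend attempt so that it can be retried + * once the transfer has completed. Cyclic transfers never complete on their + * own, so they are skipped here and paused in the suspend callback instead. 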
*/ + if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan)) + return -EAGAIN; + } + return 0; +} +#else +# define atmel_xdmac_prepare NULL +#endif + +#ifdef CONFIG_PM_SLEEP +static int atmel_xdmac_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct at_xdmac *atxdmac = platform_get_drvdata(pdev); + struct dma_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + + if (at_xdmac_chan_is_cyclic(atchan)) { + if (!at_xdmac_chan_is_paused(atchan)) + at_xdmac_control(chan, DMA_PAUSE, 0); + atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); + atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); + atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); + } + } + atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); + + at_xdmac_off(atxdmac); + clk_disable_unprepare(atxdmac->clk); + return 0; +} + +static int atmel_xdmac_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct at_xdmac *atxdmac = platform_get_drvdata(pdev); + struct at_xdmac_chan *atchan; + struct dma_chan *chan, *_chan; + int i; + u32 cfg; + + clk_prepare_enable(atxdmac->clk); + + /* Clear pending interrupts. */ + for (i = 0; i < atxdmac->dma.chancnt; i++) { + atchan = &atxdmac->chan[i]; + while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) + cpu_relax(); + } + + at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); + at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); + list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { + atchan = to_at_xdmac_chan(chan); + cfg = atchan->cfg[AT_XDMAC_CUR_CFG]; + at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg); + if (at_xdmac_chan_is_cyclic(atchan)) { + at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); + at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); + at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); + wmb(); + at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); + } + } + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static int at_xdmac_probe(struct platform_device *pdev) +{ + struct resource *res; + struct at_xdmac *atxdmac; + int irq, size, nr_channels, i, ret; + void __iomem *base; + u32 reg; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + /* + * Read number of xdmac channels, read helper function can't be used + * since atxdmac is not yet allocated and we need to know the number + * of channels to do the allocation. 
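+ * AT_XDMAC_NB_CH() extracts that count from the GTYPE register, read with + * a plain readl_relaxed() on the freshly mapped base. 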
+ */ + reg = readl_relaxed(base + AT_XDMAC_GTYPE); + nr_channels = AT_XDMAC_NB_CH(reg); + if (nr_channels > AT_XDMAC_MAX_CHAN) { + dev_err(&pdev->dev, "invalid number of channels (%u)\n", + nr_channels); + return -EINVAL; + } + + size = sizeof(*atxdmac); + size += nr_channels * sizeof(struct at_xdmac_chan); + atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!atxdmac) { + dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); + return -ENOMEM; + } + + atxdmac->regs = base; + atxdmac->irq = irq; + + atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); + if (IS_ERR(atxdmac->clk)) { + dev_err(&pdev->dev, "can't get dma_clk\n"); + return PTR_ERR(atxdmac->clk); + } + + /* Do not use dev res to prevent races with tasklet */ + ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); + if (ret) { + dev_err(&pdev->dev, "can't request irq\n"); + return ret; + } + + ret = clk_prepare_enable(atxdmac->clk); + if (ret) { + dev_err(&pdev->dev, "can't prepare or enable clock\n"); + goto err_free_irq; + } + + atxdmac->at_xdmac_desc_pool = + dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, + sizeof(struct at_xdmac_desc), 4, 0); + if (!atxdmac->at_xdmac_desc_pool) { + dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); + ret = -ENOMEM; + goto err_clk_disable; + } + + dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); + dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); + dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); + /* + * Without DMA_PRIVATE the driver is not able to allocate more than + * one channel, second allocation fails in private_candidate. + */ + dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); + atxdmac->dma.dev = &pdev->dev; + atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; + atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; + atxdmac->dma.device_tx_status = at_xdmac_tx_status; + atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; + atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; + atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; + atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; + atxdmac->dma.device_control = at_xdmac_control; + atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps; + + /* Disable all chans and interrupts. */ + at_xdmac_off(atxdmac); + + /* Init channels. */ + INIT_LIST_HEAD(&atxdmac->dma.channels); + for (i = 0; i < nr_channels; i++) { + struct at_xdmac_chan *atchan = &atxdmac->chan[i]; + + atchan->chan.device = &atxdmac->dma; + list_add_tail(&atchan->chan.device_node, + &atxdmac->dma.channels); + + atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); + atchan->mask = 1 << i; + + spin_lock_init(&atchan->lock); + INIT_LIST_HEAD(&atchan->xfers_list); + INIT_LIST_HEAD(&atchan->free_descs_list); + tasklet_init(&atchan->tasklet, at_xdmac_tasklet, + (unsigned long)atchan); + + /* Clear pending interrupts. 
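+ * This relies on CIS being clear-on-read: the loop spins until no stale + * status, e.g. left over from the bootloader, remains. 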
*/ + while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) + cpu_relax(); + } + platform_set_drvdata(pdev, atxdmac); + + ret = dma_async_device_register(&atxdmac->dma); + if (ret) { + dev_err(&pdev->dev, "fail to register DMA engine device\n"); + goto err_clk_disable; + } + + ret = of_dma_controller_register(pdev->dev.of_node, + at_xdmac_xlate, atxdmac); + if (ret) { + dev_err(&pdev->dev, "could not register of dma controller\n"); + goto err_dma_unregister; + } + + dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", + nr_channels, atxdmac->regs); + + return 0; + +err_dma_unregister: + dma_async_device_unregister(&atxdmac->dma); +err_clk_disable: + clk_disable_unprepare(atxdmac->clk); +err_free_irq: + free_irq(atxdmac->irq, atxdmac->dma.dev); + return ret; +} + +static int at_xdmac_remove(struct platform_device *pdev) +{ + struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); + int i; + + at_xdmac_off(atxdmac); + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&atxdmac->dma); + clk_disable_unprepare(atxdmac->clk); + + synchronize_irq(atxdmac->irq); + + free_irq(atxdmac->irq, atxdmac->dma.dev); + + for (i = 0; i < atxdmac->dma.chancnt; i++) { + struct at_xdmac_chan *atchan = &atxdmac->chan[i]; + + tasklet_kill(&atchan->tasklet); + at_xdmac_free_chan_resources(&atchan->chan); + } + + return 0; +} + +static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = { + .prepare = atmel_xdmac_prepare, + SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume) +}; + +static const struct of_device_id atmel_xdmac_dt_ids[] = { + { + .compatible = "atmel,sama5d4-dma", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids); + +static struct platform_driver at_xdmac_driver = { + .probe = at_xdmac_probe, + .remove = at_xdmac_remove, + .driver = { + .name = "at_xdmac", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), + .pm = &atmel_xdmac_dev_pm_ops, + } +}; + +static int __init at_xdmac_init(void) +{ + return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe); +} +subsys_initcall(at_xdmac_init); + +MODULE_DESCRIPTION("Atmel Extended DMA Controller driver"); +MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 68007974961a..918b7b3f766f 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -525,8 +525,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) vchan_init(&c->vc, &d->ddev); INIT_LIST_HEAD(&c->node); - d->ddev.chancnt++; - c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); c->ch = chan_id; c->irq_number = irq; @@ -694,7 +692,6 @@ static struct platform_driver bcm2835_dma_driver = { .remove = bcm2835_dma_remove, .driver = { .name = "bcm2835-dma", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(bcm2835_dma_of_match), }, }; diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index a58eec3b2cad..b743adf56465 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -1,3 +1,4 @@ +#include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> @@ -567,7 +568,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) reg |= GCR_TEARDOWN; cppi_writel(reg, c->gcr_reg); c->td_queued = 1; - c->td_retry = 100; + c->td_retry = 500; } if (!c->td_seen || !c->td_desc_seen) { @@ -603,12 +604,16 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) * descriptor 
before the TD we fetch it from enqueue, it has to be * there waiting for us. */ - if (!c->td_seen && c->td_retry) + if (!c->td_seen && c->td_retry) { + udelay(1); return -EAGAIN; - + } WARN_ON(!c->td_retry); + if (!c->td_desc_seen) { desc_phys = cppi41_pop_desc(cdd, c->q_num); + if (!desc_phys) + desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); WARN_ON(!desc_phys); } @@ -1088,7 +1093,6 @@ static struct platform_driver cpp41_dma_driver = { .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", - .owner = THIS_MODULE, .pm = &cppi41_pm_ops, .of_match_table = of_match_ptr(cppi41_dma_ids), }, diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index ae2ab14e64b3..bdeafeefa5f6 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -563,10 +563,9 @@ static int jz4740_dma_probe(struct platform_device *pdev) dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; dd->device_control = jz4740_dma_control; dd->dev = &pdev->dev; - dd->chancnt = JZ_DMA_NR_CHANS; INIT_LIST_HEAD(&dd->channels); - for (i = 0; i < dd->chancnt; i++) { + for (i = 0; i < JZ_DMA_NR_CHANS; i++) { chan = &dmadev->chan[i]; chan->id = i; chan->vchan.desc_free = jz4740_dma_desc_free; @@ -608,7 +607,6 @@ static struct platform_driver jz4740_dma_driver = { .remove = jz4740_dma_remove, .driver = { .name = "jz4740-dma", - .owner = THIS_MODULE, }, }; module_platform_driver(jz4740_dma_driver); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 24bfaf0b92ba..e057935e3023 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -330,8 +330,7 @@ static int __init dma_channel_table_init(void) if (err) { pr_err("initialization failure\n"); for_each_dma_cap_mask(cap, dma_cap_mask_all) - if (channel_table[cap]) - free_percpu(channel_table[cap]); + free_percpu(channel_table[cap]); } return err; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 3c5711d5fe97..6fb2e902b459 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -118,17 +118,17 @@ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) struct fsl_edma_hw_tcd { - u32 saddr; - u16 soff; - u16 attr; - u32 nbytes; - u32 slast; - u32 daddr; - u16 doff; - u16 citer; - u32 dlast_sga; - u16 csr; - u16 biter; + __le32 saddr; + __le16 soff; + __le16 attr; + __le32 nbytes; + __le32 slast; + __le32 daddr; + __le16 doff; + __le16 citer; + __le32 dlast_sga; + __le16 csr; + __le16 biter; }; struct fsl_edma_sw_tcd { @@ -175,18 +175,12 @@ struct fsl_edma_engine { }; /* - * R/W functions for big- or little-endian registers - * the eDMA controller's endian is independent of the CPU core's endian. + * R/W functions for big- or little-endian registers: + * The eDMA controller's endian is independent of the CPU core's endian. + * For the big-endian IP module, the offset for 8-bit or 16-bit registers + * should also be swapped opposite to that in little-endian IP. 
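+ * Concretely, the write helpers below XOR the address with 0x3 for 8-bit + * and 0x2 for 16-bit accesses on big-endian controllers, so the access + * still lands on the intended register within its 32-bit word. 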
*/ -static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr) -{ - if (edma->big_endian) - return ioread16be(addr); - else - return ioread16(addr); -} - static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) { if (edma->big_endian) @@ -197,13 +191,18 @@ static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr) { - iowrite8(val, addr); + /* swap the reg offset for these in big-endian mode */ + if (edma->big_endian) + iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); + else + iowrite8(val, addr); } static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr) { + /* swap the reg offset for these in big-endian mode */ if (edma->big_endian) - iowrite16be(val, addr); + iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); else iowrite16(val, addr); } @@ -254,13 +253,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; + slot = EDMAMUX_CHCFG_SOURCE(slot); if (enable) - edma_writeb(fsl_chan->edma, - EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot), - muxaddr + ch_off); + iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); else - edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off); + iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); } static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width) @@ -286,9 +284,8 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc) fsl_desc = to_fsl_edma_desc(vdesc); for (i = 0; i < fsl_desc->n_tcds; i++) - dma_pool_free(fsl_desc->echan->tcd_pool, - fsl_desc->tcd[i].vtcd, - fsl_desc->tcd[i].ptcd); + dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd, + fsl_desc->tcd[i].ptcd); kfree(fsl_desc); } @@ -363,8 +360,8 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, /* calculate the total size in this desc */ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) - len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes)) - * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter)); + len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes) + * le16_to_cpu(edesc->tcd[i].vtcd->biter); if (!in_progress) return len; @@ -376,17 +373,15 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, /* figure out the finished and calculate the residue */ for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { - size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes)) - * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter)); + size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes) + * le16_to_cpu(edesc->tcd[i].vtcd->biter); if (dir == DMA_MEM_TO_DEV) - dma_addr = edma_readl(fsl_chan->edma, - &(edesc->tcd[i].vtcd->saddr)); + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr); else - dma_addr = edma_readl(fsl_chan->edma, - &(edesc->tcd[i].vtcd->daddr)); + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr); len -= size; - if (cur_addr > dma_addr && cur_addr < dma_addr + size) { + if (cur_addr >= dma_addr && cur_addr < dma_addr + size) { len += dma_addr + size - cur_addr; break; } @@ -424,55 +419,67 @@ static enum dma_status fsl_edma_tx_status(struct dma_chan *chan, return fsl_chan->status; } -static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan, - u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes, - u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga, - u16 
csr) +static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, + struct fsl_edma_hw_tcd *tcd) { + struct fsl_edma_engine *edma = fsl_chan->edma; void __iomem *addr = fsl_chan->edma->membase; u32 ch = fsl_chan->vchan.chan.chan_id; /* - * TCD parameters have been swapped in fill_tcd_params(), - * so just write them to registers in the cpu endian here + * TCD parameters are stored in struct fsl_edma_hw_tcd in little + * endian format. However, we need to load the TCD registers in + * big- or little-endian obeying the eDMA engine model endian. */ - writew(0, addr + EDMA_TCD_CSR(ch)); - writel(src, addr + EDMA_TCD_SADDR(ch)); - writel(dst, addr + EDMA_TCD_DADDR(ch)); - writew(attr, addr + EDMA_TCD_ATTR(ch)); - writew(soff, addr + EDMA_TCD_SOFF(ch)); - writel(nbytes, addr + EDMA_TCD_NBYTES(ch)); - writel(slast, addr + EDMA_TCD_SLAST(ch)); - writew(citer, addr + EDMA_TCD_CITER(ch)); - writew(biter, addr + EDMA_TCD_BITER(ch)); - writew(doff, addr + EDMA_TCD_DOFF(ch)); - writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch)); - writew(csr, addr + EDMA_TCD_CSR(ch)); -} - -static void fill_tcd_params(struct fsl_edma_engine *edma, - struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst, - u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer, - u16 biter, u16 doff, u32 dlast_sga, bool major_int, - bool disable_req, bool enable_sg) + edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch)); + edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch)); + edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch)); + + edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch)); + edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch)); + + edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch)); + edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch)); + + edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch)); + edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch)); + edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch)); + + edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch)); + + edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch)); +} + +static inline +void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst, + u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer, + u16 biter, u16 doff, u32 dlast_sga, bool major_int, + bool disable_req, bool enable_sg) { u16 csr = 0; /* - * eDMA hardware SGs require the TCD parameters stored in memory - * the same endian as the eDMA module so that they can be loaded - * automatically by the engine + * eDMA hardware SGs require the TCDs to be stored in little + * endian format irrespective of the register endian model. + * So we put the value in little endian in memory, waiting + * for fsl_edma_set_tcd_regs doing the swap. 
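+ * A TCD built this way can be linked into a hardware scatter/gather chain + * as-is; only the head TCD loaded into the channel registers goes through + * the le*_to_cpu()/edma_write*() conversion in fsl_edma_set_tcd_regs(). 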
*/ - edma_writel(edma, src, &(tcd->saddr)); - edma_writel(edma, dst, &(tcd->daddr)); - edma_writew(edma, attr, &(tcd->attr)); - edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff)); - edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes)); - edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast)); - edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer)); - edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff)); - edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga)); - edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter)); + tcd->saddr = cpu_to_le32(src); + tcd->daddr = cpu_to_le32(dst); + + tcd->attr = cpu_to_le16(attr); + + tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff)); + + tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes)); + tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast)); + + tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer)); + tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff)); + + tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga)); + + tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter)); if (major_int) csr |= EDMA_TCD_CSR_INT_MAJOR; @@ -482,7 +489,7 @@ static void fill_tcd_params(struct fsl_edma_engine *edma, if (enable_sg) csr |= EDMA_TCD_CSR_E_SG; - edma_writew(edma, csr, &(tcd->csr)); + tcd->csr = cpu_to_le16(csr); } static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, @@ -558,9 +565,9 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( doff = fsl_chan->fsc.addr_width; } - fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr, - dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0, - iter, iter, doff, last_sg, true, false, true); + fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr, + fsl_chan->fsc.attr, soff, nbytes, 0, iter, + iter, doff, last_sg, true, false, true); dma_buf_next += period_len; } @@ -607,16 +614,16 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( iter = sg_dma_len(sg) / nbytes; if (i < sg_len - 1) { last_sg = fsl_desc->tcd[(i + 1)].ptcd; - fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, - src_addr, dst_addr, fsl_chan->fsc.attr, - soff, nbytes, 0, iter, iter, doff, last_sg, - false, false, true); + fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, + dst_addr, fsl_chan->fsc.attr, soff, + nbytes, 0, iter, iter, doff, last_sg, + false, false, true); } else { last_sg = 0; - fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, - src_addr, dst_addr, fsl_chan->fsc.attr, - soff, nbytes, 0, iter, iter, doff, last_sg, - true, true, false); + fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, + dst_addr, fsl_chan->fsc.attr, soff, + nbytes, 0, iter, iter, doff, last_sg, + true, true, false); } } @@ -625,17 +632,13 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) { - struct fsl_edma_hw_tcd *tcd; struct virt_dma_desc *vdesc; vdesc = vchan_next_desc(&fsl_chan->vchan); if (!vdesc) return; fsl_chan->edesc = to_fsl_edma_desc(vdesc); - tcd = fsl_chan->edesc->tcd[0].vtcd; - fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr, - tcd->soff, tcd->nbytes, tcd->slast, tcd->citer, - tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr); + fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); fsl_edma_enable_request(fsl_chan); fsl_chan->status = DMA_IN_PROGRESS; } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 994bcb2c6b92..3d8feb5e4c2f 100644 --- a/drivers/dma/fsldma.c +++ 
b/drivers/dma/fsldma.c @@ -1337,7 +1337,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, /* Add the channel to DMA device channel list */ list_add_tail(&chan->common.device_node, &fdev->common.channels); - fdev->common.chancnt++; dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, chan->irq != NO_IRQ ? chan->irq : fdev->irq); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 88afc48c2ca7..d0df198f62e9 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -729,6 +729,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac, case IMX_DMATYPE_CSPI: case IMX_DMATYPE_EXT: case IMX_DMATYPE_SSI: + case IMX_DMATYPE_SAI: per_2_emi = sdma->script_addrs->app_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_app_addr; break; @@ -1287,7 +1288,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) unsigned short *ram_code; if (!fw) { - dev_err(sdma->dev, "firmware not found\n"); + dev_info(sdma->dev, "external firmware not found, using ROM firmware\n"); + /* In this case we just use the ROM firmware. */ return; } @@ -1346,7 +1348,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma, return ret; } -static int __init sdma_init(struct sdma_engine *sdma) +static int sdma_init(struct sdma_engine *sdma) { int i, ret; dma_addr_t ccb_phys; diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 895f869d6c2c..32eae38291e5 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) op = IOAT_OP_XOR; dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dest_dma)) + goto dma_unmap; + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, DMA_PREP_INTERRUPT); @@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto dma_unmap; } - dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < IOAT_NUM_SRC_TEST; i++) dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); @@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) } dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + /* skip validate if the capability is not present */ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) goto free_resources; @@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) xor_val_result = 1; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, &xor_val_result, DMA_PREP_INTERRUPT); @@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) xor_val_result = 0; for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); + if 
(dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, &xor_val_result, DMA_PREP_INTERRUPT); @@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; dma_unmap: if (op == IOAT_OP_XOR) { - dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + if (dest_dma != DMA_ERROR_CODE) + dma_unmap_page(dev, dest_dma, PAGE_SIZE, + DMA_FROM_DEVICE); for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); + if (dma_srcs[i] != DMA_ERROR_CODE) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); } else if (op == IOAT_OP_XOR_VAL) { for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); + if (dma_srcs[i] != DMA_ERROR_CODE) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); } free_resources: dma->device_free_chan_resources(dma_chan); diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index c56137bc3868..263d9f6a207e 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1557,7 +1557,6 @@ static struct platform_driver iop_adma_driver = { .probe = iop_adma_probe, .remove = iop_adma_remove, .driver = { - .owner = THIS_MODULE, .name = "iop-adma", }, }; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a1f911aaf220..a1de14ab2c51 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -722,7 +722,6 @@ static int k3_dma_probe(struct platform_device *op) d->slave.device_issue_pending = k3_dma_issue_pending; d->slave.device_control = k3_dma_control; d->slave.copy_align = DMA_ALIGN; - d->slave.chancnt = d->dma_requests; /* init virtual channel */ d->chans = devm_kzalloc(&op->dev, @@ -787,6 +786,7 @@ static int k3_dma_remove(struct platform_device *op) return 0; } +#ifdef CONFIG_PM_SLEEP static int k3_dma_suspend(struct device *dev) { struct k3_dma_dev *d = dev_get_drvdata(dev); @@ -816,13 +816,13 @@ static int k3_dma_resume(struct device *dev) k3_dma_enable_dma(d, true); return 0; } +#endif static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); static struct platform_driver k3_pdma_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .pm = &k3_dma_pmops, .of_match_table = k3_pdma_dt_ids, }, diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index a1a4db5721b8..8b8952f35e6c 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1098,7 +1098,6 @@ static const struct platform_device_id mmp_pdma_id_table[] = { static struct platform_driver mmp_pdma_driver = { .driver = { .name = "mmp-pdma", - .owner = THIS_MODULE, .of_match_table = mmp_pdma_dt_ids, }, .id_table = mmp_pdma_id_table, diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index c6bd015b7165..bfb46957c3dc 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -703,7 +703,6 @@ static const struct platform_device_id mmp_tdma_id_table[] = { static struct platform_driver mmp_tdma_driver = { .driver = { .name = "mmp-tdma", - .owner = THIS_MODULE, .of_match_table = mmp_tdma_dt_ids, }, .id_table = mmp_tdma_id_table, diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 881db2bcb48b..01bec4023de2 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -885,6 +885,7 @@ static int mpc_dma_probe(struct platform_device *op) struct resource res; ulong regs_start, regs_size; int retval, i; + u8 chancnt; mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), 
GFP_KERNEL); if (!mdma) { @@ -956,10 +957,6 @@ static int mpc_dma_probe(struct platform_device *op) dma = &mdma->dma; dma->dev = dev; - if (mdma->is_mpc8308) - dma->chancnt = MPC8308_DMACHAN_MAX; - else - dma->chancnt = MPC512x_DMACHAN_MAX; dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; dma->device_free_chan_resources = mpc_dma_free_chan_resources; dma->device_issue_pending = mpc_dma_issue_pending; @@ -972,7 +969,12 @@ static int mpc_dma_probe(struct platform_device *op) dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma_cap_set(DMA_SLAVE, dma->cap_mask); - for (i = 0; i < dma->chancnt; i++) { + if (mdma->is_mpc8308) + chancnt = MPC8308_DMACHAN_MAX; + else + chancnt = MPC512x_DMACHAN_MAX; + + for (i = 0; i < chancnt; i++) { mchan = &mdma->channels[i]; mchan->chan.device = dma; @@ -1090,7 +1092,6 @@ static struct platform_driver mpc_dma_driver = { .remove = mpc_dma_remove, .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, .of_match_table = mpc_dma_match, }, }; diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index bda20e6e1007..d7d61e1a01c3 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1500,7 +1500,6 @@ static const struct dev_pm_ops nbpf_pm_ops = { static struct platform_driver nbpf_driver = { .driver = { - .owner = THIS_MODULE, .name = "dma-nbpf", .of_match_table = nbpf_match, .pm = &nbpf_pm_ops, diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index bbea8243f9e8..6ea1aded7e74 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1074,8 +1074,6 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) vchan_init(&c->vc, &od->ddev); INIT_LIST_HEAD(&c->node); - od->ddev.chancnt++; - return 0; } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 9f9ca9fe5ce6..6e0e47d76b23 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -997,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 -const struct pci_device_id pch_dma_id_table[] = { +static const struct pci_device_id pch_dma_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 19a99743cf52..bdf40b530032 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_dma.h> #include <linux/err.h> +#include <linux/pm_runtime.h> #include "dmaengine.h" #define PL330_MAX_CHAN 8 @@ -265,6 +266,9 @@ static unsigned cmd_line; #define NR_DEFAULT_DESC 16 +/* Delay for runtime PM autosuspend, ms */ +#define PL330_AUTOSUSPEND_DELAY 20 + /* Populated by the PL330 core driver for DMA API driver's info */ struct pl330_config { u32 periph_id; @@ -1958,6 +1962,7 @@ static void pl330_tasklet(unsigned long data) struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; struct dma_pl330_desc *desc, *_dt; unsigned long flags; + bool power_down = false; spin_lock_irqsave(&pch->lock, flags); @@ -1972,10 +1977,17 @@ static void pl330_tasklet(unsigned long data) /* Try to submit a req imm. 
next to the last completed cookie */ fill_queue(pch); - /* Make sure the PL330 Channel thread is active */ - spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); - spin_unlock(&pch->thread->dmac->lock); + if (list_empty(&pch->work_list)) { + spin_lock(&pch->thread->dmac->lock); + _stop(pch->thread); + spin_unlock(&pch->thread->dmac->lock); + power_down = true; + } else { + /* Make sure the PL330 Channel thread is active */ + spin_lock(&pch->thread->dmac->lock); + _start(pch->thread); + spin_unlock(&pch->thread->dmac->lock); + } while (!list_empty(&pch->completed_list)) { dma_async_tx_callback callback; @@ -1990,6 +2002,12 @@ static void pl330_tasklet(unsigned long data) if (pch->cyclic) { desc->status = PREP; list_move_tail(&desc->node, &pch->work_list); + if (power_down) { + spin_lock(&pch->thread->dmac->lock); + _start(pch->thread); + spin_unlock(&pch->thread->dmac->lock); + power_down = false; + } } else { desc->status = FREE; list_move_tail(&desc->node, &pch->dmac->desc_pool); @@ -2004,6 +2022,12 @@ static void pl330_tasklet(unsigned long data) } } spin_unlock_irqrestore(&pch->lock, flags); + + /* If work list empty, power down */ + if (power_down) { + pm_runtime_mark_last_busy(pch->dmac->ddma.dev); + pm_runtime_put_autosuspend(pch->dmac->ddma.dev); + } } bool pl330_filter(struct dma_chan *chan, void *param) @@ -2073,6 +2097,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned switch (cmd) { case DMA_TERMINATE_ALL: + pm_runtime_get_sync(pl330->ddma.dev); spin_lock_irqsave(&pch->lock, flags); spin_lock(&pl330->lock); @@ -2099,10 +2124,15 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned dma_cookie_complete(&desc->txd); } + if (!list_empty(&pch->work_list)) + pm_runtime_put(pl330->ddma.dev); + list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); list_splice_tail_init(&pch->work_list, &pl330->desc_pool); list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); + pm_runtime_mark_last_busy(pl330->ddma.dev); + pm_runtime_put_autosuspend(pl330->ddma.dev); break; case DMA_SLAVE_CONFIG: slave_config = (struct dma_slave_config *)arg; @@ -2138,6 +2168,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan) tasklet_kill(&pch->task); + pm_runtime_get_sync(pch->dmac->ddma.dev); spin_lock_irqsave(&pch->lock, flags); pl330_release_channel(pch->thread); @@ -2147,6 +2178,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan) list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); + pm_runtime_mark_last_busy(pch->dmac->ddma.dev); + pm_runtime_put_autosuspend(pch->dmac->ddma.dev); } static enum dma_status @@ -2162,6 +2195,15 @@ static void pl330_issue_pending(struct dma_chan *chan) unsigned long flags; spin_lock_irqsave(&pch->lock, flags); + if (list_empty(&pch->work_list)) { + /* + * Warn on nothing pending. Empty submitted_list may + * break our pm_runtime usage counter as it is + * updated on work_list emptiness status. + */ + WARN_ON(list_empty(&pch->submitted_list)); + pm_runtime_get_sync(pch->dmac->ddma.dev); + } list_splice_tail_init(&pch->submitted_list, &pch->work_list); spin_unlock_irqrestore(&pch->lock, flags); @@ -2594,6 +2636,46 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan, return 0; } +/* + * Runtime PM callbacks are provided by amba/bus.c driver. 
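+ * For system sleep, pl330_suspend()/pl330_resume() below bypass those + * callbacks: they disable runtime PM and handle the pclk directly, skipping + * the extra clk_disable() when the device is already runtime suspended. 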
+ * + * It is assumed here that IRQ safe runtime PM is chosen in probe and amba + * bus driver will only disable/enable the clock in runtime PM callbacks. + */ +static int __maybe_unused pl330_suspend(struct device *dev) +{ + struct amba_device *pcdev = to_amba_device(dev); + + pm_runtime_disable(dev); + + if (!pm_runtime_status_suspended(dev)) { + /* amba did not disable the clock */ + amba_pclk_disable(pcdev); + } + amba_pclk_unprepare(pcdev); + + return 0; +} + +static int __maybe_unused pl330_resume(struct device *dev) +{ + struct amba_device *pcdev = to_amba_device(dev); + int ret; + + ret = amba_pclk_prepare(pcdev); + if (ret) + return ret; + + if (!pm_runtime_status_suspended(dev)) + ret = amba_pclk_enable(pcdev); + + pm_runtime_enable(dev); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); + static int pl330_probe(struct amba_device *adev, const struct amba_id *id) { @@ -2619,6 +2701,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return -ENOMEM; } + pd = &pl330->ddma; + pd->dev = &adev->dev; + pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; res = &adev->res; @@ -2655,7 +2740,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) dev_warn(&adev->dev, "unable to allocate desc\n"); - pd = &pl330->ddma; INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ @@ -2692,7 +2776,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) list_add_tail(&pch->chan.device_node, &pd->channels); } - pd->dev = &adev->dev; if (pdat) { pd->cap_mask = pdat->cap_mask; } else { @@ -2747,6 +2830,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, pcfg->num_peri, pcfg->num_events); + pm_runtime_irq_safe(&adev->dev); + pm_runtime_use_autosuspend(&adev->dev); + pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY); + pm_runtime_mark_last_busy(&adev->dev); + pm_runtime_put_autosuspend(&adev->dev); + return 0; probe_err3: /* Idle the DMAC */ @@ -2773,6 +2862,8 @@ static int pl330_remove(struct amba_device *adev) struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; + pm_runtime_get_noresume(pl330->ddma.dev); + if (adev->dev.of_node) of_dma_controller_free(adev->dev.of_node); @@ -2811,6 +2902,7 @@ static struct amba_driver pl330_driver = { .drv = { .owner = THIS_MODULE, .name = "dma-pl330", + .pm = &pl330_pm, }, .id_table = pl330_ids, .probe = pl330_probe, @@ -2819,6 +2911,6 @@ static struct amba_driver pl330_driver = { module_amba_driver(pl330_driver); -MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); +MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>"); MODULE_DESCRIPTION("API Driver for PL330 DMAC"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 7a4bbb0f80a5..3122a99ec06b 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c @@ -79,35 +79,97 @@ struct bam_async_desc { struct bam_desc_hw desc[0]; }; -#define BAM_CTRL 0x0000 -#define BAM_REVISION 0x0004 -#define BAM_SW_REVISION 0x0080 -#define BAM_NUM_PIPES 0x003C -#define BAM_TIMER 0x0040 -#define BAM_TIMER_CTRL 0x0044 -#define BAM_DESC_CNT_TRSHLD 0x0008 -#define BAM_IRQ_SRCS 0x000C -#define BAM_IRQ_SRCS_MSK 0x0010 -#define BAM_IRQ_SRCS_UNMASKED 0x0030 -#define BAM_IRQ_STTS 0x0014 -#define BAM_IRQ_CLR 0x0018 -#define BAM_IRQ_EN 0x001C -#define BAM_CNFG_BITS 0x007C -#define BAM_IRQ_SRCS_EE(ee) (0x0800 + 
((ee) * 0x80)) -#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80)) -#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000)) -#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000)) -#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000)) -#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000)) -#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000)) -#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000)) -#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000)) -#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000)) -#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000)) -#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000)) -#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000)) -#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000)) -#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000)) +enum bam_reg { + BAM_CTRL, + BAM_REVISION, + BAM_NUM_PIPES, + BAM_DESC_CNT_TRSHLD, + BAM_IRQ_SRCS, + BAM_IRQ_SRCS_MSK, + BAM_IRQ_SRCS_UNMASKED, + BAM_IRQ_STTS, + BAM_IRQ_CLR, + BAM_IRQ_EN, + BAM_CNFG_BITS, + BAM_IRQ_SRCS_EE, + BAM_IRQ_SRCS_MSK_EE, + BAM_P_CTRL, + BAM_P_RST, + BAM_P_HALT, + BAM_P_IRQ_STTS, + BAM_P_IRQ_CLR, + BAM_P_IRQ_EN, + BAM_P_EVNT_DEST_ADDR, + BAM_P_EVNT_REG, + BAM_P_SW_OFSTS, + BAM_P_DATA_FIFO_ADDR, + BAM_P_DESC_FIFO_ADDR, + BAM_P_EVNT_GEN_TRSHLD, + BAM_P_FIFO_SIZES, +}; + +struct reg_offset_data { + u32 base_offset; + unsigned int pipe_mult, evnt_mult, ee_mult; +}; + +static const struct reg_offset_data bam_v1_3_reg_info[] = { + [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 }, + [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 }, + [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 }, + [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 }, + [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 }, + [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 }, + [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 }, + [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 }, + [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 }, + [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 }, + [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 }, + [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 }, + [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 }, + [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 }, + [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 }, + [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 }, + [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 }, + [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 }, + [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 }, + [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 }, + [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 }, + [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 }, +}; + +static const struct reg_offset_data bam_v1_4_reg_info[] = { + [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 }, + [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 }, + [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 }, + [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 }, + [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 }, + [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 }, + [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 }, + [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 }, + [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 }, + [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 
0x00, 0x00, 0x80 }, + [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 }, + [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 }, + [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, + [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, + [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, + [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, + [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, + [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, + [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, + [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, + [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, +}; /* BAM CTRL */ #define BAM_SW_RST BIT(0) @@ -297,6 +359,8 @@ struct bam_device { /* execution environment ID, from DT */ u32 ee; + const struct reg_offset_data *layout; + struct clk *bamclk; int irq; @@ -305,6 +369,23 @@ struct bam_device { }; /** + * bam_addr - returns BAM register address + * @bdev: bam device + * @pipe: pipe instance (ignored when register doesn't have multiple instances) + * @reg: register enum + */ +static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, + enum bam_reg reg) +{ + const struct reg_offset_data r = bdev->layout[reg]; + + return bdev->regs + r.base_offset + + r.pipe_mult * pipe + + r.evnt_mult * pipe + + r.ee_mult * bdev->ee; +} + +/** * bam_reset_channel - Reset individual BAM DMA channel * @bchan: bam channel * @@ -317,8 +398,8 @@ static void bam_reset_channel(struct bam_chan *bchan) lockdep_assert_held(&bchan->vc.lock); /* reset channel */ - writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id)); - writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id)); + writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); /* don't allow cpu to reorder BAM register accesses done after this */ wmb(); @@ -347,17 +428,18 @@ static void bam_chan_init_hw(struct bam_chan *bchan, * because we allocated 1 more descriptor (8 bytes) than we can use */ writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), - bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id)); - writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs + - BAM_P_FIFO_SIZES(bchan->id)); + bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); + writel_relaxed(BAM_DESC_FIFO_SIZE, + bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ - writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id)); + writel_relaxed(P_DEFAULT_IRQS_EN, + bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); /* unmask the specific pipe and EE combo */ - val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); + val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); val |= BIT(bchan->id); - writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); + writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); /* don't allow cpu to reorder the channel enable done below */ wmb(); @@ -367,7 +449,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan, if (dir == DMA_DEV_TO_MEM) val |= P_DIRECTION; - writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id)); + writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); bchan->initialized = 1; @@ -432,12 +514,12 @@ static void bam_free_chan(struct dma_chan *chan) bchan->fifo_virt = NULL; /* mask irq for pipe/channel */ - val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); + val = readl_relaxed(bam_addr(bdev, 0, 
BAM_IRQ_SRCS_MSK_EE)); val &= ~BIT(bchan->id); - writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); + writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); /* disable irq */ - writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id)); + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); } /** @@ -583,14 +665,14 @@ static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_PAUSE: spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id)); + writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 1; spin_unlock_irqrestore(&bchan->vc.lock, flag); break; case DMA_RESUME: spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id)); + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 0; spin_unlock_irqrestore(&bchan->vc.lock, flag); break; @@ -626,7 +708,7 @@ static u32 process_channel_irqs(struct bam_device *bdev) unsigned long flags; struct bam_async_desc *async_desc; - srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee)); + srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); /* return early if no pipe/channel interrupts are present */ if (!(srcs & P_IRQ)) @@ -639,11 +721,9 @@ static u32 process_channel_irqs(struct bam_device *bdev) continue; /* clear pipe irq */ - pipe_stts = readl_relaxed(bdev->regs + - BAM_P_IRQ_STTS(i)); + pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS)); - writel_relaxed(pipe_stts, bdev->regs + - BAM_P_IRQ_CLR(i)); + writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); spin_lock_irqsave(&bchan->vc.lock, flags); async_desc = bchan->curr_txd; @@ -694,12 +774,12 @@ static irqreturn_t bam_dma_irq(int irq, void *data) tasklet_schedule(&bdev->task); if (srcs & BAM_IRQ) - clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS); + clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); /* don't allow reorder of the various accesses to the BAM registers */ mb(); - writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR); + writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); return IRQ_HANDLED; } @@ -763,7 +843,7 @@ static void bam_apply_new_config(struct bam_chan *bchan, else maxburst = bchan->slave.dst_maxburst; - writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD); + writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); bchan->reconfigure = 0; } @@ -830,7 +910,7 @@ static void bam_start_dma(struct bam_chan *bchan) /* ensure descriptor writes and dma start not reordered */ wmb(); writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), - bdev->regs + BAM_P_EVNT_REG(bchan->id)); + bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); } /** @@ -918,43 +998,44 @@ static int bam_init(struct bam_device *bdev) u32 val; /* read revision and configuration information */ - val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT; + val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; val &= NUM_EES_MASK; /* check that configured EE is within range */ if (bdev->ee >= val) return -EINVAL; - val = readl_relaxed(bdev->regs + BAM_NUM_PIPES); + val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); bdev->num_channels = val & BAM_NUM_PIPES_MASK; /* s/w reset bam */ /* after reset all pipes are disabled and idle */ - val = readl_relaxed(bdev->regs + BAM_CTRL); + val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); val |= BAM_SW_RST; - writel_relaxed(val, bdev->regs + BAM_CTRL); + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); val &= ~BAM_SW_RST; - 
writel_relaxed(val, bdev->regs + BAM_CTRL); + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); /* make sure previous stores are visible before enabling BAM */ wmb(); /* enable bam */ val |= BAM_EN; - writel_relaxed(val, bdev->regs + BAM_CTRL); + writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); /* set descriptor threshhold, start with 4 bytes */ - writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD); + writel_relaxed(DEFAULT_CNT_THRSHLD, + bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ - writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS); + writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); /* enable irqs for errors */ writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, - bdev->regs + BAM_IRQ_EN); + bam_addr(bdev, 0, BAM_IRQ_EN)); /* unmask global bam interrupt */ - writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); + writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); return 0; } @@ -969,9 +1050,18 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, bchan->vc.desc_free = bam_dma_free_desc; } +static const struct of_device_id bam_of_match[] = { + { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, + { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, + {} +}; + +MODULE_DEVICE_TABLE(of, bam_of_match); + static int bam_dma_probe(struct platform_device *pdev) { struct bam_device *bdev; + const struct of_device_id *match; struct resource *iores; int ret, i; @@ -981,6 +1071,14 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->dev = &pdev->dev; + match = of_match_node(bam_of_match, pdev->dev.of_node); + if (!match) { + dev_err(&pdev->dev, "Unsupported BAM module\n"); + return -ENODEV; + } + + bdev->layout = match->data; + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); bdev->regs = devm_ioremap_resource(&pdev->dev, iores); if (IS_ERR(bdev->regs)) @@ -1084,7 +1182,7 @@ static int bam_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&bdev->common); /* mask all interrupts for this execution environment */ - writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); + writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); devm_free_irq(bdev->dev, bdev->irq, bdev); @@ -1104,18 +1202,11 @@ static int bam_dma_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id bam_of_match[] = { - { .compatible = "qcom,bam-v1.4.0", }, - {} -}; -MODULE_DEVICE_TABLE(of, bam_of_match); - static struct platform_driver bam_dma_driver = { .probe = bam_dma_probe, .remove = bam_dma_remove, .driver = { .name = "bam-dma-engine", - .owner = THIS_MODULE, .of_match_table = bam_of_match, }, }; diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 7416572d1e40..6941a77521c3 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -1402,7 +1402,6 @@ static int s3c24xx_dma_remove(struct platform_device *pdev) static struct platform_driver s3c24xx_dma_driver = { .driver = { .name = "s3c24xx-dma", - .owner = THIS_MODULE, }, .id_table = s3c24xx_dma_driver_ids, .probe = s3c24xx_dma_probe, diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 4b0ef043729a..2329d295efb5 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -829,7 +829,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, { unsigned i; - dmadev->chancnt = ARRAY_SIZE(chan_desc); 
INIT_LIST_HEAD(&dmadev->channels); dmadev->dev = dev; dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; @@ -838,7 +837,7 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, dmadev->device_tx_status = sa11x0_dma_tx_status; dmadev->device_issue_pending = sa11x0_dma_issue_pending; - for (i = 0; i < dmadev->chancnt; i++) { + for (i = 0; i < ARRAY_SIZE(chan_desc); i++) { struct sa11x0_dma_chan *c; c = kzalloc(sizeof(*c), GFP_KERNEL); diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index 80fd2aeb4870..d95bbdd721f4 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c @@ -253,7 +253,6 @@ static int audmapp_chan_probe(struct platform_device *pdev, static void audmapp_chan_remove(struct audmapp_device *audev) { - struct dma_device *dma_dev = &audev->shdma_dev.dma_dev; struct shdma_chan *schan; int i; @@ -261,7 +260,6 @@ static void audmapp_chan_remove(struct audmapp_device *audev) BUG_ON(!schan); shdma_chan_remove(schan); } - dma_dev->chancnt = 0; } static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec, @@ -367,7 +365,6 @@ static struct platform_driver audmapp_driver = { .probe = audmapp_probe, .remove = audmapp_remove, .driver = { - .owner = THIS_MODULE, .name = "rcar-audmapp-engine", .of_match_table = audmapp_of_match, }, diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index b212d9471ab5..20a6f6f2a018 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -619,7 +619,6 @@ error: static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) { - struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev; struct shdma_chan *schan; int i; @@ -628,7 +627,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) shdma_chan_remove(schan); } - dma_dev->chancnt = 0; } static int hpb_dmae_remove(struct platform_device *pdev) @@ -655,7 +653,6 @@ static struct platform_driver hpb_dmae_driver = { .remove = hpb_dmae_remove, .shutdown = hpb_dmae_shutdown, .driver = { - .owner = THIS_MODULE, .name = "hpb-dma-engine", }, }; diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 42d497416196..3a2adb131d46 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -391,6 +391,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); pm_runtime_put(schan->dev); schan->pm_state = SHDMA_PM_ESTABLISHED; + } else if (schan->pm_state == SHDMA_PM_PENDING) { + shdma_chan_xfer_ld_queue(schan); } } } @@ -951,7 +953,7 @@ void shdma_chan_probe(struct shdma_dev *sdev, /* Add the channel to DMA device channel list */ list_add_tail(&schan->dma_chan.device_node, &sdev->dma_dev.channels); - sdev->schan[sdev->dma_dev.chancnt++] = schan; + sdev->schan[id] = schan; } EXPORT_SYMBOL(shdma_chan_probe); diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c index b4ff9d3e56d1..f999f9b0d314 100644 --- a/drivers/dma/sh/shdma-of.c +++ b/drivers/dma/sh/shdma-of.c @@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); static struct platform_driver shdma_of = { .driver = { - .owner = THIS_MODULE, .name = "shdma-of", .of_match_table = shdma_of_match, }, diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 58eb85770eba..b65317c6ea4e 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -572,7 +572,6 @@ err_no_irq: static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) { - struct dma_device *dma_dev = 
&shdev->shdma_dev.dma_dev; struct shdma_chan *schan; int i; @@ -581,7 +580,6 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) shdma_chan_remove(schan); } - dma_dev->chancnt = 0; } static void sh_dmae_shutdown(struct platform_device *pdev) diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index 3ce103909896..6da2eaa6c294 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c @@ -295,7 +295,6 @@ err_no_irq: static void sudmac_chan_remove(struct sudmac_device *su_dev) { - struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; struct shdma_chan *schan; int i; @@ -304,7 +303,6 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev) shdma_chan_remove(schan); } - dma_dev->chancnt = 0; } static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) @@ -411,7 +409,6 @@ static int sudmac_remove(struct platform_device *pdev) static struct platform_driver sudmac_driver = { .driver = { - .owner = THIS_MODULE, .name = SUDMAC_DRV_NAME, }, .probe = sudmac_probe, diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index aac03ab10c54..feb1e8ab8d7b 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -735,7 +735,6 @@ static int sirfsoc_dma_probe(struct platform_device *op) dma = &sdma->dma; dma->dev = dev; - dma->chancnt = SIRFSOC_DMA_CHANNELS; dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; @@ -752,7 +751,7 @@ static int sirfsoc_dma_probe(struct platform_device *op) dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); - for (i = 0; i < dma->chancnt; i++) { + for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) { schan = &sdma->channels[i]; schan->chan.device = dma; @@ -835,6 +834,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP static int sirfsoc_dma_pm_suspend(struct device *dev) { struct sirfsoc_dma *sdma = dev_get_drvdata(dev); @@ -916,6 +916,7 @@ static int sirfsoc_dma_pm_resume(struct device *dev) return 0; } +#endif static const struct dev_pm_ops sirfsoc_dma_pm_ops = { SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d9ca3e32d748..4d0710648b08 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3432,6 +3432,7 @@ static int __init d40_lcla_allocate(struct d40_base *base) d40_err(base->dev, "Failed to allocate %d pages.\n", base->lcla_pool.pages); + ret = -ENOMEM; for (j = 0; j < i; j++) free_pages(page_list[j], base->lcla_pool.pages); diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 91292f5513ff..159f1736a16f 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of_dma.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/slab.h> @@ -26,24 +27,6 @@ #include "virt-dma.h" /* - * There's 16 physical channels that can work in parallel. - * - * However we have 30 different endpoints for our requests. - * - * Since the channels are able to handle only an unidirectional - * transfer, we need to allocate more virtual channels so that - * everyone can grab one channel. - * - * Some devices can't work in both direction (mostly because it - * wouldn't make sense), so we have a bit fewer virtual channels than - * 2 channels per endpoints. 
- */ - -#define NR_MAX_CHANNELS 16 -#define NR_MAX_REQUESTS 30 -#define NR_MAX_VCHANS 53 - -/* * Common registers */ #define DMA_IRQ_EN(x) ((x) * 0x04) @@ -60,6 +43,12 @@ #define DMA_STAT 0x30 /* + * sun8i specific registers + */ +#define SUN8I_DMA_GATE 0x20 +#define SUN8I_DMA_GATE_ENABLE 0x4 + +/* * Channels specific registers */ #define DMA_CHAN_ENABLE 0x00 @@ -102,6 +91,19 @@ #define DRQ_SDRAM 1 /* + * Hardware channels / ports representation + * + * The hardware is used in several SoCs, with differing numbers + * of channels and endpoints. This structure ties those numbers + * to a certain compatible string. + */ +struct sun6i_dma_config { + u32 nr_max_channels; + u32 nr_max_requests; + u32 nr_max_vchans; +}; + +/* * Hardware representation of the LLI * * The hardware will be fed the physical address of this structure, @@ -159,6 +161,7 @@ struct sun6i_dma_dev { struct dma_pool *pool; struct sun6i_pchan *pchans; struct sun6i_vchan *vchans; + const struct sun6i_dma_config *cfg; }; static struct device *chan2dev(struct dma_chan *chan) @@ -426,6 +429,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) static void sun6i_dma_tasklet(unsigned long data) { struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; + const struct sun6i_dma_config *cfg = sdev->cfg; struct sun6i_vchan *vchan; struct sun6i_pchan *pchan; unsigned int pchan_alloc = 0; @@ -453,7 +457,7 @@ static void sun6i_dma_tasklet(unsigned long data) } spin_lock_irq(&sdev->lock); - for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { + for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) { pchan = &sdev->pchans[pchan_idx]; if (pchan->vchan || list_empty(&sdev->pending)) @@ -474,7 +478,7 @@ static void sun6i_dma_tasklet(unsigned long data) } spin_unlock_irq(&sdev->lock); - for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { + for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) { if (!(pchan_alloc & BIT(pchan_idx))) continue; @@ -496,7 +500,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) int i, j, ret = IRQ_NONE; u32 status; - for (i = 0; i < 2; i++) { + for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) { status = readl(sdev->base + DMA_IRQ_STAT(i)); if (!status) continue; @@ -506,7 +510,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) writel(status, sdev->base + DMA_IRQ_STAT(i)); - for (j = 0; (j < 8) && status; j++) { + for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) { if (status & DMA_IRQ_QUEUE) { pchan = sdev->pchans + j; vchan = pchan->vchan; @@ -519,7 +523,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) } } - status = status >> 4; + status = status >> DMA_IRQ_CHAN_WIDTH; } if (!atomic_read(&sdev->tasklet_shutdown)) @@ -815,7 +819,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; u8 port = dma_spec->args[0]; - if (port > NR_MAX_REQUESTS) + if (port > sdev->cfg->nr_max_requests) return NULL; chan = dma_get_any_slave_channel(&sdev->slave); @@ -848,7 +852,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) { int i; - for (i = 0; i < NR_MAX_VCHANS; i++) { + for (i = 0; i < sdev->cfg->nr_max_vchans; i++) { struct sun6i_vchan *vchan = &sdev->vchans[i]; list_del(&vchan->vc.chan.device_node); @@ -856,8 +860,48 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) } } +/* + * For A31: + * + * There's 16 physical channels that can work in parallel. + * + * However we have 30 different endpoints for our requests. 
+ * + * Since the channels are able to handle only a unidirectional + * transfer, we need to allocate more virtual channels so that + * everyone can grab one channel. + * + * Some devices can't work in both directions (mostly because it + * wouldn't make sense), so we have a bit fewer virtual channels than + * 2 channels per endpoint. + */ + +static struct sun6i_dma_config sun6i_a31_dma_cfg = { + .nr_max_channels = 16, + .nr_max_requests = 30, + .nr_max_vchans = 53, +}; + +/* + * The A23 only has 8 physical channels, a maximum DRQ port id of 24, + * and a total of 37 usable source and destination endpoints. + */ + +static struct sun6i_dma_config sun8i_a23_dma_cfg = { + .nr_max_channels = 8, + .nr_max_requests = 24, + .nr_max_vchans = 37, +}; + +static struct of_device_id sun6i_dma_match[] = { + { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, + { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, + { /* sentinel */ } +}; + static int sun6i_dma_probe(struct platform_device *pdev) { + const struct of_device_id *device; struct sun6i_dma_dev *sdc; struct resource *res; int ret, i; @@ -866,6 +910,11 @@ if (!sdc) return -ENOMEM; + device = of_match_device(sun6i_dma_match, &pdev->dev); + if (!device) + return -ENODEV; + sdc->cfg = device->data; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sdc->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sdc->base)) @@ -912,31 +961,30 @@ sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; sdc->slave.device_control = sun6i_dma_control; - sdc->slave.chancnt = NR_MAX_VCHANS; sdc->slave.copy_align = 4; sdc->slave.dev = &pdev->dev; - sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS, + sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, sizeof(struct sun6i_pchan), GFP_KERNEL); if (!sdc->pchans) return -ENOMEM; - sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS, + sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans, sizeof(struct sun6i_vchan), GFP_KERNEL); if (!sdc->vchans) return -ENOMEM; tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); - for (i = 0; i < NR_MAX_CHANNELS; i++) { + for (i = 0; i < sdc->cfg->nr_max_channels; i++) { struct sun6i_pchan *pchan = &sdc->pchans[i]; pchan->idx = i; pchan->base = sdc->base + 0x100 + i * 0x40; } - for (i = 0; i < NR_MAX_VCHANS; i++) { + for (i = 0; i < sdc->cfg->nr_max_vchans; i++) { struct sun6i_vchan *vchan = &sdc->vchans[i]; INIT_LIST_HEAD(&vchan->node); @@ -976,6 +1024,15 @@ goto err_dma_unregister; } + /* + * The sun8i variant requires us to toggle a dma gating register, + * as seen in Allwinner's SDK. This register is not documented + * in the A23 user manual.
+ */ + if (of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun8i-a23-dma")) + writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE); + return 0; err_dma_unregister: @@ -1008,11 +1065,6 @@ static int sun6i_dma_remove(struct platform_device *pdev) return 0; } -static struct of_device_id sun6i_dma_match[] = { - { .compatible = "allwinner,sun6i-a31-dma" }, - { /* sentinel */ } -}; - static struct platform_driver sun6i_dma_driver = { .probe = sun6i_dma_probe, .remove = sun6i_dma_remove, diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 1c867d0303db..d8450c3f35f0 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1597,7 +1597,6 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = { static struct platform_driver tegra_dmac_driver = { .driver = { .name = "tegra-apbdma", - .owner = THIS_MODULE, .pm = &tegra_dma_dev_pm_ops, .of_match_table = tegra_dma_of_match, }, diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 4506a7b4f972..2407ccf1a64b 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -783,7 +783,6 @@ static int td_remove(struct platform_device *pdev) static struct platform_driver td_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, }, .probe = td_probe, .remove = td_remove, diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 17686caf64d5..0659ec9c4488 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -76,7 +76,7 @@ static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) { -#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) +#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) channel64_writel(dc, CHAR, 0); channel64_writel(dc, __pad_CHAR, 0); #else diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h index f5a760598882..f6517b928bab 100644 --- a/drivers/dma/txx9dmac.h +++ b/drivers/dma/txx9dmac.h @@ -67,7 +67,7 @@ static inline bool txx9_dma_have_SMPCHN(void) /* Hardware register definitions. */ struct txx9dmac_cregs { -#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) +#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) TXX9_DMA_REG32(CHAR); /* Chain Address Register */ #else u64 CHAR; /* Chain Address Register */ @@ -201,7 +201,7 @@ static inline bool is_dmac64(const struct txx9dmac_chan *dc) #ifdef TXX9_DMA_USE_SIMPLE_CHAIN /* Hardware descriptor definition. (for simple-chain) */ struct txx9dmac_hwdesc { -#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) +#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) TXX9_DMA_REG32(CHAR); #else u64 CHAR; diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index a6e64767186e..4a3a8f3137b3 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -942,6 +942,9 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, if (!xt->numf || !xt->sgl[0].size) return NULL; + if (xt->frame_size != 1) + return NULL; + /* Allocate a transaction descriptor. 
*/ desc = xilinx_vdma_alloc_tx_descriptor(chan); if (!desc) @@ -960,7 +963,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, hw = &segment->hw; hw->vsize = xt->numf; hw->hsize = xt->sgl[0].size; - hw->stride = xt->sgl[0].icg << + hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; hw->stride |= chan->config.frm_dly << XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; @@ -971,9 +974,11 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, hw->buf_addr = xt->src_start; /* Link the previous next descriptor to current */ - prev = list_last_entry(&desc->segments, - struct xilinx_vdma_tx_segment, node); - prev->hw.next_desc = segment->phys; + if (!list_empty(&desc->segments)) { + prev = list_last_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); + prev->hw.next_desc = segment->phys; + } /* Insert the segment into the descriptor segments list. */ list_add_tail(&segment->node, &desc->segments); diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c index 73b73969d361..997e61ef173c 100644 --- a/drivers/gpio/gpio-msm-v1.c +++ b/drivers/gpio/gpio-msm-v1.c @@ -686,7 +686,7 @@ static int gpio_msm_v1_probe(struct platform_device *pdev) irq_set_chained_handler(irq1, msm_gpio_irq_handler); irq_set_chained_handler(irq2, msm_gpio_irq_handler); irq_set_irq_wake(irq1, 1); - irq_set_irq_wake(irq2, 2); + irq_set_irq_wake(irq2, 1); return 0; } diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c index 353263c85d26..506a2ea0eb4c 100644 --- a/drivers/gpio/gpio-spear-spics.c +++ b/drivers/gpio/gpio-spear-spics.c @@ -204,5 +204,5 @@ static int __init spics_gpio_init(void) subsys_initcall(spics_gpio_init); MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>"); -MODULE_DESCRIPTION("ST Microlectronics SPEAr SPI Chip Select Abstraction"); +MODULE_DESCRIPTION("STMicroelectronics SPEAr SPI Chip Select Abstraction"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 4a85bb644e24..b928c17bdeed 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c @@ -347,7 +347,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) @@ -356,7 +356,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -406,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. 
*/ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ @@ -417,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_entry((head)->prev, typeof(*pos), member); \ @@ -428,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ @@ -439,7 +439,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -453,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. @@ -467,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. */ @@ -480,7 +480,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member), \ @@ -493,7 +493,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. @@ -509,7 +509,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. 
+ * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. @@ -524,7 +524,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index f42df4dd58d2..230b6f887cd8 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -371,6 +371,7 @@ config HID_LOGITECH_DJ tristate "Logitech Unifying receivers full support" depends on HIDRAW depends on HID_LOGITECH + select HID_LOGITECH_HIDPP ---help--- Say Y if you want support for Logitech Unifying receivers and devices. Unifying receivers are capable of pairing up to 6 Logitech compliant @@ -378,6 +379,17 @@ config HID_LOGITECH_DJ generic USB_HID driver and all incoming events will be multiplexed into a single mouse and a single keyboard device. +config HID_LOGITECH_HIDPP + tristate "Logitech HID++ devices support" + depends on HID_LOGITECH + ---help--- + Support for Logitech devices relying on the HID++ Logitech specification. + + Say Y if you want support for Logitech devices relying on the HID++ + specification. Such devices are the various Logitech Touchpads (T650, + T651, TK820), some mice (Zone Touch mouse), or even keyboards (Solar + Keyboard). + config LOGITECH_FF bool "Logitech force feedback support" depends on HID_LOGITECH @@ -613,6 +625,13 @@ config HID_PICOLCD_CIR ---help--- Provide access to PicoLCD's CIR interface via remote control (LIRC). +config HID_PLANTRONICS + tristate "Plantronics USB HID Driver" + default !EXPERT + depends on HID + ---help--- + Provides HID support for Plantronics telephony devices. + config HID_PRIMAX tristate "Primax non-fully HID-compliant devices" depends on HID @@ -629,7 +648,7 @@ config HID_ROCCAT support for its special functionalities.
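The list_head kernel-doc corrected in the mkregtable.c hunks above is easiest to see with a concrete use: list_entry() is just container_of(), mapping an embedded struct list_head back to its enclosing structure, and list_for_each_entry() iterates over the enclosing structures directly. Below is a minimal usage sketch of that pattern; the struct and function names are illustrative only and do not appear in mkregtable.c:

struct reg_entry {
	unsigned int offset;
	struct list_head node;		/* the "member" argument in the macros */
};

static int count_regs(struct list_head *head)
{
	struct reg_entry *pos;
	int n = 0;

	/* pos is a struct reg_entry *, recovered via container_of() */
	list_for_each_entry(pos, head, node)
		n++;
	return n;
}

The _safe variants exist for exactly one reason: they cache pos->member.next in the spare cursor before the loop body runs, so the body may delete or free the current entry without breaking the traversal.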
config HID_SAITEK - tristate "Saitek non-fully HID-compliant devices" + tristate "Saitek (Mad Catz) non-fully HID-compliant devices" depends on HID ---help--- Support for Saitek devices that are not fully compliant with the @@ -637,6 +656,7 @@ config HID_SAITEK Supported devices: - PS1000 Dual Analog Pad + - R.A.T.9 Gaming Mouse - R.A.T.7 Gaming Mouse - M.M.O.7 Gaming Mouse diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index e2850d8af9ca..debd15b44b59 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o +obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o @@ -94,6 +95,7 @@ ifdef CONFIG_DEBUG_FS hid-picolcd-y += hid-picolcd_debugfs.o endif +obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o obj-$(CONFIG_HID_PRIMAX) += hid-primax.o obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \ hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \ diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3402033fa52a..c3d0ac1a0988 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -702,6 +702,11 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type) if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && type == HID_COLLECTION_PHYSICAL) hid->group = HID_GROUP_SENSOR_HUB; + + if (hid->vendor == USB_VENDOR_ID_MICROSOFT && + hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 && + hid->group == HID_GROUP_MULTITOUCH) + hid->group = HID_GROUP_GENERIC; } static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) @@ -780,22 +785,19 @@ static int hid_scan_report(struct hid_device *hid) hid->group = HID_GROUP_MULTITOUCH_WIN_8; /* - * Vendor specific handlings - */ - if ((hid->vendor == USB_VENDOR_ID_SYNAPTICS) && - (hid->group == HID_GROUP_GENERIC) && - /* only bind to the mouse interface of composite USB devices */ - (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE)) - /* hid-rmi should take care of them, not hid-generic */ - hid->group = HID_GROUP_RMI; - - /* * Vendor specific handlings */ switch (hid->vendor) { case USB_VENDOR_ID_WACOM: hid->group = HID_GROUP_WACOM; break; + case USB_VENDOR_ID_SYNAPTICS: + if ((hid->group == HID_GROUP_GENERIC) && + (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE)) + /* hid-rmi should only bind to the mouse interface of + * composite USB devices */ + hid->group = HID_GROUP_RMI; + break; } vfree(parser); @@ -1280,12 +1282,6 @@ void hid_output_report(struct hid_report *report, __u8 *data) } EXPORT_SYMBOL_GPL(hid_output_report); -static int hid_report_len(struct hid_report *report) -{ - /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ - return ((report->size - 1) >> 3) + 1 + (report->id > 0); -} - /* * Allocator for buffer that is going to be passed to hid_output_report() */ @@ -1822,6 +1818,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
USB_DEVICE_ID_DINOVO_DESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, @@ -1862,6 +1859,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, @@ -1887,6 +1885,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, #if IS_ENABLED(CONFIG_HID_ROCCAT) { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, @@ -1910,10 +1909,12 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) }, @@ -2539,7 +2540,8 @@ int hid_add_device(struct hid_device *hdev) * Scan generic devices for group information */ if (hid_ignore_special_drivers || - !hid_match_id(hdev, hid_have_special_driver)) { + (!hdev->group && + !hid_match_id(hdev, hid_have_special_driver))) { ret = hid_scan_report(hdev); if (ret) hid_warn(hdev, "bad device descriptor (%d)\n", ret); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7c863738e419..7460f3402298 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -300,6 +300,7 @@ #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103 +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f #define USB_VENDOR_ID_ELECOM 0x056e @@ -578,6 +579,7 @@ #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e +#define USB_DEVICE_ID_LOGITECH_T651 0xb00c #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f @@ -620,6 +622,7 @@ #define USB_VENDOR_ID_MADCATZ 0x0738 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 +#define USB_DEVICE_ID_MADCATZ_RAT9 
0x1709 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -649,6 +652,7 @@ #define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 +#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -716,6 +720,8 @@ #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 +#define USB_VENDOR_ID_PLANTRONICS 0x047f + #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 #define USB_DEVICE_ID_PANABOARD_UBT880 0x104d @@ -813,6 +819,9 @@ #define USB_VENDOR_ID_SKYCABLE 0x1223 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 +#define USB_VENDOR_ID_SMK 0x0609 +#define USB_DEVICE_ID_SMK_PS3_BDREMOTE 0x0306 + #define USB_VENDOR_ID_SONY 0x054c #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b #define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374 @@ -931,6 +940,9 @@ #define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 #define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 +#define USB_VENDOR_ID_VTL 0x0306 +#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f + #define USB_VENDOR_ID_WACOM 0x056a #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 #define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 725f22ca47fc..e0a0f06ac5ef 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -872,7 +872,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; - default: goto ignore; + default: map_key_clear(KEY_UNKNOWN); } break; @@ -1215,7 +1215,7 @@ static void hidinput_led_worker(struct work_struct *work) return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT); /* fall back to generic raw-output-report */ - len = ((report->size - 1) >> 3) + 1 + (report->id > 0); + len = hid_report_len(report); buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!buf) return; diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index bf227f7679af..4c55f4d95798 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -62,7 +62,6 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, /* HID_UP_LNVENDOR = USB, HID_UP_MSVENDOR = BT */ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR || (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) { - set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { case 0x00f1: /* Fn-F4: Mic mute */ map_key_clear(KEY_MICMUTE); @@ -85,13 +84,13 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, case 0x00f8: /* Fn-F11: View open applications (3 boxes) */ map_key_clear(KEY_SCALE); return 1; - case 0x00fa: /* Fn-Esc: Fn-lock toggle */ - map_key_clear(KEY_FN_ESC); - return 1; - case 0x00fb: /* Fn-F12: Open My computer (6 boxes) USB-only */ + case 0x00f9: /* Fn-F12: Open My computer (6 boxes) USB-only */ /* NB: This mapping is invented in raw_event below */ map_key_clear(KEY_FILE); return 1; + case 0x00fa: /* Fn-Esc: Fn-lock toggle */ + map_key_clear(KEY_FN_ESC); + return 1; } } @@ -207,8 +206,8 @@ static int lenovo_raw_event(struct hid_device *hdev, && data[0] == 0x15 && data[1] == 0x94 && data[2] == 0x01)) { - data[1] = 0x0; - data[2] = 0x4; + data[1] = 0x00; + data[2] = 0x01; } return 0; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 71f569292cab..c917ab61aafa 
100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -26,9 +26,104 @@ #include <linux/hid.h> #include <linux/module.h> #include <linux/usb.h> +#include <linux/kfifo.h> #include <asm/unaligned.h> #include "hid-ids.h" -#include "hid-logitech-dj.h" + +#define DJ_MAX_PAIRED_DEVICES 6 +#define DJ_MAX_NUMBER_NOTIFICATIONS 8 +#define DJ_RECEIVER_INDEX 0 +#define DJ_DEVICE_INDEX_MIN 1 +#define DJ_DEVICE_INDEX_MAX 6 + +#define DJREPORT_SHORT_LENGTH 15 +#define DJREPORT_LONG_LENGTH 32 + +#define REPORT_ID_DJ_SHORT 0x20 +#define REPORT_ID_DJ_LONG 0x21 + +#define REPORT_ID_HIDPP_SHORT 0x10 +#define REPORT_ID_HIDPP_LONG 0x11 + +#define HIDPP_REPORT_SHORT_LENGTH 7 +#define HIDPP_REPORT_LONG_LENGTH 20 + +#define HIDPP_RECEIVER_INDEX 0xff + +#define REPORT_TYPE_RFREPORT_FIRST 0x01 +#define REPORT_TYPE_RFREPORT_LAST 0x1F + +/* Command Switch to DJ mode */ +#define REPORT_TYPE_CMD_SWITCH 0x80 +#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00 +#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01 +#define TIMEOUT_NO_KEEPALIVE 0x00 + +/* Command to Get the list of Paired devices */ +#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81 + +/* Device Paired Notification */ +#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41 +#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01 +#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02 +#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00 +#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01 +#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02 +#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03 + +/* Device Un-Paired Notification */ +#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40 + + +/* Connection Status Notification */ +#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42 +#define CONNECTION_STATUS_PARAM_STATUS 0x00 +#define STATUS_LINKLOSS 0x01 + +/* Error Notification */ +#define REPORT_TYPE_NOTIF_ERROR 0x7F +#define NOTIF_ERROR_PARAM_ETYPE 0x00 +#define ETYPE_KEEPALIVE_TIMEOUT 0x01 + +/* supported DJ HID && RF report types */ +#define REPORT_TYPE_KEYBOARD 0x01 +#define REPORT_TYPE_MOUSE 0x02 +#define REPORT_TYPE_CONSUMER_CONTROL 0x03 +#define REPORT_TYPE_SYSTEM_CONTROL 0x04 +#define REPORT_TYPE_MEDIA_CENTER 0x08 +#define REPORT_TYPE_LEDS 0x0E + +/* RF Report types bitfield */ +#define STD_KEYBOARD 0x00000002 +#define STD_MOUSE 0x00000004 +#define MULTIMEDIA 0x00000008 +#define POWER_KEYS 0x00000010 +#define MEDIA_CENTER 0x00000100 +#define KBD_LEDS 0x00004000 + +struct dj_report { + u8 report_id; + u8 device_index; + u8 report_type; + u8 report_params[DJREPORT_SHORT_LENGTH - 3]; +}; + +struct dj_receiver_dev { + struct hid_device *hdev; + struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES + + DJ_DEVICE_INDEX_MIN]; + struct work_struct work; + struct kfifo notif_fifo; + spinlock_t lock; + bool querying_devices; +}; + +struct dj_device { + struct hid_device *hdev; + struct dj_receiver_dev *dj_receiver_dev; + u32 reports_supported; + u8 device_index; +}; /* Keyboard descriptor (1) */ static const char kbd_descriptor[] = { @@ -156,6 +251,57 @@ static const char media_descriptor[] = { 0xc0, /* EndCollection */ }; /* */ +/* HIDPP descriptor */ +static const char hidpp_descriptor[] = { + 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */ + 0x09, 0x01, /* Usage (Vendor Usage 1) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x85, 0x10, /* Report ID (16) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x06, /* Report Count (6) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xff, 0x00, /* Logical Maximum (255) */ + 0x09, 0x01, /* Usage (Vendor Usage 1) */ + 0x81, 0x00, /* Input (Data,Arr,Abs) */ + 0x09, 
0x01, /* Usage (Vendor Usage 1) */ + 0x91, 0x00, /* Output (Data,Arr,Abs) */ + 0xc0, /* End Collection */ + 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */ + 0x09, 0x02, /* Usage (Vendor Usage 2) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x85, 0x11, /* Report ID (17) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x13, /* Report Count (19) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xff, 0x00, /* Logical Maximum (255) */ + 0x09, 0x02, /* Usage (Vendor Usage 2) */ + 0x81, 0x00, /* Input (Data,Arr,Abs) */ + 0x09, 0x02, /* Usage (Vendor Usage 2) */ + 0x91, 0x00, /* Output (Data,Arr,Abs) */ + 0xc0, /* End Collection */ + 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */ + 0x09, 0x04, /* Usage (Vendor Usage 0x04) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x85, 0x20, /* Report ID (32) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x0e, /* Report Count (14) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xff, 0x00, /* Logical Maximum (255) */ + 0x09, 0x41, /* Usage (Vendor Usage 0x41) */ + 0x81, 0x00, /* Input (Data,Arr,Abs) */ + 0x09, 0x41, /* Usage (Vendor Usage 0x41) */ + 0x91, 0x00, /* Output (Data,Arr,Abs) */ + 0x85, 0x21, /* Report ID (33) */ + 0x95, 0x1f, /* Report Count (31) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xff, 0x00, /* Logical Maximum (255) */ + 0x09, 0x42, /* Usage (Vendor Usage 0x42) */ + 0x81, 0x00, /* Input (Data,Arr,Abs) */ + 0x09, 0x42, /* Usage (Vendor Usage 0x42) */ + 0x91, 0x00, /* Output (Data,Arr,Abs) */ + 0xc0, /* End Collection */ +}; + /* Maximum size of all defined hid reports in bytes (including report id) */ #define MAX_REPORT_SIZE 8 @@ -165,7 +311,8 @@ static const char media_descriptor[] = { sizeof(mse_descriptor) + \ sizeof(consumer_descriptor) + \ sizeof(syscontrol_descriptor) + \ - sizeof(media_descriptor)) + sizeof(media_descriptor) + \ + sizeof(hidpp_descriptor)) /* Number of possible hid report types that can be created by this driver. * @@ -256,11 +403,15 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, dj_hiddev->dev.parent = &djrcv_hdev->dev; dj_hiddev->bus = BUS_USB; dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor); - dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct); + dj_hiddev->product = + (dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB] + << 8) | + dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]; snprintf(dj_hiddev->name, sizeof(dj_hiddev->name), - "Logitech Unifying Device. Wireless PID:%02x%02x", - dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB], - dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]); + "Logitech Unifying Device. 
Wireless PID:%04x", + dj_hiddev->product); + + dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE; usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys)); snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index); @@ -422,6 +573,13 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev, } } +static void logi_dj_recv_forward_hidpp(struct dj_device *dj_dev, u8 *data, + int size) +{ + /* We are called from atomic context (tasklet && djrcv->lock held) */ + if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1)) + dbg_hid("hid_input_report error\n"); +} static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) @@ -472,7 +630,9 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, unsigned timeout) { + struct hid_device *hdev = djrcv_dev->hdev; struct dj_report *dj_report; + u8 *buf; int retval; dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); @@ -484,7 +644,6 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F; dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout; retval = logi_dj_recv_send_report(djrcv_dev, dj_report); - kfree(dj_report); /* * Ugly sleep to work around a USB 3.0 bug when the receiver is still @@ -493,6 +652,30 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, */ msleep(50); + /* + * Magical bits to set up hidpp notifications when the dj devices + * are connected/disconnected. + * + * We can reuse dj_report because HIDPP_REPORT_SHORT_LENGTH is smaller + * than DJREPORT_SHORT_LENGTH. + */ + buf = (u8 *)dj_report; + + memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH); + + buf[0] = REPORT_ID_HIDPP_SHORT; + buf[1] = 0xFF; + buf[2] = 0x80; + buf[3] = 0x00; + buf[4] = 0x00; + buf[5] = 0x09; + buf[6] = 0x00; + + hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf, + HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT, + HID_REQ_SET_REPORT); + + kfree(dj_report); return retval; } @@ -509,6 +692,9 @@ static void logi_dj_ll_close(struct hid_device *hid) dbg_hid("%s:%s\n", __func__, hid->phys); } +static u8 unifying_name_query[] = {0x10, 0xff, 0x83, 0xb5, 0x40, 0x00, 0x00}; +static u8 unifying_name_answer[] = {0x11, 0xff, 0x83, 0xb5}; + static int logi_dj_ll_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t count, unsigned char report_type, @@ -519,6 +705,22 @@ static int logi_dj_ll_raw_request(struct hid_device *hid, u8 *out_buf; int ret; + if ((buf[0] == REPORT_ID_HIDPP_SHORT) || + (buf[0] == REPORT_ID_HIDPP_LONG)) { + if (count < 2) + return -EINVAL; + + /* special case where we should not overwrite + * the device_index */ + if (count == 7 && !memcmp(buf, unifying_name_query, + sizeof(unifying_name_query))) + buf[4] |= djdev->device_index - 1; + else + buf[1] = djdev->device_index; + return hid_hw_raw_request(djrcv_dev->hdev, reportnum, buf, + count, report_type, reqtype); + } + if (buf[0] != REPORT_TYPE_LEDS) return -EINVAL; @@ -597,6 +799,8 @@ static int logi_dj_ll_parse(struct hid_device *hid) __func__, djdev->reports_supported); } + rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor)); + retval = hid_parse_report(hid, rdesc, rsize); kfree(rdesc); @@ -624,8 +828,7 @@ static struct hid_ll_driver logi_dj_ll_driver = { .raw_request = logi_dj_ll_raw_request, }; - -static int logi_dj_raw_event(struct hid_device *hdev, 
+static int logi_dj_dj_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { @@ -633,36 +836,24 @@ static int logi_dj_raw_event(struct hid_device *hdev, struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_report *dj_report = (struct dj_report *) data; unsigned long flags; - dbg_hid("%s, size:%d\n", __func__, size); - - /* Here we receive all data coming from iface 2, there are 4 cases: - * - * 1) Data should continue its normal processing i.e. data does not - * come from the DJ collection, in which case we do nothing and - * return 0, so hid-core can continue normal processing (will forward - * to associated hidraw device) + /* + * Here we receive all data coming from iface 2; there are 3 cases: * - * 2) Data is from DJ collection, and is intended for this driver i. e. - * data contains arrival, departure, etc notifications, in which case - * we queue them for delayed processing by the work queue. We return 1 - * to hid-core as no further processing is required from it. + * 1) Data is intended for this driver, i.e. data contains arrival, + * departure, etc notifications, in which case we queue them for delayed + * processing by the work queue. We return 1 to hid-core as no further + * processing is required from it. * - * 3) Data is from DJ collection, and informs a connection change, - * if the change means rf link loss, then we must send a null report - * to the upper layer to discard potentially pressed keys that may be - * repeated forever by the input layer. Return 1 to hid-core as no - * further processing is required. + * 2) Data informs a connection change; if the change means rf link + * loss, then we must send a null report to the upper layer to discard + * potentially pressed keys that may be repeated forever by the input + * layer. Return 1 to hid-core as no further processing is required. * - * 4) Data is from DJ collection and is an actual input event from - * a paired DJ device in which case we forward it to the correct hid - * device (via hid_input_report() ) and return 1 so hid-core does not do - * anything else with it. + * 3) Data is an actual input event from a paired DJ device in which + * case we forward it to the correct hid device (via hid_input_report() + * ) and return 1 so hid-core does not do anything else with it. */ - /* case 1) */ - if (data[0] != REPORT_ID_DJ_SHORT) - return false; - if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { /* @@ -707,6 +898,80 @@ out: return true; } +static int logi_dj_hidpp_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, + int size) +{ + struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); + struct dj_report *dj_report = (struct dj_report *) data; + unsigned long flags; + u8 device_index = dj_report->device_index; + + if (device_index == HIDPP_RECEIVER_INDEX) { + /* special case where the device wants to know its unifying + * name */ + if (size == HIDPP_REPORT_LONG_LENGTH && + !memcmp(data, unifying_name_answer, + sizeof(unifying_name_answer)) && + ((data[4] & 0xF0) == 0x40)) + device_index = (data[4] & 0x0F) + 1; + else + return false; + } + + /* + * Data is from the HID++ collection; in this case, we forward the + * data to the corresponding child dj device and return 0 to hid-core + * so the data also goes to the hidraw device of the receiver. This + * allows a user space application to implement the full HID++ routing + * via the receiver.
+ */ + + if ((device_index < DJ_DEVICE_INDEX_MIN) || + (device_index > DJ_DEVICE_INDEX_MAX)) { + /* + * Device index is wrong, bail out. + * This driver can safely ignore the receiver notifications, + * so ignore those reports too. + */ + dev_err(&hdev->dev, "%s: invalid device index:%d\n", + __func__, dj_report->device_index); + return false; + } + + spin_lock_irqsave(&djrcv_dev->lock, flags); + + if (!djrcv_dev->paired_dj_devices[device_index]) + /* received an event for an unknown device, bail out */ + goto out; + + logi_dj_recv_forward_hidpp(djrcv_dev->paired_dj_devices[device_index], + data, size); + +out: + spin_unlock_irqrestore(&djrcv_dev->lock, flags); + + return false; +} + +static int logi_dj_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, + int size) +{ + dbg_hid("%s, size:%d\n", __func__, size); + + switch (data[0]) { + case REPORT_ID_DJ_SHORT: + return logi_dj_dj_event(hdev, report, data, size); + case REPORT_ID_HIDPP_SHORT: + /* intentional fallthrough */ + case REPORT_ID_HIDPP_LONG: + return logi_dj_hidpp_event(hdev, report, data, size); + } + + return false; +} + static int logi_dj_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -714,9 +979,6 @@ static int logi_dj_probe(struct hid_device *hdev, struct dj_receiver_dev *djrcv_dev; int retval; - if (is_dj_device((struct dj_device *)hdev->driver_data)) - return -ENODEV; - dbg_hid("%s called for ifnum %d\n", __func__, intf->cur_altsetting->desc.bInterfaceNumber); @@ -869,22 +1131,6 @@ static void logi_dj_remove(struct hid_device *hdev) hid_set_drvdata(hdev, NULL); } -static int logi_djdevice_probe(struct hid_device *hdev, - const struct hid_device_id *id) -{ - int ret; - struct dj_device *dj_dev = hdev->driver_data; - - if (!is_dj_device(dj_dev)) - return -ENODEV; - - ret = hid_parse(hdev); - if (!ret) - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); - - return ret; -} - static const struct hid_device_id logi_dj_receivers[] = { {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)}, @@ -906,51 +1152,8 @@ static struct hid_driver logi_djreceiver_driver = { #endif }; +module_hid_driver(logi_djreceiver_driver); -static const struct hid_device_id logi_dj_devices[] = { - {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, - USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)}, - {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, - USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)}, - {} -}; - -static struct hid_driver logi_djdevice_driver = { - .name = "logitech-djdevice", - .id_table = logi_dj_devices, - .probe = logi_djdevice_probe, -}; - - -static int __init logi_dj_init(void) -{ - int retval; - - dbg_hid("Logitech-DJ:%s\n", __func__); - - retval = hid_register_driver(&logi_djreceiver_driver); - if (retval) - return retval; - - retval = hid_register_driver(&logi_djdevice_driver); - if (retval) - hid_unregister_driver(&logi_djreceiver_driver); - - return retval; - -} - -static void __exit logi_dj_exit(void) -{ - dbg_hid("Logitech-DJ:%s\n", __func__); - - hid_unregister_driver(&logi_djdevice_driver); - hid_unregister_driver(&logi_djreceiver_driver); - -} - -module_init(logi_dj_init); -module_exit(logi_dj_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Logitech"); MODULE_AUTHOR("Nestor Lopez Casado"); diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h deleted file mode 100644 index daeb0aa4bee9..000000000000 --- a/drivers/hid/hid-logitech-dj.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef __HID_LOGITECH_DJ_H -#define __HID_LOGITECH_DJ_H - -/* - * HID driver for Logitech Unifying
receivers - * - * Copyright (c) 2011 Logitech - */ - -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/kfifo.h> - -#define DJ_MAX_PAIRED_DEVICES 6 -#define DJ_MAX_NUMBER_NOTIFICATIONS 8 -#define DJ_RECEIVER_INDEX 0 -#define DJ_DEVICE_INDEX_MIN 1 -#define DJ_DEVICE_INDEX_MAX 6 - -#define DJREPORT_SHORT_LENGTH 15 -#define DJREPORT_LONG_LENGTH 32 - -#define REPORT_ID_DJ_SHORT 0x20 -#define REPORT_ID_DJ_LONG 0x21 - -#define REPORT_TYPE_RFREPORT_FIRST 0x01 -#define REPORT_TYPE_RFREPORT_LAST 0x1F - -/* Command Switch to DJ mode */ -#define REPORT_TYPE_CMD_SWITCH 0x80 -#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00 -#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01 -#define TIMEOUT_NO_KEEPALIVE 0x00 - -/* Command to Get the list of Paired devices */ -#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81 - -/* Device Paired Notification */ -#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41 -#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01 -#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02 -#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00 -#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01 -#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02 -#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03 - -/* Device Un-Paired Notification */ -#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40 - - -/* Connection Status Notification */ -#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42 -#define CONNECTION_STATUS_PARAM_STATUS 0x00 -#define STATUS_LINKLOSS 0x01 - -/* Error Notification */ -#define REPORT_TYPE_NOTIF_ERROR 0x7F -#define NOTIF_ERROR_PARAM_ETYPE 0x00 -#define ETYPE_KEEPALIVE_TIMEOUT 0x01 - -/* supported DJ HID && RF report types */ -#define REPORT_TYPE_KEYBOARD 0x01 -#define REPORT_TYPE_MOUSE 0x02 -#define REPORT_TYPE_CONSUMER_CONTROL 0x03 -#define REPORT_TYPE_SYSTEM_CONTROL 0x04 -#define REPORT_TYPE_MEDIA_CENTER 0x08 -#define REPORT_TYPE_LEDS 0x0E - -/* RF Report types bitfield */ -#define STD_KEYBOARD 0x00000002 -#define STD_MOUSE 0x00000004 -#define MULTIMEDIA 0x00000008 -#define POWER_KEYS 0x00000010 -#define MEDIA_CENTER 0x00000100 -#define KBD_LEDS 0x00004000 - -struct dj_report { - u8 report_id; - u8 device_index; - u8 report_type; - u8 report_params[DJREPORT_SHORT_LENGTH - 3]; -}; - -struct dj_receiver_dev { - struct hid_device *hdev; - struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES + - DJ_DEVICE_INDEX_MIN]; - struct work_struct work; - struct kfifo notif_fifo; - spinlock_t lock; - bool querying_devices; -}; - -struct dj_device { - struct hid_device *hdev; - struct dj_receiver_dev *dj_receiver_dev; - u32 reports_supported; - u8 device_index; -}; - -/** - * is_dj_device - know if the given dj_device is not the receiver. - * @dj_dev: the dj device to test - * - * This macro tests if a struct dj_device pointer is a device created - * by the bus enumarator. 
- */ -#define is_dj_device(dj_dev) \ - (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent) - -#endif diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c new file mode 100644 index 000000000000..2f420c0b6609 --- /dev/null +++ b/drivers/hid/hid-logitech-hidpp.c @@ -0,0 +1,1241 @@ +/* + * HIDPP protocol for Logitech Unifying receivers + * + * Copyright (c) 2011 Logitech (c) + * Copyright (c) 2012-2013 Google (c) + * Copyright (c) 2013-2014 Red Hat Inc. + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/kfifo.h> +#include <linux/input/mt.h> +#include <asm/unaligned.h> +#include "hid-ids.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); +MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>"); + +#define REPORT_ID_HIDPP_SHORT 0x10 +#define REPORT_ID_HIDPP_LONG 0x11 + +#define HIDPP_REPORT_SHORT_LENGTH 7 +#define HIDPP_REPORT_LONG_LENGTH 20 + +#define HIDPP_QUIRK_CLASS_WTP BIT(0) + +/* bits 1..20 are reserved for classes */ +#define HIDPP_QUIRK_DELAYED_INIT BIT(21) +#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) +#define HIDPP_QUIRK_MULTI_INPUT BIT(23) + +/* + * There are two hidpp protocols in use: the first version, hidpp10, is known + * as the register access protocol or RAP; the second version, hidpp20, is + * known as the feature access protocol or FAP. + * + * Most older devices (including the Unifying usb receiver) use the RAP protocol + * whereas most newer devices use the FAP protocol. Both protocols are + * compatible with the underlying transport, which could be usb, Unifying, or + * bluetooth. The message lengths are defined by the hid vendor specific report + * descriptor for the HIDPP_SHORT report type (total message length 7 bytes) and + * the HIDPP_LONG report type (total message length 20 bytes). + * + * The RAP protocol uses both report types, whereas the FAP only uses HIDPP_LONG + * messages. The Unifying receiver itself responds to RAP messages (device index + * is 0xFF for the receiver), and all messages (short or long) with a device + * index between 1 and 6 are passed untouched to the corresponding paired + * Unifying device. + * + * The paired device can be RAP or FAP; it will receive the message untouched + * from the Unifying receiver.
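 * 
 * As an illustration of the layouts described above (an editorial sketch
 * following this file's struct hidpp_report, not an excerpt from the
 * Logitech specification): a short RAP read of the pairing-information
 * register, addressed to the receiver itself, is the 7-byte report
 * { 0x10, 0xff, 0x83, 0xb5, p0, p1, p2 }, while every FAP exchange is a
 * 20-byte 0x11 report carrying a feature index, a function/client id and
 * 16 parameter bytes.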
+ */ + +struct fap { + u8 feature_index; + u8 funcindex_clientid; + u8 params[HIDPP_REPORT_LONG_LENGTH - 4U]; +}; + +struct rap { + u8 sub_id; + u8 reg_address; + u8 params[HIDPP_REPORT_LONG_LENGTH - 4U]; +}; + +struct hidpp_report { + u8 report_id; + u8 device_index; + union { + struct fap fap; + struct rap rap; + u8 rawbytes[sizeof(struct fap)]; + }; +} __packed; + +struct hidpp_device { + struct hid_device *hid_dev; + struct mutex send_mutex; + void *send_receive_buf; + wait_queue_head_t wait; + bool answer_available; + u8 protocol_major; + u8 protocol_minor; + + void *private_data; + + struct work_struct work; + struct kfifo delayed_work_fifo; + atomic_t connected; + struct input_dev *delayed_input; + + unsigned long quirks; +}; + + +#define HIDPP_ERROR 0x8f +#define HIDPP_ERROR_SUCCESS 0x00 +#define HIDPP_ERROR_INVALID_SUBID 0x01 +#define HIDPP_ERROR_INVALID_ADRESS 0x02 +#define HIDPP_ERROR_INVALID_VALUE 0x03 +#define HIDPP_ERROR_CONNECT_FAIL 0x04 +#define HIDPP_ERROR_TOO_MANY_DEVICES 0x05 +#define HIDPP_ERROR_ALREADY_EXISTS 0x06 +#define HIDPP_ERROR_BUSY 0x07 +#define HIDPP_ERROR_UNKNOWN_DEVICE 0x08 +#define HIDPP_ERROR_RESOURCE_ERROR 0x09 +#define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a +#define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b +#define HIDPP_ERROR_WRONG_PIN_CODE 0x0c + +static void hidpp_connect_event(struct hidpp_device *hidpp_dev); + +static int __hidpp_send_report(struct hid_device *hdev, + struct hidpp_report *hidpp_report) +{ + int fields_count, ret; + + switch (hidpp_report->report_id) { + case REPORT_ID_HIDPP_SHORT: + fields_count = HIDPP_REPORT_SHORT_LENGTH; + break; + case REPORT_ID_HIDPP_LONG: + fields_count = HIDPP_REPORT_LONG_LENGTH; + break; + default: + return -ENODEV; + } + + /* + * set the device_index as the receiver, it will be overwritten by + * hid_hw_request if needed + */ + hidpp_report->device_index = 0xff; + + ret = hid_hw_raw_request(hdev, hidpp_report->report_id, + (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT, + HID_REQ_SET_REPORT); + + return ret == fields_count ? 0 : -1; +} + +/** + * hidpp_send_message_sync() returns 0 in case of success, and something else + * in case of a failure. + * - If 'something else' is positive, that means that an error has been raised + * by the protocol itself. + * - If 'something else' is negative, that means that we had a classic error + * (-ENOMEM, -EPIPE, etc...)
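 *
 * A hypothetical caller (illustrative only, not part of this patch) would
 * therefore check both directions:
 *
 *	ret = hidpp_send_message_sync(hidpp, &request, &response);
 *	if (ret < 0)
 *		return ret;	/* transport failure, e.g. -ETIMEDOUT */
 *	if (ret > 0)
 *		return -EPROTO;	/* ret holds a HIDPP_ERROR_* protocol code */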
+ */ +static int hidpp_send_message_sync(struct hidpp_device *hidpp, + struct hidpp_report *message, + struct hidpp_report *response) +{ + int ret; + + mutex_lock(&hidpp->send_mutex); + + hidpp->send_receive_buf = response; + hidpp->answer_available = false; + + /* + * So that we can later validate the answer when it arrives + * in hidpp_raw_event + */ + *response = *message; + + ret = __hidpp_send_report(hidpp->hid_dev, message); + + if (ret) { + dbg_hid("__hidpp_send_report returned err: %d\n", ret); + memset(response, 0, sizeof(struct hidpp_report)); + goto exit; + } + + if (!wait_event_timeout(hidpp->wait, hidpp->answer_available, + 5*HZ)) { + dbg_hid("%s:timeout waiting for response\n", __func__); + memset(response, 0, sizeof(struct hidpp_report)); + ret = -ETIMEDOUT; + } + + if (response->report_id == REPORT_ID_HIDPP_SHORT && + response->fap.feature_index == HIDPP_ERROR) { + ret = response->fap.params[1]; + dbg_hid("__hidpp_send_report got hidpp error %02X\n", ret); + goto exit; + } + +exit: + mutex_unlock(&hidpp->send_mutex); + return ret; + +} + +static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp, + u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count, + struct hidpp_report *response) +{ + struct hidpp_report *message; + int ret; + + if (param_count > sizeof(message->fap.params)) + return -EINVAL; + + message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); + if (!message) + return -ENOMEM; + message->report_id = REPORT_ID_HIDPP_LONG; + message->fap.feature_index = feat_index; + message->fap.funcindex_clientid = funcindex_clientid; + memcpy(&message->fap.params, params, param_count); + + ret = hidpp_send_message_sync(hidpp, message, response); + kfree(message); + return ret; +} + +static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, + u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count, + struct hidpp_report *response) +{ + struct hidpp_report *message; + int ret; + + if ((report_id != REPORT_ID_HIDPP_SHORT) && + (report_id != REPORT_ID_HIDPP_LONG)) + return -EINVAL; + + if (param_count > sizeof(message->rap.params)) + return -EINVAL; + + message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); + if (!message) + return -ENOMEM; + message->report_id = report_id; + message->rap.sub_id = sub_id; + message->rap.reg_address = reg_address; + memcpy(&message->rap.params, params, param_count); + + ret = hidpp_send_message_sync(hidpp_dev, message, response); + kfree(message); + return ret; +} + +static void delayed_work_cb(struct work_struct *work) +{ + struct hidpp_device *hidpp = container_of(work, struct hidpp_device, + work); + hidpp_connect_event(hidpp); +} + +static inline bool hidpp_match_answer(struct hidpp_report *question, + struct hidpp_report *answer) +{ + return (answer->fap.feature_index == question->fap.feature_index) && + (answer->fap.funcindex_clientid == question->fap.funcindex_clientid); +} + +static inline bool hidpp_match_error(struct hidpp_report *question, + struct hidpp_report *answer) +{ + return (answer->fap.feature_index == HIDPP_ERROR) && + (answer->fap.funcindex_clientid == question->fap.feature_index) && + (answer->fap.params[0] == question->fap.funcindex_clientid); +} + +static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) +{ + return (report->report_id == REPORT_ID_HIDPP_SHORT) && + (report->rap.sub_id == 0x41); +} + +/* -------------------------------------------------------------------------- */ +/* HID++ 1.0 commands */ +/*
-------------------------------------------------------------------------- */ + +#define HIDPP_SET_REGISTER 0x80 +#define HIDPP_GET_REGISTER 0x81 +#define HIDPP_SET_LONG_REGISTER 0x82 +#define HIDPP_GET_LONG_REGISTER 0x83 + +#define HIDPP_REG_PAIRING_INFORMATION 0xB5 +#define DEVICE_NAME 0x40 + +static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev) +{ + struct hidpp_report response; + int ret; + /* hid-logitech-dj is in charge of setting the right device index */ + u8 params[1] = { DEVICE_NAME }; + char *name; + int len; + + ret = hidpp_send_rap_command_sync(hidpp_dev, + REPORT_ID_HIDPP_SHORT, + HIDPP_GET_LONG_REGISTER, + HIDPP_REG_PAIRING_INFORMATION, + params, 1, &response); + if (ret) + return NULL; + + len = response.rap.params[1]; + + if (2 + len > sizeof(response.rap.params)) + return NULL; + + name = kzalloc(len + 1, GFP_KERNEL); + if (!name) + return NULL; + + memcpy(name, &response.rap.params[2], len); + return name; +} + +/* -------------------------------------------------------------------------- */ +/* 0x0000: Root */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_ROOT 0x0000 +#define HIDPP_PAGE_ROOT_IDX 0x00 + +#define CMD_ROOT_GET_FEATURE 0x01 +#define CMD_ROOT_GET_PROTOCOL_VERSION 0x11 + +static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature, + u8 *feature_index, u8 *feature_type) +{ + struct hidpp_report response; + int ret; + u8 params[2] = { feature >> 8, feature & 0x00FF }; + + ret = hidpp_send_fap_command_sync(hidpp, + HIDPP_PAGE_ROOT_IDX, + CMD_ROOT_GET_FEATURE, + params, 2, &response); + if (ret) + return ret; + + *feature_index = response.fap.params[0]; + *feature_type = response.fap.params[1]; + + return ret; +} + +static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) +{ + struct hidpp_report response; + int ret; + + ret = hidpp_send_fap_command_sync(hidpp, + HIDPP_PAGE_ROOT_IDX, + CMD_ROOT_GET_PROTOCOL_VERSION, + NULL, 0, &response); + + if (ret == HIDPP_ERROR_INVALID_SUBID) { + hidpp->protocol_major = 1; + hidpp->protocol_minor = 0; + return 0; + } + + /* the device might not be connected */ + if (ret == HIDPP_ERROR_RESOURCE_ERROR) + return -EIO; + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + hidpp->protocol_major = response.fap.params[0]; + hidpp->protocol_minor = response.fap.params[1]; + + return ret; +} + +static bool hidpp_is_connected(struct hidpp_device *hidpp) +{ + int ret; + + ret = hidpp_root_get_protocol_version(hidpp); + if (!ret) + hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); + return ret == 0; +} + +/* -------------------------------------------------------------------------- */ +/* 0x0005: GetDeviceNameType */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005 + +#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x01 +#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x11 +#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x21 + +static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp, + u8 feature_index, u8 *nameLength) +{ + struct hidpp_report response; + int ret; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_GET_DEVICE_NAME_TYPE_GET_COUNT, NULL, 0, &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return 
-EPROTO; + } + if (ret) + return ret; + + *nameLength = response.fap.params[0]; + + return ret; +} + +static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp, + u8 feature_index, u8 char_index, char *device_name, int len_buf) +{ + struct hidpp_report response; + int ret, i; + int count; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME, &char_index, 1, + &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + if (response.report_id == REPORT_ID_HIDPP_LONG) + count = HIDPP_REPORT_LONG_LENGTH - 4; + else + count = HIDPP_REPORT_SHORT_LENGTH - 4; + + if (len_buf < count) + count = len_buf; + + for (i = 0; i < count; i++) + device_name[i] = response.fap.params[i]; + + return count; +} + +static char *hidpp_get_device_name(struct hidpp_device *hidpp) +{ + u8 feature_type; + u8 feature_index; + u8 __name_length; + char *name; + unsigned index = 0; + int ret; + + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_GET_DEVICE_NAME_TYPE, + &feature_index, &feature_type); + if (ret) + return NULL; + + ret = hidpp_devicenametype_get_count(hidpp, feature_index, + &__name_length); + if (ret) + return NULL; + + name = kzalloc(__name_length + 1, GFP_KERNEL); + if (!name) + return NULL; + + while (index < __name_length) { + ret = hidpp_devicenametype_get_device_name(hidpp, + feature_index, index, name + index, + __name_length - index); + if (ret <= 0) { + kfree(name); + return NULL; + } + index += ret; + } + + return name; +} + +/* -------------------------------------------------------------------------- */ +/* 0x6100: TouchPadRawXY */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100 + +#define CMD_TOUCHPAD_GET_RAW_INFO 0x01 +#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x21 + +#define EVENT_TOUCHPAD_RAW_XY 0x00 + +#define TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT 0x01 +#define TOUCHPAD_RAW_XY_ORIGIN_UPPER_LEFT 0x03 + +struct hidpp_touchpad_raw_info { + u16 x_size; + u16 y_size; + u8 z_range; + u8 area_range; + u8 timestamp_unit; + u8 maxcontacts; + u8 origin; + u16 res; +}; + +struct hidpp_touchpad_raw_xy_finger { + u8 contact_type; + u8 contact_status; + u16 x; + u16 y; + u8 z; + u8 area; + u8 finger_id; +}; + +struct hidpp_touchpad_raw_xy { + u16 timestamp; + struct hidpp_touchpad_raw_xy_finger fingers[2]; + u8 spurious_flag; + u8 end_of_frame; + u8 finger_count; + u8 button; +}; + +static int hidpp_touchpad_get_raw_info(struct hidpp_device *hidpp, + u8 feature_index, struct hidpp_touchpad_raw_info *raw_info) +{ + struct hidpp_report response; + int ret; + u8 *params = (u8 *)response.fap.params; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_TOUCHPAD_GET_RAW_INFO, NULL, 0, &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + raw_info->x_size = get_unaligned_be16(¶ms[0]); + raw_info->y_size = get_unaligned_be16(¶ms[2]); + raw_info->z_range = params[4]; + raw_info->area_range = params[5]; + raw_info->maxcontacts = params[7]; + raw_info->origin = params[8]; + /* res is given in unit per inch */ + raw_info->res = get_unaligned_be16(¶ms[13]) * 2 / 51; + + return ret; +} + +static int hidpp_touchpad_set_raw_report_state(struct hidpp_device *hidpp_dev, + u8 feature_index, bool send_raw_reports, + bool sensor_enhanced_settings) +{ + struct 
hidpp_report response; + + /* + * Params: + * bit 0 - enable raw + * bit 1 - 16bit Z, no area + * bit 2 - enhanced sensitivity + * bit 3 - width, height (4 bits each) instead of area + * bit 4 - send raw + gestures (degrades smoothness) + * remaining bits - reserved + */ + u8 params = send_raw_reports | (sensor_enhanced_settings << 2); + + return hidpp_send_fap_command_sync(hidpp_dev, feature_index, + CMD_TOUCHPAD_SET_RAW_REPORT_STATE, ¶ms, 1, &response); +} + +static void hidpp_touchpad_touch_event(u8 *data, + struct hidpp_touchpad_raw_xy_finger *finger) +{ + u8 x_m = data[0] << 2; + u8 y_m = data[2] << 2; + + finger->x = x_m << 6 | data[1]; + finger->y = y_m << 6 | data[3]; + + finger->contact_type = data[0] >> 6; + finger->contact_status = data[2] >> 6; + + finger->z = data[4]; + finger->area = data[5]; + finger->finger_id = data[6] >> 4; +} + +static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev, + u8 *data, struct hidpp_touchpad_raw_xy *raw_xy) +{ + memset(raw_xy, 0, sizeof(struct hidpp_touchpad_raw_xy)); + raw_xy->end_of_frame = data[8] & 0x01; + raw_xy->spurious_flag = (data[8] >> 1) & 0x01; + raw_xy->finger_count = data[15] & 0x0f; + raw_xy->button = (data[8] >> 2) & 0x01; + + if (raw_xy->finger_count) { + hidpp_touchpad_touch_event(&data[2], &raw_xy->fingers[0]); + hidpp_touchpad_touch_event(&data[9], &raw_xy->fingers[1]); + } +} + +/* ************************************************************************** */ +/* */ +/* Device Support */ +/* */ +/* ************************************************************************** */ + +/* -------------------------------------------------------------------------- */ +/* Touchpad HID++ devices */ +/* -------------------------------------------------------------------------- */ + +#define WTP_MANUAL_RESOLUTION 39 + +struct wtp_data { + struct input_dev *input; + u16 x_size, y_size; + u8 finger_count; + u8 mt_feature_index; + u8 button_feature_index; + u8 maxcontacts; + bool flip_y; + unsigned int resolution; +}; + +static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + + if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && + (field->application == HID_GD_KEYBOARD)) + return 0; + + return -1; +} + +static void wtp_populate_input(struct hidpp_device *hidpp, + struct input_dev *input_dev, bool origin_is_hid_core) +{ + struct wtp_data *wd = hidpp->private_data; + + if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core) + /* this is the generic hid-input call */ + return; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(EV_KEY, input_dev->evbit); + __clear_bit(EV_REL, input_dev->evbit); + __clear_bit(EV_LED, input_dev->evbit); + + input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, wd->x_size, 0, 0); + input_abs_set_res(input_dev, ABS_MT_POSITION_X, wd->resolution); + input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, wd->y_size, 0, 0); + input_abs_set_res(input_dev, ABS_MT_POSITION_Y, wd->resolution); + + /* Max pressure is not given by the devices, pick one */ + input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 50, 0, 0); + + input_set_capability(input_dev, EV_KEY, BTN_LEFT); + + if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) + input_set_capability(input_dev, EV_KEY, BTN_RIGHT); + else + __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); + + input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER | + INPUT_MT_DROP_UNUSED); 
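+
+	/*
+	 * INPUT_MT_POINTER lets input_mt_sync_frame() provide single-pointer
+	 * emulation for non-multitouch userspace, and INPUT_MT_DROP_UNUSED
+	 * invalidates any slot that received no data in a frame, so lifted
+	 * fingers do not linger as stale contacts.
+	 */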
+ + wd->input = input_dev; +} + +static void wtp_touch_event(struct wtp_data *wd, + struct hidpp_touchpad_raw_xy_finger *touch_report) +{ + int slot; + + if (!touch_report->finger_id || touch_report->contact_type) + /* no actual data */ + return; + + slot = input_mt_get_slot_by_key(wd->input, touch_report->finger_id); + + input_mt_slot(wd->input, slot); + input_mt_report_slot_state(wd->input, MT_TOOL_FINGER, + touch_report->contact_status); + if (touch_report->contact_status) { + input_event(wd->input, EV_ABS, ABS_MT_POSITION_X, + touch_report->x); + input_event(wd->input, EV_ABS, ABS_MT_POSITION_Y, + wd->flip_y ? wd->y_size - touch_report->y : + touch_report->y); + input_event(wd->input, EV_ABS, ABS_MT_PRESSURE, + touch_report->area); + } +} + +static void wtp_send_raw_xy_event(struct hidpp_device *hidpp, + struct hidpp_touchpad_raw_xy *raw) +{ + struct wtp_data *wd = hidpp->private_data; + int i; + + for (i = 0; i < 2; i++) + wtp_touch_event(wd, &(raw->fingers[i])); + + if (raw->end_of_frame && + !(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS)) + input_event(wd->input, EV_KEY, BTN_LEFT, raw->button); + + if (raw->end_of_frame || raw->finger_count <= 2) { + input_mt_sync_frame(wd->input); + input_sync(wd->input); + } +} + +static int wtp_mouse_raw_xy_event(struct hidpp_device *hidpp, u8 *data) +{ + struct wtp_data *wd = hidpp->private_data; + u8 c1_area = ((data[7] & 0xf) * (data[7] & 0xf) + + (data[7] >> 4) * (data[7] >> 4)) / 2; + u8 c2_area = ((data[13] & 0xf) * (data[13] & 0xf) + + (data[13] >> 4) * (data[13] >> 4)) / 2; + struct hidpp_touchpad_raw_xy raw = { + .timestamp = data[1], + .fingers = { + { + .contact_type = 0, + .contact_status = !!data[7], + .x = get_unaligned_le16(&data[3]), + .y = get_unaligned_le16(&data[5]), + .z = c1_area, + .area = c1_area, + .finger_id = data[2], + }, { + .contact_type = 0, + .contact_status = !!data[13], + .x = get_unaligned_le16(&data[9]), + .y = get_unaligned_le16(&data[11]), + .z = c2_area, + .area = c2_area, + .finger_id = data[8], + } + }, + .finger_count = wd->maxcontacts, + .spurious_flag = 0, + .end_of_frame = (data[0] >> 7) == 0, + .button = data[0] & 0x01, + }; + + wtp_send_raw_xy_event(hidpp, &raw); + + return 1; +} + +static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct wtp_data *wd = hidpp->private_data; + struct hidpp_report *report = (struct hidpp_report *)data; + struct hidpp_touchpad_raw_xy raw; + + if (!wd || !wd->input) + return 1; + + switch (data[0]) { + case 0x02: + if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { + input_event(wd->input, EV_KEY, BTN_LEFT, + !!(data[1] & 0x01)); + input_event(wd->input, EV_KEY, BTN_RIGHT, + !!(data[1] & 0x02)); + input_sync(wd->input); + } else { + if (size < 21) + return 1; + return wtp_mouse_raw_xy_event(hidpp, &data[7]); + } + case REPORT_ID_HIDPP_LONG: + if ((report->fap.feature_index != wd->mt_feature_index) || + (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) + return 1; + hidpp_touchpad_raw_xy_event(hidpp, data + 4, &raw); + + wtp_send_raw_xy_event(hidpp, &raw); + return 0; + } + + return 0; +} + +static int wtp_get_config(struct hidpp_device *hidpp) +{ + struct wtp_data *wd = hidpp->private_data; + struct hidpp_touchpad_raw_info raw_info = {0}; + u8 feature_type; + int ret; + + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_RAW_XY, + &wd->mt_feature_index, &feature_type); + if (ret) + /* means that the device is not powered up */ + return ret; + + ret = 
hidpp_touchpad_get_raw_info(hidpp, wd->mt_feature_index, + &raw_info); + if (ret) + return ret; + + wd->x_size = raw_info.x_size; + wd->y_size = raw_info.y_size; + wd->maxcontacts = raw_info.maxcontacts; + wd->flip_y = raw_info.origin == TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT; + wd->resolution = raw_info.res; + if (!wd->resolution) + wd->resolution = WTP_MANUAL_RESOLUTION; + + return 0; +} + +static int wtp_allocate(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct wtp_data *wd; + + wd = devm_kzalloc(&hdev->dev, sizeof(struct wtp_data), + GFP_KERNEL); + if (!wd) + return -ENOMEM; + + hidpp->private_data = wd; + + return 0; +}; + +static void wtp_connect(struct hid_device *hdev, bool connected) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct wtp_data *wd = hidpp->private_data; + int ret; + + if (!connected) + return; + + if (!wd->x_size) { + ret = wtp_get_config(hidpp); + if (ret) { + hid_err(hdev, "Cannot get wtp config: %d\n", ret); + return; + } + } + + hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index, + true, true); +} + +/* -------------------------------------------------------------------------- */ +/* Generic HID++ devices */ +/* -------------------------------------------------------------------------- */ + +static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + + if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) + return wtp_input_mapping(hdev, hi, field, usage, bit, max); + + return 0; +} + +static void hidpp_populate_input(struct hidpp_device *hidpp, + struct input_dev *input, bool origin_is_hid_core) +{ + if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) + wtp_populate_input(hidpp, input, origin_is_hid_core); +} + +static void hidpp_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct input_dev *input = hidinput->input; + + hidpp_populate_input(hidpp, input, true); +} + +static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, + int size) +{ + struct hidpp_report *question = hidpp->send_receive_buf; + struct hidpp_report *answer = hidpp->send_receive_buf; + struct hidpp_report *report = (struct hidpp_report *)data; + + /* + * If the mutex is locked then we have a pending answer from a + * previously sent command + */ + if (unlikely(mutex_is_locked(&hidpp->send_mutex))) { + /* + * Check for a correct hidpp20 answer or the corresponding + * error + */ + if (hidpp_match_answer(question, report) || + hidpp_match_error(question, report)) { + *answer = *report; + hidpp->answer_available = true; + wake_up(&hidpp->wait); + /* + * This was an answer to a command that this driver sent. + * We return 1 to hid-core to avoid forwarding the + * command upstream as it has been treated by the driver + */ + + return 1; + } + } + + if (unlikely(hidpp_report_is_connect_event(report))) { + atomic_set(&hidpp->connected, + !(report->rap.params[0] & (1 << 6))); + if ((hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) && + (schedule_work(&hidpp->work) == 0)) + dbg_hid("%s: connect event already queued\n", __func__); + return 1; + } + + if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) + return wtp_raw_event(hidpp->hid_dev, data, size); + + return 0; +} + +static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report, + u8 *data, int size) +{ + struct
hidpp_device *hidpp = hid_get_drvdata(hdev); + + switch (data[0]) { + case REPORT_ID_HIDPP_LONG: + if (size != HIDPP_REPORT_LONG_LENGTH) { + hid_err(hdev, "received hid++ report of bad size (%d)", + size); + return 1; + } + return hidpp_raw_hidpp_event(hidpp, data, size); + case REPORT_ID_HIDPP_SHORT: + if (size != HIDPP_REPORT_SHORT_LENGTH) { + hid_err(hdev, "received hid++ report of bad size (%d)", + size); + return 1; + } + return hidpp_raw_hidpp_event(hidpp, data, size); + } + + if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) + return wtp_raw_event(hdev, data, size); + + return 0; +} + +static void hidpp_overwrite_name(struct hid_device *hdev, bool use_unifying) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + char *name; + + if (use_unifying) + /* + * the device is connected through a Unifying receiver, and + * might not be connected yet. + * Ask the receiver for its name. + */ + name = hidpp_get_unifying_name(hidpp); + else + name = hidpp_get_device_name(hidpp); + + if (!name) + hid_err(hdev, "unable to retrieve the name of the device"); + else + snprintf(hdev->name, sizeof(hdev->name), "%s", name); + + kfree(name); +} + +static int hidpp_input_open(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + return hid_hw_open(hid); +} + +static void hidpp_input_close(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + hid_hw_close(hid); +} + +static struct input_dev *hidpp_allocate_input(struct hid_device *hdev) +{ + struct input_dev *input_dev = devm_input_allocate_device(&hdev->dev); + + if (!input_dev) + return NULL; + + input_set_drvdata(input_dev, hdev); + input_dev->open = hidpp_input_open; + input_dev->close = hidpp_input_close; + + input_dev->name = hdev->name; + input_dev->phys = hdev->phys; + input_dev->uniq = hdev->uniq; + input_dev->id.bustype = hdev->bus; + input_dev->id.vendor = hdev->vendor; + input_dev->id.product = hdev->product; + input_dev->id.version = hdev->version; + input_dev->dev.parent = &hdev->dev; + + return input_dev; +} + +static void hidpp_connect_event(struct hidpp_device *hidpp) +{ + struct hid_device *hdev = hidpp->hid_dev; + int ret = 0; + bool connected = atomic_read(&hidpp->connected); + struct input_dev *input; + char *name, *devm_name; + + if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) + wtp_connect(hdev, connected); + + if (!connected || hidpp->delayed_input) + return; + + if (!hidpp->protocol_major) { + ret = !hidpp_is_connected(hidpp); + if (ret) { + hid_err(hdev, "Cannot get the protocol version.\n"); + return; + } + } + + /* the device is already connected; we can ask for its name and + * protocol */ + hid_info(hdev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); + + input = hidpp_allocate_input(hdev); + if (!input) { + hid_err(hdev, "cannot allocate new input device: %d\n", ret); + return; + } + + name = hidpp_get_device_name(hidpp); + if (!name) { + hid_err(hdev, "unable to retrieve the name of the device"); + } else { + devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name); + if (devm_name) + input->name = devm_name; + kfree(name); + } + + hidpp_populate_input(hidpp, input, false); + + ret = input_register_device(input); + if (ret) + input_free_device(input); + + hidpp->delayed_input = input; +} + +static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct hidpp_device *hidpp; + int ret; + bool connected; + unsigned int connect_mask = HID_CONNECT_DEFAULT; + + hidpp = devm_kzalloc(&hdev->dev,
sizeof(struct hidpp_device), + GFP_KERNEL); + if (!hidpp) + return -ENOMEM; + + hidpp->hid_dev = hdev; + hid_set_drvdata(hdev, hidpp); + + hidpp->quirks = id->driver_data; + + if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) { + ret = wtp_allocate(hdev, id); + if (ret) + goto wtp_allocate_fail; + } + + INIT_WORK(&hidpp->work, delayed_work_cb); + mutex_init(&hidpp->send_mutex); + init_waitqueue_head(&hidpp->wait); + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "%s:parse failed\n", __func__); + goto hid_parse_fail; + } + + /* Allow incoming packets */ + hid_device_io_start(hdev); + + connected = hidpp_is_connected(hidpp); + if (id->group != HID_GROUP_LOGITECH_DJ_DEVICE) { + if (!connected) { + hid_err(hdev, "Device not connected"); + hid_device_io_stop(hdev); + goto hid_parse_fail; + } + + hid_info(hdev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); + } + + hidpp_overwrite_name(hdev, id->group == HID_GROUP_LOGITECH_DJ_DEVICE); + atomic_set(&hidpp->connected, connected); + + if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { + ret = wtp_get_config(hidpp); + if (ret) + goto hid_parse_fail; + } + + /* Block incoming packets */ + hid_device_io_stop(hdev); + + if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) + connect_mask &= ~HID_CONNECT_HIDINPUT; + + /* Re-enable hidinput for multi-input devices */ + if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) + connect_mask |= HID_CONNECT_HIDINPUT; + + ret = hid_hw_start(hdev, connect_mask); + if (ret) { + hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); + goto hid_hw_start_fail; + } + + if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) { + /* Allow incoming packets */ + hid_device_io_start(hdev); + + hidpp_connect_event(hidpp); + } + + return ret; + +hid_hw_start_fail: +hid_parse_fail: + cancel_work_sync(&hidpp->work); + mutex_destroy(&hidpp->send_mutex); +wtp_allocate_fail: + hid_set_drvdata(hdev, NULL); + return ret; +} + +static void hidpp_remove(struct hid_device *hdev) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + + cancel_work_sync(&hidpp->work); + mutex_destroy(&hidpp->send_mutex); + hid_hw_stop(hdev); +} + +static const struct hid_device_id hidpp_devices[] = { + { /* wireless touchpad */ + HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, + USB_VENDOR_ID_LOGITECH, 0x4011), + .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT | + HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS }, + { /* wireless touchpad T650 */ + HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, + USB_VENDOR_ID_LOGITECH, 0x4101), + .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, + { /* wireless touchpad T651 */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_T651), + .driver_data = HIDPP_QUIRK_CLASS_WTP }, + { /* Keyboard TK820 */ + HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, + USB_VENDOR_ID_LOGITECH, 0x4102), + .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT | + HIDPP_QUIRK_CLASS_WTP }, + + { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, + USB_VENDOR_ID_LOGITECH, HID_ANY_ID)}, + {} +}; + +MODULE_DEVICE_TABLE(hid, hidpp_devices); + +static struct hid_driver hidpp_driver = { + .name = "logitech-hidpp-device", + .id_table = hidpp_devices, + .probe = hidpp_probe, + .remove = hidpp_remove, + .raw_event = hidpp_raw_event, + .input_configured = hidpp_input_configured, + .input_mapping = hidpp_input_mapping, +}; + +module_hid_driver(hidpp_driver); diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 8ba17a946f2a..cacda43f6a6f 100644 
--- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -274,6 +274,8 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), .driver_data = MS_DUPLICATE_USAGES }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3), + .driver_data = MS_HIDINPUT }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), .driver_data = MS_PRESENTER }, diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 51e25b9407f2..f65e78b46999 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -67,6 +67,7 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10) #define MT_QUIRK_HOVERING (1 << 11) #define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12) +#define MT_QUIRK_FORCE_GET_FEATURE (1 << 13) #define MT_INPUTMODE_TOUCHSCREEN 0x02 #define MT_INPUTMODE_TOUCHPAD 0x03 @@ -150,6 +151,7 @@ static void mt_post_parse(struct mt_device *td); #define MT_CLS_FLATFROG 0x0107 #define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108 #define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109 +#define MT_CLS_VTL 0x0110 #define MT_DEFAULT_MAXCONTACT 10 #define MT_MAX_MAXCONTACT 250 @@ -255,6 +257,11 @@ static struct mt_class mt_classes[] = { .sn_move = 2048, .maxcontacts = 40, }, + { .name = MT_CLS_VTL, + .quirks = MT_QUIRK_ALWAYS_VALID | + MT_QUIRK_CONTACT_CNT_ACCURATE | + MT_QUIRK_FORCE_GET_FEATURE, + }, { } }; @@ -809,6 +816,9 @@ static void mt_set_input_mode(struct hid_device *hdev) struct mt_device *td = hid_get_drvdata(hdev); struct hid_report *r; struct hid_report_enum *re; + struct mt_class *cls = &td->mtclass; + char *buf; + int report_len; if (td->inputmode < 0) return; @@ -816,6 +826,18 @@ static void mt_set_input_mode(struct hid_device *hdev) re = &(hdev->report_enum[HID_FEATURE_REPORT]); r = re->report_id_hash[td->inputmode]; if (r) { + if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { + report_len = hid_report_len(r); + buf = hid_alloc_report_buf(r, GFP_KERNEL); + if (!buf) { + hid_err(hdev, "failed to allocate buffer for report\n"); + return; + } + hid_hw_raw_request(hdev, r->id, buf, report_len, + HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); + kfree(buf); + } r->field[0]->value[td->inputmode_index] = td->inputmode_value; hid_hw_request(hdev, r, HID_REQ_SET_REPORT); } @@ -1281,6 +1303,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) }, + /* VTL panels */ + { .driver_data = MT_CLS_VTL, + MT_USB_DEVICE(USB_VENDOR_ID_VTL, + USB_DEVICE_ID_VTL_MULTITOUCH_FF3F) }, + /* Wistron panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_WISTRON, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c new file mode 100644 index 000000000000..2180e0789b76 --- /dev/null +++ b/drivers/hid/hid-plantronics.c @@ -0,0 +1,55 @@ +/* + * Plantronics USB HID Driver + * + * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com> + * Copyright (c) 2014 Terry Junge <terry.junge@plantronics.com> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include "hid-ids.h" + +#include <linux/hid.h> +#include <linux/module.h> + +static int plantronics_input_mapping(struct hid_device *hdev, + struct hid_input *hi, + struct hid_field *field, + struct hid_usage *usage, + unsigned long **bit, int *max) +{ + if (field->application == HID_CP_CONSUMERCONTROL + && (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) { + hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n", + usage->hid, field->application); + return 0; + } + + hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n", + usage->hid, field->application); + + return -1; +} + +static const struct hid_device_id plantronics_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, + { } +}; +MODULE_DEVICE_TABLE(hid, plantronics_devices); + +static struct hid_driver plantronics_driver = { + .name = "plantronics", + .id_table = plantronics_devices, + .input_mapping = plantronics_input_mapping, +}; +module_hid_driver(plantronics_driver); + +MODULE_AUTHOR("JD Cole <jd.cole@plantronics.com>"); +MODULE_AUTHOR("Terry Junge <terry.junge@plantronics.com>"); +MODULE_DESCRIPTION("Plantronics USB HID Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 3cccff73b9b9..b51200fe2f33 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -584,11 +584,15 @@ static int rmi_populate_f11(struct hid_device *hdev) bool has_query10 = false; bool has_query11; bool has_query12; + bool has_query27; + bool has_query28; + bool has_query36 = false; bool has_physical_props; bool has_gestures; bool has_rel; + bool has_data40 = false; unsigned x_size, y_size; - u16 query12_offset; + u16 query_offset; if (!data->f11.query_base_addr) { hid_err(hdev, "No 2D sensor found, giving up.\n"); @@ -604,6 +608,8 @@ static int rmi_populate_f11(struct hid_device *hdev) has_query9 = !!(buf[0] & BIT(3)); has_query11 = !!(buf[0] & BIT(4)); has_query12 = !!(buf[0] & BIT(5)); + has_query27 = !!(buf[0] & BIT(6)); + has_query28 = !!(buf[0] & BIT(7)); /* query 1 to get the max number of fingers */ ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf); @@ -626,43 +632,43 @@ static int rmi_populate_f11(struct hid_device *hdev) has_rel = !!(buf[0] & BIT(3)); has_gestures = !!(buf[0] & BIT(5)); + /* + * At least 4 queries are guaranteed to be present in F11 + * +1 for query 5 which is present since absolute events are + * reported and +1 for query 12. + */ + query_offset = 6; + + if (has_rel) + ++query_offset; /* query 6 is present */ + if (has_gestures) { /* query 8 to find out if query 10 exists */ - ret = rmi_read(hdev, data->f11.query_base_addr + 8, buf); + ret = rmi_read(hdev, + data->f11.query_base_addr + query_offset + 1, buf); if (ret) { hid_err(hdev, "can not read gesture information: %d.\n", ret); return ret; } has_query10 = !!(buf[0] & BIT(2)); - } - /* - * At least 4 queries are guaranteed to be present in F11 - * +1 for query 5 which is present since absolute events are - * reported and +1 for query 12. 
- */ - query12_offset = 6; - - if (has_rel) - ++query12_offset; /* query 6 is present */ - - if (has_gestures) - query12_offset += 2; /* query 7 and 8 are present */ + query_offset += 2; /* query 7 and 8 are present */ + } if (has_query9) - ++query12_offset; + ++query_offset; if (has_query10) - ++query12_offset; + ++query_offset; if (has_query11) - ++query12_offset; + ++query_offset; /* query 12 to know if the physical properties are reported */ if (has_query12) { ret = rmi_read(hdev, data->f11.query_base_addr - + query12_offset, buf); + + query_offset, buf); if (ret) { hid_err(hdev, "can not get query 12: %d.\n", ret); return ret; @@ -670,9 +676,10 @@ static int rmi_populate_f11(struct hid_device *hdev) has_physical_props = !!(buf[0] & BIT(5)); if (has_physical_props) { + query_offset += 1; ret = rmi_read_block(hdev, data->f11.query_base_addr - + query12_offset + 1, buf, 4); + + query_offset, buf, 4); if (ret) { hid_err(hdev, "can not read query 15-18: %d.\n", ret); @@ -687,9 +694,45 @@ static int rmi_populate_f11(struct hid_device *hdev) hid_info(hdev, "%s: size in mm: %d x %d\n", __func__, data->x_size_mm, data->y_size_mm); + + /* + * query 15 - 18 contain the size of the sensor + * and query 19 - 26 contain bezel dimensions + */ + query_offset += 12; + } + } + + if (has_query27) + ++query_offset; + + if (has_query28) { + ret = rmi_read(hdev, data->f11.query_base_addr + + query_offset, buf); + if (ret) { + hid_err(hdev, "can not get query 28: %d.\n", ret); + return ret; + } + + has_query36 = !!(buf[0] & BIT(6)); + } + + if (has_query36) { + query_offset += 2; + ret = rmi_read(hdev, data->f11.query_base_addr + + query_offset, buf); + if (ret) { + hid_err(hdev, "can not get query 36: %d.\n", ret); + return ret; } + + has_data40 = !!(buf[0] & BIT(5)); } + + if (has_data40) + data->f11.report_size += data->max_fingers * 2; + /* * retrieve the ctrl registers * the ctrl register has a size of 20 but a fw bug split it into 16 + 4, diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 6101816a7ddd..c29265055ac1 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -46,6 +46,7 @@ static void kone_profile_activated(struct kone_device *kone, uint new_profile) static void kone_profile_report(struct kone_device *kone, uint new_profile) { struct kone_roccat_report roccat_report; + roccat_report.event = kone_mouse_event_switch_profile; roccat_report.value = new_profile; roccat_report.key = 0; @@ -163,6 +164,7 @@ static int kone_set_settings(struct usb_device *usb_dev, struct kone_settings const *settings) { int retval; + retval = kone_send(usb_dev, kone_command_settings, settings, sizeof(struct kone_settings)); if (retval) @@ -387,7 +389,7 @@ static struct bin_attribute bin_attr_profile##number = { \ .read = kone_sysfs_read_profilex, \ .write = kone_sysfs_write_profilex, \ .private = &profile_numbers[number-1], \ -}; +} PROFILE_ATTR(1); PROFILE_ATTR(2); PROFILE_ATTR(3); @@ -456,6 +458,7 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev, static int kone_tcu_command(struct usb_device *usb_dev, int number) { unsigned char value; + value = number; return kone_send(usb_dev, kone_command_calibrate, &value, 1); } @@ -697,10 +700,8 @@ static int kone_init_specials(struct hid_device *hdev) == USB_INTERFACE_PROTOCOL_MOUSE) { kone = kzalloc(sizeof(*kone), GFP_KERNEL); - if (!kone) { - hid_err(hdev, "can't alloc device descriptor\n"); + if (!kone) return -ENOMEM; - } hid_set_drvdata(hdev, kone); retval = kone_init_kone_device_struct(usb_dev, kone); 
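The hid-rmi change above replaces the fixed query12_offset with a running query_offset that is advanced past each optional F11 query register before the next read, which is what lets the driver go on to the newly handled queries 27, 28 and 36. The accumulation pattern, written out as a stand-alone sketch (a hypothetical helper distilled from the hunks above, not code from the patch):

static u16 f11_offset_of_query12(bool has_rel, bool has_gestures,
				 bool has_query9, bool has_query10,
				 bool has_query11)
{
	u16 off = 6;		/* queries 0-5 are always present */

	if (has_rel)
		off++;		/* query 6 */
	if (has_gestures)
		off += 2;	/* queries 7 and 8 */
	if (has_query9)
		off++;
	if (has_query10)
		off++;
	if (has_query11)
		off++;
	return off;		/* register offset of query 12 */
}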
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index 69cca1476a0c..5632c54eadf0 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -7,7 +7,7 @@ * (This module is based on "hid-ortek".) * Copyright (c) 2012 Andreas Hübner * - * R.A.T.7, M.M.O.7 (USB gaming mice): + * R.A.T.7, R.A.T.9, M.M.O.7 (USB gaming mice): * Fixes the mode button which cycles through three constantly pressed * buttons. All three press events are mapped to one button and the * missing release event is generated immediately. @@ -179,6 +179,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_FIX_PS1000 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), .driver_data = SAITEK_RELEASE_MODE_RAT7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), + .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7), .driver_data = SAITEK_RELEASE_MODE_MMO7 }, { } diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index bc4269e559f1..31e9d2561106 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -798,6 +798,12 @@ union sixaxis_output_report_01 { __u8 buf[36]; }; +#define DS4_REPORT_0x02_SIZE 37 +#define DS4_REPORT_0x05_SIZE 32 +#define DS4_REPORT_0x11_SIZE 78 +#define DS4_REPORT_0x81_SIZE 7 +#define SIXAXIS_REPORT_0xF2_SIZE 18 + static spinlock_t sony_dev_list_lock; static LIST_HEAD(sony_device_list); static DEFINE_IDA(sony_device_id_allocator); @@ -811,6 +817,7 @@ struct sony_sc { struct work_struct state_worker; struct power_supply battery; int device_id; + __u8 *output_report_dmabuf; #ifdef CONFIG_SONY_FF __u8 left; @@ -1142,9 +1149,20 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev) static int sixaxis_set_operational_bt(struct hid_device *hdev) { - unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; - return hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), + static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; + __u8 *buf; + int ret; + + buf = kmemdup(report, sizeof(report), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + + kfree(buf); + + return ret; } /* @@ -1153,10 +1171,19 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev) */ static int dualshock4_set_operational_bt(struct hid_device *hdev) { - __u8 buf[37] = { 0 }; + __u8 *buf; + int ret; + + buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; - return hid_hw_raw_request(hdev, 0x02, buf, sizeof(buf), + ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + + kfree(buf); + + return ret; } static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS]) @@ -1471,9 +1498,7 @@ error_leds: static void sixaxis_state_worker(struct work_struct *work) { - struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); - int n; - union sixaxis_output_report_01 report = { + static const union sixaxis_output_report_01 default_report = { .buf = { 0x01, 0x00, 0xff, 0x00, 0xff, 0x00, @@ -1485,20 +1510,27 @@ static void sixaxis_state_worker(struct work_struct *work) 0x00, 0x00, 0x00, 0x00, 0x00 } }; + struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); + struct sixaxis_output_report *report = + (struct sixaxis_output_report *)sc->output_report_dmabuf; + int n; + + /* Initialize the report with default values */ + memcpy(report, &default_report, 
sizeof(struct sixaxis_output_report)); #ifdef CONFIG_SONY_FF - report.data.rumble.right_motor_on = sc->right ? 1 : 0; - report.data.rumble.left_motor_force = sc->left; + report->rumble.right_motor_on = sc->right ? 1 : 0; + report->rumble.left_motor_force = sc->left; #endif - report.data.leds_bitmap |= sc->led_state[0] << 1; - report.data.leds_bitmap |= sc->led_state[1] << 2; - report.data.leds_bitmap |= sc->led_state[2] << 3; - report.data.leds_bitmap |= sc->led_state[3] << 4; + report->leds_bitmap |= sc->led_state[0] << 1; + report->leds_bitmap |= sc->led_state[1] << 2; + report->leds_bitmap |= sc->led_state[2] << 3; + report->leds_bitmap |= sc->led_state[3] << 4; /* Set flag for all leds off, required for 3rd party INTEC controller */ - if ((report.data.leds_bitmap & 0x1E) == 0) - report.data.leds_bitmap |= 0x20; + if ((report->leds_bitmap & 0x1E) == 0) + report->leds_bitmap |= 0x20; /* * The LEDs in the report are indexed in reverse order to their @@ -1511,28 +1543,30 @@ static void sixaxis_state_worker(struct work_struct *work) */ for (n = 0; n < 4; n++) { if (sc->led_delay_on[n] || sc->led_delay_off[n]) { - report.data.led[3 - n].duty_off = sc->led_delay_off[n]; - report.data.led[3 - n].duty_on = sc->led_delay_on[n]; + report->led[3 - n].duty_off = sc->led_delay_off[n]; + report->led[3 - n].duty_on = sc->led_delay_on[n]; } } - hid_hw_raw_request(sc->hdev, report.data.report_id, report.buf, - sizeof(report), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); + hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report, + sizeof(struct sixaxis_output_report), + HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } static void dualshock4_state_worker(struct work_struct *work) { struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); struct hid_device *hdev = sc->hdev; + __u8 *buf = sc->output_report_dmabuf; int offset; - __u8 buf[78] = { 0 }; - if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) { + memset(buf, 0, DS4_REPORT_0x05_SIZE); buf[0] = 0x05; buf[1] = 0xFF; offset = 4; } else { + memset(buf, 0, DS4_REPORT_0x11_SIZE); buf[0] = 0x11; buf[1] = 0xB0; buf[3] = 0x0F; @@ -1560,12 +1594,33 @@ static void dualshock4_state_worker(struct work_struct *work) buf[offset++] = sc->led_delay_off[3]; if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) - hid_hw_output_report(hdev, buf, 32); + hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE); else - hid_hw_raw_request(hdev, 0x11, buf, 78, + hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } +static int sony_allocate_output_report(struct sony_sc *sc) +{ + if (sc->quirks & SIXAXIS_CONTROLLER) + sc->output_report_dmabuf = + kmalloc(sizeof(union sixaxis_output_report_01), + GFP_KERNEL); + else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) + sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE, + GFP_KERNEL); + else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) + sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE, + GFP_KERNEL); + else + return 0; + + if (!sc->output_report_dmabuf) + return -ENOMEM; + + return 0; +} + #ifdef CONFIG_SONY_FF static int sony_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) @@ -1754,6 +1809,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc) static int sony_check_add(struct sony_sc *sc) { + __u8 *buf = NULL; int n, ret; if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) || @@ -1769,36 +1825,44 @@ static int sony_check_add(struct sony_sc *sc) return 0; } } else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) { - __u8 buf[7]; + buf = kmalloc(DS4_REPORT_0x81_SIZE, 
GFP_KERNEL); + if (!buf) + return -ENOMEM; /* * The MAC address of a DS4 controller connected via USB can be * retrieved with feature report 0x81. The address begins at * offset 1. */ - ret = hid_hw_raw_request(sc->hdev, 0x81, buf, sizeof(buf), - HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + ret = hid_hw_raw_request(sc->hdev, 0x81, buf, + DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); - if (ret != 7) { + if (ret != DS4_REPORT_0x81_SIZE) { hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n"); - return ret < 0 ? ret : -EINVAL; + ret = ret < 0 ? ret : -EINVAL; + goto out_free; } memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address)); } else if (sc->quirks & SIXAXIS_CONTROLLER_USB) { - __u8 buf[18]; + buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; /* * The MAC address of a Sixaxis controller connected via USB can * be retrieved with feature report 0xf2. The address begins at * offset 4. */ - ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, sizeof(buf), - HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, + SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); - if (ret != 18) { + if (ret != SIXAXIS_REPORT_0xF2_SIZE) { hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n"); - return ret < 0 ? ret : -EINVAL; + ret = ret < 0 ? ret : -EINVAL; + goto out_free; } /* @@ -1811,7 +1875,13 @@ static int sony_check_add(struct sony_sc *sc) return 0; } - return sony_check_add_dev_list(sc); + ret = sony_check_add_dev_list(sc); + +out_free: + + kfree(buf); + + return ret; } static int sony_set_device_id(struct sony_sc *sc) @@ -1895,6 +1965,12 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } + ret = sony_allocate_output_report(sc); + if (ret < 0) { + hid_err(hdev, "failed to allocate the output report buffer\n"); + goto err_stop; + } + ret = sony_set_device_id(sc); if (ret < 0) { hid_err(hdev, "failed to allocate the device id\n"); @@ -1984,6 +2060,7 @@ err_stop: if (sc->quirks & SONY_BATTERY_SUPPORT) sony_battery_remove(sc); sony_cancel_work_sync(sc); + kfree(sc->output_report_dmabuf); sony_remove_dev_list(sc); sony_release_device_id(sc); hid_hw_stop(hdev); @@ -2004,6 +2081,8 @@ static void sony_remove(struct hid_device *hdev) sony_cancel_work_sync(sc); + kfree(sc->output_report_dmabuf); + sony_remove_dev_list(sc); sony_release_device_id(sc); @@ -2034,6 +2113,9 @@ static const struct hid_device_id sony_devices[] = { /* Logitech Harmony Adapter for PS3 */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3), .driver_data = PS3REMOTE }, + /* SMK-Link PS3 BD Remote Control */ + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE), + .driver_data = PS3REMOTE }, /* Sony Dualshock 4 controllers for PS4 */ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER), .driver_data = DUALSHOCK4_CONTROLLER_USB }, diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index f09e70cafaf1..d32037cbf9db 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -137,6 +137,7 @@ struct i2c_hid { * descriptor. 
*/ unsigned int bufsize; /* i2c buffer size */ char *inbuf; /* Input buffer */ + char *rawbuf; /* Raw Input buffer */ char *cmdbuf; /* Command buffer */ char *argsbuf; /* Command arguments buffer */ @@ -369,7 +370,7 @@ static int i2c_hid_hwreset(struct i2c_client *client) static void i2c_hid_get_input(struct i2c_hid *ihid) { int ret, ret_size; - int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); + int size = ihid->bufsize; ret = i2c_master_recv(ihid->client, ihid->inbuf, size); if (ret != size) { @@ -437,7 +438,7 @@ static void i2c_hid_init_report(struct hid_report *report, u8 *buffer, report->id, buffer, size)) return; - i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, ihid->inbuf); + i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, buffer); ret_size = buffer[0] | (buffer[1] << 8); @@ -504,9 +505,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type, static void i2c_hid_free_buffers(struct i2c_hid *ihid) { kfree(ihid->inbuf); + kfree(ihid->rawbuf); kfree(ihid->argsbuf); kfree(ihid->cmdbuf); ihid->inbuf = NULL; + ihid->rawbuf = NULL; ihid->cmdbuf = NULL; ihid->argsbuf = NULL; ihid->bufsize = 0; @@ -522,10 +525,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size) report_size; /* report */ ihid->inbuf = kzalloc(report_size, GFP_KERNEL); + ihid->rawbuf = kzalloc(report_size, GFP_KERNEL); ihid->argsbuf = kzalloc(args_len, GFP_KERNEL); ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL); - if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) { + if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) { i2c_hid_free_buffers(ihid); return -ENOMEM; } @@ -552,12 +556,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, ret = i2c_hid_get_report(client, report_type == HID_FEATURE_REPORT ? 
0x03 : 0x01, - report_number, ihid->inbuf, ask_count); + report_number, ihid->rawbuf, ask_count); if (ret < 0) return ret; - ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8); + ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8); if (ret_count <= 2) return 0; @@ -566,7 +570,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, /* The query buffer contains the size, dropping it in the reply */ count = min(count, ret_count - 2); - memcpy(buf, ihid->inbuf + 2, count); + memcpy(buf, ihid->rawbuf + 2, count); return count; } diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index ca6849a0121e..bfbe1bedda7f 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -278,18 +278,20 @@ static void hid_irq_in(struct urb *urb) usbhid->retry_delay = 0; if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open) break; - hid_input_report(urb->context, HID_INPUT_REPORT, - urb->transfer_buffer, - urb->actual_length, 1); - /* - * autosuspend refused while keys are pressed - * because most keyboards don't wake up when - * a key is released - */ - if (hid_check_keys_pressed(hid)) - set_bit(HID_KEYS_PRESSED, &usbhid->iofl); - else - clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); + if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { + hid_input_report(urb->context, HID_INPUT_REPORT, + urb->transfer_buffer, + urb->actual_length, 1); + /* + * autosuspend refused while keys are pressed + * because most keyboards don't wake up when + * a key is released + */ + if (hid_check_keys_pressed(hid)) + set_bit(HID_KEYS_PRESSED, &usbhid->iofl); + else + clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); + } break; case -EPIPE: /* stall */ usbhid_mark_busy(usbhid); @@ -338,8 +340,7 @@ static int hid_submit_out(struct hid_device *hid) report = usbhid->out[usbhid->outtail].report; raw_report = usbhid->out[usbhid->outtail].raw_report; - usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + - 1 + (report->id > 0); + usbhid->urbout->transfer_buffer_length = hid_report_len(report); usbhid->urbout->dev = hid_to_usb_dev(hid); if (raw_report) { memcpy(usbhid->outbuf, raw_report, @@ -688,6 +689,7 @@ int usbhid_open(struct hid_device *hid) goto done; } usbhid->intf->needs_remote_wakeup = 1; + set_bit(HID_RESUME_RUNNING, &usbhid->iofl); res = hid_start_in(hid); if (res) { if (res != -ENOSPC) { @@ -701,6 +703,15 @@ int usbhid_open(struct hid_device *hid) } } usb_autopm_put_interface(usbhid->intf); + + /* + * In case events are generated while nobody was listening, + * some are released when the device is re-opened. + * Wait 50 msec for the queue to empty before allowing events + * to go through hid. 
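+ * (While HID_RESUME_RUNNING is set, hid_irq_in() above resubmits the
+ * input URB without passing the report to the HID core, so the stale
+ * events are drained silently.)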
+ */ + msleep(50); + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); } done: mutex_unlock(&hid_open_mut); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 552671ee7c5d..dc89be90b35e 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -73,11 +73,13 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index f633c24ce28b..807922b49aa4 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -52,6 +52,7 @@ struct usb_interface *usbhid_find_interface(int minor); #define HID_STARTED 8 #define HID_KEYS_PRESSED 10 #define HID_NO_BANDWIDTH 11 +#define HID_RESUME_RUNNING 12 /* * USB-specific HID struct, to be pointed to diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 0cc53440543a..7db432809e9e 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h @@ -140,7 +140,7 @@ extern const struct hid_device_id wacom_ids[]; void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); void wacom_setup_device_quirks(struct wacom_features *features); -int wacom_setup_input_capabilities(struct input_dev *input_dev, +int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac); int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 8593047bb726..654202941d30 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -13,6 +13,7 @@ #include "wacom_wac.h" #include "wacom.h" +#include <linux/input/mt.h> #define WAC_MSG_RETRIES 5 @@ -70,22 +71,15 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, static int wacom_open(struct input_dev *dev) { struct wacom *wacom = input_get_drvdata(dev); - int retval; - - mutex_lock(&wacom->lock); - retval = hid_hw_open(wacom->hdev); - mutex_unlock(&wacom->lock); - return retval; + return hid_hw_open(wacom->hdev); } static void wacom_close(struct input_dev *dev) { struct wacom *wacom = input_get_drvdata(dev); - mutex_lock(&wacom->lock); hid_hw_close(wacom->hdev); - mutex_unlock(&wacom->lock); } /* @@ -192,9 +186,15 @@ static void wacom_usage_mapping(struct hid_device *hdev, if (!pen && !finger) return; - if (finger && !features->touch_max) - /* touch device at least supports one touch point */ - features->touch_max = 1; + /* + * Bamboo models do not support HID_DG_CONTACTMAX. 
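
Both the hid_submit_out() hunk above and the wacom_compute_pktlen() cleanup further down replace an open-coded length computation with hid_report_len(). Per the comment on the wacom helper being removed, the arithmetic is DIV_ROUND_UP(report->size, 8) plus one byte when a numbered report ID is present; a standalone check of that formula:

        #include <assert.h>

        /* report size is in bits; numbered reports carry a 1-byte ID prefix */
        static unsigned int report_len(unsigned int size_bits, unsigned int id)
        {
                return ((size_bits - 1) >> 3) + 1 + (id > 0);
        }

        int main(void)
        {
                assert(report_len(8, 0) == 1);   /* 8 bits, no ID: 1 byte */
                assert(report_len(9, 0) == 2);   /* 9 bits round up to 2 bytes */
                assert(report_len(16, 3) == 3);  /* 2 data bytes + ID byte */
                return 0;
        }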
+ * And, Bamboo Pen only descriptor contains touch. + */ + if (features->type != BAMBOO_PT) { + /* ISDv4 touch devices at least supports one touch point */ + if (finger && !features->touch_max) + features->touch_max = 1; + } switch (usage->hid) { case HID_GD_X: @@ -230,6 +230,21 @@ static void wacom_usage_mapping(struct hid_device *hdev, wacom_wac_usage_mapping(hdev, field, usage); } +static void wacom_post_parse_hid(struct hid_device *hdev, + struct wacom_features *features) +{ + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + + if (features->type == HID_GENERIC) { + /* Any last-minute generic device setup */ + if (features->touch_max > 1) { + input_mt_init_slots(wacom_wac->input, wacom_wac->features.touch_max, + INPUT_MT_DIRECT); + } + } +} + static void wacom_parse_hid(struct hid_device *hdev, struct wacom_features *features) { @@ -264,6 +279,8 @@ static void wacom_parse_hid(struct hid_device *hdev, wacom_usage_mapping(hdev, hreport->field[i], hreport->field[i]->usage + j); } + + wacom_post_parse_hid(hdev, features); } static int wacom_hid_set_device_mode(struct hid_device *hdev) @@ -1129,7 +1146,7 @@ static void wacom_clean_inputs(struct wacom *wacom) input_free_device(wacom->wacom_wac.input); } if (wacom->wacom_wac.pad_input) { - if (wacom->wacom_wac.input_registered) + if (wacom->wacom_wac.pad_registered) input_unregister_device(wacom->wacom_wac.pad_input); else input_free_device(wacom->wacom_wac.pad_input); @@ -1151,13 +1168,13 @@ static int wacom_register_inputs(struct wacom *wacom) if (!input_dev || !pad_input_dev) return -EINVAL; - error = wacom_setup_input_capabilities(input_dev, wacom_wac); - if (error) - return error; - - error = input_register_device(input_dev); - if (error) - return error; + error = wacom_setup_pentouch_input_capabilities(input_dev, wacom_wac); + if (!error) { + error = input_register_device(input_dev); + if (error) + return error; + wacom_wac->input_registered = true; + } error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); if (error) { @@ -1169,22 +1186,23 @@ static int wacom_register_inputs(struct wacom *wacom) error = input_register_device(pad_input_dev); if (error) goto fail_register_pad_input; + wacom_wac->pad_registered = true; error = wacom_initialize_leds(wacom); if (error) goto fail_leds; } - wacom_wac->input_registered = true; - return 0; fail_leds: input_unregister_device(pad_input_dev); pad_input_dev = NULL; + wacom_wac->pad_registered = false; fail_register_pad_input: input_unregister_device(input_dev); wacom_wac->input = NULL; + wacom_wac->input_registered = false; return error; } @@ -1321,12 +1339,6 @@ static void wacom_calculate_res(struct wacom_features *features) features->unitExpo); } -static int wacom_hid_report_len(struct hid_report *report) -{ - /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ - return ((report->size - 1) >> 3) + 1 + (report->id > 0); -} - static size_t wacom_compute_pktlen(struct hid_device *hdev) { struct hid_report_enum *report_enum; @@ -1336,7 +1348,7 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev) report_enum = hdev->report_enum + HID_INPUT_REPORT; list_for_each_entry(report, &report_enum->report_list, list) { - size_t report_size = wacom_hid_report_len(report); + size_t report_size = hid_report_len(report); if (report_size > size) size = report_size; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 586b2405b0d4..ac7447c7b82e 100644 --- a/drivers/hid/wacom_wac.c +++ 
b/drivers/hid/wacom_wac.c @@ -25,6 +25,10 @@ #define WACOM_INTUOS_RES 100 #define WACOM_INTUOS3_RES 200 +/* Newer Cintiq and DTU have an offset between tablet and screen areas */ +#define WACOM_DTU_OFFSET 200 +#define WACOM_CINTIQ_OFFSET 400 + /* * Scale factor relating reported contact size to logical contact area. * 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo @@ -600,8 +604,8 @@ static void wacom_intuos_general(struct wacom_wac *wacom) } input_report_abs(input, ABS_PRESSURE, t); input_report_abs(input, ABS_TILT_X, - ((data[7] << 1) & 0x7e) | (data[8] >> 7)); - input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f); + (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64); + input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64); input_report_key(input, BTN_STYLUS, data[1] & 2); input_report_key(input, BTN_STYLUS2, data[1] & 4); input_report_key(input, BTN_TOUCH, t > 10); @@ -612,8 +616,8 @@ static void wacom_intuos_general(struct wacom_wac *wacom) input_report_abs(input, ABS_WHEEL, (data[6] << 2) | ((data[7] >> 6) & 3)); input_report_abs(input, ABS_TILT_X, - ((data[7] << 1) & 0x7e) | (data[8] >> 7)); - input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f); + (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64); + input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64); } } @@ -915,8 +919,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_EXTRA, data[6] & 0x10); input_report_abs(input, ABS_TILT_X, - ((data[7] << 1) & 0x7e) | (data[8] >> 7)); - input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f); + (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64); + input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64); } else { /* 2D mouse packet */ input_report_key(input, BTN_LEFT, data[8] & 0x04); @@ -1377,11 +1381,12 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct input_dev *input = wacom_wac->input; + struct wacom_features *features = &wacom_wac->features; unsigned touch_max = wacom_wac->features.touch_max; switch (usage->hid) { case HID_GD_X: + features->last_slot_field = usage->hid; if (touch_max == 1) wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4); else @@ -1389,6 +1394,7 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, ABS_MT_POSITION_X, 4); break; case HID_GD_Y: + features->last_slot_field = usage->hid; if (touch_max == 1) wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4); else @@ -1396,19 +1402,48 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, ABS_MT_POSITION_Y, 4); break; case HID_DG_CONTACTID: - input_mt_init_slots(input, wacom_wac->features.touch_max, - INPUT_MT_DIRECT); + features->last_slot_field = usage->hid; break; case HID_DG_INRANGE: + features->last_slot_field = usage->hid; break; case HID_DG_INVERT: + features->last_slot_field = usage->hid; break; case HID_DG_TIPSWITCH: + features->last_slot_field = usage->hid; wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0); break; } } +static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, + struct input_dev *input) +{ + struct hid_data *hid_data = &wacom_wac->hid_data; + bool mt = wacom_wac->features.touch_max > 1; + bool prox = hid_data->tipswitch && + !wacom_wac->shared->stylus_in_proximity; + + if (mt) { + int slot; + + slot = input_mt_get_slot_by_key(input, hid_data->id); + input_mt_slot(input, slot); + input_mt_report_slot_state(input, MT_TOOL_FINGER, prox); + } + else { + 
input_report_key(input, BTN_TOUCH, prox); + } + + if (prox) { + input_report_abs(input, mt ? ABS_MT_POSITION_X : ABS_X, + hid_data->x); + input_report_abs(input, mt ? ABS_MT_POSITION_Y : ABS_Y, + hid_data->y); + } +} + static int wacom_wac_finger_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { @@ -1431,36 +1466,35 @@ static int wacom_wac_finger_event(struct hid_device *hdev, } + if (usage->usage_index + 1 == field->report_count) { + if (usage->hid == wacom_wac->features.last_slot_field) + wacom_wac_finger_slot(wacom_wac, wacom_wac->input); + } + return 0; } -static void wacom_wac_finger_mt_report(struct wacom_wac *wacom_wac, - struct input_dev *input, bool touch) +static int wacom_wac_finger_count_touches(struct hid_device *hdev) { - int slot; - struct hid_data *hid_data = &wacom_wac->hid_data; + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct input_dev *input = wacom_wac->input; + unsigned touch_max = wacom_wac->features.touch_max; + int count = 0; + int i; - slot = input_mt_get_slot_by_key(input, hid_data->id); + if (touch_max == 1) + return wacom_wac->hid_data.tipswitch && + !wacom_wac->shared->stylus_in_proximity; - input_mt_slot(input, slot); - input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); - if (touch) { - input_report_abs(input, ABS_MT_POSITION_X, hid_data->x); - input_report_abs(input, ABS_MT_POSITION_Y, hid_data->y); + for (i = 0; i < input->mt->num_slots; i++) { + struct input_mt_slot *ps = &input->mt->slots[i]; + int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID); + if (id >= 0) + count++; } - input_mt_sync_frame(input); -} -static void wacom_wac_finger_single_touch_report(struct wacom_wac *wacom_wac, - struct input_dev *input, bool touch) -{ - struct hid_data *hid_data = &wacom_wac->hid_data; - - if (touch) { - input_report_abs(input, ABS_X, hid_data->x); - input_report_abs(input, ABS_Y, hid_data->y); - } - input_report_key(input, BTN_TOUCH, touch); + return count; } static void wacom_wac_finger_report(struct hid_device *hdev, @@ -1469,24 +1503,23 @@ static void wacom_wac_finger_report(struct hid_device *hdev, struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->input; - bool touch = wacom_wac->hid_data.tipswitch && - !wacom_wac->shared->stylus_in_proximity; unsigned touch_max = wacom_wac->features.touch_max; if (touch_max > 1) - wacom_wac_finger_mt_report(wacom_wac, input, touch); - else - wacom_wac_finger_single_touch_report(wacom_wac, input, touch); + input_mt_sync_frame(input); + input_sync(input); /* keep touch state for pen event */ - wacom_wac->shared->touch_down = touch; + wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(hdev); } #define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \ - ((f)->physical == HID_DG_STYLUS)) + ((f)->physical == HID_DG_STYLUS) || \ + ((f)->application == HID_DG_PEN)) #define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \ - ((f)->physical == HID_DG_FINGER)) + ((f)->physical == HID_DG_FINGER) || \ + ((f)->application == HID_DG_TOUCHSCREEN)) void wacom_wac_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) @@ -1681,7 +1714,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom) return 0; if (data[0] == WACOM_REPORT_USB) { - if (features->type == INTUOSHT && features->touch_max) { + if (features->type == INTUOSHT && + wacom->shared->touch_input && + features->touch_max) { 
input_report_switch(wacom->shared->touch_input, SW_MUTE_DEVICE, data[8] & 0x40); input_sync(wacom->shared->touch_input); @@ -1774,7 +1809,8 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len) int pid, battery, ps_connected; if ((wacom->shared->type == INTUOSHT) && - wacom->shared->touch_max) { + wacom->shared->touch_input && + wacom->shared->touch_max) { input_report_switch(wacom->shared->touch_input, SW_MUTE_DEVICE, data[5] & 0x40); input_sync(wacom->shared->touch_input); @@ -1838,6 +1874,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) break; case DTUS: + case DTUSX: sync = wacom_dtus_irq(wacom_wac); break; @@ -1926,8 +1963,10 @@ static void wacom_setup_cintiq(struct wacom_wac *wacom_wac) input_set_abs_params(input_dev, ABS_DISTANCE, 0, wacom_wac->features.distance_max, 0, 0); input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0); - input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0); - input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0); + input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, 0, 0); + input_abs_set_res(input_dev, ABS_TILT_X, 57); + input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, 0, 0); + input_abs_set_res(input_dev, ABS_TILT_Y, 57); } static void wacom_setup_intuos(struct wacom_wac *wacom_wac) @@ -1947,6 +1986,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac) __set_bit(BTN_TOOL_LENS, input_dev->keybit); input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_RZ, 287); input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0); } @@ -2029,7 +2069,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev, } } -int wacom_setup_input_capabilities(struct input_dev *input_dev, +int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; @@ -2047,9 +2087,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, switch (features->type) { case WACOM_MO: - input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); - /* fall through */ - case WACOM_G4: /* fall through */ @@ -2092,6 +2129,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case WACOM_24HD: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); /* fall through */ @@ -2106,6 +2144,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case WACOM_BEE: case CINTIQ: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); @@ -2114,6 +2153,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case WACOM_13HD: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); break; @@ -2122,6 +2162,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case INTUOS3L: case INTUOS3S: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); /* fall through */ case INTUOS: @@ -2144,6 +2185,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); wacom_setup_intuos(wacom_wac); } else if (features->device_type == BTN_TOOL_FINGER) { @@ -2162,6 +2204,7 @@ int 
wacom_setup_input_capabilities(struct input_dev *input_dev, case INTUOS4L: case INTUOS4S: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); wacom_setup_intuos(wacom_wac); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); @@ -2196,6 +2239,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, /* fall through */ case DTUS: + case DTUSX: case PL: case DTU: __set_bit(BTN_TOOL_PEN, input_dev->keybit); @@ -2246,6 +2290,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __clear_bit(ABS_X, input_dev->absbit); __clear_bit(ABS_Y, input_dev->absbit); __clear_bit(BTN_TOUCH, input_dev->keybit); + + /* PAD is setup by wacom_setup_pad_input_capabilities later */ + return 1; } } else if (features->device_type == BTN_TOOL_PEN) { __set_bit(INPUT_PROP_POINTER, input_dev->propbit); @@ -2261,6 +2308,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case CINTIQ_HYBRID: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); @@ -2303,9 +2351,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case WACOM_G4: __set_bit(BTN_BACK, input_dev->keybit); - __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); - __set_bit(BTN_RIGHT, input_dev->keybit); input_set_capability(input_dev, EV_REL, REL_WHEEL); break; @@ -2402,7 +2448,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case INTUOSPS: /* touch interface does not have the pad device */ if (features->device_type != BTN_TOOL_PEN) - return 1; + return -ENODEV; for (i = 0; i < 7; i++) __set_bit(BTN_0 + i, input_dev->keybit); @@ -2446,8 +2492,10 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case INTUOSHT: case BAMBOO_PT: /* pad device is on the touch interface */ - if (features->device_type != BTN_TOOL_FINGER) - return 1; + if ((features->device_type != BTN_TOOL_FINGER) || + /* Bamboo Pen only tablet does not have pad */ + ((features->type == BAMBOO_PT) && !features->touch_max)) + return -ENODEV; __clear_bit(ABS_MISC, input_dev->absbit); @@ -2460,7 +2508,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, default: /* no pad supported */ - return 1; + return -ENODEV; } return 0; } @@ -2664,11 +2712,13 @@ static const struct wacom_features wacom_features_0x317 = INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0xF4 = - { "Wacom Cintiq 24HD", 104280, 65400, 2047, 63, - WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63, + WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0xF8 = - { "Wacom Cintiq 24HD touch", 104280, 65400, 2047, 63, /* Pen */ - WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */ + WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; static const struct wacom_features wacom_features_0xF6 = { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */ @@ -2684,8 +2734,9 @@ static const struct wacom_features wacom_features_0xC6 = { "Wacom Cintiq 12WX", 53020, 33440, 1023, 63, 
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x304 = - { "Wacom Cintiq 13HD", 59352, 33648, 1023, 63, - WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63, + WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0xC7 = { "Wacom DTU1931", 37832, 30305, 511, 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2697,28 +2748,38 @@ static const struct wacom_features wacom_features_0xF0 = { "Wacom DTU1631", 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xFB = - { "Wacom DTU1031", 22096, 13960, 511, 0, - DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom DTU1031", 21896, 13760, 511, 0, + DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; +static const struct wacom_features wacom_features_0x32F = + { "Wacom DTU1031X", 22472, 12728, 511, 0, + DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; static const struct wacom_features wacom_features_0x57 = { "Wacom DTK2241", 95640, 54060, 2047, 63, - DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x59 = /* Pen */ { "Wacom DTH2242", 95640, 54060, 2047, 63, - DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D }; static const struct wacom_features wacom_features_0x5D = /* Touch */ { "Wacom DTH2242", .type = WACOM_24HDT, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0xCC = - { "Wacom Cintiq 21UX2", 87000, 65400, 2047, 63, - WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63, + WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0xFA = - { "Wacom Cintiq 22HD", 95640, 54060, 2047, 63, - WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63, + WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x5B = - { "Wacom Cintiq 22HDT", 95640, 54060, 2047, 63, - WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63, + WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; static const struct wacom_features wacom_features_0x5E = { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, @@ -2863,21 +2924,27 @@ static const struct wacom_features wacom_features_0x6004 = { "ISD-V4", 12800, 8000, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x307 = - { "Wacom ISDv5 307", 59352, 33648, 2047, 63, - CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom ISDv5 307", 59152, 33448, 2047, 63, + CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 }; static const 
struct wacom_features wacom_features_0x309 = { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x30A = - { "Wacom ISDv5 30A", 59352, 33648, 2047, 63, - CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom ISDv5 30A", 59152, 33448, 2047, 63, + CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C }; static const struct wacom_features wacom_features_0x30C = { "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30A, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; +static const struct wacom_features wacom_features_0x323 = + { "Wacom Intuos P M", 21600, 13500, 1023, 31, + INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_HID_ANY_ID = { "Wacom HID", .type = HID_GENERIC }; @@ -3022,10 +3089,13 @@ const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x314) }, { USB_DEVICE_WACOM(0x315) }, { USB_DEVICE_WACOM(0x317) }, + { USB_DEVICE_WACOM(0x323) }, + { USB_DEVICE_WACOM(0x32F) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x4004) }, { USB_DEVICE_WACOM(0x5000) }, { USB_DEVICE_WACOM(0x5002) }, + { USB_DEVICE_LENOVO(0x6004) }, { USB_DEVICE_WACOM(HID_ANY_ID) }, { } diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 0f0b85ec1322..bfad815cda8a 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -80,6 +80,7 @@ enum { PL, DTU, DTUS, + DTUSX, INTUOS, INTUOS3S, INTUOS3, @@ -144,6 +145,7 @@ struct wacom_features { int pktlen; bool check_for_hid_type; int hid_type; + int last_slot_field; }; struct wacom_shared { @@ -183,6 +185,7 @@ struct wacom_wac { struct input_dev *input; struct input_dev *pad_input; bool input_registered; + bool pad_registered; int pid; int battery_capacity; int num_contacts_left; diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 3d42043fec51..8c6363cdd208 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -487,7 +487,7 @@ static void ide_floppy_setup(ide_drive_t *drive) * it. It should be fixed as of version 1.9, but to be on the safe side * we'll leave the limitation below for the 2.2.x tree. */ - if (!strncmp((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI", 20)) { + if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI")) { drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE; /* This value will be visible in the /proc/ide/hdx/settings */ drive->pc_delay = IDEFLOPPY_PC_DELAY; @@ -498,7 +498,7 @@ static void ide_floppy_setup(ide_drive_t *drive) * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes * nasty clicking noises without it, so please don't remove this. */ - if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) { + if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA Clik!")) { blk_queue_max_hw_sectors(drive->queue, 64); drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE; /* IOMEGA Clik! drives do not support lock/unlock commands */ diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 2ce6268a2734..e29b02ca9e91 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -101,8 +101,7 @@ void ide_device_put(ide_drive_t *drive) struct device *host_dev = drive->hwif->host->dev[0]; struct module *module = host_dev ? 
host_dev->driver->owner : NULL; - if (module) - module_put(module); + module_put(module); #endif put_device(&drive->gendev); } diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6dbfbc209491..30f0e61341c5 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -144,13 +144,26 @@ config OMAP_IOMMU select IOMMU_API config OMAP_IOMMU_DEBUG - tristate "Export OMAP IOMMU internals in DebugFS" - depends on OMAP_IOMMU && DEBUG_FS - help - Select this to see extensive information about - the internal state of OMAP IOMMU in debugfs. + bool "Export OMAP IOMMU internals in DebugFS" + depends on OMAP_IOMMU && DEBUG_FS + ---help--- + Select this to see extensive information about + the internal state of OMAP IOMMU in debugfs. + + Say N unless you know you need this. - Say N unless you know you need this. +config ROCKCHIP_IOMMU + bool "Rockchip IOMMU Support" + depends on ARM + depends on ARCH_ROCKCHIP || COMPILE_TEST + select IOMMU_API + select ARM_DMA_USE_IOMMU + help + Support for IOMMUs found on Rockchip rk32xx SOCs. + These IOMMUs allow virtualization of the address space used by most + cores within the multimedia subsystem. + Say Y here if you are using a Rockchip SoC that includes an IOMMU + device. config TEGRA_IOMMU_GART bool "Tegra GART IOMMU Support" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 16edef74b8ee..7b976f294a69 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o -obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o +obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2d84c9edf3b8..b205f76d7129 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap) return true; case IOMMU_CAP_INTR_REMAP: return (irq_remapping_enabled == 1); + case IOMMU_CAP_NOEXEC: + return false; } return false; diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 90d734bbf467..a2d87a60c27f 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state) static void put_pasid_state(struct pasid_state *pasid_state) { - if (atomic_dec_and_test(&pasid_state->count)) { - put_device_state(pasid_state->device_state); + if (atomic_dec_and_test(&pasid_state->count)) wake_up(&pasid_state->wq); - } } static void put_pasid_state_wait(struct pasid_state *pasid_state) @@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state) prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_dec_and_test(&pasid_state->count)) - put_device_state(pasid_state->device_state); - else + if (!atomic_dec_and_test(&pasid_state->count)) schedule(); finish_wait(&pasid_state->wq, &wait); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e393ae01b5d2..b8aac1389a96 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -404,9 +404,16 @@ struct arm_smmu_cfg { #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 
1) +enum arm_smmu_domain_stage { + ARM_SMMU_DOMAIN_S1 = 0, + ARM_SMMU_DOMAIN_S2, + ARM_SMMU_DOMAIN_NESTED, +}; + struct arm_smmu_domain { struct arm_smmu_device *smmu; struct arm_smmu_cfg cfg; + enum arm_smmu_domain_stage stage; spinlock_t lock; }; @@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (smmu_domain->smmu) goto out_unlock; - if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { + /* + * Mapping the requested stage onto what we support is surprisingly + * complicated, mainly because the spec allows S1+S2 SMMUs without + * support for nested translation. That means we end up with the + * following table: + * + * Requested Supported Actual + * S1 N S1 + * S1 S1+S2 S1 + * S1 S2 S2 + * S1 S1 S1 + * N N N + * N S1+S2 S2 + * N S2 S2 + * N S1 S1 + * + * Note that you can't actually request stage-2 mappings. + */ + if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) + smmu_domain->stage = ARM_SMMU_DOMAIN_S2; + if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + + switch (smmu_domain->stage) { + case ARM_SMMU_DOMAIN_S1: + cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; + start = smmu->num_s2_context_banks; + break; + case ARM_SMMU_DOMAIN_NESTED: /* * We will likely want to change this if/when KVM gets * involved. */ - cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; - start = smmu->num_s2_context_banks; - } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { - cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; - start = smmu->num_s2_context_banks; - } else { + case ARM_SMMU_DOMAIN_S2: cfg->cbar = CBAR_TYPE_S2_TRANS; start = 0; + break; + default: + ret = -EINVAL; + goto out_unlock; } ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, @@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, unsigned long pfn, int prot, int stage) { pte_t *pte, *start; - pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; + pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; if (pmd_none(*pmd)) { /* Allocate a new set of tables */ @@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, pteval |= ARM_SMMU_PTE_MEMATTR_NC; } + if (prot & IOMMU_NOEXEC) + pteval |= ARM_SMMU_PTE_XN; + /* If no access, create a faulting entry to avoid TLB fills */ - if (prot & IOMMU_EXEC) - pteval &= ~ARM_SMMU_PTE_XN; - else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) + if (!(prot & (IOMMU_READ | IOMMU_WRITE))) pteval &= ~ARM_SMMU_PTE_PAGE; pteval |= ARM_SMMU_PTE_SH_IS; @@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) return true; case IOMMU_CAP_INTR_REMAP: return true; /* MSIs are just memory writes */ + case IOMMU_CAP_NOEXEC: + return true; default: return false; } @@ -1644,21 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev) iommu_group_remove_device(dev); } +static int arm_smmu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + struct arm_smmu_domain *smmu_domain = domain->priv; + + switch (attr) { + case DOMAIN_ATTR_NESTING: + *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); + return 0; + default: + return -ENODEV; + } +} + +static int arm_smmu_domain_set_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + struct arm_smmu_domain *smmu_domain = domain->priv; + + switch (attr) { + case DOMAIN_ATTR_NESTING: + if (smmu_domain->smmu) + return -EPERM; + if (*(int *)data) + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + else + smmu_domain->stage = 
ARM_SMMU_DOMAIN_S1; + + return 0; + default: + return -ENODEV; + } +} + static const struct iommu_ops arm_smmu_ops = { - .capable = arm_smmu_capable, - .domain_init = arm_smmu_domain_init, - .domain_destroy = arm_smmu_domain_destroy, - .attach_dev = arm_smmu_attach_dev, - .detach_dev = arm_smmu_detach_dev, - .map = arm_smmu_map, - .unmap = arm_smmu_unmap, - .map_sg = default_iommu_map_sg, - .iova_to_phys = arm_smmu_iova_to_phys, - .add_device = arm_smmu_add_device, - .remove_device = arm_smmu_remove_device, - .pgsize_bitmap = (SECTION_SIZE | - ARM_SMMU_PTE_CONT_SIZE | - PAGE_SIZE), + .capable = arm_smmu_capable, + .domain_init = arm_smmu_domain_init, + .domain_destroy = arm_smmu_domain_destroy, + .attach_dev = arm_smmu_attach_dev, + .detach_dev = arm_smmu_detach_dev, + .map = arm_smmu_map, + .unmap = arm_smmu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = arm_smmu_iova_to_phys, + .add_device = arm_smmu_add_device, + .remove_device = arm_smmu_remove_device, + .domain_get_attr = arm_smmu_domain_get_attr, + .domain_set_attr = arm_smmu_domain_set_attr, + .pgsize_bitmap = (SECTION_SIZE | + ARM_SMMU_PTE_CONT_SIZE | + PAGE_SIZE), }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) @@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = { static int __init arm_smmu_init(void) { + struct device_node *np; int ret; + /* + * Play nice with systems that don't have an ARM SMMU by checking that + * an ARM SMMU exists in the system before proceeding with the driver + * and IOMMU bus operation registration. + */ + np = of_find_matching_node(NULL, arm_smmu_of_match); + if (!np) + return 0; + + of_node_put(np); + ret = platform_driver_register(&arm_smmu_driver); if (ret) return ret; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index c5c61cabd6e3..9847613085e1 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -44,6 +44,14 @@ #include "irq_remapping.h" +typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *); +struct dmar_res_callback { + dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED]; + void *arg[ACPI_DMAR_TYPE_RESERVED]; + bool ignore_unhandled; + bool print_entry; +}; + /* * Assumptions: * 1) The hotplug framework guarentees that DMAR unit will be hot-added @@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units); struct acpi_table_header * __initdata dmar_tbl; static acpi_size dmar_tbl_size; static int dmar_dev_scope_status = 1; +static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; static int alloc_iommu(struct dmar_drhd_unit *drhd); static void free_iommu(struct intel_iommu *iommu); -static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) +static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) { /* * add INCLUDE_ALL at the tail, so scan the list will find it at @@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = { .priority = INT_MIN, }; +static struct dmar_drhd_unit * +dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd) +{ + struct dmar_drhd_unit *dmaru; + + list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list) + if (dmaru->segment == drhd->segment && + dmaru->reg_base_addr == drhd->address) + return dmaru; + + return NULL; +} + /** * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition * structure which uniquely represent one DMA remapping hardware unit * present in the platform */ -static int __init -dmar_parse_one_drhd(struct acpi_dmar_header *header) +static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) { struct 
acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; int ret = 0; drhd = (struct acpi_dmar_hardware_unit *)header; - dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); + dmaru = dmar_find_dmaru(drhd); + if (dmaru) + goto out; + + dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL); if (!dmaru) return -ENOMEM; - dmaru->hdr = header; + /* + * If header is allocated from slab by ACPI _DSM method, we need to + * copy the content because the memory buffer will be freed on return. + */ + dmaru->hdr = (void *)(dmaru + 1); + memcpy(dmaru->hdr, header, header->length); dmaru->reg_base_addr = drhd->address; dmaru->segment = drhd->segment; dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ @@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) return ret; } dmar_register_drhd_unit(dmaru); + +out: + if (arg) + (*(int *)arg)++; + return 0; } @@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) kfree(dmaru); } -static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) +static int __init dmar_parse_one_andd(struct acpi_dmar_header *header, + void *arg) { struct acpi_dmar_andd *andd = (void *)header; @@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) } #ifdef CONFIG_ACPI_NUMA -static int __init -dmar_parse_one_rhsa(struct acpi_dmar_header *header) +static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_rhsa *rhsa; struct dmar_drhd_unit *drhd; @@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header) return 0; } +#else +#define dmar_parse_one_rhsa dmar_res_noop #endif static void __init @@ -503,6 +540,52 @@ static int __init dmar_table_detect(void) return (ACPI_SUCCESS(status) ? 1 : 0); } +static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, + size_t len, struct dmar_res_callback *cb) +{ + int ret = 0; + struct acpi_dmar_header *iter, *next; + struct acpi_dmar_header *end = ((void *)start) + len; + + for (iter = start; iter < end && ret == 0; iter = next) { + next = (void *)iter + iter->length; + if (iter->length == 0) { + /* Avoid looping forever on bad ACPI tables */ + pr_debug(FW_BUG "Invalid 0-length structure\n"); + break; + } else if (next > end) { + /* Avoid passing table end */ + pr_warn(FW_BUG "record passes table end\n"); + ret = -EINVAL; + break; + } + + if (cb->print_entry) + dmar_table_print_dmar_entry(iter); + + if (iter->type >= ACPI_DMAR_TYPE_RESERVED) { + /* continue for forward compatibility */ + pr_debug("Unknown DMAR structure type %d\n", + iter->type); + } else if (cb->cb[iter->type]) { + ret = cb->cb[iter->type](iter, cb->arg[iter->type]); + } else if (!cb->ignore_unhandled) { + pr_warn("No handler for DMAR structure type %d\n", + iter->type); + ret = -EINVAL; + } + } + + return ret; +} + +static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, + struct dmar_res_callback *cb) +{ + return dmar_walk_remapping_entries((void *)(dmar + 1), + dmar->header.length - sizeof(*dmar), cb); +} + /** * parse_dmar_table - parses the DMA reporting table */ @@ -510,9 +593,18 @@ static int __init parse_dmar_table(void) { struct acpi_table_dmar *dmar; - struct acpi_dmar_header *entry_header; int ret = 0; int drhd_count = 0; + struct dmar_res_callback cb = { + .print_entry = true, + .ignore_unhandled = true, + .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count, + .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd, + .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr, + 
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr, + .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa, + .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd, + }; /* * Do it again, earlier dmar_tbl mapping could be mapped with @@ -536,51 +628,10 @@ parse_dmar_table(void) } pr_info("Host address width %d\n", dmar->width + 1); - - entry_header = (struct acpi_dmar_header *)(dmar + 1); - while (((unsigned long)entry_header) < - (((unsigned long)dmar) + dmar_tbl->length)) { - /* Avoid looping forever on bad ACPI tables */ - if (entry_header->length == 0) { - pr_warn("Invalid 0-length structure\n"); - ret = -EINVAL; - break; - } - - dmar_table_print_dmar_entry(entry_header); - - switch (entry_header->type) { - case ACPI_DMAR_TYPE_HARDWARE_UNIT: - drhd_count++; - ret = dmar_parse_one_drhd(entry_header); - break; - case ACPI_DMAR_TYPE_RESERVED_MEMORY: - ret = dmar_parse_one_rmrr(entry_header); - break; - case ACPI_DMAR_TYPE_ROOT_ATS: - ret = dmar_parse_one_atsr(entry_header); - break; - case ACPI_DMAR_TYPE_HARDWARE_AFFINITY: -#ifdef CONFIG_ACPI_NUMA - ret = dmar_parse_one_rhsa(entry_header); -#endif - break; - case ACPI_DMAR_TYPE_NAMESPACE: - ret = dmar_parse_one_andd(entry_header); - break; - default: - pr_warn("Unknown DMAR structure type %d\n", - entry_header->type); - ret = 0; /* for forward compatibility */ - break; - } - if (ret) - break; - - entry_header = ((void *)entry_header + entry_header->length); - } - if (drhd_count == 0) + ret = dmar_walk_dmar_table(dmar, &cb); + if (ret == 0 && drhd_count == 0) pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); + return ret; } @@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message) dmi_get_system_info(DMI_PRODUCT_VERSION)); } -static int __init check_zero_address(void) +static int __ref +dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg) { - struct acpi_table_dmar *dmar; - struct acpi_dmar_header *entry_header; struct acpi_dmar_hardware_unit *drhd; + void __iomem *addr; + u64 cap, ecap; - dmar = (struct acpi_table_dmar *)dmar_tbl; - entry_header = (struct acpi_dmar_header *)(dmar + 1); - - while (((unsigned long)entry_header) < - (((unsigned long)dmar) + dmar_tbl->length)) { - /* Avoid looping forever on bad ACPI tables */ - if (entry_header->length == 0) { - pr_warn("Invalid 0-length structure\n"); - return 0; - } + drhd = (void *)entry; + if (!drhd->address) { + warn_invalid_dmar(0, ""); + return -EINVAL; + } - if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { - void __iomem *addr; - u64 cap, ecap; + if (arg) + addr = ioremap(drhd->address, VTD_PAGE_SIZE); + else + addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); + if (!addr) { + pr_warn("IOMMU: can't validate: %llx\n", drhd->address); + return -EINVAL; + } - drhd = (void *)entry_header; - if (!drhd->address) { - warn_invalid_dmar(0, ""); - goto failed; - } + cap = dmar_readq(addr + DMAR_CAP_REG); + ecap = dmar_readq(addr + DMAR_ECAP_REG); - addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); - if (!addr ) { - printk("IOMMU: can't validate: %llx\n", drhd->address); - goto failed; - } - cap = dmar_readq(addr + DMAR_CAP_REG); - ecap = dmar_readq(addr + DMAR_ECAP_REG); - early_iounmap(addr, VTD_PAGE_SIZE); - if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { - warn_invalid_dmar(drhd->address, - " returns all ones"); - goto failed; - } - } + if (arg) + iounmap(addr); + else + early_iounmap(addr, VTD_PAGE_SIZE); - entry_header = ((void *)entry_header + entry_header->length); + if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { 
+ warn_invalid_dmar(drhd->address, " returns all ones"); + return -EINVAL; } - return 1; -failed: return 0; } int __init detect_intel_iommu(void) { int ret; + struct dmar_res_callback validate_drhd_cb = { + .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd, + .ignore_unhandled = true, + }; down_write(&dmar_global_lock); ret = dmar_table_detect(); if (ret) - ret = check_zero_address(); - { - if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { - iommu_detected = 1; - /* Make sure ACS will be enabled */ - pci_request_acs(); - } + ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, + &validate_drhd_cb); + if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { + iommu_detected = 1; + /* Make sure ACS will be enabled */ + pci_request_acs(); + } #ifdef CONFIG_X86 - if (ret) - x86_init.iommu.iommu_init = intel_iommu_init; + if (ret) + x86_init.iommu.iommu_init = intel_iommu_init; #endif - } + early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); dmar_tbl = NULL; up_write(&dmar_global_lock); @@ -931,11 +974,32 @@ out: return err; } +static int dmar_alloc_seq_id(struct intel_iommu *iommu) +{ + iommu->seq_id = find_first_zero_bit(dmar_seq_ids, + DMAR_UNITS_SUPPORTED); + if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) { + iommu->seq_id = -1; + } else { + set_bit(iommu->seq_id, dmar_seq_ids); + sprintf(iommu->name, "dmar%d", iommu->seq_id); + } + + return iommu->seq_id; +} + +static void dmar_free_seq_id(struct intel_iommu *iommu) +{ + if (iommu->seq_id >= 0) { + clear_bit(iommu->seq_id, dmar_seq_ids); + iommu->seq_id = -1; + } +} + static int alloc_iommu(struct dmar_drhd_unit *drhd) { struct intel_iommu *iommu; u32 ver, sts; - static int iommu_allocated = 0; int agaw = 0; int msagaw = 0; int err; @@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) if (!iommu) return -ENOMEM; - iommu->seq_id = iommu_allocated++; - sprintf (iommu->name, "dmar%d", iommu->seq_id); + if (dmar_alloc_seq_id(iommu) < 0) { + pr_err("IOMMU: failed to allocate seq_id\n"); + err = -ENOSPC; + goto error; + } err = map_iommu(iommu, drhd->reg_base_addr); if (err) { pr_err("IOMMU: failed to map %s\n", iommu->name); - goto error; + goto error_free_seq_id; } err = -EINVAL; @@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) return 0; - err_unmap: +err_unmap: unmap_iommu(iommu); - error: +error_free_seq_id: + dmar_free_seq_id(iommu); +error: kfree(iommu); return err; } @@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu) if (iommu->reg) unmap_iommu(iommu); + dmar_free_seq_id(iommu); kfree(iommu); } @@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void) return dmar->flags & 0x1; } +/* Check whether DMAR units are in use */ +static inline bool dmar_in_use(void) +{ + return irq_remapping_enabled || intel_iommu_enabled; +} + static int __init dmar_free_unused_resources(void) { struct dmar_drhd_unit *dmaru, *dmaru_n; - /* DMAR units are in use */ - if (irq_remapping_enabled || intel_iommu_enabled) + if (dmar_in_use()) return 0; if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) @@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void) late_initcall(dmar_free_unused_resources); IOMMU_INIT_POST(detect_intel_iommu); + +/* + * DMAR Hotplug Support + * For more details, please refer to Intel(R) Virtualization Technology + * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8 + * "Remapping Hardware Unit Hot Plug". 
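
The rework above replaces parse_dmar_table()'s open-coded switch with a dmar_res_callback dispatch table, so one walker serves both the static ACPI DMAR table and, in the hotplug support that follows, the buffers returned by the _DSM method. A self-contained sketch of the dispatch pattern — types and bounds are simplified, and the kernel walker additionally rejects records that run past the table end:

        #include <stdio.h>

        struct rec_header { unsigned short type, length; };

        typedef int (*res_handler_t)(struct rec_header *, void *);

        #define NTYPES 4

        struct res_callback {
                res_handler_t cb[NTYPES];  /* dispatch, indexed by record type */
                void *arg[NTYPES];
        };

        static int count_unit(struct rec_header *h, void *arg)
        {
                (*(int *)arg)++;
                return 0;
        }

        /* Walk a packed buffer of variable-length records, dispatching by type. */
        static int walk_entries(void *start, unsigned long len,
                                struct res_callback *cb)
        {
                struct rec_header *iter = start;
                void *end = (char *)start + len;

                while ((void *)iter < end && iter->length != 0) {
                        if (iter->type < NTYPES && cb->cb[iter->type]) {
                                int ret = cb->cb[iter->type](iter,
                                                cb->arg[iter->type]);
                                if (ret)
                                        return ret;
                        }
                        iter = (struct rec_header *)((char *)iter + iter->length);
                }
                return 0;
        }

        int main(void)
        {
                struct rec_header recs[2] = {
                        { 0, sizeof(struct rec_header) },
                        { 0, sizeof(struct rec_header) },
                };
                int units = 0;
                struct res_callback cb = { .cb[0] = count_unit,
                                           .arg[0] = &units };

                walk_entries(recs, sizeof(recs), &cb);
                printf("%d units\n", units);    /* prints "2 units" */
                return 0;
        }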
+ */ +static u8 dmar_hp_uuid[] = { + /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C, + /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF +}; + +/* + * Currently there's only one revision and BIOS will not check the revision id, + * so use 0 for safety. + */ +#define DMAR_DSM_REV_ID 0 +#define DMAR_DSM_FUNC_DRHD 1 +#define DMAR_DSM_FUNC_ATSR 2 +#define DMAR_DSM_FUNC_RHSA 3 + +static inline bool dmar_detect_dsm(acpi_handle handle, int func) +{ + return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func); +} + +static int dmar_walk_dsm_resource(acpi_handle handle, int func, + dmar_res_handler_t handler, void *arg) +{ + int ret = -ENODEV; + union acpi_object *obj; + struct acpi_dmar_header *start; + struct dmar_res_callback callback; + static int res_type[] = { + [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT, + [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS, + [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY, + }; + + if (!dmar_detect_dsm(handle, func)) + return 0; + + obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, + func, NULL, ACPI_TYPE_BUFFER); + if (!obj) + return -ENODEV; + + memset(&callback, 0, sizeof(callback)); + callback.cb[res_type[func]] = handler; + callback.arg[res_type[func]] = arg; + start = (struct acpi_dmar_header *)obj->buffer.pointer; + ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback); + + ACPI_FREE(obj); + + return ret; +} + +static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg) +{ + int ret; + struct dmar_drhd_unit *dmaru; + + dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); + if (!dmaru) + return -ENODEV; + + ret = dmar_ir_hotplug(dmaru, true); + if (ret == 0) + ret = dmar_iommu_hotplug(dmaru, true); + + return ret; +} + +static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) +{ + int i, ret; + struct device *dev; + struct dmar_drhd_unit *dmaru; + + dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); + if (!dmaru) + return 0; + + /* + * All PCI devices managed by this unit should have been destroyed. 
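
dmar_hp_remove_drhd() refuses to proceed while any device is still attached to the unit, returning -EBUSY so the caller can roll back. A simplified model of that guard — the real code iterates the scope with for_each_active_dev_scope(), elided here:

        #include <errno.h>
        #include <stdio.h>

        struct dmar_unit_model {
                int ndevs;
                void *devs[4];  /* non-NULL: device still in the unit's scope */
        };

        /* Refuse hot-removal while any device under the unit is alive. */
        static int hp_remove(struct dmar_unit_model *u)
        {
                int i;

                for (i = 0; i < u->ndevs; i++)
                        if (u->devs[i])
                                return -EBUSY;

                /* safe: tear down interrupt remapping, then DMA remapping */
                return 0;
        }

        int main(void)
        {
                struct dmar_unit_model busy = { .ndevs = 1,
                                                .devs = { (void *)1 } };
                struct dmar_unit_model idle = { .ndevs = 0 };

                printf("busy: %d, idle: %d\n",
                       hp_remove(&busy), hp_remove(&idle));
                return 0;
        }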
+ */ + if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) + for_each_active_dev_scope(dmaru->devices, + dmaru->devices_cnt, i, dev) + return -EBUSY; + + ret = dmar_ir_hotplug(dmaru, false); + if (ret == 0) + ret = dmar_iommu_hotplug(dmaru, false); + + return ret; +} + +static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg) +{ + struct dmar_drhd_unit *dmaru; + + dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); + if (dmaru) { + list_del_rcu(&dmaru->list); + synchronize_rcu(); + dmar_free_drhd(dmaru); + } + + return 0; +} + +static int dmar_hotplug_insert(acpi_handle handle) +{ + int ret; + int drhd_count = 0; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_validate_one_drhd, (void *)1); + if (ret) + goto out; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_parse_one_drhd, (void *)&drhd_count); + if (ret == 0 && drhd_count == 0) { + pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n"); + goto out; + } else if (ret) { + goto release_drhd; + } + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA, + &dmar_parse_one_rhsa, NULL); + if (ret) + goto release_drhd; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_parse_one_atsr, NULL); + if (ret) + goto release_atsr; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_add_drhd, NULL); + if (!ret) + return 0; + + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_remove_drhd, NULL); +release_atsr: + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_release_one_atsr, NULL); +release_drhd: + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_release_drhd, NULL); +out: + return ret; +} + +static int dmar_hotplug_remove(acpi_handle handle) +{ + int ret; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_check_one_atsr, NULL); + if (ret) + return ret; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_remove_drhd, NULL); + if (ret == 0) { + WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_release_one_atsr, NULL)); + WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_release_drhd, NULL)); + } else { + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_add_drhd, NULL); + } + + return ret; +} + +static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl, + void *context, void **retval) +{ + acpi_handle *phdl = retval; + + if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { + *phdl = handle; + return AE_CTRL_TERMINATE; + } + + return AE_OK; +} + +static int dmar_device_hotplug(acpi_handle handle, bool insert) +{ + int ret; + acpi_handle tmp = NULL; + acpi_status status; + + if (!dmar_in_use()) + return 0; + + if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { + tmp = handle; + } else { + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, + dmar_get_dsm_handle, + NULL, NULL, &tmp); + if (ACPI_FAILURE(status)) { + pr_warn("Failed to locate _DSM method.\n"); + return -ENXIO; + } + } + if (tmp == NULL) + return 0; + + down_write(&dmar_global_lock); + if (insert) + ret = dmar_hotplug_insert(tmp); + else + ret = dmar_hotplug_remove(tmp); + up_write(&dmar_global_lock); + + return ret; +} + +int dmar_device_add(acpi_handle handle) +{ + return dmar_device_hotplug(handle, true); +} + +int dmar_device_remove(acpi_handle handle) +{ + return dmar_device_hotplug(handle, false); +} diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 
02cd26a17fe0..1232336b960e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root) } static inline void set_root_value(struct root_entry *root, unsigned long value) { + root->val &= ~VTD_PAGE_MASK; root->val |= value & VTD_PAGE_MASK; } @@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context, static inline void context_set_address_root(struct context_entry *context, unsigned long value) { + context->lo &= ~VTD_PAGE_MASK; context->lo |= value & VTD_PAGE_MASK; } @@ -328,17 +330,10 @@ static int hw_pass_through = 1; /* si_domain contains mulitple devices */ #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) -/* define the limit of IOMMUs supported in each domain */ -#ifdef CONFIG_X86 -# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS -#else -# define IOMMU_UNITS_SUPPORTED 64 -#endif - struct dmar_domain { int id; /* domain id */ int nid; /* node id */ - DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED); + DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED); /* bitmap of iommus this domain uses*/ struct list_head devices; /* all devices' list */ @@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) unsigned long flags; root = (struct root_entry *)alloc_pgtable_page(iommu->node); - if (!root) + if (!root) { + pr_err("IOMMU: allocating root entry for %s failed\n", + iommu->name); return -ENOMEM; + } __iommu_flush_cache(iommu, root, ROOT_SIZE); @@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) return 0; } -static void free_dmar_iommu(struct intel_iommu *iommu) +static void disable_dmar_iommu(struct intel_iommu *iommu) { struct dmar_domain *domain; int i; @@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu) if (iommu->gcmd & DMA_GCMD_TE) iommu_disable_translation(iommu); +} - kfree(iommu->domains); - kfree(iommu->domain_ids); - iommu->domains = NULL; - iommu->domain_ids = NULL; +static void free_dmar_iommu(struct intel_iommu *iommu) +{ + if ((iommu->domains) && (iommu->domain_ids)) { + kfree(iommu->domains); + kfree(iommu->domain_ids); + iommu->domains = NULL; + iommu->domain_ids = NULL; + } g_iommus[iommu->seq_id] = NULL; @@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, { struct dma_pte *first_pte = NULL, *pte = NULL; phys_addr_t uninitialized_var(pteval); - unsigned long sg_res; + unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; @@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; - if (sg) - sg_res = 0; - else { - sg_res = nr_pages + 1; + if (!sg) { + sg_res = nr_pages; pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; } @@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw) return 0; } +static void intel_iommu_init_qi(struct intel_iommu *iommu) +{ + /* + * Start from the sane iommu hardware state. + * If the queued invalidation is already initialized by us + * (for example, while enabling interrupt-remapping) then + * we got the things already rolling from a sane state. + */ + if (!iommu->qi) { + /* + * Clear any previous faults. + */ + dmar_fault(-1, iommu); + /* + * Disable queued invalidation if supported and already enabled + * before OS handover. 
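
The first two intel-iommu.c hunks above fix a read-modify-write bug: set_root_value() and context_set_address_root() used to OR the new page address into the entry without clearing the old one, so recycling an entry could merge two addresses. A self-contained illustration of the failure and the fix, with the mask chosen to mirror VTD_PAGE_MASK:

        #include <assert.h>
        #include <stdint.h>

        #define VTD_PAGE_MASK (~0xfffULL)       /* low 12 bits hold flags */

        /* Buggy variant: leftover address bits bleed into the new value. */
        static void set_value_or_only(uint64_t *val, uint64_t addr)
        {
                *val |= addr & VTD_PAGE_MASK;
        }

        /* Fixed variant: clear the address field before installing the new one. */
        static void set_value(uint64_t *val, uint64_t addr)
        {
                *val &= ~VTD_PAGE_MASK;
                *val |= addr & VTD_PAGE_MASK;
        }

        int main(void)
        {
                uint64_t entry = 0x3000 | 0x1;  /* old address + present bit */

                set_value_or_only(&entry, 0x5000);
                assert((entry & VTD_PAGE_MASK) == 0x7000);  /* merged, corrupt */

                entry = 0x3000 | 0x1;
                set_value(&entry, 0x5000);
                assert((entry & VTD_PAGE_MASK) == 0x5000);  /* clean replace */
                return 0;
        }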
+ */ + dmar_disable_qi(iommu); + } + + if (dmar_enable_qi(iommu)) { + /* + * Queued Invalidate not enabled, use Register Based Invalidate + */ + iommu->flush.flush_context = __iommu_flush_context; + iommu->flush.flush_iotlb = __iommu_flush_iotlb; + pr_info("IOMMU: %s using Register based invalidation\n", + iommu->name); + } else { + iommu->flush.flush_context = qi_flush_context; + iommu->flush.flush_iotlb = qi_flush_iotlb; + pr_info("IOMMU: %s using Queued invalidation\n", iommu->name); + } +} + static int __init init_dmars(void) { struct dmar_drhd_unit *drhd; @@ -2728,14 +2764,18 @@ static int __init init_dmars(void) * threaded kernel __init code path all other access are read * only */ - if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) { + if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) { g_num_of_iommus++; continue; } printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n", - IOMMU_UNITS_SUPPORTED); + DMAR_UNITS_SUPPORTED); } + /* Preallocate enough resources for IOMMU hot-addition */ + if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) + g_num_of_iommus = DMAR_UNITS_SUPPORTED; + g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), GFP_KERNEL); if (!g_iommus) { @@ -2764,58 +2804,14 @@ static int __init init_dmars(void) * among all IOMMU's. Need to Split it later. */ ret = iommu_alloc_root_entry(iommu); - if (ret) { - printk(KERN_ERR "IOMMU: allocate root entry failed\n"); + if (ret) goto free_iommu; - } if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; } - /* - * Start from the sane iommu hardware state. - */ - for_each_active_iommu(iommu, drhd) { - /* - * If the queued invalidation is already initialized by us - * (for example, while enabling interrupt-remapping) then - * we got the things already rolling from a sane state. - */ - if (iommu->qi) - continue; - - /* - * Clear any previous faults. - */ - dmar_fault(-1, iommu); - /* - * Disable queued invalidation if supported and already enabled - * before OS handover. 
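/*
 * Whichever backend intel_iommu_init_qi() above selects, callers
 * invalidate through the same two hooks, so the queued-vs-register
 * decision is made once per IOMMU rather than at every call site.
 * Typical use, as in intel_iommu_add() further down:
 *
 *	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 *	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 *
 * Factoring the setup into one helper is also what lets the hotplug
 * path reuse it for a single hot-added unit instead of the boot-time
 * for_each_active_iommu() loops this hunk removes.
 */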
- */ - dmar_disable_qi(iommu); - } - - for_each_active_iommu(iommu, drhd) { - if (dmar_enable_qi(iommu)) { - /* - * Queued Invalidate not enabled, use Register Based - * Invalidate - */ - iommu->flush.flush_context = __iommu_flush_context; - iommu->flush.flush_iotlb = __iommu_flush_iotlb; - printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " - "invalidation\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr); - } else { - iommu->flush.flush_context = qi_flush_context; - iommu->flush.flush_iotlb = qi_flush_iotlb; - printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " - "invalidation\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr); - } - } + for_each_active_iommu(iommu, drhd) + intel_iommu_init_qi(iommu); if (iommu_pass_through) iommu_identity_mapping |= IDENTMAP_ALL; @@ -2901,8 +2897,10 @@ static int __init init_dmars(void) return 0; free_iommu: - for_each_active_iommu(iommu, drhd) + for_each_active_iommu(iommu, drhd) { + disable_dmar_iommu(iommu); free_dmar_iommu(iommu); + } kfree(deferred_flush); free_g_iommus: kfree(g_iommus); @@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {} #endif /* CONFIG_PM */ -int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) +int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; struct dmar_rmrr_unit *rmrru; @@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) return 0; } -int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) +static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) +{ + struct dmar_atsr_unit *atsru; + struct acpi_dmar_atsr *tmp; + + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { + tmp = (struct acpi_dmar_atsr *)atsru->hdr; + if (atsr->segment != tmp->segment) + continue; + if (atsr->header.length != tmp->header.length) + continue; + if (memcmp(atsr, tmp, atsr->header.length) == 0) + return atsru; + } + + return NULL; +} + +int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg) { struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; + if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled) + return 0; + atsr = container_of(hdr, struct acpi_dmar_atsr, header); - atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); + atsru = dmar_find_atsr(atsr); + if (atsru) + return 0; + + atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); if (!atsru) return -ENOMEM; - atsru->hdr = hdr; + /* + * If memory is allocated from slab by ACPI _DSM method, we need to + * copy the memory content because the memory buffer will be freed + * on return. 
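/*
 * dmar_parse_one_atsr() keeps a private copy of the ATSR structure
 * (see the memcpy() just below): when invoked via the ACPI _DSM path
 * the buffer belongs to ACPICA and is freed as soon as the walk
 * returns, so holding a pointer into it would be a use-after-free.
 * Generic shape of the "struct plus trailing buffer" idiom used for
 * that copy (names illustrative):
 */
struct wrapped_unit {
	struct acpi_dmar_header *hdr;	/* points at the trailing copy */
	/* ...per-unit state... */
};

static struct wrapped_unit *wrap_unit(struct acpi_dmar_header *hdr)
{
	struct wrapped_unit *u;

	u = kzalloc(sizeof(*u) + hdr->length, GFP_KERNEL);
	if (!u)
		return NULL;

	u->hdr = (void *)(u + 1);	/* storage right after the struct */
	memcpy(u->hdr, hdr, hdr->length);
	return u;
}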
+ */ + atsru->hdr = (void *)(atsru + 1); + memcpy(atsru->hdr, hdr, hdr->length); atsru->include_all = atsr->flags & 0x1; if (!atsru->include_all) { atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), @@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) kfree(atsru); } +int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg) +{ + struct acpi_dmar_atsr *atsr; + struct dmar_atsr_unit *atsru; + + atsr = container_of(hdr, struct acpi_dmar_atsr, header); + atsru = dmar_find_atsr(atsr); + if (atsru) { + list_del_rcu(&atsru->list); + synchronize_rcu(); + intel_iommu_free_atsr(atsru); + } + + return 0; +} + +int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) +{ + int i; + struct device *dev; + struct acpi_dmar_atsr *atsr; + struct dmar_atsr_unit *atsru; + + atsr = container_of(hdr, struct acpi_dmar_atsr, header); + atsru = dmar_find_atsr(atsr); + if (!atsru) + return 0; + + if (!atsru->include_all && atsru->devices && atsru->devices_cnt) + for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, + i, dev) + return -EBUSY; + + return 0; +} + +static int intel_iommu_add(struct dmar_drhd_unit *dmaru) +{ + int sp, ret = 0; + struct intel_iommu *iommu = dmaru->iommu; + + if (g_iommus[iommu->seq_id]) + return 0; + + if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { + pr_warn("IOMMU: %s doesn't support hardware pass through.\n", + iommu->name); + return -ENXIO; + } + if (!ecap_sc_support(iommu->ecap) && + domain_update_iommu_snooping(iommu)) { + pr_warn("IOMMU: %s doesn't support snooping.\n", + iommu->name); + return -ENXIO; + } + sp = domain_update_iommu_superpage(iommu) - 1; + if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { + pr_warn("IOMMU: %s doesn't support large page.\n", + iommu->name); + return -ENXIO; + } + + /* + * Disable translation if already enabled prior to OS handover. 
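/*
 * dmar_check_one_atsr() above, like dmar_hp_remove_drhd() earlier,
 * treats a unit as removable only while no device in its scope is
 * present. The loop looks odd at first sight: its body runs at most
 * once, for the first live device, which is enough to decide. A
 * minimal sketch of the idiom:
 */
static int scope_is_busy(struct dmar_dev_scope *devices, int cnt)
{
	int i;
	struct device *dev;

	for_each_active_dev_scope(devices, cnt, i, dev)
		return -EBUSY;		/* first live device blocks removal */

	return 0;
}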
+ */ + if (iommu->gcmd & DMA_GCMD_TE) + iommu_disable_translation(iommu); + + g_iommus[iommu->seq_id] = iommu; + ret = iommu_init_domains(iommu); + if (ret == 0) + ret = iommu_alloc_root_entry(iommu); + if (ret) + goto out; + + if (dmaru->ignored) { + /* + * we always have to disable PMRs or DMA may fail on this device + */ + if (force_on) + iommu_disable_protect_mem_regions(iommu); + return 0; + } + + intel_iommu_init_qi(iommu); + iommu_flush_write_buffer(iommu); + ret = dmar_set_interrupt(iommu); + if (ret) + goto disable_iommu; + + iommu_set_root_entry(iommu); + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + iommu_enable_translation(iommu); + + if (si_domain) { + ret = iommu_attach_domain(si_domain, iommu); + if (ret < 0 || si_domain->id != ret) + goto disable_iommu; + domain_attach_iommu(si_domain, iommu); + } + + iommu_disable_protect_mem_regions(iommu); + return 0; + +disable_iommu: + disable_dmar_iommu(iommu); +out: + free_dmar_iommu(iommu); + return ret; +} + +int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ + int ret = 0; + struct intel_iommu *iommu = dmaru->iommu; + + if (!intel_iommu_enabled) + return 0; + if (iommu == NULL) + return -EINVAL; + + if (insert) { + ret = intel_iommu_add(dmaru); + } else { + disable_dmar_iommu(iommu); + free_dmar_iommu(iommu); + } + + return ret; +} + static void intel_iommu_free_dmars(void) { struct dmar_rmrr_unit *rmrru, *rmrr_n; diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 7c80661b35c1..27541d440849 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -36,7 +36,6 @@ struct hpet_scope { static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; static struct hpet_scope ir_hpet[MAX_HPET_TBS]; -static int ir_ioapic_num, ir_hpet_num; /* * Lock ordering: @@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id) int i; for (i = 0; i < MAX_HPET_TBS; i++) - if (ir_hpet[i].id == hpet_id) + if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) return ir_hpet[i].iommu; return NULL; } @@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic) int i; for (i = 0; i < MAX_IO_APICS; i++) - if (ir_ioapic[i].id == apic) + if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) return ir_ioapic[i].iommu; return NULL; } @@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic) down_read(&dmar_global_lock); for (i = 0; i < MAX_IO_APICS; i++) { - if (ir_ioapic[i].id == apic) { + if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; break; } @@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id) down_read(&dmar_global_lock); for (i = 0; i < MAX_HPET_TBS; i++) { - if (ir_hpet[i].id == id) { + if (ir_hpet[i].iommu && ir_hpet[i].id == id) { sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; break; } @@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) raw_spin_unlock_irqrestore(&iommu->register_lock, flags); } - -static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) +static int intel_setup_irq_remapping(struct intel_iommu *iommu) { struct ir_table *ir_table; struct page *pages; unsigned long *bitmap; - ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), - GFP_ATOMIC); + if (iommu->ir_table) + return 0; - if (!iommu->ir_table) + ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC); + if (!ir_table) return -ENOMEM; 
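/*
 * The remainder of intel_setup_irq_remapping() below is reshaped into
 * the kernel's usual goto-unwind form: every failure funnels through
 * labels (out_free_pages, out_free_table) that release, in reverse
 * order, only what was already allocated. Minimal standalone shape of
 * the pattern (names illustrative):
 */
static int setup_sketch(void)
{
	void *table;
	unsigned long pages;

	table = kzalloc(128, GFP_ATOMIC);
	if (!table)
		return -ENOMEM;

	pages = __get_free_page(GFP_ATOMIC);
	if (!pages)
		goto out_free_table;

	/* ...further steps, each failure adding one out_free_* label;
	 * on success, table and pages get stashed in device state... */
	return 0;

out_free_table:
	kfree(table);
	return -ENOMEM;
}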
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, @@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) if (!pages) { pr_err("IR%d: failed to allocate pages of order %d\n", iommu->seq_id, INTR_REMAP_PAGE_ORDER); - kfree(iommu->ir_table); - return -ENOMEM; + goto out_free_table; } bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), sizeof(long), GFP_ATOMIC); if (bitmap == NULL) { pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); - __free_pages(pages, INTR_REMAP_PAGE_ORDER); - kfree(ir_table); - return -ENOMEM; + goto out_free_pages; } ir_table->base = page_address(pages); ir_table->bitmap = bitmap; - - iommu_set_irq_remapping(iommu, mode); + iommu->ir_table = ir_table; return 0; + +out_free_pages: + __free_pages(pages, INTR_REMAP_PAGE_ORDER); +out_free_table: + kfree(ir_table); + return -ENOMEM; +} + +static void intel_teardown_irq_remapping(struct intel_iommu *iommu) +{ + if (iommu && iommu->ir_table) { + free_pages((unsigned long)iommu->ir_table->base, + INTR_REMAP_PAGE_ORDER); + kfree(iommu->ir_table->bitmap); + kfree(iommu->ir_table); + iommu->ir_table = NULL; + } } /* @@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void) if (!ecap_ir_support(iommu->ecap)) continue; - if (intel_setup_irq_remapping(iommu, eim)) + if (intel_setup_irq_remapping(iommu)) goto error; + iommu_set_irq_remapping(iommu, eim); setup = 1; } @@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void) return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; error: - /* - * handle error condition gracefully here! - */ + for_each_iommu(iommu, drhd) + if (ecap_ir_support(iommu->ecap)) { + iommu_disable_irq_remapping(iommu); + intel_teardown_irq_remapping(iommu); + } if (x2apic_present) pr_warn("Failed to enable irq remapping. 
You are vulnerable to irq-injection attacks.\n"); @@ -699,12 +714,13 @@ error: return -1; } -static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, - struct intel_iommu *iommu) +static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, + struct intel_iommu *iommu, + struct acpi_dmar_hardware_unit *drhd) { struct acpi_dmar_pci_path *path; u8 bus; - int count; + int count, free = -1; bus = scope->bus; path = (struct acpi_dmar_pci_path *)(scope + 1); @@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, PCI_SECONDARY_BUS); path++; } - ir_hpet[ir_hpet_num].bus = bus; - ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); - ir_hpet[ir_hpet_num].iommu = iommu; - ir_hpet[ir_hpet_num].id = scope->enumeration_id; - ir_hpet_num++; + + for (count = 0; count < MAX_HPET_TBS; count++) { + if (ir_hpet[count].iommu == iommu && + ir_hpet[count].id == scope->enumeration_id) + return 0; + else if (ir_hpet[count].iommu == NULL && free == -1) + free = count; + } + if (free == -1) { + pr_warn("Exceeded Max HPET blocks\n"); + return -ENOSPC; + } + + ir_hpet[free].iommu = iommu; + ir_hpet[free].id = scope->enumeration_id; + ir_hpet[free].bus = bus; + ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function); + pr_info("HPET id %d under DRHD base 0x%Lx\n", + scope->enumeration_id, drhd->address); + + return 0; } -static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, - struct intel_iommu *iommu) +static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, + struct intel_iommu *iommu, + struct acpi_dmar_hardware_unit *drhd) { struct acpi_dmar_pci_path *path; u8 bus; - int count; + int count, free = -1; bus = scope->bus; path = (struct acpi_dmar_pci_path *)(scope + 1); @@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, path++; } - ir_ioapic[ir_ioapic_num].bus = bus; - ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); - ir_ioapic[ir_ioapic_num].iommu = iommu; - ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; - ir_ioapic_num++; + for (count = 0; count < MAX_IO_APICS; count++) { + if (ir_ioapic[count].iommu == iommu && + ir_ioapic[count].id == scope->enumeration_id) + return 0; + else if (ir_ioapic[count].iommu == NULL && free == -1) + free = count; + } + if (free == -1) { + pr_warn("Exceeded Max IO APICS\n"); + return -ENOSPC; + } + + ir_ioapic[free].bus = bus; + ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function); + ir_ioapic[free].iommu = iommu; + ir_ioapic[free].id = scope->enumeration_id; + pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n", + scope->enumeration_id, drhd->address, iommu->seq_id); + + return 0; } static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, struct intel_iommu *iommu) { + int ret = 0; struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_device_scope *scope; void *start, *end; drhd = (struct acpi_dmar_hardware_unit *)header; - start = (void *)(drhd + 1); end = ((void *)drhd) + header->length; - while (start < end) { + while (start < end && ret == 0) { scope = start; - if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { - if (ir_ioapic_num == MAX_IO_APICS) { - printk(KERN_WARNING "Exceeded Max IO APICS\n"); - return -1; - } - - printk(KERN_INFO "IOAPIC id %d under DRHD base " - " 0x%Lx IOMMU %d\n", scope->enumeration_id, - drhd->address, iommu->seq_id); + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) + ret = ir_parse_one_ioapic_scope(scope, 
iommu, drhd); + else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) + ret = ir_parse_one_hpet_scope(scope, iommu, drhd); + start += scope->length; + } - ir_parse_one_ioapic_scope(scope, iommu); - } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { - if (ir_hpet_num == MAX_HPET_TBS) { - printk(KERN_WARNING "Exceeded Max HPET blocks\n"); - return -1; - } + return ret; +} - printk(KERN_INFO "HPET id %d under DRHD base" - " 0x%Lx\n", scope->enumeration_id, - drhd->address); +static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) +{ + int i; - ir_parse_one_hpet_scope(scope, iommu); - } - start += scope->length; - } + for (i = 0; i < MAX_HPET_TBS; i++) + if (ir_hpet[i].iommu == iommu) + ir_hpet[i].iommu = NULL; - return 0; + for (i = 0; i < MAX_IO_APICS; i++) + if (ir_ioapic[i].iommu == iommu) + ir_ioapic[i].iommu = NULL; } /* @@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = { .msi_setup_irq = intel_msi_setup_irq, .alloc_hpet_msi = intel_alloc_hpet_msi, }; + +/* + * Support of Interrupt Remapping Unit Hotplug + */ +static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) +{ + int ret; + int eim = x2apic_enabled(); + + if (eim && !ecap_eim_support(iommu->ecap)) { + pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n", + iommu->reg_phys, iommu->ecap); + return -ENODEV; + } + + if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { + pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n", + iommu->reg_phys); + return -ENODEV; + } + + /* TODO: check all IOAPICs are covered by IOMMU */ + + /* Setup Interrupt-remapping now. */ + ret = intel_setup_irq_remapping(iommu); + if (ret) { + pr_err("DRHD %Lx: failed to allocate resource\n", + iommu->reg_phys); + ir_remove_ioapic_hpet_scope(iommu); + return ret; + } + + if (!iommu->qi) { + /* Clear previous faults. 
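/*
 * ir_parse_one_ioapic_scope() and ir_parse_one_hpet_scope() above now
 * share a single-pass scan that both de-duplicates (same IOMMU and
 * enumeration id already present) and remembers the first free slot,
 * which is what makes scope parsing idempotent and so safe to re-run
 * when a unit is hot-added. Generic sketch (struct illustrative):
 */
struct scope_slot {
	void *owner;			/* NULL marks a free slot */
	int id;
};

static int claim_scope_slot(struct scope_slot *tbl, int n,
			    void *owner, int id)
{
	int i, free = -1;

	for (i = 0; i < n; i++) {
		if (tbl[i].owner == owner && tbl[i].id == id)
			return 0;	/* already registered */
		if (!tbl[i].owner && free == -1)
			free = i;	/* remember the first hole */
	}

	if (free == -1)
		return -ENOSPC;

	tbl[free].owner = owner;
	tbl[free].id = id;
	return 0;
}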
*/ + dmar_fault(-1, iommu); + iommu_disable_irq_remapping(iommu); + dmar_disable_qi(iommu); + } + + /* Enable queued invalidation */ + ret = dmar_enable_qi(iommu); + if (!ret) { + iommu_set_irq_remapping(iommu, eim); + } else { + pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n", + iommu->reg_phys, iommu->ecap, ret); + intel_teardown_irq_remapping(iommu); + ir_remove_ioapic_hpet_scope(iommu); + } + + return ret; +} + +int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ + int ret = 0; + struct intel_iommu *iommu = dmaru->iommu; + + if (!irq_remapping_enabled) + return 0; + if (iommu == NULL) + return -EINVAL; + if (!ecap_ir_support(iommu->ecap)) + return 0; + + if (insert) { + if (!iommu->ir_table) + ret = dmar_ir_add(dmaru, iommu); + } else { + if (iommu->ir_table) { + if (!bitmap_empty(iommu->ir_table->bitmap, + INTR_REMAP_TABLE_ENTRIES)) { + ret = -EBUSY; + } else { + iommu_disable_irq_remapping(iommu); + intel_teardown_irq_remapping(iommu); + ir_remove_ioapic_hpet_scope(iommu); + } + } + } + + return ret; +} diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 02e4313e937c..1bd63352ab17 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, { struct scatterlist *s; size_t mapped = 0; - unsigned int i; + unsigned int i, min_pagesz; int ret; - for_each_sg(sg, s, nents, i) { - phys_addr_t phys = page_to_phys(sg_page(s)); + if (unlikely(domain->ops->pgsize_bitmap == 0UL)) + return 0; - /* We are mapping on page boundarys, so offset must be 0 */ - if (s->offset) + min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); + + for_each_sg(sg, s, nents, i) { + phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; + + /* + * We are mapping on IOMMU page boundaries, so offset within + * the page must be 0. However, the IOMMU may support pages + * smaller than PAGE_SIZE, so s->offset may still represent + * an offset of that boundary within the CPU page. 
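/*
 * Worked example for the alignment check just below: with
 * pgsize_bitmap = 0x40201000 (4 KiB, 2 MiB and 1 GiB page sizes),
 * min_pagesz = 1 << __ffs(0x40201000) = 1 << 12 = 4 KiB. On a CPU
 * with 64 KiB pages, an sg entry at s->offset == 0x3000 can now be
 * mapped (it is 4 KiB-aligned within the CPU page), while
 * s->offset == 0x200 still fails IS_ALIGNED(0x200, 0x1000) and
 * aborts the whole mapping, as before.
 */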
+ */ + if (!IS_ALIGNED(s->offset, min_pagesz)) goto out_err; ret = iommu_map(domain, iova + mapped, phys, s->length, prot); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index e509c58eee92..99effbb17191 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev) dev_name(&pdev->dev), mmu); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); - return irq; + return ret; } ipmmu_device_reset(mmu); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 1c7b78ecf3e3..e1b05379ca0e 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -73,8 +73,7 @@ fail: static void __disable_clocks(struct msm_iommu_drvdata *drvdata) { - if (drvdata->clk) - clk_disable(drvdata->clk); + clk_disable(drvdata->clk); clk_disable(drvdata->pclk); } diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 61def7cb5263..b6d01f97e537 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c @@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev) struct clk *iommu_clk; struct clk *iommu_pclk; struct msm_iommu_drvdata *drvdata; - struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; + struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev); void __iomem *regs_base; int ret, irq, par; @@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drvdata); - if (iommu_clk) - clk_disable(iommu_clk); + clk_disable(iommu_clk); clk_disable(iommu_pclk); @@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev) static int msm_iommu_ctx_probe(struct platform_device *pdev) { - struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; + struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev); struct msm_iommu_drvdata *drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int i, ret; @@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev) SET_NSCFG(drvdata->base, mid, 3); } - if (drvdata->clk) - clk_disable(drvdata->clk); + clk_disable(drvdata->clk); clk_disable(drvdata->pclk); dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 531658d17333..f3d20a2039d2 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -10,45 +10,35 @@ * published by the Free Software Foundation. 
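/*
 * The msm_iommu hunks above may drop their "if (clk)" guards because
 * the common clock framework already treats a NULL or error pointer
 * as a no-op; clk_disable() opens with (paraphrased from
 * drivers/clk/clk.c, not part of this patch):
 *
 *	if (IS_ERR_OR_NULL(clk))
 *		return;
 *
 * so callers holding an optional clock need no conditional of their
 * own.
 */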
*/ -#include <linux/module.h> #include <linux/err.h> -#include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/uaccess.h> -#include <linux/platform_device.h> #include <linux/debugfs.h> -#include <linux/omap-iommu.h> #include <linux/platform_data/iommu-omap.h> #include "omap-iopgtable.h" #include "omap-iommu.h" -#define MAXCOLUMN 100 /* for short messages */ - static DEFINE_MUTEX(iommu_debug_lock); static struct dentry *iommu_debug_root; -static ssize_t debug_read_ver(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) +static inline bool is_omap_iommu_detached(struct omap_iommu *obj) { - u32 ver = omap_iommu_arch_version(); - char buf[MAXCOLUMN], *p = buf; - - p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); - - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + return !obj->domain; } static ssize_t debug_read_regs(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes, rest; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, return bytes; } -static ssize_t debug_write_pagetable(struct file *file, - const char __user *userbuf, size_t count, loff_t *ppos) +static void dump_ioptable(struct seq_file *s) { - struct iotlb_entry e; - struct cr_regs cr; - int err; - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); - char buf[MAXCOLUMN], *p = buf; - - count = min(count, sizeof(buf)); - - mutex_lock(&iommu_debug_lock); - if (copy_from_user(p, userbuf, count)) { - mutex_unlock(&iommu_debug_lock); - return -EFAULT; - } - - sscanf(p, "%x %x", &cr.cam, &cr.ram); - if (!cr.cam || !cr.ram) { - mutex_unlock(&iommu_debug_lock); - return -EINVAL; - } - - omap_iotlb_cr_to_e(&cr, &e); - err = omap_iopgtable_store_entry(obj, &e); - if (err) - dev_err(obj->dev, "%s: fail to store cr\n", __func__); - - mutex_unlock(&iommu_debug_lock); - return count; -} - -#define dump_ioptable_entry_one(lv, da, val) \ - ({ \ - int __err = 0; \ - ssize_t bytes; \ - const int maxcol = 22; \ - const char *str = "%d: %08x %08x\n"; \ - bytes = snprintf(p, maxcol, str, lv, da, val); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - __err = -ENOMEM; \ - __err; \ - }) - -static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) -{ - int i; - u32 *iopgd; - char *p = buf; + int i, j; + u32 da; + u32 *iopgd, *iopte; + struct omap_iommu *obj = s->private; spin_lock(&obj->page_table_lock); iopgd = iopgd_offset(obj, 0); for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { - int j, err; - u32 *iopte; - u32 da; - if (!*iopgd) continue; if (!(*iopgd & IOPGD_TABLE)) { da = i << IOPGD_SHIFT; - - err = dump_ioptable_entry_one(1, da, *iopgd); - if (err) - goto out; + seq_printf(s, "1: 0x%08x 
0x%08x\n", da, *iopgd); continue; } iopte = iopte_offset(iopgd, 0); - for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { if (!*iopte) continue; da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); - err = dump_ioptable_entry_one(2, da, *iopgd); - if (err) - goto out; + seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte); } } -out: - spin_unlock(&obj->page_table_lock); - return p - buf; + spin_unlock(&obj->page_table_lock); } -static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) +static int debug_read_pagetable(struct seq_file *s, void *data) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); - char *p, *buf; - size_t bytes; + struct omap_iommu *obj = s->private; - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); - p += sprintf(p, "-----------------------------------------\n"); + if (is_omap_iommu_detached(obj)) + return -EPERM; mutex_lock(&iommu_debug_lock); - bytes = PAGE_SIZE - (p - buf); - p += dump_ioptable(obj, p, bytes); - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + seq_printf(s, "L: %8s %8s\n", "da:", "pte:"); + seq_puts(s, "--------------------------\n"); + dump_ioptable(s); mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - return bytes; + return 0; } -#define DEBUG_FOPS(name) \ - static const struct file_operations debug_##name##_fops = { \ - .open = simple_open, \ - .read = debug_read_##name, \ - .write = debug_write_##name, \ - .llseek = generic_file_llseek, \ - }; +#define DEBUG_SEQ_FOPS_RO(name) \ + static int debug_open_##name(struct inode *inode, struct file *file) \ + { \ + return single_open(file, debug_read_##name, inode->i_private); \ + } \ + \ + static const struct file_operations debug_##name##_fops = { \ + .open = debug_open_##name, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } #define DEBUG_FOPS_RO(name) \ static const struct file_operations debug_##name##_fops = { \ @@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, .llseek = generic_file_llseek, \ }; -DEBUG_FOPS_RO(ver); DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(tlb); -DEBUG_FOPS(pagetable); +DEBUG_SEQ_FOPS_RO(pagetable); #define __DEBUG_ADD_FILE(attr, mode) \ { \ struct dentry *dent; \ - dent = debugfs_create_file(#attr, mode, parent, \ - dev, &debug_##attr##_fops); \ + dent = debugfs_create_file(#attr, mode, obj->debug_dir, \ + obj, &debug_##attr##_fops); \ if (!dent) \ - return -ENOMEM; \ + goto err; \ } -#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600) #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) -static int iommu_debug_register(struct device *dev, void *data) +void omap_iommu_debugfs_add(struct omap_iommu *obj) { - struct platform_device *pdev = to_platform_device(dev); - struct omap_iommu *obj = platform_get_drvdata(pdev); - struct omap_iommu_arch_data *arch_data; - struct dentry *d, *parent; - - if (!obj || !obj->dev) - return -EINVAL; - - arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); - if (!arch_data) - return -ENOMEM; - - arch_data->iommu_dev = obj; + struct dentry *d; - dev->archdata.iommu = arch_data; + if (!iommu_debug_root) + return; - d = debugfs_create_dir(obj->name, iommu_debug_root); - if (!d) - goto nomem; - parent = d; + obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root); + if (!obj->debug_dir) + return; - d = debugfs_create_u8("nr_tlb_entries", 400, parent, 
+ d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir, (u8 *)&obj->nr_tlb_entries); if (!d) - goto nomem; + return; - DEBUG_ADD_FILE_RO(ver); DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(tlb); - DEBUG_ADD_FILE(pagetable); + DEBUG_ADD_FILE_RO(pagetable); - return 0; + return; -nomem: - kfree(arch_data); - return -ENOMEM; +err: + debugfs_remove_recursive(obj->debug_dir); } -static int iommu_debug_unregister(struct device *dev, void *data) +void omap_iommu_debugfs_remove(struct omap_iommu *obj) { - if (!dev->archdata.iommu) - return 0; - - kfree(dev->archdata.iommu); + if (!obj->debug_dir) + return; - dev->archdata.iommu = NULL; - - return 0; + debugfs_remove_recursive(obj->debug_dir); } -static int __init iommu_debug_init(void) +void __init omap_iommu_debugfs_init(void) { - struct dentry *d; - int err; - - d = debugfs_create_dir("iommu", NULL); - if (!d) - return -ENOMEM; - iommu_debug_root = d; - - err = omap_foreach_iommu_device(d, iommu_debug_register); - if (err) - goto err_out; - return 0; - -err_out: - debugfs_remove_recursive(iommu_debug_root); - return err; + iommu_debug_root = debugfs_create_dir("omap_iommu", NULL); + if (!iommu_debug_root) + pr_err("can't create debugfs dir\n"); } -module_init(iommu_debug_init) -static void __exit iommu_debugfs_exit(void) +void __exit omap_iommu_debugfs_exit(void) { - debugfs_remove_recursive(iommu_debug_root); - omap_foreach_iommu_device(NULL, iommu_debug_unregister); + debugfs_remove(iommu_debug_root); } -module_exit(iommu_debugfs_exit) - -MODULE_DESCRIPTION("omap iommu: debugfs interface"); -MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 18003c044454..bbb7dcef02d3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -76,53 +76,23 @@ struct iotlb_lock { short vict; }; -/* accommodate the difference between omap1 and omap2/3 */ -static const struct iommu_functions *arch_iommu; - static struct platform_driver omap_iommu_driver; static struct kmem_cache *iopte_cachep; /** - * omap_install_iommu_arch - Install archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * There are several kind of iommu algorithm(tlb, pagetable) among - * omap series. This interface installs such an iommu algorighm. - **/ -int omap_install_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu) - return -EBUSY; - - arch_iommu = ops; - return 0; -} -EXPORT_SYMBOL_GPL(omap_install_iommu_arch); - -/** - * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * This interface uninstalls the iommu algorighm installed previously. 
- **/ -void omap_uninstall_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu != ops) - pr_err("%s: not your arch\n", __func__); - - arch_iommu = NULL; -} -EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); - -/** * omap_iommu_save_ctx - Save registers for pm off-mode support * @dev: client device **/ void omap_iommu_save_ctx(struct device *dev) { struct omap_iommu *obj = dev_to_omap_iommu(dev); + u32 *p = obj->ctx; + int i; - arch_iommu->save_ctx(obj); + for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { + p[i] = iommu_read_reg(obj, i * sizeof(u32)); + dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); + } } EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); @@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); void omap_iommu_restore_ctx(struct device *dev) { struct omap_iommu *obj = dev_to_omap_iommu(dev); + u32 *p = obj->ctx; + int i; - arch_iommu->restore_ctx(obj); + for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { + iommu_write_reg(obj, p[i], i * sizeof(u32)); + dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); + } } EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); -/** - * omap_iommu_arch_version - Return running iommu arch version - **/ -u32 omap_iommu_arch_version(void) +static void __iommu_set_twl(struct omap_iommu *obj, bool on) { - return arch_iommu->version; + u32 l = iommu_read_reg(obj, MMU_CNTL); + + if (on) + iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); + else + iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); + + l &= ~MMU_CNTL_MASK; + if (on) + l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); + else + l |= (MMU_CNTL_MMU_EN); + + iommu_write_reg(obj, l, MMU_CNTL); +} + +static int omap2_iommu_enable(struct omap_iommu *obj) +{ + u32 l, pa; + + if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) + return -EINVAL; + + pa = virt_to_phys(obj->iopgd); + if (!IS_ALIGNED(pa, SZ_16K)) + return -EINVAL; + + l = iommu_read_reg(obj, MMU_REVISION); + dev_info(obj->dev, "%s: version %d.%d\n", obj->name, + (l >> 4) & 0xf, l & 0xf); + + iommu_write_reg(obj, pa, MMU_TTB); + + if (obj->has_bus_err_back) + iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); + + __iommu_set_twl(obj, true); + + return 0; +} + +static void omap2_iommu_disable(struct omap_iommu *obj) +{ + u32 l = iommu_read_reg(obj, MMU_CNTL); + + l &= ~MMU_CNTL_MASK; + iommu_write_reg(obj, l, MMU_CNTL); + + dev_dbg(obj->dev, "%s is shutting down\n", obj->name); } -EXPORT_SYMBOL_GPL(omap_iommu_arch_version); static int iommu_enable(struct omap_iommu *obj) { int err; struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = pdev->dev.platform_data; - - if (!arch_iommu) - return -ENODEV; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->deassert_reset) { err = pdata->deassert_reset(pdev, pdata->reset_name); @@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj) pm_runtime_get_sync(obj->dev); - err = arch_iommu->enable(obj); + err = omap2_iommu_enable(obj); return err; } @@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj) static void iommu_disable(struct omap_iommu *obj) { struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); - arch_iommu->disable(obj); + omap2_iommu_disable(obj); pm_runtime_put_sync(obj->dev); @@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj) /* * TLB operations */ -void omap_iotlb_cr_to_e(struct 
cr_regs *cr, struct iotlb_entry *e) -{ - BUG_ON(!cr || !e); - - arch_iommu->cr_to_e(cr, e); -} -EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); - static inline int iotlb_cr_valid(struct cr_regs *cr) { if (!cr) return -EINVAL; - return arch_iommu->cr_valid(cr); -} - -static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, - struct iotlb_entry *e) -{ - if (!e) - return NULL; - - return arch_iommu->alloc_cr(obj, e); + return cr->cam & MMU_CAM_V; } static u32 iotlb_cr_to_virt(struct cr_regs *cr) { - return arch_iommu->cr_to_virt(cr); + u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; + u32 mask = get_cam_va_mask(cr->cam & page_size); + + return cr->cam & mask; } static u32 get_iopte_attr(struct iotlb_entry *e) { - return arch_iommu->get_pte_attr(e); + u32 attr; + + attr = e->mixed << 5; + attr |= e->endian; + attr |= e->elsz >> 3; + attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || + (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); + return attr; } static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) { - return arch_iommu->fault_isr(obj, da); + u32 status, fault_addr; + + status = iommu_read_reg(obj, MMU_IRQSTATUS); + status &= MMU_IRQ_MASK; + if (!status) { + *da = 0; + return 0; + } + + fault_addr = iommu_read_reg(obj, MMU_FAULT_AD); + *da = fault_addr; + + iommu_write_reg(obj, status, MMU_IRQSTATUS); + + return status; } static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) @@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) { - arch_iommu->tlb_read_cr(obj, cr); + cr->cam = iommu_read_reg(obj, MMU_READ_CAM); + cr->ram = iommu_read_reg(obj, MMU_READ_RAM); } static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) { - arch_iommu->tlb_load_cr(obj, cr); + iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); + iommu_write_reg(obj, cr->ram, MMU_RAM); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); iommu_write_reg(obj, 1, MMU_LD_TLB); } -/** - * iotlb_dump_cr - Dump an iommu tlb entry into buf - * @obj: target iommu - * @cr: contents of cam and ram register - * @buf: output buffer - **/ -static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, - char *buf) -{ - BUG_ON(!cr || !buf); - - return arch_iommu->dump_cr(obj, cr, buf); -} - /* only used in iotlb iteration for-loop */ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) { @@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) return cr; } +#ifdef PREFETCH_IOTLB +static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, + struct iotlb_entry *e) +{ + struct cr_regs *cr; + + if (!e) + return NULL; + + if (e->da & ~(get_cam_va_mask(e->pgsz))) { + dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, + e->da); + return ERR_PTR(-EINVAL); + } + + cr = kmalloc(sizeof(*cr), GFP_KERNEL); + if (!cr) + return ERR_PTR(-ENOMEM); + + cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; + cr->ram = e->pa | e->endian | e->elsz | e->mixed; + + return cr; +} + /** * load_iotlb_entry - Set an iommu tlb entry * @obj: target iommu * @e: an iommu tlb entry info **/ -#ifdef PREFETCH_IOTLB static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) { int err = 0; @@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj) pm_runtime_put_sync(obj->dev); } -#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) +#ifdef CONFIG_OMAP_IOMMU_DEBUG + +#define pr_reg(name) \ + do { \ + 
ssize_t bytes; \ + const char *str = "%20s: %08x\n"; \ + const int maxcol = 32; \ + bytes = snprintf(p, maxcol, str, __stringify(name), \ + iommu_read_reg(obj, MMU_##name)); \ + p += bytes; \ + len -= bytes; \ + if (len < maxcol) \ + goto out; \ + } while (0) + +static ssize_t +omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) +{ + char *p = buf; + + pr_reg(REVISION); + pr_reg(IRQSTATUS); + pr_reg(IRQENABLE); + pr_reg(WALKING_ST); + pr_reg(CNTL); + pr_reg(FAULT_AD); + pr_reg(TTB); + pr_reg(LOCK); + pr_reg(LD_TLB); + pr_reg(CAM); + pr_reg(RAM); + pr_reg(GFLUSH); + pr_reg(FLUSH_ENTRY); + pr_reg(READ_CAM); + pr_reg(READ_RAM); + pr_reg(EMU_FAULT_AD); +out: + return p - buf; +} ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) { @@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) pm_runtime_get_sync(obj->dev); - bytes = arch_iommu->dump_ctx(obj, buf, bytes); + bytes = omap2_iommu_dump_ctx(obj, buf, bytes); pm_runtime_put_sync(obj->dev); return bytes; } -EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); static int __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) @@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) } /** + * iotlb_dump_cr - Dump an iommu tlb entry into buf + * @obj: target iommu + * @cr: contents of cam and ram register + * @buf: output buffer + **/ +static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, + char *buf) +{ + char *p = buf; + + /* FIXME: Need more detail analysis of cam/ram */ + p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, + (cr->cam & MMU_CAM_P) ? 1 : 0); + + return p - buf; +} + +/** * omap_dump_tlb_entries - dump cr arrays to given buffer * @obj: target iommu * @buf: output buffer @@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) return p - buf; } -EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); - -int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) -{ - return driver_for_each_device(&omap_iommu_driver.driver, - NULL, data, fn); -} -EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); -#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ +#endif /* CONFIG_OMAP_IOMMU_DEBUG */ /* * H/W pagetable operations @@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) * @obj: target iommu * @e: an iommu tlb entry info **/ -int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) +static int +omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) { int err; @@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) prefetch_iotlb_entry(obj, e); return err; } -EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); /** * iopgtable_lookup_entry - Lookup an iommu pte entry @@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) u32 *iopgd, *iopte; struct omap_iommu *obj = data; struct iommu_domain *domain = obj->domain; + struct omap_iommu_domain *omap_domain = domain->priv; - if (!obj->refcount) + if (!omap_domain->iommu_dev) return IRQ_NONE; errs = iommu_report_fault(obj, &da); @@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) spin_lock(&obj->iommu_lock); - /* an iommu device can only be attached once */ - if (++obj->refcount > 1) { - dev_err(dev, "%s: already attached!\n", obj->name); - err = -EBUSY; - goto err_enable; - } - obj->iopgd = iopgd; err = iommu_enable(obj); if 
(err) @@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) return obj; err_enable: - obj->refcount--; spin_unlock(&obj->iommu_lock); return ERR_PTR(err); } @@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj) spin_lock(&obj->iommu_lock); - if (--obj->refcount == 0) - iommu_disable(obj); - + iommu_disable(obj); obj->iopgd = NULL; spin_unlock(&obj->iommu_lock); @@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev) int irq; struct omap_iommu *obj; struct resource *res; - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *of = pdev->dev.of_node; obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); @@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev) pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); + omap_iommu_debugfs_add(obj); + dev_info(&pdev->dev, "%s registered\n", obj->name); return 0; } @@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev) struct omap_iommu *obj = platform_get_drvdata(pdev); iopgtable_clear_entry_all(obj); + omap_iommu_debugfs_remove(obj); pm_runtime_disable(obj->dev); @@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) e->da = da; e->pa = pa; e->valid = MMU_CAM_V; - /* FIXME: add OMAP1 support */ e->pgsz = pgsz; e->endian = MMU_RAM_ENDIAN_LITTLE; e->elsz = MMU_RAM_ELSZ_8; @@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, omap_domain->iommu_dev = arch_data->iommu_dev = NULL; omap_domain->dev = NULL; + oiommu->domain = NULL; } static void omap_iommu_detach_dev(struct iommu_domain *domain, @@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void) bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + omap_iommu_debugfs_init(); + return platform_driver_register(&omap_iommu_driver); } /* must be ready before omap3isp is probed */ @@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void) kmem_cache_destroy(iopte_cachep); platform_driver_unregister(&omap_iommu_driver); + + omap_iommu_debugfs_exit(); } module_exit(omap_iommu_exit); diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 4f1b68c08c15..d736630df3c8 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -10,9 +10,8 @@ * published by the Free Software Foundation. 
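/*
 * Worked example for the MMU_CNTL bits this header gains just below
 * (used by __iommu_set_twl() in omap-iommu.c above): enabling the MMU
 * together with its table-walking logic first clears the three-bit
 * control field, then sets the enable bits:
 *
 *	l = (l & ~MMU_CNTL_MASK) | MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN;
 *	  = (l & ~0x0e)          | 0x02            | 0x04
 *
 * With MMU_CNTL_SHIFT == 1, the mask 7 << 1 == 0x0e covers exactly
 * EML_TLB, TWL_EN and MMU_EN, leaving the other CNTL bits untouched.
 */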
*/ -#if defined(CONFIG_ARCH_OMAP1) -#error "iommu for this processor not implemented yet" -#endif +#ifndef _OMAP_IOMMU_H +#define _OMAP_IOMMU_H struct iotlb_entry { u32 da; @@ -30,10 +29,9 @@ struct omap_iommu { const char *name; void __iomem *regbase; struct device *dev; - void *isr_priv; struct iommu_domain *domain; + struct dentry *debug_dir; - unsigned int refcount; spinlock_t iommu_lock; /* global for this whole object */ /* @@ -67,34 +65,6 @@ struct cr_regs { }; }; -/* architecture specific functions */ -struct iommu_functions { - unsigned long version; - - int (*enable)(struct omap_iommu *obj); - void (*disable)(struct omap_iommu *obj); - void (*set_twl)(struct omap_iommu *obj, bool on); - u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra); - - void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr); - void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr); - - struct cr_regs *(*alloc_cr)(struct omap_iommu *obj, - struct iotlb_entry *e); - int (*cr_valid)(struct cr_regs *cr); - u32 (*cr_to_virt)(struct cr_regs *cr); - void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); - ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr, - char *buf); - - u32 (*get_pte_attr)(struct iotlb_entry *e); - - void (*save_ctx)(struct omap_iommu *obj); - void (*restore_ctx)(struct omap_iommu *obj); - ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); -}; - -#ifdef CONFIG_IOMMU_API /** * dev_to_omap_iommu() - retrieves an omap iommu object from a user device * @dev: iommu client device @@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) return arch_data->iommu_dev; } -#endif /* * MMU Register offsets @@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * MMU Register bit definitions */ +/* IRQSTATUS & IRQENABLE */ +#define MMU_IRQ_MULTIHITFAULT (1 << 4) +#define MMU_IRQ_TABLEWALKFAULT (1 << 3) +#define MMU_IRQ_EMUMISS (1 << 2) +#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) +#define MMU_IRQ_TLBMISS (1 << 0) + +#define __MMU_IRQ_FAULT \ + (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) +#define MMU_IRQ_MASK \ + (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) +#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) +#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) + +/* MMU_CNTL */ +#define MMU_CNTL_SHIFT 1 +#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) +#define MMU_CNTL_EML_TLB (1 << 3) +#define MMU_CNTL_TWL_EN (1 << 2) +#define MMU_CNTL_MMU_EN (1 << 1) + +/* CAM */ #define MMU_CAM_VATAG_SHIFT 12 #define MMU_CAM_VATAG_MASK \ ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) @@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) #define MMU_CAM_PGSZ_4K (2 << 0) #define MMU_CAM_PGSZ_16M (3 << 0) +/* RAM */ #define MMU_RAM_PADDR_SHIFT 12 #define MMU_RAM_PADDR_MASK \ ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) @@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 +#define get_cam_va_mask(pgsz) \ + (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ + ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ + ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ + ((pgsz) == MMU_CAM_PGSZ_4K) ? 
0xfffff000 : 0) + /* * utilities for super page(16MB, 1MB, 64KB and 4KB) */ @@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * global functions */ -extern u32 omap_iommu_arch_version(void); - -extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); - -extern int -omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); - -extern void omap_iommu_save_ctx(struct device *dev); -extern void omap_iommu_restore_ctx(struct device *dev); - -extern int omap_foreach_iommu_device(void *data, - int (*fn)(struct device *, void *)); - -extern int omap_install_iommu_arch(const struct iommu_functions *ops); -extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops); - +#ifdef CONFIG_OMAP_IOMMU_DEBUG extern ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); extern size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); +void omap_iommu_debugfs_init(void); +void omap_iommu_debugfs_exit(void); + +void omap_iommu_debugfs_add(struct omap_iommu *obj); +void omap_iommu_debugfs_remove(struct omap_iommu *obj); +#else +static inline void omap_iommu_debugfs_init(void) { } +static inline void omap_iommu_debugfs_exit(void) { } + +static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { } +static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { } +#endif + /* * register accessors */ @@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) { __raw_writel(val, obj->regbase + offs); } + +#endif /* _OMAP_IOMMU_H */ diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c deleted file mode 100644 index 5e1ea3b0bf16..000000000000 --- a/drivers/iommu/omap-iommu2.c +++ /dev/null @@ -1,337 +0,0 @@ -/* - * omap iommu: omap2/3 architecture specific functions - * - * Copyright (C) 2008-2009 Nokia Corporation - * - * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, - * Paul Mundt and Toshihiro Kobayashi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/err.h> -#include <linux/device.h> -#include <linux/io.h> -#include <linux/jiffies.h> -#include <linux/module.h> -#include <linux/omap-iommu.h> -#include <linux/slab.h> -#include <linux/stringify.h> -#include <linux/platform_data/iommu-omap.h> - -#include "omap-iommu.h" - -/* - * omap2 architecture specific register bit definitions - */ -#define IOMMU_ARCH_VERSION 0x00000011 - -/* IRQSTATUS & IRQENABLE */ -#define MMU_IRQ_MULTIHITFAULT (1 << 4) -#define MMU_IRQ_TABLEWALKFAULT (1 << 3) -#define MMU_IRQ_EMUMISS (1 << 2) -#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) -#define MMU_IRQ_TLBMISS (1 << 0) - -#define __MMU_IRQ_FAULT \ - (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) -#define MMU_IRQ_MASK \ - (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) -#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) -#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) - -/* MMU_CNTL */ -#define MMU_CNTL_SHIFT 1 -#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) -#define MMU_CNTL_EML_TLB (1 << 3) -#define MMU_CNTL_TWL_EN (1 << 2) -#define MMU_CNTL_MMU_EN (1 << 1) - -#define get_cam_va_mask(pgsz) \ - (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ - ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ - ((pgsz) == MMU_CAM_PGSZ_64K) ? 
0xffff0000 : \ - ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) - -/* IOMMU errors */ -#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0) -#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1) -#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2) -#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3) -#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4) - -static void __iommu_set_twl(struct omap_iommu *obj, bool on) -{ - u32 l = iommu_read_reg(obj, MMU_CNTL); - - if (on) - iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); - else - iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); - - l &= ~MMU_CNTL_MASK; - if (on) - l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); - else - l |= (MMU_CNTL_MMU_EN); - - iommu_write_reg(obj, l, MMU_CNTL); -} - - -static int omap2_iommu_enable(struct omap_iommu *obj) -{ - u32 l, pa; - - if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) - return -EINVAL; - - pa = virt_to_phys(obj->iopgd); - if (!IS_ALIGNED(pa, SZ_16K)) - return -EINVAL; - - l = iommu_read_reg(obj, MMU_REVISION); - dev_info(obj->dev, "%s: version %d.%d\n", obj->name, - (l >> 4) & 0xf, l & 0xf); - - iommu_write_reg(obj, pa, MMU_TTB); - - if (obj->has_bus_err_back) - iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); - - __iommu_set_twl(obj, true); - - return 0; -} - -static void omap2_iommu_disable(struct omap_iommu *obj) -{ - u32 l = iommu_read_reg(obj, MMU_CNTL); - - l &= ~MMU_CNTL_MASK; - iommu_write_reg(obj, l, MMU_CNTL); - - dev_dbg(obj->dev, "%s is shutting down\n", obj->name); -} - -static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on) -{ - __iommu_set_twl(obj, false); -} - -static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) -{ - u32 stat, da; - u32 errs = 0; - - stat = iommu_read_reg(obj, MMU_IRQSTATUS); - stat &= MMU_IRQ_MASK; - if (!stat) { - *ra = 0; - return 0; - } - - da = iommu_read_reg(obj, MMU_FAULT_AD); - *ra = da; - - if (stat & MMU_IRQ_TLBMISS) - errs |= OMAP_IOMMU_ERR_TLB_MISS; - if (stat & MMU_IRQ_TRANSLATIONFAULT) - errs |= OMAP_IOMMU_ERR_TRANS_FAULT; - if (stat & MMU_IRQ_EMUMISS) - errs |= OMAP_IOMMU_ERR_EMU_MISS; - if (stat & MMU_IRQ_TABLEWALKFAULT) - errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT; - if (stat & MMU_IRQ_MULTIHITFAULT) - errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT; - iommu_write_reg(obj, stat, MMU_IRQSTATUS); - - return errs; -} - -static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) -{ - cr->cam = iommu_read_reg(obj, MMU_READ_CAM); - cr->ram = iommu_read_reg(obj, MMU_READ_RAM); -} - -static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) -{ - iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); - iommu_write_reg(obj, cr->ram, MMU_RAM); -} - -static u32 omap2_cr_to_virt(struct cr_regs *cr) -{ - u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; - u32 mask = get_cam_va_mask(cr->cam & page_size); - - return cr->cam & mask; -} - -static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj, - struct iotlb_entry *e) -{ - struct cr_regs *cr; - - if (e->da & ~(get_cam_va_mask(e->pgsz))) { - dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, - e->da); - return ERR_PTR(-EINVAL); - } - - cr = kmalloc(sizeof(*cr), GFP_KERNEL); - if (!cr) - return ERR_PTR(-ENOMEM); - - cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; - cr->ram = e->pa | e->endian | e->elsz | e->mixed; - - return cr; -} - -static inline int omap2_cr_valid(struct cr_regs *cr) -{ - return cr->cam & MMU_CAM_V; -} - -static u32 omap2_get_pte_attr(struct iotlb_entry *e) -{ - u32 attr; - - attr = e->mixed << 5; - attr |= e->endian; - attr |= e->elsz >> 3; - 
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || - (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); - return attr; -} - -static ssize_t -omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf) -{ - char *p = buf; - - /* FIXME: Need more detail analysis of cam/ram */ - p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, - (cr->cam & MMU_CAM_P) ? 1 : 0); - - return p - buf; -} - -#define pr_reg(name) \ - do { \ - ssize_t bytes; \ - const char *str = "%20s: %08x\n"; \ - const int maxcol = 32; \ - bytes = snprintf(p, maxcol, str, __stringify(name), \ - iommu_read_reg(obj, MMU_##name)); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - goto out; \ - } while (0) - -static ssize_t -omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) -{ - char *p = buf; - - pr_reg(REVISION); - pr_reg(IRQSTATUS); - pr_reg(IRQENABLE); - pr_reg(WALKING_ST); - pr_reg(CNTL); - pr_reg(FAULT_AD); - pr_reg(TTB); - pr_reg(LOCK); - pr_reg(LD_TLB); - pr_reg(CAM); - pr_reg(RAM); - pr_reg(GFLUSH); - pr_reg(FLUSH_ENTRY); - pr_reg(READ_CAM); - pr_reg(READ_RAM); - pr_reg(EMU_FAULT_AD); -out: - return p - buf; -} - -static void omap2_iommu_save_ctx(struct omap_iommu *obj) -{ - int i; - u32 *p = obj->ctx; - - for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { - p[i] = iommu_read_reg(obj, i * sizeof(u32)); - dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); - } - - BUG_ON(p[0] != IOMMU_ARCH_VERSION); -} - -static void omap2_iommu_restore_ctx(struct omap_iommu *obj) -{ - int i; - u32 *p = obj->ctx; - - for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { - iommu_write_reg(obj, p[i], i * sizeof(u32)); - dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); - } - - BUG_ON(p[0] != IOMMU_ARCH_VERSION); -} - -static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -{ - e->da = cr->cam & MMU_CAM_VATAG_MASK; - e->pa = cr->ram & MMU_RAM_PADDR_MASK; - e->valid = cr->cam & MMU_CAM_V; - e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; - e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; - e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; - e->mixed = cr->ram & MMU_RAM_MIXED; -} - -static const struct iommu_functions omap2_iommu_ops = { - .version = IOMMU_ARCH_VERSION, - - .enable = omap2_iommu_enable, - .disable = omap2_iommu_disable, - .set_twl = omap2_iommu_set_twl, - .fault_isr = omap2_iommu_fault_isr, - - .tlb_read_cr = omap2_tlb_read_cr, - .tlb_load_cr = omap2_tlb_load_cr, - - .cr_to_e = omap2_cr_to_e, - .cr_to_virt = omap2_cr_to_virt, - .alloc_cr = omap2_alloc_cr, - .cr_valid = omap2_cr_valid, - .dump_cr = omap2_dump_cr, - - .get_pte_attr = omap2_get_pte_attr, - - .save_ctx = omap2_iommu_save_ctx, - .restore_ctx = omap2_iommu_restore_ctx, - .dump_ctx = omap2_iommu_dump_ctx, -}; - -static int __init omap2_iommu_init(void) -{ - return omap_install_iommu_arch(&omap2_iommu_ops); -} -module_init(omap2_iommu_init); - -static void __exit omap2_iommu_exit(void) -{ - omap_uninstall_iommu_arch(&omap2_iommu_ops); -} -module_exit(omap2_iommu_exit); - -MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); -MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c new file mode 100644 index 000000000000..b2023af384b9 --- /dev/null +++ b/drivers/iommu/rockchip-iommu.c @@ -0,0 +1,1038 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <asm/cacheflush.h> +#include <asm/pgtable.h> +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +/** MMU register offsets */ +#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */ +#define RK_MMU_STATUS 0x04 +#define RK_MMU_COMMAND 0x08 +#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */ +#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */ +#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */ +#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */ +#define RK_MMU_INT_MASK 0x1C /* IRQ enable */ +#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */ +#define RK_MMU_AUTO_GATING 0x24 + +#define DTE_ADDR_DUMMY 0xCAFEBABE +#define FORCE_RESET_TIMEOUT 100 /* ms */ + +/* RK_MMU_STATUS fields */ +#define RK_MMU_STATUS_PAGING_ENABLED BIT(0) +#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1) +#define RK_MMU_STATUS_STALL_ACTIVE BIT(2) +#define RK_MMU_STATUS_IDLE BIT(3) +#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4) +#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5) +#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31) + +/* RK_MMU_COMMAND command values */ +#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */ +#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */ +#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */ +#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stall re-enables paging */ +#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */ +#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */ +#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */ + +/* RK_MMU_INT_* register fields */ +#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */ +#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */ +#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR) + +#define NUM_DT_ENTRIES 1024 +#define NUM_PT_ENTRIES 1024 + +#define SPAGE_ORDER 12 +#define SPAGE_SIZE (1 << SPAGE_ORDER) + + /* + * Support mapping any size that fits in one page table: + * 4 KiB to 4 MiB + */ +#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 + +#define IOMMU_REG_POLL_COUNT_FAST 1000 + +struct rk_iommu_domain { + struct list_head iommus; + u32 *dt; /* page directory table */ + spinlock_t iommus_lock; /* lock for iommus list */ + spinlock_t dt_lock; /* lock for modifying page directory table */ +}; + +struct rk_iommu { + struct device *dev; + void __iomem *base; + int irq; + struct list_head node; /* entry in rk_iommu_domain.iommus */ + struct iommu_domain *domain; /* domain to which iommu is attached */ +}; + +static inline void rk_table_flush(u32 *va, unsigned int count) +{ + phys_addr_t pa_start = virt_to_phys(va); + phys_addr_t pa_end = virt_to_phys(va + count); + size_t size = pa_end - pa_start; + + __cpuc_flush_dcache_area(va, size); + outer_flush_range(pa_start, pa_end); +} + +/** + * Inspired by _wait_for in intel_drv.h + * This is NOT safe for use in interrupt context. + * + * Note that it's important that we check the condition again after having + * timed out, since the timeout could be due to preemption or similar and + * we've never had a chance to check the condition before the timeout. 
+ */ +#define rk_wait_for(COND, MS) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = (COND) ? 0 : -ETIMEDOUT; \ + break; \ + } \ + usleep_range(50, 100); \ + } \ + ret__; \ +}) + +/* + * The Rockchip rk3288 iommu uses a 2-level page table. + * The first level is the "Directory Table" (DT). + * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing + * to a "Page Table". + * The second level is the 1024 Page Tables (PT). + * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to + * a 4 KB page of physical memory. + * + * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries). + * Each iommu device has a MMU_DTE_ADDR register that contains the physical + * address of the start of the DT page. + * + * The structure of the page table is as follows: + * + * DT + * MMU_DTE_ADDR -> +-----+ + * | | + * +-----+ PT + * | DTE | -> +-----+ + * +-----+ | | Memory + * | | +-----+ Page + * | | | PTE | -> +-----+ + * +-----+ +-----+ | | + * | | | | + * | | | | + * +-----+ | | + * | | + * | | + * +-----+ + */ + +/* + * Each DTE has a PT address and a valid bit: + * +---------------------+-----------+-+ + * | PT address | Reserved |V| + * +---------------------+-----------+-+ + * 31:12 - PT address (PTs always starts on a 4 KB boundary) + * 11: 1 - Reserved + * 0 - 1 if PT @ PT address is valid + */ +#define RK_DTE_PT_ADDRESS_MASK 0xfffff000 +#define RK_DTE_PT_VALID BIT(0) + +static inline phys_addr_t rk_dte_pt_address(u32 dte) +{ + return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; +} + +static inline bool rk_dte_is_pt_valid(u32 dte) +{ + return dte & RK_DTE_PT_VALID; +} + +static u32 rk_mk_dte(u32 *pt) +{ + phys_addr_t pt_phys = virt_to_phys(pt); + return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; +} + +/* + * Each PTE has a Page address, some flags and a valid bit: + * +---------------------+---+-------+-+ + * | Page address |Rsv| Flags |V| + * +---------------------+---+-------+-+ + * 31:12 - Page address (Pages always start on a 4 KB boundary) + * 11: 9 - Reserved + * 8: 1 - Flags + * 8 - Read allocate - allocate cache space on read misses + * 7 - Read cache - enable cache & prefetch of data + * 6 - Write buffer - enable delaying writes on their way to memory + * 5 - Write allocate - allocate cache space on write misses + * 4 - Write cache - different writes can be merged together + * 3 - Override cache attributes + * if 1, bits 4-8 control cache attributes + * if 0, the system bus defaults are used + * 2 - Writable + * 1 - Readable + * 0 - 1 if Page @ Page address is valid + */ +#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000 +#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe +#define RK_PTE_PAGE_WRITABLE BIT(2) +#define RK_PTE_PAGE_READABLE BIT(1) +#define RK_PTE_PAGE_VALID BIT(0) + +static inline phys_addr_t rk_pte_page_address(u32 pte) +{ + return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; +} + +static inline bool rk_pte_is_page_valid(u32 pte) +{ + return pte & RK_PTE_PAGE_VALID; +} + +/* TODO: set cache flags per prot IOMMU_CACHE */ +static u32 rk_mk_pte(phys_addr_t page, int prot) +{ + u32 flags = 0; + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE : 0; + page &= RK_PTE_PAGE_ADDRESS_MASK; + return page | flags | RK_PTE_PAGE_VALID; +} + +static u32 rk_mk_pte_invalid(u32 pte) +{ + return pte & ~RK_PTE_PAGE_VALID; +} + +/* + * rk3288 iova (IOMMU Virtual Address) format + * 31 22.21 12.11 0 + * +-----------+-----------+-------------+ + * | DTE index | PTE index | Page offset | + * +-----------+-----------+-------------+ + * 31:22 - DTE index - index of DTE in DT + * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address + * 11: 0 - Page offset - offset into page @ PTE.page_address + */ +#define RK_IOVA_DTE_MASK 0xffc00000 +#define RK_IOVA_DTE_SHIFT 22 +#define RK_IOVA_PTE_MASK 0x003ff000 +#define RK_IOVA_PTE_SHIFT 12 +#define RK_IOVA_PAGE_MASK 0x00000fff +#define RK_IOVA_PAGE_SHIFT 0 + +static u32 rk_iova_dte_index(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; +} + +static u32 rk_iova_pte_index(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; +} + +static u32 rk_iova_page_offset(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; +} + +static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) +{ + return readl(iommu->base + offset); +} + +static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) +{ + writel(value, iommu->base + offset); +} + +static void rk_iommu_command(struct rk_iommu *iommu, u32 command) +{ + writel(command, iommu->base + RK_MMU_COMMAND); +} + +static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, + size_t size) +{ + dma_addr_t iova_end = iova + size; + /* + * TODO(djkurtz): Figure out when it is more efficient to shootdown the + * entire iotlb rather than iterate over individual iovas. + */ + for (; iova < iova_end; iova += SPAGE_SIZE) + rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); +} + +static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) +{ + return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; +} + +static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) +{ + return rk_iommu_read(iommu, RK_MMU_STATUS) & + RK_MMU_STATUS_PAGING_ENABLED; +} + +static int rk_iommu_enable_stall(struct rk_iommu *iommu) +{ + int ret; + + if (rk_iommu_is_stall_active(iommu)) + return 0; + + /* Stall can only be enabled if paging is enabled */ + if (!rk_iommu_is_paging_enabled(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); + + ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); + if (ret) + dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_disable_stall(struct rk_iommu *iommu) +{ + int ret; + + if (!rk_iommu_is_stall_active(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); + + ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); + if (ret) + dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_enable_paging(struct rk_iommu *iommu) +{ + int ret; + + if (rk_iommu_is_paging_enabled(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); + + ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); + if (ret) + dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_disable_paging(struct rk_iommu *iommu) +{ + int ret; + + if (!rk_iommu_is_paging_enabled(iommu)) + return 0; + + 
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); + + ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); + if (ret) + dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_force_reset(struct rk_iommu *iommu) +{ + int ret; + u32 dte_addr; + + /* + * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY + * and verifying that upper 5 nybbles are read back. + */ + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); + + dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); + if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) { + dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); + return -EFAULT; + } + + rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); + + ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000, + FORCE_RESET_TIMEOUT); + if (ret) + dev_err(iommu->dev, "FORCE_RESET command timed out\n"); + + return ret; +} + +static void log_iova(struct rk_iommu *iommu, dma_addr_t iova) +{ + u32 dte_index, pte_index, page_offset; + u32 mmu_dte_addr; + phys_addr_t mmu_dte_addr_phys, dte_addr_phys; + u32 *dte_addr; + u32 dte; + phys_addr_t pte_addr_phys = 0; + u32 *pte_addr = NULL; + u32 pte = 0; + phys_addr_t page_addr_phys = 0; + u32 page_flags = 0; + + dte_index = rk_iova_dte_index(iova); + pte_index = rk_iova_pte_index(iova); + page_offset = rk_iova_page_offset(iova); + + mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); + mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; + + dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); + dte_addr = phys_to_virt(dte_addr_phys); + dte = *dte_addr; + + if (!rk_dte_is_pt_valid(dte)) + goto print_it; + + pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); + pte_addr = phys_to_virt(pte_addr_phys); + pte = *pte_addr; + + if (!rk_pte_is_page_valid(pte)) + goto print_it; + + page_addr_phys = rk_pte_page_address(pte) + page_offset; + page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; + +print_it: + dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", + &iova, dte_index, pte_index, page_offset); + dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n", + &mmu_dte_addr_phys, &dte_addr_phys, dte, + rk_dte_is_pt_valid(dte), &pte_addr_phys, pte, + rk_pte_is_page_valid(pte), &page_addr_phys, page_flags); +} + +static irqreturn_t rk_iommu_irq(int irq, void *dev_id) +{ + struct rk_iommu *iommu = dev_id; + u32 status; + u32 int_status; + dma_addr_t iova; + + int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS); + if (int_status == 0) + return IRQ_NONE; + + iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR); + + if (int_status & RK_MMU_IRQ_PAGE_FAULT) { + int flags; + + status = rk_iommu_read(iommu, RK_MMU_STATUS); + flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ? + IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; + + dev_err(iommu->dev, "Page fault at %pad of type %s\n", + &iova, + (flags == IOMMU_FAULT_WRITE) ? "write" : "read"); + + log_iova(iommu, iova); + + /* + * Report page fault to any installed handlers. + * Ignore the return code, though, since we always zap cache + * and clear the page fault anyway. 
+ */ + if (iommu->domain) + report_iommu_fault(iommu->domain, iommu->dev, iova, + flags); + else + dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); + + rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); + rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE); + } + + if (int_status & RK_MMU_IRQ_BUS_ERROR) + dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); + + if (int_status & ~RK_MMU_IRQ_MASK) + dev_err(iommu->dev, "unexpected int_status: %#08x\n", + int_status); + + rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status); + + return IRQ_HANDLED; +} + +static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + phys_addr_t pt_phys, phys = 0; + u32 dte, pte; + u32 *page_table; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + dte = rk_domain->dt[rk_iova_dte_index(iova)]; + if (!rk_dte_is_pt_valid(dte)) + goto out; + + pt_phys = rk_dte_pt_address(dte); + page_table = (u32 *)phys_to_virt(pt_phys); + pte = page_table[rk_iova_pte_index(iova)]; + if (!rk_pte_is_page_valid(pte)) + goto out; + + phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); +out: + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + return phys; +} + +static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, + dma_addr_t iova, size_t size) +{ + struct list_head *pos; + unsigned long flags; + + /* shootdown these iova from all iommus using this domain */ + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_for_each(pos, &rk_domain->iommus) { + struct rk_iommu *iommu; + iommu = list_entry(pos, struct rk_iommu, node); + rk_iommu_zap_lines(iommu, iova, size); + } + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); +} + +static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, + dma_addr_t iova) +{ + u32 *page_table, *dte_addr; + u32 dte; + phys_addr_t pt_phys; + + assert_spin_locked(&rk_domain->dt_lock); + + dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; + dte = *dte_addr; + if (rk_dte_is_pt_valid(dte)) + goto done; + + page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + if (!page_table) + return ERR_PTR(-ENOMEM); + + dte = rk_mk_dte(page_table); + *dte_addr = dte; + + rk_table_flush(page_table, NUM_PT_ENTRIES); + rk_table_flush(dte_addr, 1); + + /* + * Zap the first iova of newly allocated page table so iommu evicts + * old cached value of new dte from the iotlb. 
+ */ + rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); + +done: + pt_phys = rk_dte_pt_address(dte); + return (u32 *)phys_to_virt(pt_phys); +} + +static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, + u32 *pte_addr, dma_addr_t iova, size_t size) +{ + unsigned int pte_count; + unsigned int pte_total = size / SPAGE_SIZE; + + assert_spin_locked(&rk_domain->dt_lock); + + for (pte_count = 0; pte_count < pte_total; pte_count++) { + u32 pte = pte_addr[pte_count]; + if (!rk_pte_is_page_valid(pte)) + break; + + pte_addr[pte_count] = rk_mk_pte_invalid(pte); + } + + rk_table_flush(pte_addr, pte_count); + + return pte_count * SPAGE_SIZE; +} + +static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, + dma_addr_t iova, phys_addr_t paddr, size_t size, + int prot) +{ + unsigned int pte_count; + unsigned int pte_total = size / SPAGE_SIZE; + phys_addr_t page_phys; + + assert_spin_locked(&rk_domain->dt_lock); + + for (pte_count = 0; pte_count < pte_total; pte_count++) { + u32 pte = pte_addr[pte_count]; + + if (rk_pte_is_page_valid(pte)) + goto unwind; + + pte_addr[pte_count] = rk_mk_pte(paddr, prot); + + paddr += SPAGE_SIZE; + } + + rk_table_flush(pte_addr, pte_count); + + return 0; +unwind: + /* Unmap the range of iovas that we just mapped */ + rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); + + iova += pte_count * SPAGE_SIZE; + page_phys = rk_pte_page_address(pte_addr[pte_count]); + pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", + &iova, &page_phys, &paddr, prot); + + return -EADDRINUSE; +} + +static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + dma_addr_t iova = (dma_addr_t)_iova; + u32 *page_table, *pte_addr; + int ret; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + /* + * pgsize_bitmap specifies iova sizes that fit in one page table + * (1024 4-KiB pages = 4 MiB). + * So, size will always be 4096 <= size <= 4194304. + * Since iommu_map() guarantees that both iova and size will be + * aligned, we will always only be mapping from a single dte here. + */ + page_table = rk_dte_get_page_table(rk_domain, iova); + if (IS_ERR(page_table)) { + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + return PTR_ERR(page_table); + } + + pte_addr = &page_table[rk_iova_pte_index(iova)]; + ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + return ret; +} + +static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, + size_t size) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + dma_addr_t iova = (dma_addr_t)_iova; + phys_addr_t pt_phys; + u32 dte; + u32 *pte_addr; + size_t unmap_size; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + /* + * pgsize_bitmap specifies iova sizes that fit in one page table + * (1024 4-KiB pages = 4 MiB). + * So, size will always be 4096 <= size <= 4194304. + * Since iommu_unmap() guarantees that both iova and size will be + * aligned, we will always only be unmapping from a single dte here. 
+ */ + dte = rk_domain->dt[rk_iova_dte_index(iova)]; + /* Just return 0 if iova is unmapped */ + if (!rk_dte_is_pt_valid(dte)) { + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + return 0; + } + + pt_phys = rk_dte_pt_address(dte); + pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); + unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); + + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + /* Shootdown iotlb entries for iova range that was just unmapped */ + rk_iommu_zap_iova(rk_domain, iova, unmap_size); + + return unmap_size; +} + +static struct rk_iommu *rk_iommu_from_dev(struct device *dev) +{ + struct iommu_group *group; + struct device *iommu_dev; + struct rk_iommu *rk_iommu; + + group = iommu_group_get(dev); + if (!group) + return NULL; + iommu_dev = iommu_group_get_iommudata(group); + rk_iommu = dev_get_drvdata(iommu_dev); + iommu_group_put(group); + + return rk_iommu; +} + +static int rk_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + int ret; + phys_addr_t dte_addr; + + /* + * Allow 'virtual devices' (e.g., drm) to attach to domain. + * Such a device does not belong to an iommu group. + */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return 0; + + ret = rk_iommu_enable_stall(iommu); + if (ret) + return ret; + + ret = rk_iommu_force_reset(iommu); + if (ret) + return ret; + + iommu->domain = domain; + + ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, + IRQF_SHARED, dev_name(dev), iommu); + if (ret) + return ret; + + dte_addr = virt_to_phys(rk_domain->dt); + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr); + rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); + rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); + + ret = rk_iommu_enable_paging(iommu); + if (ret) + return ret; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_add_tail(&iommu->node, &rk_domain->iommus); + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + dev_info(dev, "Attached to iommu domain\n"); + + rk_iommu_disable_stall(iommu); + + return 0; +} + +static void rk_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + + /* Allow 'virtual devices' (eg drm) to detach from domain */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_del_init(&iommu->node); + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + /* Ignore error while disabling, just keep going */ + rk_iommu_enable_stall(iommu); + rk_iommu_disable_paging(iommu); + rk_iommu_write(iommu, RK_MMU_INT_MASK, 0); + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0); + rk_iommu_disable_stall(iommu); + + devm_free_irq(dev, iommu->irq, iommu); + + iommu->domain = NULL; + + dev_info(dev, "Detached from iommu domain\n"); +} + +static int rk_iommu_domain_init(struct iommu_domain *domain) +{ + struct rk_iommu_domain *rk_domain; + + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); + if (!rk_domain) + return -ENOMEM; + + /* + * rk32xx iommus use a 2 level pagetable. + * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. + * Allocate one 4 KiB page for each table. 
+ */ + rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); + if (!rk_domain->dt) + goto err_dt; + + rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); + + spin_lock_init(&rk_domain->iommus_lock); + spin_lock_init(&rk_domain->dt_lock); + INIT_LIST_HEAD(&rk_domain->iommus); + + domain->priv = rk_domain; + + return 0; +err_dt: + kfree(rk_domain); + return -ENOMEM; +} + +static void rk_iommu_domain_destroy(struct iommu_domain *domain) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + int i; + + WARN_ON(!list_empty(&rk_domain->iommus)); + + for (i = 0; i < NUM_DT_ENTRIES; i++) { + u32 dte = rk_domain->dt[i]; + if (rk_dte_is_pt_valid(dte)) { + phys_addr_t pt_phys = rk_dte_pt_address(dte); + u32 *page_table = phys_to_virt(pt_phys); + free_page((unsigned long)page_table); + } + } + + free_page((unsigned long)rk_domain->dt); + kfree(domain->priv); + domain->priv = NULL; +} + +static bool rk_iommu_is_dev_iommu_master(struct device *dev) +{ + struct device_node *np = dev->of_node; + int ret; + + /* + * An iommu master has an iommus property containing a list of phandles + * to iommu nodes, each with an #iommu-cells property with value 0. + */ + ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells"); + return (ret > 0); +} + +static int rk_iommu_group_set_iommudata(struct iommu_group *group, + struct device *dev) +{ + struct device_node *np = dev->of_node; + struct platform_device *pd; + int ret; + struct of_phandle_args args; + + /* + * An iommu master has an iommus property containing a list of phandles + * to iommu nodes, each with an #iommu-cells property with value 0. + */ + ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, + &args); + if (ret) { + dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", + np->full_name, ret); + return ret; + } + if (args.args_count != 0) { + dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", + args.np->full_name, args.args_count); + return -EINVAL; + } + + pd = of_find_device_by_node(args.np); + of_node_put(args.np); + if (!pd) { + dev_err(dev, "iommu %s not found\n", args.np->full_name); + return -EPROBE_DEFER; + } + + /* TODO(djkurtz): handle multiple slave iommus for a single master */ + iommu_group_set_iommudata(group, &pd->dev, NULL); + + return 0; +} + +static int rk_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + int ret; + + if (!rk_iommu_is_dev_iommu_master(dev)) + return -ENODEV; + + group = iommu_group_get(dev); + if (!group) { + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + } + + ret = iommu_group_add_device(group, dev); + if (ret) + goto err_put_group; + + ret = rk_iommu_group_set_iommudata(group, dev); + if (ret) + goto err_remove_device; + + iommu_group_put(group); + + return 0; + +err_remove_device: + iommu_group_remove_device(dev); +err_put_group: + iommu_group_put(group); + return ret; +} + +static void rk_iommu_remove_device(struct device *dev) +{ + if (!rk_iommu_is_dev_iommu_master(dev)) + return; + + iommu_group_remove_device(dev); +} + +static const struct iommu_ops rk_iommu_ops = { + .domain_init = rk_iommu_domain_init, + .domain_destroy = rk_iommu_domain_destroy, + .attach_dev = rk_iommu_attach_device, + .detach_dev = rk_iommu_detach_device, + .map = rk_iommu_map, + .unmap = rk_iommu_unmap, + .add_device = rk_iommu_add_device, + .remove_device = rk_iommu_remove_device, + .iova_to_phys = rk_iommu_iova_to_phys, + .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, +}; + 
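[Editorial aside, not part of the patch: the translation scheme that rk_iommu_iova_to_phys() and the RK_IOVA_*/RK_DTE_*/RK_PTE_* definitions above implement is two mask-and-index steps plus a page offset. The stand-alone C sketch below walks one iova through a toy DT/PT pair the same way; the table contents, addresses, and the array-based phys_to_virt() stand-in are all made up for illustration.]

#include <stdint.h>
#include <stdio.h>

#define DTE_SHIFT 22          /* iova[31:22]: index into the DT */
#define PTE_SHIFT 12          /* iova[21:12]: index into the PT */
#define ADDR_MASK 0xfffff000u /* bits 31:12 of a DTE/PTE hold an address */
#define VALID     0x00000001u /* bit 0 of a DTE/PTE is the valid bit */

/* Toy "physical memory": table 0 plays the DT, table 1 plays a PT.
 * A table's pretend physical address is its index << 12, so the
 * phys_to_virt() step of the real driver becomes an array lookup. */
static uint32_t tables[2][1024];

static uint32_t walk(uint32_t iova)
{
	uint32_t dte = tables[0][(iova >> DTE_SHIFT) & 0x3ff];
	uint32_t pte;

	if (!(dte & VALID))
		return 0;	/* no page table: iova is unmapped */
	pte = tables[(dte & ADDR_MASK) >> 12][(iova >> PTE_SHIFT) & 0x3ff];
	if (!(pte & VALID))
		return 0;	/* no page: iova is unmapped */
	return (pte & ADDR_MASK) | (iova & 0xfff);
}

int main(void)
{
	uint32_t iova = 0x00401234; /* DTE index 1, PTE index 1, offset 0x234 */

	tables[0][1] = (1u << 12) | VALID;  /* DTE 1 -> PT at toy pa 0x1000 */
	tables[1][1] = 0x80000000u | VALID; /* PTE 1 -> page at 0x80000000 */
	printf("iova 0x%08x -> pa 0x%08x\n", iova, walk(iova)); /* 0x80000234 */
	return 0;
}

[Like rk_iommu_iova_to_phys(), the sketch reports an unmapped iova as physical address 0.]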
+static int rk_iommu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rk_iommu *iommu; + struct resource *res; + + iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + platform_set_drvdata(pdev, iommu); + iommu->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iommu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iommu->base)) + return PTR_ERR(iommu->base); + + iommu->irq = platform_get_irq(pdev, 0); + if (iommu->irq < 0) { + dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); + return -ENXIO; + } + + return 0; +} + +static int rk_iommu_remove(struct platform_device *pdev) +{ + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id rk_iommu_dt_ids[] = { + { .compatible = "rockchip,iommu" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); +#endif + +static struct platform_driver rk_iommu_driver = { + .probe = rk_iommu_probe, + .remove = rk_iommu_remove, + .driver = { + .name = "rk_iommu", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(rk_iommu_dt_ids), + }, +}; + +static int __init rk_iommu_init(void) +{ + int ret; + + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); + if (ret) + return ret; + + return platform_driver_register(&rk_iommu_driver); +} +static void __exit rk_iommu_exit(void) +{ + platform_driver_unregister(&rk_iommu_driver); +} + +subsys_initcall(rk_iommu_init); +module_exit(rk_iommu_exit); + +MODULE_DESCRIPTION("IOMMU API for Rockchip"); +MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>"); +MODULE_ALIAS("platform:rockchip-iommu"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 9efe5f10f97b..e12cb23d786c 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -130,3 +130,7 @@ config KEYSTONE_IRQ help Support for Texas Instruments Keystone 2 IRQ controller IP which is part of the Keystone 2 IPC mechanism + +config MIPS_GIC + bool + select MIPS_CM diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index f0909d05eae3..4954a314c31e 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o +obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c new file mode 100644 index 000000000000..2b0468e3df6a --- /dev/null +++ b/drivers/irqchip/irq-mips-gic.c @@ -0,0 +1,789 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ */ +#include <linux/bitmap.h> +#include <linux/clocksource.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/mips-gic.h> +#include <linux/of_address.h> +#include <linux/sched.h> +#include <linux/smp.h> + +#include <asm/mips-cm.h> +#include <asm/setup.h> +#include <asm/traps.h> + +#include <dt-bindings/interrupt-controller/mips-gic.h> + +#include "irqchip.h" + +unsigned int gic_present; + +struct gic_pcpu_mask { + DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS); +}; + +static void __iomem *gic_base; +static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; +static DEFINE_SPINLOCK(gic_lock); +static struct irq_domain *gic_irq_domain; +static int gic_shared_intrs; +static int gic_vpes; +static unsigned int gic_cpu_pin; +static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; + +static void __gic_irq_dispatch(void); + +static inline unsigned int gic_read(unsigned int reg) +{ + return __raw_readl(gic_base + reg); +} + +static inline void gic_write(unsigned int reg, unsigned int val) +{ + __raw_writel(val, gic_base + reg); +} + +static inline void gic_update_bits(unsigned int reg, unsigned int mask, + unsigned int val) +{ + unsigned int regval; + + regval = gic_read(reg); + regval &= ~mask; + regval |= val; + gic_write(reg, regval); +} + +static inline void gic_reset_mask(unsigned int intr) +{ + gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr), + 1 << GIC_INTR_BIT(intr)); +} + +static inline void gic_set_mask(unsigned int intr) +{ + gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr), + 1 << GIC_INTR_BIT(intr)); +} + +static inline void gic_set_polarity(unsigned int intr, unsigned int pol) +{ + gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) + + GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr), + pol << GIC_INTR_BIT(intr)); +} + +static inline void gic_set_trigger(unsigned int intr, unsigned int trig) +{ + gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) + + GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr), + trig << GIC_INTR_BIT(intr)); +} + +static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual) +{ + gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr), + 1 << GIC_INTR_BIT(intr), + dual << GIC_INTR_BIT(intr)); +} + +static inline void gic_map_to_pin(unsigned int intr, unsigned int pin) +{ + gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) + + GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin); +} + +static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) +{ + gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) + + GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe), + GIC_SH_MAP_TO_VPE_REG_BIT(vpe)); +} + +#ifdef CONFIG_CLKSRC_MIPS_GIC +cycle_t gic_read_count(void) +{ + unsigned int hi, hi2, lo; + + do { + hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); + lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00)); + hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); + } while (hi2 != hi); + + return (((cycle_t) hi) << 32) + lo; +} + +unsigned int gic_get_count_width(void) +{ + unsigned int bits, config; + + config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); + bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> + GIC_SH_CONFIG_COUNTBITS_SHF); + + return bits; +} + +void gic_write_compare(cycle_t cnt) +{ + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), + (int)(cnt >> 32)); + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), + (int)(cnt & 0xffffffff)); +} + +void gic_write_cpu_compare(cycle_t cnt, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); 
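/*
 * (annotation, not part of the patch: GIC_VPE_OTHER_ADDR selects which
 * VPE the VPE_OTHER register block aliases, so interrupts stay masked
 * here to keep anything on this CPU from re-pointing the alias between
 * the select write above and the two compare writes that follow)
 */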
+
+	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+				(int)(cnt >> 32));
+	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+				(int)(cnt & 0xffffffff));
+
+	local_irq_restore(flags);
+}
+
+cycle_t gic_read_compare(void)
+{
+	unsigned int hi, lo;
+
+	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
+	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
+
+	return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
+static bool gic_local_irq_is_routable(int intr)
+{
+	u32 vpe_ctl;
+
+	/* All local interrupts are routable in EIC mode. */
+	if (cpu_has_veic)
+		return true;
+
+	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
+	switch (intr) {
+	case GIC_LOCAL_INT_TIMER:
+		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
+	case GIC_LOCAL_INT_PERFCTR:
+		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
+	case GIC_LOCAL_INT_FDC:
+		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
+	case GIC_LOCAL_INT_SWINT0:
+	case GIC_LOCAL_INT_SWINT1:
+		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
+	default:
+		return true;
+	}
+}
+
+unsigned int gic_get_timer_pending(void)
+{
+	unsigned int vpe_pending;
+
+	vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+	return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
+}
+
+static void gic_bind_eic_interrupt(int irq, int set)
+{
+	/* Convert irq vector # to hw int # */
+	irq -= GIC_PIN_TO_VEC_OFFSET;
+
+	/* Set irq to use shadow set */
+	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
+		  GIC_VPE_EIC_SS(irq), set);
+}
+
+void gic_send_ipi(unsigned int intr)
+{
+	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
+}
+
+int gic_get_c0_compare_int(void)
+{
+	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
+		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	return irq_create_mapping(gic_irq_domain,
+				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
+}
+
+int gic_get_c0_perfcount_int(void)
+{
+	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
+		/* Is the performance counter shared with the timer?
*/ + if (cp0_perfcount_irq < 0) + return -1; + return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + } + return irq_create_mapping(gic_irq_domain, + GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR)); +} + +static unsigned int gic_get_int(void) +{ + unsigned int i; + unsigned long *pcpu_mask; + unsigned long pending_reg, intrmask_reg; + DECLARE_BITMAP(pending, GIC_MAX_INTRS); + DECLARE_BITMAP(intrmask, GIC_MAX_INTRS); + + /* Get per-cpu bitmaps */ + pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; + + pending_reg = GIC_REG(SHARED, GIC_SH_PEND); + intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK); + + for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) { + pending[i] = gic_read(pending_reg); + intrmask[i] = gic_read(intrmask_reg); + pending_reg += 0x4; + intrmask_reg += 0x4; + } + + bitmap_and(pending, pending, intrmask, gic_shared_intrs); + bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); + + return find_first_bit(pending, gic_shared_intrs); +} + +static void gic_mask_irq(struct irq_data *d) +{ + gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); +} + +static void gic_ack_irq(struct irq_data *d) +{ + unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); + + gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq)); +} + +static int gic_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); + unsigned long flags; + bool is_edge; + + spin_lock_irqsave(&gic_lock, flags); + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_FALLING: + gic_set_polarity(irq, GIC_POL_NEG); + gic_set_trigger(irq, GIC_TRIG_EDGE); + gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + is_edge = true; + break; + case IRQ_TYPE_EDGE_RISING: + gic_set_polarity(irq, GIC_POL_POS); + gic_set_trigger(irq, GIC_TRIG_EDGE); + gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + is_edge = true; + break; + case IRQ_TYPE_EDGE_BOTH: + /* polarity is irrelevant in this case */ + gic_set_trigger(irq, GIC_TRIG_EDGE); + gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE); + is_edge = true; + break; + case IRQ_TYPE_LEVEL_LOW: + gic_set_polarity(irq, GIC_POL_NEG); + gic_set_trigger(irq, GIC_TRIG_LEVEL); + gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + is_edge = false; + break; + case IRQ_TYPE_LEVEL_HIGH: + default: + gic_set_polarity(irq, GIC_POL_POS); + gic_set_trigger(irq, GIC_TRIG_LEVEL); + gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + is_edge = false; + break; + } + + if (is_edge) { + __irq_set_chip_handler_name_locked(d->irq, + &gic_edge_irq_controller, + handle_edge_irq, NULL); + } else { + __irq_set_chip_handler_name_locked(d->irq, + &gic_level_irq_controller, + handle_level_irq, NULL); + } + spin_unlock_irqrestore(&gic_lock, flags); + + return 0; +} + +#ifdef CONFIG_SMP +static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, + bool force) +{ + unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); + cpumask_t tmp = CPU_MASK_NONE; + unsigned long flags; + int i; + + cpumask_and(&tmp, cpumask, cpu_online_mask); + if (cpus_empty(tmp)) + return -EINVAL; + + /* Assumption : cpumask refers to a single CPU */ + spin_lock_irqsave(&gic_lock, flags); + + /* Re-route this IRQ */ + gic_map_to_vpe(irq, first_cpu(tmp)); + + /* Update the pcpu_masks */ + for (i = 0; i < NR_CPUS; i++) + clear_bit(irq, pcpu_masks[i].pcpu_mask); + set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); + + cpumask_copy(d->affinity, cpumask); + spin_unlock_irqrestore(&gic_lock, flags); + + return 
IRQ_SET_MASK_OK_NOCOPY; +} +#endif + +static struct irq_chip gic_level_irq_controller = { + .name = "MIPS GIC", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_set_type = gic_set_type, +#ifdef CONFIG_SMP + .irq_set_affinity = gic_set_affinity, +#endif +}; + +static struct irq_chip gic_edge_irq_controller = { + .name = "MIPS GIC", + .irq_ack = gic_ack_irq, + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_set_type = gic_set_type, +#ifdef CONFIG_SMP + .irq_set_affinity = gic_set_affinity, +#endif +}; + +static unsigned int gic_get_local_int(void) +{ + unsigned long pending, masked; + + pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); + masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK)); + + bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); + + return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS); +} + +static void gic_mask_local_irq(struct irq_data *d) +{ + int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr); +} + +static void gic_unmask_local_irq(struct irq_data *d) +{ + int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr); +} + +static struct irq_chip gic_local_irq_controller = { + .name = "MIPS GIC Local", + .irq_mask = gic_mask_local_irq, + .irq_unmask = gic_unmask_local_irq, +}; + +static void gic_mask_local_irq_all_vpes(struct irq_data *d) +{ + int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + int i; + unsigned long flags; + + spin_lock_irqsave(&gic_lock, flags); + for (i = 0; i < gic_vpes; i++) { + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr); + } + spin_unlock_irqrestore(&gic_lock, flags); +} + +static void gic_unmask_local_irq_all_vpes(struct irq_data *d) +{ + int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + int i; + unsigned long flags; + + spin_lock_irqsave(&gic_lock, flags); + for (i = 0; i < gic_vpes; i++) { + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr); + } + spin_unlock_irqrestore(&gic_lock, flags); +} + +static struct irq_chip gic_all_vpes_local_irq_controller = { + .name = "MIPS GIC Local", + .irq_mask = gic_mask_local_irq_all_vpes, + .irq_unmask = gic_unmask_local_irq_all_vpes, +}; + +static void __gic_irq_dispatch(void) +{ + unsigned int intr, virq; + + while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) { + virq = irq_linear_revmap(gic_irq_domain, + GIC_LOCAL_TO_HWIRQ(intr)); + do_IRQ(virq); + } + + while ((intr = gic_get_int()) != gic_shared_intrs) { + virq = irq_linear_revmap(gic_irq_domain, + GIC_SHARED_TO_HWIRQ(intr)); + do_IRQ(virq); + } +} + +static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) +{ + __gic_irq_dispatch(); +} + +#ifdef CONFIG_MIPS_GIC_IPI +static int gic_resched_int_base; +static int gic_call_int_base; + +unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) +{ + return gic_resched_int_base + cpu; +} + +unsigned int plat_ipi_call_int_xlate(unsigned int cpu) +{ + return gic_call_int_base + cpu; +} + +static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) +{ + scheduler_ipi(); + + return IRQ_HANDLED; +} + +static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) +{ + smp_call_function_interrupt(); + + return IRQ_HANDLED; +} + +static struct irqaction irq_resched = { + .handler = ipi_resched_interrupt, + .flags = IRQF_PERCPU, + .name = "IPI resched" +}; + +static struct irqaction irq_call = { + .handler = ipi_call_interrupt, + .flags = 
IRQF_PERCPU, + .name = "IPI call" +}; + +static __init void gic_ipi_init_one(unsigned int intr, int cpu, + struct irqaction *action) +{ + int virq = irq_create_mapping(gic_irq_domain, + GIC_SHARED_TO_HWIRQ(intr)); + int i; + + gic_map_to_vpe(intr, cpu); + for (i = 0; i < NR_CPUS; i++) + clear_bit(intr, pcpu_masks[i].pcpu_mask); + set_bit(intr, pcpu_masks[cpu].pcpu_mask); + + irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); + + irq_set_handler(virq, handle_percpu_irq); + setup_irq(virq, action); +} + +static __init void gic_ipi_init(void) +{ + int i; + + /* Use last 2 * NR_CPUS interrupts as IPIs */ + gic_resched_int_base = gic_shared_intrs - nr_cpu_ids; + gic_call_int_base = gic_resched_int_base - nr_cpu_ids; + + for (i = 0; i < nr_cpu_ids; i++) { + gic_ipi_init_one(gic_call_int_base + i, i, &irq_call); + gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched); + } +} +#else +static inline void gic_ipi_init(void) +{ +} +#endif + +static void __init gic_basic_init(void) +{ + unsigned int i; + + board_bind_eic_interrupt = &gic_bind_eic_interrupt; + + /* Setup defaults */ + for (i = 0; i < gic_shared_intrs; i++) { + gic_set_polarity(i, GIC_POL_POS); + gic_set_trigger(i, GIC_TRIG_LEVEL); + gic_reset_mask(i); + } + + for (i = 0; i < gic_vpes; i++) { + unsigned int j; + + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); + for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { + if (!gic_local_irq_is_routable(j)) + continue; + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j); + } + } +} + +static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + int intr = GIC_HWIRQ_TO_LOCAL(hw); + int ret = 0; + int i; + unsigned long flags; + + if (!gic_local_irq_is_routable(intr)) + return -EPERM; + + /* + * HACK: These are all really percpu interrupts, but the rest + * of the MIPS kernel code does not use the percpu IRQ API for + * the CP0 timer and performance counter interrupts. 
+ */ + if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) { + irq_set_chip_and_handler(virq, + &gic_local_irq_controller, + handle_percpu_devid_irq); + irq_set_percpu_devid(virq); + } else { + irq_set_chip_and_handler(virq, + &gic_all_vpes_local_irq_controller, + handle_percpu_irq); + } + + spin_lock_irqsave(&gic_lock, flags); + for (i = 0; i < gic_vpes; i++) { + u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; + + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i); + + switch (intr) { + case GIC_LOCAL_INT_WD: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val); + break; + case GIC_LOCAL_INT_COMPARE: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val); + break; + case GIC_LOCAL_INT_TIMER: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val); + break; + case GIC_LOCAL_INT_PERFCTR: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val); + break; + case GIC_LOCAL_INT_SWINT0: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val); + break; + case GIC_LOCAL_INT_SWINT1: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val); + break; + case GIC_LOCAL_INT_FDC: + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val); + break; + default: + pr_err("Invalid local IRQ %d\n", intr); + ret = -EINVAL; + break; + } + } + spin_unlock_irqrestore(&gic_lock, flags); + + return ret; +} + +static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + int intr = GIC_HWIRQ_TO_SHARED(hw); + unsigned long flags; + + irq_set_chip_and_handler(virq, &gic_level_irq_controller, + handle_level_irq); + + spin_lock_irqsave(&gic_lock, flags); + gic_map_to_pin(intr, gic_cpu_pin); + /* Map to VPE 0 by default */ + gic_map_to_vpe(intr, 0); + set_bit(intr, pcpu_masks[0].pcpu_mask); + spin_unlock_irqrestore(&gic_lock, flags); + + return 0; +} + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) + return gic_local_irq_domain_map(d, virq, hw); + return gic_shared_irq_domain_map(d, virq, hw); +} + +static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, + unsigned int *out_type) +{ + if (intsize != 3) + return -EINVAL; + + if (intspec[0] == GIC_SHARED) + *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]); + else if (intspec[0] == GIC_LOCAL) + *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]); + else + return -EINVAL; + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static struct irq_domain_ops gic_irq_domain_ops = { + .map = gic_irq_domain_map, + .xlate = gic_irq_domain_xlate, +}; + +static void __init __gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, + unsigned int cpu_vec, unsigned int irqbase, + struct device_node *node) +{ + unsigned int gicconfig; + + gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size); + + gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); + gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> + GIC_SH_CONFIG_NUMINTRS_SHF; + gic_shared_intrs = ((gic_shared_intrs + 1) * 8); + + gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> + GIC_SH_CONFIG_NUMVPES_SHF; + gic_vpes = gic_vpes + 1; + + if (cpu_has_veic) { + /* Always use vector 1 in EIC mode */ + gic_cpu_pin = 0; + set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, + __gic_irq_dispatch); + } else { + gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; + irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, + gic_irq_dispatch); + } + + 
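/*
 * (annotation, not part of the patch: the linear domain allocated below
 * is sized GIC_NUM_LOCAL_INTRS + gic_shared_intrs, one hwirq per per-VPE
 * local interrupt followed by one per shared interrupt; the
 * GIC_LOCAL_TO_HWIRQ()/GIC_SHARED_TO_HWIRQ() helpers used throughout
 * this file assume exactly that layout)
 */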
gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + + gic_shared_intrs, irqbase, + &gic_irq_domain_ops, NULL); + if (!gic_irq_domain) + panic("Failed to add GIC IRQ domain"); + + gic_basic_init(); + + gic_ipi_init(); +} + +void __init gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, + unsigned int cpu_vec, unsigned int irqbase) +{ + __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL); +} + +static int __init gic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct resource res; + unsigned int cpu_vec, i = 0, reserved = 0; + phys_addr_t gic_base; + size_t gic_len; + + /* Find the first available CPU vector. */ + while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", + i++, &cpu_vec)) + reserved |= BIT(cpu_vec); + for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) { + if (!(reserved & BIT(cpu_vec))) + break; + } + if (cpu_vec == 8) { + pr_err("No CPU vectors available for GIC\n"); + return -ENODEV; + } + + if (of_address_to_resource(node, 0, &res)) { + /* + * Probe the CM for the GIC base address if not specified + * in the device-tree. + */ + if (mips_cm_present()) { + gic_base = read_gcr_gic_base() & + ~CM_GCR_GIC_BASE_GICEN_MSK; + gic_len = 0x20000; + } else { + pr_err("Failed to get GIC memory range\n"); + return -ENODEV; + } + } else { + gic_base = res.start; + gic_len = resource_size(&res); + } + + if (mips_cm_present()) + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); + gic_present = true; + + __gic_init(gic_base, gic_len, cpu_vec, 0, node); + + return 0; +} +IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 9f454d76cc06..67c21876c35f 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -1334,7 +1334,7 @@ init_card(struct l1oip *hc, int pri, int bundle) if (id[l1oip_cnt] == 0) { printk(KERN_WARNING "Warning: No 'id' value given or " "0, this is highly unsecure. Please use 32 " - "bit randmom number 0x...\n"); + "bit random number 0x...\n"); } hc->id = id[l1oip_cnt]; if (debug & DEBUG_L1OIP_INIT) diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index dcfd7a1d317e..207d6e82403b 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -290,7 +290,7 @@ struct cx18_options { * list_entry_is_past_end - check if a previous loop cursor is off list end * @pos: the type * previously used as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. 
* * Check if the entry's list_head is the head of the list, thus it's not a * real entry but was the loop cursor that walked past the end diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c index 2cb35c23d2ac..a4d8867e1d7b 100644 --- a/drivers/media/pci/ttpci/budget-patch.c +++ b/drivers/media/pci/ttpci/budget-patch.c @@ -490,7 +490,7 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte if(detected == 0) printk("budget-patch not detected or saa7146 in non-default state.\n" - "try enabling ressetting of 7146 with MASK_31 in MC1 register\n"); + "try enabling resetting of 7146 with MASK_31 in MC1 register\n"); else printk("BUDGET-PATCH DETECTED.\n"); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 3d2b8677ec8a..b5b6bda44a00 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -336,6 +336,8 @@ struct cxl_sste { struct cxl_afu { irq_hw_number_t psl_hwirq; irq_hw_number_t serr_hwirq; + char *err_irq_name; + char *psl_irq_name; unsigned int serr_virq; void __iomem *p1n_mmio; void __iomem *p2n_mmio; @@ -379,6 +381,12 @@ struct cxl_afu { bool enabled; }; + +struct cxl_irq_name { + struct list_head list; + char *name; +}; + /* * This is a cxl context. If the PSL is in dedicated mode, there will be one * of these per AFU. If in AFU directed there can be lots of these. @@ -403,6 +411,7 @@ struct cxl_context { unsigned long *irq_bitmap; /* Accessed from IRQ context */ struct cxl_irq_ranges irqs; + struct list_head irq_names; u64 fault_addr; u64 fault_dsisr; u64 afu_err; @@ -444,6 +453,7 @@ struct cxl { struct dentry *trace; struct dentry *psl_err_chk; struct dentry *debugfs; + char *irq_name; struct bin_attribute cxl_attr; int adapter_num; int user_irqs; @@ -563,9 +573,6 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode); int cxl_afu_deactivate_mode(struct cxl_afu *afu); int cxl_afu_select_best_mode(struct cxl_afu *afu); -unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, - irq_handler_t handler, void *cookie); -void cxl_unmap_irq(unsigned int virq, void *cookie); int cxl_register_psl_irq(struct cxl_afu *afu); void cxl_release_psl_irq(struct cxl_afu *afu); int cxl_register_psl_err_irq(struct cxl *adapter); @@ -612,7 +619,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr); int cxl_detach_process(struct cxl_context *ctx); -int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info); +int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info); int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); int cxl_check_error(struct cxl_afu *afu); diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index c99e896604ee..f8684bca2d79 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -133,7 +133,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx, { unsigned flt = 0; int result; - unsigned long access, flags; + unsigned long access, flags, inv_flags = 0; if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { pr_devel("copro_handle_mm_fault failed: %#x\n", result); @@ -149,8 +149,12 @@ static void cxl_handle_page_fault(struct cxl_context *ctx, access |= _PAGE_RW; if ((!ctx->kernel) || ~(dar & (1ULL << 63))) access |= _PAGE_USER; + + if (dsisr & DSISR_NOHPTE) + inv_flags |= HPTE_NOHPTE_UPDATE; + local_irq_save(flags); - hash_page_mm(mm, dar, access, 0x300); + hash_page_mm(mm, dar, access, 0x300, inv_flags); local_irq_restore(flags); pr_devel("Page fault successfully handled 
for pe: %i!\n", ctx->pe); diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 336020c8e1af..c294925f73ee 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -92,20 +92,13 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da return IRQ_HANDLED; } -static irqreturn_t cxl_irq(int irq, void *data) +static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) { struct cxl_context *ctx = data; - struct cxl_irq_info irq_info; u64 dsisr, dar; - int result; - - if ((result = cxl_get_irq(ctx, &irq_info))) { - WARN(1, "Unable to get CXL IRQ Info: %i\n", result); - return IRQ_HANDLED; - } - dsisr = irq_info.dsisr; - dar = irq_info.dar; + dsisr = irq_info->dsisr; + dar = irq_info->dar; pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); @@ -149,9 +142,9 @@ static irqreturn_t cxl_irq(int irq, void *data) if (dsisr & CXL_PSL_DSISR_An_UR) pr_devel("CXL interrupt: AURP PTE not found\n"); if (dsisr & CXL_PSL_DSISR_An_PE) - return handle_psl_slice_error(ctx, dsisr, irq_info.errstat); + return handle_psl_slice_error(ctx, dsisr, irq_info->errstat); if (dsisr & CXL_PSL_DSISR_An_AE) { - pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info.afu_err); + pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err); if (ctx->pending_afu_err) { /* @@ -163,10 +156,10 @@ static irqreturn_t cxl_irq(int irq, void *data) */ dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error " "undelivered to pe %i: %.llx\n", - ctx->pe, irq_info.afu_err); + ctx->pe, irq_info->afu_err); } else { spin_lock(&ctx->lock); - ctx->afu_err = irq_info.afu_err; + ctx->afu_err = irq_info->afu_err; ctx->pending_afu_err = 1; spin_unlock(&ctx->lock); @@ -182,24 +175,43 @@ static irqreturn_t cxl_irq(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info) +{ + if (irq_info->dsisr & CXL_PSL_DSISR_TRANS) + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); + else + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); + + return IRQ_HANDLED; +} + static irqreturn_t cxl_irq_multiplexed(int irq, void *data) { struct cxl_afu *afu = data; struct cxl_context *ctx; + struct cxl_irq_info irq_info; int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; int ret; + if ((ret = cxl_get_irq(afu, &irq_info))) { + WARN(1, "Unable to get CXL IRQ Info: %i\n", ret); + return fail_psl_irq(afu, &irq_info); + } + rcu_read_lock(); ctx = idr_find(&afu->contexts_idr, ph); if (ctx) { - ret = cxl_irq(irq, ctx); + ret = cxl_irq(irq, ctx, &irq_info); rcu_read_unlock(); return ret; } rcu_read_unlock(); - WARN(1, "Unable to demultiplex CXL PSL IRQ\n"); - return IRQ_HANDLED; + WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR" + " %.16llx\n(Possible AFU HW issue - was a term/remove acked" + " with outstanding transactions?)\n", ph, irq_info.dsisr, + irq_info.dar); + return fail_psl_irq(afu, &irq_info); } static irqreturn_t cxl_irq_afu(int irq, void *data) @@ -243,7 +255,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data) } unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, - irq_handler_t handler, void *cookie) + irq_handler_t handler, void *cookie, const char *name) { unsigned int virq; int result; @@ -259,7 +271,7 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); - result = request_irq(virq, handler, 0, "cxl", cookie); + result = request_irq(virq, handler, 0, 
name, cookie); if (result) { dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result); return 0; @@ -278,14 +290,15 @@ static int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler, void *cookie, irq_hw_number_t *dest_hwirq, - unsigned int *dest_virq) + unsigned int *dest_virq, + const char *name) { int hwirq, virq; if ((hwirq = cxl_alloc_one_irq(adapter)) < 0) return hwirq; - if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie))) + if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name))) goto err; *dest_hwirq = hwirq; @@ -302,10 +315,19 @@ int cxl_register_psl_err_irq(struct cxl *adapter) { int rc; + adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&adapter->dev)); + if (!adapter->irq_name) + return -ENOMEM; + if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter, &adapter->err_hwirq, - &adapter->err_virq))) + &adapter->err_virq, + adapter->irq_name))) { + kfree(adapter->irq_name); + adapter->irq_name = NULL; return rc; + } cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff); @@ -317,6 +339,7 @@ void cxl_release_psl_err_irq(struct cxl *adapter) cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); cxl_unmap_irq(adapter->err_virq, adapter); cxl_release_one_irq(adapter, adapter->err_hwirq); + kfree(adapter->irq_name); } int cxl_register_serr_irq(struct cxl_afu *afu) @@ -324,10 +347,18 @@ int cxl_register_serr_irq(struct cxl_afu *afu) u64 serr; int rc; + afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&afu->dev)); + if (!afu->err_irq_name) + return -ENOMEM; + if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu, &afu->serr_hwirq, - &afu->serr_virq))) + &afu->serr_virq, afu->err_irq_name))) { + kfree(afu->err_irq_name); + afu->err_irq_name = NULL; return rc; + } serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); @@ -341,24 +372,50 @@ void cxl_release_serr_irq(struct cxl_afu *afu) cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); cxl_unmap_irq(afu->serr_virq, afu); cxl_release_one_irq(afu->adapter, afu->serr_hwirq); + kfree(afu->err_irq_name); } int cxl_register_psl_irq(struct cxl_afu *afu) { - return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu, - &afu->psl_hwirq, &afu->psl_virq); + int rc; + + afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", + dev_name(&afu->dev)); + if (!afu->psl_irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu, + &afu->psl_hwirq, &afu->psl_virq, + afu->psl_irq_name))) { + kfree(afu->psl_irq_name); + afu->psl_irq_name = NULL; + } + return rc; } void cxl_release_psl_irq(struct cxl_afu *afu) { cxl_unmap_irq(afu->psl_virq, afu); cxl_release_one_irq(afu->adapter, afu->psl_hwirq); + kfree(afu->psl_irq_name); +} + +void afu_irq_name_free(struct cxl_context *ctx) +{ + struct cxl_irq_name *irq_name, *tmp; + + list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) { + kfree(irq_name->name); + list_del(&irq_name->list); + kfree(irq_name); + } } int afu_register_irqs(struct cxl_context *ctx, u32 count) { irq_hw_number_t hwirq; - int rc, r, i; + int rc, r, i, j = 1; + struct cxl_irq_name *irq_name; if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count))) return rc; @@ -372,15 +429,47 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count) sizeof(*ctx->irq_bitmap), GFP_KERNEL); if (!ctx->irq_bitmap) return -ENOMEM; + + /* + * Allocate names first. 
If any fail, bail out before allocating + actual hardware IRQs. + */ + INIT_LIST_HEAD(&ctx->irq_names); + for (r = 1; r < CXL_IRQ_RANGES; r++) { + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + irq_name = kmalloc(sizeof(struct cxl_irq_name), + GFP_KERNEL); + if (!irq_name) + goto out; + irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i", + dev_name(&ctx->afu->dev), + ctx->pe, j); + if (!irq_name->name) { + kfree(irq_name); + goto out; + } + /* Add to tail so the next loop gets the correct order */ + list_add_tail(&irq_name->list, &ctx->irq_names); + j++; + } + } + + /* We've allocated all memory now, so let's do the irq allocations */ + irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list); for (r = 1; r < CXL_IRQ_RANGES; r++) { hwirq = ctx->irqs.offset[r]; for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { cxl_map_irq(ctx->afu->adapter, hwirq, - cxl_irq_afu, ctx); + cxl_irq_afu, ctx, irq_name->name); + irq_name = list_next_entry(irq_name, list); } } return 0; + +out: + afu_irq_name_free(ctx); + return -ENOMEM; } void afu_release_irqs(struct cxl_context *ctx) @@ -398,5 +487,6 @@ void afu_release_irqs(struct cxl_context *ctx) } } + afu_irq_name_free(ctx); cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); } diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index d47532e8f4f1..9a5a442269a8 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -637,18 +637,18 @@ int cxl_detach_process(struct cxl_context *ctx) return detach_process_native_afu_directed(ctx); } -int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info) +int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info) { u64 pidtid; - info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); - info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An); - info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An); - pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An); + info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); + info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An); + pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An); info->pid = pidtid >> 32; info->tid = pidtid & 0xffffffff; - info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An); - info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An); + info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); + info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); return 0; } diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index 6f27d9a1be3b..9d2e16f3150a 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -176,6 +176,7 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) /** * validate_vid_hdr - check volume identifier header. + * @ubi: UBI device description object * @vid_hdr: the volume identifier header to check * @av: information about the volume this logical eraseblock belongs to * @pnum: physical eraseblock number the VID header came from * @@ -188,7 +189,8 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) * information in the VID header is consistent with the information in other VID * headers of the same volume.
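
The afu_register_irqs() rework above is a textbook instance of the kernel's allocate-then-commit pattern: every allocation that can fail (the per-IRQ name strings) happens up front and is fully unwound on error, and only then are the hardware IRQs mapped, a step with no failure path left to clean up. A minimal standalone sketch of the same shape, with purely illustrative identifiers rather than the real cxl ones:

/*
 * Sketch of the allocate-then-commit pattern from afu_register_irqs().
 * Phase 1 may fail and unwinds completely; phase 2 cannot fail.
 */
struct item {
	struct list_head list;
	char *name;
};

static int register_items(struct list_head *names, int count)
{
	struct item *it, *tmp;
	int i;

	/* Phase 1: allocations only; any failure unwinds the whole list. */
	for (i = 0; i < count; i++) {
		it = kmalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto out;
		it->name = kasprintf(GFP_KERNEL, "item-%d", i);
		if (!it->name) {
			kfree(it);
			goto out;
		}
		list_add_tail(&it->list, names);
	}

	/* Phase 2: consume the names; nothing here can fail. */
	list_for_each_entry(it, names, list)
		pr_devel("registered %s\n", it->name);
	return 0;

out:
	list_for_each_entry_safe(it, tmp, names, list) {
		list_del(&it->list);
		kfree(it->name);
		kfree(it);
	}
	return -ENOMEM;
}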
*/ -static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, +static int validate_vid_hdr(const struct ubi_device *ubi, + const struct ubi_vid_hdr *vid_hdr, const struct ubi_ainf_volume *av, int pnum) { int vol_type = vid_hdr->vol_type; @@ -206,7 +208,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, */ if (vol_id != av->vol_id) { - ubi_err("inconsistent vol_id"); + ubi_err(ubi, "inconsistent vol_id"); goto bad; } @@ -216,17 +218,17 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, av_vol_type = UBI_VID_DYNAMIC; if (vol_type != av_vol_type) { - ubi_err("inconsistent vol_type"); + ubi_err(ubi, "inconsistent vol_type"); goto bad; } if (used_ebs != av->used_ebs) { - ubi_err("inconsistent used_ebs"); + ubi_err(ubi, "inconsistent used_ebs"); goto bad; } if (data_pad != av->data_pad) { - ubi_err("inconsistent data_pad"); + ubi_err(ubi, "inconsistent data_pad"); goto bad; } } @@ -234,7 +236,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, return 0; bad: - ubi_err("inconsistent VID header at PEB %d", pnum); + ubi_err(ubi, "inconsistent VID header at PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); ubi_dump_av(av); return -EINVAL; @@ -336,7 +338,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, * support these images anymore. Well, those images still work, * but only if no unclean reboots happened. */ - ubi_err("unsupported on-flash UBI format"); + ubi_err(ubi, "unsupported on-flash UBI format"); return -EINVAL; } @@ -377,7 +379,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, if (err == UBI_IO_BITFLIPS) bitflips = 1; else { - ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d", + ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d", pnum, err); if (err > 0) err = -EIO; @@ -507,7 +509,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, * logical eraseblocks because there was an unclean reboot. */ if (aeb->sqnum == sqnum && sqnum != 0) { - ubi_err("two LEBs with same sequence number %llu", + ubi_err(ubi, "two LEBs with same sequence number %llu", sqnum); ubi_dump_aeb(aeb, 0); ubi_dump_vid_hdr(vid_hdr); @@ -527,7 +529,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, * This logical eraseblock is newer than the one * found earlier. */ - err = validate_vid_hdr(vid_hdr, av, pnum); + err = validate_vid_hdr(ubi, vid_hdr, av, pnum); if (err) return err; @@ -565,7 +567,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, * attaching information. */ - err = validate_vid_hdr(vid_hdr, av, pnum); + err = validate_vid_hdr(ubi, vid_hdr, av, pnum); if (err) return err; @@ -668,7 +670,8 @@ static int early_erase_peb(struct ubi_device *ubi, * Erase counter overflow. Upgrade UBI and use 64-bit * erase counters internally. 
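
The UBI changes from attach.c onward are largely one mechanical conversion: ubi_err(), ubi_warn() and ubi_msg() now take the ubi device as their first argument so that every message is tagged with the device it concerns, which matters once several UBI devices are attached. The helpers presumably take roughly the following shape; the exact prefixes and format strings here are assumptions for illustration, not copied from the patch:

/* Assumed shape of the device-aware logging helpers (the real
 * definitions live in drivers/mtd/ubi/ubi.h and may differ in detail). */
#define ubi_msg(ubi, fmt, ...) \
	pr_notice("UBI-%d: " fmt "\n", (ubi)->ubi_num, ##__VA_ARGS__)
#define ubi_warn(ubi, fmt, ...) \
	pr_warn("UBI-%d warning: " fmt "\n", (ubi)->ubi_num, ##__VA_ARGS__)
#define ubi_err(ubi, fmt, ...) \
	pr_err("UBI-%d error: " fmt "\n", (ubi)->ubi_num, ##__VA_ARGS__)

This also explains why the few call sites with no device in scope (module init, parameter parsing, ubiblock setup) are switched to plain pr_err()/dev_err() instead.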
*/ - ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); + ubi_err(ubi, "erase counter overflow at PEB %d, EC %d", + pnum, ec); return -EINVAL; } @@ -736,7 +739,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, return aeb; } - ubi_err("no free eraseblocks"); + ubi_err(ubi, "no free eraseblocks"); return ERR_PTR(-ENOSPC); } @@ -785,9 +788,9 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) goto out_unlock; - ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF", + ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF", pnum); - ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection"); + ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection"); ubi_dump_vid_hdr(vid_hdr); pr_err("hexdump of PEB %d offset %d, length %d", pnum, ubi->leb_start, ubi->leb_size); @@ -859,7 +862,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, bitflips = 1; break; default: - ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err); + ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d", + err); return -EINVAL; } @@ -868,7 +872,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, /* Make sure UBI version is OK */ if (ech->version != UBI_VERSION) { - ubi_err("this UBI version is %d, image version is %d", + ubi_err(ubi, "this UBI version is %d, image version is %d", UBI_VERSION, (int)ech->version); return -EINVAL; } @@ -882,7 +886,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, * flash. Upgrade UBI and use 64-bit erase counters * internally. 
*/ - ubi_err("erase counter overflow, max is %d", + ubi_err(ubi, "erase counter overflow, max is %d", UBI_MAX_ERASECOUNTER); ubi_dump_ec_hdr(ech); return -EINVAL; @@ -903,7 +907,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, if (!ubi->image_seq) ubi->image_seq = image_seq; if (image_seq && ubi->image_seq != image_seq) { - ubi_err("bad image sequence number %d in PEB %d, expected %d", + ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d", image_seq, pnum, ubi->image_seq); ubi_dump_ec_hdr(ech); return -EINVAL; @@ -981,7 +985,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, return err; goto adjust_mean_ec; default: - ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d", + ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d", err); return -EINVAL; } @@ -999,7 +1003,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, case UBI_COMPAT_DELETE: if (vol_id != UBI_FM_SB_VOLUME_ID && vol_id != UBI_FM_DATA_VOLUME_ID) { - ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it", + ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it", vol_id, lnum); } err = add_to_list(ai, pnum, vol_id, lnum, @@ -1009,13 +1013,13 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, return 0; case UBI_COMPAT_RO: - ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode", + ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode", vol_id, lnum); ubi->ro_mode = 1; break; case UBI_COMPAT_PRESERVE: - ubi_msg("\"preserve\" compatible internal volume %d:%d found", + ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found", vol_id, lnum); err = add_to_list(ai, pnum, vol_id, lnum, ec, 0, &ai->alien); @@ -1024,14 +1028,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, return 0; case UBI_COMPAT_REJECT: - ubi_err("incompatible internal volume %d:%d found", + ubi_err(ubi, "incompatible internal volume %d:%d found", vol_id, lnum); return -EINVAL; } } if (ec_err) - ubi_warn("valid VID header but corrupted EC header at PEB %d", + ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d", pnum); err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); if (err) @@ -1075,7 +1079,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) * with the flash HW or driver. */ if (ai->corr_peb_count) { - ubi_err("%d PEBs are corrupted and preserved", + ubi_err(ubi, "%d PEBs are corrupted and preserved", ai->corr_peb_count); pr_err("Corrupted PEBs are:"); list_for_each_entry(aeb, &ai->corr, u.list) @@ -1087,7 +1091,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) * otherwise, only print a warning. 
*/ if (ai->corr_peb_count >= max_corr) { - ubi_err("too many corrupted PEBs, refusing"); + ubi_err(ubi, "too many corrupted PEBs, refusing"); return -EINVAL; } } @@ -1110,11 +1114,11 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) */ if (ai->maybe_bad_peb_count <= 2) { ai->is_empty = 1; - ubi_msg("empty MTD device detected"); + ubi_msg(ubi, "empty MTD device detected"); get_random_bytes(&ubi->image_seq, sizeof(ubi->image_seq)); } else { - ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it"); + ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it"); return -EINVAL; } @@ -1248,7 +1252,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, goto out_vidh; } - ubi_msg("scanning is finished"); + ubi_msg(ubi, "scanning is finished"); /* Calculate mean erase counter */ if (ai->ec_count) @@ -1515,37 +1519,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) vols_found += 1; if (ai->is_empty) { - ubi_err("bad is_empty flag"); + ubi_err(ubi, "bad is_empty flag"); goto bad_av; } if (av->vol_id < 0 || av->highest_lnum < 0 || av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || av->data_pad < 0 || av->last_data_size < 0) { - ubi_err("negative values"); + ubi_err(ubi, "negative values"); goto bad_av; } if (av->vol_id >= UBI_MAX_VOLUMES && av->vol_id < UBI_INTERNAL_VOL_START) { - ubi_err("bad vol_id"); + ubi_err(ubi, "bad vol_id"); goto bad_av; } if (av->vol_id > ai->highest_vol_id) { - ubi_err("highest_vol_id is %d, but vol_id %d is there", + ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there", ai->highest_vol_id, av->vol_id); goto out; } if (av->vol_type != UBI_DYNAMIC_VOLUME && av->vol_type != UBI_STATIC_VOLUME) { - ubi_err("bad vol_type"); + ubi_err(ubi, "bad vol_type"); goto bad_av; } if (av->data_pad > ubi->leb_size / 2) { - ubi_err("bad data_pad"); + ubi_err(ubi, "bad data_pad"); goto bad_av; } @@ -1557,48 +1561,48 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) leb_count += 1; if (aeb->pnum < 0 || aeb->ec < 0) { - ubi_err("negative values"); + ubi_err(ubi, "negative values"); goto bad_aeb; } if (aeb->ec < ai->min_ec) { - ubi_err("bad ai->min_ec (%d), %d found", + ubi_err(ubi, "bad ai->min_ec (%d), %d found", ai->min_ec, aeb->ec); goto bad_aeb; } if (aeb->ec > ai->max_ec) { - ubi_err("bad ai->max_ec (%d), %d found", + ubi_err(ubi, "bad ai->max_ec (%d), %d found", ai->max_ec, aeb->ec); goto bad_aeb; } if (aeb->pnum >= ubi->peb_count) { - ubi_err("too high PEB number %d, total PEBs %d", + ubi_err(ubi, "too high PEB number %d, total PEBs %d", aeb->pnum, ubi->peb_count); goto bad_aeb; } if (av->vol_type == UBI_STATIC_VOLUME) { if (aeb->lnum >= av->used_ebs) { - ubi_err("bad lnum or used_ebs"); + ubi_err(ubi, "bad lnum or used_ebs"); goto bad_aeb; } } else { if (av->used_ebs != 0) { - ubi_err("non-zero used_ebs"); + ubi_err(ubi, "non-zero used_ebs"); goto bad_aeb; } } if (aeb->lnum > av->highest_lnum) { - ubi_err("incorrect highest_lnum or lnum"); + ubi_err(ubi, "incorrect highest_lnum or lnum"); goto bad_aeb; } } if (av->leb_count != leb_count) { - ubi_err("bad leb_count, %d objects in the tree", + ubi_err(ubi, "bad leb_count, %d objects in the tree", leb_count); goto bad_av; } @@ -1609,13 +1613,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) aeb = last_aeb; if (aeb->lnum != av->highest_lnum) { - ubi_err("bad highest_lnum"); + ubi_err(ubi, "bad 
highest_lnum"); goto bad_aeb; } } if (vols_found != ai->vols_found) { - ubi_err("bad ai->vols_found %d, should be %d", + ubi_err(ubi, "bad ai->vols_found %d, should be %d", ai->vols_found, vols_found); goto out; } @@ -1632,7 +1636,8 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); if (err && err != UBI_IO_BITFLIPS) { - ubi_err("VID header is not OK (%d)", err); + ubi_err(ubi, "VID header is not OK (%d)", + err); if (err > 0) err = -EIO; return err; @@ -1641,37 +1646,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; if (av->vol_type != vol_type) { - ubi_err("bad vol_type"); + ubi_err(ubi, "bad vol_type"); goto bad_vid_hdr; } if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { - ubi_err("bad sqnum %llu", aeb->sqnum); + ubi_err(ubi, "bad sqnum %llu", aeb->sqnum); goto bad_vid_hdr; } if (av->vol_id != be32_to_cpu(vidh->vol_id)) { - ubi_err("bad vol_id %d", av->vol_id); + ubi_err(ubi, "bad vol_id %d", av->vol_id); goto bad_vid_hdr; } if (av->compat != vidh->compat) { - ubi_err("bad compat %d", vidh->compat); + ubi_err(ubi, "bad compat %d", vidh->compat); goto bad_vid_hdr; } if (aeb->lnum != be32_to_cpu(vidh->lnum)) { - ubi_err("bad lnum %d", aeb->lnum); + ubi_err(ubi, "bad lnum %d", aeb->lnum); goto bad_vid_hdr; } if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { - ubi_err("bad used_ebs %d", av->used_ebs); + ubi_err(ubi, "bad used_ebs %d", av->used_ebs); goto bad_vid_hdr; } if (av->data_pad != be32_to_cpu(vidh->data_pad)) { - ubi_err("bad data_pad %d", av->data_pad); + ubi_err(ubi, "bad data_pad %d", av->data_pad); goto bad_vid_hdr; } } @@ -1680,12 +1685,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) continue; if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { - ubi_err("bad highest_lnum %d", av->highest_lnum); + ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum); goto bad_vid_hdr; } if (av->last_data_size != be32_to_cpu(vidh->data_size)) { - ubi_err("bad last_data_size %d", av->last_data_size); + ubi_err(ubi, "bad last_data_size %d", + av->last_data_size); goto bad_vid_hdr; } } @@ -1726,7 +1732,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) err = 0; for (pnum = 0; pnum < ubi->peb_count; pnum++) if (!buf[pnum]) { - ubi_err("PEB %d is not referred", pnum); + ubi_err(ubi, "PEB %d is not referred", pnum); err = 1; } @@ -1736,18 +1742,18 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) return 0; bad_aeb: - ubi_err("bad attaching information about LEB %d", aeb->lnum); + ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum); ubi_dump_aeb(aeb, 0); ubi_dump_av(av); goto out; bad_av: - ubi_err("bad attaching information about volume %d", av->vol_id); + ubi_err(ubi, "bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); goto out; bad_vid_hdr: - ubi_err("bad attaching information about volume %d", av->vol_id); + ubi_err(ubi, "bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); ubi_dump_vid_hdr(vidh); diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 8876c7d3d712..6b6bce28bd63 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -111,13 +111,13 @@ static int __init ubiblock_set_param(const char *val, len = strnlen(val, UBIBLOCK_PARAM_LEN); if (len == 0) { - ubi_warn("block: empty 'block=' parameter - ignored\n"); + 
pr_warn("UBI: block: empty 'block=' parameter - ignored\n"); return 0; } if (len == UBIBLOCK_PARAM_LEN) { - ubi_err("block: parameter \"%s\" is too long, max. is %d\n", - val, UBIBLOCK_PARAM_LEN); + pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n", + val, UBIBLOCK_PARAM_LEN); return -EINVAL; } @@ -188,9 +188,8 @@ static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer, ret = ubi_read(dev->desc, leb, buffer, offset, len); if (ret) { - ubi_err("%s: error %d while reading from LEB %d (offset %d, " - "length %d)", dev->gd->disk_name, ret, leb, offset, - len); + dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)", + ret, leb, offset, len); return ret; } return 0; @@ -328,8 +327,8 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode) dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY); if (IS_ERR(dev->desc)) { - ubi_err("%s failed to open ubi volume %d_%d", - dev->gd->disk_name, dev->ubi_num, dev->vol_id); + dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d", + dev->ubi_num, dev->vol_id); ret = PTR_ERR(dev->desc); dev->desc = NULL; goto out_unlock; @@ -405,7 +404,7 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Initialize the gendisk of this ubiblock device */ gd = alloc_disk(1); if (!gd) { - ubi_err("block: alloc_disk failed"); + pr_err("UBI: block: alloc_disk failed"); ret = -ENODEV; goto out_free_dev; } @@ -421,7 +420,7 @@ int ubiblock_create(struct ubi_volume_info *vi) spin_lock_init(&dev->queue_lock); dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock); if (!dev->rq) { - ubi_err("block: blk_init_queue failed"); + dev_err(disk_to_dev(gd), "blk_init_queue failed"); ret = -ENODEV; goto out_put_disk; } @@ -446,8 +445,8 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Must be the last step: anyone can call file ops from now on */ add_disk(dev->gd); - ubi_msg("%s created from ubi%d:%d(%s)", - dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name); + dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", + dev->ubi_num, dev->vol_id, vi->name); return 0; out_free_queue: @@ -464,7 +463,7 @@ static void ubiblock_cleanup(struct ubiblock *dev) { del_gendisk(dev->gd); blk_cleanup_queue(dev->rq); - ubi_msg("%s released", dev->gd->disk_name); + dev_info(disk_to_dev(dev->gd), "released"); put_disk(dev->gd); } @@ -518,8 +517,8 @@ static int ubiblock_resize(struct ubi_volume_info *vi) } if ((sector_t)disk_capacity != disk_capacity) { mutex_unlock(&devices_mutex); - ubi_warn("%s: the volume is too big (%d LEBs), cannot resize", - dev->gd->disk_name, vi->size); + dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize", + vi->size); return -EFBIG; } @@ -527,8 +526,8 @@ static int ubiblock_resize(struct ubi_volume_info *vi) if (get_capacity(dev->gd) != disk_capacity) { set_capacity(dev->gd, disk_capacity); - ubi_msg("%s resized to %lld bytes", dev->gd->disk_name, - vi->used_bytes); + dev_info(disk_to_dev(dev->gd), "resized to %lld bytes", + vi->used_bytes); } mutex_unlock(&dev->dev_mutex); mutex_unlock(&devices_mutex); @@ -596,8 +595,8 @@ static int __init ubiblock_create_from_param(void) desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); if (IS_ERR(desc)) { - ubi_err("block: can't open volume, err=%ld\n", - PTR_ERR(desc)); + pr_err("UBI: block: can't open volume, err=%ld\n", + PTR_ERR(desc)); ret = PTR_ERR(desc); break; } @@ -607,8 +606,8 @@ static int __init ubiblock_create_from_param(void) ret = ubiblock_create(&vi); if (ret) { - ubi_err("block: can't 
add '%s' volume, err=%d\n", - vi.name, ret); + pr_err("UBI: block: can't add '%s' volume, err=%d\n", + vi.name, ret); break; } } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 6e30a3c280d0..3405be46ebe9 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -166,7 +166,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) case UBI_VOLUME_RESIZED: case UBI_VOLUME_RENAMED: if (ubi_update_fastmap(ubi)) { - ubi_err("Unable to update fastmap!"); + ubi_err(ubi, "Unable to update fastmap!"); ubi_ro_mode(ubi); } } @@ -517,7 +517,7 @@ static int uif_init(struct ubi_device *ubi, int *ref) */ err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); if (err) { - ubi_err("cannot register UBI character devices"); + ubi_err(ubi, "cannot register UBI character devices"); return err; } @@ -528,7 +528,7 @@ static int uif_init(struct ubi_device *ubi, int *ref) err = cdev_add(&ubi->cdev, dev, 1); if (err) { - ubi_err("cannot add character device"); + ubi_err(ubi, "cannot add character device"); goto out_unreg; } @@ -540,7 +540,7 @@ static int uif_init(struct ubi_device *ubi, int *ref) if (ubi->volumes[i]) { err = ubi_add_volume(ubi, ubi->volumes[i]); if (err) { - ubi_err("cannot add volume %d", i); + ubi_err(ubi, "cannot add volume %d", i); goto out_volumes; } } @@ -556,7 +556,8 @@ out_sysfs: cdev_del(&ubi->cdev); out_unreg: unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); - ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); + ubi_err(ubi, "cannot initialize UBI %s, error %d", + ubi->ubi_name, err); return err; } @@ -650,7 +651,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) * guess we should just pick the largest region. But this is * not implemented. */ - ubi_err("multiple regions, not implemented"); + ubi_err(ubi, "multiple regions, not implemented"); return -EINVAL; } @@ -685,7 +686,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) * which allows us to avoid costly division operations. */ if (!is_power_of_2(ubi->min_io_size)) { - ubi_err("min. I/O unit (%d) is not power of 2", + ubi_err(ubi, "min. I/O unit (%d) is not power of 2", ubi->min_io_size); return -EINVAL; } @@ -702,7 +703,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) if (ubi->max_write_size < ubi->min_io_size || ubi->max_write_size % ubi->min_io_size || !is_power_of_2(ubi->max_write_size)) { - ubi_err("bad write buffer size %d for %d min. I/O unit", + ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit", ubi->max_write_size, ubi->min_io_size); return -EINVAL; } @@ -739,7 +740,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) /* The shift must be aligned to 32-bit boundary */ if (ubi->vid_hdr_shift % 4) { - ubi_err("unaligned VID header shift %d", + ubi_err(ubi, "unaligned VID header shift %d", ubi->vid_hdr_shift); return -EINVAL; } @@ -749,7 +750,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || ubi->leb_start & (ubi->min_io_size - 1)) { - ubi_err("bad VID header (%d) or data offsets (%d)", + ubi_err(ubi, "bad VID header (%d) or data offsets (%d)", ubi->vid_hdr_offset, ubi->leb_start); return -EINVAL; } @@ -769,14 +770,14 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) * read-only mode. 
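
For reference, the vid_hdr_shift validated just above comes from splitting the configured VID header offset into a part aligned to the minimal write unit plus a remainder, roughly:

/* Sketch of how io_init() derives the aligned offset and the shift
 * (field names as in struct ubi_device). */
ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & ~(ubi->hdrs_min_io_size - 1);
ubi->vid_hdr_shift = ubi->vid_hdr_offset - ubi->vid_hdr_aloffset;
/* The shift must be 32-bit aligned, hence the (vid_hdr_shift % 4) check. */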
*/ if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { - ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); + ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); ubi->ro_mode = 1; } ubi->leb_size = ubi->peb_size - ubi->leb_start; if (!(ubi->mtd->flags & MTD_WRITEABLE)) { - ubi_msg("MTD device %d is write-protected, attach in read-only mode", + ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode", ubi->mtd->index); ubi->ro_mode = 1; } @@ -809,7 +810,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id) int err, old_reserved_pebs = vol->reserved_pebs; if (ubi->ro_mode) { - ubi_warn("skip auto-resize because of R/O mode"); + ubi_warn(ubi, "skip auto-resize because of R/O mode"); return 0; } @@ -830,21 +831,22 @@ static int autoresize(struct ubi_device *ubi, int vol_id) vtbl_rec = ubi->vtbl[vol_id]; err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); if (err) - ubi_err("cannot clean auto-resize flag for volume %d", + ubi_err(ubi, "cannot clean auto-resize flag for volume %d", vol_id); } else { desc.vol = vol; err = ubi_resize_volume(&desc, old_reserved_pebs + ubi->avail_pebs); if (err) - ubi_err("cannot auto-resize volume %d", vol_id); + ubi_err(ubi, "cannot auto-resize volume %d", + vol_id); } if (err) return err; - ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, - vol->name, old_reserved_pebs, vol->reserved_pebs); + ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs", + vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs); return 0; } @@ -885,7 +887,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, for (i = 0; i < UBI_MAX_DEVICES; i++) { ubi = ubi_devices[i]; if (ubi && mtd->index == ubi->mtd->index) { - ubi_err("mtd%d is already attached to ubi%d", + ubi_err(ubi, "mtd%d is already attached to ubi%d", mtd->index, i); return -EEXIST; } @@ -900,7 +902,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, * no sense to attach emulated MTD devices, so we prohibit this. 
*/ if (mtd->type == MTD_UBIVOLUME) { - ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI", + ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI", mtd->index); return -EINVAL; } @@ -911,7 +913,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, if (!ubi_devices[ubi_num]) break; if (ubi_num == UBI_MAX_DEVICES) { - ubi_err("only %d UBI devices may be created", + ubi_err(ubi, "only %d UBI devices may be created", UBI_MAX_DEVICES); return -ENFILE; } @@ -921,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, /* Make sure ubi_num is not busy */ if (ubi_devices[ubi_num]) { - ubi_err("ubi%d already exists", ubi_num); + ubi_err(ubi, "ubi%d already exists", ubi_num); return -EEXIST; } } @@ -953,13 +955,14 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) { - ubi_err("More than %i PEBs are needed for fastmap, sorry.", + ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.", UBI_FM_MAX_START); ubi->fm_disabled = 1; } - ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size); - ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); + ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size); + ubi_msg(ubi, "default fastmap WL pool size: %d", + ubi->fm_wl_pool.max_size); #else ubi->fm_disabled = 1; #endif @@ -970,7 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, mutex_init(&ubi->fm_mutex); init_rwsem(&ubi->fm_sem); - ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); + ubi_msg(ubi, "attaching mtd%d to ubi%d", mtd->index, ubi_num); err = io_init(ubi, max_beb_per1024); if (err) @@ -989,7 +992,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, #endif err = ubi_attach(ubi, 0); if (err) { - ubi_err("failed to attach mtd%d, error %d", mtd->index, err); + ubi_err(ubi, "failed to attach mtd%d, error %d", + mtd->index, err); goto out_free; } @@ -1010,28 +1014,28 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name); if (IS_ERR(ubi->bgt_thread)) { err = PTR_ERR(ubi->bgt_thread); - ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, - err); + ubi_err(ubi, "cannot spawn \"%s\", error %d", + ubi->bgt_name, err); goto out_debugfs; } - ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d", - mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num); - ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes", + ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)", + mtd->index, mtd->name, ubi->flash_size >> 20); + ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes", ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size); - ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d", + ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d", ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size); - ubi_msg("VID header offset: %d (aligned %d), data offset: %d", + ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d", ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start); - ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d", + ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d", ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count); - ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d", + ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. 
volumes count: %d", ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT, ubi->vtbl_slots); - ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u", + ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u", ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD, ubi->image_seq); - ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d", + ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d", ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs); /* @@ -1100,7 +1104,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) return -EBUSY; } /* This may only happen if there is a bug */ - ubi_err("%s reference count %d, destroy anyway", + ubi_err(ubi, "%s reference count %d, destroy anyway", ubi->ubi_name, ubi->ref_count); } ubi_devices[ubi_num] = NULL; @@ -1108,7 +1112,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) ubi_assert(ubi_num == ubi->ubi_num); ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); - ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); + ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index); #ifdef CONFIG_MTD_UBI_FASTMAP /* If we don't write a new fastmap at detach time we lose all * EC updates that have been made since the last written fastmap. */ @@ -1136,7 +1140,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) put_mtd_device(ubi->mtd); vfree(ubi->peb_buf); vfree(ubi->fm_buf); - ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); + ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index); put_device(&ubi->dev); return 0; } @@ -1218,7 +1222,8 @@ static int __init ubi_init(void) BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); if (mtd_devs > UBI_MAX_DEVICES) { - ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); + pr_err("UBI error: too many MTD devices, maximum is %d", + UBI_MAX_DEVICES); return -EINVAL; } @@ -1226,19 +1231,19 @@ static int __init ubi_init(void) ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); if (IS_ERR(ubi_class)) { err = PTR_ERR(ubi_class); - ubi_err("cannot create UBI class"); + pr_err("UBI error: cannot create UBI class"); goto out; } err = class_create_file(ubi_class, &ubi_version); if (err) { - ubi_err("cannot create sysfs file"); + pr_err("UBI error: cannot create sysfs file"); goto out_class; } err = misc_register(&ubi_ctrl_cdev); if (err) { - ubi_err("cannot register device"); + pr_err("UBI error: cannot register device"); goto out_version; } @@ -1265,7 +1270,8 @@ static int __init ubi_init(void) mtd = open_mtd_device(p->name); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); - ubi_err("cannot open mtd %s, error %d", p->name, err); + pr_err("UBI error: cannot open mtd %s, error %d", + p->name, err); /* See comment below re-ubi_is_module(). */ if (ubi_is_module()) goto out_detach; @@ -1277,7 +1283,8 @@ static int __init ubi_init(void) p->vid_hdr_offs, p->max_beb_per1024); mutex_unlock(&ubi_devices_mutex); if (err < 0) { - ubi_err("cannot attach mtd%d", mtd->index); + pr_err("UBI error: cannot attach mtd%d", + mtd->index); put_mtd_device(mtd); /* @@ -1300,7 +1307,7 @@ static int __init ubi_init(void) err = ubiblock_init(); if (err) { - ubi_err("block: cannot initialize, error %d", err); + pr_err("UBI error: block: cannot initialize, error %d", err); /* See comment above re-ubi_is_module(). 
*/ if (ubi_is_module()) @@ -1326,7 +1333,7 @@ out_version: out_class: class_destroy(ubi_class); out: - ubi_err("cannot initialize UBI, error %d", err); + pr_err("UBI error: cannot initialize UBI, error %d", err); return err; } late_initcall(ubi_init); @@ -1365,7 +1372,7 @@ static int __init bytes_str_to_int(const char *str) result = simple_strtoul(str, &endp, 0); if (str == endp || result >= INT_MAX) { - ubi_err("incorrect bytes count: \"%s\"\n", str); + pr_err("UBI error: incorrect bytes count: \"%s\"\n", str); return -EINVAL; } @@ -1381,7 +1388,7 @@ static int __init bytes_str_to_int(const char *str) case '\0': break; default: - ubi_err("incorrect bytes count: \"%s\"\n", str); + pr_err("UBI error: incorrect bytes count: \"%s\"\n", str); return -EINVAL; } @@ -1408,20 +1415,20 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) return -EINVAL; if (mtd_devs == UBI_MAX_DEVICES) { - ubi_err("too many parameters, max. is %d\n", - UBI_MAX_DEVICES); + pr_err("UBI error: too many parameters, max. is %d\n", + UBI_MAX_DEVICES); return -EINVAL; } len = strnlen(val, MTD_PARAM_LEN_MAX); if (len == MTD_PARAM_LEN_MAX) { - ubi_err("parameter \"%s\" is too long, max. is %d\n", - val, MTD_PARAM_LEN_MAX); + pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n", + val, MTD_PARAM_LEN_MAX); return -EINVAL; } if (len == 0) { - pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n"); + pr_err("UBI warning: empty 'mtd=' parameter - ignored\n"); return 0; } @@ -1435,7 +1442,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) tokens[i] = strsep(&pbuf, ","); if (pbuf) { - ubi_err("too many arguments at \"%s\"\n", val); + pr_err("UBI error: too many arguments at \"%s\"\n", val); return -EINVAL; } @@ -1455,8 +1462,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) int err = kstrtoint(token, 10, &p->max_beb_per1024); if (err) { - ubi_err("bad value for max_beb_per1024 parameter: %s", - token); + pr_err("UBI error: bad value for max_beb_per1024 parameter: %s", + token); return -EINVAL; } } @@ -1466,7 +1473,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) int err = kstrtoint(token, 10, &p->ubi_num); if (err) { - ubi_err("bad value for ubi_num parameter: %s", token); + pr_err("UBI error: bad value for ubi_num parameter: %s", + token); return -EINVAL; } } else diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 59de69a24e40..3410ea8109f8 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -48,13 +48,14 @@ /** * get_exclusive - get exclusive access to an UBI volume. + * @ubi: UBI device description object * @desc: volume descriptor * * This function changes UBI volume open mode to "exclusive". Returns previous * mode value (positive integer) in case of success and a negative error code * in case of failure. 
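
Since get_exclusive() hands back the previous open mode on success, callers can escalate for the duration of one operation and then restore the saved mode; a sketch of the intended usage, assuming revoke_exclusive() as the companion helper:

/* Sketch: temporarily take exclusive access, then drop back. */
int prev_mode = get_exclusive(ubi, desc);

if (prev_mode < 0)
	return prev_mode;	/* e.g. -EBUSY, someone else has it open */
/* ... perform the operation that requires exclusive access ... */
revoke_exclusive(desc, prev_mode);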
*/ -static int get_exclusive(struct ubi_volume_desc *desc) +static int get_exclusive(struct ubi_device *ubi, struct ubi_volume_desc *desc) { int users, err; struct ubi_volume *vol = desc->vol; @@ -63,7 +64,7 @@ static int get_exclusive(struct ubi_volume_desc *desc) users = vol->readers + vol->writers + vol->exclusive; ubi_assert(users > 0); if (users > 1) { - ubi_err("%d users for volume %d", users, vol->vol_id); + ubi_err(ubi, "%d users for volume %d", users, vol->vol_id); err = -EBUSY; } else { vol->readers = vol->writers = 0; @@ -134,7 +135,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file) vol->ubi->ubi_num, vol->vol_id, desc->mode); if (vol->updating) { - ubi_warn("update of volume %d not finished, volume is damaged", + ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged", vol->vol_id); ubi_assert(!vol->changing_leb); vol->updating = 0; @@ -158,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) if (vol->updating) { /* Update is in progress, seeking is prohibited */ - ubi_err("updating"); + ubi_err(vol->ubi, "updating"); return -EBUSY; } @@ -193,11 +194,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, count, *offp, vol->vol_id); if (vol->updating) { - ubi_err("updating"); + ubi_err(vol->ubi, "updating"); return -EBUSY; } if (vol->upd_marker) { - ubi_err("damaged volume, update marker is set"); + ubi_err(vol->ubi, "damaged volume, update marker is set"); return -EBADF; } if (*offp == vol->used_bytes || count == 0) @@ -277,7 +278,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); if (off & (ubi->min_io_size - 1)) { - ubi_err("unaligned position"); + ubi_err(ubi, "unaligned position"); return -EINVAL; } @@ -286,7 +287,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, /* We can write only in fractions of the minimum I/O unit */ if (count & (ubi->min_io_size - 1)) { - ubi_err("unaligned write length"); + ubi_err(ubi, "unaligned write length"); return -EINVAL; } @@ -348,7 +349,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, err = ubi_more_leb_change_data(ubi, vol, buf, count); if (err < 0) { - ubi_err("cannot accept more %zd bytes of data, error %d", + ubi_err(ubi, "cannot accept more %zd bytes of data, error %d", count, err); return err; } @@ -370,7 +371,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, return err; if (err) { - ubi_warn("volume %d on UBI device %d is corrupted", + ubi_warn(ubi, "volume %d on UBI device %d is corrupted", vol->vol_id, ubi->ubi_num); vol->corrupted = 1; } @@ -420,7 +421,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, break; } - err = get_exclusive(desc); + err = get_exclusive(ubi, desc); if (err < 0) break; @@ -456,7 +457,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, req.bytes < 0 || req.lnum >= vol->usable_leb_size) break; - err = get_exclusive(desc); + err = get_exclusive(ubi, desc); if (err < 0) break; @@ -642,7 +643,7 @@ static int verify_mkvol_req(const struct ubi_device *ubi, return 0; bad: - ubi_err("bad volume creation request"); + ubi_err(ubi, "bad volume creation request"); ubi_dump_mkvol_req(req); return err; } @@ -708,12 +709,12 @@ static int rename_volumes(struct ubi_device *ubi, for (i = 0; i < req->count - 1; i++) { for (n = i + 1; n < req->count; n++) { if (req->ents[i].vol_id == req->ents[n].vol_id) { - 
ubi_err("duplicated volume id %d", + ubi_err(ubi, "duplicated volume id %d", req->ents[i].vol_id); return -EINVAL; } if (!strcmp(req->ents[i].name, req->ents[n].name)) { - ubi_err("duplicated volume name \"%s\"", + ubi_err(ubi, "duplicated volume name \"%s\"", req->ents[i].name); return -EINVAL; } @@ -736,7 +737,8 @@ static int rename_volumes(struct ubi_device *ubi, re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE); if (IS_ERR(re->desc)) { err = PTR_ERR(re->desc); - ubi_err("cannot open volume %d, error %d", vol_id, err); + ubi_err(ubi, "cannot open volume %d, error %d", + vol_id, err); kfree(re); goto out_free; } @@ -795,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi, continue; /* The volume exists but busy, or an error occurred */ - ubi_err("cannot open volume \"%s\", error %d", + ubi_err(ubi, "cannot open volume \"%s\", error %d", re->new_name, err); goto out_free; } diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 63cb1d7236ce..7335c9ff9d99 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -43,12 +43,12 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len) return; err = mtd_read(ubi->mtd, addr, len, &read, buf); if (err && err != -EUCLEAN) { - ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes", + ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes", err, len, pnum, offset, read); goto out; } - ubi_msg("dumping %d bytes of data from PEB %d, offset %d", + ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d", len, pnum, offset); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); out: @@ -238,8 +238,8 @@ int ubi_debugfs_init(void) if (IS_ERR_OR_NULL(dfs_rootdir)) { int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); - ubi_err("cannot create \"ubi\" debugfs directory, error %d\n", - err); + pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n", + err); return err; } @@ -433,7 +433,7 @@ out_remove: debugfs_remove_recursive(d->dfs_dir); out: err = dent ? 
PTR_ERR(dent) : -ENODEV; - ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n", + ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n", fname, err); return err; } diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 2402d3b50171..a40020cf0923 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -422,7 +422,7 @@ retry: */ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_BAD_HDR) { - ubi_warn("corrupted VID header at PEB %d, LEB %d:%d", + ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", pnum, vol_id, lnum); err = -EBADMSG; } else @@ -448,7 +448,7 @@ retry: goto out_unlock; scrub = 1; if (!check) { - ubi_msg("force data checking"); + ubi_msg(ubi, "force data checking"); check = 1; goto retry; } @@ -459,7 +459,7 @@ retry: if (check) { uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len); if (crc1 != crc) { - ubi_warn("CRC error: calculated %#08x, must be %#08x", + ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x", crc1, crc); err = -EBADMSG; goto out_unlock; @@ -513,7 +513,8 @@ retry: return new_pnum; } - ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); + ubi_msg(ubi, "recover PEB %d, move data to PEB %d", + pnum, new_pnum); err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); if (err && err != UBI_IO_BITFLIPS) { @@ -554,7 +555,7 @@ retry: up_read(&ubi->fm_sem); ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); - ubi_msg("data was successfully recovered"); + ubi_msg(ubi, "data was successfully recovered"); return 0; out_unlock: @@ -569,13 +570,13 @@ write_error: * Bad luck? This physical eraseblock is bad too? Crud. Let's try to * get another one. */ - ubi_warn("failed to write to PEB %d", new_pnum); + ubi_warn(ubi, "failed to write to PEB %d", new_pnum); ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); if (++tries > UBI_IO_RETRIES) { ubi_free_vid_hdr(ubi, vid_hdr); return err; } - ubi_msg("try again"); + ubi_msg(ubi, "try again"); goto retry; } @@ -613,7 +614,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, err = ubi_io_write_data(ubi, buf, pnum, offset, len); if (err) { - ubi_warn("failed to write data to PEB %d", pnum); + ubi_warn(ubi, "failed to write data to PEB %d", pnum); if (err == -EIO && ubi->bad_allowed) err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); @@ -654,7 +655,7 @@ retry: err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); if (err) { - ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", + ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", vol_id, lnum, pnum); goto write_error; } @@ -662,7 +663,7 @@ retry: if (len) { err = ubi_io_write_data(ubi, buf, pnum, offset, len); if (err) { - ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", + ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", len, offset, vol_id, lnum, pnum); goto write_error; } @@ -698,7 +699,7 @@ write_error: } vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); - ubi_msg("try another PEB"); + ubi_msg(ubi, "try another PEB"); goto retry; } @@ -775,14 +776,14 @@ retry: err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); if (err) { - ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", + ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", vol_id, lnum, pnum); goto write_error; } err = ubi_io_write_data(ubi, buf, pnum, 0, len); if (err) { - ubi_warn("failed to write %d bytes of data to PEB %d", + ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", len, pnum); goto write_error; } @@ -818,7 +819,7 @@ 
write_error: } vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); - ubi_msg("try another PEB"); + ubi_msg(ubi, "try another PEB"); goto retry; } @@ -893,14 +894,14 @@ retry: err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); if (err) { - ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", + ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", vol_id, lnum, pnum); goto write_error; } err = ubi_io_write_data(ubi, buf, pnum, 0, len); if (err) { - ubi_warn("failed to write %d bytes of data to PEB %d", + ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", len, pnum); goto write_error; } @@ -940,7 +941,7 @@ write_error: } vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); - ubi_msg("try another PEB"); + ubi_msg(ubi, "try another PEB"); goto retry; } @@ -1063,7 +1064,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, dbg_wl("read %d bytes of data", aldata_size); err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); if (err && err != UBI_IO_BITFLIPS) { - ubi_warn("error %d while reading data from PEB %d", + ubi_warn(ubi, "error %d while reading data from PEB %d", err, from); err = MOVE_SOURCE_RD_ERR; goto out_unlock_buf; } @@ -1113,7 +1114,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); if (err) { if (err != UBI_IO_BITFLIPS) { - ubi_warn("error %d while reading VID header back from PEB %d", + ubi_warn(ubi, "error %d while reading VID header back from PEB %d", err, to); if (is_error_sane(err)) err = MOVE_TARGET_RD_ERR; @@ -1140,7 +1141,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); if (err) { if (err != UBI_IO_BITFLIPS) { - ubi_warn("error %d while reading data back from PEB %d", + ubi_warn(ubi, "error %d while reading data back from PEB %d", err, to); if (is_error_sane(err)) err = MOVE_TARGET_RD_ERR; @@ -1152,7 +1153,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, cond_resched(); if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { - ubi_warn("read data back from PEB %d and it is different", + ubi_warn(ubi, "read data back from PEB %d and it is different", to); err = -EINVAL; goto out_unlock_buf; @@ -1205,10 +1206,10 @@ static void print_rsvd_warning(struct ubi_device *ubi, return; } - ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d", + ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); if (ubi->corr_peb_count) - ubi_warn("%d PEBs are corrupted and not used", + ubi_warn(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); } @@ -1286,7 +1287,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, fm_eba[i][j] == UBI_LEB_UNMAPPED) continue; - ubi_err("LEB:%i:%i is PEB:%i instead of %i!", + ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!", vol->vol_id, i, fm_eba[i][j], scan_eba[i][j]); ubi_assert(0); @@ -1366,10 +1367,10 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) } if (ubi->avail_pebs < EBA_RESERVED_PEBS) { - ubi_err("not enough physical eraseblocks (%d, need %d)", + ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)", ubi->avail_pebs, EBA_RESERVED_PEBS); if (ubi->corr_peb_count) - ubi_err("%d PEBs are corrupted and not used", + ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); err = -ENOSPC; goto out_free; diff --git a/drivers/mtd/ubi/fastmap.c
b/drivers/mtd/ubi/fastmap.c index cfd5b5e90156..b56672bf3294 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -330,7 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, if (found) av = tmp_av; else { - ubi_err("orphaned volume in fastmap pool!"); + ubi_err(ubi, "orphaned volume in fastmap pool!"); kmem_cache_free(ai->aeb_slab_cache, new_aeb); return UBI_BAD_FASTMAP; } @@ -414,14 +414,14 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, pnum = be32_to_cpu(pebs[i]); if (ubi_io_is_bad(ubi, pnum)) { - ubi_err("bad PEB in fastmap pool!"); + ubi_err(ubi, "bad PEB in fastmap pool!"); ret = UBI_BAD_FASTMAP; goto out; } err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (err && err != UBI_IO_BITFLIPS) { - ubi_err("unable to read EC header! PEB:%i err:%i", + ubi_err(ubi, "unable to read EC header! PEB:%i err:%i", pnum, err); ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; @@ -435,7 +435,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, image_seq = be32_to_cpu(ech->image_seq); if (image_seq && (image_seq != ubi->image_seq)) { - ubi_err("bad image seq: 0x%x, expected: 0x%x", + ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x", be32_to_cpu(ech->image_seq), ubi->image_seq); ret = UBI_BAD_FASTMAP; goto out; @@ -493,7 +493,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, } } else { /* We are paranoid and fall back to scanning mode */ - ubi_err("fastmap pool PEBs contains damaged PEBs!"); + ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!"); ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; } @@ -588,7 +588,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, goto fail_bad; if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { - ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x", + ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x", be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); goto fail_bad; } @@ -598,7 +598,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { - ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", + ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x", be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); goto fail_bad; } @@ -608,7 +608,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { - ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", + ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x", be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); goto fail_bad; } @@ -619,25 +619,26 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { - ubi_err("bad pool size: %i", pool_size); + ubi_err(ubi, "bad pool size: %i", pool_size); goto fail_bad; } if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { - ubi_err("bad WL pool size: %i", wl_pool_size); + ubi_err(ubi, "bad WL pool size: %i", wl_pool_size); goto fail_bad; } if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || fm->max_pool_size < 0) { - ubi_err("bad maximal pool size: %i", fm->max_pool_size); + ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size); goto fail_bad; } if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || fm->max_wl_pool_size < 0) { - ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); + ubi_err(ubi, "bad maximal WL pool 
size: %i", + fm->max_wl_pool_size); goto fail_bad; } @@ -696,8 +697,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, goto fail_bad; if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { - ubi_err("bad fastmap vol header magic: 0x%x, " \ - "expected: 0x%x", + ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x", be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); goto fail_bad; } @@ -722,8 +722,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, goto fail_bad; if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { - ubi_err("bad fastmap EBA header magic: 0x%x, " \ - "expected: 0x%x", + ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x", be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); goto fail_bad; } @@ -788,7 +787,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, int err; if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { - ubi_err("bad PEB in fastmap EBA orphan list"); + ubi_err(ubi, "bad PEB in fastmap EBA orphan list"); ret = UBI_BAD_FASTMAP; kfree(ech); goto fail; @@ -796,8 +795,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); if (err && err != UBI_IO_BITFLIPS) { - ubi_err("unable to read EC header! PEB:%i " \ - "err:%i", tmp_aeb->pnum, err); + ubi_err(ubi, "unable to read EC header! PEB:%i err:%i", + tmp_aeb->pnum, err); ret = err > 0 ? UBI_BAD_FASTMAP : err; kfree(ech); @@ -908,14 +907,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, fm->to_be_tortured[0] = 1; if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { - ubi_err("bad super block magic: 0x%x, expected: 0x%x", + ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x", be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } if (fmsb->version != UBI_FM_FMT_VERSION) { - ubi_err("bad fastmap version: %i, expected: %i", + ubi_err(ubi, "bad fastmap version: %i, expected: %i", fmsb->version, UBI_FM_FMT_VERSION); ret = UBI_BAD_FASTMAP; goto free_fm_sb; @@ -923,15 +922,16 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, used_blocks = be32_to_cpu(fmsb->used_blocks); if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { - ubi_err("number of fastmap blocks is invalid: %i", used_blocks); + ubi_err(ubi, "number of fastmap blocks is invalid: %i", + used_blocks); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } fm_size = ubi->leb_size * used_blocks; if (fm_size != ubi->fm_size) { - ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, - ubi->fm_size); + ubi_err(ubi, "bad fastmap size: %zi, expected: %zi", + fm_size, ubi->fm_size); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } @@ -960,7 +960,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (ret && ret != UBI_IO_BITFLIPS) { - ubi_err("unable to read fastmap block# %i EC (PEB: %i)", + ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)", i, pnum); if (ret > 0) ret = UBI_BAD_FASTMAP; @@ -977,7 +977,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, * we shouldn't fail if image_seq == 0. 
*/ if (image_seq && (image_seq != ubi->image_seq)) { - ubi_err("wrong image seq:%d instead of %d", + ubi_err(ubi, "wrong image seq:%d instead of %d", be32_to_cpu(ech->image_seq), ubi->image_seq); ret = UBI_BAD_FASTMAP; goto free_hdr; @@ -985,15 +985,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (ret && ret != UBI_IO_BITFLIPS) { - ubi_err("unable to read fastmap block# %i (PEB: %i)", + ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)", i, pnum); goto free_hdr; } if (i == 0) { if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { - ubi_err("bad fastmap anchor vol_id: 0x%x," \ - " expected: 0x%x", + ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x", be32_to_cpu(vh->vol_id), UBI_FM_SB_VOLUME_ID); ret = UBI_BAD_FASTMAP; @@ -1001,8 +1000,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, } } else { if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { - ubi_err("bad fastmap data vol_id: 0x%x," \ - " expected: 0x%x", + ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x", be32_to_cpu(vh->vol_id), UBI_FM_DATA_VOLUME_ID); ret = UBI_BAD_FASTMAP; @@ -1016,7 +1014,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, ubi->leb_start, ubi->leb_size); if (ret && ret != UBI_IO_BITFLIPS) { - ubi_err("unable to read fastmap block# %i (PEB: %i, " \ + ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, " "err: %i)", i, pnum, ret); goto free_hdr; } @@ -1030,8 +1028,9 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, fmsb2->data_crc = 0; crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); if (crc != tmp_crc) { - ubi_err("fastmap data CRC is invalid"); - ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); + ubi_err(ubi, "fastmap data CRC is invalid"); + ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x", + tmp_crc, crc); ret = UBI_BAD_FASTMAP; goto free_hdr; } @@ -1067,9 +1066,10 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, ubi->fm = fm; ubi->fm_pool.max_size = ubi->fm->max_pool_size; ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; - ubi_msg("attached by fastmap"); - ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size); - ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); + ubi_msg(ubi, "attached by fastmap"); + ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size); + ubi_msg(ubi, "fastmap WL pool size: %d", + ubi->fm_wl_pool.max_size); ubi->fm_disabled = 0; ubi_free_vid_hdr(ubi, vh); @@ -1077,7 +1077,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, out: mutex_unlock(&ubi->fm_mutex); if (ret == UBI_BAD_FASTMAP) - ubi_err("Attach by fastmap failed, doing a full scan!"); + ubi_err(ubi, "Attach by fastmap failed, doing a full scan!"); return ret; free_hdr: @@ -1273,7 +1273,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); if (ret) { - ubi_err("unable to write vid_hdr to fastmap SB!"); + ubi_err(ubi, "unable to write vid_hdr to fastmap SB!"); goto out_kfree; } @@ -1293,7 +1293,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); if (ret) { - ubi_err("unable to write vid_hdr to PEB %i!", + ubi_err(ubi, "unable to write vid_hdr to PEB %i!", 
new_fm->e[i]->pnum); goto out_kfree; } @@ -1303,7 +1303,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size), new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size); if (ret) { - ubi_err("unable to write fastmap to PEB %i!", + ubi_err(ubi, "unable to write fastmap to PEB %i!", new_fm->e[i]->pnum); goto out_kfree; } @@ -1450,7 +1450,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) ubi->fm = NULL; if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) { - ubi_err("fastmap too large"); + ubi_err(ubi, "fastmap too large"); ret = -ENOSPC; goto err; } @@ -1462,7 +1462,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) if (!tmp_e && !old_fm) { int j; - ubi_err("could not get any free erase block"); + ubi_err(ubi, "could not get any free erase block"); for (j = 1; j < i; j++) ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); @@ -1478,7 +1478,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); - ubi_err("could not erase old fastmap PEB"); + ubi_err(ubi, "could not erase old fastmap PEB"); goto err; } @@ -1504,7 +1504,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) ret = erase_block(ubi, old_fm->e[0]->pnum); if (ret < 0) { int i; - ubi_err("could not erase old anchor PEB"); + ubi_err(ubi, "could not erase old anchor PEB"); for (i = 1; i < new_fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, new_fm->e[i], @@ -1525,7 +1525,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) } else { if (!tmp_e) { int i; - ubi_err("could not find any anchor PEB"); + ubi_err(ubi, "could not find any anchor PEB"); for (i = 1; i < new_fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); @@ -1555,13 +1555,13 @@ out_unlock: err: kfree(new_fm); - ubi_warn("Unable to write new fastmap, err=%i", ret); + ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret); ret = 0; if (old_fm) { ret = invalidate_fastmap(ubi, old_fm); if (ret < 0) - ubi_err("Unable to invalidiate current fastmap!"); + ubi_err(ubi, "Unable to invalidate current fastmap!"); else if (ret) ret = 0; } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index d36134925d31..396aaa543362 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -177,19 +177,20 @@ retry: * enabled. A corresponding message will be printed * later, when it has been scrubbed.
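* (A bit-flip read still returns valid data; MTD reports it as
* -EUCLEAN, so the caller only needs to schedule this PEB for
* scrubbing rather than fail the read.)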
*/ - ubi_msg("fixable bit-flip detected at PEB %d", pnum); + ubi_msg(ubi, "fixable bit-flip detected at PEB %d", + pnum); ubi_assert(len == read); return UBI_IO_BITFLIPS; } if (retries++ < UBI_IO_RETRIES) { - ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry", + ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry", err, errstr, len, pnum, offset, read); yield(); goto retry; } - ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes", + ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes", err, errstr, len, pnum, offset, read); dump_stack(); @@ -246,7 +247,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0); if (ubi->ro_mode) { - ubi_err("read-only mode"); + ubi_err(ubi, "read-only mode"); return -EROFS; } @@ -273,7 +274,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, } if (ubi_dbg_is_write_failure(ubi)) { - ubi_err("cannot write %d bytes to PEB %d:%d (emulated)", + ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)", len, pnum, offset); dump_stack(); return -EIO; @@ -282,7 +283,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, addr = (loff_t)pnum * ubi->peb_size + offset; err = mtd_write(ubi->mtd, addr, len, &written, buf); if (err) { - ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes", + ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes", err, len, pnum, offset, written); dump_stack(); ubi_dump_flash(ubi, pnum, offset, len); @@ -338,7 +339,7 @@ static int do_sync_erase(struct ubi_device *ubi, int pnum) ubi_assert(pnum >= 0 && pnum < ubi->peb_count); if (ubi->ro_mode) { - ubi_err("read-only mode"); + ubi_err(ubi, "read-only mode"); return -EROFS; } @@ -355,12 +356,12 @@ retry: err = mtd_erase(ubi->mtd, &ei); if (err) { if (retries++ < UBI_IO_RETRIES) { - ubi_warn("error %d while erasing PEB %d, retry", + ubi_warn(ubi, "error %d while erasing PEB %d, retry", err, pnum); yield(); goto retry; } - ubi_err("cannot erase PEB %d, error %d", pnum, err); + ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err); dump_stack(); return err; } @@ -368,17 +369,18 @@ retry: err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE || ei.state == MTD_ERASE_FAILED); if (err) { - ubi_err("interrupted PEB %d erasure", pnum); + ubi_err(ubi, "interrupted PEB %d erasure", pnum); return -EINTR; } if (ei.state == MTD_ERASE_FAILED) { if (retries++ < UBI_IO_RETRIES) { - ubi_warn("error while erasing PEB %d, retry", pnum); + ubi_warn(ubi, "error while erasing PEB %d, retry", + pnum); yield(); goto retry; } - ubi_err("cannot erase PEB %d", pnum); + ubi_err(ubi, "cannot erase PEB %d", pnum); dump_stack(); return -EIO; } @@ -388,7 +390,7 @@ retry: return err; if (ubi_dbg_is_erase_failure(ubi)) { - ubi_err("cannot erase PEB %d (emulated)", pnum); + ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum); return -EIO; } @@ -411,7 +413,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) { int err, i, patt_count; - ubi_msg("run torture test for PEB %d", pnum); + ubi_msg(ubi, "run torture test for PEB %d", pnum); patt_count = ARRAY_SIZE(patterns); ubi_assert(patt_count > 0); @@ -428,7 +430,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size); if (err == 0) { - ubi_err("erased PEB %d, but a non-0xFF byte 
found", + ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found", pnum); err = -EIO; goto out; @@ -448,7 +450,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) err = ubi_check_pattern(ubi->peb_buf, patterns[i], ubi->peb_size); if (err == 0) { - ubi_err("pattern %x checking failed for PEB %d", + ubi_err(ubi, "pattern %x checking failed for PEB %d", patterns[i], pnum); err = -EIO; goto out; @@ -456,7 +458,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) } err = patt_count; - ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum); + ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum); out: mutex_unlock(&ubi->buf_mutex); @@ -466,7 +468,7 @@ out: * has not passed because it happened on a freshly erased * physical eraseblock which means something is wrong with it. */ - ubi_err("read problems on freshly erased PEB %d, must be bad", + ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad", pnum); err = -EIO; } @@ -542,7 +544,7 @@ error: * it. Supposedly the flash media or the driver is screwed up, so * return an error. */ - ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err); + ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err); ubi_dump_flash(ubi, pnum, 0, ubi->peb_size); return -EIO; } @@ -574,7 +576,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture) return err; if (ubi->ro_mode) { - ubi_err("read-only mode"); + ubi_err(ubi, "read-only mode"); return -EROFS; } @@ -616,7 +618,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum) ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); if (ret < 0) - ubi_err("error %d while checking if PEB %d is bad", + ubi_err(ubi, "error %d while checking if PEB %d is bad", ret, pnum); else if (ret) dbg_io("PEB %d is bad", pnum); @@ -642,7 +644,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) ubi_assert(pnum >= 0 && pnum < ubi->peb_count); if (ubi->ro_mode) { - ubi_err("read-only mode"); + ubi_err(ubi, "read-only mode"); return -EROFS; } @@ -651,7 +653,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); if (err) - ubi_err("cannot mark PEB %d bad, error %d", pnum, err); + ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err); return err; } @@ -674,32 +676,32 @@ static int validate_ec_hdr(const struct ubi_device *ubi, leb_start = be32_to_cpu(ec_hdr->data_offset); if (ec_hdr->version != UBI_VERSION) { - ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d", + ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d", UBI_VERSION, (int)ec_hdr->version); goto bad; } if (vid_hdr_offset != ubi->vid_hdr_offset) { - ubi_err("bad VID header offset %d, expected %d", + ubi_err(ubi, "bad VID header offset %d, expected %d", vid_hdr_offset, ubi->vid_hdr_offset); goto bad; } if (leb_start != ubi->leb_start) { - ubi_err("bad data offset %d, expected %d", + ubi_err(ubi, "bad data offset %d, expected %d", leb_start, ubi->leb_start); goto bad; } if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { - ubi_err("bad erase counter %lld", ec); + ubi_err(ubi, "bad erase counter %lld", ec); goto bad; } return 0; bad: - ubi_err("bad EC header"); + ubi_err(ubi, "bad EC header"); ubi_dump_ec_hdr(ec_hdr); dump_stack(); return 1; @@ -765,7 +767,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { /* The physical eraseblock is 
supposedly empty */ if (verbose) - ubi_warn("no EC header found at PEB %d, only 0xFF bytes", + ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes", pnum); dbg_bld("no EC header found at PEB %d, only 0xFF bytes", pnum); @@ -780,7 +782,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, * 0xFF bytes. Report that the header is corrupted. */ if (verbose) { - ubi_warn("bad magic number at PEB %d: %08x instead of %08x", + ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x", pnum, magic, UBI_EC_HDR_MAGIC); ubi_dump_ec_hdr(ec_hdr); } @@ -794,7 +796,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (hdr_crc != crc) { if (verbose) { - ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x", + ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); ubi_dump_ec_hdr(ec_hdr); } @@ -810,7 +812,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, /* And of course validate what has just been read from the media */ err = validate_ec_hdr(ubi, ec_hdr); if (err) { - ubi_err("validation failed for PEB %d", pnum); + ubi_err(ubi, "validation failed for PEB %d", pnum); return -EINVAL; } @@ -884,40 +886,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi, int usable_leb_size = ubi->leb_size - data_pad; if (copy_flag != 0 && copy_flag != 1) { - ubi_err("bad copy_flag"); + ubi_err(ubi, "bad copy_flag"); goto bad; } if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || data_pad < 0) { - ubi_err("negative values"); + ubi_err(ubi, "negative values"); goto bad; } if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { - ubi_err("bad vol_id"); + ubi_err(ubi, "bad vol_id"); goto bad; } if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { - ubi_err("bad compat"); + ubi_err(ubi, "bad compat"); goto bad; } if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && compat != UBI_COMPAT_REJECT) { - ubi_err("bad compat"); + ubi_err(ubi, "bad compat"); goto bad; } if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { - ubi_err("bad vol_type"); + ubi_err(ubi, "bad vol_type"); goto bad; } if (data_pad >= ubi->leb_size / 2) { - ubi_err("bad data_pad"); + ubi_err(ubi, "bad data_pad"); goto bad; } @@ -929,45 +931,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi, * mapped logical eraseblocks. 
*/ if (used_ebs == 0) { - ubi_err("zero used_ebs"); + ubi_err(ubi, "zero used_ebs"); goto bad; } if (data_size == 0) { - ubi_err("zero data_size"); + ubi_err(ubi, "zero data_size"); goto bad; } if (lnum < used_ebs - 1) { if (data_size != usable_leb_size) { - ubi_err("bad data_size"); + ubi_err(ubi, "bad data_size"); goto bad; } } else if (lnum == used_ebs - 1) { if (data_size == 0) { - ubi_err("bad data_size at last LEB"); + ubi_err(ubi, "bad data_size at last LEB"); goto bad; } } else { - ubi_err("too high lnum"); + ubi_err(ubi, "too high lnum"); goto bad; } } else { if (copy_flag == 0) { if (data_crc != 0) { - ubi_err("non-zero data CRC"); + ubi_err(ubi, "non-zero data CRC"); goto bad; } if (data_size != 0) { - ubi_err("non-zero data_size"); + ubi_err(ubi, "non-zero data_size"); goto bad; } } else { if (data_size == 0) { - ubi_err("zero data_size of copy"); + ubi_err(ubi, "zero data_size of copy"); goto bad; } } if (used_ebs != 0) { - ubi_err("bad used_ebs"); + ubi_err(ubi, "bad used_ebs"); goto bad; } } @@ -975,7 +977,7 @@ static int validate_vid_hdr(const struct ubi_device *ubi, return 0; bad: - ubi_err("bad VID header"); + ubi_err(ubi, "bad VID header"); ubi_dump_vid_hdr(vid_hdr); dump_stack(); return 1; @@ -1020,7 +1022,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { if (verbose) - ubi_warn("no VID header found at PEB %d, only 0xFF bytes", + ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes", pnum); dbg_bld("no VID header found at PEB %d, only 0xFF bytes", pnum); @@ -1031,7 +1033,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, } if (verbose) { - ubi_warn("bad magic number at PEB %d: %08x instead of %08x", + ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x", pnum, magic, UBI_VID_HDR_MAGIC); ubi_dump_vid_hdr(vid_hdr); } @@ -1045,7 +1047,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, if (hdr_crc != crc) { if (verbose) { - ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x", + ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); ubi_dump_vid_hdr(vid_hdr); } @@ -1059,7 +1061,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, err = validate_vid_hdr(ubi, vid_hdr); if (err) { - ubi_err("validation failed for PEB %d", pnum); + ubi_err(ubi, "validation failed for PEB %d", pnum); return -EINVAL; } @@ -1129,7 +1131,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum) if (!err) return err; - ubi_err("self-check failed for PEB %d", pnum); + ubi_err(ubi, "self-check failed for PEB %d", pnum); dump_stack(); return err > 0 ? 
-EINVAL : err; } @@ -1154,14 +1156,14 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, magic = be32_to_cpu(ec_hdr->magic); if (magic != UBI_EC_HDR_MAGIC) { - ubi_err("bad magic %#08x, must be %#08x", + ubi_err(ubi, "bad magic %#08x, must be %#08x", magic, UBI_EC_HDR_MAGIC); goto fail; } err = validate_ec_hdr(ubi, ec_hdr); if (err) { - ubi_err("self-check failed for PEB %d", pnum); + ubi_err(ubi, "self-check failed for PEB %d", pnum); goto fail; } @@ -1201,8 +1203,9 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(ec_hdr->hdr_crc); if (hdr_crc != crc) { - ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); - ubi_err("self-check failed for PEB %d", pnum); + ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x", + crc, hdr_crc); + ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_dump_ec_hdr(ec_hdr); dump_stack(); err = -EINVAL; @@ -1236,21 +1239,21 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, magic = be32_to_cpu(vid_hdr->magic); if (magic != UBI_VID_HDR_MAGIC) { - ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", + ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x", magic, pnum, UBI_VID_HDR_MAGIC); goto fail; } err = validate_vid_hdr(ubi, vid_hdr); if (err) { - ubi_err("self-check failed for PEB %d", pnum); + ubi_err(ubi, "self-check failed for PEB %d", pnum); goto fail; } return err; fail: - ubi_err("self-check failed for PEB %d", pnum); + ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); dump_stack(); return -EINVAL; @@ -1288,9 +1291,9 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); if (hdr_crc != crc) { - ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x", + ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); - ubi_err("self-check failed for PEB %d", pnum); + ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); dump_stack(); err = -EINVAL; @@ -1329,7 +1332,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); if (!buf1) { - ubi_err("cannot allocate memory to check writes"); + ubi_err(ubi, "cannot allocate memory to check writes"); return 0; } @@ -1345,15 +1348,15 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, if (c == c1) continue; - ubi_err("self-check failed for PEB %d:%d, len %d", + ubi_err(ubi, "self-check failed for PEB %d:%d, len %d", pnum, offset, len); - ubi_msg("data differ at position %d", i); + ubi_msg(ubi, "data differ at position %d", i); dump_len = max_t(int, 128, len - i); - ubi_msg("hex dump of the original buffer from %d to %d", + ubi_msg(ubi, "hex dump of the original buffer from %d to %d", i, i + dump_len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf + i, dump_len, 1); - ubi_msg("hex dump of the read buffer from %d to %d", + ubi_msg(ubi, "hex dump of the read buffer from %d to %d", i, i + dump_len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf1 + i, dump_len, 1); @@ -1393,20 +1396,20 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); if (!buf) { - ubi_err("cannot allocate memory to check for 0xFFs"); + 
ubi_err(ubi, "cannot allocate memory to check for 0xFFs"); return 0; } err = mtd_read(ubi->mtd, addr, len, &read, buf); if (err && !mtd_is_bitflip(err)) { - ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes", + ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes", err, len, pnum, offset, read); goto error; } err = ubi_check_pattern(buf, 0xFF, len); if (err == 0) { - ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes", + ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes", pnum, offset, len); goto fail; } @@ -1415,8 +1418,9 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) return 0; fail: - ubi_err("self-check failed for PEB %d", pnum); - ubi_msg("hex dump of the %d-%d region", offset, offset + len); + ubi_err(ubi, "self-check failed for PEB %d", pnum); + ubi_msg(ubi, "hex dump of the %d-%d region", + offset, offset + len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); err = -EINVAL; error: diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 3aac1acceeb4..f3bab669f6bb 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -204,7 +204,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) return ERR_PTR(err); } if (err == 1) { - ubi_warn("volume %d on UBI device %d is corrupted", + ubi_warn(ubi, "volume %d on UBI device %d is corrupted", vol_id, ubi->ubi_num); vol->corrupted = 1; } @@ -221,7 +221,7 @@ out_free: kfree(desc); out_put_ubi: ubi_put_device(ubi); - ubi_err("cannot open device %d, volume %d, error %d", + ubi_err(ubi, "cannot open device %d, volume %d, error %d", ubi_num, vol_id, err); return ERR_PTR(err); } @@ -411,7 +411,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { - ubi_warn("mark volume %d as corrupted", vol_id); + ubi_warn(ubi, "mark volume %d as corrupted", vol_id); vol->corrupted = 1; } diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index f913d701a5b3..dbda77e556cb 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c @@ -111,7 +111,7 @@ void ubi_update_reserved(struct ubi_device *ubi) ubi->avail_pebs -= need; ubi->rsvd_pebs += need; ubi->beb_rsvd_pebs += need; - ubi_msg("reserved more %d PEBs for bad PEB handling", need); + ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need); } /** @@ -128,7 +128,7 @@ void ubi_calculate_reserved(struct ubi_device *ubi) ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count; if (ubi->beb_rsvd_level < 0) { ubi->beb_rsvd_level = 0; - ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)", + ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)", ubi->bad_peb_count, ubi->bad_peb_limit); } } diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 320fc38fa2a1..f80ffaba9058 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -50,13 +50,14 @@ #define UBI_NAME_STR "ubi" /* Normal UBI messages */ -#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__) +#define ubi_msg(ubi, fmt, ...) 
pr_notice("UBI-%d: %s: " fmt "\n", \ + ubi->ubi_num, __func__, ##__VA_ARGS__) /* UBI warning messages */ -#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \ - __func__, ##__VA_ARGS__) +#define ubi_warn(ubi, fmt, ...) pr_warn("UBI-%d warning: %s: " fmt "\n", \ + ubi->ubi_num, __func__, ##__VA_ARGS__) /* UBI error messages */ -#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \ - __func__, ##__VA_ARGS__) +#define ubi_err(ubi, fmt, ...) pr_err("UBI-%d error: %s: " fmt "\n", \ + ubi->ubi_num, __func__, ##__VA_ARGS__) /* Background thread name pattern */ #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" @@ -987,7 +988,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi) { if (!ubi->ro_mode) { ubi->ro_mode = 1; - ubi_warn("switch to read-only mode"); + ubi_warn(ubi, "switch to read-only mode"); dump_stack(); } } diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index ec2c2dc1c1ca..2a1b6e037e1a 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c @@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, ubi_assert(!vol->updating && !vol->changing_leb); vol->updating = 1; + vol->upd_buf = vmalloc(ubi->leb_size); + if (!vol->upd_buf) + return -ENOMEM; + err = set_update_marker(ubi, vol); if (err) return err; @@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, err = clear_update_marker(ubi, vol, 0); if (err) return err; + + vfree(vol->upd_buf); vol->updating = 0; return 0; } - vol->upd_buf = vmalloc(ubi->leb_size); - if (!vol->upd_buf) - return -ENOMEM; - vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1, vol->usable_leb_size); vol->upd_bytes = bytes; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 8330703c098f..ff4d97848d1c 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -223,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) } if (vol_id == UBI_VOL_NUM_AUTO) { - ubi_err("out of volume IDs"); + ubi_err(ubi, "out of volume IDs"); err = -ENFILE; goto out_unlock; } @@ -237,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) /* Ensure that this volume does not exist */ err = -EEXIST; if (ubi->volumes[vol_id]) { - ubi_err("volume %d already exists", vol_id); + ubi_err(ubi, "volume %d already exists", vol_id); goto out_unlock; } @@ -246,7 +246,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) if (ubi->volumes[i] && ubi->volumes[i]->name_len == req->name_len && !strcmp(ubi->volumes[i]->name, req->name)) { - ubi_err("volume \"%s\" exists (ID %d)", req->name, i); + ubi_err(ubi, "volume \"%s\" exists (ID %d)", + req->name, i); goto out_unlock; } @@ -257,9 +258,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) /* Reserve physical eraseblocks */ if (vol->reserved_pebs > ubi->avail_pebs) { - ubi_err("not enough PEBs, only %d available", ubi->avail_pebs); + ubi_err(ubi, "not enough PEBs, only %d available", + ubi->avail_pebs); if (ubi->corr_peb_count) - ubi_err("%d PEBs are corrupted and not used", + ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); err = -ENOSPC; goto out_unlock; } @@ -314,7 +316,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1); err = cdev_add(&vol->cdev, dev, 1); if (err) { - ubi_err("cannot add character device"); + ubi_err(ubi, "cannot add character device"); goto out_mapping; } @@ -326,7 +328,7 @@ int
ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); if (err) { - ubi_err("cannot register device"); + ubi_err(ubi, "cannot register device"); goto out_cdev; } @@ -386,7 +388,7 @@ out_unlock: kfree(vol); else put_device(&vol->dev); - ubi_err("cannot create volume %d, error %d", vol_id, err); + ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err); return err; } @@ -454,7 +456,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) return err; out_err: - ubi_err("cannot remove volume %d, error %d", vol_id, err); + ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err); spin_lock(&ubi->volumes_lock); ubi->volumes[vol_id] = vol; out_unlock: @@ -487,7 +489,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) if (vol->vol_type == UBI_STATIC_VOLUME && reserved_pebs < vol->used_ebs) { - ubi_err("too small size %d, %d LEBs contain data", + ubi_err(ubi, "too small size %d, %d LEBs contain data", reserved_pebs, vol->used_ebs); return -EINVAL; } @@ -516,10 +518,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) if (pebs > 0) { spin_lock(&ubi->volumes_lock); if (pebs > ubi->avail_pebs) { - ubi_err("not enough PEBs: requested %d, available %d", + ubi_err(ubi, "not enough PEBs: requested %d, available %d", pebs, ubi->avail_pebs); if (ubi->corr_peb_count) - ubi_err("%d PEBs are corrupted and not used", + ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); spin_unlock(&ubi->volumes_lock); err = -ENOSPC; @@ -643,7 +645,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1); err = cdev_add(&vol->cdev, dev, 1); if (err) { - ubi_err("cannot add character device for volume %d, error %d", + ubi_err(ubi, "cannot add character device for volume %d, error %d", vol_id, err); return err; } @@ -710,7 +712,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id) if (!vol) { if (reserved_pebs) { - ubi_err("no volume info, but volume exists"); + ubi_err(ubi, "no volume info, but volume exists"); goto fail; } spin_unlock(&ubi->volumes_lock); @@ -719,90 +721,91 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id) if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || vol->name_len < 0) { - ubi_err("negative values"); + ubi_err(ubi, "negative values"); goto fail; } if (vol->alignment > ubi->leb_size || vol->alignment == 0) { - ubi_err("bad alignment"); + ubi_err(ubi, "bad alignment"); goto fail; } n = vol->alignment & (ubi->min_io_size - 1); if (vol->alignment != 1 && n) { - ubi_err("alignment is not multiple of min I/O unit"); + ubi_err(ubi, "alignment is not a multiple of min I/O unit"); goto fail; } n = ubi->leb_size % vol->alignment; if (vol->data_pad != n) { - ubi_err("bad data_pad, has to be %lld", n); + ubi_err(ubi, "bad data_pad, has to be %lld", n); goto fail; } if (vol->vol_type != UBI_DYNAMIC_VOLUME && vol->vol_type != UBI_STATIC_VOLUME) { - ubi_err("bad vol_type"); + ubi_err(ubi, "bad vol_type"); goto fail; } if (vol->upd_marker && vol->corrupted) { - ubi_err("update marker and corrupted simultaneously"); + ubi_err(ubi, "update marker and corrupted simultaneously"); goto fail; } if (vol->reserved_pebs > ubi->good_peb_count) { - ubi_err("too large reserved_pebs"); + ubi_err(ubi, "too large reserved_pebs"); goto fail; } n = ubi->leb_size - vol->data_pad; if (vol->usable_leb_size !=
ubi->leb_size - vol->data_pad) { - ubi_err("bad usable_leb_size, has to be %lld", n); + ubi_err(ubi, "bad usable_leb_size, has to be %lld", n); goto fail; } if (vol->name_len > UBI_VOL_NAME_MAX) { - ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX); + ubi_err(ubi, "too long volume name, max is %d", + UBI_VOL_NAME_MAX); goto fail; } n = strnlen(vol->name, vol->name_len + 1); if (n != vol->name_len) { - ubi_err("bad name_len %lld", n); + ubi_err(ubi, "bad name_len %lld", n); goto fail; } n = (long long)vol->used_ebs * vol->usable_leb_size; if (vol->vol_type == UBI_DYNAMIC_VOLUME) { if (vol->corrupted) { - ubi_err("corrupted dynamic volume"); + ubi_err(ubi, "corrupted dynamic volume"); goto fail; } if (vol->used_ebs != vol->reserved_pebs) { - ubi_err("bad used_ebs"); + ubi_err(ubi, "bad used_ebs"); goto fail; } if (vol->last_eb_bytes != vol->usable_leb_size) { - ubi_err("bad last_eb_bytes"); + ubi_err(ubi, "bad last_eb_bytes"); goto fail; } if (vol->used_bytes != n) { - ubi_err("bad used_bytes"); + ubi_err(ubi, "bad used_bytes"); goto fail; } } else { if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { - ubi_err("bad used_ebs"); + ubi_err(ubi, "bad used_ebs"); goto fail; } if (vol->last_eb_bytes < 0 || vol->last_eb_bytes > vol->usable_leb_size) { - ubi_err("bad last_eb_bytes"); + ubi_err(ubi, "bad last_eb_bytes"); goto fail; } if (vol->used_bytes < 0 || vol->used_bytes > n || vol->used_bytes < n - vol->usable_leb_size) { - ubi_err("bad used_bytes"); + ubi_err(ubi, "bad used_bytes"); goto fail; } } @@ -820,7 +823,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id) if (alignment != vol->alignment || data_pad != vol->data_pad || upd_marker != vol->upd_marker || vol_type != vol->vol_type || name_len != vol->name_len || strncmp(name, vol->name, name_len)) { - ubi_err("volume info is different"); + ubi_err(ubi, "volume info is different"); goto fail; } @@ -828,7 +831,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id) return 0; fail: - ubi_err("self-check failed for volume %d", vol_id); + ubi_err(ubi, "self-check failed for volume %d", vol_id); if (vol) ubi_dump_vol_info(vol); ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 07cac5f9ffb8..f8fc3081bbb4 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -30,9 +30,12 @@ * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each * other. This redundancy guarantees robustness to unclean reboots. The volume * table is basically an array of volume table records. Each record contains - * full information about the volume and protected by a CRC checksum. + * full information about the volume and is protected by a CRC checksum. Note, + * nowadays we use the atomic LEB change operation when updating the volume + * table, so we do not really need 2 LEBs anymore, but we preserve the older + * design for backward compatibility reasons. * - * The volume table is changed, it is first changed in RAM. Then LEB 0 is + * When the volume table is changed, it is first changed in RAM. Then LEB 0 is erased, and the updated volume table is written back to LEB 0. The same is then done for * LEB 1. This scheme guarantees recoverability from unclean reboots.
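* (If an unclean reboot interrupts the update of one copy, the other
* copy still carries a valid CRC; process_lvol() below detects the
* bad copy at attach time and restores it from the good one.)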
* @@ -96,12 +99,8 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { - err = ubi_eba_unmap_leb(ubi, layout_vol, i); - if (err) - return err; - - err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, - ubi->vtbl_size); + err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl, + ubi->vtbl_size); if (err) return err; } @@ -148,12 +147,8 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi, layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { - err = ubi_eba_unmap_leb(ubi, layout_vol, i); - if (err) - return err; - - err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, - ubi->vtbl_size); + err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl, + ubi->vtbl_size); if (err) return err; } @@ -190,7 +185,7 @@ static int vtbl_check(const struct ubi_device *ubi, crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); if (be32_to_cpu(vtbl[i].crc) != crc) { - ubi_err("bad CRC at record %u: %#08x, not %#08x", + ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x", i, crc, be32_to_cpu(vtbl[i].crc)); ubi_dump_vtbl_record(&vtbl[i], i); return 1; @@ -224,7 +219,7 @@ static int vtbl_check(const struct ubi_device *ubi, n = ubi->leb_size % alignment; if (data_pad != n) { - ubi_err("bad data_pad, has to be %d", n); + ubi_err(ubi, "bad data_pad, has to be %d", n); err = 6; goto bad; } @@ -240,7 +235,7 @@ static int vtbl_check(const struct ubi_device *ubi, } if (reserved_pebs > ubi->good_peb_count) { - ubi_err("too large reserved_pebs %d, good PEBs %d", + ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d", reserved_pebs, ubi->good_peb_count); err = 9; goto bad; @@ -270,7 +265,7 @@ static int vtbl_check(const struct ubi_device *ubi, if (len1 > 0 && len1 == len2 && !strncmp(vtbl[i].name, vtbl[n].name, len1)) { - ubi_err("volumes %d and %d have the same name \"%s\"", + ubi_err(ubi, "volumes %d and %d have the same name \"%s\"", i, n, vtbl[i].name); ubi_dump_vtbl_record(&vtbl[i], i); ubi_dump_vtbl_record(&vtbl[n], n); @@ -282,7 +277,7 @@ static int vtbl_check(const struct ubi_device *ubi, return 0; bad: - ubi_err("volume table check failed: record %d, error %d", i, err); + ubi_err(ubi, "volume table check failed: record %d, error %d", i, err); ubi_dump_vtbl_record(&vtbl[i], i); return -EINVAL; } @@ -446,11 +441,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); if (leb_corrupted[1]) { - ubi_warn("volume table copy #2 is corrupted"); + ubi_warn(ubi, "volume table copy #2 is corrupted"); err = create_vtbl(ubi, ai, 1, leb[0]); if (err) goto out_free; - ubi_msg("volume table was restored"); + ubi_msg(ubi, "volume table was restored"); } /* Both LEB 1 and LEB 2 are OK and consistent */ @@ -465,15 +460,15 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, } if (leb_corrupted[1]) { /* Both LEB 0 and LEB 1 are corrupted */ - ubi_err("both volume tables are corrupted"); + ubi_err(ubi, "both volume tables are corrupted"); goto out_free; } - ubi_warn("volume table copy #1 is corrupted"); + ubi_warn(ubi, "volume table copy #1 is corrupted"); err = create_vtbl(ubi, ai, 0, leb[1]); if (err) goto out_free; - ubi_msg("volume table was restored"); + ubi_msg(ubi, "volume table was restored"); vfree(leb[0]); return leb[1]; @@ -562,7 +557,7 @@ static int init_volumes(struct ubi_device *ubi, if (vtbl[i].flags & 
UBI_VTBL_AUTORESIZE_FLG) { /* Auto re-size flag may be set only for one volume */ if (ubi->autoresize_vol_id != -1) { - ubi_err("more than one auto-resize volume (%d and %d)", + ubi_err(ubi, "more than one auto-resize volume (%d and %d)", ubi->autoresize_vol_id, i); kfree(vol); return -EINVAL; } @@ -608,7 +603,7 @@ static int init_volumes(struct ubi_device *ubi, * We found a static volume which misses several * eraseblocks. Treat it as corrupted. */ - ubi_warn("static volume %d misses %d LEBs - corrupted", + ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted", av->vol_id, av->used_ebs - av->leb_count); vol->corrupted = 1; continue; @@ -646,10 +641,10 @@ static int init_volumes(struct ubi_device *ubi, vol->ubi = ubi; if (reserved_pebs > ubi->avail_pebs) { - ubi_err("not enough PEBs, required %d, available %d", + ubi_err(ubi, "not enough PEBs, required %d, available %d", reserved_pebs, ubi->avail_pebs); if (ubi->corr_peb_count) - ubi_err("%d PEBs are corrupted and not used", + ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); } ubi->rsvd_pebs += reserved_pebs; @@ -660,13 +655,14 @@ static int init_volumes(struct ubi_device *ubi, /** * check_av - check volume attaching information. + * @ubi: UBI device description object * @vol: UBI volume description object * @av: volume attaching information * * This function returns zero if the volume attaching information is consistent * with the data read from the volume table, and %-EINVAL if not. */ -static int check_av(const struct ubi_volume *vol, +static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol, const struct ubi_ainf_volume *av) { int err; @@ -694,7 +690,7 @@ static int check_av(const struct ubi_volume *vol, return 0; bad: - ubi_err("bad attaching information, error %d", err); + ubi_err(ubi, "bad attaching information, error %d", err); ubi_dump_av(av); ubi_dump_vol_info(vol); return -EINVAL; @@ -718,14 +714,15 @@ static int check_attaching_info(const struct ubi_device *ubi, struct ubi_volume *vol; if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { - ubi_err("found %d volumes while attaching, maximum is %d + %d", + ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d", ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); return -EINVAL; } if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && ai->highest_vol_id < UBI_INTERNAL_VOL_START) { - ubi_err("too large volume ID %d found", ai->highest_vol_id); + ubi_err(ubi, "too large volume ID %d found", + ai->highest_vol_id); return -EINVAL; } @@ -753,10 +750,10 @@ static int check_attaching_info(const struct ubi_device *ubi, * reboot while the volume was being removed. Discard * these eraseblocks.
*/ - ubi_msg("finish volume %d removal", av->vol_id); + ubi_msg(ubi, "finish volume %d removal", av->vol_id); ubi_remove_av(ai, av); } else if (av) { - err = check_av(vol, av); + err = check_av(ubi, vol, av); if (err) return err; } @@ -807,13 +804,13 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) if (IS_ERR(ubi->vtbl)) return PTR_ERR(ubi->vtbl); } else { - ubi_err("the layout volume was not found"); + ubi_err(ubi, "the layout volume was not found"); return -EINVAL; } } else { if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) { /* This must not happen with proper UBI images */ - ubi_err("too many LEBs (%d) in layout volume", + ubi_err(ubi, "too many LEBs (%d) in layout volume", av->leb_count); return -EINVAL; } @@ -862,7 +859,7 @@ static void self_vtbl_check(const struct ubi_device *ubi) return; if (vtbl_check(ubi, ubi->vtbl)) { - ubi_err("self-check failed"); + ubi_err(ubi, "self-check failed"); BUG(); } } diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6654f191868e..834f6fe1f5fa 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -253,7 +253,7 @@ static int do_work(struct ubi_device *ubi) */ err = wrk->func(ubi, wrk, 0); if (err) - ubi_err("work failed with error code %d", err); + ubi_err(ubi, "work failed with error code %d", err); up_read(&ubi->work_sem); return err; @@ -470,8 +470,11 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) { struct ubi_wl_entry *e = NULL; - if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) + if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) { + ubi_warn(ubi, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d", + anchor, ubi->free_count, ubi->beb_rsvd_pebs); goto out; + } if (anchor) e = find_anchor_wl_entry(&ubi->free); @@ -507,7 +510,7 @@ static int __wl_get_peb(struct ubi_device *ubi) retry: if (!ubi->free.rb_node) { if (ubi->works_count == 0) { - ubi_err("no free eraseblocks"); + ubi_err(ubi, "no free eraseblocks"); ubi_assert(list_empty(&ubi->works)); return -ENOSPC; } @@ -520,7 +523,7 @@ retry: e = find_mean_wl_entry(ubi, &ubi->free); if (!e) { - ubi_err("no free eraseblocks"); + ubi_err(ubi, "no free eraseblocks"); return -ENOSPC; } @@ -692,7 +695,8 @@ int ubi_wl_get_peb(struct ubi_device *ubi) err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, ubi->peb_size - ubi->vid_hdr_aloffset); if (err) { - ubi_err("new PEB %d does not contain all 0xFF bytes", peb); + ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", + peb); return err; } @@ -760,7 +764,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, * Erase counter overflow. Upgrade UBI and use 64-bit * erase counters internally. */ - ubi_err("erase counter overflow at PEB %d, EC %llu", + ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", e->pnum, ec); err = -EINVAL; goto out_free; @@ -1137,7 +1141,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, goto out_not_moved; } - ubi_err("error %d while reading VID header from PEB %d", + ubi_err(ubi, "error %d while reading VID header from PEB %d", err, e1->pnum); goto out_error; } @@ -1181,7 +1185,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * UBI from trying to move it over and over again. 
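* (Such PEBs are counted in ubi->erroneous_peb_count; once the count
* exceeds ubi->max_erroneous, the move is aborted via the out_error
* path below.)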
*/ if (ubi->erroneous_peb_count > ubi->max_erroneous) { - ubi_err("too many erroneous eraseblocks (%d)", + ubi_err(ubi, "too many erroneous eraseblocks (%d)", ubi->erroneous_peb_count); goto out_error; } @@ -1197,7 +1201,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, /* The PEB has been successfully moved */ if (scrubbing) - ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", + ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", e1->pnum, vol_id, lnum, e2->pnum); ubi_free_vid_hdr(ubi, vid_hdr); @@ -1212,7 +1216,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = do_sync_erase(ubi, e1, vol_id, lnum, 0); if (err) { - kmem_cache_free(ubi_wl_entry_slab, e1); if (e2) kmem_cache_free(ubi_wl_entry_slab, e2); goto out_ro; @@ -1226,10 +1229,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", e2->pnum, vol_id, lnum); err = do_sync_erase(ubi, e2, vol_id, lnum, 0); - if (err) { - kmem_cache_free(ubi_wl_entry_slab, e2); + if (err) goto out_ro; - } } dbg_wl("done"); @@ -1265,19 +1266,18 @@ out_not_moved: ubi_free_vid_hdr(ubi, vid_hdr); err = do_sync_erase(ubi, e2, vol_id, lnum, torture); - if (err) { - kmem_cache_free(ubi_wl_entry_slab, e2); + if (err) goto out_ro; - } + mutex_unlock(&ubi->move_mutex); return 0; out_error: if (vol_id != -1) - ubi_err("error %d while moving PEB %d to PEB %d", + ubi_err(ubi, "error %d while moving PEB %d to PEB %d", err, e1->pnum, e2->pnum); else - ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", + ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d", err, e1->pnum, vol_id, lnum, e2->pnum); spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; @@ -1458,7 +1458,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, return err; } - ubi_err("failed to erase PEB %d, error %d", pnum, err); + ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err); kfree(wl_wrk); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || @@ -1486,7 +1486,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, /* It is %-EIO, the PEB went bad */ if (!ubi->bad_allowed) { - ubi_err("bad physical eraseblock %d detected", pnum); + ubi_err(ubi, "bad physical eraseblock %d detected", pnum); goto out_ro; } @@ -1494,7 +1494,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, if (ubi->beb_rsvd_pebs == 0) { if (ubi->avail_pebs == 0) { spin_unlock(&ubi->volumes_lock); - ubi_err("no reserved/available physical eraseblocks"); + ubi_err(ubi, "no reserved/available physical eraseblocks"); goto out_ro; } ubi->avail_pebs -= 1; @@ -1502,7 +1502,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, } spin_unlock(&ubi->volumes_lock); - ubi_msg("mark PEB %d as bad", pnum); + ubi_msg(ubi, "mark PEB %d as bad", pnum); err = ubi_io_mark_bad(ubi, pnum); if (err) goto out_ro; @@ -1523,11 +1523,12 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ubi->good_peb_count -= 1; ubi_calculate_reserved(ubi); if (available_consumed) - ubi_warn("no PEBs in the reserved pool, used an available PEB"); + ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB"); else if (ubi->beb_rsvd_pebs) - ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); + ubi_msg(ubi, "%d PEBs left in the reserve", + ubi->beb_rsvd_pebs); else - ubi_warn("last PEB from the reserve was used"); + ubi_warn(ubi, 
"last PEB from the reserve was used"); spin_unlock(&ubi->volumes_lock); return err; @@ -1613,7 +1614,7 @@ retry: } else { err = prot_queue_del(ubi, e->pnum); if (err) { - ubi_err("PEB %d not found", pnum); + ubi_err(ubi, "PEB %d not found", pnum); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); return err; @@ -1646,7 +1647,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) { struct ubi_wl_entry *e; - ubi_msg("schedule PEB %d for scrubbing", pnum); + ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum); retry: spin_lock(&ubi->wl_lock); @@ -1678,7 +1679,7 @@ retry: err = prot_queue_del(ubi, e->pnum); if (err) { - ubi_err("PEB %d not found", pnum); + ubi_err(ubi, "PEB %d not found", pnum); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); return err; @@ -1798,15 +1799,18 @@ int ubi_thread(void *u) int failures = 0; struct ubi_device *ubi = u; - ubi_msg("background thread \"%s\" started, PID %d", + ubi_msg(ubi, "background thread \"%s\" started, PID %d", ubi->bgt_name, task_pid_nr(current)); set_freezable(); for (;;) { int err; - if (kthread_should_stop()) + if (kthread_should_stop()) { + ubi_msg(ubi, "background thread \"%s\" should stop, PID %d", + ubi->bgt_name, task_pid_nr(current)); break; + } if (try_to_freeze()) continue; @@ -1823,14 +1827,14 @@ int ubi_thread(void *u) err = do_work(ubi); if (err) { - ubi_err("%s: work failed with error code %d", + ubi_err(ubi, "%s: work failed with error code %d", ubi->bgt_name, err); if (failures++ > WL_MAX_FAILURES) { /* * Too many failures, disable the thread and * switch to read-only mode. */ - ubi_msg("%s: %d consecutive failures", + ubi_msg(ubi, "%s: %d consecutive failures", ubi->bgt_name, WL_MAX_FAILURES); ubi_ro_mode(ubi); ubi->thread_enabled = 0; @@ -1981,10 +1985,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) #endif if (ubi->avail_pebs < reserved_pebs) { - ubi_err("no enough physical eraseblocks (%d, need %d)", + ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)", ubi->avail_pebs, reserved_pebs); if (ubi->corr_peb_count) - ubi_err("%d PEBs are corrupted and not used", + ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); goto out_free; } @@ -2072,8 +2076,8 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) read_ec = be64_to_cpu(ec_hdr->ec); if (ec != read_ec && read_ec - ec > 1) { - ubi_err("self-check failed for PEB %d", pnum); - ubi_err("read EC is %lld, should be %d", read_ec, ec); + ubi_err(ubi, "self-check failed for PEB %d", pnum); + ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec); dump_stack(); err = 1; } else @@ -2102,7 +2106,7 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi, if (in_wl_tree(e, root)) return 0; - ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ", + ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ", e->pnum, e->ec, root); dump_stack(); return -EINVAL; @@ -2130,7 +2134,7 @@ static int self_check_in_pq(const struct ubi_device *ubi, if (p == e) return 0; - ubi_err("self-check failed for PEB %d, EC %d, Protect queue", + ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue", e->pnum, e->ec); dump_stack(); return -EINVAL; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 72fb86b9aa24..c9946c6c119e 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -48,7 +48,7 @@ MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); MODULE_AUTHOR("Jie Yang"); MODULE_AUTHOR("Qualcomm 
Atheros Inc., <nic-devel@qualcomm.com>"); -MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver"); +MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(ATL1C_DRV_VERSION); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 90c86cd3be14..45c408ef67d0 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -466,23 +466,6 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, return err; } -static u32 next_idx(u32 idx, struct vio_dring_state *dr) -{ - if (++idx == dr->num_entries) - idx = 0; - return idx; -} - -static u32 prev_idx(u32 idx, struct vio_dring_state *dr) -{ - if (idx == 0) - idx = dr->num_entries - 1; - else - idx--; - - return idx; -} - static struct vio_net_desc *get_rx_desc(struct vnet_port *port, struct vio_dring_state *dr, u32 index) @@ -556,7 +539,8 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, int ack_start = -1, ack_end = -1; bool send_ack = true; - end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); + end = (end == (u32) -1) ? vio_dring_prev(dr, start) + : vio_dring_next(dr, end); viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); @@ -570,7 +554,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, if (ack_start == -1) ack_start = start; ack_end = start; - start = next_idx(start, dr); + start = vio_dring_next(dr, start); if (ack && start != end) { err = vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_ACTIVE); @@ -584,7 +568,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, } } if (unlikely(ack_start == -1)) - ack_start = ack_end = prev_idx(start, dr); + ack_start = ack_end = vio_dring_prev(dr, start); if (send_ack) { port->napi_resume = false; return vnet_send_ack(port, dr, ack_start, ack_end, @@ -633,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end) found = 1; break; } - idx = next_idx(idx, dr); + idx = vio_dring_next(dr, idx); } return found; } @@ -663,7 +647,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) /* sync for race conditions with vnet_start_xmit() and tell xmit it * is time to send a trigger. */ - dr->cons = next_idx(end, dr); + dr->cons = vio_dring_next(dr, end); desc = vio_dring_entry(dr, dr->cons); if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { /* vnet_start_xmit() just populated this dring but missed @@ -784,7 +768,7 @@ ldc_ctrl: pkt->tag.stype = VIO_SUBTYPE_INFO; pkt->tag.stype_env = VIO_DRING_DATA; pkt->seq = dr->rcv_nxt; - pkt->start_idx = next_idx(port->napi_stop_idx, dr); + pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); pkt->end_idx = -1; goto napi_resume; } diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig index 93caf8e68901..2399a3921762 100644 --- a/drivers/net/wireless/ath/ath5k/Kconfig +++ b/drivers/net/wireless/ath/ath5k/Kconfig @@ -1,12 +1,13 @@ config ATH5K tristate "Atheros 5xxx wireless cards support" - depends on PCI && MAC80211 + depends on (PCI || ATH25) && MAC80211 select ATH_COMMON select MAC80211_LEDS select LEDS_CLASS select NEW_LEDS select AVERAGE - select ATH5K_PCI + select ATH5K_AHB if ATH25 + select ATH5K_PCI if !ATH25 ---help--- This module adds support for wireless adapters based on Atheros 5xxx chipset. @@ -51,9 +52,16 @@ config ATH5K_TRACER If unsure, say N. 
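For reference, the sunvnet hunks above replace the driver-local next_idx()/prev_idx() helpers with the common vio_dring_next()/vio_dring_prev() calls. The real definitions live in the sparc vio layer, which this diff does not touch; a minimal sketch of the assumed semantics, mirroring the deleted functions but taking the dring state as the first argument:

	/* struct vio_dring_state is defined in arch/sparc/include/asm/vio.h.
	 * These helpers are assumed to implement plain wrap-around index
	 * arithmetic over dr->num_entries, as the removed locals did. */
	static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index)
	{
		if (++index == dr->num_entries)
			index = 0;
		return index;
	}

	static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
	{
		if (index == 0)
			index = dr->num_entries - 1;
		else
			index--;
		return index;
	}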
+config ATH5K_AHB + bool "Atheros 5xxx AHB bus support" + depends on ATH25 + ---help--- + This adds support for WiSoC type chipsets of the 5xxx Atheros + family. + config ATH5K_PCI bool "Atheros 5xxx PCI bus support" - depends on PCI + depends on (!ATH25 && PCI) ---help--- This adds support for PCI type chipsets of the 5xxx Atheros family. diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile index 51e2d8668041..1b3a34f7f224 100644 --- a/drivers/net/wireless/ath/ath5k/Makefile +++ b/drivers/net/wireless/ath/ath5k/Makefile @@ -17,5 +17,6 @@ ath5k-y += ani.o ath5k-y += sysfs.o ath5k-y += mac80211-ops.o ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o +ath5k-$(CONFIG_ATH5K_AHB) += ahb.o ath5k-$(CONFIG_ATH5K_PCI) += pci.o obj-$(CONFIG_ATH5K) += ath5k.o diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c new file mode 100644 index 000000000000..8f387cf67340 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/nl80211.h> +#include <linux/platform_device.h> +#include <linux/etherdevice.h> +#include <linux/export.h> +#include <ath25_platform.h> +#include "ath5k.h" +#include "debug.h" +#include "base.h" +#include "reg.h" + +/* return bus cachesize in 4B word units */ +static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) +{ + *csz = L1_CACHE_BYTES >> 2; +} + +static bool +ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) +{ + struct ath5k_hw *ah = common->priv; + struct platform_device *pdev = to_platform_device(ah->dev); + struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); + u16 *eeprom, *eeprom_end; + + eeprom = (u16 *) bcfg->radio; + eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ; + + eeprom += off; + if (eeprom > eeprom_end) + return false; + + *data = *eeprom; + return true; +} + +int ath5k_hw_read_srev(struct ath5k_hw *ah) +{ + struct platform_device *pdev = to_platform_device(ah->dev); + struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); + ah->ah_mac_srev = bcfg->devid; + return 0; +} + +static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) +{ + struct platform_device *pdev = to_platform_device(ah->dev); + struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); + u8 *cfg_mac; + + if (to_platform_device(ah->dev)->id == 0) + cfg_mac = bcfg->config->wlan0_mac; + else + cfg_mac = bcfg->config->wlan1_mac; + + memcpy(mac, cfg_mac, ETH_ALEN); + return 0; +} + +static const struct ath_bus_ops ath_ahb_bus_ops = { + .ath_bus_type = ATH_AHB, + .read_cachesize = ath5k_ahb_read_cachesize, + .eeprom_read = ath5k_ahb_eeprom_read, + .eeprom_read_mac = ath5k_ahb_eeprom_read_mac, +}; + +/* Initialization */ +static int ath_ahb_probe(struct platform_device *pdev) +{ + struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); + struct ath5k_hw *ah; + struct ieee80211_hw *hw; + struct resource *res; + void __iomem *mem; + int irq; + int ret = 0; + u32 reg; + + if (!dev_get_platdata(&pdev->dev)) { + dev_err(&pdev->dev, "no platform data specified\n"); + ret = -EINVAL; + goto err_out; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "no memory resource found\n"); + ret = -ENXIO; + goto err_out; + } + + mem = ioremap_nocache(res->start, resource_size(res)); + if (mem == NULL) { + dev_err(&pdev->dev, "ioremap failed\n"); + ret = -ENOMEM; + goto err_out; + } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res == NULL) { + dev_err(&pdev->dev, "no IRQ resource found\n"); + ret = -ENXIO; + goto err_iounmap; + } + + irq = res->start; + + hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops); + if (hw == NULL) { + dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); + ret = -ENOMEM; + goto err_iounmap; + } + + ah = hw->priv; + ah->hw = hw; + ah->dev = &pdev->dev; + ah->iobase = mem; + ah->irq = irq; + ah->devid = bcfg->devid; + + if (bcfg->devid >= AR5K_SREV_AR2315_R6) { + /* Enable WMAC AHB arbitration */ + reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); + reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN; + iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); + + /* Enable global WMAC swapping */ + reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP); + reg |= AR5K_AR2315_BYTESWAP_WMAC; + iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP); + } else { + /* Enable WMAC DMA access (assuming 5312 or 231x) */ + /* TODO: check other platforms */ + reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); + if 
(to_platform_device(ah->dev)->id == 0) + reg |= AR5K_AR5312_ENABLE_WLAN0; + else + reg |= AR5K_AR5312_ENABLE_WLAN1; + iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); + + /* + * On a dual-band AR5312, the multiband radio is only + * used as pass-through. Disable 2 GHz support in the + * driver for it + */ + if (to_platform_device(ah->dev)->id == 0 && + (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == + (BD_WLAN1 | BD_WLAN0)) + ah->ah_capabilities.cap_needs_2GHz_ovr = true; + else + ah->ah_capabilities.cap_needs_2GHz_ovr = false; + } + + ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); + if (ret != 0) { + dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret); + ret = -ENODEV; + goto err_free_hw; + } + + platform_set_drvdata(pdev, hw); + + return 0; + + err_free_hw: + ieee80211_free_hw(hw); + err_iounmap: + iounmap(mem); + err_out: + return ret; +} + +static int ath_ahb_remove(struct platform_device *pdev) +{ + struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); + struct ieee80211_hw *hw = platform_get_drvdata(pdev); + struct ath5k_hw *ah; + u32 reg; + + if (!hw) + return 0; + + ah = hw->priv; + + if (bcfg->devid >= AR5K_SREV_AR2315_R6) { + /* Disable WMAC AHB arbitration */ + reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); + reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN; + iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); + } else { + /*Stop DMA access */ + reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); + if (to_platform_device(ah->dev)->id == 0) + reg &= ~AR5K_AR5312_ENABLE_WLAN0; + else + reg &= ~AR5K_AR5312_ENABLE_WLAN1; + iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); + } + + ath5k_deinit_ah(ah); + iounmap(ah->iobase); + ieee80211_free_hw(hw); + + return 0; +} + +static struct platform_driver ath_ahb_driver = { + .probe = ath_ahb_probe, + .remove = ath_ahb_remove, + .driver = { + .name = "ar231x-wmac", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(ath_ahb_driver); diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index ed2468220216..1ed7a88aeea9 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1647,6 +1647,32 @@ static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah) return &(ath5k_hw_common(ah)->regulatory); } +#ifdef CONFIG_ATH5K_AHB +#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000) + +static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg) +{ + /* On AR2315 and AR2317 the PCI clock domain registers + * are outside of the WMAC register space */ + if (unlikely((reg >= 0x4000) && (reg < 0x5000) && + (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6))) + return AR5K_AR2315_PCI_BASE + reg; + + return ah->iobase + reg; +} + +static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) +{ + return ioread32(ath5k_ahb_reg(ah, reg)); +} + +static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) +{ + iowrite32(val, ath5k_ahb_reg(ah, reg)); +} + +#else + static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) { return ioread32(ah->iobase + reg); @@ -1657,6 +1683,8 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) iowrite32(val, ah->iobase + reg); } +#endif + static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah) { return ath5k_hw_common(ah)->bus_ops->ath_bus_type; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index a4a09bb8f2f3..bc9cb356fa69 100644 --- 
a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -99,6 +99,15 @@ static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, /* Known SREVs */ static const struct ath5k_srev_name srev_names[] = { +#ifdef CONFIG_ATH5K_AHB + { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 }, + { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 }, + { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 }, + { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 }, + { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 }, + { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 }, + { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 }, +#else { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, @@ -117,6 +126,7 @@ static const struct ath5k_srev_name srev_names[] = { { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, +#endif { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, @@ -132,6 +142,10 @@ static const struct ath5k_srev_name srev_names[] = { { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, +#ifdef CONFIG_ATH5K_AHB + { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, + { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, +#endif { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, }; diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 0beb7e7d6075..ca4b7ccd697f 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -163,14 +163,20 @@ int ath5k_init_leds(struct ath5k_hw *ah) { int ret = 0; struct ieee80211_hw *hw = ah->hw; +#ifndef CONFIG_ATH5K_AHB struct pci_dev *pdev = ah->pdev; +#endif char name[ATH5K_LED_MAX_NAME_LEN + 1]; const struct pci_device_id *match; if (!ah->pdev) return 0; +#ifdef CONFIG_ATH5K_AHB + match = NULL; +#else match = pci_match_id(&ath5k_led_devices[0], pdev); +#endif if (match) { __set_bit(ATH_STAT_LEDSOFT, ah->status); ah->led_pin = ATH_PIN(match->driver_data); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a7ac72639c52..cab05f31223f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1138,8 +1138,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state); * @dev: PCI device that we're dealing with * @state: Saved state returned from pci_store_saved_state() */ -static int pci_load_saved_state(struct pci_dev *dev, - struct pci_saved_state *state) +int pci_load_saved_state(struct pci_dev *dev, + struct pci_saved_state *state) { struct pci_cap_saved_data *cap; @@ -1167,6 +1167,7 @@ static int pci_load_saved_state(struct pci_dev *dev, dev->state_saved = true; return 0; } +EXPORT_SYMBOL_GPL(pci_load_saved_state); /** * pci_load_and_free_saved_state - Reload the save state pointed to by state, diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index b0ce7cdee0c2..910e90bf16c6 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -147,7 +147,6 @@ config TCIC config PCMCIA_ALCHEMY_DEVBOARD tristate "Alchemy Db/Pb1xxx PCMCIA socket services" depends on MIPS_ALCHEMY && PCMCIA - select 64BIT_PHYS_ADDR help Enable this driver of you want PCMCIA support on your Alchemy Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300 @@ -158,7 +157,6 @@ config PCMCIA_ALCHEMY_DEVBOARD config 
PCMCIA_XXS1500 tristate "MyCable XXS1500 PCMCIA socket support" depends on PCMCIA && MIPS_XXS1500 - select 64BIT_PHYS_ADDR help Support for the PCMCIA/CF socket interface on MyCable XXS1500 systems. diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index ff8a027a4afb..d2ab06048169 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c @@ -93,6 +93,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) for (i = 0; i < sinfo->nskt; i++) soc_pcmcia_remove_one(&sinfo->skt[i]); + clk_put(sinfo->clk); kfree(sinfo); return 0; } diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c index 65b02c3e14ce..7bae7e549d8b 100644 --- a/drivers/pcmcia/sa1111_generic.c +++ b/drivers/pcmcia/sa1111_generic.c @@ -145,6 +145,12 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops, return -ENOMEM; s->soc.nr = ops->first + i; + s->soc.clk = clk_get(&dev->dev, NULL); + if (IS_ERR(s->soc.clk)) { + ret = PTR_ERR(s->soc.clk); + kfree(s); + return ret; + } soc_pcmcia_init_one(&s->soc, ops, &dev->dev); s->dev = dev; if (s->soc.nr) { @@ -220,6 +226,7 @@ static int pcmcia_remove(struct sa1111_dev *dev) for (; s; s = next) { next = s->next; soc_pcmcia_remove_one(&s->soc); + clk_put(s->soc.clk); kfree(s); } diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c index 54d3089d157b..cf6de2c2b329 100644 --- a/drivers/pcmcia/sa11xx_base.c +++ b/drivers/pcmcia/sa11xx_base.c @@ -135,14 +135,16 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, static int sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt) { - return sa1100_pcmcia_set_mecr(skt, cpufreq_get(0)); + unsigned long clk = clk_get_rate(skt->clk); + + return sa1100_pcmcia_set_mecr(skt, clk / 1000); } static int sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf) { struct soc_pcmcia_timing timing; - unsigned int clock = cpufreq_get(0); + unsigned int clock = clk_get_rate(skt->clk); unsigned long mecr = MECR; char *p = buf; @@ -218,6 +220,11 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, struct skt_dev_info *sinfo; struct soc_pcmcia_socket *skt; int i, ret = 0; + struct clk *clk; + + clk = clk_get(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); sa11xx_drv_pcmcia_ops(ops); @@ -226,12 +233,14 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, return -ENOMEM; sinfo->nskt = nr; + sinfo->clk = clk; /* Initialize processor specific parameters */ for (i = 0; i < nr; i++) { skt = &sinfo->skt[i]; skt->nr = first + i; + skt->clk = clk; soc_pcmcia_init_one(skt, ops, dev); ret = sa11xx_drv_pcmcia_add_one(skt); @@ -242,6 +251,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, if (ret) { while (--i >= 0) soc_pcmcia_remove_one(&sinfo->skt[i]); + clk_put(clk); kfree(sinfo); } else { dev_set_drvdata(dev, sinfo); diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index a2bc6ee1702e..933f4657515b 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -120,6 +120,8 @@ static void __soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt, if (skt->ops->hw_shutdown) skt->ops->hw_shutdown(skt); + + clk_disable_unprepare(skt->clk); } static void soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) @@ -131,6 +133,8 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { int ret = 0, i; + clk_prepare_enable(skt->clk); + if (skt->ops->hw_init) { ret = skt->ops->hw_init(skt); if 
(ret) diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c index bddb79105d67..ce5f22c4151d 100644 --- a/drivers/pinctrl/spear/pinctrl-plgpio.c +++ b/drivers/pinctrl/spear/pinctrl-plgpio.c @@ -724,5 +724,5 @@ static int __init plgpio_init(void) subsys_initcall(plgpio_init); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); -MODULE_DESCRIPTION("ST Microlectronics SPEAr PLGPIO driver"); +MODULE_DESCRIPTION("STMicroelectronics SPEAr PLGPIO driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index c0242ed13d9e..ecd36e332c3c 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -593,7 +593,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips) return; if (!ips->gpu_turbo_disable()) - dev_err(&ips->dev->dev, "failed to disable graphis turbo\n"); + dev_err(&ips->dev->dev, "failed to disable graphics turbo\n"); else ips->__gpu_turbo_on = false; } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 4511ddc1ac31..f15cddfeb897 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -987,6 +987,17 @@ config RTC_DRV_NUC900 If you say yes here you get support for the RTC subsystem of the NUC910/NUC920 used in embedded systems. +config RTC_DRV_OPAL + tristate "IBM OPAL RTC driver" + depends on PPC_POWERNV + default y + help + If you say yes here you get support for the PowerNV platform RTC + driver based on OPAL interfaces. + + This driver can also be built as a module. If so, the module + will be called rtc-opal. + comment "on-CPU RTC drivers" config RTC_DRV_DAVINCI diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index b188323c096a..c8ef3e1e6ccd 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -92,6 +92,7 @@ obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o +obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c new file mode 100644 index 000000000000..95f652165fe9 --- /dev/null +++ b/drivers/rtc/rtc-opal.c @@ -0,0 +1,261 @@ +/* + * IBM OPAL RTC driver + * Copyright (C) 2014 IBM + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. 
+ */ + +#define DRVNAME "rtc-opal" +#define pr_fmt(fmt) DRVNAME ": " fmt + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/rtc.h> +#include <linux/delay.h> +#include <linux/bcd.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <asm/opal.h> +#include <asm/firmware.h> + +static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm) +{ + tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) + + bcd2bin((y_m_d >> 16) & 0xff)) - 1900; + tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1; + tm->tm_mday = bcd2bin(y_m_d & 0xff); + tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff); + tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff); + tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff); + + GregorianDay(tm); +} + +static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) +{ + *y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24; + *y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16; + *y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8; + *y_m_d |= ((u32)bin2bcd(tm->tm_mday)); + + *h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56; + *h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48; + *h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40; +} + +static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) +{ + long rc = OPAL_BUSY; + u32 y_m_d; + u64 h_m_s_ms; + __be32 __y_m_d; + __be64 __h_m_s_ms; + + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { + rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); + if (rc == OPAL_BUSY_EVENT) + opal_poll_events(NULL); + else + msleep(10); + } + + if (rc != OPAL_SUCCESS) + return -EIO; + + y_m_d = be32_to_cpu(__y_m_d); + h_m_s_ms = be64_to_cpu(__h_m_s_ms); + opal_to_tm(y_m_d, h_m_s_ms, tm); + + return 0; +} + +static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) +{ + long rc = OPAL_BUSY; + u32 y_m_d = 0; + u64 h_m_s_ms = 0; + + tm_to_opal(tm, &y_m_d, &h_m_s_ms); + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { + rc = opal_rtc_write(y_m_d, h_m_s_ms); + if (rc == OPAL_BUSY_EVENT) + opal_poll_events(NULL); + else + msleep(10); + } + + return rc == OPAL_SUCCESS ? 0 : -EIO; +} + +/* + * TPO (Timed Power-On) + * + * The TPO get/set OPAL calls only care about the hour and minute; to stay + * consistent with the rtc utility time conversion functions, we store the + * value in a 'u64' and shift it by 32 bits before use. 
+ */ +static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) +{ + __be32 __y_m_d, __h_m; + struct opal_msg msg; + int rc, token; + u64 h_m_s_ms; + u32 y_m_d; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + if (token != -ERESTARTSYS) + pr_err("Failed to get the async token\n"); + + return token; + } + + rc = opal_tpo_read(token, &__y_m_d, &__h_m); + if (rc != OPAL_ASYNC_COMPLETION) { + rc = -EIO; + goto exit; + } + + rc = opal_async_wait_response(token, &msg); + if (rc) { + rc = -EIO; + goto exit; + } + + rc = be64_to_cpu(msg.params[1]); + if (rc != OPAL_SUCCESS) { + rc = -EIO; + goto exit; + } + + y_m_d = be32_to_cpu(__y_m_d); + h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32); + opal_to_tm(y_m_d, h_m_s_ms, &alarm->time); + +exit: + opal_async_release_token(token); + return rc; +} + +/* Set Timed Power-On */ +static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) +{ + u64 h_m_s_ms = 0, token; + struct opal_msg msg; + u32 y_m_d = 0; + int rc; + + tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms); + + token = opal_async_get_token_interruptible(); + if (token < 0) { + if (token != -ERESTARTSYS) + pr_err("Failed to get the async token\n"); + + return token; + } + + /* TPO, we care about hour and minute */ + rc = opal_tpo_write(token, y_m_d, + (u32)((h_m_s_ms >> 32) & 0xffff0000)); + if (rc != OPAL_ASYNC_COMPLETION) { + rc = -EIO; + goto exit; + } + + rc = opal_async_wait_response(token, &msg); + if (rc) { + rc = -EIO; + goto exit; + } + + rc = be64_to_cpu(msg.params[1]); + if (rc != OPAL_SUCCESS) + rc = -EIO; + +exit: + opal_async_release_token(token); + return rc; +} + +static const struct rtc_class_ops opal_rtc_ops = { + .read_time = opal_get_rtc_time, + .set_time = opal_set_rtc_time, + .read_alarm = opal_get_tpo_time, + .set_alarm = opal_set_tpo_time, +}; + +static int opal_rtc_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo", + NULL)) + device_set_wakeup_capable(&pdev->dev, true); + + rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + rtc->uie_unsupported = 1; + + return 0; +} + +static const struct of_device_id opal_rtc_match[] = { + { + .compatible = "ibm,opal-rtc", + }, + { } +}; +MODULE_DEVICE_TABLE(of, opal_rtc_match); + +static const struct platform_device_id opal_rtc_driver_ids[] = { + { + .name = "opal-rtc", + }, + { } +}; +MODULE_DEVICE_TABLE(platform, opal_rtc_driver_ids); + +static struct platform_driver opal_rtc_driver = { + .probe = opal_rtc_probe, + .id_table = opal_rtc_driver_ids, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + .of_match_table = opal_rtc_match, + }, +}; + +static int __init opal_rtc_init(void) +{ + if (!firmware_has_feature(FW_FEATURE_OPAL)) + return -ENODEV; + + return platform_driver_register(&opal_rtc_driver); +} + +static void __exit opal_rtc_exit(void) +{ + platform_driver_unregister(&opal_rtc_driver); +} + +MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("IBM OPAL RTC driver"); +MODULE_LICENSE("GPL"); + +module_init(opal_rtc_init); +module_exit(opal_rtc_exit); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 329db997ee66..4abf11965484 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1377,6 +1377,20 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) "I/O error, retry"); break; case -EINVAL: + /* + * device not valid so no I/O could be running + * 
handle CQR as termination successful + */ + cqr->status = DASD_CQR_CLEARED; + cqr->stopclk = get_tod_clock(); + cqr->starttime = 0; + /* no retries for invalid devices */ + cqr->retries = -1; + DBF_DEV_EVENT(DBF_ERR, device, "%s", + "EINVAL, handle as terminated"); + /* fake rc to success */ + rc = 0; + break; case -EBUSY: DBF_DEV_EVENT(DBF_ERR, device, "%s", "device busy, retry later"); @@ -1683,11 +1697,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, if (cqr->status == DASD_CQR_CLEAR_PENDING && scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { cqr->status = DASD_CQR_CLEARED; - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; dasd_device_clear_timer(device); wake_up(&dasd_flush_wq); - wake_up(&generic_waitq); dasd_schedule_device_bh(device); return; } @@ -2326,21 +2337,11 @@ retry: return -EAGAIN; /* normal recovery for basedev IO */ - if (__dasd_sleep_on_erp(cqr)) { + if (__dasd_sleep_on_erp(cqr)) + /* handle erp first */ goto retry; - /* remember that ERP was needed */ - rc = 1; - /* skip processing for active cqr */ - if (cqr->status != DASD_CQR_TERMINATED && - cqr->status != DASD_CQR_NEED_ERP) - break; - } } - /* start ERP requests in upper loop */ - if (rc) - goto retry; - return 0; } diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index f224d59c4b6b..90f39f79f5d7 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -99,15 +99,37 @@ void dasd_gendisk_free(struct dasd_block *block) int dasd_scan_partitions(struct dasd_block *block) { struct block_device *bdev; + int retry, rc; + retry = 5; bdev = bdget_disk(block->gdp, 0); - if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0) + if (!bdev) { + DBF_DEV_EVENT(DBF_ERR, block->base, "%s", + "scan partitions error, bdget returned NULL"); return -ENODEV; + } + + rc = blkdev_get(bdev, FMODE_READ, NULL); + if (rc < 0) { + DBF_DEV_EVENT(DBF_ERR, block->base, + "scan partitions error, blkdev_get returned %d", + rc); + return -ENODEV; + } /* * See fs/partition/check.c:register_disk,rescan_partitions * Can't call rescan_partitions directly. Use ioctl. 
*/ - ioctl_by_bdev(bdev, BLKRRPART, 0); + rc = ioctl_by_bdev(bdev, BLKRRPART, 0); + while (rc == -EBUSY && retry > 0) { + schedule(); + rc = ioctl_by_bdev(bdev, BLKRRPART, 0); + retry--; + DBF_DEV_EVENT(DBF_ERR, block->base, + "scan partitions error, retry %d rc %d", + retry, rc); + } + /* * Since the matching blkdev_put call to the blkdev_get in * this function is not called before dasd_destroy_partitions diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 56046ab39629..75d9896deccb 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/mempool.h> #include <linux/module.h> #include <linux/blkdev.h> #include <linux/genhd.h> @@ -20,13 +21,18 @@ debug_info_t *scm_debug; static int scm_major; +static mempool_t *aidaw_pool; static DEFINE_SPINLOCK(list_lock); static LIST_HEAD(inactive_requests); static unsigned int nr_requests = 64; +static unsigned int nr_requests_per_io = 8; static atomic_t nr_devices = ATOMIC_INIT(0); module_param(nr_requests, uint, S_IRUGO); MODULE_PARM_DESC(nr_requests, "Number of parallel requests."); +module_param(nr_requests_per_io, uint, S_IRUGO); +MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO."); + MODULE_DESCRIPTION("Block driver for s390 storage class memory."); MODULE_LICENSE("GPL"); MODULE_ALIAS("scm:scmdev*"); @@ -36,8 +42,8 @@ static void __scm_free_rq(struct scm_request *scmrq) struct aob_rq_header *aobrq = to_aobrq(scmrq); free_page((unsigned long) scmrq->aob); - free_page((unsigned long) scmrq->aidaw); __scm_free_rq_cluster(scmrq); + kfree(scmrq->request); kfree(aobrq); } @@ -53,6 +59,8 @@ static void scm_free_rqs(void) __scm_free_rq(scmrq); } spin_unlock_irq(&list_lock); + + mempool_destroy(aidaw_pool); } static int __scm_alloc_rq(void) @@ -65,17 +73,17 @@ static int __scm_alloc_rq(void) return -ENOMEM; scmrq = (void *) aobrq->data; - scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA); scmrq->aob = (void *) get_zeroed_page(GFP_DMA); - if (!scmrq->aob || !scmrq->aidaw) { - __scm_free_rq(scmrq); - return -ENOMEM; - } + if (!scmrq->aob) + goto free; - if (__scm_alloc_rq_cluster(scmrq)) { - __scm_free_rq(scmrq); - return -ENOMEM; - } + scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]), + GFP_KERNEL); + if (!scmrq->request) + goto free; + + if (__scm_alloc_rq_cluster(scmrq)) + goto free; INIT_LIST_HEAD(&scmrq->list); spin_lock_irq(&list_lock); @@ -83,12 +91,19 @@ static int __scm_alloc_rq(void) spin_unlock_irq(&list_lock); return 0; +free: + __scm_free_rq(scmrq); + return -ENOMEM; } static int scm_alloc_rqs(unsigned int nrqs) { int ret = 0; + aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0); + if (!aidaw_pool) + return -ENOMEM; + while (nrqs-- && !ret) ret = __scm_alloc_rq(); @@ -112,6 +127,18 @@ out: static void scm_request_done(struct scm_request *scmrq) { unsigned long flags; + struct msb *msb; + u64 aidaw; + int i; + + for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { + msb = &scmrq->aob->msb[i]; + aidaw = msb->data_addr; + + if ((msb->flags & MSB_FLAG_IDA) && aidaw && + IS_ALIGNED(aidaw, PAGE_SIZE)) + mempool_free(virt_to_page(aidaw), aidaw_pool); + } spin_lock_irqsave(&list_lock, flags); list_add(&scmrq->list, &inactive_requests); @@ -123,48 +150,90 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; } -static void scm_request_prepare(struct scm_request *scmrq) 
+static inline struct aidaw *scm_aidaw_alloc(void) +{ + struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC); + + return page ? page_address(page) : NULL; +} + +static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw) +{ + unsigned long _aidaw = (unsigned long) aidaw; + unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw; + + return (bytes / sizeof(*aidaw)) * PAGE_SIZE; +} + +struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes) +{ + struct aidaw *aidaw; + + if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes) + return scmrq->next_aidaw; + + aidaw = scm_aidaw_alloc(); + if (aidaw) + memset(aidaw, 0, PAGE_SIZE); + return aidaw; +} + +static int scm_request_prepare(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; struct scm_device *scmdev = bdev->gendisk->private_data; - struct aidaw *aidaw = scmrq->aidaw; - struct msb *msb = &scmrq->aob->msb[0]; + int pos = scmrq->aob->request.msb_count; + struct msb *msb = &scmrq->aob->msb[pos]; + struct request *req = scmrq->request[pos]; struct req_iterator iter; + struct aidaw *aidaw; struct bio_vec bv; + aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req)); + if (!aidaw) + return -ENOMEM; + msb->bs = MSB_BS_4K; - scmrq->aob->request.msb_count = 1; - msb->scm_addr = scmdev->address + - ((u64) blk_rq_pos(scmrq->request) << 9); - msb->oc = (rq_data_dir(scmrq->request) == READ) ? - MSB_OC_READ : MSB_OC_WRITE; + scmrq->aob->request.msb_count++; + msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); + msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE; msb->flags |= MSB_FLAG_IDA; msb->data_addr = (u64) aidaw; - rq_for_each_segment(bv, scmrq->request, iter) { + rq_for_each_segment(bv, req, iter) { WARN_ON(bv.bv_offset); msb->blk_count += bv.bv_len >> 12; aidaw->data_addr = (u64) page_address(bv.bv_page); aidaw++; } + + scmrq->next_aidaw = aidaw; + return 0; +} + +static inline void scm_request_set(struct scm_request *scmrq, + struct request *req) +{ + scmrq->request[scmrq->aob->request.msb_count] = req; } static inline void scm_request_init(struct scm_blk_dev *bdev, - struct scm_request *scmrq, - struct request *req) + struct scm_request *scmrq) { struct aob_rq_header *aobrq = to_aobrq(scmrq); struct aob *aob = scmrq->aob; + memset(scmrq->request, 0, + nr_requests_per_io * sizeof(scmrq->request[0])); memset(aob, 0, sizeof(*aob)); - memset(scmrq->aidaw, 0, PAGE_SIZE); aobrq->scmdev = bdev->scmdev; aob->request.cmd_code = ARQB_CMD_MOVE; aob->request.data = (u64) aobrq; - scmrq->request = req; scmrq->bdev = bdev; scmrq->retries = 4; scmrq->error = 0; + /* We don't use all msbs - place aidaws at the end of the aob page. 
*/ + scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; scm_request_cluster_init(scmrq); } @@ -180,9 +249,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) void scm_request_requeue(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; + int i; scm_release_cluster(scmrq); - blk_requeue_request(bdev->rq, scmrq->request); + for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) + blk_requeue_request(bdev->rq, scmrq->request[i]); + atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); scm_ensure_queue_restart(bdev); @@ -191,20 +263,41 @@ void scm_request_requeue(struct scm_request *scmrq) void scm_request_finish(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; + int i; scm_release_cluster(scmrq); - blk_end_request_all(scmrq->request, scmrq->error); + for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) + blk_end_request_all(scmrq->request[i], scmrq->error); + atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); } +static int scm_request_start(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + int ret; + + atomic_inc(&bdev->queued_reqs); + if (!scmrq->aob->request.msb_count) { + scm_request_requeue(scmrq); + return -EINVAL; + } + + ret = eadm_start_aob(scmrq->aob); + if (ret) { + SCM_LOG(5, "no subchannel"); + scm_request_requeue(scmrq); + } + return ret; +} + static void scm_blk_request(struct request_queue *rq) { struct scm_device *scmdev = rq->queuedata; struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); - struct scm_request *scmrq; + struct scm_request *scmrq = NULL; struct request *req; - int ret; while ((req = blk_peek_request(rq))) { if (req->cmd_type != REQ_TYPE_FS) { @@ -214,39 +307,64 @@ static void scm_blk_request(struct request_queue *rq) continue; } - if (!scm_permit_request(bdev, req)) { - scm_ensure_queue_restart(bdev); - return; - } - scmrq = scm_request_fetch(); + if (!scm_permit_request(bdev, req)) + goto out; + if (!scmrq) { - SCM_LOG(5, "no request"); - scm_ensure_queue_restart(bdev); - return; + scmrq = scm_request_fetch(); + if (!scmrq) { + SCM_LOG(5, "no request"); + goto out; + } + scm_request_init(bdev, scmrq); } - scm_request_init(bdev, scmrq, req); + scm_request_set(scmrq, req); + if (!scm_reserve_cluster(scmrq)) { SCM_LOG(5, "cluster busy"); + scm_request_set(scmrq, NULL); + if (scmrq->aob->request.msb_count) + goto out; + scm_request_done(scmrq); return; } + if (scm_need_cluster_request(scmrq)) { - atomic_inc(&bdev->queued_reqs); - blk_start_request(req); - scm_initiate_cluster_request(scmrq); - return; + if (scmrq->aob->request.msb_count) { + /* Start cluster requests separately. 
*/ + scm_request_set(scmrq, NULL); + if (scm_request_start(scmrq)) + return; + } else { + atomic_inc(&bdev->queued_reqs); + blk_start_request(req); + scm_initiate_cluster_request(scmrq); + } + scmrq = NULL; + continue; + } + + if (scm_request_prepare(scmrq)) { + SCM_LOG(5, "aidaw alloc failed"); + scm_request_set(scmrq, NULL); + goto out; } - scm_request_prepare(scmrq); - atomic_inc(&bdev->queued_reqs); blk_start_request(req); - ret = eadm_start_aob(scmrq->aob); - if (ret) { - SCM_LOG(5, "no subchannel"); - scm_request_requeue(scmrq); + if (scmrq->aob->request.msb_count < nr_requests_per_io) + continue; + + if (scm_request_start(scmrq)) return; - } + + scmrq = NULL; } +out: + if (scmrq) + scm_request_start(scmrq); + else + scm_ensure_queue_restart(bdev); } static void __scmrq_log_error(struct scm_request *scmrq) @@ -443,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev) spin_unlock_irqrestore(&bdev->lock, flags); } +static bool __init scm_blk_params_valid(void) +{ + if (!nr_requests_per_io || nr_requests_per_io > 64) + return false; + + return scm_cluster_size_valid(); +} + static int __init scm_blk_init(void) { int ret = -EINVAL; - if (!scm_cluster_size_valid()) + if (!scm_blk_params_valid()) goto out; ret = register_blkdev(0, "scm"); diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index e59331e6c2e5..09218cdc5129 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -30,8 +30,8 @@ struct scm_blk_dev { struct scm_request { struct scm_blk_dev *bdev; - struct request *request; - struct aidaw *aidaw; + struct aidaw *next_aidaw; + struct request **request; struct aob *aob; struct list_head list; u8 retries; @@ -55,6 +55,8 @@ void scm_blk_irq(struct scm_device *, void *, int); void scm_request_finish(struct scm_request *); void scm_request_requeue(struct scm_request *); +struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes); + int scm_drv_init(void); void scm_drv_cleanup(void); diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 9aae909d47a5..09db45296eed 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq) scmrq->cluster.state = CLUSTER_NONE; } -static bool clusters_intersect(struct scm_request *A, struct scm_request *B) +static bool clusters_intersect(struct request *A, struct request *B) { unsigned long firstA, lastA, firstB, lastB; - firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE; - lastA = (((u64) blk_rq_pos(A->request) << 9) + - blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE; + firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE; + lastA = (((u64) blk_rq_pos(A) << 9) + + blk_rq_bytes(A) - 1) / CLUSTER_SIZE; - firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE; - lastB = (((u64) blk_rq_pos(B->request) << 9) + - blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE; + firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE; + lastB = (((u64) blk_rq_pos(B) << 9) + + blk_rq_bytes(B) - 1) / CLUSTER_SIZE; return (firstB <= lastA && firstA <= lastB); } bool scm_reserve_cluster(struct scm_request *scmrq) { + struct request *req = scmrq->request[scmrq->aob->request.msb_count]; struct scm_blk_dev *bdev = scmrq->bdev; struct scm_request *iter; + int pos, add = 1; if (write_cluster_size == 0) return true; spin_lock(&bdev->lock); list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { - if (clusters_intersect(scmrq, iter) && - 
(rq_data_dir(scmrq->request) == WRITE || - rq_data_dir(iter->request) == WRITE)) { - spin_unlock(&bdev->lock); - return false; + if (iter == scmrq) { + /* + * We don't have to use clusters_intersect here, since + * cluster requests are always started separately. + */ + add = 0; + continue; + } + for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { + if (clusters_intersect(req, iter->request[pos]) && + (rq_data_dir(req) == WRITE || + rq_data_dir(iter->request[pos]) == WRITE)) { + spin_unlock(&bdev->lock); + return false; + } } } - list_add(&scmrq->cluster.list, &bdev->cluster_list); + if (add) + list_add(&scmrq->cluster.list, &bdev->cluster_list); spin_unlock(&bdev->lock); return true; @@ -114,14 +127,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) blk_queue_io_opt(bdev->rq, CLUSTER_SIZE); } -static void scm_prepare_cluster_request(struct scm_request *scmrq) +static int scm_prepare_cluster_request(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; struct scm_device *scmdev = bdev->gendisk->private_data; - struct request *req = scmrq->request; - struct aidaw *aidaw = scmrq->aidaw; + struct request *req = scmrq->request[0]; struct msb *msb = &scmrq->aob->msb[0]; struct req_iterator iter; + struct aidaw *aidaw; struct bio_vec bv; int i = 0; u64 addr; @@ -131,11 +144,9 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) scmrq->cluster.state = CLUSTER_READ; /* fall through */ case CLUSTER_READ: - scmrq->aob->request.msb_count = 1; msb->bs = MSB_BS_4K; msb->oc = MSB_OC_READ; msb->flags = MSB_FLAG_IDA; - msb->data_addr = (u64) aidaw; msb->blk_count = write_cluster_size; addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); @@ -146,6 +157,12 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) CLUSTER_SIZE)) msb->blk_count = 2 * write_cluster_size; + aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE); + if (!aidaw) + return -ENOMEM; + + scmrq->aob->request.msb_count = 1; + msb->data_addr = (u64) aidaw; for (i = 0; i < msb->blk_count; i++) { aidaw->data_addr = (u64) scmrq->cluster.buf[i]; aidaw++; @@ -153,6 +170,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) break; case CLUSTER_WRITE: + aidaw = (void *) msb->data_addr; msb->oc = MSB_OC_WRITE; for (addr = msb->scm_addr; @@ -173,22 +191,29 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) } break; } + return 0; } bool scm_need_cluster_request(struct scm_request *scmrq) { - if (rq_data_dir(scmrq->request) == READ) + int pos = scmrq->aob->request.msb_count; + + if (rq_data_dir(scmrq->request[pos]) == READ) return false; - return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE; + return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE; } /* Called with queue lock held. */ void scm_initiate_cluster_request(struct scm_request *scmrq) { - scm_prepare_cluster_request(scmrq); + if (scm_prepare_cluster_request(scmrq)) + goto requeue; if (eadm_start_aob(scmrq->aob)) - scm_request_requeue(scmrq); + goto requeue; + return; +requeue: + scm_request_requeue(scmrq); } bool scm_test_cluster_request(struct scm_request *scmrq) diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index db2cb1f8a1b5..a5c6f7e157aa 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -102,6 +102,16 @@ config SCLP_ASYNC want for inform other people about your kernel panics, need this feature and intend to run your kernel in LPAR. 
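The scm_blk_cluster hunk above reworks clusters_intersect() to compare struct request ranges directly, since one scm_request can now carry several block requests. The test itself is a closed-interval overlap check on write-cluster indices; a self-contained sketch of the arithmetic (standalone C with illustrative names; CLUSTER_SIZE is picked arbitrarily here, the driver derives it from write_cluster_size):

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_SHIFT	9		/* block-layer sectors are 512 bytes */
#define CLUSTER_SIZE	(64 * 1024)	/* example value only */

/* First and last (inclusive) cluster touched by a request starting at
 * sector 'pos' and spanning 'bytes' bytes; bytes > 0 is assumed. */
static void cluster_span(uint64_t pos, uint64_t bytes,
			 uint64_t *first, uint64_t *last)
{
	uint64_t start = pos << SECTOR_SHIFT;

	*first = start / CLUSTER_SIZE;
	*last = (start + bytes - 1) / CLUSTER_SIZE;
}

/* Closed intervals [firstA, lastA] and [firstB, lastB] overlap iff
 * neither lies entirely before the other. */
static bool clusters_overlap(uint64_t firstA, uint64_t lastA,
			     uint64_t firstB, uint64_t lastB)
{
	return firstB <= lastA && firstA <= lastB;
}

As in the driver, an overlap only matters when at least one of the two requests is a write; concurrent reads of the same cluster are harmless.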
+config SCLP_ASYNC_ID + string "Component ID for Call Home" + depends on SCLP_ASYNC + default "000000000" + help + The Component ID for Call Home is used to identify the correct + problem reporting queue the call home records should be sent to. + + If you are unsure, please use the default value "000000000". + config HMC_DRV def_tristate m prompt "Support for file transfers from HMC drive CD/DVD-ROM" diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 5f9f929e891c..19c25427f27f 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -137,7 +137,8 @@ static int sclp_async_send_wait(char *message) * Retain Queue * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS) */ - strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id)); + strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID, + sizeof(sccb->evbuf.comp_id)); sccb->evbuf.header.length = sizeof(sccb->evbuf); sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header); sccb->header.function_code = SCLP_NORMAL_WRITE; diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 327cb19ad0b0..d3d1936057b4 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -1090,7 +1090,7 @@ tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) "channel path 0x%x on CU", sense->fmt.f71.md[1]); else - snprintf(service, BUFSIZE, "Repair will disable cannel" + snprintf(service, BUFSIZE, "Repair will disable channel" " paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; @@ -1481,7 +1481,7 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, } if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { - DBF_EVENT(2, "cannel end\n"); + DBF_EVENT(2, "channel end\n"); return TAPE_IO_PENDING; } diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index 37f0834300ea..bee8c11cd086 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -31,7 +31,7 @@ MODULE_DESCRIPTION("driver for s390 eadm subchannels"); MODULE_LICENSE("GPL"); -#define EADM_TIMEOUT (5 * HZ) +#define EADM_TIMEOUT (7 * HZ) static DEFINE_SPINLOCK(list_lock); static LIST_HEAD(eadm_list); diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 5ff1ce7ba1f4..cdd4ab683be9 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -373,10 +373,10 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, if (unlikely(task->ata_task.device_control_reg_update)) scb->header.opcode = CONTROL_ATA_DEV; - else if (dev->sata_dev.command_set == ATA_COMMAND_SET) - scb->header.opcode = INITIATE_ATA_TASK; - else + else if (dev->sata_dev.class == ATA_DEV_ATAPI) scb->header.opcode = INITIATE_ATAPI_TASK; + else + scb->header.opcode = INITIATE_ATA_TASK; scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ if (dev->port->oob_mode == SAS_OOB_MODE) @@ -387,7 +387,7 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, if (likely(!task->ata_task.device_control_reg_update)) scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + if (dev->sata_dev.class == ATA_DEV_ATAPI) memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, 16); scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); @@ -399,7 +399,7 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct 
sas_task *task, if (task->ata_task.dma_xfer) flags |= DATA_XFER_MODE_DMA; if (task->ata_task.use_ncq && - dev->sata_dev.command_set != ATAPI_COMMAND_SET) + dev->sata_dev.class != ATA_DEV_ATAPI) flags |= ATA_Q_TYPE_NCQ; flags |= data_dir_flags[task->data_dir]; scb->ata_task.ata_flags = flags; diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 56e38096f0c4..cfd0084f1cd2 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -694,7 +694,7 @@ sci_io_request_construct_sata(struct isci_request *ireq, } /* ATAPI */ - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && + if (dev->sata_dev.class == ATA_DEV_ATAPI && task->ata_task.fis.command == ATA_CMD_PACKET) { sci_atapi_construct(ireq); return SCI_SUCCESS; @@ -2980,7 +2980,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) state = SCI_REQ_SMP_WAIT_RESP; } else if (task && sas_protocol_ata(task->task_proto) && !task->ata_task.use_ncq) { - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && + if (dev->sata_dev.class == ATA_DEV_ATAPI && task->ata_task.fis.command == ATA_CMD_PACKET) { state = SCI_REQ_ATAPI_WAIT_H2D; } else if (task->data_dir == DMA_NONE) { diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 3f63c6318b0d..6dcaed0c1fc8 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -588,7 +588,7 @@ int isci_task_abort_task(struct sas_task *task) ret = TMF_RESP_FUNC_COMPLETE; } else { - /* Fill in the tmf stucture */ + /* Fill in the tmf structure */ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, old_request); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 577770fdee86..932d9cc98d2f 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -138,7 +138,7 @@ static void sas_ata_task_done(struct sas_task *task) if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || ((stat->stat == SAM_STAT_CHECK_CONDITION && - dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { + dev->sata_dev.class == ATA_DEV_ATAPI))) { memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE); if (!link->sactive) { @@ -272,7 +272,7 @@ static struct sas_internal *dev_to_sas_internal(struct domain_device *dev) return to_sas_internal(dev->port->ha->core.shost->transportt); } -static void sas_get_ata_command_set(struct domain_device *dev); +static int sas_get_ata_command_set(struct domain_device *dev); int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) { @@ -297,8 +297,7 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) } memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis, sizeof(struct dev_to_host_fis)); - /* TODO switch to ata_dev_classify() */ - sas_get_ata_command_set(dev); + dev->sata_dev.class = sas_get_ata_command_set(dev); } return 0; } @@ -419,18 +418,7 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, if (ret && ret != -EAGAIN) sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret); - /* XXX: if the class changes during the reset the upper layer - * should be informed, if the device has gone away we assume - * libsas will eventually delete it - */ - switch (dev->sata_dev.command_set) { - case ATA_COMMAND_SET: - *class = ATA_DEV_ATA; - break; - case ATAPI_COMMAND_SET: - *class = ATA_DEV_ATAPI; - break; - } + *class = dev->sata_dev.class; ap->cbl = ATA_CBL_SATA; return ret; @@ -619,50 +607,18 @@ void sas_ata_task_abort(struct sas_task *task) complete(waiting); } -static void 
sas_get_ata_command_set(struct domain_device *dev) +static int sas_get_ata_command_set(struct domain_device *dev) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; + struct ata_taskfile tf; if (dev->dev_type == SAS_SATA_PENDING) - return; + return ATA_DEV_UNKNOWN; + + ata_tf_from_fis((const u8 *)fis, &tf); - if ((fis->sector_count == 1 && /* ATA */ - fis->lbal == 1 && - fis->lbam == 0 && - fis->lbah == 0 && - fis->device == 0) - || - (fis->sector_count == 0 && /* CE-ATA (mATA) */ - fis->lbal == 0 && - fis->lbam == 0xCE && - fis->lbah == 0xAA && - (fis->device & ~0x10) == 0)) - - dev->sata_dev.command_set = ATA_COMMAND_SET; - - else if ((fis->interrupt_reason == 1 && /* ATAPI */ - fis->lbal == 1 && - fis->byte_count_low == 0x14 && - fis->byte_count_high == 0xEB && - (fis->device & ~0x10) == 0)) - - dev->sata_dev.command_set = ATAPI_COMMAND_SET; - - else if ((fis->sector_count == 1 && /* SEMB */ - fis->lbal == 1 && - fis->lbam == 0x3C && - fis->lbah == 0xC3 && - fis->device == 0) - || - (fis->interrupt_reason == 1 && /* SATA PM */ - fis->lbal == 1 && - fis->byte_count_low == 0x69 && - fis->byte_count_high == 0x96 && - (fis->device & ~0x10) == 0)) - - /* Treat it as a superset? */ - dev->sata_dev.command_set = ATAPI_COMMAND_SET; + return ata_dev_classify(&tf); } void sas_probe_sata(struct asd_sas_port *port) @@ -768,7 +724,7 @@ int sas_discover_sata(struct domain_device *dev) if (dev->dev_type == SAS_SATA_PM) return -ENODEV; - sas_get_ata_command_set(dev); + dev->sata_dev.class = sas_get_ata_command_set(dev); sas_fill_in_rphy(dev, dev->rphy); res = sas_notify_lldd_dev_found(dev); diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 953fd9b953c7..1e85c07e3b62 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -166,7 +166,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance) XXX: Since bus errors in the PDMA routines never happen on my computer, the bus error code is untested. If the code works as intended, a bus error results in Pseudo-DMA - beeing disabled, meaning that the driver switches to slow handshake. + being disabled, meaning that the driver switches to slow handshake. If bus errors are NOT extremely rare, this has to be changed. */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f05580e693d0..ff283d23788a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6783,7 +6783,7 @@ static int __init megasas_init(void) rval = pci_register_driver(&megasas_pci_driver); if (rval) { - printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); + printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); goto err_pcidrv; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index fa0567c96050..7f842c88abd2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h @@ -224,7 +224,7 @@ struct mpt2_ioctl_eventreport { }; /** - * struct mpt2_ioctl_command - generic mpt firmware passthru ioclt + * struct mpt2_ioctl_command - generic mpt firmware passthru ioctl * @hdr - generic header * @timeout - command timeout in seconds. (if zero then use driver default * value). 
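The libsas hunks above retire the hand-rolled FIS pattern matching in sas_get_ata_command_set() and let libata classify the device via ata_tf_from_fis()/ata_dev_classify(), caching the result in dev->sata_dev.class. The classification keys off the LBA-mid/high bytes of the device-to-host signature FIS; a minimal illustration of the idea using the SATA-defined signature values (names are illustrative, this is not libata's implementation):

#include <linux/types.h>

enum sig_class { SIG_ATA, SIG_ATAPI, SIG_PMP, SIG_SEMB, SIG_UNKNOWN };

/* Classify a device from the signature bytes it writes into the
 * taskfile after reset; the byte pairs are fixed by the SATA spec. */
static enum sig_class classify_by_signature(u8 lbam, u8 lbah)
{
	if (lbam == 0x00 && lbah == 0x00)
		return SIG_ATA;
	if (lbam == 0x14 && lbah == 0xeb)
		return SIG_ATAPI;
	if (lbam == 0x69 && lbah == 0x96)
		return SIG_PMP;
	if (lbam == 0x3c && lbah == 0xc3)
		return SIG_SEMB;
	return SIG_UNKNOWN;
}

Centralizing the lookup this way is what lets aic94xx, isci, mvsas and pm8001 (in the surrounding hunks) all test dev->sata_dev.class against ATA_DEV_ATAPI instead of each keeping a private command_set flag.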
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 85d86a5cdb60..2d5ab6d969ec 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -479,7 +479,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, if (task->ata_task.use_ncq) flags |= MCH_FPDMA; - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { + if (dev->sata_dev.class == ATA_DEV_ATAPI) { if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) flags |= MCH_ATAPI; } @@ -546,7 +546,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS and ATAPI CDB */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + if (dev->sata_dev.class == ATA_DEV_ATAPI) memcpy(buf_cmd + STP_ATAPI_CMD, task->ata_task.atapi_packet, 16); diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 933f21471951..96dcc097a463 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4367,7 +4367,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); } if (task->ata_task.use_ncq && - dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + dev->sata_dev.class != ATA_DEV_ATAPI) { ATAP = 0x07; /* FPDMA */ PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index b06443a0db2d..05cce463ab01 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -4077,7 +4077,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); } if (task->ata_task.use_ncq && - dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + dev->sata_dev.class != ATA_DEV_ATAPI) { ATAP = 0x07; /* FPDMA */ PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index b3b48b5a984c..5298def33733 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c @@ -383,6 +383,7 @@ static int ps3rom_probe(struct ps3_system_bus_device *_dev) if (!host) { dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n", __func__, __LINE__); + error = -ENOMEM; goto fail_teardown; } diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c index 09077067b0c8..7b986f9f213f 100644 --- a/drivers/ssb/driver_mipscore.c +++ b/drivers/ssb/driver_mipscore.c @@ -15,6 +15,9 @@ #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/time.h> +#ifdef CONFIG_BCM47XX +#include <bcm47xx_nvram.h> +#endif #include "ssb_private.h" @@ -210,6 +213,7 @@ static void ssb_mips_serial_init(struct ssb_mipscore *mcore) static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) { struct ssb_bus *bus = mcore->dev->bus; + struct ssb_sflash *sflash = &mcore->sflash; struct ssb_pflash *pflash = &mcore->pflash; /* When there is no chipcommon on the bus there is 4MB flash */ @@ -242,7 +246,15 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) } ssb_pflash: - if (pflash->present) { + if (sflash->present) { +#ifdef CONFIG_BCM47XX + bcm47xx_nvram_init_from_mem(sflash->window, sflash->size); +#endif + } else if (pflash->present) { +#ifdef CONFIG_BCM47XX + bcm47xx_nvram_init_from_mem(pflash->window, pflash->window_size); +#endif + ssb_pflash_data.width = pflash->buswidth; ssb_pflash_resource.start = pflash->window; ssb_pflash_resource.end = pflash->window + 
pflash->window_size; diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index e9186cdf35e9..33ac39bf75e5 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -335,7 +335,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, desc += XCOPY_SEGMENT_DESC_LEN; break; default: - pr_err("XCOPY unspported segment descriptor" + pr_err("XCOPY unsupported segment descriptor" "type: 0x%02x\n", desc[0]); goto out; } diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c index 946562389ca8..3be9519654e5 100644 --- a/drivers/tc/tc.c +++ b/drivers/tc/tc.c @@ -83,8 +83,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus) /* Found a board, allocate it an entry in the list */ tdev = kzalloc(sizeof(*tdev), GFP_KERNEL); if (!tdev) { - printk(KERN_ERR "tc%x: unable to allocate tc_dev\n", - slot); + pr_err("tc%x: unable to allocate tc_dev\n", slot); goto out_err; } dev_set_name(&tdev->dev, "tc%x", slot); @@ -117,10 +116,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus) tdev->resource.start = extslotaddr; tdev->resource.end = extslotaddr + devsize - 1; } else { - printk(KERN_ERR "%s: Cannot provide slot space " - "(%dMiB required, up to %dMiB supported)\n", - dev_name(&tdev->dev), devsize >> 20, - max(slotsize, extslotsize) >> 20); + pr_err("%s: Cannot provide slot space " + "(%ldMiB required, up to %ldMiB supported)\n", + dev_name(&tdev->dev), (long)(devsize >> 20), + (long)(max(slotsize, extslotsize) >> 20)); kfree(tdev); goto out_err; } @@ -147,14 +146,12 @@ static int __init tc_init(void) { /* Initialize the TURBOchannel bus */ if (tc_bus_get_info(&tc_bus)) - return 0; + goto out_err; INIT_LIST_HEAD(&tc_bus.devices); dev_set_name(&tc_bus.dev, "tc"); - if (device_register(&tc_bus.dev)) { - put_device(&tc_bus.dev); - return 0; - } + if (device_register(&tc_bus.dev)) + goto out_err_device; if (tc_bus.info.slot_size) { unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000; @@ -172,8 +169,8 @@ static int __init tc_init(void) tc_bus.resource[0].flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, &tc_bus.resource[0]) < 0) { - printk(KERN_ERR "tc: Cannot reserve resource\n"); - return 0; + pr_err("tc: Cannot reserve resource\n"); + goto out_err_device; } if (tc_bus.ext_slot_size) { tc_bus.resource[1].start = tc_bus.ext_slot_base; @@ -184,10 +181,8 @@ static int __init tc_init(void) tc_bus.resource[1].flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, &tc_bus.resource[1]) < 0) { - printk(KERN_ERR - "tc: Cannot reserve resource\n"); - release_resource(&tc_bus.resource[0]); - return 0; + pr_err("tc: Cannot reserve resource\n"); + goto out_err_resource; } } @@ -195,6 +190,13 @@ static int __init tc_init(void) } return 0; + +out_err_resource: + release_resource(&tc_bus.resource[0]); +out_err_device: + put_device(&tc_bus.dev); +out_err: + return 0; } subsys_initcall(tc_init); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 98f8bcaf3e7e..a26653fe788c 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -701,7 +701,7 @@ config PDC_CONSOLE Saying Y here will enable the software based PDC console to be used as the system console. This is useful for machines in which the hardware based console has not been written yet. The - following steps must be competed to use the PDC console: + following steps must be completed to use the PDC console: 1. create the device entry (mknod /dev/ttyB0 c 11 0) 2. 
Edit the /etc/inittab to start a getty listening on /dev/ttyB0 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 96fafed92b76..0ffb4ed0a945 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -103,6 +103,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04f3, 0x009b), .driver_info = USB_QUIRK_DEVICE_QUALIFIER }, + { USB_DEVICE(0x04f3, 0x010c), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + { USB_DEVICE(0x04f3, 0x016f), .driver_info = USB_QUIRK_DEVICE_QUALIFIER }, diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 59ab62c92b66..ea2b9c374305 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -396,7 +396,7 @@ static int hidg_setup(struct usb_function *f, case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_REPORT): - VDBG(cdev, "set_report | wLenght=%d\n", ctrl->wLength); + VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength); goto stall; break; diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index ebf09f439f3a..ff97ac93ac03 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c @@ -28,7 +28,7 @@ * * Why is *this* driver using two configurations, rather than setting up * two interfaces with different functions? To help verify that multiple - * configuration infrastucture is working correctly; also, so that it can + * configuration infrastructure is working correctly; also, so that it can * work with low capability USB controllers without four bulk endpoints. */ diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c index f6459dfb6f54..5e44407aa099 100644 --- a/drivers/usb/host/ehci-sysfs.c +++ b/drivers/usb/host/ehci-sysfs.c @@ -132,7 +132,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev, if (allocated_max > uframe_periodic_max) { ehci_info(ehci, - "cannot decrease uframe_periodic_max becase " + "cannot decrease uframe_periodic_max because " "periodic bandwidth is already allocated " "(%u > %u)\n", allocated_max, uframe_periodic_max); diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 3de1278677d0..ecf02b2623e8 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -4958,7 +4958,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev, if (allocated_max > uframe_periodic_max) { fotg210_info(fotg210, - "cannot decrease uframe_periodic_max becase " + "cannot decrease uframe_periodic_max because " "periodic bandwidth is already allocated " "(%u > %u)\n", allocated_max, uframe_periodic_max); diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c index abe42f31559f..664d2aa1239c 100644 --- a/drivers/usb/host/fusbh200-hcd.c +++ b/drivers/usb/host/fusbh200-hcd.c @@ -4893,7 +4893,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev, if (allocated_max > uframe_periodic_max) { fusbh200_info(fusbh200, - "cannot decrease uframe_periodic_max becase " + "cannot decrease uframe_periodic_max because " "periodic bandwidth is already allocated " "(%u > %u)\n", allocated_max, uframe_periodic_max); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 7064eb8d6142..8bfc47c29828 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -64,7 +64,7 @@ DEVICE(vivopay, VIVOPAY_IDS); /* Motorola USB Phone driver */ #define MOTO_IDS() \ { 
USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \ - { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */ \ + { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ \ { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ \ { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ \ { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c index cee9602f9a7b..716bfad6a1c0 100644 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c +++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c @@ -570,5 +570,5 @@ static struct platform_driver exynos_mipi_dsi_driver = { module_platform_driver(exynos_mipi_dsi_driver); MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); -MODULE_DESCRIPTION("Samusung SoC MIPI-DSI driver"); +MODULE_DESCRIPTION("Samsung SoC MIPI-DSI driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c index 85edabfdef5a..2358a2fbbbcd 100644 --- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c +++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c @@ -876,5 +876,5 @@ int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim, } MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); -MODULE_DESCRIPTION("Samusung SoC MIPI-DSI common driver"); +MODULE_DESCRIPTION("Samsung SoC MIPI-DSI common driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c index 9690216d38ff..c0abe276ee55 100644 --- a/drivers/video/fbdev/sa1100fb.c +++ b/drivers/video/fbdev/sa1100fb.c @@ -178,6 +178,7 @@ #include <linux/dma-mapping.h> #include <linux/mutex.h> #include <linux/io.h> +#include <linux/clk.h> #include <video/sa1100fb.h> @@ -416,9 +417,9 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) var->transp.offset); #ifdef CONFIG_CPU_FREQ - dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n", + dev_dbg(fbi->dev, "dma period = %d ps, clock = %ld kHz\n", sa1100fb_display_dma_period(var), - cpufreq_get(smp_processor_id())); + clk_get_rate(fbi->clk) / 1000); #endif return 0; @@ -592,9 +593,10 @@ static struct fb_ops sa1100fb_ops = { * Calculate the PCD value from the clock rate (in picoseconds). * We take account of the PPCR clock setting. */ -static inline unsigned int get_pcd(unsigned int pixclock, unsigned int cpuclock) +static inline unsigned int get_pcd(struct sa1100fb_info *fbi, + unsigned int pixclock) { - unsigned int pcd = cpuclock / 100; + unsigned int pcd = clk_get_rate(fbi->clk) / 100 / 1000; pcd *= pixclock; pcd /= 10000000; @@ -673,7 +675,7 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_ LCCR2_BegFrmDel(var->upper_margin) + LCCR2_EndFrmDel(var->lower_margin); - pcd = get_pcd(var->pixclock, cpufreq_get(0)); + pcd = get_pcd(fbi, var->pixclock); new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 | (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) | (var->sync & FB_SYNC_VERT_HIGH_ACT ? 
LCCR3_VrtSnchH : LCCR3_VrtSnchL); @@ -787,6 +789,9 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi) fbi->palette_cpu[0] &= 0xcfff; fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var); + /* enable LCD controller clock */ + clk_prepare_enable(fbi->clk); + /* Sequence from 11.7.10 */ writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3); writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2); @@ -831,6 +836,9 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi) schedule_timeout(20 * HZ / 1000); remove_wait_queue(&fbi->ctrlr_wait, &wait); + + /* disable LCD controller clock */ + clk_disable_unprepare(fbi->clk); } /* @@ -1009,7 +1017,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct sa1100fb_info *fbi = TO_INF(nb, freq_transition); - struct cpufreq_freqs *f = data; u_int pcd; switch (val) { @@ -1018,7 +1025,7 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val, break; case CPUFREQ_POSTCHANGE: - pcd = get_pcd(fbi->fb.var.pixclock, f->new); + pcd = get_pcd(fbi, fbi->fb.var.pixclock); fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); break; @@ -1225,6 +1232,13 @@ static int sa1100fb_probe(struct platform_device *pdev) if (!fbi) goto failed; + fbi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(fbi->clk)) { + ret = PTR_ERR(fbi->clk); + fbi->clk = NULL; + goto failed; + } + fbi->base = ioremap(res->start, resource_size(res)); if (!fbi->base) goto failed; @@ -1277,6 +1291,8 @@ static int sa1100fb_probe(struct platform_device *pdev) failed: if (fbi) iounmap(fbi->base); + if (fbi->clk) + clk_put(fbi->clk); kfree(fbi); release_mem_region(res->start, resource_size(res)); return ret; diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h index fc5d4292fad6..0139d13377a5 100644 --- a/drivers/video/fbdev/sa1100fb.h +++ b/drivers/video/fbdev/sa1100fb.h @@ -68,6 +68,7 @@ struct sa1100fb_info { #endif const struct sa1100fb_mach_info *inf; + struct clk *clk; }; #define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member) diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index ebd8f218a788..810ad419e34c 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; phys_addr_t paddr = dma; - BUG_ON(paddr != dma); /* truncation has occurred, should never happen */ - paddr |= baddr & ~PAGE_MASK; return paddr; @@ -399,11 +397,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, * buffering it. */ if (dma_capable(dev, dev_addr, size) && - !range_straddles_page_boundary(phys, size) && !swiotlb_force) { + !range_straddles_page_boundary(phys, size) && + !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) && + !swiotlb_force) { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed * by the function. 
*/ - xen_dma_map_page(dev, page, offset, size, dir, attrs); + xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs); return dev_addr; } @@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, return DMA_ERROR_CODE; xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), - map & ~PAGE_MASK, size, dir, attrs); + dev_addr, map & ~PAGE_MASK, size, dir, attrs); dev_addr = xen_phys_to_bus(map); /* @@ -447,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, BUG_ON(dir == DMA_NONE); - xen_dma_unmap_page(hwdev, paddr, size, dir, attrs); + xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); /* NOTE: We use dev_addr here, not paddr! */ if (is_xen_swiotlb_buffer(dev_addr)) { @@ -495,14 +495,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, BUG_ON(dir == DMA_NONE); if (target == SYNC_FOR_CPU) - xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); + xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); /* NOTE: We use dev_addr here, not paddr! */ if (is_xen_swiotlb_buffer(dev_addr)) swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); if (target == SYNC_FOR_DEVICE) - xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); + xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); if (dir != DMA_FROM_DEVICE) return; @@ -557,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, dma_addr_t dev_addr = xen_phys_to_bus(paddr); if (swiotlb_force || + xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) || !dma_capable(hwdev, dev_addr, sg->length) || range_straddles_page_boundary(paddr, sg->length)) { phys_addr_t map = swiotlb_tbl_map_single(hwdev, @@ -574,6 +575,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, return 0; } xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), + dev_addr, map & ~PAGE_MASK, sg->length, dir, @@ -584,6 +586,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, * xen_dma_map_page, only in the potential cache flushes executed * by the function. 
*/ xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT), + dev_addr, paddr & ~PAGE_MASK, sg->length, dir, diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c index 828dddc360df..f16a30e2a110 100644 --- a/drivers/xen/xen-pciback/passthrough.c +++ b/drivers/xen/xen-pciback/passthrough.c @@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, } static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, - struct pci_dev *dev) + struct pci_dev *dev, bool lock) { struct passthrough_dev_data *dev_data = pdev->pci_dev_data; struct pci_dev_entry *dev_entry, *t; @@ -87,8 +87,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, mutex_unlock(&dev_data->lock); - if (found_dev) + if (found_dev) { + if (lock) + device_lock(&found_dev->dev); pcistub_put_pci_dev(found_dev); + if (lock) + device_unlock(&found_dev->dev); + } } static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev) @@ -156,8 +161,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev) struct pci_dev_entry *dev_entry, *t; list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) { + struct pci_dev *dev = dev_entry->dev; list_del(&dev_entry->list); - pcistub_put_pci_dev(dev_entry->dev); + device_lock(&dev->dev); + pcistub_put_pci_dev(dev); + device_unlock(&dev->dev); kfree(dev_entry); } diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 017069a455d4..cc3cbb4435f8 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -105,7 +105,7 @@ static void pcistub_device_release(struct kref *kref) */ __pci_reset_function_locked(dev); if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) - dev_dbg(&dev->dev, "Could not reload PCI state\n"); + dev_info(&dev->dev, "Could not reload PCI state\n"); else pci_restore_state(dev); @@ -250,11 +250,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove * * As such we have to be careful. + * + * To make this easier, the caller has to hold the device lock. */ void pcistub_put_pci_dev(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; unsigned long flags; + struct xen_pcibk_dev_data *dev_data; + int ret; spin_lock_irqsave(&pcistub_devices_lock, flags); @@ -276,13 +280,20 @@ void pcistub_put_pci_dev(struct pci_dev *dev) /* Cleanup our device * (so it's ready for the next domain) */ + device_lock_assert(&dev->dev); + __pci_reset_function_locked(dev); - /* This is OK - we are running from workqueue context - * and want to inhibit the user from fiddling with 'reset' - */ - pci_reset_function(dev); - pci_restore_state(dev); - + dev_data = pci_get_drvdata(dev); + ret = pci_load_saved_state(dev, dev_data->pci_saved_state); + if (!ret) { + /* + * The usual sequence is pci_save_state & pci_restore_state + * but the guest might have messed the configuration space up. + * Use the initial version (when device was bound to us). + */ + pci_restore_state(dev); + } else + dev_info(&dev->dev, "Could not reload PCI state\n"); /* This disables the device. */ xen_pcibk_reset_device(dev); @@ -554,12 +565,14 @@ static void pcistub_remove(struct pci_dev *dev) spin_unlock_irqrestore(&pcistub_devices_lock, flags); if (found_psdev) { - dev_dbg(&dev->dev, "found device to remove - in use? %p\n", - found_psdev->pdev); + dev_dbg(&dev->dev, "found device to remove %s\n", + found_psdev->pdev ? 
"- in-use" : ""); if (found_psdev->pdev) { - pr_warn("****** removing device %s while still in-use! ******\n", - pci_name(found_psdev->dev)); + int domid = xen_find_device_domain_owner(dev); + + pr_warn("****** removing device %s while still in-use by domain %d! ******\n", + pci_name(found_psdev->dev), domid); pr_warn("****** driver domain may still access this device's i/o resources!\n"); pr_warn("****** shutdown driver domain before binding device\n"); pr_warn("****** to other drivers or domains\n"); @@ -567,7 +580,8 @@ static void pcistub_remove(struct pci_dev *dev) /* N.B. This ends up calling pcistub_put_pci_dev which ends up * doing the FLR. */ xen_pcibk_release_pci_dev(found_psdev->pdev, - found_psdev->dev); + found_psdev->dev, + false /* caller holds the lock. */); } spin_lock_irqsave(&pcistub_devices_lock, flags); @@ -629,10 +643,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, { pci_ers_result_t res = result; struct xen_pcie_aer_op *aer_op; + struct xen_pcibk_device *pdev = psdev->pdev; + struct xen_pci_sharedinfo *sh_info = pdev->sh_info; int ret; /*with PV AER drivers*/ - aer_op = &(psdev->pdev->sh_info->aer_op); + aer_op = &(sh_info->aer_op); aer_op->cmd = aer_cmd ; /*useful for error_detected callback*/ aer_op->err = state; @@ -653,36 +669,36 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, * this flag to judge whether we need to check pci-front give aer * service ack signal */ - set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags); + set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags); /*It is possible that a pcifront conf_read_write ops request invokes * the callback which cause the spurious execution of wake_up. * Yet it is harmless and better than a spinlock here */ set_bit(_XEN_PCIB_active, - (unsigned long *)&psdev->pdev->sh_info->flags); + (unsigned long *)&sh_info->flags); wmb(); - notify_remote_via_irq(psdev->pdev->evtchn_irq); + notify_remote_via_irq(pdev->evtchn_irq); ret = wait_event_timeout(xen_pcibk_aer_wait_queue, !(test_bit(_XEN_PCIB_active, (unsigned long *) - &psdev->pdev->sh_info->flags)), 300*HZ); + &sh_info->flags)), 300*HZ); if (!ret) { if (test_bit(_XEN_PCIB_active, - (unsigned long *)&psdev->pdev->sh_info->flags)) { + (unsigned long *)&sh_info->flags)) { dev_err(&psdev->dev->dev, "pcifront aer process not responding!\n"); clear_bit(_XEN_PCIB_active, - (unsigned long *)&psdev->pdev->sh_info->flags); + (unsigned long *)&sh_info->flags); aer_op->err = PCI_ERS_RESULT_NONE; return res; } } - clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags); + clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags); if (test_bit(_XEN_PCIF_active, - (unsigned long *)&psdev->pdev->sh_info->flags)) { + (unsigned long *)&sh_info->flags)) { dev_dbg(&psdev->dev->dev, "schedule pci_conf service in " DRV_NAME "\n"); xen_pcibk_test_and_schedule_op(psdev->pdev); @@ -1502,6 +1518,53 @@ parse_error: fs_initcall(pcistub_init); #endif +#ifdef CONFIG_PCI_IOV +static struct pcistub_device *find_vfs(const struct pci_dev *pdev) +{ + struct pcistub_device *psdev = NULL; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&pcistub_devices_lock, flags); + list_for_each_entry(psdev, &pcistub_devices, dev_list) { + if (!psdev->pdev && psdev->dev != pdev + && pci_physfn(psdev->dev) == pdev) { + found = true; + break; + } + } + spin_unlock_irqrestore(&pcistub_devices_lock, flags); + if (found) + return psdev; + return NULL; +} + +static int pci_stub_notifier(struct notifier_block *nb, + unsigned long 
action, void *data) +{ + struct device *dev = data; + const struct pci_dev *pdev = to_pci_dev(dev); + + if (action != BUS_NOTIFY_UNBIND_DRIVER) + return NOTIFY_DONE; + + if (!pdev->is_physfn) + return NOTIFY_DONE; + + for (;;) { + struct pcistub_device *psdev = find_vfs(pdev); + if (!psdev) + break; + device_release_driver(&psdev->dev->dev); + } + return NOTIFY_DONE; +} + +static struct notifier_block pci_stub_nb = { + .notifier_call = pci_stub_notifier, +}; +#endif + static int __init xen_pcibk_init(void) { int err; @@ -1523,12 +1586,19 @@ static int __init xen_pcibk_init(void) err = xen_pcibk_xenbus_register(); if (err) pcistub_exit(); +#ifdef CONFIG_PCI_IOV + else + bus_register_notifier(&pci_bus_type, &pci_stub_nb); +#endif return err; } static void __exit xen_pcibk_cleanup(void) { +#ifdef CONFIG_PCI_IOV + bus_unregister_notifier(&pci_bus_type, &pci_stub_nb); +#endif xen_pcibk_xenbus_unregister(); pcistub_exit(); } diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index f72af87640e0..58e38d586f52 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -99,7 +99,8 @@ struct xen_pcibk_backend { unsigned int *domain, unsigned int *bus, unsigned int *devfn); int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb); - void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev); + void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev, + bool lock); int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev, int devid, publish_pci_dev_cb publish_cb); struct pci_dev *(*get)(struct xen_pcibk_device *pdev, @@ -122,10 +123,10 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, } static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, - struct pci_dev *dev) + struct pci_dev *dev, bool lock) { if (xen_pcibk_backend && xen_pcibk_backend->release) - return xen_pcibk_backend->release(pdev, dev); + return xen_pcibk_backend->release(pdev, dev, lock); } static inline struct pci_dev * diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c index 51afff96c515..c99f8bb1c56c 100644 --- a/drivers/xen/xen-pciback/vpci.c +++ b/drivers/xen/xen-pciback/vpci.c @@ -145,7 +145,7 @@ out: } static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, - struct pci_dev *dev) + struct pci_dev *dev, bool lock) { int slot; struct vpci_dev_data *vpci_dev = pdev->pci_dev_data; @@ -169,8 +169,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, out: mutex_unlock(&vpci_dev->lock); - if (found_dev) + if (found_dev) { + if (lock) + device_lock(&found_dev->dev); pcistub_put_pci_dev(found_dev); + if (lock) + device_unlock(&found_dev->dev); + } } static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev) @@ -208,8 +213,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev) struct pci_dev_entry *e, *tmp; list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot], list) { + struct pci_dev *dev = e->dev; list_del(&e->list); - pcistub_put_pci_dev(e->dev); + device_lock(&dev->dev); + pcistub_put_pci_dev(dev); + device_unlock(&dev->dev); kfree(e); } } diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index ad8d30c088fe..fe17c80ff4b7 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -247,7 +247,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev, if (err) goto out; - dev_dbg(&dev->dev, "registering for %d\n", 
pdev->xdev->otherend_id); + dev_info(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id); if (xen_register_device_domain_owner(dev, pdev->xdev->otherend_id) != 0) { dev_err(&dev->dev, "Stealing ownership from dom%d.\n", @@ -291,7 +291,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev, /* N.B. This ends up calling pcistub_put_pci_dev which ends up * doing the FLR. */ - xen_pcibk_release_pci_dev(pdev, dev); + xen_pcibk_release_pci_dev(pdev, dev, true /* use the lock. */); out: return err;
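
A few of the changes above are easier to see as standalone patterns. The tc_init() rework replaces scattered early returns with a single unwind ladder, where each label releases exactly what was acquired before the failing step. A minimal userspace sketch of that shape (the resource names and the acquire()/release() helpers are made up; "resource 1" is forced to fail so the unwind runs):

```c
#include <stdio.h>
#include <string.h>

/* Pretend "resource 1" cannot be acquired so the unwind path runs. */
static int acquire(const char *what)
{
	if (!strcmp(what, "resource 1")) {
		printf("acquire %s: failed\n", what);
		return -1;
	}
	printf("acquire %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int init(void)
{
	if (acquire("bus info"))
		goto out_err;
	if (acquire("device"))
		goto out_err;
	if (acquire("resource 0"))
		goto out_err_device;
	if (acquire("resource 1"))
		goto out_err_resource;
	return 0;

out_err_resource:
	release("resource 0");
out_err_device:
	release("device");
out_err:
	return 0;	/* like tc_init(), report success even on error */
}

int main(void)
{
	return init();
}
```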
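The sa1100fb change derives the pixel-clock divisor from the LCD controller clock (clk_get_rate) instead of cpufreq_get(). A small standalone sketch of the division chain shown in the hunk, with a made-up clock rate and dot-clock period; anything the in-tree get_pcd() does after this chain is omitted here:

```c
#include <stdio.h>

/* Sketch of the reworked get_pcd(): clk_rate_hz stands in for
 * clk_get_rate(fbi->clk); pixclock is a period in picoseconds. */
static unsigned int get_pcd(unsigned long clk_rate_hz, unsigned int pixclock_ps)
{
	/* rate in units of 100 kHz, as in the patched driver */
	unsigned int pcd = clk_rate_hz / 100 / 1000;

	pcd *= pixclock_ps;
	pcd /= 10000000;
	return pcd;
}

int main(void)
{
	/* e.g. a 206 MHz controller clock and a 171521 ps dot clock */
	printf("pcd = %u\n", get_pcd(206000000UL, 171521));
	return 0;
}
```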
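The swiotlb-xen hunks add xen_arch_need_swiotlb() to the direct-mapping test in xen_swiotlb_map_page() and to the scatter-gather path. A userspace sketch of that decision, with stub helpers standing in for the kernel ones; the PFN-mismatch rule inside the xen_arch_need_swiotlb() stub is an assumption for illustration, as the real check is per-architecture:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static bool swiotlb_force;	/* models the swiotlb_force knob */

/* Stub: can the device reach this bus address? */
static bool dma_capable(uint64_t dma_mask, uint64_t addr, size_t size)
{
	return addr + size - 1 <= dma_mask;
}

static bool range_straddles_page_boundary(uint64_t phys, size_t size)
{
	return (phys >> PAGE_SHIFT) != ((phys + size - 1) >> PAGE_SHIFT);
}

/* Stub: assumed rule for illustration only; the kernel hook is
 * arch-specific (always false on x86, stricter on ARM). */
static bool xen_arch_need_swiotlb(uint64_t pfn, uint64_t dev_pfn)
{
	return pfn != dev_pfn;
}

/* Mirrors the updated condition: map directly only when every
 * check passes, otherwise bounce through the swiotlb. */
static bool map_direct(uint64_t mask, uint64_t phys, uint64_t dev, size_t sz)
{
	return dma_capable(mask, dev, sz) &&
	       !range_straddles_page_boundary(phys, sz) &&
	       !xen_arch_need_swiotlb(phys >> PAGE_SHIFT, dev >> PAGE_SHIFT) &&
	       !swiotlb_force;
}

int main(void)
{
	printf("direct: %d\n", map_direct(~0ULL, 0x1000, 0x1000, 256));
	printf("bounce: %d\n", map_direct(0xffffff, 0x2000000, 0x2000000, 256));
	return 0;
}
```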
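The xen-pciback series threads a lock flag through the release callback so that pcistub_put_pci_dev() always runs under the device lock: pcistub_remove() already holds it and passes false, while the xenbus and vpci/passthrough teardown paths take it themselves. A runnable userspace sketch of that calling convention (build with -pthread; all names are illustrative stand-ins, not kernel API):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_pci_dev {
	pthread_mutex_t dev_lock;	/* stands in for the struct device lock */
	const char *name;
};

/* Models pcistub_put_pci_dev(): must run under the device lock so the
 * reset/state-restore sequence cannot race with driver bind/unbind. */
static void put_pci_dev(struct fake_pci_dev *dev)
{
	printf("%s: reset + restore initial config space\n", dev->name);
}

/* Models the new release(pdev, dev, lock) callback: take the lock only
 * when the caller does not already hold it. */
static void release_pci_dev(struct fake_pci_dev *dev, bool lock)
{
	if (lock)
		pthread_mutex_lock(&dev->dev_lock);
	put_pci_dev(dev);
	if (lock)
		pthread_mutex_unlock(&dev->dev_lock);
}

int main(void)
{
	struct fake_pci_dev d = { PTHREAD_MUTEX_INITIALIZER, "0000:00:1f.0" };

	release_pci_dev(&d, true);	/* xenbus remove path */

	pthread_mutex_lock(&d.dev_lock);/* pcistub_remove already holds it */
	release_pci_dev(&d, false);
	pthread_mutex_unlock(&d.dev_lock);
	return 0;
}
```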
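Finally, the new CONFIG_PCI_IOV notifier covers the case where a physical function bound to pciback is unbound while its virtual functions are still parked in the stub driver: it repeatedly looks up a VF of that PF and releases its driver until none remain. A self-contained sketch of that loop, with a toy device table in place of pcistub_devices:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_DEVS 4

struct fake_dev {
	const struct fake_dev *physfn;	/* NULL for the PF itself */
	bool bound;
};

static struct fake_dev devs[MAX_DEVS];

/* Models find_vfs(): first still-bound VF whose parent is pdev. */
static struct fake_dev *find_vfs(const struct fake_dev *pdev)
{
	for (size_t i = 0; i < MAX_DEVS; i++)
		if (devs[i].bound && devs[i].physfn == pdev)
			return &devs[i];
	return NULL;
}

/* Models the BUS_NOTIFY_UNBIND_DRIVER handler: detach VFs one at a
 * time, re-scanning after each release, until none are left. */
static void pf_unbind_notifier(const struct fake_dev *pf)
{
	for (;;) {
		struct fake_dev *vf = find_vfs(pf);
		if (!vf)
			break;
		vf->bound = false;	/* device_release_driver() */
		printf("released VF %td\n", vf - devs);
	}
}

int main(void)
{
	devs[0] = (struct fake_dev){ NULL, true };	/* PF */
	devs[1] = (struct fake_dev){ &devs[0], true };	/* VF */
	devs[2] = (struct fake_dev){ &devs[0], true };	/* VF */
	pf_unbind_notifier(&devs[0]);
	return 0;
}
```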