Diffstat (limited to 'drivers')
85 files changed, 4609 insertions(+), 498 deletions(-)
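
The bulk of this series threads two InfiniBand verbs interface changes through the drivers and ULPs below: ib_create_cq() grows a completion-vector argument, and ib_req_notify_cq() now takes ib_cq_notify_flags so callers can pass IB_CQ_REPORT_MISSED_EVENTS and learn whether completions may have slipped in while the CQ was unarmed. As a rough consumer-side sketch only (not part of the patch; my_comp_handler, my_create_cq and my_drain_cq are hypothetical placeholders), assuming the post-series API:

#include <rdma/ib_verbs.h>

static void my_comp_handler(struct ib_cq *cq, void *ctx)
{
	/* typically: schedule deferred polling, e.g. from NAPI */
}

static struct ib_cq *my_create_cq(struct ib_device *dev, void *ctx, int cqe)
{
	/* New trailing argument selects one of dev->num_comp_vectors
	 * completion vectors; 0 is always valid. */
	return ib_create_cq(dev, my_comp_handler, NULL, ctx, cqe, 0);
}

static int my_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* handle the completion */

	/* Re-arm the CQ.  With IB_CQ_REPORT_MISSED_EVENTS a positive
	 * return means completions may have arrived while the CQ was
	 * unarmed, so the caller should poll again instead of sleeping;
	 * the new ipoib_poll() in this series relies on that hint. */
	return ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				IB_CQ_REPORT_MISSED_EVENTS) > 0;
}
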
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 2d912b71e543..dcde9ddd105a 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c @@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file, state = simple_strtoul(str, NULL, 0); #ifdef CONFIG_SOFTWARE_SUSPEND if (state == 4) { - error = software_suspend(); + error = pm_suspend(PM_SUSPEND_DISK); goto Done; } #endif diff --git a/drivers/base/Makefile b/drivers/base/Makefile index e9eb7382ac3a..b39ea3f59c9b 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -2,10 +2,10 @@ obj-y := core.o sys.o bus.o dd.o \ driver.o class.o platform.o \ - cpu.o firmware.o init.o map.o dmapool.o \ - dma-mapping.o devres.o \ + cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o obj-y += power/ +obj-$(CONFIG_HAS_DMA) += dma-mapping.o dmapool.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 5d6562171533..27a139025ced 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1480,7 +1480,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp, break; case FDFMTEND: floppy_off(drive); - invalidate_bdev(inode->i_bdev, 0); + invalidate_bdev(inode->i_bdev); break; case FDGETPRM: memset((void *)&getprm, 0, sizeof (getprm)); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 6b5b64207407..0d4ccd4a0957 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -833,7 +833,7 @@ out_clr: lo->lo_backing_file = NULL; lo->lo_flags = 0; set_capacity(disks[lo->lo_number], 0); - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); bd_set_size(bdev, 0); mapping_set_gfp_mask(mapping, lo->old_gfp_mask); lo->lo_state = Lo_unbound; @@ -917,7 +917,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); set_capacity(disks[lo->lo_number], 0); bd_set_size(bdev, 0); mapping_set_gfp_mask(filp->f_mapping, gfp); diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 485aa87e9bcd..43d4ebcb3b44 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -403,7 +403,7 @@ static void __exit rd_cleanup(void) struct block_device *bdev = rd_bdev[i]; rd_bdev[i] = NULL; if (bdev) { - invalidate_bdev(bdev, 1); + invalidate_bdev(bdev); blkdev_put(bdev); } del_gendisk(rd_disks[i]); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index b36f44d4d1bf..3625a05bc3d3 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2384,7 +2384,7 @@ static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, return -EACCES; if (!CDROM_CAN(CDC_RESET)) return -ENOSYS; - invalidate_bdev(bdev, 0); + invalidate_bdev(bdev); return cdi->ops->reset(cdi); } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d0c978fbc204..a26d91743b24 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -905,8 +905,8 @@ config SONYPI To compile this driver as a module, choose M here: the module will be called sonypi. 
-config TANBAC_TB0219 - tristate "TANBAC TB0219 base board support" +config GPIO_TB0219 + tristate "TANBAC TB0219 GPIO support" depends on TANBAC_TB022X select GPIO_VR41XX diff --git a/drivers/char/Makefile b/drivers/char/Makefile index ae8567cc529c..2f56ecc035aa 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -91,7 +91,7 @@ obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o -obj-$(CONFIG_TANBAC_TB0219) += tb0219.o +obj-$(CONFIG_GPIO_TB0219) += tb0219.o obj-$(CONFIG_TELCLOCK) += tlclk.o obj-$(CONFIG_WATCHDOG) += watchdog/ diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 7a32df594907..389da364e6b6 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -3720,11 +3720,10 @@ int tty_register_driver(struct tty_driver *driver) if (driver->flags & TTY_DRIVER_INSTALLED) return 0; - if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) { - p = kmalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL); + if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) { + p = kzalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL); if (!p) return -ENOMEM; - memset(p, 0, driver->num * 3 * sizeof(void *)); } if (!driver->major) { diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c index 214fbb1423c5..7ed92dc3d833 100644 --- a/drivers/i2c/chips/tps65010.c +++ b/drivers/i2c/chips/tps65010.c @@ -351,8 +351,10 @@ static void tps65010_interrupt(struct tps65010 *tps) #if 0 /* REVISIT: this might need its own workqueue * plus tweaks including deadlock avoidance ... + * also needs to get error handling and probably + * an #ifdef CONFIG_SOFTWARE_SUSPEND */ - software_suspend(); + pm_suspend(PM_SUSPEND_DISK); #endif poll = 1; } diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 1d796e7c8199..a06bcc65a871 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -43,6 +43,8 @@ #include "core_priv.h" +#define PFX "fmr_pool: " + enum { IB_FMR_MAX_REMAPS = 32, @@ -150,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool) #ifdef DEBUG if (fmr->ref_count !=0) { - printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d", + printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d", fmr, fmr->ref_count); } #endif @@ -168,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool) ret = ib_unmap_fmr(&fmr_list); if (ret) - printk(KERN_WARNING "ib_unmap_fmr returned %d", ret); + printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret); spin_lock_irq(&pool->pool_lock); list_splice(&unmap_list, &pool->free_list); @@ -226,20 +228,20 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, device = pd->device; if (!device->alloc_fmr || !device->dealloc_fmr || !device->map_phys_fmr || !device->unmap_fmr) { - printk(KERN_WARNING "Device %s does not support fast memory regions", + printk(KERN_INFO PFX "Device %s does not support FMRs\n", device->name); return ERR_PTR(-ENOSYS); } attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) { - printk(KERN_WARNING "couldn't allocate device attr struct"); + printk(KERN_WARNING PFX "couldn't allocate device attr struct"); return ERR_PTR(-ENOMEM); } ret = ib_query_device(device, attr); if (ret) { - printk(KERN_WARNING "couldn't query device"); + printk(KERN_WARNING PFX "couldn't query device: %d", ret); kfree(attr); return ERR_PTR(ret); } @@ -253,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, 
pool = kmalloc(sizeof *pool, GFP_KERNEL); if (!pool) { - printk(KERN_WARNING "couldn't allocate pool struct"); + printk(KERN_WARNING PFX "couldn't allocate pool struct"); return ERR_PTR(-ENOMEM); } @@ -270,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, GFP_KERNEL); if (!pool->cache_bucket) { - printk(KERN_WARNING "Failed to allocate cache in pool"); + printk(KERN_WARNING PFX "Failed to allocate cache in pool"); ret = -ENOMEM; goto out_free_pool; } @@ -294,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, "ib_fmr(%s)", device->name); if (IS_ERR(pool->thread)) { - printk(KERN_WARNING "couldn't start cleanup thread"); + printk(KERN_WARNING PFX "couldn't start cleanup thread"); ret = PTR_ERR(pool->thread); goto out_free_pool; } @@ -311,8 +313,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64), GFP_KERNEL); if (!fmr) { - printk(KERN_WARNING "failed to allocate fmr struct " - "for FMR %d", i); + printk(KERN_WARNING PFX "failed to allocate fmr " + "struct for FMR %d", i); goto out_fail; } @@ -323,7 +325,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr); if (IS_ERR(fmr->fmr)) { - printk(KERN_WARNING "fmr_create failed for FMR %d", i); + printk(KERN_WARNING PFX "fmr_create failed " + "for FMR %d", i); kfree(fmr); goto out_fail; } @@ -378,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) } if (i < pool->pool_size) - printk(KERN_WARNING "pool still has %d regions registered", + printk(KERN_WARNING PFX "pool still has %d regions registered", pool->pool_size - i); kfree(pool->cache_bucket); @@ -463,8 +466,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, list_add(&fmr->list, &pool->free_list); spin_unlock_irqrestore(&pool->pool_lock, flags); - printk(KERN_WARNING "fmr_map returns %d\n", - result); + printk(KERN_WARNING PFX "fmr_map returns %d\n", result); return ERR_PTR(result); } @@ -516,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) #ifdef DEBUG if (fmr->ref_count < 0) - printk(KERN_WARNING "FMR %p has ref count %d < 0", + printk(KERN_WARNING PFX "FMR %p has ref count %d < 0", fmr, fmr->ref_count); #endif diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 6edfecf1be72..85ccf13b8041 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2771,7 +2771,7 @@ static int ib_mad_port_open(struct ib_device *device, cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; port_priv->cq = ib_create_cq(port_priv->device, ib_mad_thread_completion_handler, - NULL, port_priv, cq_size); + NULL, port_priv, cq_size, 0); if (IS_ERR(port_priv->cq)) { printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4fd75afa6a3a..bab66769be14 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -802,6 +802,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, INIT_LIST_HEAD(&obj->async_list); cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, + cmd.comp_vector, file->ucontext, &udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index f8bc822a3cc3..d44e54799651 100644 --- a/drivers/infiniband/core/uverbs_main.c 
+++ b/drivers/infiniband/core/uverbs_main.c @@ -752,7 +752,7 @@ static void ib_uverbs_add_one(struct ib_device *device) spin_unlock(&map_lock); uverbs_dev->ib_dev = device; - uverbs_dev->num_comp_vectors = 1; + uverbs_dev->num_comp_vectors = device->num_comp_vectors; uverbs_dev->dev = cdev_alloc(); if (!uverbs_dev->dev) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ccdf93d30b01..86ed8af9c7e6 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -609,11 +609,11 @@ EXPORT_SYMBOL(ib_destroy_qp); struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), - void *cq_context, int cqe) + void *cq_context, int cqe, int comp_vector) { struct ib_cq *cq; - cq = device->create_cq(device, cqe, NULL, NULL); + cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); if (!IS_ERR(cq)) { cq->device = device; diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h index 04a9db5de881..fa58200217a1 100644 --- a/drivers/infiniband/hw/amso1100/c2.h +++ b/drivers/infiniband/hw/amso1100/c2.h @@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq); extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index); extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index); extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); +extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); /* CM */ extern int c2_llp_connect(struct iw_cm_id *cm_id, diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index 5175c99ee586..d2b3366786d6 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c @@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) return npolled; } -int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { struct c2_mq_shared __iomem *shared; struct c2_cq *cq; + unsigned long flags; + int ret = 0; cq = to_c2cq(ibcq); shared = cq->mq.peer; - if (notify == IB_CQ_NEXT_COMP) + if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP) writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type); - else if (notify == IB_CQ_SOLICITED) + else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type); else return -EINVAL; @@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) */ readb(&shared->armed); - return 0; + if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { + spin_lock_irqsave(&cq->lock, flags); + ret = !c2_mq_empty(&cq->mq); + spin_unlock_irqrestore(&cq->lock, flags); + } + + return ret; } static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 607c09bf764c..109166223c09 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp) return 0; } -static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, +static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *context, struct ib_udata *udata) { @@ 
-795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev) memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); dev->ibdev.phys_port_cnt = 1; + dev->ibdev.num_comp_vectors = 1; dev->ibdev.dma_device = &dev->pcidev->dev; dev->ibdev.query_device = c2_query_device; dev->ibdev.query_port = c2_query_port; diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index f5e9aeec6f6e..76049afc7655 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, return -EIO; } } + + return 1; } + return 0; } diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index 90d7b8972cb4..ff7290eacefb 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -38,6 +38,7 @@ #include "firmware_exports.h" #define T3_MAX_SGE 4 +#define T3_MAX_INLINE 64 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr)) #define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \ diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 3b4b0acd707f..b2faff5abce8 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1109,6 +1109,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) PDBG("%s ep %p\n", __FUNCTION__, ep); + /* + * We get 2 abort replies from the HW. The first one must + * be ignored except for scribbling that we need one more. + */ + if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) { + ep->flags |= ABORT_REQ_IN_PROGRESS; + return CPL_RET_BUF_DONE; + } + close_complete_upcall(ep); state_set(&ep->com, DEAD); release_ep_resources(ep); @@ -1189,6 +1198,7 @@ static int listen_stop(struct iwch_listen_ep *ep) } req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->cpu_idx = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); skb->priority = 1; ep->com.tdev->send(ep->com.tdev, skb); @@ -1475,6 +1485,15 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) int ret; int state; + /* + * We get 2 peer aborts from the HW. The first one must + * be ignored except for scribbling that we need one more. 
+ */ + if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) { + ep->flags |= PEER_ABORT_IN_PROGRESS; + return CPL_RET_BUF_DONE; + } + if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h index 0c6f281bd4a0..21a388c313cf 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.h +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h @@ -143,6 +143,11 @@ enum iwch_ep_state { DEAD, }; +enum iwch_ep_flags { + PEER_ABORT_IN_PROGRESS = (1 << 0), + ABORT_REQ_IN_PROGRESS = (1 << 1), +}; + struct iwch_ep_common { struct iw_cm_id *cm_id; struct iwch_qp *qp; @@ -181,6 +186,7 @@ struct iwch_ep { u16 plen; u32 ird; u32 ord; + u32 flags; }; static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index af28a317016d..a891493fd340 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq) return 0; } -static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, +static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_context, struct ib_udata *udata) { @@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) #endif } -static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct iwch_dev *rhp; struct iwch_cq *chp; @@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) chp = to_iwch_cq(ibcq); rhp = chp->rhp; - if (notify == IB_CQ_SOLICITED) + if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) cq_op = CQ_ARM_SE; else cq_op = CQ_ARM_AN; @@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr); err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0); spin_unlock_irqrestore(&chp->lock, flag); - if (err) + if (err < 0) printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err, chp->cq.cqid); + if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) + err = 0; return err; } @@ -780,6 +782,9 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, if (rqsize > T3_MAX_RQ_SIZE) return ERR_PTR(-EINVAL); + if (attrs->cap.max_inline_data > T3_MAX_INLINE) + return ERR_PTR(-EINVAL); + /* * NOTE: The SQ and total WQ sizes don't need to be * a power of two. 
However, all the code assumes @@ -1107,6 +1112,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.node_type = RDMA_NODE_RNIC; memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; + dev->ibdev.num_comp_vectors = 1; dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 0a472c9b44db..714dddbc9a98 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -471,43 +471,62 @@ int iwch_bind_mw(struct ib_qp *qp, return err; } -static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged) +static inline void build_term_codes(struct respQ_msg_t *rsp_msg, + u8 *layer_type, u8 *ecode) { - switch (t3err) { + int status = TPT_ERR_INTERNAL_ERR; + int tagged = 0; + int opcode = -1; + int rqtype = 0; + int send_inv = 0; + + if (rsp_msg) { + status = CQE_STATUS(rsp_msg->cqe); + opcode = CQE_OPCODE(rsp_msg->cqe); + rqtype = RQ_TYPE(rsp_msg->cqe); + send_inv = (opcode == T3_SEND_WITH_INV) || + (opcode == T3_SEND_WITH_SE_INV); + tagged = (opcode == T3_RDMA_WRITE) || + (rqtype && (opcode == T3_READ_RESP)); + } + + switch (status) { case TPT_ERR_STAG: - if (tagged == 1) { - *layer_type = LAYER_DDP|DDP_TAGGED_ERR; - *ecode = DDPT_INV_STAG; - } else if (tagged == 2) { + if (send_inv) { + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; + *ecode = RDMAP_CANT_INV_STAG; + } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_INV_STAG; } break; case TPT_ERR_PDID: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + if ((opcode == T3_SEND_WITH_INV) || + (opcode == T3_SEND_WITH_SE_INV)) + *ecode = RDMAP_CANT_INV_STAG; + else + *ecode = RDMAP_STAG_NOT_ASSOC; + break; case TPT_ERR_QPID: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_STAG_NOT_ASSOC; + break; case TPT_ERR_ACCESS: - if (tagged == 1) { - *layer_type = LAYER_DDP|DDP_TAGGED_ERR; - *ecode = DDPT_STAG_NOT_ASSOC; - } else if (tagged == 2) { - *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; - *ecode = RDMAP_STAG_NOT_ASSOC; - } + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_ACC_VIOL; break; case TPT_ERR_WRAP: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_TO_WRAP; break; case TPT_ERR_BOUND: - if (tagged == 1) { + if (tagged) { *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_BASE_BOUNDS; - } else if (tagged == 2) { + } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_BASE_BOUNDS; - } else { - *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; - *ecode = DDPU_MSG_TOOBIG; } break; case TPT_ERR_INVALIDATE_SHARED_MR: @@ -591,8 +610,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) { union t3_wr *wqe; struct terminate_message *term; - int status; - int tagged = 0; struct sk_buff *skb; PDBG("%s %d\n", __FUNCTION__, __LINE__); @@ -610,17 +627,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) /* immediate data starts here. 
*/ term = (struct terminate_message *)wqe->send.sgl; - if (rsp_msg) { - status = CQE_STATUS(rsp_msg->cqe); - if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE) - tagged = 1; - if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) || - (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) - tagged = 2; - } else { - status = TPT_ERR_INTERNAL_ERR; - } - build_term_codes(status, &term->layer_etype, &term->ecode, tagged); + build_term_codes(rsp_msg, &term->layer_etype, &term->ecode); build_fw_riwrh((void *)wqe, T3_WR_SEND, T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1, qhp->ep->hwtid, 5); diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index e2cdc1a16fe9..67f0670fe3b1 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c @@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) return ret; } -struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, +struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 95fd59fb4528..e14b029332c8 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h @@ -123,7 +123,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq); void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq); -struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, +struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata); @@ -135,7 +135,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); int ehca_peek_cq(struct ib_cq *cq, int wc_cnt); -int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify); +int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags); struct ib_qp *ehca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 4700085ba834..2d370543e96d 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -313,6 +313,7 @@ int ehca_init_device(struct ehca_shca *shca) shca->ib_device.node_type = RDMA_NODE_IB_CA; shca->ib_device.phys_port_cnt = shca->num_ports; + shca->ib_device.num_comp_vectors = 1; shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev; shca->ib_device.query_device = ehca_query_device; shca->ib_device.query_port = ehca_query_port; @@ -375,7 +376,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) return -EPERM; } - ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10); + ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0); if (IS_ERR(ibcq)) { ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); return PTR_ERR(ibcq); diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 08d3f892d9f3..caec9dee09e1 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -634,11 +634,13 @@ poll_cq_exit0: return ret; } -int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) +int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags) { struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + unsigned long spl_flags; + int ret = 0; - switch (cq_notify) { + switch (notify_flags & 
IB_CQ_SOLICITED_MASK) { case IB_CQ_SOLICITED: hipz_set_cqx_n0(my_cq, 1); break; @@ -649,5 +651,11 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) return -EINVAL; } - return 0; + if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { + spin_lock_irqsave(&my_cq->spinlock, spl_flags); + ret = ipz_qeit_is_valid(&my_cq->ipz_queue); + spin_unlock_irqrestore(&my_cq->spinlock, spl_flags); + } + + return ret; } diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h index 8199c45768a3..57f141a36bce 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h @@ -140,6 +140,14 @@ static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue) return cqe; } +static inline int ipz_qeit_is_valid(struct ipz_queue *queue) +{ + struct ehca_cqe *cqe = ipz_qeit_get(queue); + u32 cqe_flags = cqe->cqe_flags; + + return cqe_flags >> 7 == (queue->toggle_state & 1); +} + /* * returns and resets Queue Entry iterator * returns address (kv) of first Queue Entry diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c index ea78e6dddc90..3e9241badba0 100644 --- a/drivers/infiniband/hw/ipath/ipath_cq.c +++ b/drivers/infiniband/hw/ipath/ipath_cq.c @@ -204,7 +204,7 @@ static void send_complete(unsigned long data) * * Called by ib_create_cq() in the generic verbs code. */ -struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, +struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { @@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, * See ipath_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { - struct ipath_mmap_info *ip; - __u64 offset = (__u64) wc; int err; + u32 s = sizeof *wc + sizeof(struct ib_wc) * entries; - err = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (err) { - ret = ERR_PTR(err); + cq->ip = ipath_create_mmap_info(dev, s, context, wc); + if (!cq->ip) { + ret = ERR_PTR(-ENOMEM); goto bail_wc; } - /* Allocate info for ipath_mmap(). */ - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wc; + err = ib_copy_to_udata(udata, &cq->ip->offset, + sizeof(cq->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; } - cq->ip = ip; - ip->context = context; - ip->obj = wc; - kref_init(&ip->ref); - ip->mmap_cnt = 0; - ip->size = PAGE_ALIGN(sizeof(*wc) + - sizeof(struct ib_wc) * entries); - spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; - spin_unlock_irq(&dev->pending_lock); } else cq->ip = NULL; @@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, if (dev->n_cqs_allocated == ib_ipath_max_cqs) { spin_unlock(&dev->n_cqs_lock); ret = ERR_PTR(-ENOMEM); - goto bail_wc; + goto bail_ip; } dev->n_cqs_allocated++; spin_unlock(&dev->n_cqs_lock); + if (cq->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + /* * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. 
* The number of entries should be >= the number requested or return @@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, goto done; +bail_ip: + kfree(cq->ip); bail_wc: vfree(wc); - bail_cq: kfree(cq); - done: return ret; } @@ -340,17 +334,18 @@ int ipath_destroy_cq(struct ib_cq *ibcq) /** * ipath_req_notify_cq - change the notification type for a completion queue * @ibcq: the completion queue - * @notify: the type of notification to request + * @notify_flags: the type of notification to request * * Returns 0 for success. * * This may be called from interrupt context. Also called by * ib_req_notify_cq() in the generic verbs code. */ -int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { struct ipath_cq *cq = to_icq(ibcq); unsigned long flags; + int ret = 0; spin_lock_irqsave(&cq->lock, flags); /* @@ -358,9 +353,15 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). */ if (cq->notify != IB_CQ_NEXT_COMP) - cq->notify = notify; + cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && + cq->queue->head != cq->queue->tail) + ret = 1; + spin_unlock_irqrestore(&cq->lock, flags); - return 0; + + return ret; } /** @@ -443,13 +444,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) if (cq->ip) { struct ipath_ibdev *dev = to_idev(ibcq->device); struct ipath_mmap_info *ip = cq->ip; + u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe; - ip->obj = wc; - ip->size = PAGE_ALIGN(sizeof(*wc) + - sizeof(struct ib_wc) * cqe); + ipath_update_mmap_info(dev, ip, s, wc); spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; + if (list_empty(&ip->pending_mmaps)) + list_add(&ip->pending_mmaps, &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c index a82157db4689..937bc3396b53 100644 --- a/drivers/infiniband/hw/ipath/ipath_mmap.c +++ b/drivers/infiniband/hw/ipath/ipath_mmap.c @@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref) { struct ipath_mmap_info *ip = container_of(ref, struct ipath_mmap_info, ref); + struct ipath_ibdev *dev = to_idev(ip->context->device); + + spin_lock_irq(&dev->pending_lock); + list_del(&ip->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); vfree(ip->obj); kfree(ip); @@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma) struct ipath_mmap_info *ip = vma->vm_private_data; kref_get(&ip->ref); - ip->mmap_cnt++; } static void ipath_vma_close(struct vm_area_struct *vma) { struct ipath_mmap_info *ip = vma->vm_private_data; - ip->mmap_cnt--; kref_put(&ip->ref, ipath_release_mmap_info); } @@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) struct ipath_ibdev *dev = to_idev(context->device); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long size = vma->vm_end - vma->vm_start; - struct ipath_mmap_info *ip, **pp; + struct ipath_mmap_info *ip, *pp; int ret = -EINVAL; /* @@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) * CQ, QP, or SRQ is soon followed by a call to mmap(). 
*/ spin_lock_irq(&dev->pending_lock); - for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) { + list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, + pending_mmaps) { /* Only the creator is allowed to mmap the object */ - if (context != ip->context || (void *) offset != ip->obj) + if (context != ip->context || (__u64) offset != ip->offset) continue; /* Don't allow a mmap larger than the object. */ if (size > ip->size) break; - *pp = ip->next; + list_del_init(&ip->pending_mmaps); spin_unlock_irq(&dev->pending_lock); ret = remap_vmalloc_range(vma, ip->obj, 0); @@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) done: return ret; } + +/* + * Allocate information for ipath_mmap + */ +struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev, + u32 size, + struct ib_ucontext *context, + void *obj) { + struct ipath_mmap_info *ip; + + ip = kmalloc(sizeof *ip, GFP_KERNEL); + if (!ip) + goto bail; + + size = PAGE_ALIGN(size); + + spin_lock_irq(&dev->mmap_offset_lock); + if (dev->mmap_offset == 0) + dev->mmap_offset = PAGE_SIZE; + ip->offset = dev->mmap_offset; + dev->mmap_offset += size; + spin_unlock_irq(&dev->mmap_offset_lock); + + INIT_LIST_HEAD(&ip->pending_mmaps); + ip->size = size; + ip->context = context; + ip->obj = obj; + kref_init(&ip->ref); + +bail: + return ip; +} + +void ipath_update_mmap_info(struct ipath_ibdev *dev, + struct ipath_mmap_info *ip, + u32 size, void *obj) { + size = PAGE_ALIGN(size); + + spin_lock_irq(&dev->mmap_offset_lock); + if (dev->mmap_offset == 0) + dev->mmap_offset = PAGE_SIZE; + ip->offset = dev->mmap_offset; + dev->mmap_offset += size; + spin_unlock_irq(&dev->mmap_offset_lock); + + ip->size = size; + ip->obj = obj; +} diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 16db9ac0b402..bfef08ecd342 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c @@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, * See ipath_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { - struct ipath_mmap_info *ip; - __u64 offset = (__u64) qp->r_rq.wq; int err; - err = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_rwq; - } + if (!qp->r_rq.wq) { + __u64 offset = 0; - if (qp->r_rq.wq) { - /* Allocate info for ipath_mmap(). 
*/ - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) { + err = ib_copy_to_udata(udata, &offset, + sizeof(offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_rwq; + } + } else { + u32 s = sizeof(struct ipath_rwq) + + qp->r_rq.size * sz; + + qp->ip = + ipath_create_mmap_info(dev, s, + ibpd->uobject->context, + qp->r_rq.wq); + if (!qp->ip) { ret = ERR_PTR(-ENOMEM); goto bail_rwq; } - qp->ip = ip; - ip->context = ibpd->uobject->context; - ip->obj = qp->r_rq.wq; - kref_init(&ip->ref); - ip->mmap_cnt = 0; - ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + - qp->r_rq.size * sz); - spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; - spin_unlock_irq(&dev->pending_lock); + + err = ib_copy_to_udata(udata, &(qp->ip->offset), + sizeof(qp->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; + } } } @@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, dev->n_qps_allocated++; spin_unlock(&dev->n_qps_lock); + if (qp->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + ret = &qp->ibqp; goto bail; diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index b4b88d0b53f5..1915771fd038 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c @@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, case OP(RDMA_READ_RESPONSE_LAST): case OP(RDMA_READ_RESPONSE_ONLY): case OP(ATOMIC_ACKNOWLEDGE): - qp->s_ack_state = OP(ACKNOWLEDGE); + /* + * We can increment the tail pointer now that the last + * response has been sent instead of only being + * constructed. + */ + if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) + qp->s_tail_ack_queue = 0; /* FALLTHROUGH */ + case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { if (qp->s_flags & IPATH_S_ACK_PENDING) goto normal; + qp->s_ack_state = OP(ACKNOWLEDGE); goto bail; } @@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, if (len > pmtu) { len = pmtu; qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); - } else { + } else qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); - if (++qp->s_tail_ack_queue > - IPATH_MAX_RDMA_ATOMIC) - qp->s_tail_ack_queue = 0; - } ohdr->u.aeth = ipath_compute_aeth(qp); hwords++; qp->s_ack_rdma_psn = e->psn; @@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, cpu_to_be32(e->atomic_data); hwords += sizeof(ohdr->u.at) / sizeof(u32); bth2 = e->psn; - if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) - qp->s_tail_ack_queue = 0; } bth0 = qp->s_ack_state << 24; break; @@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, ohdr->u.aeth = ipath_compute_aeth(qp); hwords++; qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); - if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) - qp->s_tail_ack_queue = 0; } bth0 = qp->s_ack_state << 24; bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK; @@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, * the ACK before setting s_ack_state to ACKNOWLEDGE * (see above). */ - qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); + qp->s_ack_state = OP(SEND_ONLY); qp->s_flags &= ~IPATH_S_ACK_PENDING; qp->s_cur_sge = NULL; if (qp->s_nak_state) @@ -223,23 +223,18 @@ int ipath_make_rc_req(struct ipath_qp *qp, /* Sending responses has higher priority over sending requests. 
*/ if ((qp->r_head_ack_queue != qp->s_tail_ack_queue || (qp->s_flags & IPATH_S_ACK_PENDING) || - qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) && + qp->s_ack_state != OP(ACKNOWLEDGE)) && ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p)) goto done; if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) || - qp->s_rnr_timeout) + qp->s_rnr_timeout || qp->s_wait_credit) goto bail; /* Limit the number of packets sent without an ACK. */ if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { qp->s_wait_credit = 1; dev->n_rc_stalls++; - spin_lock(&dev->pending_lock); - if (list_empty(&qp->timerwait)) - list_add_tail(&qp->timerwait, - &dev->pending[dev->pending_index]); - spin_unlock(&dev->pending_lock); goto bail; } @@ -587,9 +582,12 @@ static void send_rc_ack(struct ipath_qp *qp) u32 hwords; struct ipath_ib_header hdr; struct ipath_other_headers *ohdr; + unsigned long flags; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ - if (qp->r_head_ack_queue != qp->s_tail_ack_queue) + if (qp->r_head_ack_queue != qp->s_tail_ack_queue || + (qp->s_flags & IPATH_S_ACK_PENDING) || + qp->s_ack_state != OP(ACKNOWLEDGE)) goto queue_ack; /* Construct the header. */ @@ -640,11 +638,11 @@ static void send_rc_ack(struct ipath_qp *qp) dev->n_rc_qacks++; queue_ack: - spin_lock_irq(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); qp->s_flags |= IPATH_S_ACK_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); /* Call ipath_do_rc_send() in another thread. */ tasklet_hi_schedule(&qp->s_task); @@ -1261,6 +1259,7 @@ ack_err: wc.dlid_path_bits = 0; wc.port_num = 0; ipath_sqerror_qp(qp, &wc); + spin_unlock_irqrestore(&qp->s_lock, flags); bail: return; } @@ -1294,6 +1293,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, struct ipath_ack_entry *e; u8 i, prev; int old_req; + unsigned long flags; if (diff > 0) { /* @@ -1327,7 +1327,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, psn &= IPATH_PSN_MASK; e = NULL; old_req = 1; - spin_lock_irq(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); for (i = qp->r_head_ack_queue; ; i = prev) { if (i == qp->s_tail_ack_queue) old_req = 0; @@ -1425,7 +1425,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, * after all the previous RDMA reads and atomics. */ if (i == qp->r_head_ack_queue) { - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->r_psn - 1; goto send_ack; @@ -1439,11 +1439,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, break; } qp->r_nak_state = 0; - spin_unlock_irq(&qp->s_lock); tasklet_hi_schedule(&qp->s_task); unlock_done: - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); done: return 1; @@ -1453,10 +1452,12 @@ send_ack: static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) { - spin_lock_irq(&qp->s_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->s_lock, flags); qp->state = IB_QPS_ERR; ipath_error_qp(qp, err); - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); } /** diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index 94033503400c..03acae66ba81 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c @@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, * See ipath_mmap() for details. 
*/ if (udata && udata->outlen >= sizeof(__u64)) { - struct ipath_mmap_info *ip; - __u64 offset = (__u64) srq->rq.wq; int err; + u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz; - err = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (err) { - ret = ERR_PTR(err); + srq->ip = + ipath_create_mmap_info(dev, s, + ibpd->uobject->context, + srq->rq.wq); + if (!srq->ip) { + ret = ERR_PTR(-ENOMEM); goto bail_wq; } - /* Allocate info for ipath_mmap(). */ - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wq; + err = ib_copy_to_udata(udata, &srq->ip->offset, + sizeof(srq->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; } - srq->ip = ip; - ip->context = ibpd->uobject->context; - ip->obj = srq->rq.wq; - kref_init(&ip->ref); - ip->mmap_cnt = 0; - ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + - srq->rq.size * sz); - spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; - spin_unlock_irq(&dev->pending_lock); } else srq->ip = NULL; @@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, if (dev->n_srqs_allocated == ib_ipath_max_srqs) { spin_unlock(&dev->n_srqs_lock); ret = ERR_PTR(-ENOMEM); - goto bail_wq; + goto bail_ip; } dev->n_srqs_allocated++; spin_unlock(&dev->n_srqs_lock); + if (srq->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + ret = &srq->ibsrq; goto done; +bail_ip: + kfree(srq->ip); bail_wq: vfree(srq->rq.wq); - bail_srq: kfree(srq); - done: return ret; } @@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (srq->ip) { struct ipath_mmap_info *ip = srq->ip; struct ipath_ibdev *dev = to_idev(srq->ibsrq.device); + u32 s = sizeof(struct ipath_rwq) + size * sz; - ip->obj = wq; - ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + - size * sz); + ipath_update_mmap_info(dev, ip, s, wq); spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; + if (list_empty(&ip->pending_mmaps)) + list_add(&ip->pending_mmaps, + &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } } else if (attr_mask & IB_SRQ_LIMIT) { diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 18c6df2052c2..12933e77c7e9 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd) ret = -ENOMEM; goto err_lk; } + INIT_LIST_HEAD(&idev->pending_mmaps); spin_lock_init(&idev->pending_lock); + idev->mmap_offset = PAGE_SIZE; + spin_lock_init(&idev->mmap_offset_lock); INIT_LIST_HEAD(&idev->pending[0]); INIT_LIST_HEAD(&idev->pending[1]); INIT_LIST_HEAD(&idev->pending[2]); @@ -1558,6 +1561,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; + dev->num_comp_vectors = 1; dev->dma_device = &dd->pcidev->dev; dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 7c4929f1cb5b..7064fc222727 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -173,12 +173,12 @@ struct ipath_ah { * this as its vm_private_data. 
*/ struct ipath_mmap_info { - struct ipath_mmap_info *next; + struct list_head pending_mmaps; struct ib_ucontext *context; void *obj; + __u64 offset; struct kref ref; unsigned size; - unsigned mmap_cnt; }; /* @@ -422,7 +422,7 @@ struct ipath_qp { #define IPATH_S_RDMAR_PENDING 0x04 #define IPATH_S_ACK_PENDING 0x08 -#define IPATH_PSN_CREDIT 2048 +#define IPATH_PSN_CREDIT 512 /* * Since struct ipath_swqe is not a fixed size, we can't simply index into @@ -485,9 +485,10 @@ struct ipath_opcode_stats { struct ipath_ibdev { struct ib_device ibdev; - struct list_head dev_list; struct ipath_devdata *dd; - struct ipath_mmap_info *pending_mmaps; + struct list_head pending_mmaps; + spinlock_t mmap_offset_lock; + u32 mmap_offset; int ib_unit; /* This is the device number */ u16 sm_lid; /* in host order */ u8 sm_sl; @@ -734,13 +735,13 @@ int ipath_destroy_srq(struct ib_srq *ibsrq); int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, +struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata); int ipath_destroy_cq(struct ib_cq *ibcq); -int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); +int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); @@ -768,6 +769,15 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr); void ipath_release_mmap_info(struct kref *ref); +struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev, + u32 size, + struct ib_ucontext *context, + void *obj); + +void ipath_update_mmap_info(struct ipath_ibdev *dev, + struct ipath_mmap_info *ip, + u32 size, void *obj); + int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index efd79ef109a6..cf0868f6e965 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -726,11 +726,12 @@ repoll: return err == 0 || err == -EAGAIN ? npolled : err; } -int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) { __be32 doorbell[2]; - doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? + doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) == + IB_CQ_SOLICITED ? MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : MTHCA_TAVOR_CQ_DB_REQ_NOT) | to_mcq(cq)->cqn); @@ -743,7 +744,7 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) return 0; } -int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mthca_cq *cq = to_mcq(ibcq); __be32 doorbell[2]; @@ -755,7 +756,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) doorbell[0] = ci; doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | - (notify == IB_CQ_SOLICITED ? 1 : 2)); + ((flags & IB_CQ_SOLICITED_MASK) == + IB_CQ_SOLICITED ? 1 : 2)); mthca_write_db_rec(doorbell, cq->arm_db); @@ -766,7 +768,7 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) wmb(); doorbell[0] = cpu_to_be32((sn << 28) | - (notify == IB_CQ_SOLICITED ? + ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index b7e42efaf43d..9bae3cc60603 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -495,8 +495,8 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev); int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); -int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); +int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); int mthca_init_cq(struct mthca_dev *dev, int nent, struct mthca_ucontext *ctx, u32 pdn, struct mthca_cq *cq); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 47e6fd46d9c2..1c05486c3c68 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -663,6 +663,7 @@ static int mthca_destroy_qp(struct ib_qp *qp) } static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, + int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { @@ -1292,6 +1293,7 @@ int mthca_register_device(struct mthca_dev *dev) (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; + dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 8fe6fee7a97a..fee60c852d14 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -701,6 +701,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); } + if (ibqp->qp_type == IB_QPT_RC && + cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { + u8 sched_queue = ibqp->uobject ? 
0x2 : 0x1; + + if (mthca_is_memfree(dev)) + qp_context->rlkey_arbel_sched_queue |= sched_queue; + else + qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); + + qp_param->opt_param_mask |= + cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); + } + if (attr_mask & IB_QP_TIMEOUT) { qp_context->pri_path.ackto = attr->timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index d8f6bb4f53fc..87310eeb6df0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -310,6 +310,7 @@ extern struct workqueue_struct *ipoib_workqueue; /* functions */ +int ipoib_poll(struct net_device *dev, int *budget); void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); struct ipoib_ah *ipoib_create_ah(struct net_device *dev, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 0c4e59b906cd..785bc8505f2a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -370,7 +370,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) { p = wc->qp->qp_context; - if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { + if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { spin_lock_irqsave(&priv->lock, flags); p->jiffies = jiffies; /* Move this entry to list head, but do @@ -416,7 +416,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb->dev = dev; /* XXX get correct PACKET_ type here */ skb->pkt_type = PACKET_HOST; - netif_rx_ni(skb); + netif_receive_skb(skb); repost: if (unlikely(ipoib_cm_post_receive(dev, wr_id))) @@ -592,7 +592,9 @@ int ipoib_cm_dev_open(struct net_device *dev) priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); if (IS_ERR(priv->cm.id)) { printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name); - return IS_ERR(priv->cm.id); + ret = PTR_ERR(priv->cm.id); + priv->cm.id = NULL; + return ret; } ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), @@ -601,6 +603,7 @@ int ipoib_cm_dev_open(struct net_device *dev) printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, IPOIB_CM_IETF_ID | priv->qp->qp_num); ib_destroy_cm_id(priv->cm.id); + priv->cm.id = NULL; return ret; } return 0; @@ -611,10 +614,11 @@ void ipoib_cm_dev_stop(struct net_device *dev) struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_rx *p; - if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) + if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) return; ib_destroy_cm_id(priv->cm.id); + priv->cm.id = NULL; spin_lock_irq(&priv->lock); while (!list_empty(&priv->cm.passive_ids)) { p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); @@ -789,7 +793,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, } p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p, - ipoib_sendq_size + 1); + ipoib_sendq_size + 1, 0); if (IS_ERR(p->cq)) { ret = PTR_ERR(p->cq); ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 1bdb9101911a..68d72c6f7ffb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -226,7 +226,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb->dev = dev; /* XXX get correct PACKET_ type here 
*/ skb->pkt_type = PACKET_HOST; - netif_rx_ni(skb); + netif_receive_skb(skb); } else { ipoib_dbg_data(priv, "dropping loopback packet\n"); dev_kfree_skb_any(skb); @@ -280,28 +280,63 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) wc->status, wr_id, wc->vendor_err); } -static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc) +int ipoib_poll(struct net_device *dev, int *budget) { - if (wc->wr_id & IPOIB_CM_OP_SRQ) - ipoib_cm_handle_rx_wc(dev, wc); - else if (wc->wr_id & IPOIB_OP_RECV) - ipoib_ib_handle_rx_wc(dev, wc); - else - ipoib_ib_handle_tx_wc(dev, wc); + struct ipoib_dev_priv *priv = netdev_priv(dev); + int max = min(*budget, dev->quota); + int done; + int t; + int empty; + int n, i; + + done = 0; + empty = 0; + + while (max) { + t = min(IPOIB_NUM_WC, max); + n = ib_poll_cq(priv->cq, t, priv->ibwc); + + for (i = 0; i < n; ++i) { + struct ib_wc *wc = priv->ibwc + i; + + if (wc->wr_id & IPOIB_CM_OP_SRQ) { + ++done; + --max; + ipoib_cm_handle_rx_wc(dev, wc); + } else if (wc->wr_id & IPOIB_OP_RECV) { + ++done; + --max; + ipoib_ib_handle_rx_wc(dev, wc); + } else + ipoib_ib_handle_tx_wc(dev, wc); + } + + if (n != t) { + empty = 1; + break; + } + } + + dev->quota -= done; + *budget -= done; + + if (empty) { + netif_rx_complete(dev); + if (unlikely(ib_req_notify_cq(priv->cq, + IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS)) && + netif_rx_reschedule(dev, 0)) + return 1; + + return 0; + } + + return 1; } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) { - struct net_device *dev = (struct net_device *) dev_ptr; - struct ipoib_dev_priv *priv = netdev_priv(dev); - int n, i; - - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - do { - n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc); - for (i = 0; i < n; ++i) - ipoib_ib_handle_wc(dev, priv->ibwc + i); - } while (n == IPOIB_NUM_WC); + netif_rx_schedule(dev_ptr); } static inline int post_send(struct ipoib_dev_priv *priv, @@ -514,9 +549,10 @@ int ipoib_ib_dev_stop(struct net_device *dev) struct ib_qp_attr qp_attr; unsigned long begin; struct ipoib_tx_buf *tx_req; - int i; + int i, n; clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + netif_poll_disable(dev); ipoib_cm_dev_stop(dev); @@ -568,6 +604,18 @@ int ipoib_ib_dev_stop(struct net_device *dev) goto timeout; } + do { + n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc); + for (i = 0; i < n; ++i) { + if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ) + ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); + else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) + ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); + else + ipoib_ib_handle_tx_wc(dev, priv->ibwc + i); + } + } while (n == IPOIB_NUM_WC); + msleep(1); } @@ -596,6 +644,9 @@ timeout: msleep(1); } + netif_poll_enable(dev); + ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP); + return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b4c380c5a3ba..0a428f2b05c7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -948,6 +948,8 @@ static void ipoib_setup(struct net_device *dev) dev->hard_header = ipoib_hard_header; dev->set_multicast_list = ipoib_set_mcast_list; dev->neigh_setup = ipoib_neigh_setup_dev; + dev->poll = ipoib_poll; + dev->weight = 100; dev->watchdog_timeo = HZ; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 7f3ec205e35f..5c3c6a43a52b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -187,7 +187,7 @@ 
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) if (!ret) size += ipoib_recvq_size; - priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size); + priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); if (IS_ERR(priv->cq)) { printk(KERN_WARNING "%s: failed to create CQ\n", ca->name); goto out_free_mr; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 1fc967464a28..89d6008bb673 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -76,7 +76,7 @@ static int iser_create_device_ib_res(struct iser_device *device) iser_cq_callback, iser_cq_event_callback, (void *)device, - ISER_MAX_CQ_LEN); + ISER_MAX_CQ_LEN, 0); if (IS_ERR(device->cq)) goto cq_err; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 5e8ac577f0ad..39bf057fbc43 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -197,7 +197,7 @@ static int srp_create_target_ib(struct srp_target_port *target) return -ENOMEM; target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion, - NULL, target, SRP_CQ_SIZE); + NULL, target, SRP_CQ_SIZE, 0); if (IS_ERR(target->cq)) { ret = PTR_ERR(target->cq); goto out; @@ -1468,6 +1468,25 @@ static ssize_t show_dgid(struct class_device *cdev, char *buf) be16_to_cpu(((__be16 *) target->path.dgid.raw)[7])); } +static ssize_t show_orig_dgid(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + if (target->state == SRP_TARGET_DEAD || + target->state == SRP_TARGET_REMOVED) + return -ENODEV; + + return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", + be16_to_cpu(target->orig_dgid[0]), + be16_to_cpu(target->orig_dgid[1]), + be16_to_cpu(target->orig_dgid[2]), + be16_to_cpu(target->orig_dgid[3]), + be16_to_cpu(target->orig_dgid[4]), + be16_to_cpu(target->orig_dgid[5]), + be16_to_cpu(target->orig_dgid[6]), + be16_to_cpu(target->orig_dgid[7])); +} + static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(cdev)); @@ -1498,6 +1517,7 @@ static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); +static CLASS_DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); @@ -1508,6 +1528,7 @@ static struct class_device_attribute *srp_host_attrs[] = { &class_device_attr_service_id, &class_device_attr_pkey, &class_device_attr_dgid, + &class_device_attr_orig_dgid, &class_device_attr_zero_req_lim, &class_device_attr_local_ib_port, &class_device_attr_local_ib_device, @@ -1516,7 +1537,8 @@ static struct class_device_attribute *srp_host_attrs[] = { static struct scsi_host_template srp_template = { .module = THIS_MODULE, - .name = DRV_NAME, + .name = "InfiniBand SRP initiator", + .proc_name = DRV_NAME, .info = srp_target_info, .queuecommand = srp_queuecommand, .eh_abort_handler = srp_abort, @@ -1662,6 +1684,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->path.dgid.raw[i] = 
simple_strtoul(dgid, NULL, 16); } kfree(p); + memcpy(target->orig_dgid, target->path.dgid.raw, 16); break; case SRP_OPT_PKEY: diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 2f3319c719a5..1d53c7bc368f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -129,6 +129,7 @@ struct srp_target_port { unsigned int scsi_id; struct ib_sa_path_rec path; + __be16 orig_dgid[8]; struct ib_sa_query *path_query; int path_query_id; diff --git a/drivers/input/touchscreen/hp680_ts_input.c b/drivers/input/touchscreen/hp680_ts_input.c index 249087472740..61c15024c2a0 100644 --- a/drivers/input/touchscreen/hp680_ts_input.c +++ b/drivers/input/touchscreen/hp680_ts_input.c @@ -21,7 +21,7 @@ static void do_softint(void *data); static struct input_dev *hp680_ts_dev; -static DECLARE_WORK(work, do_softint, 0); +static DECLARE_WORK(work, do_softint); static void do_softint(void *data) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 509171ca7fa8..2b4315d7e5d6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3080,7 +3080,7 @@ static int do_md_run(mddev_t * mddev) if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); - invalidate_bdev(rdev->bdev, 0); + invalidate_bdev(rdev->bdev); } md_probe(mddev->unit, NULL, NULL); diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c index 3956c257556c..2d666b56020c 100644 --- a/drivers/media/video/cx88/cx88-alsa.c +++ b/drivers/media/video/cx88/cx88-alsa.c @@ -27,6 +27,8 @@ #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> +#include <linux/dma-mapping.h> + #include <asm/delay.h> #include <sound/driver.h> #include <sound/core.h> diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c index b2eb32e01aee..2ebde2fdbcbe 100644 --- a/drivers/media/video/cx88/cx88-mpeg.c +++ b/drivers/media/video/cx88/cx88-mpeg.c @@ -26,6 +26,7 @@ #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/device.h> +#include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <asm/delay.h> diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index fbce1d50578b..b94ef8ab28c1 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c @@ -1,4 +1,3 @@ - /* * * device driver for Conexant 2388x based TV cards @@ -34,6 +33,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/kthread.h> #include <asm/div64.h> diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index ce47544dc120..fc4cc8ba9e29 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -40,13 +40,11 @@ struct block2mtd_dev { static LIST_HEAD(blkmtd_device_list); -static struct page* page_read(struct address_space *mapping, int index) +static struct page *page_read(struct address_space *mapping, int index) { - filler_t *filler = (filler_t*)mapping->a_ops->readpage; - return read_cache_page(mapping, index, filler, NULL); + return read_mapping_page(mapping, index, NULL); } - /* erase a specified part of the device */ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len) { diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index d847ee1da3d9..3dba5733ed1f 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -940,8 +940,7 @@ static void 
ltree_entry_ctor(void *obj, struct kmem_cache *cache, { struct ltree_entry *le = obj; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) != - SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CTOR_CONSTRUCTOR) return; le->users = 0; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9e6933a5088e..279ec625cec4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -822,7 +822,7 @@ config SMC91X tristate "SMC 91C9x/91C1xxx support" select CRC32 select MII - depends on NET_ETHERNET && (ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00) + depends on NET_ETHERNET && (ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN) help This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h index 042e27e291cd..b112317f033e 100644 --- a/drivers/net/cxgb3/version.h +++ b/drivers/net/cxgb3/version.h @@ -38,7 +38,7 @@ #define DRV_VERSION "1.0-ko" /* Firmware version */ -#define FW_VERSION_MAJOR 3 -#define FW_VERSION_MINOR 3 +#define FW_VERSION_MAJOR 4 +#define FW_VERSION_MINOR 0 #define FW_VERSION_MICRO 0 #endif /* __CHELSIO_VERSION_H */ diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index d2767e6584a9..7053026d6c76 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -55,6 +55,53 @@ #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) +#elif defined(CONFIG_BFIN) + +#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH + +# if defined (CONFIG_BFIN561_EZKIT) +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_IO_SHIFT 0 +#define SMC_NOWAIT 1 +#define SMC_USE_BFIN_DMA 0 + + +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_outsl(a, r, p, l) outsl((unsigned long *)((a) + (r)), p, l) +#define SMC_insl(a, r, p, l) insl ((unsigned long *)((a) + (r)), p, l) +# else +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_IO_SHIFT 0 +#define SMC_NOWAIT 1 +#define SMC_USE_BFIN_DMA 0 + + +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outsw(a, r, p, l) outsw((unsigned long *)((a) + (r)), p, l) +#define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) +# endif +/* check if the mac in reg is valid */ +#define SMC_GET_MAC_ADDR(addr) \ + do { \ + unsigned int __v; \ + __v = SMC_inw(ioaddr, ADDR0_REG); \ + addr[0] = __v; addr[1] = __v >> 8; \ + __v = SMC_inw(ioaddr, ADDR1_REG); \ + addr[2] = __v; addr[3] = __v >> 8; \ + __v = SMC_inw(ioaddr, ADDR2_REG); \ + addr[4] = __v; addr[5] = __v >> 8; \ + if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \ + random_ether_addr(addr); \ + } \ + } while (0) #elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6) /* We can only do 16-bit reads and writes in the static memory space. 
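The Blackfin SMC_GET_MAC_ADDR macro introduced above assembles the station address from three 16-bit address registers, low byte first, and falls back to a random address when the ROM reads back as all ones. A minimal standalone sketch of that assembly (plain userspace C with the register reads stubbed out; the register contents below are made up purely for illustration and are not from the patch):

#include <stdio.h>
#include <string.h>

/* Stand-ins for the three 16-bit address registers; 0xFFFF in every word
 * would model a blank/unprogrammed address ROM. */
static unsigned short addr_regs[3] = { 0x3412, 0x7856, 0xBC9A };

static void smc_get_mac_addr(unsigned char addr[6])
{
	int i;

	/* Each register holds two bytes of the MAC, low byte first,
	 * mirroring the SMC_GET_MAC_ADDR macro in the patch. */
	for (i = 0; i < 3; i++) {
		addr[2 * i]     = addr_regs[i] & 0xff;
		addr[2 * i + 1] = addr_regs[i] >> 8;
	}

	/* The macro tests the first 32 bits for all-ones and then calls
	 * random_ether_addr(); here we only report the condition. */
	if (!memcmp(addr, "\xff\xff\xff\xff", 4))
		printf("MAC looks invalid, would fall back to a random address\n");
}

int main(void)
{
	unsigned char mac[6];

	smc_get_mac_addr(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}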
*/ diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c index 99baabc23599..948efc775a78 100644 --- a/drivers/pcmcia/at91_cf.c +++ b/drivers/pcmcia/at91_cf.c @@ -360,7 +360,6 @@ static struct platform_driver at91_cf_driver = { .name = (char *) driver_name, .owner = THIS_MODULE, }, - .probe = at91_cf_probe, .remove = __exit_p(at91_cf_remove), .suspend = at91_cf_suspend, .resume = at91_cf_resume, @@ -370,7 +369,7 @@ static struct platform_driver at91_cf_driver = { static int __init at91_cf_init(void) { - return platform_driver_register(&at91_cf_driver); + return platform_driver_probe(&at91_cf_driver, at91_cf_probe); } module_init(at91_cf_init); diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 18e111e12339..143c6efc478a 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -234,6 +234,89 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) /*======================================================================*/ +struct pcmcia_dynid { + struct list_head node; + struct pcmcia_device_id id; +}; + +/** + * pcmcia_store_new_id - add a new PCMCIA device ID to this driver and re-probe devices + * @driver: target device driver + * @buf: buffer for scanning device ID data + * @count: input size + * + * Adds a new dynamic PCMCIA device ID to this driver, + * and causes the driver to probe for all devices again. + */ +static ssize_t +pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count) +{ + struct pcmcia_dynid *dynid; + struct pcmcia_driver *pdrv = to_pcmcia_drv(driver); + __u16 match_flags, manf_id, card_id; + __u8 func_id, function, device_no; + __u32 prod_id_hash[4] = {0, 0, 0, 0}; + int fields=0; + int retval = 0; + + fields = sscanf(buf, "%hx %hx %hx %hhx %hhx %hhx %x %x %x %x", + &match_flags, &manf_id, &card_id, &func_id, &function, &device_no, + &prod_id_hash[0], &prod_id_hash[1], &prod_id_hash[2], &prod_id_hash[3]); + if (fields < 6) + return -EINVAL; + + dynid = kzalloc(sizeof(struct pcmcia_dynid), GFP_KERNEL); + if (!dynid) + return -ENOMEM; + + INIT_LIST_HEAD(&dynid->node); + dynid->id.match_flags = match_flags; + dynid->id.manf_id = manf_id; + dynid->id.card_id = card_id; + dynid->id.func_id = func_id; + dynid->id.function = function; + dynid->id.device_no = device_no; + memcpy(dynid->id.prod_id_hash, prod_id_hash, sizeof(__u32) * 4); + + spin_lock(&pdrv->dynids.lock); + list_add_tail(&pdrv->dynids.list, &dynid->node); + spin_unlock(&pdrv->dynids.lock); + + if (get_driver(&pdrv->drv)) { + retval = driver_attach(&pdrv->drv); + put_driver(&pdrv->drv); + } + + if (retval) + return retval; + return count; +} +static DRIVER_ATTR(new_id, S_IWUSR, NULL, pcmcia_store_new_id); + +static void +pcmcia_free_dynids(struct pcmcia_driver *drv) +{ + struct pcmcia_dynid *dynid, *n; + + spin_lock(&drv->dynids.lock); + list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { + list_del(&dynid->node); + kfree(dynid); + } + spin_unlock(&drv->dynids.lock); +} + +static int +pcmcia_create_newid_file(struct pcmcia_driver *drv) +{ + int error = 0; + if (drv->probe != NULL) + error = sysfs_create_file(&drv->drv.kobj, + &driver_attr_new_id.attr); + return error; +} + + /** * pcmcia_register_driver - register a PCMCIA driver with the bus core * @@ -241,6 +324,8 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) */ int pcmcia_register_driver(struct pcmcia_driver *driver) { + int error; + if (!driver) return -EINVAL; @@ -249,10 +334,20 @@ int pcmcia_register_driver(struct pcmcia_driver *driver) /* initialize common fields */ 
driver->drv.bus = &pcmcia_bus_type; driver->drv.owner = driver->owner; + spin_lock_init(&driver->dynids.lock); + INIT_LIST_HEAD(&driver->dynids.list); ds_dbg(3, "registering driver %s\n", driver->drv.name); - return driver_register(&driver->drv); + error = driver_register(&driver->drv); + if (error < 0) + return error; + + error = pcmcia_create_newid_file(driver); + if (error) + driver_unregister(&driver->drv); + + return error; } EXPORT_SYMBOL(pcmcia_register_driver); @@ -263,6 +358,7 @@ void pcmcia_unregister_driver(struct pcmcia_driver *driver) { ds_dbg(3, "unregistering driver %s\n", driver->drv.name); driver_unregister(&driver->drv); + pcmcia_free_dynids(driver); } EXPORT_SYMBOL(pcmcia_unregister_driver); @@ -927,6 +1023,21 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) { struct pcmcia_device * p_dev = to_pcmcia_dev(dev); struct pcmcia_driver * p_drv = to_pcmcia_drv(drv); struct pcmcia_device_id *did = p_drv->id_table; + struct pcmcia_dynid *dynid; + + /* match dynamic devices first */ + spin_lock(&p_drv->dynids.lock); + list_for_each_entry(dynid, &p_drv->dynids.list, node) { + ds_dbg(3, "trying to match %s to %s\n", dev->bus_id, + drv->name); + if (pcmcia_devmatch(p_dev, &dynid->id)) { + ds_dbg(0, "matched %s to %s\n", dev->bus_id, + drv->name); + spin_unlock(&p_drv->dynids.lock); + return 1; + } + } + spin_unlock(&p_drv->dynids.lock); #ifdef CONFIG_PCMCIA_IOCTL /* matching by cardmgr */ diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 95826b92ca4b..ef1eae98ba44 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -354,4 +354,14 @@ config RTC_DRV_V3020 This driver can also be built as a module. If so, the module will be called rtc-v3020. +config RTC_DRV_BFIN + tristate "Blackfin On-Chip RTC" + depends on RTC_CLASS && BFIN + help + If you say yes here you will get support for the + Blackfin On-Chip Real Time Clock. + + This driver can also be built as a module. If so, the module + will be called rtc-bfin. + endmenu diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 92bfe1b3a5fa..9218cf28d6ed 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o +obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c new file mode 100644 index 000000000000..260ead959918 --- /dev/null +++ b/drivers/rtc/rtc-bfin.c @@ -0,0 +1,445 @@ +/* + * Blackfin On-Chip Real Time Clock Driver + * Supports BF531/BF532/BF533/BF534/BF536/BF537 + * + * Copyright 2004-2007 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +/* The biggest issue we deal with in this driver is that register writes are + * synced to the RTC frequency of 1Hz. So if you write to a register and + * attempt to write again before the first write has completed, the new write + * is simply discarded. This can easily be troublesome if userspace disables + * one event (say periodic) and then right after enables an event (say alarm). + * Since all events are maintained in the same interrupt mask register, if + * we wrote to it to disable the first event and then wrote to it again to + * enable the second event, that second event would not be enabled as the + * write would be discarded and things quickly fall apart. 
+ * + * To keep this delay from significantly degrading performance (we, in theory, + * would have to sleep for up to 1 second everytime we wanted to write a + * register), we only check the write pending status before we start to issue + * a new write. We bank on the idea that it doesnt matter when the sync + * happens so long as we don't attempt another write before it does. The only + * time userspace would take this penalty is when they try and do multiple + * operations right after another ... but in this case, they need to take the + * sync penalty, so we should be OK. + * + * Also note that the RTC_ISTAT register does not suffer this penalty; its + * writes to clear status registers complete immediately. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/bcd.h> +#include <linux/rtc.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/delay.h> + +#include <asm/blackfin.h> + +#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __FUNCTION__, __LINE__, ## args) +#define stampit() stamp("here i am") + +struct bfin_rtc { + struct rtc_device *rtc_dev; + struct rtc_time rtc_alarm; + spinlock_t lock; +}; + +/* Bit values for the ISTAT / ICTL registers */ +#define RTC_ISTAT_WRITE_COMPLETE 0x8000 +#define RTC_ISTAT_WRITE_PENDING 0x4000 +#define RTC_ISTAT_ALARM_DAY 0x0040 +#define RTC_ISTAT_24HR 0x0020 +#define RTC_ISTAT_HOUR 0x0010 +#define RTC_ISTAT_MIN 0x0008 +#define RTC_ISTAT_SEC 0x0004 +#define RTC_ISTAT_ALARM 0x0002 +#define RTC_ISTAT_STOPWATCH 0x0001 + +/* Shift values for RTC_STAT register */ +#define DAY_BITS_OFF 17 +#define HOUR_BITS_OFF 12 +#define MIN_BITS_OFF 6 +#define SEC_BITS_OFF 0 + +/* Some helper functions to convert between the common RTC notion of time + * and the internal Blackfin notion that is stored in 32bits. + */ +static inline u32 rtc_time_to_bfin(unsigned long now) +{ + u32 sec = (now % 60); + u32 min = (now % (60 * 60)) / 60; + u32 hour = (now % (60 * 60 * 24)) / (60 * 60); + u32 days = (now / (60 * 60 * 24)); + return (sec << SEC_BITS_OFF) + + (min << MIN_BITS_OFF) + + (hour << HOUR_BITS_OFF) + + (days << DAY_BITS_OFF); +} +static inline unsigned long rtc_bfin_to_time(u32 rtc_bfin) +{ + return (((rtc_bfin >> SEC_BITS_OFF) & 0x003F)) + + (((rtc_bfin >> MIN_BITS_OFF) & 0x003F) * 60) + + (((rtc_bfin >> HOUR_BITS_OFF) & 0x001F) * 60 * 60) + + (((rtc_bfin >> DAY_BITS_OFF) & 0x7FFF) * 60 * 60 * 24); +} +static inline void rtc_bfin_to_tm(u32 rtc_bfin, struct rtc_time *tm) +{ + rtc_time_to_tm(rtc_bfin_to_time(rtc_bfin), tm); +} + +/* Wait for the previous write to a RTC register to complete. + * Unfortunately, we can't sleep here as that introduces a race condition when + * turning on interrupt events. Consider this: + * - process sets alarm + * - process enables alarm + * - process sleeps while waiting for rtc write to sync + * - interrupt fires while process is sleeping + * - interrupt acks the event by writing to ISTAT + * - interrupt sets the WRITE PENDING bit + * - interrupt handler finishes + * - process wakes up, sees WRITE PENDING bit set, goes to sleep + * - interrupt fires while process is sleeping + * If anyone can point out the obvious solution here, i'm listening :). This + * shouldn't be an issue on an SMP or preempt system as this function should + * only be called with the rtc lock held. 
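The rtc_time_to_bfin()/rtc_bfin_to_time() helpers above pack days, hours, minutes and seconds into the single 32-bit layout used by RTC_STAT and RTC_ALARM. A standalone round-trip check of that packing, reusing the shift values quoted in the patch (illustrative userspace C, not part of the driver):

#include <stdio.h>

/* Field offsets within the 32-bit RTC_STAT word, as defined in the patch. */
#define DAY_BITS_OFF	17
#define HOUR_BITS_OFF	12
#define MIN_BITS_OFF	6
#define SEC_BITS_OFF	0

static unsigned int time_to_bfin(unsigned long now)
{
	return ((now % 60) << SEC_BITS_OFF) +
	       (((now % 3600) / 60) << MIN_BITS_OFF) +
	       (((now % 86400) / 3600) << HOUR_BITS_OFF) +
	       ((now / 86400) << DAY_BITS_OFF);
}

static unsigned long bfin_to_time(unsigned int reg)
{
	return ((reg >> SEC_BITS_OFF) & 0x3f) +
	       ((reg >> MIN_BITS_OFF) & 0x3f) * 60 +
	       ((reg >> HOUR_BITS_OFF) & 0x1f) * 3600 +
	       ((reg >> DAY_BITS_OFF) & 0x7fff) * 86400;
}

int main(void)
{
	unsigned long now = 1234567;	/* seconds since the RTC epoch */
	unsigned int reg = time_to_bfin(now);

	/* The unpacked value should equal the input, demonstrating that the
	 * fields do not overlap. */
	printf("packed 0x%08x, unpacked %lu\n", reg, bfin_to_time(reg));
	return 0;
}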
+ */ +static void rtc_bfin_sync_pending(void) +{ + stampit(); + while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_COMPLETE)) { + if (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)) + break; + } + bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE); +} + +static void rtc_bfin_reset(struct bfin_rtc *rtc) +{ + /* Initialize the RTC. Enable pre-scaler to scale RTC clock + * to 1Hz and clear interrupt/status registers. */ + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + bfin_write_RTC_PREN(0x1); + bfin_write_RTC_ICTL(0); + bfin_write_RTC_SWCNT(0); + bfin_write_RTC_ALARM(0); + bfin_write_RTC_ISTAT(0xFFFF); + spin_unlock_irq(&rtc->lock); +} + +static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id) +{ + struct platform_device *pdev = to_platform_device(dev_id); + struct bfin_rtc *rtc = platform_get_drvdata(pdev); + unsigned long events = 0; + u16 rtc_istat; + + stampit(); + + spin_lock_irq(&rtc->lock); + + rtc_istat = bfin_read_RTC_ISTAT(); + + if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) { + bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY); + events |= RTC_AF | RTC_IRQF; + } + + if (rtc_istat & RTC_ISTAT_STOPWATCH) { + bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH); + events |= RTC_PF | RTC_IRQF; + bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq); + } + + if (rtc_istat & RTC_ISTAT_SEC) { + bfin_write_RTC_ISTAT(RTC_ISTAT_SEC); + events |= RTC_UF | RTC_IRQF; + } + + rtc_update_irq(rtc->rtc_dev, 1, events); + + spin_unlock_irq(&rtc->lock); + + return IRQ_HANDLED; +} + +static int bfin_rtc_open(struct device *dev) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + int ret; + + stampit(); + + ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_DISABLED, "rtc-bfin", dev); + if (unlikely(ret)) { + dev_err(dev, "request RTC IRQ failed with %d\n", ret); + return ret; + } + + rtc_bfin_reset(rtc); + + return ret; +} + +static void bfin_rtc_release(struct device *dev) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + stampit(); + rtc_bfin_reset(rtc); + free_irq(IRQ_RTC, dev); +} + +static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + + stampit(); + + switch (cmd) { + case RTC_PIE_ON: + stampit(); + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH); + bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq); + bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | RTC_ISTAT_STOPWATCH); + spin_unlock_irq(&rtc->lock); + return 0; + case RTC_PIE_OFF: + stampit(); + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + bfin_write_RTC_SWCNT(0); + bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~RTC_ISTAT_STOPWATCH); + spin_unlock_irq(&rtc->lock); + return 0; + + case RTC_UIE_ON: + stampit(); + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + bfin_write_RTC_ISTAT(RTC_ISTAT_SEC); + bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | RTC_ISTAT_SEC); + spin_unlock_irq(&rtc->lock); + return 0; + case RTC_UIE_OFF: + stampit(); + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~RTC_ISTAT_SEC); + spin_unlock_irq(&rtc->lock); + return 0; + + case RTC_AIE_ON: { + unsigned long rtc_alarm; + u16 which_alarm; + int ret = 0; + + stampit(); + + spin_lock_irq(&rtc->lock); + + rtc_bfin_sync_pending(); + if (rtc->rtc_alarm.tm_yday == -1) { + struct rtc_time now; + rtc_bfin_to_tm(bfin_read_RTC_STAT(), &now); + now.tm_sec = rtc->rtc_alarm.tm_sec; + now.tm_min = rtc->rtc_alarm.tm_min; + now.tm_hour = rtc->rtc_alarm.tm_hour; + ret = rtc_tm_to_time(&now, 
&rtc_alarm); + which_alarm = RTC_ISTAT_ALARM; + } else { + ret = rtc_tm_to_time(&rtc->rtc_alarm, &rtc_alarm); + which_alarm = RTC_ISTAT_ALARM_DAY; + } + if (ret == 0) { + bfin_write_RTC_ISTAT(which_alarm); + bfin_write_RTC_ALARM(rtc_time_to_bfin(rtc_alarm)); + bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | which_alarm); + } + + spin_unlock_irq(&rtc->lock); + + return ret; + } + case RTC_AIE_OFF: + stampit(); + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); + spin_unlock_irq(&rtc->lock); + return 0; + } + + return -ENOIOCTLCMD; +} + +static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + + stampit(); + + spin_lock_irq(&rtc->lock); + rtc_bfin_sync_pending(); + rtc_bfin_to_tm(bfin_read_RTC_STAT(), tm); + spin_unlock_irq(&rtc->lock); + + return 0; +} + +static int bfin_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + int ret; + unsigned long now; + + stampit(); + + spin_lock_irq(&rtc->lock); + + ret = rtc_tm_to_time(tm, &now); + if (ret == 0) { + rtc_bfin_sync_pending(); + bfin_write_RTC_STAT(rtc_time_to_bfin(now)); + } + + spin_unlock_irq(&rtc->lock); + + return ret; +} + +static int bfin_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + stampit(); + memcpy(&alrm->time, &rtc->rtc_alarm, sizeof(struct rtc_time)); + alrm->pending = !!(bfin_read_RTC_ICTL() & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); + return 0; +} + +static int bfin_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + stampit(); + memcpy(&rtc->rtc_alarm, &alrm->time, sizeof(struct rtc_time)); + return 0; +} + +static int bfin_rtc_proc(struct device *dev, struct seq_file *seq) +{ +#define yesno(x) (x ? 
"yes" : "no") + u16 ictl = bfin_read_RTC_ICTL(); + stampit(); + seq_printf(seq, "alarm_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_ALARM)); + seq_printf(seq, "wkalarm_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_ALARM_DAY)); + seq_printf(seq, "seconds_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_SEC)); + seq_printf(seq, "periodic_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_STOPWATCH)); +#ifdef DEBUG + seq_printf(seq, "RTC_STAT\t: 0x%08X\n", bfin_read_RTC_STAT()); + seq_printf(seq, "RTC_ICTL\t: 0x%04X\n", bfin_read_RTC_ICTL()); + seq_printf(seq, "RTC_ISTAT\t: 0x%04X\n", bfin_read_RTC_ISTAT()); + seq_printf(seq, "RTC_SWCNT\t: 0x%04X\n", bfin_read_RTC_SWCNT()); + seq_printf(seq, "RTC_ALARM\t: 0x%08X\n", bfin_read_RTC_ALARM()); + seq_printf(seq, "RTC_PREN\t: 0x%04X\n", bfin_read_RTC_PREN()); +#endif + return 0; +} + +static int bfin_irq_set_freq(struct device *dev, int freq) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + stampit(); + rtc->rtc_dev->irq_freq = freq; + return 0; +} + +static struct rtc_class_ops bfin_rtc_ops = { + .open = bfin_rtc_open, + .release = bfin_rtc_release, + .ioctl = bfin_rtc_ioctl, + .read_time = bfin_rtc_read_time, + .set_time = bfin_rtc_set_time, + .read_alarm = bfin_rtc_read_alarm, + .set_alarm = bfin_rtc_set_alarm, + .proc = bfin_rtc_proc, + .irq_set_freq = bfin_irq_set_freq, +}; + +static int __devinit bfin_rtc_probe(struct platform_device *pdev) +{ + struct bfin_rtc *rtc; + int ret = 0; + + stampit(); + + rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); + if (unlikely(!rtc)) + return -ENOMEM; + + spin_lock_init(&rtc->lock); + + rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); + if (unlikely(IS_ERR(rtc))) { + ret = PTR_ERR(rtc->rtc_dev); + goto err; + } + rtc->rtc_dev->irq_freq = 0; + rtc->rtc_dev->max_user_freq = (2 << 16); /* stopwatch is an unsigned 16 bit reg */ + + platform_set_drvdata(pdev, rtc); + + return 0; + +err: + kfree(rtc); + return ret; +} + +static int __devexit bfin_rtc_remove(struct platform_device *pdev) +{ + struct bfin_rtc *rtc = platform_get_drvdata(pdev); + + rtc_device_unregister(rtc->rtc_dev); + platform_set_drvdata(pdev, NULL); + kfree(rtc); + + return 0; +} + +static struct platform_driver bfin_rtc_driver = { + .driver = { + .name = "rtc-bfin", + .owner = THIS_MODULE, + }, + .probe = bfin_rtc_probe, + .remove = __devexit_p(bfin_rtc_remove), +}; + +static int __init bfin_rtc_init(void) +{ + stampit(); + return platform_driver_register(&bfin_rtc_driver); +} + +static void __exit bfin_rtc_exit(void) +{ + platform_driver_unregister(&bfin_rtc_driver); +} + +module_init(bfin_rtc_init); +module_exit(bfin_rtc_exit); + +MODULE_DESCRIPTION("Blackfin On-Chip Real Time Clock Driver"); +MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 8c766bcd1095..bbeb2451d32f 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/types.h> +#include <linux/delay.h> #include <linux/module.h> #include <linux/init.h> diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 90621c3312bc..c9832d963f1e 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -251,9 +251,16 @@ static const struct serial8250_config uart_config[] = { .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO | UART_CAP_UUE, }, + [PORT_RM9000] = { + .name = "RM9000", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO, + }, }; 
-#ifdef CONFIG_SERIAL_8250_AU1X00 +#if defined (CONFIG_SERIAL_8250_AU1X00) /* Au1x00 UART hardware has a weird register layout */ static const u8 au_io_in_map[] = { @@ -289,6 +296,44 @@ static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) return au_io_out_map[offset]; } +#elif defined (CONFIG_SERIAL_8250_RM9K) + +static const u8 + regmap_in[8] = { + [UART_RX] = 0x00, + [UART_IER] = 0x0c, + [UART_IIR] = 0x14, + [UART_LCR] = 0x1c, + [UART_MCR] = 0x20, + [UART_LSR] = 0x24, + [UART_MSR] = 0x28, + [UART_SCR] = 0x2c + }, + regmap_out[8] = { + [UART_TX] = 0x04, + [UART_IER] = 0x0c, + [UART_FCR] = 0x18, + [UART_LCR] = 0x1c, + [UART_MCR] = 0x20, + [UART_LSR] = 0x24, + [UART_MSR] = 0x28, + [UART_SCR] = 0x2c + }; + +static inline int map_8250_in_reg(struct uart_8250_port *up, int offset) +{ + if (up->port.iotype != UPIO_RM9000) + return offset; + return regmap_in[offset]; +} + +static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) +{ + if (up->port.iotype != UPIO_RM9000) + return offset; + return regmap_out[offset]; +} + #else /* sane hardware needs no mapping */ @@ -308,8 +353,10 @@ static unsigned int serial_in(struct uart_8250_port *up, int offset) return inb(up->port.iobase + 1); case UPIO_MEM: + case UPIO_DWAPB: return readb(up->port.membase + offset); + case UPIO_RM9000: case UPIO_MEM32: return readl(up->port.membase + offset); @@ -333,6 +380,8 @@ static unsigned int serial_in(struct uart_8250_port *up, int offset) static void serial_out(struct uart_8250_port *up, int offset, int value) { + /* Save the offset before it's remapped */ + int save_offset = offset; offset = map_8250_out_reg(up, offset) << up->port.regshift; switch (up->port.iotype) { @@ -345,6 +394,7 @@ serial_out(struct uart_8250_port *up, int offset, int value) writeb(value, up->port.membase + offset); break; + case UPIO_RM9000: case UPIO_MEM32: writel(value, up->port.membase + offset); break; @@ -359,6 +409,18 @@ serial_out(struct uart_8250_port *up, int offset, int value) writeb(value, up->port.membase + offset); break; + case UPIO_DWAPB: + /* Save the LCR value so it can be re-written when a + * Busy Detect interrupt occurs. */ + if (save_offset == UART_LCR) + up->lcr = value; + writeb(value, up->port.membase + offset); + /* Read the IER to ensure any interrupt is cleared before + * returning from ISR. */ + if (save_offset == UART_TX || save_offset == UART_IER) + value = serial_in(up, UART_IER); + break; + default: outb(value, up->port.iobase + offset); } @@ -373,6 +435,7 @@ serial_out_sync(struct uart_8250_port *up, int offset, int value) #ifdef CONFIG_SERIAL_8250_AU1X00 case UPIO_AU: #endif + case UPIO_DWAPB: serial_out(up, offset, value); serial_in(up, UART_LCR); /* safe, no side-effects */ break; @@ -403,7 +466,7 @@ static inline void _serial_dl_write(struct uart_8250_port *up, int value) serial_outp(up, UART_DLM, value >> 8 & 0xff); } -#ifdef CONFIG_SERIAL_8250_AU1X00 +#if defined (CONFIG_SERIAL_8250_AU1X00) /* Au1x00 haven't got a standard divisor latch */ static int serial_dl_read(struct uart_8250_port *up) { @@ -420,6 +483,24 @@ static void serial_dl_write(struct uart_8250_port *up, int value) else _serial_dl_write(up, value); } +#elif defined (CONFIG_SERIAL_8250_RM9K) +static int serial_dl_read(struct uart_8250_port *up) +{ + return (up->port.iotype == UPIO_RM9000) ? 
+ (((__raw_readl(up->port.membase + 0x10) << 8) | + (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff) : + _serial_dl_read(up); +} + +static void serial_dl_write(struct uart_8250_port *up, int value) +{ + if (up->port.iotype == UPIO_RM9000) { + __raw_writel(value, up->port.membase + 0x08); + __raw_writel(value >> 8, up->port.membase + 0x10); + } else { + _serial_dl_write(up, value); + } +} #else #define serial_dl_read(up) _serial_dl_read(up) #define serial_dl_write(up, value) _serial_dl_write(up, value) @@ -621,7 +702,7 @@ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p) * its clones. (We treat the broken original StarTech 16650 V1 as a * 16550, and why not? Startech doesn't seem to even acknowledge its * existence.) - * + * * What evil have men's minds wrought... */ static void autoconfig_has_efr(struct uart_8250_port *up) @@ -674,7 +755,7 @@ static void autoconfig_has_efr(struct uart_8250_port *up) up->bugs |= UART_BUG_QUOT; return; } - + /* * We check for a XR16C850 by setting DLL and DLM to 0, and then * reading back DLL and DLM. The chip type depends on the DLM @@ -817,7 +898,7 @@ static void autoconfig_16550a(struct uart_8250_port *up) status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ serial_outp(up, 0x04, status1); - + serial_dl_write(up, quot); serial_outp(up, UART_LCR, 0); @@ -922,7 +1003,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) /* * Do a simple existence test first; if we fail this, * there's no point trying anything else. - * + * * 0x80 is used as a nonsense port to prevent against * false positives due to ISA bus float. The * assumption is that 0x80 is a non-existent port; @@ -961,7 +1042,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) save_mcr = serial_in(up, UART_MCR); save_lcr = serial_in(up, UART_LCR); - /* + /* * Check to see if a UART is really there. Certain broken * internal modems based on the Rockwell chipset fail this * test, because they apparently don't implement the loopback @@ -1068,7 +1149,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) else serial_outp(up, UART_IER, 0); - out: + out: spin_unlock_irqrestore(&up->port.lock, flags); // restore_flags(flags); DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name); @@ -1094,7 +1175,7 @@ static void autoconfig_irq(struct uart_8250_port *up) save_mcr = serial_inp(up, UART_MCR); save_ier = serial_inp(up, UART_IER); serial_outp(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2); - + irqs = probe_irq_on(); serial_outp(up, UART_MCR, 0); udelay (10); @@ -1159,8 +1240,11 @@ static void serial8250_start_tx(struct uart_port *port) if (up->bugs & UART_BUG_TXEN) { unsigned char lsr, iir; lsr = serial_in(up, UART_LSR); - iir = serial_in(up, UART_IIR); - if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) + iir = serial_in(up, UART_IIR) & 0x0f; + if ((up->port.type == PORT_RM9000) ? + (lsr & UART_LSR_THRE && + (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) : + (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT)) transmit_chars(up); } } @@ -1389,6 +1473,19 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id) handled = 1; end = NULL; + } else if (up->port.iotype == UPIO_DWAPB && + (iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* The DesignWare APB UART has an Busy Detect (0x07) + * interrupt meaning an LCR write attempt occured while the + * UART was busy. 
The interrupt must be cleared by reading + * the UART status register (USR) and the LCR re-written. */ + unsigned int status; + status = *(volatile u32 *)up->port.private_data; + serial_out(up, UART_LCR, up->lcr); + + handled = 1; + + end = NULL; } else if (end == NULL) end = l; @@ -1928,7 +2025,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, /* * Ask the core to calculate the divisor for us. */ - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = serial8250_get_divisor(port, baud); /* @@ -2090,6 +2187,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM: + case UPIO_DWAPB: if (!up->port.mapbase) break; @@ -2127,6 +2225,7 @@ static void serial8250_release_std_resource(struct uart_8250_port *up) case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM: + case UPIO_DWAPB: if (!up->port.mapbase) break; diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index ad9f321968e1..924e9bd757f0 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -254,6 +254,15 @@ config SERIAL_8250_AU1X00 to this option. The driver can handle 1 or 2 serial ports. If unsure, say N. +config SERIAL_8250_RM9K + bool "Support for MIPS RM9xxx integrated serial port" + depends on SERIAL_8250 != n && SERIAL_RM9000 + select SERIAL_8250_SHARE_IRQ + help + Selecting this option will add support for the integrated serial + port hardware found on MIPS RM9122 and similar processors. + If unsure, say N. + comment "Non-8250 serial port support" config SERIAL_AMBA_PL010 @@ -499,6 +508,100 @@ config SERIAL_SA1100_CONSOLE your boot loader (lilo or loadlin) about how to pass options to the kernel at boot time.) +config SERIAL_BFIN + tristate "Blackfin serial port support" + depends on BFIN + select SERIAL_CORE + select SERIAL_BFIN_UART0 if (BF531 || BF532 || BF533 || BF561) + help + Add support for the built-in UARTs on the Blackfin. + + To compile this driver as a module, choose M here: the + module will be called bfin_5xx. + +config SERIAL_BFIN_CONSOLE + bool "Console on Blackfin serial port" + depends on SERIAL_BFIN + select SERIAL_CORE_CONSOLE + +choice + prompt "UART Mode" + depends on SERIAL_BFIN + default SERIAL_BFIN_DMA + help + This driver supports the built-in serial ports of the Blackfin family + of CPUs + +config SERIAL_BFIN_DMA + bool "DMA mode" + depends on DMA_UNCACHED_1M + help + This driver works under DMA mode. If this option is selected, the + blackfin simple dma driver is also enabled. + +config SERIAL_BFIN_PIO + bool "PIO mode" + help + This driver works under PIO mode. + +endchoice + +config SERIAL_BFIN_UART0 + bool "Enable UART0" + depends on SERIAL_BFIN + help + Enable UART0 + +config BFIN_UART0_CTSRTS + bool "Enable UART0 hardware flow control" + depends on SERIAL_BFIN_UART0 + help + Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS + signal. + +config UART0_CTS_PIN + int "UART0 CTS pin" + depends on BFIN_UART0_CTSRTS + default 23 + help + The default pin is GPIO_GP7. + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + +config UART0_RTS_PIN + int "UART0 RTS pin" + depends on BFIN_UART0_CTSRTS + default 22 + help + The default pin is GPIO_GP6. + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. 
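The CTS/RTS pin options above only select GPIO numbers (the UART1 pin options continue below); the actual flow control in the driver, bfin_serial_get_mctrl() further down in the patch, simply samples the CTS GPIO and treats a low level as clear-to-send. A simplified standalone sketch of that mapping, with the GPIO read stubbed out and pin 23 (the UART0 default above) assumed:

#include <stdio.h>

/* Stand-ins: in the driver TIOCM_CTS comes from <linux/tty.h> and
 * gpio_get_value() from <asm/gpio.h>; here both are modeled locally. */
#define TIOCM_CTS 0x020
static int cts_level;	/* simulated CTS pin state, active low */

static int gpio_get_value(int pin)
{
	(void)pin;
	return cts_level;
}

/* Mirrors the core of bfin_serial_get_mctrl(): a negative pin number means
 * no flow control (always clear to send); a low GPIO level means the peer
 * is ready; a high level means throttle transmission. */
static unsigned int get_mctrl(int cts_pin)
{
	if (cts_pin < 0 || !gpio_get_value(cts_pin))
		return TIOCM_CTS;
	return 0;
}

int main(void)
{
	cts_level = 0;
	printf("CTS low : %s\n", get_mctrl(23) & TIOCM_CTS ? "clear to send" : "stopped");
	cts_level = 1;
	printf("CTS high: %s\n", get_mctrl(23) & TIOCM_CTS ? "clear to send" : "stopped");
	return 0;
}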
+ +config SERIAL_BFIN_UART1 + bool "Enable UART1" + depends on SERIAL_BFIN && (BF534 || BF536 || BF537) + help + Enable UART1 + +config BFIN_UART1_CTSRTS + bool "Enable UART1 hardware flow control" + depends on SERIAL_BFIN_UART1 + help + Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS + signal. + +config UART1_CTS_PIN + int "UART1 CTS pin" + depends on BFIN_UART1_CTSRTS + default -1 + help + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + +config UART1_RTS_PIN + int "UART1 RTS pin" + depends on BFIN_UART1_CTSRTS + default -1 + help + Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. + config SERIAL_IMX bool "IMX serial port support" depends on ARM && ARCH_IMX diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index 6b3560c5749a..4959bcb8d1ef 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o obj-$(CONFIG_SERIAL_PXA) += pxa.o obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o obj-$(CONFIG_SERIAL_SA1100) += sa1100.o +obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c new file mode 100644 index 000000000000..408390f93db9 --- /dev/null +++ b/drivers/serial/bfin_5xx.c @@ -0,0 +1,1012 @@ +/* + * File: drivers/serial/bfin_5xx.c + * Based on: Based on drivers/serial/sa1100.c + * Author: Aubrey Li <aubrey.li@analog.com> + * + * Created: + * Description: Driver for blackfin 5xx serial ports + * + * Rev: $Id: bfin_5xx.c,v 1.19 2006/09/24 02:33:53 aubrey Exp $ + * + * Modified: + * Copyright 2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/console.h> +#include <linux/sysrq.h> +#include <linux/platform_device.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial_core.h> + +#include <asm/gpio.h> +#include <asm/mach/bfin_serial_5xx.h> + +#ifdef CONFIG_SERIAL_BFIN_DMA +#include <linux/dma-mapping.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/cacheflush.h> +#endif + +/* UART name and device definitions */ +#define BFIN_SERIAL_NAME "ttyBF" +#define BFIN_SERIAL_MAJOR 204 +#define BFIN_SERIAL_MINOR 64 + +/* + * Setup for console. 
Argument comes from the menuconfig + */ +#define DMA_RX_XCOUNT 512 +#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) + +#define DMA_RX_FLUSH_JIFFIES 5 + +#ifdef CONFIG_SERIAL_BFIN_DMA +static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); +#else +static void bfin_serial_do_work(struct work_struct *work); +static void bfin_serial_tx_chars(struct bfin_serial_port *uart); +static void local_put_char(struct bfin_serial_port *uart, char ch); +#endif + +static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); + +/* + * interrupts are disabled on entry + */ +static void bfin_serial_stop_tx(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + disable_dma(uart->tx_dma_channel); +#else + unsigned short ier; + + ier = UART_GET_IER(uart); + ier &= ~ETBEI; + UART_PUT_IER(uart, ier); +#endif +} + +/* + * port is locked and interrupts are disabled + */ +static void bfin_serial_start_tx(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + bfin_serial_dma_tx_chars(uart); +#else + unsigned short ier; + ier = UART_GET_IER(uart); + ier |= ETBEI; + UART_PUT_IER(uart, ier); + bfin_serial_tx_chars(uart); +#endif +} + +/* + * Interrupts are enabled + */ +static void bfin_serial_stop_rx(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + unsigned short ier; + + ier = UART_GET_IER(uart); + ier &= ~ERBFI; + UART_PUT_IER(uart, ier); +} + +/* + * Set the modem control timer to fire immediately. + */ +static void bfin_serial_enable_ms(struct uart_port *port) +{ +} + +#ifdef CONFIG_SERIAL_BFIN_PIO +static void local_put_char(struct bfin_serial_port *uart, char ch) +{ + unsigned short status; + int flags = 0; + + spin_lock_irqsave(&uart->port.lock, flags); + + do { + status = UART_GET_LSR(uart); + } while (!(status & THRE)); + + UART_PUT_CHAR(uart, ch); + SSYNC(); + + spin_unlock_irqrestore(&uart->port.lock, flags); +} + +static void bfin_serial_rx_chars(struct bfin_serial_port *uart) +{ + struct tty_struct *tty = uart->port.info?uart->port.info->tty:0; + unsigned int status, ch, flg; +#ifdef BF533_FAMILY + static int in_break = 0; +#endif + + status = UART_GET_LSR(uart); + ch = UART_GET_CHAR(uart); + uart->port.icount.rx++; + +#ifdef BF533_FAMILY + /* The BF533 family of processors have a nice misbehavior where + * they continuously generate characters for a "single" break. + * We have to basically ignore this flood until the "next" valid + * character comes across. All other Blackfin families operate + * properly though. 
+ */ + if (in_break) { + if (ch != 0) { + in_break = 0; + ch = UART_GET_CHAR(uart); + } + return; + } +#endif + + if (status & BI) { +#ifdef BF533_FAMILY + in_break = 1; +#endif + uart->port.icount.brk++; + if (uart_handle_break(&uart->port)) + goto ignore_char; + flg = TTY_BREAK; + } else if (status & PE) { + flg = TTY_PARITY; + uart->port.icount.parity++; + } else if (status & OE) { + flg = TTY_OVERRUN; + uart->port.icount.overrun++; + } else if (status & FE) { + flg = TTY_FRAME; + uart->port.icount.frame++; + } else + flg = TTY_NORMAL; + + if (uart_handle_sysrq_char(&uart->port, ch)) + goto ignore_char; + if (tty) + uart_insert_char(&uart->port, status, 2, ch, flg); + +ignore_char: + if (tty) + tty_flip_buffer_push(tty); +} + +static void bfin_serial_tx_chars(struct bfin_serial_port *uart) +{ + struct circ_buf *xmit = &uart->port.info->xmit; + + if (uart->port.x_char) { + UART_PUT_CHAR(uart, uart->port.x_char); + uart->port.icount.tx++; + uart->port.x_char = 0; + return; + } + /* + * Check the modem control lines before + * transmitting anything. + */ + bfin_serial_mctrl_check(uart); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { + bfin_serial_stop_tx(&uart->port); + return; + } + + local_put_char(uart, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + uart->port.icount.tx++; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uart->port); + + if (uart_circ_empty(xmit)) + bfin_serial_stop_tx(&uart->port); +} + +static irqreturn_t bfin_serial_int(int irq, void *dev_id) +{ + struct bfin_serial_port *uart = dev_id; + unsigned short status; + + spin_lock(&uart->port.lock); + status = UART_GET_IIR(uart); + do { + if ((status & IIR_STATUS) == IIR_TX_READY) + bfin_serial_tx_chars(uart); + if ((status & IIR_STATUS) == IIR_RX_READY) + bfin_serial_rx_chars(uart); + status = UART_GET_IIR(uart); + } while (status & (IIR_TX_READY | IIR_RX_READY)); + spin_unlock(&uart->port.lock); + return IRQ_HANDLED; +} + +static void bfin_serial_do_work(struct work_struct *work) +{ + struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); + + bfin_serial_mctrl_check(uart); +} + +#endif + +#ifdef CONFIG_SERIAL_BFIN_DMA +static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) +{ + struct circ_buf *xmit = &uart->port.info->xmit; + unsigned short ier; + int flags = 0; + + if (!uart->tx_done) + return; + + uart->tx_done = 0; + + if (uart->port.x_char) { + UART_PUT_CHAR(uart, uart->port.x_char); + uart->port.icount.tx++; + uart->port.x_char = 0; + uart->tx_done = 1; + return; + } + /* + * Check the modem control lines before + * transmitting anything. 
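bfin_serial_tx_chars() above drains the generic xmit ring, advancing the tail with a power-of-two mask instead of a modulo. A tiny standalone illustration of that wrap-around (the real UART_XMIT_SIZE is a page; a size of 16 is assumed here purely for the demo, the only requirement being a power of two):

#include <stdio.h>

#define XMIT_SIZE 16	/* demo value; must be a power of two */

int main(void)
{
	unsigned int tail = 14;
	int i;

	/* Same idiom as xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1):
	 * ANDing with size-1 makes the index wrap to 0 after size-1. */
	for (i = 0; i < 4; i++) {
		printf("tail=%u\n", tail);
		tail = (tail + 1) & (XMIT_SIZE - 1);
	}
	return 0;
}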
+ */ + bfin_serial_mctrl_check(uart); + + if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { + bfin_serial_stop_tx(&uart->port); + uart->tx_done = 1; + return; + } + + spin_lock_irqsave(&uart->port.lock, flags); + uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); + if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) + uart->tx_count = UART_XMIT_SIZE - xmit->tail; + blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail), + (unsigned long)(xmit->buf+xmit->tail+uart->tx_count)); + set_dma_config(uart->tx_dma_channel, + set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, + INTR_ON_BUF, + DIMENSION_LINEAR, + DATA_SIZE_8)); + set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); + set_dma_x_count(uart->tx_dma_channel, uart->tx_count); + set_dma_x_modify(uart->tx_dma_channel, 1); + enable_dma(uart->tx_dma_channel); + ier = UART_GET_IER(uart); + ier |= ETBEI; + UART_PUT_IER(uart, ier); + spin_unlock_irqrestore(&uart->port.lock, flags); +} + +static void bfin_serial_dma_rx_chars(struct bfin_serial_port * uart) +{ + struct tty_struct *tty = uart->port.info->tty; + int i, flg, status; + + status = UART_GET_LSR(uart); + uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; + + if (status & BI) { + uart->port.icount.brk++; + if (uart_handle_break(&uart->port)) + goto dma_ignore_char; + flg = TTY_BREAK; + } else if (status & PE) { + flg = TTY_PARITY; + uart->port.icount.parity++; + } else if (status & OE) { + flg = TTY_OVERRUN; + uart->port.icount.overrun++; + } else if (status & FE) { + flg = TTY_FRAME; + uart->port.icount.frame++; + } else + flg = TTY_NORMAL; + + for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { + if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) + goto dma_ignore_char; + uart_insert_char(&uart->port, status, 2, uart->rx_dma_buf.buf[i], flg); + } +dma_ignore_char: + tty_flip_buffer_push(tty); +} + +void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) +{ + int x_pos, pos; + int flags = 0; + + bfin_serial_dma_tx_chars(uart); + + spin_lock_irqsave(&uart->port.lock, flags); + x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); + if (x_pos == DMA_RX_XCOUNT) + x_pos = 0; + + pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; + + if (pos>uart->rx_dma_buf.tail) { + uart->rx_dma_buf.tail = pos; + bfin_serial_dma_rx_chars(uart); + uart->rx_dma_buf.head = uart->rx_dma_buf.tail; + } + spin_unlock_irqrestore(&uart->port.lock, flags); + uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; + add_timer(&(uart->rx_dma_timer)); +} + +static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) +{ + struct bfin_serial_port *uart = dev_id; + struct circ_buf *xmit = &uart->port.info->xmit; + unsigned short ier; + + spin_lock(&uart->port.lock); + if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { + clear_dma_irqstat(uart->tx_dma_channel); + disable_dma(uart->tx_dma_channel); + ier = UART_GET_IER(uart); + ier &= ~ETBEI; + UART_PUT_IER(uart, ier); + xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); + uart->port.icount.tx+=uart->tx_count; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uart->port); + + if (uart_circ_empty(xmit)) + bfin_serial_stop_tx(&uart->port); + uart->tx_done = 1; + } + + spin_unlock(&uart->port.lock); + return IRQ_HANDLED; +} + +static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) +{ + struct bfin_serial_port *uart = dev_id; + unsigned short irqstat; + + 
uart->rx_dma_nrows++; + if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { + uart->rx_dma_nrows = 0; + uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; + bfin_serial_dma_rx_chars(uart); + uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; + } + spin_lock(&uart->port.lock); + irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); + clear_dma_irqstat(uart->rx_dma_channel); + + spin_unlock(&uart->port.lock); + return IRQ_HANDLED; +} +#endif + +/* + * Return TIOCSER_TEMT when transmitter is not busy. + */ +static unsigned int bfin_serial_tx_empty(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + unsigned short lsr; + + lsr = UART_GET_LSR(uart); + if (lsr & TEMT) + return TIOCSER_TEMT; + else + return 0; +} + +static unsigned int bfin_serial_get_mctrl(struct uart_port *port) +{ +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + if (uart->cts_pin < 0) + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; + + if (gpio_get_value(uart->cts_pin)) + return TIOCM_DSR | TIOCM_CAR; + else +#endif + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + if (uart->rts_pin < 0) + return; + + if (mctrl & TIOCM_RTS) + gpio_set_value(uart->rts_pin, 0); + else + gpio_set_value(uart->rts_pin, 1); +#endif +} + +/* + * Handle any change of modem status signal since we were last called. + */ +static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) +{ +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + unsigned int status; +# ifdef CONFIG_SERIAL_BFIN_DMA + struct uart_info *info = uart->port.info; + struct tty_struct *tty = info->tty; + + status = bfin_serial_get_mctrl(&uart->port); + if (!(status & TIOCM_CTS)) { + tty->hw_stopped = 1; + } else { + tty->hw_stopped = 0; + } +# else + status = bfin_serial_get_mctrl(&uart->port); + uart_handle_cts_change(&uart->port, status & TIOCM_CTS); + if (!(status & TIOCM_CTS)) + schedule_work(&uart->cts_workqueue); +# endif +#endif +} + +/* + * Interrupts are always disabled. 
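The DMA receive path above treats its page-sized buffer as a 2D ring: each row interrupt bumps rx_dma_nrows, and the flush timer derives the current fill point from the completed rows plus the hardware's remaining X count. A standalone sketch of that position arithmetic, assuming the 512-byte rows and a 4096-byte page as in the patch:

#include <stdio.h>

#define DMA_RX_XCOUNT 512
#define DMA_RX_YCOUNT (4096 / DMA_RX_XCOUNT)	/* 8 rows per page */

/* curr_xcount counts down from DMA_RX_XCOUNT as a row fills, which is why
 * the driver subtracts it from DMA_RX_XCOUNT to get the in-row offset. */
static int fill_pos(int completed_rows, int curr_xcount)
{
	int x_pos = DMA_RX_XCOUNT - curr_xcount;

	if (x_pos == DMA_RX_XCOUNT)	/* row boundary: treat as offset 0 */
		x_pos = 0;

	return completed_rows * DMA_RX_XCOUNT + x_pos;
}

int main(void)
{
	/* Three full rows done, 100 bytes still pending in the current row:
	 * the consumer may drain up to 3*512 + 412 = 1948 bytes. */
	printf("fill position = %d bytes\n", fill_pos(3, 100));
	return 0;
}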
+ */ +static void bfin_serial_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int bfin_serial_startup(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + dma_addr_t dma_handle; + + if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) { + printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n"); + return -EBUSY; + } + + if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) { + printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n"); + free_dma(uart->rx_dma_channel); + return -EBUSY; + } + + set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart); + set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart); + + uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); + uart->rx_dma_buf.head = 0; + uart->rx_dma_buf.tail = 0; + uart->rx_dma_nrows = 0; + + set_dma_config(uart->rx_dma_channel, + set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, + INTR_ON_ROW, DIMENSION_2D, + DATA_SIZE_8)); + set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT); + set_dma_x_modify(uart->rx_dma_channel, 1); + set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT); + set_dma_y_modify(uart->rx_dma_channel, 1); + set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf); + enable_dma(uart->rx_dma_channel); + + uart->rx_dma_timer.data = (unsigned long)(uart); + uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout; + uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; + add_timer(&(uart->rx_dma_timer)); +#else + if (request_irq + (uart->port.irq, bfin_serial_int, IRQF_DISABLED, + "BFIN_UART_RX", uart)) { + printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n"); + return -EBUSY; + } + + if (request_irq + (uart->port.irq+1, bfin_serial_int, IRQF_DISABLED, + "BFIN_UART_TX", uart)) { + printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n"); + free_irq(uart->port.irq, uart); + return -EBUSY; + } +#endif + UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI); + return 0; +} + +static void bfin_serial_shutdown(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + +#ifdef CONFIG_SERIAL_BFIN_DMA + disable_dma(uart->tx_dma_channel); + free_dma(uart->tx_dma_channel); + disable_dma(uart->rx_dma_channel); + free_dma(uart->rx_dma_channel); + del_timer(&(uart->rx_dma_timer)); +#else + free_irq(uart->port.irq, uart); + free_irq(uart->port.irq+1, uart); +#endif +} + +static void +bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + unsigned long flags; + unsigned int baud, quot; + unsigned short val, ier, lsr, lcr = 0; + + switch (termios->c_cflag & CSIZE) { + case CS8: + lcr = WLS(8); + break; + case CS7: + lcr = WLS(7); + break; + case CS6: + lcr = WLS(6); + break; + case CS5: + lcr = WLS(5); + break; + default: + printk(KERN_ERR "%s: word lengh not supported\n", + __FUNCTION__); + } + + if (termios->c_cflag & CSTOPB) + lcr |= STB; + if (termios->c_cflag & PARENB) { + lcr |= PEN; + if (!(termios->c_cflag & PARODD)) + lcr |= EPS; + } + + /* These controls are not implemented for this port */ + termios->c_iflag |= INPCK | BRKINT | PARMRK; + termios->c_iflag &= ~(IGNPAR | IGNBRK); + + /* These controls are not implemented for this port */ + termios->c_iflag |= INPCK | BRKINT | PARMRK; + termios->c_iflag &= ~(IGNPAR | IGNBRK); + + 
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + spin_lock_irqsave(&uart->port.lock, flags); + + do { + lsr = UART_GET_LSR(uart); + } while (!(lsr & TEMT)); + + /* Disable UART */ + ier = UART_GET_IER(uart); + UART_PUT_IER(uart, 0); + + /* Set DLAB in LCR to Access DLL and DLH */ + val = UART_GET_LCR(uart); + val |= DLAB; + UART_PUT_LCR(uart, val); + SSYNC(); + + UART_PUT_DLL(uart, quot & 0xFF); + SSYNC(); + UART_PUT_DLH(uart, (quot >> 8) & 0xFF); + SSYNC(); + + /* Clear DLAB in LCR to Access THR RBR IER */ + val = UART_GET_LCR(uart); + val &= ~DLAB; + UART_PUT_LCR(uart, val); + SSYNC(); + + UART_PUT_LCR(uart, lcr); + + /* Enable UART */ + UART_PUT_IER(uart, ier); + + val = UART_GET_GCTL(uart); + val |= UCEN; + UART_PUT_GCTL(uart, val); + + spin_unlock_irqrestore(&uart->port.lock, flags); +} + +static const char *bfin_serial_type(struct uart_port *port) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + + return uart->port.type == PORT_BFIN ? "BFIN-UART" : NULL; +} + +/* + * Release the memory region(s) being used by 'port'. + */ +static void bfin_serial_release_port(struct uart_port *port) +{ +} + +/* + * Request the memory region(s) being used by 'port'. + */ +static int bfin_serial_request_port(struct uart_port *port) +{ + return 0; +} + +/* + * Configure/autoconfigure the port. + */ +static void bfin_serial_config_port(struct uart_port *port, int flags) +{ + struct bfin_serial_port *uart = (struct bfin_serial_port *)port; + + if (flags & UART_CONFIG_TYPE && + bfin_serial_request_port(&uart->port) == 0) + uart->port.type = PORT_BFIN; +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + * The only change we allow are to the flags and type, and + * even then only between PORT_BFIN and PORT_UNKNOWN + */ +static int +bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + return 0; +} + +static struct uart_ops bfin_serial_pops = { + .tx_empty = bfin_serial_tx_empty, + .set_mctrl = bfin_serial_set_mctrl, + .get_mctrl = bfin_serial_get_mctrl, + .stop_tx = bfin_serial_stop_tx, + .start_tx = bfin_serial_start_tx, + .stop_rx = bfin_serial_stop_rx, + .enable_ms = bfin_serial_enable_ms, + .break_ctl = bfin_serial_break_ctl, + .startup = bfin_serial_startup, + .shutdown = bfin_serial_shutdown, + .set_termios = bfin_serial_set_termios, + .type = bfin_serial_type, + .release_port = bfin_serial_release_port, + .request_port = bfin_serial_request_port, + .config_port = bfin_serial_config_port, + .verify_port = bfin_serial_verify_port, +}; + +static void __init bfin_serial_init_ports(void) +{ + static int first = 1; + int i; + + if (!first) + return; + first = 0; + + for (i = 0; i < nr_ports; i++) { + bfin_serial_ports[i].port.uartclk = get_sclk(); + bfin_serial_ports[i].port.ops = &bfin_serial_pops; + bfin_serial_ports[i].port.line = i; + bfin_serial_ports[i].port.iotype = UPIO_MEM; + bfin_serial_ports[i].port.membase = + (void __iomem *)bfin_serial_resource[i].uart_base_addr; + bfin_serial_ports[i].port.mapbase = + bfin_serial_resource[i].uart_base_addr; + bfin_serial_ports[i].port.irq = + bfin_serial_resource[i].uart_irq; + bfin_serial_ports[i].port.flags = UPF_BOOT_AUTOCONF; +#ifdef CONFIG_SERIAL_BFIN_DMA + bfin_serial_ports[i].tx_done = 1; + bfin_serial_ports[i].tx_count = 0; + bfin_serial_ports[i].tx_dma_channel = + bfin_serial_resource[i].uart_tx_dma_channel; + bfin_serial_ports[i].rx_dma_channel = + bfin_serial_resource[i].uart_rx_dma_channel; + 
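+		/*
+		 * rx_dma_timer is armed in bfin_serial_startup() with
+		 * DMA_RX_FLUSH_JIFFIES and calls bfin_serial_rx_dma_timeout(),
+		 * so data the autobuffered 2D DMA has written into rx_dma_buf
+		 * is collected periodically, not only on row interrupts.
+		 */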
+		init_timer(&(bfin_serial_ports[i].rx_dma_timer));
+#else
+		INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work);
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+		bfin_serial_ports[i].cts_pin =
+			bfin_serial_resource[i].uart_cts_pin;
+		bfin_serial_ports[i].rts_pin =
+			bfin_serial_resource[i].uart_rts_pin;
+#endif
+		bfin_serial_hw_init(&bfin_serial_ports[i]);
+
+	}
+}
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+	while (!(UART_GET_LSR(uart)))
+		barrier();
+	UART_PUT_CHAR(uart, ch);
+	SSYNC();
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct bfin_serial_port *uart = &bfin_serial_ports[co->index];
+	unsigned long flags;
+
+	spin_lock_irqsave(&uart->port.lock, flags);
+	uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+	spin_unlock_irqrestore(&uart->port.lock, flags);
+}
+
+/*
+ * If the port was already initialised (e.g., by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
+				int *parity, int *bits)
+{
+	unsigned short status;
+
+	status = UART_GET_IER(uart) & (ERBFI | ETBEI);
+	if (status == (ERBFI | ETBEI)) {
+		/* ok, the port was enabled */
+		unsigned short lcr, val;
+		unsigned short dlh, dll;
+
+		lcr = UART_GET_LCR(uart);
+
+		*parity = 'n';
+		if (lcr & PEN) {
+			if (lcr & EPS)
+				*parity = 'e';
+			else
+				*parity = 'o';
+		}
+		switch (lcr & 0x03) {
+		case 0:	*bits = 5; break;
+		case 1:	*bits = 6; break;
+		case 2:	*bits = 7; break;
+		case 3:	*bits = 8; break;
+		}
+		/* Set DLAB in LCR to Access DLL and DLH */
+		val = UART_GET_LCR(uart);
+		val |= DLAB;
+		UART_PUT_LCR(uart, val);
+
+		dll = UART_GET_DLL(uart);
+		dlh = UART_GET_DLH(uart);
+
+		/* Clear DLAB in LCR to Access THR RBR IER */
+		val = UART_GET_LCR(uart);
+		val &= ~DLAB;
+		UART_PUT_LCR(uart, val);
+
+		*baud = get_sclk() / (16*(dll | dlh << 8));
+	}
+	pr_debug("%s: baud = %d, parity = %c, bits = %d\n", __FUNCTION__, *baud, *parity, *bits);
+}
+
+static int __init
+bfin_serial_console_setup(struct console *co, char *options)
+{
+	struct bfin_serial_port *uart;
+	int baud = 57600;
+	int bits = 8;
+	int parity = 'n';
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+	int flow = 'r';
+#else
+	int flow = 'n';
+#endif
+
+	/*
+	 * Check whether an invalid uart number has been specified, and
+	 * if so, search for the first available port that does have
+	 * console support.
+ */ + if (co->index == -1 || co->index >= nr_ports) + co->index = 0; + uart = &bfin_serial_ports[co->index]; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + bfin_serial_console_get_options(uart, &baud, &parity, &bits); + + return uart_set_options(&uart->port, co, baud, parity, bits, flow); +} + +static struct uart_driver bfin_serial_reg; +static struct console bfin_serial_console = { + .name = BFIN_SERIAL_NAME, + .write = bfin_serial_console_write, + .device = uart_console_device, + .setup = bfin_serial_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &bfin_serial_reg, +}; + +static int __init bfin_serial_rs_console_init(void) +{ + bfin_serial_init_ports(); + register_console(&bfin_serial_console); + return 0; +} +console_initcall(bfin_serial_rs_console_init); + +#define BFIN_SERIAL_CONSOLE &bfin_serial_console +#else +#define BFIN_SERIAL_CONSOLE NULL +#endif + +static struct uart_driver bfin_serial_reg = { + .owner = THIS_MODULE, + .driver_name = "bfin-uart", + .dev_name = BFIN_SERIAL_NAME, + .major = BFIN_SERIAL_MAJOR, + .minor = BFIN_SERIAL_MINOR, + .nr = NR_PORTS, + .cons = BFIN_SERIAL_CONSOLE, +}; + +static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state) +{ + struct bfin_serial_port *uart = platform_get_drvdata(dev); + + if (uart) + uart_suspend_port(&bfin_serial_reg, &uart->port); + + return 0; +} + +static int bfin_serial_resume(struct platform_device *dev) +{ + struct bfin_serial_port *uart = platform_get_drvdata(dev); + + if (uart) + uart_resume_port(&bfin_serial_reg, &uart->port); + + return 0; +} + +static int bfin_serial_probe(struct platform_device *dev) +{ + struct resource *res = dev->resource; + int i; + + for (i = 0; i < dev->num_resources; i++, res++) + if (res->flags & IORESOURCE_MEM) + break; + + if (i < dev->num_resources) { + for (i = 0; i < nr_ports; i++, res++) { + if (bfin_serial_ports[i].port.mapbase != res->start) + continue; + bfin_serial_ports[i].port.dev = &dev->dev; + uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port); + platform_set_drvdata(dev, &bfin_serial_ports[i]); + } + } + + return 0; +} + +static int bfin_serial_remove(struct platform_device *pdev) +{ + struct bfin_serial_port *uart = platform_get_drvdata(pdev); + + +#ifdef CONFIG_SERIAL_BFIN_CTSRTS + gpio_free(uart->cts_pin); + gpio_free(uart->rts_pin); +#endif + + platform_set_drvdata(pdev, NULL); + + if (uart) + uart_remove_one_port(&bfin_serial_reg, &uart->port); + + return 0; +} + +static struct platform_driver bfin_serial_driver = { + .probe = bfin_serial_probe, + .remove = bfin_serial_remove, + .suspend = bfin_serial_suspend, + .resume = bfin_serial_resume, + .driver = { + .name = "bfin-uart", + }, +}; + +static int __init bfin_serial_init(void) +{ + int ret; + + pr_info("Serial: Blackfin serial driver\n"); + + bfin_serial_init_ports(); + + ret = uart_register_driver(&bfin_serial_reg); + if (ret == 0) { + ret = platform_driver_register(&bfin_serial_driver); + if (ret) { + pr_debug("uart register failed\n"); + uart_unregister_driver(&bfin_serial_reg); + } + } + return ret; +} + +static void __exit bfin_serial_exit(void) +{ + platform_driver_unregister(&bfin_serial_driver); + uart_unregister_driver(&bfin_serial_reg); +} + +module_init(bfin_serial_init); +module_exit(bfin_serial_exit); + +MODULE_AUTHOR("Aubrey.Li <aubrey.li@analog.com>"); +MODULE_DESCRIPTION("Blackfin generic serial port driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR); diff --git 
a/drivers/serial/crisv10.h b/drivers/serial/crisv10.h deleted file mode 100644 index 4a23340663aa..000000000000 --- a/drivers/serial/crisv10.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * serial.h: Arch-dep definitions for the Etrax100 serial driver. - * - * Copyright (C) 1998, 1999, 2000 Axis Communications AB - */ - -#ifndef _ETRAX_SERIAL_H -#define _ETRAX_SERIAL_H - -#include <linux/circ_buf.h> -#include <asm/termios.h> - -/* Software state per channel */ - -#ifdef __KERNEL__ -/* - * This is our internal structure for each serial port's state. - * - * Many fields are paralleled by the structure used by the serial_struct - * structure. - * - * For definitions of the flags field, see tty.h - */ - -#define SERIAL_RECV_DESCRIPTORS 8 - -struct etrax_recv_buffer { - struct etrax_recv_buffer *next; - unsigned short length; - unsigned char error; - unsigned char pad; - - unsigned char buffer[0]; -}; - -struct e100_serial { - int baud; - volatile u8 *port; /* R_SERIALx_CTRL */ - u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */ - - /* Output registers */ - volatile u8 *oclrintradr; /* adr to R_DMA_CHx_CLR_INTR */ - volatile u32 *ofirstadr; /* adr to R_DMA_CHx_FIRST */ - volatile u8 *ocmdadr; /* adr to R_DMA_CHx_CMD */ - const volatile u8 *ostatusadr; /* adr to R_DMA_CHx_STATUS */ - - /* Input registers */ - volatile u8 *iclrintradr; /* adr to R_DMA_CHx_CLR_INTR */ - volatile u32 *ifirstadr; /* adr to R_DMA_CHx_FIRST */ - volatile u8 *icmdadr; /* adr to R_DMA_CHx_CMD */ - volatile u32 *idescradr; /* adr to R_DMA_CHx_DESCR */ - - int flags; /* defined in tty.h */ - - u8 rx_ctrl; /* shadow for R_SERIALx_REC_CTRL */ - u8 tx_ctrl; /* shadow for R_SERIALx_TR_CTRL */ - u8 iseteop; /* bit number for R_SET_EOP for the input dma */ - int enabled; /* Set to 1 if the port is enabled in HW config */ - - u8 dma_out_enabled:1; /* Set to 1 if DMA should be used */ - u8 dma_in_enabled:1; /* Set to 1 if DMA should be used */ - - /* end of fields defined in rs_table[] in .c-file */ - u8 uses_dma_in; /* Set to 1 if DMA is used */ - u8 uses_dma_out; /* Set to 1 if DMA is used */ - u8 forced_eop; /* a fifo eop has been forced */ - int baud_base; /* For special baudrates */ - int custom_divisor; /* For special baudrates */ - struct etrax_dma_descr tr_descr; - struct etrax_dma_descr rec_descr[SERIAL_RECV_DESCRIPTORS]; - int cur_rec_descr; - - volatile int tr_running; /* 1 if output is running */ - - struct tty_struct *tty; - int read_status_mask; - int ignore_status_mask; - int x_char; /* xon/xoff character */ - int close_delay; - unsigned short closing_wait; - unsigned short closing_wait2; - unsigned long event; - unsigned long last_active; - int line; - int type; /* PORT_ETRAX */ - int count; /* # of fd on device */ - int blocked_open; /* # of blocked opens */ - struct circ_buf xmit; - struct etrax_recv_buffer *first_recv_buffer; - struct etrax_recv_buffer *last_recv_buffer; - unsigned int recv_cnt; - unsigned int max_recv_cnt; - - struct work_struct work; - struct async_icount icount; /* error-statistics etc.*/ - struct ktermios normal_termios; - struct ktermios callout_termios; -#ifdef DECLARE_WAITQUEUE - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; -#else - struct wait_queue *open_wait; - struct wait_queue *close_wait; -#endif - - unsigned long char_time_usec; /* The time for 1 char, in usecs */ - unsigned long flush_time_usec; /* How often we should flush */ - unsigned long last_tx_active_usec; /* Last tx usec in the jiffies */ - unsigned long last_tx_active; /* Last tx time in jiffies */ - unsigned 
long last_rx_active_usec; /* Last rx usec in the jiffies */ - unsigned long last_rx_active; /* Last rx time in jiffies */ - - int break_detected_cnt; - int errorcode; - -#ifdef CONFIG_ETRAX_RS485 - struct rs485_control rs485; /* RS-485 support */ -#endif -}; - -/* this PORT is not in the standard serial.h. it's not actually used for - * anything since we only have one type of async serial-port anyway in this - * system. - */ - -#define PORT_ETRAX 1 - -/* - * Events are used to schedule things to happen at timer-interrupt - * time, instead of at rs interrupt time. - */ -#define RS_EVENT_WRITE_WAKEUP 0 - -#endif /* __KERNEL__ */ - -#endif /* !_ETRAX_SERIAL_H */ diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c index 3d2fcc57b1ce..d09f2097d5b0 100644 --- a/drivers/serial/mpsc.c +++ b/drivers/serial/mpsc.c @@ -183,6 +183,7 @@ struct mpsc_port_info { u8 *txb_p; /* Phys addr of txb */ int txr_head; /* Where new data goes */ int txr_tail; /* Where sent data comes off */ + spinlock_t tx_lock; /* transmit lock */ /* Mirrored values of regs we can't read (if 'mirror_regs' set) */ u32 MPSC_MPCR_m; @@ -1212,6 +1213,9 @@ mpsc_tx_intr(struct mpsc_port_info *pi) { struct mpsc_tx_desc *txre; int rc = 0; + unsigned long iflags; + + spin_lock_irqsave(&pi->tx_lock, iflags); if (!mpsc_sdma_tx_active(pi)) { txre = (struct mpsc_tx_desc *)(pi->txr + @@ -1248,6 +1252,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi) mpsc_sdma_start_tx(pi); /* start next desc if ready */ } + spin_unlock_irqrestore(&pi->tx_lock, iflags); return rc; } @@ -1338,11 +1343,16 @@ static void mpsc_start_tx(struct uart_port *port) { struct mpsc_port_info *pi = (struct mpsc_port_info *)port; + unsigned long iflags; + + spin_lock_irqsave(&pi->tx_lock, iflags); mpsc_unfreeze(pi); mpsc_copy_tx_data(pi); mpsc_sdma_start_tx(pi); + spin_unlock_irqrestore(&pi->tx_lock, iflags); + pr_debug("mpsc_start_tx[%d]\n", port->line); return; } @@ -1625,6 +1635,16 @@ mpsc_console_write(struct console *co, const char *s, uint count) struct mpsc_port_info *pi = &mpsc_ports[co->index]; u8 *bp, *dp, add_cr = 0; int i; + unsigned long iflags; + + spin_lock_irqsave(&pi->tx_lock, iflags); + + while (pi->txr_head != pi->txr_tail) { + while (mpsc_sdma_tx_active(pi)) + udelay(100); + mpsc_sdma_intr_ack(pi); + mpsc_tx_intr(pi); + } while (mpsc_sdma_tx_active(pi)) udelay(100); @@ -1668,6 +1688,7 @@ mpsc_console_write(struct console *co, const char *s, uint count) pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1); } + spin_unlock_irqrestore(&pi->tx_lock, iflags); return; } @@ -2005,7 +2026,8 @@ mpsc_drv_probe(struct platform_device *dev) if (!(rc = mpsc_drv_map_regs(pi, dev))) { mpsc_drv_get_platform_data(pi, dev, dev->id); - if (!(rc = mpsc_make_ready(pi))) + if (!(rc = mpsc_make_ready(pi))) { + spin_lock_init(&pi->tx_lock); if (!(rc = uart_add_one_port(&mpsc_reg, &pi->port))) rc = 0; @@ -2014,6 +2036,7 @@ mpsc_drv_probe(struct platform_device *dev) (struct uart_port *)pi); mpsc_drv_unmap_regs(pi); } + } else mpsc_drv_unmap_regs(pi); } diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 09b0b736a751..336d0f4580d9 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c @@ -48,7 +48,8 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev, port->iotype = UPIO_MEM; port->type = type; port->uartclk = *clk; - port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP; + port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP + | UPF_FIXED_PORT; port->dev = &ofdev->dev; port->custom_divisor = *clk 
/ (16 * (*spd)); diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 0422c0f1f852..326020f86f75 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -37,13 +37,6 @@ #include <asm/irq.h> #include <asm/uaccess.h> -#undef DEBUG -#ifdef DEBUG -#define DPRINTK(x...) printk(x) -#else -#define DPRINTK(x...) do { } while (0) -#endif - /* * This is used to lock changes in serial line configuration. */ @@ -552,7 +545,7 @@ static void uart_flush_buffer(struct tty_struct *tty) return; } - DPRINTK("uart_flush_buffer(%d) called\n", tty->index); + pr_debug("uart_flush_buffer(%d) called\n", tty->index); spin_lock_irqsave(&port->lock, flags); uart_circ_clear(&state->info->xmit); @@ -672,19 +665,21 @@ static int uart_set_info(struct uart_state *state, */ mutex_lock(&state->mutex); - change_irq = new_serial.irq != port->irq; + change_irq = !(port->flags & UPF_FIXED_PORT) + && new_serial.irq != port->irq; /* * Since changing the 'type' of the port changes its resource * allocations, we should treat type changes the same as * IO port changes. */ - change_port = new_port != port->iobase || - (unsigned long)new_serial.iomem_base != port->mapbase || - new_serial.hub6 != port->hub6 || - new_serial.io_type != port->iotype || - new_serial.iomem_reg_shift != port->regshift || - new_serial.type != port->type; + change_port = !(port->flags & UPF_FIXED_PORT) + && (new_port != port->iobase || + (unsigned long)new_serial.iomem_base != port->mapbase || + new_serial.hub6 != port->hub6 || + new_serial.io_type != port->iotype || + new_serial.iomem_reg_shift != port->regshift || + new_serial.type != port->type); old_flags = port->flags; new_flags = new_serial.flags; @@ -796,8 +791,10 @@ static int uart_set_info(struct uart_state *state, } } - port->irq = new_serial.irq; - port->uartclk = new_serial.baud_base * 16; + if (change_irq) + port->irq = new_serial.irq; + if (!(port->flags & UPF_FIXED_PORT)) + port->uartclk = new_serial.baud_base * 16; port->flags = (port->flags & ~UPF_CHANGE_MASK) | (new_flags & UPF_CHANGE_MASK); port->custom_divisor = new_serial.custom_divisor; @@ -1220,7 +1217,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) port = state->port; - DPRINTK("uart_close(%d) called\n", port->line); + pr_debug("uart_close(%d) called\n", port->line); mutex_lock(&state->mutex); @@ -1339,7 +1336,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) expire = jiffies + timeout; - DPRINTK("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", + pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", port->line, jiffies, expire); /* @@ -1368,7 +1365,7 @@ static void uart_hangup(struct tty_struct *tty) struct uart_state *state = tty->driver_data; BUG_ON(!kernel_locked()); - DPRINTK("uart_hangup(%d)\n", state->port->line); + pr_debug("uart_hangup(%d)\n", state->port->line); mutex_lock(&state->mutex); if (state->info && state->info->flags & UIF_NORMAL_ACTIVE) { @@ -1566,7 +1563,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) int retval, line = tty->index; BUG_ON(!kernel_locked()); - DPRINTK("uart_open(%d) called\n", line); + pr_debug("uart_open(%d) called\n", line); /* * tty->driver->num won't change, so we won't fail here with @@ -2064,6 +2061,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port) case UPIO_MEM32: case UPIO_AU: case UPIO_TSI: + case UPIO_DWAPB: snprintf(address, sizeof(address), "MMIO 0x%lx", port->mapbase); break; @@ -2409,6 +2407,7 @@ int uart_match_port(struct 
uart_port *port1, struct uart_port *port2) case UPIO_MEM32: case UPIO_AU: case UPIO_TSI: + case UPIO_DWAPB: return (port1->mapbase == port2->mapbase); } return 0; diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 46c40bbc4bc6..1f89496d530e 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -46,6 +46,7 @@ #endif #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) +#include <linux/ctype.h> #include <asm/clock.h> #include <asm/sh_bios.h> #include <asm/kgdb.h> @@ -61,7 +62,7 @@ struct sci_port { unsigned int type; /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ - unsigned int irqs[SCIx_NR_IRQS]; + unsigned int irqs[SCIx_NR_IRQS]; /* Port pin configuration */ void (*init_pins)(struct uart_port *port, @@ -76,6 +77,11 @@ struct sci_port { /* Break timer */ struct timer_list break_timer; int break_flag; + +#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) + /* Port clock */ + struct clk *clk; +#endif }; #ifdef CONFIG_SH_KGDB @@ -163,7 +169,7 @@ static void put_string(struct sci_port *sci_port, const char *buffer, int count) usegdb |= sh_bios_in_gdb_mode(); #endif #ifdef CONFIG_SH_KGDB - usegdb |= (kgdb_in_gdb_mode && (port == kgdb_sci_port)); + usegdb |= (kgdb_in_gdb_mode && (sci_port == kgdb_sci_port)); #endif if (usegdb) { @@ -204,7 +210,7 @@ static int kgdb_sci_getchar(void) int c; /* Keep trying to read a character, this could be neater */ - while ((c = get_char(kgdb_sci_port)) < 0) + while ((c = get_char(&kgdb_sci_port->port)) < 0) cpu_relax(); return c; @@ -212,7 +218,7 @@ static int kgdb_sci_getchar(void) static inline void kgdb_sci_putchar(int c) { - put_char(kgdb_sci_port, c); + put_char(&kgdb_sci_port->port, c); } #endif /* CONFIG_SH_KGDB */ @@ -283,12 +289,23 @@ static void sci_init_pins_irda(struct uart_port *port, unsigned int cflag) #endif #if defined(SCIF_ONLY) || defined(SCI_AND_SCIF) -#if defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7710) +#if defined(CONFIG_CPU_SUBTYPE_SH7300) /* SH7300 doesn't use RTS/CTS */ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) { sci_out(port, SCFCR, 0); } +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) +static void sci_init_pins_scif(struct uart_port* port, unsigned int cflag) +{ + unsigned int fcr_val = 0; + + set_sh771x_scif_pfc(port); + if (cflag & CRTSCTS) { + fcr_val |= SCFCR_MCE; + } + sci_out(port, SCFCR, fcr_val); +} #elif defined(CONFIG_CPU_SH3) /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) @@ -350,7 +367,7 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) } else { #ifdef CONFIG_CPU_SUBTYPE_SH7343 /* Nothing */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7780) +#elif defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785) ctrl_outw(0x0080, SCSPTR0); /* Set RTS = 1 */ #else ctrl_outw(0x0080, SCSPTR2); /* Set RTS = 1 */ @@ -360,7 +377,9 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) } #endif -#if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780) +#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) static inline int scif_txroom(struct uart_port *port) { return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0x7f); @@ -735,12 +754,6 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr) /* Handle BREAKs */ sci_handle_breaks(port); - -#ifdef CONFIG_SH_KGDB - /* Break into the 
debugger if a break is detected */ - BREAKPOINT(); -#endif - sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port)); return IRQ_HANDLED; @@ -947,6 +960,10 @@ static int sci_startup(struct uart_port *port) if (s->enable) s->enable(port); +#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) + s->clk = clk_get(NULL, "module_clk"); +#endif + sci_request_irq(s); sci_start_tx(port); sci_start_rx(port, 1); @@ -964,6 +981,11 @@ static void sci_shutdown(struct uart_port *port) if (s->disable) s->disable(port); + +#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) + clk_put(s->clk); + s->clk = NULL; +#endif } static void sci_set_termios(struct uart_port *port, struct ktermios *termios, @@ -971,7 +993,6 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, { struct sci_port *s = &sci_ports[port->line]; unsigned int status, baud, smr_val; - unsigned long flags; int t; baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); @@ -983,18 +1004,14 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, default: { #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) - struct clk *clk = clk_get(NULL, "module_clk"); - t = SCBRR_VALUE(baud, clk_get_rate(clk)); - clk_put(clk); + t = SCBRR_VALUE(baud, clk_get_rate(s->clk)); #else t = SCBRR_VALUE(baud); #endif - } break; + } } - spin_lock_irqsave(&port->lock, flags); - do { status = sci_in(port, SCxSR); } while (!(status & SCxSR_TEND(port))); @@ -1038,8 +1055,6 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) != 0) sci_start_rx(port,0); - - spin_unlock_irqrestore(&port->lock, flags); } static const char *sci_type(struct uart_port *port) @@ -1220,10 +1235,13 @@ static int __init serial_console_setup(struct console *co, char *options) if (!port->membase || !port->mapbase) return -ENODEV; - spin_lock_init(&port->lock); - port->type = serial_console_port->type; +#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) + if (!serial_console_port->clk) + serial_console_port->clk = clk_get(NULL, "module_clk"); +#endif + if (port->flags & UPF_IOREMAP) sci_config_port(port, 0); @@ -1247,7 +1265,7 @@ static struct console serial_console = { .device = uart_console_device, .write = serial_console_write, .setup = serial_console_setup, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER, .index = -1, .data = &sci_uart_driver, }; @@ -1292,11 +1310,23 @@ int __init kgdb_console_setup(struct console *co, char *options) int parity = 'n'; int flow = 'n'; - spin_lock_init(&port->lock); - if (co->index != kgdb_portnum) co->index = kgdb_portnum; + kgdb_sci_port = &sci_ports[co->index]; + port = &kgdb_sci_port->port; + + /* + * Also need to check port->type, we don't actually have any + * UPIO_PORT ports, but uart_report_port() handily misreports + * it anyways if we don't have a port available by the time this is + * called. 
+ */ + if (!port->type) + return -ENODEV; + if (!port->membase || !port->mapbase) + return -ENODEV; + if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else @@ -1311,11 +1341,12 @@ int __init kgdb_console_setup(struct console *co, char *options) #ifdef CONFIG_SH_KGDB_CONSOLE static struct console kgdb_console = { - .name = "ttySC", - .write = kgdb_console_write, - .setup = kgdb_console_setup, - .flags = CON_PRINTBUFFER | CON_ENABLED, - .index = -1, + .name = "ttySC", + .device = uart_console_device, + .write = kgdb_console_write, + .setup = kgdb_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, .data = &sci_uart_driver, }; @@ -1361,9 +1392,19 @@ static int __devinit sci_probe(struct platform_device *dev) struct plat_sci_port *p = dev->dev.platform_data; int i; - for (i = 0; p && p->flags != 0 && i < SCI_NPORTS; p++, i++) { + for (i = 0; p && p->flags != 0; p++, i++) { struct sci_port *sciport = &sci_ports[i]; + /* Sanity check */ + if (unlikely(i == SCI_NPORTS)) { + dev_notice(&dev->dev, "Attempting to register port " + "%d when only %d are available.\n", + i+1, SCI_NPORTS); + dev_notice(&dev->dev, "Consider bumping " + "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); + break; + } + sciport->port.mapbase = p->mapbase; /* @@ -1386,6 +1427,12 @@ static int __devinit sci_probe(struct platform_device *dev) uart_add_one_port(&sci_uart_driver, &sciport->port); } +#if defined(CONFIG_SH_KGDB) && !defined(CONFIG_SH_KGDB_CONSOLE) + kgdb_sci_port = &sci_ports[kgdb_portnum]; + kgdb_getchar = kgdb_sci_getchar; + kgdb_putchar = kgdb_sci_putchar; +#endif + #ifdef CONFIG_CPU_FREQ cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER); dev_info(&dev->dev, "sci: CPU frequency notifier registered\n"); diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 77f7d6351ab1..fb04fb5f9843 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -73,9 +73,13 @@ # define SCPDR 0xA4050136 /* 16 bit SCIF */ # define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ # define SCIF_ONLY -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) # define SCSPTR0 0xA4400000 /* 16 bit SCIF */ -# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ +# define SCI_NPORTS 2 +# define SCIF_ORER 0x0001 /* overrun error bit */ +# define PACR 0xa4050100 +# define PBCR 0xa4050102 +# define SCSCR_INIT(port) 0x3B # define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH73180) # define SCPDR 0xA4050138 /* 16 bit SCIF */ @@ -140,6 +144,16 @@ # define SCIF_ORER 0x0001 /* Overrun error bit */ # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ # define SCIF_ONLY +#elif defined(CONFIG_CPU_SUBTYPE_SH7785) +# define SCSPTR0 0xffea0024 /* 16 bit SCIF */ +# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */ +# define SCSPTR2 0xffec0024 /* 16 bit SCIF */ +# define SCSPTR3 0xffed0024 /* 16 bit SCIF */ +# define SCSPTR4 0xffee0024 /* 16 bit SCIF */ +# define SCSPTR5 0xffef0024 /* 16 bit SCIF */ +# define SCIF_OPER 0x0001 /* Overrun error bit */ +# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ +# define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH7206) # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ # define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ @@ -163,7 +177,10 @@ #define SCI_CTRL_FLAGS_RIE 0x40 /* all */ #define SCI_CTRL_FLAGS_TE 0x20 /* all */ #define SCI_CTRL_FLAGS_RE 0x10 /* all */ -#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7780) +#if 
defined(CONFIG_CPU_SUBTYPE_SH7750) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) #define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ #else #define SCI_CTRL_FLAGS_REIE 0 @@ -333,9 +350,15 @@ } #ifdef CONFIG_CPU_SH3 -#if defined(CONFIG_CPU_SUBTYPE_SH7300) || \ - defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7710) +#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) +#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ + sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ + h8_sci_offset, h8_sci_size) \ + CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) +#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ + CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) +#elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \ + defined(CONFIG_CPU_SUBTYPE_SH7705) #define SCIF_FNS(name, scif_offset, scif_size) \ CPU_SCIF_FNS(name, scif_offset, scif_size) #else @@ -362,8 +385,8 @@ #endif #if defined(CONFIG_CPU_SUBTYPE_SH7300) || \ - defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7710) + defined(CONFIG_CPU_SUBTYPE_SH7705) + SCIF_FNS(SCSMR, 0x00, 16) SCIF_FNS(SCBRR, 0x04, 8) SCIF_FNS(SCSCR, 0x08, 16) @@ -385,7 +408,9 @@ SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8) SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8) SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) -#if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780) +#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) @@ -471,13 +496,24 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inb(SCPDR)&0x10 ? 1 : 0; /* SCIF */ return 1; } -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) static inline int sci_rxd_in(struct uart_port *port) { - if (port->mapbase == SCSPTR0) - return ctrl_inw(SCSPTR0 + 0x10) & 0x01 ? 1 : 0; - return 1; + return sci_in(port,SCxSR)&0x0010 ? 1 : 0; +} +static inline void set_sh771x_scif_pfc(struct uart_port *port) +{ + if (port->mapbase == 0xA4400000){ + ctrl_outw(ctrl_inw(PACR)&0xffc0,PACR); + ctrl_outw(ctrl_inw(PBCR)&0x0fff,PBCR); + return; + } + if (port->mapbase == 0xA4410000){ + ctrl_outw(ctrl_inw(PBCR)&0xf003,PBCR); + return; + } } + #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ defined(CONFIG_CPU_SUBTYPE_SH7751) || \ defined(CONFIG_CPU_SUBTYPE_SH4_202) @@ -576,6 +612,23 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ return 1; } +#elif defined(CONFIG_CPU_SUBTYPE_SH7785) +static inline int sci_rxd_in(struct uart_port *port) +{ + if (port->mapbase == 0xffea0000) + return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xffeb0000) + return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xffec0000) + return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xffed0000) + return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xffee0000) + return ctrl_inw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */ + if (port->mapbase == 0xffef0000) + return ctrl_inw(SCSPTR5) & 0x0001 ? 
1 : 0; /* SCIF */ + return 1; +} #elif defined(CONFIG_CPU_SUBTYPE_SH7206) static inline int sci_rxd_in(struct uart_port *port) { @@ -634,7 +687,9 @@ static inline int sci_rxd_in(struct uart_port *port) * -- Mitch Davis - 15 Jul 2000 */ -#if defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7780) +#if defined(CONFIG_CPU_SUBTYPE_SH7300) || \ + defined(CONFIG_CPU_SUBTYPE_SH7780) || \ + defined(CONFIG_CPU_SUBTYPE_SH7785) #define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1) #elif defined(CONFIG_CPU_SUBTYPE_SH7705) #define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index bfd44177a215..2a63cdba3208 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -1312,7 +1312,7 @@ static void sunsu_console_write(struct console *co, const char *s, * - initialize the serial port * Return non-zero if we didn't find a serial port. */ -static int sunsu_console_setup(struct console *co, char *options) +static int __init sunsu_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 9600; @@ -1343,7 +1343,7 @@ static int sunsu_console_setup(struct console *co, char *options) return uart_set_options(port, co, baud, parity, bits, flow); } -static struct console sunsu_cons = { +static struct console sunsu_console = { .name = "ttyS", .write = sunsu_console_write, .device = uart_console_device, @@ -1373,9 +1373,9 @@ static inline struct console *SUNSU_CONSOLE(int num_uart) if (i == num_uart) return NULL; - sunsu_cons.index = i; + sunsu_console.index = i; - return &sunsu_cons; + return &sunsu_console; } #else #define SUNSU_CONSOLE(num_uart) (NULL) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 7e54e48efd5c..4a012d9acbff 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -58,6 +58,12 @@ config SPI_ATMEL This selects a driver for the Atmel SPI Controller, present on many AT32 (AVR32) and AT91 (ARM) chips. +config SPI_BFIN + tristate "SPI controller driver for ADI Blackfin5xx" + depends on SPI_MASTER && BFIN + help + This is the SPI controller master driver for Blackfin 5xx processor. + config SPI_BITBANG tristate "Bitbanging SPI master" depends on SPI_MASTER && EXPERIMENTAL @@ -157,7 +163,6 @@ config SPI_AT25 # Add new SPI protocol masters in alphabetical order above this line # - # (slave support would go here) endmenu # "SPI support" diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 3c280ad89202..a95ade857a2f 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -11,8 +11,9 @@ endif obj-$(CONFIG_SPI_MASTER) += spi.o # SPI master controller drivers (bus) -obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o +obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o +obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o obj-$(CONFIG_SPI_IMX) += spi_imx.o obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c new file mode 100644 index 000000000000..ce3c0ce2316e --- /dev/null +++ b/drivers/spi/spi_bfin5xx.c @@ -0,0 +1,1313 @@ +/* + * File: drivers/spi/bfin5xx_spi.c + * Based on: N/A + * Author: Luke Yang (Analog Devices Inc.) + * + * Created: March. 10th 2006 + * Description: SPI controller driver for Blackfin 5xx + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * Modified: + * March 10, 2006 bfin5xx_spi.c Created. 
(Luke Yang) + * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang) + * + * Copyright 2004-2006 Analog Devices Inc. + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program ; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/spi/spi.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/delay.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/delay.h> +#include <asm/dma.h> + +#include <asm/bfin5xx_spi.h> + +MODULE_AUTHOR("Luke Yang"); +MODULE_DESCRIPTION("Blackfin 5xx SPI Contoller"); +MODULE_LICENSE("GPL"); + +#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) + +#define DEFINE_SPI_REG(reg, off) \ +static inline u16 read_##reg(void) \ + { return *(volatile unsigned short*)(SPI0_REGBASE + off); } \ +static inline void write_##reg(u16 v) \ + {*(volatile unsigned short*)(SPI0_REGBASE + off) = v;\ + SSYNC();} + +DEFINE_SPI_REG(CTRL, 0x00) +DEFINE_SPI_REG(FLAG, 0x04) +DEFINE_SPI_REG(STAT, 0x08) +DEFINE_SPI_REG(TDBR, 0x0C) +DEFINE_SPI_REG(RDBR, 0x10) +DEFINE_SPI_REG(BAUD, 0x14) +DEFINE_SPI_REG(SHAW, 0x18) +#define START_STATE ((void*)0) +#define RUNNING_STATE ((void*)1) +#define DONE_STATE ((void*)2) +#define ERROR_STATE ((void*)-1) +#define QUEUE_RUNNING 0 +#define QUEUE_STOPPED 1 +int dma_requested; + +struct driver_data { + /* Driver model hookup */ + struct platform_device *pdev; + + /* SPI framework hookup */ + struct spi_master *master; + + /* BFIN hookup */ + struct bfin5xx_spi_master *master_info; + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + int run; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + size_t len_in_bytes; + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + int dma_mapped; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + size_t rx_map_len; + size_t tx_map_len; + u8 n_bytes; + void (*write) (struct driver_data *); + void (*read) (struct driver_data *); + void (*duplex) (struct driver_data *); +}; + +struct chip_data { + u16 ctl_reg; + u16 baud; + u16 flag; + + u8 chip_select_num; + u8 n_bytes; + u32 width; /* 0 or 1 */ + u8 enable_dma; + u8 bits_per_word; /* 8 or 16 */ + u8 cs_change_per_word; + u8 cs_chg_udelay; + void (*write) (struct driver_data *); + void (*read) (struct driver_data *); + void (*duplex) (struct driver_data *); +}; + +void bfin_spi_enable(struct driver_data *drv_data) +{ + u16 cr; + + cr = read_CTRL(); + write_CTRL(cr | BIT_CTL_ENABLE); + SSYNC(); +} + 
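+/*
+ * Counterpart of bfin_spi_enable() above: clear BIT_CTL_ENABLE in the SPI
+ * control register via the read_CTRL()/write_CTRL() accessors generated by
+ * DEFINE_SPI_REG().
+ */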
+void bfin_spi_disable(struct driver_data *drv_data) +{ + u16 cr; + + cr = read_CTRL(); + write_CTRL(cr & (~BIT_CTL_ENABLE)); + SSYNC(); +} + +/* Caculate the SPI_BAUD register value based on input HZ */ +static u16 hz_to_spi_baud(u32 speed_hz) +{ + u_long sclk = get_sclk(); + u16 spi_baud = (sclk / (2 * speed_hz)); + + if ((sclk % (2 * speed_hz)) > 0) + spi_baud++; + + pr_debug("sclk = %ld, speed_hz = %d, spi_baud = %d\n", sclk, speed_hz, + spi_baud); + + return spi_baud; +} + +static int flush(struct driver_data *drv_data) +{ + unsigned long limit = loops_per_jiffy << 1; + + /* wait for stop and clear stat */ + while (!(read_STAT() & BIT_STAT_SPIF) && limit--) + continue; + + write_STAT(BIT_STAT_CLR); + + return limit; +} + +/* stop controller and re-config current chip*/ +static void restore_state(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + /* Clear status and disable clock */ + write_STAT(BIT_STAT_CLR); + bfin_spi_disable(drv_data); + pr_debug("restoring spi ctl state\n"); + +#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537) + pr_debug("chip select number is %d\n", chip->chip_select_num); + + switch (chip->chip_select_num) { + case 1: + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00); + SSYNC(); + break; + + case 2: + case 3: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800); + SSYNC(); + break; + + case 4: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840); + SSYNC(); + break; + + case 5: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820); + SSYNC(); + break; + + case 6: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810); + SSYNC(); + break; + + case 7: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800); + SSYNC(); + break; + } +#endif + + /* Load the registers */ + write_CTRL(chip->ctl_reg); + write_BAUD(chip->baud); + write_FLAG(chip->flag); +} + +/* used to kick off transfer in rx mode */ +static unsigned short dummy_read(void) +{ + unsigned short tmp; + tmp = read_RDBR(); + return tmp; +} + +static void null_writer(struct driver_data *drv_data) +{ + u8 n_bytes = drv_data->n_bytes; + + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(0); + while ((read_STAT() & BIT_STAT_TXS)) + continue; + drv_data->tx += n_bytes; + } +} + +static void null_reader(struct driver_data *drv_data) +{ + u8 n_bytes = drv_data->n_bytes; + dummy_read(); + + while (drv_data->rx < drv_data->rx_end) { + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + dummy_read(); + drv_data->rx += n_bytes; + } +} + +static void u8_writer(struct driver_data *drv_data) +{ + pr_debug("cr8-s is 0x%x\n", read_STAT()); + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(*(u8 *) (drv_data->tx)); + while (read_STAT() & BIT_STAT_TXS) + continue; + ++drv_data->tx; + } + + /* poll for SPI completion before returning */ + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; +} + +static void u8_cs_chg_writer(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->tx < drv_data->tx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u8 *) (drv_data->tx)); + while (read_STAT() & BIT_STAT_TXS) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + 
continue; + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + ++drv_data->tx; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u8_reader(struct driver_data *drv_data) +{ + pr_debug("cr-8 is 0x%x\n", read_STAT()); + + /* clear TDBR buffer before read(else it will be shifted out) */ + write_TDBR(0xFFFF); + + dummy_read(); + + while (drv_data->rx < drv_data->rx_end - 1) { + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_RDBR(); + ++drv_data->rx; + } + + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_SHAW(); + ++drv_data->rx; +} + +static void u8_cs_chg_reader(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->rx < drv_data->rx_end) { + write_FLAG(chip->flag); + SSYNC(); + + read_RDBR(); /* kick off */ + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + *(u8 *) (drv_data->rx) = read_SHAW(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + ++drv_data->rx; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u8_duplex(struct driver_data *drv_data) +{ + /* in duplex mode, clk is triggered by writing of TDBR */ + while (drv_data->rx < drv_data->rx_end) { + write_TDBR(*(u8 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_RDBR(); + ++drv_data->rx; + ++drv_data->tx; + } +} + +static void u8_cs_chg_duplex(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->rx < drv_data->rx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u8 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_RDBR(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + ++drv_data->rx; + ++drv_data->tx; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u16_writer(struct driver_data *drv_data) +{ + pr_debug("cr16 is 0x%x\n", read_STAT()); + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(*(u16 *) (drv_data->tx)); + while ((read_STAT() & BIT_STAT_TXS)) + continue; + drv_data->tx += 2; + } + + /* poll for SPI completion before returning */ + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; +} + +static void u16_cs_chg_writer(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->tx < drv_data->tx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u16 *) (drv_data->tx)); + while ((read_STAT() & BIT_STAT_TXS)) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + drv_data->tx += 2; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u16_reader(struct driver_data *drv_data) +{ + pr_debug("cr-16 is 0x%x\n", read_STAT()); + dummy_read(); + + while (drv_data->rx < (drv_data->rx_end - 2)) { + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_RDBR(); + drv_data->rx += 2; + } + + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_SHAW(); + drv_data->rx += 2; +} + +static void u16_cs_chg_reader(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while 
(drv_data->rx < drv_data->rx_end) { + write_FLAG(chip->flag); + SSYNC(); + + read_RDBR(); /* kick off */ + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + *(u16 *) (drv_data->rx) = read_SHAW(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + drv_data->rx += 2; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u16_duplex(struct driver_data *drv_data) +{ + /* in duplex mode, clk is triggered by writing of TDBR */ + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(*(u16 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_RDBR(); + drv_data->rx += 2; + drv_data->tx += 2; + } +} + +static void u16_cs_chg_duplex(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->tx < drv_data->tx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u16 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_RDBR(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + drv_data->rx += 2; + drv_data->tx += 2; + } + write_FLAG(0xFF00); + SSYNC(); +} + +/* test if ther is more transfer to be done */ +static void *next_transfer(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct spi_transfer *trans = drv_data->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + drv_data->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, transfer_list); + return RUNNING_STATE; + } else + return DONE_STATE; +} + +/* + * caller already set message->status; + * dma and pio irqs are blocked give finished message back + */ +static void giveback(struct driver_data *drv_data) +{ + struct spi_transfer *last_transfer; + unsigned long flags; + struct spi_message *msg; + + spin_lock_irqsave(&drv_data->lock, flags); + msg = drv_data->cur_msg; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + queue_work(drv_data->workqueue, &drv_data->pump_messages); + spin_unlock_irqrestore(&drv_data->lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, transfer_list); + + msg->state = NULL; + + /* disable chip select signal. And not stop spi in autobuffer mode */ + if (drv_data->tx_dma != 0xFFFF) { + write_FLAG(0xFF00); + bfin_spi_disable(drv_data); + } + + if (msg->complete) + msg->complete(msg->context); +} + +static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + struct driver_data *drv_data = (struct driver_data *)dev_id; + struct spi_message *msg = drv_data->cur_msg; + + pr_debug("in dma_irq_handler\n"); + clear_dma_irqstat(CH_SPI); + + /* + * wait for the last transaction shifted out. yes, these two + * while loops are supposed to be the same (see the HRM). 
+ */ + if (drv_data->tx != NULL) { + while (bfin_read_SPI_STAT() & TXS) + continue; + while (bfin_read_SPI_STAT() & TXS) + continue; + } + + while (!(bfin_read_SPI_STAT() & SPIF)) + continue; + + bfin_spi_disable(drv_data); + + msg->actual_length += drv_data->len_in_bytes; + + /* Move to next transfer */ + msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + /* free the irq handler before next transfer */ + pr_debug("disable dma channel irq%d\n", CH_SPI); + dma_disable_irq(CH_SPI); + + return IRQ_HANDLED; +} + +static void pump_transfers(unsigned long data) +{ + struct driver_data *drv_data = (struct driver_data *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct chip_data *chip = NULL; + u16 cr, width, dma_width, dma_config; + u32 tranf_success = 1; + + /* Get current state information */ + message = drv_data->cur_msg; + transfer = drv_data->cur_transfer; + chip = drv_data->cur_chip; + + /* + * if msg is error or done, report it back using complete() callback + */ + + /* Handle for abort */ + if (message->state == ERROR_STATE) { + message->status = -EIO; + giveback(drv_data); + return; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + message->status = 0; + giveback(drv_data); + return; + } + + /* Delay if requested at end of transfer */ + if (message->state == RUNNING_STATE) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } + + /* Setup the transfer state based on the type of transfer */ + if (flush(drv_data) == 0) { + dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); + message->status = -EIO; + giveback(drv_data); + return; + } + + if (transfer->tx_buf != NULL) { + drv_data->tx = (void *)transfer->tx_buf; + drv_data->tx_end = drv_data->tx + transfer->len; + pr_debug("tx_buf is %p, tx_end is %p\n", transfer->tx_buf, + drv_data->tx_end); + } else { + drv_data->tx = NULL; + } + + if (transfer->rx_buf != NULL) { + drv_data->rx = transfer->rx_buf; + drv_data->rx_end = drv_data->rx + transfer->len; + pr_debug("rx_buf is %p, rx_end is %p\n", transfer->rx_buf, + drv_data->rx_end); + } else { + drv_data->rx = NULL; + } + + drv_data->rx_dma = transfer->rx_dma; + drv_data->tx_dma = transfer->tx_dma; + drv_data->len_in_bytes = transfer->len; + + width = chip->width; + if (width == CFG_SPI_WORDSIZE16) { + drv_data->len = (transfer->len) >> 1; + } else { + drv_data->len = transfer->len; + } + drv_data->write = drv_data->tx ? chip->write : null_writer; + drv_data->read = drv_data->rx ? chip->read : null_reader; + drv_data->duplex = chip->duplex ? 
chip->duplex : null_writer;
+	pr_debug
+	    ("transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
+	     drv_data->write, chip->write, null_writer);
+
+	/* speed and width have been set for each message */
+	message->state = RUNNING_STATE;
+	dma_config = 0;
+
+	/* restore spi status for each spi transfer */
+	if (transfer->speed_hz) {
+		write_BAUD(hz_to_spi_baud(transfer->speed_hz));
+	} else {
+		write_BAUD(chip->baud);
+	}
+	write_FLAG(chip->flag);
+
+	pr_debug("now pumping a transfer: width is %d, len is %d\n", width,
+		 transfer->len);
+
+	/*
+	 * Try to map the dma buffer and do a dma transfer if
+	 * successful; use different ways to r/w according to
+	 * drv_data->cur_chip->enable_dma
+	 */
+	if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
+
+		write_STAT(BIT_STAT_CLR);
+		disable_dma(CH_SPI);
+		clear_dma_irqstat(CH_SPI);
+		bfin_spi_disable(drv_data);
+
+		/* config dma channel */
+		pr_debug("doing dma transfer\n");
+		if (width == CFG_SPI_WORDSIZE16) {
+			set_dma_x_count(CH_SPI, drv_data->len);
+			set_dma_x_modify(CH_SPI, 2);
+			dma_width = WDSIZE_16;
+		} else {
+			set_dma_x_count(CH_SPI, drv_data->len);
+			set_dma_x_modify(CH_SPI, 1);
+			dma_width = WDSIZE_8;
+		}
+
+		/* set transfer width and direction, and enable SPI */
+		cr = (read_CTRL() & (~BIT_CTL_TIMOD));
+
+		/* dirty hack for autobuffer DMA mode */
+		if (drv_data->tx_dma == 0xFFFF) {
+			pr_debug("doing autobuffer DMA out.\n");
+
+			/* no irq in autobuffer mode */
+			dma_config =
+			    (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
+			set_dma_config(CH_SPI, dma_config);
+			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
+			enable_dma(CH_SPI);
+			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
+				   (CFG_SPI_ENABLE << 14));
+
+			/* just return here, there can only be one transfer in this mode */
+			message->status = 0;
+			giveback(drv_data);
+			return;
+		}
+
+		/* In dma mode, rx or tx must be NULL in one transfer */
+		if (drv_data->rx != NULL) {
+			/* set transfer mode, and enable SPI */
+			pr_debug("doing DMA in.\n");
+
+			/* disable SPI before write to TDBR */
+			write_CTRL(cr & ~BIT_CTL_ENABLE);
+
+			/* clear tx reg so former data is not shifted out */
+			write_TDBR(0xFF);
+
+			set_dma_x_count(CH_SPI, drv_data->len);
+
+			/* start dma */
+			dma_enable_irq(CH_SPI);
+			dma_config = (WNR | RESTART | dma_width | DI_EN);
+			set_dma_config(CH_SPI, dma_config);
+			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx);
+			enable_dma(CH_SPI);
+
+			cr |=
+			    CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE <<
+							      14);
+			/* set transfer mode, and enable SPI */
+			write_CTRL(cr);
+		} else if (drv_data->tx != NULL) {
+			pr_debug("doing DMA out.\n");
+
+			/* start dma */
+			dma_enable_irq(CH_SPI);
+			dma_config = (RESTART | dma_width | DI_EN);
+			set_dma_config(CH_SPI, dma_config);
+			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
+			enable_dma(CH_SPI);
+
+			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
+				   (CFG_SPI_ENABLE << 14));
+
+		}
+	} else {
+		/* IO mode write then read */
+		pr_debug("doing IO transfer\n");
+
+		write_STAT(BIT_STAT_CLR);
+
+		if (drv_data->tx != NULL && drv_data->rx != NULL) {
+			/* full duplex mode */
+			BUG_ON((drv_data->tx_end - drv_data->tx) !=
+			       (drv_data->rx_end - drv_data->rx));
+			cr = (read_CTRL() & (~BIT_CTL_TIMOD));	/* clear the TIMOD bits */
+			cr |=
+			    CFG_SPI_WRITE | (width << 8) | (CFG_SPI_ENABLE <<
+							    14);
+			pr_debug("IO duplex: cr is 0x%x\n", cr);
+
+			write_CTRL(cr);
+			SSYNC();
+
+			drv_data->duplex(drv_data);
+
+			if (drv_data->tx != drv_data->tx_end)
+				tranf_success = 0;
+		} else if (drv_data->tx != NULL) {
+			/* write only half duplex */
+			cr = (read_CTRL() & (~BIT_CTL_TIMOD));	/* clear the TIMOD bits */
+			cr |=
+			    CFG_SPI_WRITE | (width << 8) | (CFG_SPI_ENABLE <<
+							    14);
+			pr_debug("IO write: cr is 0x%x\n", cr);
+
+			write_CTRL(cr);
+			SSYNC();
+
+			drv_data->write(drv_data);
+
+			if (drv_data->tx != drv_data->tx_end)
+				tranf_success = 0;
+		} else if (drv_data->rx != NULL) {
+			/* read only half duplex */
+			cr = (read_CTRL() & (~BIT_CTL_TIMOD));	/* clear the TIMOD bits */
+			cr |=
+			    CFG_SPI_READ | (width << 8) | (CFG_SPI_ENABLE <<
+							   14);
+			pr_debug("IO read: cr is 0x%x\n", cr);
+
+			write_CTRL(cr);
+			SSYNC();
+
+			drv_data->read(drv_data);
+			if (drv_data->rx != drv_data->rx_end)
+				tranf_success = 0;
+		}
+
+		if (!tranf_success) {
+			pr_debug("IO write error!\n");
+			message->state = ERROR_STATE;
+		} else {
+			/* Update total bytes transferred */
+			message->actual_length += drv_data->len;
+
+			/* Move to next transfer of this msg */
+			message->state = next_transfer(drv_data);
+		}
+
+		/* Schedule next transfer tasklet */
+		tasklet_schedule(&drv_data->pump_transfers);
+
+	}
+}
+
+/* pop a msg from queue and kick off real transfer */
+static void pump_messages(struct work_struct *work)
+{
+	struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages);
+	unsigned long flags;
+
+	/* Lock queue and check for queue work */
+	spin_lock_irqsave(&drv_data->lock, flags);
+	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
+		/* pumper kicked off but no work to do */
+		drv_data->busy = 0;
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		return;
+	}
+
+	/* Make sure we are not already running a message */
+	if (drv_data->cur_msg) {
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+		return;
+	}
+
+	/* Extract head of queue */
+	drv_data->cur_msg = list_entry(drv_data->queue.next,
+				       struct spi_message, queue);
+	list_del_init(&drv_data->cur_msg->queue);
+
+	/* Initial message state */
+	drv_data->cur_msg->state = START_STATE;
+	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+					    struct spi_transfer, transfer_list);
+
+	/* Setup the SSP using the per chip configuration */
+	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+	restore_state(drv_data);
+	pr_debug
+	    ("got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
+	     drv_data->cur_chip->baud, drv_data->cur_chip->flag,
+	     drv_data->cur_chip->ctl_reg);
+	pr_debug("the first transfer len is %d\n", drv_data->cur_transfer->len);
+
+	/* Mark as busy and launch transfers */
+	tasklet_schedule(&drv_data->pump_transfers);
+
+	drv_data->busy = 1;
+	spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer, queue it in drv_data->queue.
+ * And kick off message pumper + */ +static int transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + pr_debug("adding an msg in transfer() \n"); + list_add_tail(&msg->queue, &drv_data->queue); + + if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return 0; +} + +/* first setup for new devices */ +static int setup(struct spi_device *spi) +{ + struct bfin5xx_spi_chip *chip_info = NULL; + struct chip_data *chip; + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + u8 spi_flg; + + /* Abort device setup if requested features are not supported */ + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { + dev_err(&spi->dev, "requested mode not fully supported\n"); + return -EINVAL; + } + + /* Zero (the default) here means 8 bits */ + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) + return -EINVAL; + + /* Only alloc (or use chip_info) on first setup */ + chip = spi_get_ctldata(spi); + if (chip == NULL) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->enable_dma = 0; + chip_info = spi->controller_data; + } + + /* chip_info isn't always needed */ + if (chip_info) { + chip->enable_dma = chip_info->enable_dma != 0 + && drv_data->master_info->enable_dma; + chip->ctl_reg = chip_info->ctl_reg; + chip->bits_per_word = chip_info->bits_per_word; + chip->cs_change_per_word = chip_info->cs_change_per_word; + chip->cs_chg_udelay = chip_info->cs_chg_udelay; + } + + /* translate common spi framework into our register */ + if (spi->mode & SPI_CPOL) + chip->ctl_reg |= CPOL; + if (spi->mode & SPI_CPHA) + chip->ctl_reg |= CPHA; + if (spi->mode & SPI_LSB_FIRST) + chip->ctl_reg |= LSBF; + /* we dont support running in slave mode (yet?) */ + chip->ctl_reg |= MSTR; + + /* + * if any one SPI chip is registered and wants DMA, request the + * DMA channel for it + */ + if (chip->enable_dma && !dma_requested) { + /* register dma irq handler */ + if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) { + pr_debug + ("Unable to request BlackFin SPI DMA channel\n"); + return -ENODEV; + } + if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data) + < 0) { + pr_debug("Unable to set dma callback\n"); + return -EPERM; + } + dma_disable_irq(CH_SPI); + dma_requested = 1; + } + + /* + * Notice: for blackfin, the speed_hz is the value of register + * SPI_BAUD, not the real baudrate + */ + chip->baud = hz_to_spi_baud(spi->max_speed_hz); + spi_flg = ~(1 << (spi->chip_select)); + chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select)); + chip->chip_select_num = spi->chip_select; + + switch (chip->bits_per_word) { + case 8: + chip->n_bytes = 1; + chip->width = CFG_SPI_WORDSIZE8; + chip->read = chip->cs_change_per_word ? + u8_cs_chg_reader : u8_reader; + chip->write = chip->cs_change_per_word ? + u8_cs_chg_writer : u8_writer; + chip->duplex = chip->cs_change_per_word ? + u8_cs_chg_duplex : u8_duplex; + break; + + case 16: + chip->n_bytes = 2; + chip->width = CFG_SPI_WORDSIZE16; + chip->read = chip->cs_change_per_word ? 
+ u16_cs_chg_reader : u16_reader; + chip->write = chip->cs_change_per_word ? + u16_cs_chg_writer : u16_writer; + chip->duplex = chip->cs_change_per_word ? + u16_cs_chg_duplex : u16_duplex; + break; + + default: + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + chip->bits_per_word); + kfree(chip); + return -ENODEV; + } + + pr_debug("setup spi chip %s, width is %d, dma is %d,", + spi->modalias, chip->width, chip->enable_dma); + pr_debug("ctl_reg is 0x%x, flag_reg is 0x%x\n", + chip->ctl_reg, chip->flag); + + spi_set_ctldata(spi, chip); + + return 0; +} + +/* + * callback for spi framework. + * clean driver specific data + */ +static void cleanup(const struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); + + kfree(chip); +} + +static inline int init_queue(struct driver_data *drv_data) +{ + INIT_LIST_HEAD(&drv_data->queue); + spin_lock_init(&drv_data->lock); + + drv_data->run = QUEUE_STOPPED; + drv_data->busy = 0; + + /* init transfer tasklet */ + tasklet_init(&drv_data->pump_transfers, + pump_transfers, (unsigned long)drv_data); + + /* init messages workqueue */ + INIT_WORK(&drv_data->pump_messages, pump_messages); + drv_data->workqueue = + create_singlethread_workqueue(drv_data->master->cdev.dev->bus_id); + if (drv_data->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static inline int start_queue(struct driver_data *drv_data) +{ + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -EBUSY; + } + + drv_data->run = QUEUE_RUNNING; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + spin_unlock_irqrestore(&drv_data->lock, flags); + + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + return 0; +} + +static inline int stop_queue(struct driver_data *drv_data) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&drv_data->lock, flags); + + /* + * This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the drv_data->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. 
Do this instead + */ + drv_data->run = QUEUE_STOPPED; + while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { + spin_unlock_irqrestore(&drv_data->lock, flags); + msleep(10); + spin_lock_irqsave(&drv_data->lock, flags); + } + + if (!list_empty(&drv_data->queue) || drv_data->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return status; +} + +static inline int destroy_queue(struct driver_data *drv_data) +{ + int status; + + status = stop_queue(drv_data); + if (status != 0) + return status; + + destroy_workqueue(drv_data->workqueue); + + return 0; +} + +static int __init bfin5xx_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bfin5xx_spi_master *platform_info; + struct spi_master *master; + struct driver_data *drv_data = 0; + int status = 0; + + platform_info = dev->platform_data; + + /* Allocate master with space for drv_data */ + master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); + if (!master) { + dev_err(&pdev->dev, "can not alloc spi_master\n"); + return -ENOMEM; + } + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->master_info = platform_info; + drv_data->pdev = pdev; + + master->bus_num = pdev->id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = cleanup; + master->setup = setup; + master->transfer = transfer; + + /* Initial and start queue */ + status = init_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem initializing queue\n"); + goto out_error_queue_alloc; + } + status = start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem starting queue\n"); + goto out_error_queue_alloc; + } + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); + status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi master\n"); + goto out_error_queue_alloc; + } + pr_debug("controller probe successfully\n"); + return status; + + out_error_queue_alloc: + destroy_queue(drv_data); + spi_master_put(master); + return status; +} + +/* stop hardware and remove the driver */ +static int __devexit bfin5xx_spi_remove(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + if (!drv_data) + return 0; + + /* Remove the queue */ + status = destroy_queue(drv_data); + if (status != 0) + return status; + + /* Disable the SSP at the peripheral and SOC level */ + bfin_spi_disable(drv_data); + + /* Release DMA */ + if (drv_data->master_info->enable_dma) { + if (dma_channel_active(CH_SPI)) + free_dma(CH_SPI); + } + + /* Disconnect from the SPI framework */ + spi_unregister_master(drv_data->master); + + /* Prevent double remove */ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + status = stop_queue(drv_data); + if (status != 0) + return status; + + /* stop hardware */ + bfin_spi_disable(drv_data); + + return 0; +} + +static int bfin5xx_spi_resume(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + /* Enable the SPI interface */ + bfin_spi_enable(drv_data); + + /* Start the queue running */ + status = start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem starting queue (%d)\n", status); + return status; + } + + return 0; +} 
+#else +#define bfin5xx_spi_suspend NULL +#define bfin5xx_spi_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver bfin5xx_spi_driver = { + .driver = { + .name = "bfin-spi-master", + .bus = &platform_bus_type, + .owner = THIS_MODULE, + }, + .probe = bfin5xx_spi_probe, + .remove = __devexit_p(bfin5xx_spi_remove), + .suspend = bfin5xx_spi_suspend, + .resume = bfin5xx_spi_resume, +}; + +static int __init bfin5xx_spi_init(void) +{ + return platform_driver_register(&bfin5xx_spi_driver); +} + +module_init(bfin5xx_spi_init); + +static void __exit bfin5xx_spi_exit(void) +{ + platform_driver_unregister(&bfin5xx_spi_driver); +} + +module_exit(bfin5xx_spi_exit);
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index b10211c420ef..d5a710f6e445 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -342,8 +342,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev) goto err_register; } - dev_dbg(hw->dev, "shutdown=%d\n", hw->bitbang.shutdown); - /* register all the devices associated */ bi = &hw->pdata->board_info[0];
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c index 4d781a2a9807..93107453f124 100644 --- a/drivers/usb/host/ehci-ps3.c +++ b/drivers/usb/host/ehci-ps3.c @@ -73,13 +73,6 @@ static const struct hc_driver ps3_ehci_hc_driver = { #endif }; -#if !defined(DEBUG) -#undef dev_dbg -static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg( - const struct device *_dev, const char *fmt, ...) {return 0;} -#endif - - static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev) { int result;
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c index 62283a3926de..c849f72b508a 100644 --- a/drivers/usb/host/ohci-ps3.c +++ b/drivers/usb/host/ohci-ps3.c @@ -75,14 +75,6 @@ static const struct hc_driver ps3_ohci_hc_driver = { #endif }; -/* redefine dev_dbg to do a syntax check */ - -#if !defined(DEBUG) -#undef dev_dbg -static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg( - const struct device *_dev, const char *fmt, ...) {return 0;} -#endif - static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev) { int result;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 2d89d0e9bcf4..344c37595305 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1383,6 +1383,32 @@ config FB_LEO This is the frame buffer device driver for the SBUS-based Sun ZX (leo) frame buffer cards. +config FB_XVR500 + bool "Sun XVR-500 3DLABS Wildcat support" + depends on FB && PCI && SPARC64 + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + This is the framebuffer device for the Sun XVR-500 and similar + graphics cards based upon the 3DLABS Wildcat chipset. The driver + only works on sparc64 systems where the system firmware has + mostly initialized the card already. It is treated as a + completely dumb framebuffer device. + +config FB_XVR2500 + bool "Sun XVR-2500 3DLABS Wildcat support" + depends on FB && PCI && SPARC64 + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + This is the framebuffer device for the Sun XVR-2500 and similar + graphics cards based upon the 3DLABS Wildcat chipset. The driver + only works on sparc64 systems where the system firmware has + mostly initialized the card already. It is treated as a + completely dumb framebuffer device.
+ config FB_PCI bool "PCI framebuffers" depends on (FB = y) && PCI && SPARC diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 869351785ee8..558473d040d6 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -67,6 +67,8 @@ obj-$(CONFIG_FB_ATARI) += atafb.o c2p.o atafb_mfb.o \ atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o obj-$(CONFIG_FB_MAC) += macfb.o obj-$(CONFIG_FB_HGA) += hgafb.o +obj-$(CONFIG_FB_XVR500) += sunxvr500.o +obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o obj-$(CONFIG_FB_IGA) += igafb.o obj-$(CONFIG_FB_APOLLO) += dnfb.o obj-$(CONFIG_FB_Q40) += q40fb.o diff --git a/drivers/video/sunxvr2500.c b/drivers/video/sunxvr2500.c new file mode 100644 index 000000000000..4010492c6920 --- /dev/null +++ b/drivers/video/sunxvr2500.c @@ -0,0 +1,277 @@ +/* s3d.c: Sun 3DLABS XVR-2500 et al. driver for sparc64 systems + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/of_device.h> + +struct s3d_info { + struct fb_info *info; + struct pci_dev *pdev; + + char __iomem *fb_base; + unsigned long fb_base_phys; + + struct device_node *of_node; + + unsigned int width; + unsigned int height; + unsigned int depth; + unsigned int fb_size; + + u32 pseudo_palette[256]; +}; + +static int __devinit s3d_get_props(struct s3d_info *sp) +{ + sp->width = of_getintprop_default(sp->of_node, "width", 0); + sp->height = of_getintprop_default(sp->of_node, "height", 0); + sp->depth = of_getintprop_default(sp->of_node, "depth", 8); + + if (!sp->width || !sp->height) { + printk(KERN_ERR "s3d: Critical properties missing for %s\n", + pci_name(sp->pdev)); + return -EINVAL; + } + + return 0; +} + +static int s3d_setcolreg(unsigned regno, + unsigned red, unsigned green, unsigned blue, + unsigned transp, struct fb_info *info) +{ + u32 value; + + if (regno >= 256) + return 1; + + red >>= 8; + green >>= 8; + blue >>= 8; + + value = (blue << 24) | (green << 16) | (red << 8); + ((u32 *)info->pseudo_palette)[regno] = value; + + return 0; +} + +static struct fb_ops s3d_ops = { + .owner = THIS_MODULE, + .fb_setcolreg = s3d_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +}; + +static int __devinit s3d_set_fbinfo(struct s3d_info *sp) +{ + struct fb_info *info = sp->info; + struct fb_var_screeninfo *var = &info->var; + + info->flags = FBINFO_DEFAULT; + info->fbops = &s3d_ops; + info->screen_base = sp->fb_base; + info->screen_size = sp->fb_size; + + info->pseudo_palette = sp->pseudo_palette; + + /* Fill fix common fields */ + strlcpy(info->fix.id, "s3d", sizeof(info->fix.id)); + info->fix.smem_start = sp->fb_base_phys; + info->fix.smem_len = sp->fb_size; + info->fix.type = FB_TYPE_PACKED_PIXELS; + if (sp->depth == 32 || sp->depth == 24) + info->fix.visual = FB_VISUAL_TRUECOLOR; + else + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; + + var->xres = sp->width; + var->yres = sp->height; + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + var->bits_per_pixel = sp->depth; + + var->red.offset = 8; + var->red.length = 8; + var->green.offset = 16; + var->green.length = 8; + var->blue.offset = 24; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + + if (fb_alloc_cmap(&info->cmap, 256, 0)) { + printk(KERN_ERR "s3d: Cannot allocate color map.\n"); + return -ENOMEM; + } + + return 0; +} + 
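+/*
+ * Illustrative example (editorial addition, not part of the original
+ * patch): with the channel layout declared in s3d_set_fbinfo() above
+ * (red at bit offset 8, green at 16, blue at 24, low byte unused), a
+ * full-intensity red passed to s3d_setcolreg() as
+ * (red, green, blue) = (0xffff, 0x0000, 0x0000) is stored as
+ *
+ *	value = (0x00 << 24) | (0x00 << 16) | (0xff << 8) = 0x0000ff00
+ *
+ * and that pseudo-palette entry is what the generic cfb_* drawing
+ * routines write into the 32bpp framebuffer for that logical color.
+ */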
+static int __devinit s3d_pci_register(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct fb_info *info; + struct s3d_info *sp; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + printk(KERN_ERR "s3d: Cannot enable PCI device %s\n", + pci_name(pdev)); + goto err_out; + } + + info = framebuffer_alloc(sizeof(struct s3d_info), &pdev->dev); + if (!info) { + printk(KERN_ERR "s3d: Cannot allocate fb_info\n"); + err = -ENOMEM; + goto err_disable; + } + + sp = info->par; + sp->info = info; + sp->pdev = pdev; + sp->of_node = pci_device_to_OF_node(pdev); + if (!sp->of_node) { + printk(KERN_ERR "s3d: Cannot find OF node of %s\n", + pci_name(pdev)); + err = -ENODEV; + goto err_release_fb; + } + + sp->fb_base_phys = pci_resource_start (pdev, 1); + + err = pci_request_region(pdev, 1, "s3d framebuffer"); + if (err < 0) { + printk("s3d: Cannot request region 1 for %s\n", + pci_name(pdev)); + goto err_release_fb; + } + + err = s3d_get_props(sp); + if (err) + goto err_release_pci; + + /* XXX 'linebytes' is often wrong, it is equal to the width + * XXX with depth of 32 on my XVR-2500 which is clearly not + * XXX right. So we don't try to use it. + */ + switch (sp->depth) { + case 8: + info->fix.line_length = sp->width; + break; + case 16: + info->fix.line_length = sp->width * 2; + break; + case 24: + info->fix.line_length = sp->width * 3; + break; + case 32: + info->fix.line_length = sp->width * 4; + break; + } + sp->fb_size = info->fix.line_length * sp->height; + + sp->fb_base = ioremap(sp->fb_base_phys, sp->fb_size); + if (!sp->fb_base) + goto err_release_pci; + + err = s3d_set_fbinfo(sp); + if (err) + goto err_unmap_fb; + + pci_set_drvdata(pdev, info); + + printk("s3d: Found device at %s\n", pci_name(pdev)); + + err = register_framebuffer(info); + if (err < 0) { + printk(KERN_ERR "s3d: Could not register framebuffer %s\n", + pci_name(pdev)); + goto err_unmap_fb; + } + + return 0; + +err_unmap_fb: + iounmap(sp->fb_base); + +err_release_pci: + pci_release_region(pdev, 1); + +err_release_fb: + framebuffer_release(info); + +err_disable: + pci_disable_device(pdev); + +err_out: + return err; +} + +static void __devexit s3d_pci_unregister(struct pci_dev *pdev) +{ + struct fb_info *info = pci_get_drvdata(pdev); + struct s3d_info *sp = info->par; + + unregister_framebuffer(info); + + iounmap(sp->fb_base); + + pci_release_region(pdev, 1); + + framebuffer_release(info); + + pci_disable_device(pdev); +} + +static struct pci_device_id s3d_pci_table[] = { + { /* XVR-2500 */ + PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x0032), + .driver_data = 1, + }, + { /* XVR-500 */ + PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x07a2), + .driver_data = 0, + }, + { 0, } +}; + +static struct pci_driver s3d_driver = { + .name = "s3d", + .id_table = s3d_pci_table, + .probe = s3d_pci_register, + .remove = __devexit_p(s3d_pci_unregister), +}; + +static int __init s3d_init(void) +{ + if (fb_get_options("s3d", NULL)) + return -ENODEV; + + return pci_register_driver(&s3d_driver); +} + +static void __exit s3d_exit(void) +{ + pci_unregister_driver(&s3d_driver); +} + +module_init(s3d_init); +module_exit(s3d_exit); + +MODULE_DESCRIPTION("framebuffer driver for Sun XVR-2500 graphics"); +MODULE_AUTHOR("David S. 
Miller <davem@davemloft.net>"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c new file mode 100644 index 000000000000..08880a62bfa3 --- /dev/null +++ b/drivers/video/sunxvr500.c @@ -0,0 +1,443 @@ +/* sunxvr500.c: Sun 3DLABS XVR-500 Expert3D driver for sparc64 systems + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/of_device.h> + +/* XXX This device has a 'dev-comm' property which aparently is + * XXX a pointer into the openfirmware's address space which is + * XXX a shared area the kernel driver can use to keep OBP + * XXX informed about the current resolution setting. The idea + * XXX is that the kernel can change resolutions, and as long + * XXX as the values in the 'dev-comm' area are accurate then + * XXX OBP can still render text properly to the console. + * XXX + * XXX I'm still working out the layout of this and whether there + * XXX are any signatures we need to look for etc. + */ +struct e3d_info { + struct fb_info *info; + struct pci_dev *pdev; + + spinlock_t lock; + + char __iomem *fb_base; + unsigned long fb_base_phys; + + unsigned long fb8_buf_diff; + unsigned long regs_base_phys; + + void __iomem *ramdac; + + struct device_node *of_node; + + unsigned int width; + unsigned int height; + unsigned int depth; + unsigned int fb_size; + + u32 fb_base_reg; + u32 fb8_0_off; + u32 fb8_1_off; + + u32 pseudo_palette[256]; +}; + +static int __devinit e3d_get_props(struct e3d_info *ep) +{ + ep->width = of_getintprop_default(ep->of_node, "width", 0); + ep->height = of_getintprop_default(ep->of_node, "height", 0); + ep->depth = of_getintprop_default(ep->of_node, "depth", 8); + + if (!ep->width || !ep->height) { + printk(KERN_ERR "e3d: Critical properties missing for %s\n", + pci_name(ep->pdev)); + return -EINVAL; + } + + return 0; +} + +/* My XVR-500 comes up, at 1280x768 and a FB base register value of + * 0x04000000, the following video layout register values: + * + * RAMDAC_VID_WH 0x03ff04ff + * RAMDAC_VID_CFG 0x1a0b0088 + * RAMDAC_VID_32FB_0 0x04000000 + * RAMDAC_VID_32FB_1 0x04800000 + * RAMDAC_VID_8FB_0 0x05000000 + * RAMDAC_VID_8FB_1 0x05200000 + * RAMDAC_VID_XXXFB 0x05400000 + * RAMDAC_VID_YYYFB 0x05c00000 + * RAMDAC_VID_ZZZFB 0x05e00000 + */ +/* Video layout registers */ +#define RAMDAC_VID_WH 0x00000070UL /* (height-1)<<16 | (width-1) */ +#define RAMDAC_VID_CFG 0x00000074UL /* 0x1a000088|(linesz_log2<<16) */ +#define RAMDAC_VID_32FB_0 0x00000078UL /* PCI base 32bpp FB buffer 0 */ +#define RAMDAC_VID_32FB_1 0x0000007cUL /* PCI base 32bpp FB buffer 1 */ +#define RAMDAC_VID_8FB_0 0x00000080UL /* PCI base 8bpp FB buffer 0 */ +#define RAMDAC_VID_8FB_1 0x00000084UL /* PCI base 8bpp FB buffer 1 */ +#define RAMDAC_VID_XXXFB 0x00000088UL /* PCI base of XXX FB */ +#define RAMDAC_VID_YYYFB 0x0000008cUL /* PCI base of YYY FB */ +#define RAMDAC_VID_ZZZFB 0x00000090UL /* PCI base of ZZZ FB */ + +/* CLUT registers */ +#define RAMDAC_INDEX 0x000000bcUL +#define RAMDAC_DATA 0x000000c0UL + +static void e3d_clut_write(struct e3d_info *ep, int index, u32 val) +{ + void __iomem *ramdac = ep->ramdac; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + + writel(index, ramdac + RAMDAC_INDEX); + writel(val, ramdac + RAMDAC_DATA); + + spin_unlock_irqrestore(&ep->lock, flags); +} + +static int 
e3d_setcolreg(unsigned regno, + unsigned red, unsigned green, unsigned blue, + unsigned transp, struct fb_info *info) +{ + struct e3d_info *ep = info->par; + u32 red_8, green_8, blue_8; + u32 red_10, green_10, blue_10; + u32 value; + + if (regno >= 256) + return 1; + + red_8 = red >> 8; + green_8 = green >> 8; + blue_8 = blue >> 8; + + value = (blue_8 << 24) | (green_8 << 16) | (red_8 << 8); + ((u32 *)info->pseudo_palette)[regno] = value; + + + red_10 = red >> 6; + green_10 = green >> 6; + blue_10 = blue >> 6; + + value = (blue_10 << 20) | (green_10 << 10) | (red_10 << 0); + e3d_clut_write(ep, regno, value); + + return 0; +} + +/* XXX This is a bit of a hack. I can't figure out exactly how the + * XXX two 8bpp areas of the framebuffer work. I imagine there is + * XXX a WID attribute somewhere else in the framebuffer which tells + * XXX the ramdac which of the two 8bpp framebuffer regions to take + * XXX the pixel from. So, for now, render into both regions to make + * XXX sure the pixel shows up. + */ +static void e3d_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct e3d_info *ep = info->par; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + cfb_imageblit(info, image); + info->screen_base += ep->fb8_buf_diff; + cfb_imageblit(info, image); + info->screen_base -= ep->fb8_buf_diff; + spin_unlock_irqrestore(&ep->lock, flags); +} + +static void e3d_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct e3d_info *ep = info->par; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + cfb_fillrect(info, rect); + info->screen_base += ep->fb8_buf_diff; + cfb_fillrect(info, rect); + info->screen_base -= ep->fb8_buf_diff; + spin_unlock_irqrestore(&ep->lock, flags); +} + +static void e3d_copyarea(struct fb_info *info, const struct fb_copyarea *area) +{ + struct e3d_info *ep = info->par; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + cfb_copyarea(info, area); + info->screen_base += ep->fb8_buf_diff; + cfb_copyarea(info, area); + info->screen_base -= ep->fb8_buf_diff; + spin_unlock_irqrestore(&ep->lock, flags); +} + +static struct fb_ops e3d_ops = { + .owner = THIS_MODULE, + .fb_setcolreg = e3d_setcolreg, + .fb_fillrect = e3d_fillrect, + .fb_copyarea = e3d_copyarea, + .fb_imageblit = e3d_imageblit, +}; + +static int __devinit e3d_set_fbinfo(struct e3d_info *ep) +{ + struct fb_info *info = ep->info; + struct fb_var_screeninfo *var = &info->var; + + info->flags = FBINFO_DEFAULT; + info->fbops = &e3d_ops; + info->screen_base = ep->fb_base; + info->screen_size = ep->fb_size; + + info->pseudo_palette = ep->pseudo_palette; + + /* Fill fix common fields */ + strlcpy(info->fix.id, "e3d", sizeof(info->fix.id)); + info->fix.smem_start = ep->fb_base_phys; + info->fix.smem_len = ep->fb_size; + info->fix.type = FB_TYPE_PACKED_PIXELS; + if (ep->depth == 32 || ep->depth == 24) + info->fix.visual = FB_VISUAL_TRUECOLOR; + else + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; + + var->xres = ep->width; + var->yres = ep->height; + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + var->bits_per_pixel = ep->depth; + + var->red.offset = 8; + var->red.length = 8; + var->green.offset = 16; + var->green.length = 8; + var->blue.offset = 24; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + + if (fb_alloc_cmap(&info->cmap, 256, 0)) { + printk(KERN_ERR "e3d: Cannot allocate color map.\n"); + return -ENOMEM; + } + + return 0; +} + +static int __devinit e3d_pci_register(struct pci_dev *pdev, + const struct 
pci_device_id *ent) +{ + struct fb_info *info; + struct e3d_info *ep; + unsigned int line_length; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + printk(KERN_ERR "e3d: Cannot enable PCI device %s\n", + pci_name(pdev)); + goto err_out; + } + + info = framebuffer_alloc(sizeof(struct e3d_info), &pdev->dev); + if (!info) { + printk(KERN_ERR "e3d: Cannot allocate fb_info\n"); + err = -ENOMEM; + goto err_disable; + } + + ep = info->par; + ep->info = info; + ep->pdev = pdev; + spin_lock_init(&ep->lock); + ep->of_node = pci_device_to_OF_node(pdev); + if (!ep->of_node) { + printk(KERN_ERR "e3d: Cannot find OF node of %s\n", + pci_name(pdev)); + err = -ENODEV; + goto err_release_fb; + } + + /* Read the PCI base register of the frame buffer, which we + * need in order to interpret the RAMDAC_VID_*FB* values in + * the ramdac correctly. + */ + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, + &ep->fb_base_reg); + ep->fb_base_reg &= PCI_BASE_ADDRESS_MEM_MASK; + + ep->regs_base_phys = pci_resource_start (pdev, 1); + err = pci_request_region(pdev, 1, "e3d regs"); + if (err < 0) { + printk("e3d: Cannot request region 1 for %s\n", + pci_name(pdev)); + goto err_release_fb; + } + ep->ramdac = ioremap(ep->regs_base_phys + 0x8000, 0x1000); + if (!ep->ramdac) + goto err_release_pci1; + + ep->fb8_0_off = readl(ep->ramdac + RAMDAC_VID_8FB_0); + ep->fb8_0_off -= ep->fb_base_reg; + + ep->fb8_1_off = readl(ep->ramdac + RAMDAC_VID_8FB_1); + ep->fb8_1_off -= ep->fb_base_reg; + + ep->fb8_buf_diff = ep->fb8_1_off - ep->fb8_0_off; + + ep->fb_base_phys = pci_resource_start (pdev, 0); + ep->fb_base_phys += ep->fb8_0_off; + + err = pci_request_region(pdev, 0, "e3d framebuffer"); + if (err < 0) { + printk("e3d: Cannot request region 0 for %s\n", + pci_name(pdev)); + goto err_unmap_ramdac; + } + + err = e3d_get_props(ep); + if (err) + goto err_release_pci0; + + line_length = (readl(ep->ramdac + RAMDAC_VID_CFG) >> 16) & 0xff; + line_length = 1 << line_length; + + switch (ep->depth) { + case 8: + info->fix.line_length = line_length; + break; + case 16: + info->fix.line_length = line_length * 2; + break; + case 24: + info->fix.line_length = line_length * 3; + break; + case 32: + info->fix.line_length = line_length * 4; + break; + } + ep->fb_size = info->fix.line_length * ep->height; + + ep->fb_base = ioremap(ep->fb_base_phys, ep->fb_size); + if (!ep->fb_base) + goto err_release_pci0; + + err = e3d_set_fbinfo(ep); + if (err) + goto err_unmap_fb; + + pci_set_drvdata(pdev, info); + + printk("e3d: Found device at %s\n", pci_name(pdev)); + + err = register_framebuffer(info); + if (err < 0) { + printk(KERN_ERR "e3d: Could not register framebuffer %s\n", + pci_name(pdev)); + goto err_unmap_fb; + } + + return 0; + +err_unmap_fb: + iounmap(ep->fb_base); + +err_release_pci0: + pci_release_region(pdev, 0); + +err_unmap_ramdac: + iounmap(ep->ramdac); + +err_release_pci1: + pci_release_region(pdev, 1); + +err_release_fb: + framebuffer_release(info); + +err_disable: + pci_disable_device(pdev); + +err_out: + return err; +} + +static void __devexit e3d_pci_unregister(struct pci_dev *pdev) +{ + struct fb_info *info = pci_get_drvdata(pdev); + struct e3d_info *ep = info->par; + + unregister_framebuffer(info); + + iounmap(ep->ramdac); + iounmap(ep->fb_base); + + pci_release_region(pdev, 0); + pci_release_region(pdev, 1); + + framebuffer_release(info); + + pci_disable_device(pdev); +} + +static struct pci_device_id e3d_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0), }, + { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a2), }, + { 
.vendor = PCI_VENDOR_ID_3DLABS, + .device = PCI_ANY_ID, + .subvendor = PCI_VENDOR_ID_3DLABS, + .subdevice = 0x0108, + }, + { .vendor = PCI_VENDOR_ID_3DLABS, + .device = PCI_ANY_ID, + .subvendor = PCI_VENDOR_ID_3DLABS, + .subdevice = 0x0140, + }, + { .vendor = PCI_VENDOR_ID_3DLABS, + .device = PCI_ANY_ID, + .subvendor = PCI_VENDOR_ID_3DLABS, + .subdevice = 0x1024, + }, + { 0, } +}; + +static struct pci_driver e3d_driver = { + .name = "e3d", + .id_table = e3d_pci_table, + .probe = e3d_pci_register, + .remove = __devexit_p(e3d_pci_unregister), +}; + +static int __init e3d_init(void) +{ + if (fb_get_options("e3d", NULL)) + return -ENODEV; + + return pci_register_driver(&e3d_driver); +} + +static void __exit e3d_exit(void) +{ + pci_unregister_driver(&e3d_driver); +} + +module_init(e3d_init); +module_exit(e3d_exit); + +MODULE_DESCRIPTION("framebuffer driver for Sun XVR-500 graphics"); +MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL");