author    | Jiri Kosina <jkosina@suse.cz> | 2013-12-19 15:08:03 +0100
committer | Jiri Kosina <jkosina@suse.cz> | 2013-12-19 15:08:32 +0100
commit    | e23c34bb41da65f354fb7eee04300c56ee48f60c
tree      | 549fbe449d55273b81ef104a9755109bf4ae7817 /drivers/block
parent    | staging: usbip: Remove double initialization of msg_namelen variable
parent    | Linux 3.13-rc4
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things
in tree (efi-stub).
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'drivers/block')
38 files changed, 7862 insertions, 1113 deletions
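The summary above can be reproduced from a local clone that contains this merge. A minimal sketch, assuming cgit's default of comparing a merge commit against its first parent and limiting the output to drivers/block:

    git diff --stat e23c34bb41da65f354fb7eee04300c56ee48f60c^1 \
        e23c34bb41da65f354fb7eee04300c56ee48f60c -- drivers/block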
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index e07a5fd58ad7..86b9f37d102e 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -15,6 +15,9 @@ menuconfig BLK_DEV if BLK_DEV +config BLK_DEV_NULL_BLK + tristate "Null test block driver" + config BLK_DEV_FD tristate "Normal floppy disk support" depends on ARCH_MAY_HAVE_PC_FDC @@ -107,7 +110,7 @@ source "drivers/block/mtip32xx/Kconfig" config BLK_CPQ_DA tristate "Compaq SMART2 support" - depends on PCI && VIRT_TO_BUS + depends on PCI && VIRT_TO_BUS && 0 help This is the driver for Compaq Smart Array controllers. Everyone using these boards should say Y here. See the file @@ -316,6 +319,16 @@ config BLK_DEV_NVME To compile this driver as a module, choose M here: the module will be called nvme. +config BLK_DEV_SKD + tristate "STEC S1120 Block Driver" + depends on PCI + depends on 64BIT + ---help--- + Saying Y or M here will enable support for the + STEC, Inc. S1120 PCIe SSD. + + Use device /dev/skd$N amd /dev/skd$Np$M. + config BLK_DEV_OSD tristate "OSD object-as-blkdev support" depends on SCSI_OSD_ULD @@ -505,7 +518,7 @@ config VIRTIO_BLK config BLK_DEV_HD bool "Very old hard disk (MFM/RLL/IDE) driver" depends on HAVE_IDE - depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN + depends on !ARM || ARCH_RPC || BROKEN help This is a very old hard disk driver that lacks the enhanced functionality of the newer ones. diff --git a/drivers/block/Makefile b/drivers/block/Makefile index d521b5a081b9..816d979c3266 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o obj-$(CONFIG_MG_DISK) += mg_disk.o obj-$(CONFIG_SUNVDC) += sunvdc.o obj-$(CONFIG_BLK_DEV_NVME) += nvme.o +obj-$(CONFIG_BLK_DEV_SKD) += skd.o obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o obj-$(CONFIG_BLK_DEV_UMEM) += umem.o @@ -40,6 +41,8 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/ +obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o nvme-y := nvme-core.o nvme-scsi.o +skd-y := skd_main.o swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 4ff85b8785ee..748dea4f34dc 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -343,7 +343,7 @@ static int fd_motor_on(int nr) unit[nr].motor = 1; fd_select(nr); - INIT_COMPLETION(motor_on_completion); + reinit_completion(&motor_on_completion); motor_on_timer.data = nr; mod_timer(&motor_on_timer, jiffies + HZ/2); diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 025c41d3cb33..14a9d1912318 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ /* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */ -#define VERSION "83" +#define VERSION "85" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" @@ -169,6 +169,7 @@ struct aoedev { ulong ref; struct work_struct work;/* disk create work struct */ struct gendisk *gd; + struct dentry *debugfs; struct request_queue *blkq; struct hd_geometry geo; sector_t ssize; @@ -206,6 +207,7 @@ struct ktstate { int aoeblk_init(void); void aoeblk_exit(void); void aoeblk_gdalloc(void *); +void aoedisk_rm_debugfs(struct aoedev *d); void aoedisk_rm_sysfs(struct aoedev *d); int aoechr_init(void); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 916d9ed5c8aa..dd73e1ff1759 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. 
*/ +/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */ /* * aoeblk.c * block device routines @@ -17,11 +17,13 @@ #include <linux/mutex.h> #include <linux/export.h> #include <linux/moduleparam.h> +#include <linux/debugfs.h> #include <scsi/sg.h> #include "aoe.h" static DEFINE_MUTEX(aoeblk_mutex); static struct kmem_cache *buf_pool_cache; +static struct dentry *aoe_debugfs_dir; /* GPFS needs a larger value than the default. */ static int aoe_maxsectors; @@ -108,6 +110,55 @@ static ssize_t aoedisk_show_payload(struct device *dev, return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt); } +static int aoedisk_debugfs_show(struct seq_file *s, void *ignored) +{ + struct aoedev *d; + struct aoetgt **t, **te; + struct aoeif *ifp, *ife; + unsigned long flags; + char c; + + d = s->private; + seq_printf(s, "rttavg: %d rttdev: %d\n", + d->rttavg >> RTTSCALE, + d->rttdev >> RTTDSCALE); + seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool)); + seq_printf(s, "kicked: %ld\n", d->kicked); + seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt); + seq_printf(s, "ref: %ld\n", d->ref); + + spin_lock_irqsave(&d->lock, flags); + t = d->targets; + te = t + d->ntargets; + for (; t < te && *t; t++) { + c = '\t'; + seq_printf(s, "falloc: %ld\n", (*t)->falloc); + seq_printf(s, "ffree: %p\n", + list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next); + seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout, + (*t)->maxout, (*t)->nframes); + seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh); + seq_printf(s, "\ttaint:%d\n", (*t)->taint); + seq_printf(s, "\tr:%d\n", (*t)->rpkts); + seq_printf(s, "\tw:%d\n", (*t)->wpkts); + ifp = (*t)->ifs; + ife = ifp + ARRAY_SIZE((*t)->ifs); + for (; ifp->nd && ifp < ife; ifp++) { + seq_printf(s, "%c%s", c, ifp->nd->name); + c = ','; + } + seq_puts(s, "\n"); + } + spin_unlock_irqrestore(&d->lock, flags); + + return 0; +} + +static int aoe_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, aoedisk_debugfs_show, inode->i_private); +} + static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL); static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL); static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL); @@ -130,6 +181,44 @@ static const struct attribute_group attr_group = { .attrs = aoe_attrs, }; +static const struct file_operations aoe_debugfs_fops = { + .open = aoe_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void +aoedisk_add_debugfs(struct aoedev *d) +{ + struct dentry *entry; + char *p; + + if (aoe_debugfs_dir == NULL) + return; + p = strchr(d->gd->disk_name, '/'); + if (p == NULL) + p = d->gd->disk_name; + else + p++; + BUG_ON(*p == '\0'); + entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d, + &aoe_debugfs_fops); + if (IS_ERR_OR_NULL(entry)) { + pr_info("aoe: cannot create debugfs file for %s\n", + d->gd->disk_name); + return; + } + BUG_ON(d->debugfs); + d->debugfs = entry; +} +void +aoedisk_rm_debugfs(struct aoedev *d) +{ + debugfs_remove(d->debugfs); + d->debugfs = NULL; +} + static int aoedisk_add_sysfs(struct aoedev *d) { @@ -330,6 +419,7 @@ aoeblk_gdalloc(void *vp) add_disk(gd); aoedisk_add_sysfs(d); + aoedisk_add_debugfs(d); spin_lock_irqsave(&d->lock, flags); WARN_ON(!(d->flags & DEVFL_GD_NOW)); @@ -351,6 +441,8 @@ err: void aoeblk_exit(void) { + debugfs_remove_recursive(aoe_debugfs_dir); + aoe_debugfs_dir = NULL; kmem_cache_destroy(buf_pool_cache); } @@ -362,7 +454,11 @@ aoeblk_init(void) 0, 0, NULL); if (buf_pool_cache == NULL) return -ENOMEM; - + aoe_debugfs_dir = 
debugfs_create_dir("aoe", NULL); + if (IS_ERR_OR_NULL(aoe_debugfs_dir)) { + pr_info("aoe: cannot create debugfs directory\n"); + aoe_debugfs_dir = NULL; + } return 0; } diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 4d45dba7fb8f..d2515435e23f 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -380,7 +380,6 @@ aoecmd_ata_rw(struct aoedev *d) { struct frame *f; struct buf *buf; - struct aoetgt *t; struct sk_buff *skb; struct sk_buff_head queue; ulong bcnt, fbcnt; @@ -391,7 +390,6 @@ aoecmd_ata_rw(struct aoedev *d) f = newframe(d); if (f == NULL) return 0; - t = *d->tgt; bcnt = d->maxbcnt; if (bcnt == 0) bcnt = DEFAULTBCNT; @@ -485,7 +483,6 @@ resend(struct aoedev *d, struct frame *f) struct sk_buff *skb; struct sk_buff_head queue; struct aoe_hdr *h; - struct aoe_atahdr *ah; struct aoetgt *t; char buf[128]; u32 n; @@ -500,7 +497,6 @@ resend(struct aoedev *d, struct frame *f) return; } h = (struct aoe_hdr *) skb_mac_header(skb); - ah = (struct aoe_atahdr *) (h+1); if (!(f->flags & FFL_PROBE)) { snprintf(buf, sizeof(buf), diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 784c92e038d1..e774c50b6842 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -12,6 +12,7 @@ #include <linux/bitmap.h> #include <linux/kdev_t.h> #include <linux/moduleparam.h> +#include <linux/string.h> #include "aoe.h" static void dummy_timer(ulong); @@ -241,16 +242,12 @@ aoedev_downdev(struct aoedev *d) static int user_req(char *s, size_t slen, struct aoedev *d) { - char *p; + const char *p; size_t lim; if (!d->gd) return 0; - p = strrchr(d->gd->disk_name, '/'); - if (!p) - p = d->gd->disk_name; - else - p += 1; + p = kbasename(d->gd->disk_name); lim = sizeof(d->gd->disk_name); lim -= p - d->gd->disk_name; if (slen < lim) @@ -278,6 +275,7 @@ freedev(struct aoedev *d) del_timer_sync(&d->timer); if (d->gd) { + aoedisk_rm_debugfs(d); aoedisk_rm_sysfs(d); del_gendisk(d->gd); put_disk(d->gd); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 9bf4371755f2..d91f1a56e861 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -545,7 +545,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) mutex_lock(&brd_devices_mutex); brd = brd_init_one(MINOR(dev) >> part_shift); - kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); + kobj = brd ? 
get_disk(brd->brd_disk) : NULL; mutex_unlock(&brd_devices_mutex); *part = 0; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 62b6c2cc80b5..b35fc4f5237c 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, @@ -2807,7 +2808,7 @@ resend_cmd2: /* erase the old error information */ memset(c->err_info, 0, sizeof(ErrorInfo_struct)); return_status = IO_OK; - INIT_COMPLETION(wait); + reinit_completion(&wait); goto resend_cmd2; } @@ -3668,7 +3669,7 @@ static int add_to_scan_list(struct ctlr_info *h) } } if (!found && !h->busy_scanning) { - INIT_COMPLETION(h->scan_wait); + reinit_completion(&h->scan_wait); list_add_tail(&h->scan_list, &scan_q); ret = 1; } @@ -4258,6 +4259,13 @@ static void cciss_find_board_params(ctlr_info_t *h) h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds; h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); /* + * The P600 may exhibit poor performnace under some workloads + * if we use the value in the configuration table. Limit this + * controller to MAXSGENTRIES (32) instead. + */ + if (h->board_id == 0x3225103C) + h->maxsgentries = MAXSGENTRIES; + /* * Limit in-command s/g elements to 32 save dma'able memory. * Howvever spec says if 0, use 31 */ @@ -5175,7 +5183,7 @@ reinit_after_soft_reset: rebuild_lun_table(h, 1, 0); cciss_engage_scsi(h); h->busy_initializing = 0; - return 1; + return 0; clean4: cciss_free_cmd_pool(h); diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 639d26b90b91..2b9440384536 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -1193,6 +1193,7 @@ out_passthru: ida_pci_info_struct pciinfo; if (!arg) return -EINVAL; + memset(&pciinfo, 0, sizeof(pciinfo)); pciinfo.bus = host->pci_dev->bus->number; pciinfo.dev_fn = host->pci_dev->devfn; pciinfo.board_id = host->board_id; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 2d7f608d181c..0e06f0c5dd1e 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1474,7 +1474,8 @@ enum determine_dev_size { DS_ERROR = -1, DS_UNCHANGED = 0, DS_SHRUNK = 1, - DS_GREW = 2 + DS_GREW = 2, + DS_GREW_FROM_ZERO = 3, }; extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 55635edf563b..9e3818b1bc83 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2750,13 +2750,6 @@ int __init drbd_init(void) return err; } - err = drbd_genl_register(); - if (err) { - printk(KERN_ERR "drbd: unable to register generic netlink family\n"); - goto fail; - } - - register_reboot_notifier(&drbd_notifier); /* @@ -2767,6 +2760,15 @@ int __init drbd_init(void) drbd_proc = NULL; /* play safe for drbd_cleanup */ idr_init(&minors); + rwlock_init(&global_state_lock); + INIT_LIST_HEAD(&drbd_tconns); + + err = drbd_genl_register(); + if (err) { + printk(KERN_ERR "drbd: unable to register generic netlink family\n"); + goto fail; + } + err = drbd_create_mempools(); if (err) goto fail; @@ -2778,9 +2780,6 @@ int __init drbd_init(void) goto fail; } - rwlock_init(&global_state_lock); - INIT_LIST_HEAD(&drbd_tconns); - retry.wq = create_singlethread_workqueue("drbd-reissue"); if (!retry.wq) { printk(KERN_ERR "drbd: unable to create retry 
workqueue\n"); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 8cc1e640f485..c706d50a8b06 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -955,7 +955,7 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res } if (size > la_size_sect) - rv = DS_GREW; + rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO; if (size < la_size_sect) rv = DS_SHRUNK; @@ -1132,9 +1132,9 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) /* We may ignore peer limits if the peer is modern enough. Because new from 8.3.8 onwards the peer can use multiple BIOs for a single peer_request */ - if (mdev->state.conn >= C_CONNECTED) { + if (mdev->state.conn >= C_WF_REPORT_PARAMS) { if (mdev->tconn->agreed_pro_version < 94) - peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ else if (mdev->tconn->agreed_pro_version == 94) peer = DRBD_MAX_SIZE_H80_PACKET; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index cc29cd3bf78b..6fa6673b36b3 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1890,29 +1890,11 @@ static u32 seq_max(u32 a, u32 b) return seq_greater(a, b) ? a : b; } -static bool need_peer_seq(struct drbd_conf *mdev) -{ - struct drbd_tconn *tconn = mdev->tconn; - int tp; - - /* - * We only need to keep track of the last packet_seq number of our peer - * if we are in dual-primary mode and we have the resolve-conflicts flag set; see - * handle_write_conflicts(). - */ - - rcu_read_lock(); - tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; - rcu_read_unlock(); - - return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags); -} - static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) { unsigned int newest_peer_seq; - if (need_peer_seq(mdev)) { + if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) { spin_lock(&mdev->peer_seq_lock); newest_peer_seq = seq_max(mdev->peer_seq, peer_seq); mdev->peer_seq = newest_peer_seq; @@ -1972,22 +1954,31 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s { DEFINE_WAIT(wait); long timeout; - int ret; + int ret = 0, tp; - if (!need_peer_seq(mdev)) + if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) return 0; spin_lock(&mdev->peer_seq_lock); for (;;) { if (!seq_greater(peer_seq - 1, mdev->peer_seq)) { mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); - ret = 0; break; } + if (signal_pending(current)) { ret = -ERESTARTSYS; break; } + + rcu_read_lock(); + tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; + rcu_read_unlock(); + + if (!tp) + break; + + /* Only need to wait if two_primaries is enabled */ prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); spin_unlock(&mdev->peer_seq_lock); rcu_read_lock(); @@ -2228,8 +2219,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) } goto out_interrupted; } - } else + } else { + update_peer_seq(mdev, peer_seq); spin_lock_irq(&mdev->tconn->req_lock); + } list_add(&peer_req->w.list, &mdev->active_ee); spin_unlock_irq(&mdev->tconn->req_lock); @@ -4132,7 +4125,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev, (unsigned int)bs.buf_len); return -EIO; } - look_ahead >>= bits; + /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */ + if (likely(bits < 64)) + look_ahead >>= bits; + else + look_ahead = 0; have -= bits; 
bits = bitstream_get_bits(&bs, &tmp, 64 - have); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index c24379ffd4e3..fec7bef44994 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1306,6 +1306,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct int backing_limit; if (bio_size && get_ldev(mdev)) { + unsigned int max_hw_sectors = queue_max_hw_sectors(q); struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; if (b->merge_bvec_fn) { @@ -1313,6 +1314,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct limit = min(limit, backing_limit); } put_ldev(mdev); + if ((limit >> 9) > max_hw_sectors) + limit = max_hw_sectors << 9; } return limit; } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 04ceb7e2fadd..000abe2f105c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2886,9 +2886,9 @@ static void do_fd_request(struct request_queue *q) return; if (WARN(atomic_read(&usage_count) == 0, - "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n", + "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n", current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, - current_req->cmd_flags)) + (unsigned long long) current_req->cmd_flags)) return; if (test_and_set_bit(0, &fdc_busy)) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 40e715531aa6..c8dac7305244 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -894,13 +894,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, bio_list_init(&lo->lo_bio_list); - /* - * set queue make_request_fn, and add limits based on lower level - * device - */ - blk_queue_make_request(lo->lo_queue, loop_make_request); - lo->lo_queue->queuedata = lo; - if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) blk_queue_flush(lo->lo_queue, REQ_FLUSH); @@ -1618,6 +1611,8 @@ static int loop_add(struct loop_device **l, int i) if (!lo) goto out; + lo->lo_state = Lo_unbound; + /* allocate id, if @id >= 0, we're requesting that specific id */ if (i >= 0) { err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); @@ -1633,7 +1628,13 @@ static int loop_add(struct loop_device **l, int i) err = -ENOMEM; lo->lo_queue = blk_alloc_queue(GFP_KERNEL); if (!lo->lo_queue) - goto out_free_dev; + goto out_free_idr; + + /* + * set queue make_request_fn + */ + blk_queue_make_request(lo->lo_queue, loop_make_request); + lo->lo_queue->queuedata = lo; disk = lo->lo_disk = alloc_disk(1 << part_shift); if (!disk) @@ -1678,6 +1679,8 @@ static int loop_add(struct loop_device **l, int i) out_free_queue: blk_cleanup_queue(lo->lo_queue); +out_free_idr: + idr_remove(&loop_index_idr, i); out_free_dev: kfree(lo); out: @@ -1741,7 +1744,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) if (err < 0) err = loop_add(&lo, MINOR(dev) >> part_shift); if (err < 0) - kobj = ERR_PTR(err); + kobj = NULL; else kobj = get_disk(lo->lo_disk); mutex_unlock(&loop_index_mutex); diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index a56cfcd5d648..7bc363f1ee82 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -636,7 +636,7 @@ ok_to_write: mg_request(host->breq); } -void mg_times_out(unsigned long data) +static void mg_times_out(unsigned long data) { struct mg_host *host = (struct mg_host *)data; char *name; @@ -936,7 +936,7 @@ static int mg_probe(struct platform_device *plat_dev) goto probe_err_3b; } err = 
request_irq(host->irq, mg_irq, - IRQF_DISABLED | IRQF_TRIGGER_RISING, + IRQF_TRIGGER_RISING, MG_DEV_NAME, host); if (err) { printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n", diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig index 1fca1f996b45..0ba837fc62a8 100644 --- a/drivers/block/mtip32xx/Kconfig +++ b/drivers/block/mtip32xx/Kconfig @@ -4,6 +4,6 @@ config BLK_DEV_PCIESSD_MTIP32XX tristate "Block Device Driver for Micron PCIe SSDs" - depends on PCI && GENERIC_HARDIRQS + depends on PCI help This enables the block driver for Micron PCIe SSDs. diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 952dbfe22126..050c71267f14 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -126,64 +126,30 @@ struct mtip_compat_ide_task_request_s { static bool mtip_check_surprise_removal(struct pci_dev *pdev) { u16 vendor_id = 0; + struct driver_data *dd = pci_get_drvdata(pdev); + + if (dd->sr) + return true; /* Read the vendorID from the configuration space */ pci_read_config_word(pdev, 0x00, &vendor_id); - if (vendor_id == 0xFFFF) + if (vendor_id == 0xFFFF) { + dd->sr = true; + if (dd->queue) + set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags); + else + dev_warn(&dd->pdev->dev, + "%s: dd->queue is NULL\n", __func__); + if (dd->port) { + set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags); + wake_up_interruptible(&dd->port->svc_wait); + } else + dev_warn(&dd->pdev->dev, + "%s: dd->port is NULL\n", __func__); return true; /* device removed */ - - return false; /* device present */ -} - -/* - * This function is called for clean the pending command in the - * command slot during the surprise removal of device and return - * error to the upper layer. - * - * @dd Pointer to the DRIVER_DATA structure. - * - * return value - * None - */ -static void mtip_command_cleanup(struct driver_data *dd) -{ - int group = 0, commandslot = 0, commandindex = 0; - struct mtip_cmd *command; - struct mtip_port *port = dd->port; - static int in_progress; - - if (in_progress) - return; - - in_progress = 1; - - for (group = 0; group < 4; group++) { - for (commandslot = 0; commandslot < 32; commandslot++) { - if (!(port->allocated[group] & (1 << commandslot))) - continue; - - commandindex = group << 5 | commandslot; - command = &port->commands[commandindex]; - - if (atomic_read(&command->active) - && (command->async_callback)) { - command->async_callback(command->async_data, - -ENODEV); - command->async_callback = NULL; - command->async_data = NULL; - } - - dma_unmap_sg(&port->dd->pdev->dev, - command->sg, - command->scatter_ents, - command->direction); - } } - up(&port->cmd_slot); - - set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag); - in_progress = 0; + return false; /* device present */ } /* @@ -222,10 +188,7 @@ static int get_slot(struct mtip_port *port) } dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n"); - if (mtip_check_surprise_removal(port->dd->pdev)) { - /* Device not present, clean outstanding commands */ - mtip_command_cleanup(port->dd); - } + mtip_check_surprise_removal(port->dd->pdev); return -1; } @@ -246,6 +209,107 @@ static inline void release_slot(struct mtip_port *port, int tag) } /* + * IO completion function. + * + * This completion function is called by the driver ISR when a + * command that was issued by the kernel completes. 
It first calls the + * asynchronous completion function which normally calls back into the block + * layer passing the asynchronous callback data, then unmaps the + * scatter list associated with the completed command, and finally + * clears the allocated bit associated with the completed command. + * + * @port Pointer to the port data structure. + * @tag Tag of the command. + * @data Pointer to driver_data. + * @status Completion status. + * + * return value + * None + */ +static void mtip_async_complete(struct mtip_port *port, + int tag, + void *data, + int status) +{ + struct mtip_cmd *command; + struct driver_data *dd = data; + int cb_status = status ? -EIO : 0; + + if (unlikely(!dd) || unlikely(!port)) + return; + + command = &port->commands[tag]; + + if (unlikely(status == PORT_IRQ_TF_ERR)) { + dev_warn(&port->dd->pdev->dev, + "Command tag %d failed due to TFE\n", tag); + } + + /* Upper layer callback */ + if (likely(command->async_callback)) + command->async_callback(command->async_data, cb_status); + + command->async_callback = NULL; + command->comp_func = NULL; + + /* Unmap the DMA scatter list entries */ + dma_unmap_sg(&dd->pdev->dev, + command->sg, + command->scatter_ents, + command->direction); + + /* Clear the allocated and active bits for the command */ + atomic_set(&port->commands[tag].active, 0); + release_slot(port, tag); + + up(&port->cmd_slot); +} + +/* + * This function is called for clean the pending command in the + * command slot during the surprise removal of device and return + * error to the upper layer. + * + * @dd Pointer to the DRIVER_DATA structure. + * + * return value + * None + */ +static void mtip_command_cleanup(struct driver_data *dd) +{ + int tag = 0; + struct mtip_cmd *cmd; + struct mtip_port *port = dd->port; + unsigned int num_cmd_slots = dd->slot_groups * 32; + + if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) + return; + + if (!port) + return; + + cmd = &port->commands[MTIP_TAG_INTERNAL]; + if (atomic_read(&cmd->active)) + if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & + (1 << MTIP_TAG_INTERNAL)) + if (cmd->comp_func) + cmd->comp_func(port, MTIP_TAG_INTERNAL, + cmd->comp_data, -ENODEV); + + while (1) { + tag = find_next_bit(port->allocated, num_cmd_slots, tag); + if (tag >= num_cmd_slots) + break; + + cmd = &port->commands[tag]; + if (atomic_read(&cmd->active)) + mtip_async_complete(port, tag, dd, -ENODEV); + } + + set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag); +} + +/* * Reset the HBA (without sleeping) * * @dd Pointer to the driver data structure. @@ -584,6 +648,9 @@ static void mtip_timeout_function(unsigned long int data) if (unlikely(!port)) return; + if (unlikely(port->dd->sr)) + return; + if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) { mod_timer(&port->cmd_timer, jiffies + msecs_to_jiffies(30000)); @@ -675,66 +742,6 @@ static void mtip_timeout_function(unsigned long int data) } /* - * IO completion function. - * - * This completion function is called by the driver ISR when a - * command that was issued by the kernel completes. It first calls the - * asynchronous completion function which normally calls back into the block - * layer passing the asynchronous callback data, then unmaps the - * scatter list associated with the completed command, and finally - * clears the allocated bit associated with the completed command. - * - * @port Pointer to the port data structure. - * @tag Tag of the command. - * @data Pointer to driver_data. - * @status Completion status. 
- * - * return value - * None - */ -static void mtip_async_complete(struct mtip_port *port, - int tag, - void *data, - int status) -{ - struct mtip_cmd *command; - struct driver_data *dd = data; - int cb_status = status ? -EIO : 0; - - if (unlikely(!dd) || unlikely(!port)) - return; - - command = &port->commands[tag]; - - if (unlikely(status == PORT_IRQ_TF_ERR)) { - dev_warn(&port->dd->pdev->dev, - "Command tag %d failed due to TFE\n", tag); - } - - /* Upper layer callback */ - if (likely(command->async_callback)) - command->async_callback(command->async_data, cb_status); - - command->async_callback = NULL; - command->comp_func = NULL; - - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&dd->pdev->dev, - command->sg, - command->scatter_ents, - command->direction); - - /* Clear the allocated and active bits for the command */ - atomic_set(&port->commands[tag].active, 0); - release_slot(port, tag); - - if (unlikely(command->unaligned)) - up(&port->cmd_slot_unal); - else - up(&port->cmd_slot); -} - -/* * Internal command completion callback function. * * This function is normally called by the driver ISR when an internal @@ -854,7 +861,6 @@ static void mtip_handle_tfe(struct driver_data *dd) "Missing completion func for tag %d", tag); if (mtip_check_surprise_removal(dd->pdev)) { - mtip_command_cleanup(dd); /* don't proceed further */ return; } @@ -1018,14 +1024,12 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, command->comp_data, 0); } else { - dev_warn(&dd->pdev->dev, - "Null completion " - "for tag %d", + dev_dbg(&dd->pdev->dev, + "Null completion for tag %d", tag); if (mtip_check_surprise_removal( dd->pdev)) { - mtip_command_cleanup(dd); return; } } @@ -1145,7 +1149,6 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data) if (unlikely(port_stat & PORT_IRQ_ERR)) { if (unlikely(mtip_check_surprise_removal(dd->pdev))) { - mtip_command_cleanup(dd); /* don't proceed further */ return IRQ_HANDLED; } @@ -2806,34 +2809,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf) static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { + struct driver_data *dd = (struct driver_data *)f->private_data; int size = *offset; - char buf[MTIP_DFS_MAX_BUF_SIZE]; + char *buf; + int rv = 0; if (!len || *offset) return 0; + buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); + if (!buf) { + dev_err(&dd->pdev->dev, + "Memory allocation: status buffer\n"); + return -ENOMEM; + } + size += show_device_status(NULL, buf); *offset = size <= len ? size : len; size = copy_to_user(ubuf, buf, *offset); if (size) - return -EFAULT; + rv = -EFAULT; - return *offset; + kfree(buf); + return rv ? rv : *offset; } static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { struct driver_data *dd = (struct driver_data *)f->private_data; - char buf[MTIP_DFS_MAX_BUF_SIZE]; + char *buf; u32 group_allocated; int size = *offset; - int n; + int n, rv = 0; if (!len || size) return 0; + buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); + if (!buf) { + dev_err(&dd->pdev->dev, + "Memory allocation: register buffer\n"); + return -ENOMEM; + } + size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) @@ -2888,21 +2908,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, *offset = size <= len ? 
size : len; size = copy_to_user(ubuf, buf, *offset); if (size) - return -EFAULT; + rv = -EFAULT; - return *offset; + kfree(buf); + return rv ? rv : *offset; } static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { struct driver_data *dd = (struct driver_data *)f->private_data; - char buf[MTIP_DFS_MAX_BUF_SIZE]; + char *buf; int size = *offset; + int rv = 0; if (!len || size) return 0; + buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); + if (!buf) { + dev_err(&dd->pdev->dev, + "Memory allocation: flag buffer\n"); + return -ENOMEM; + } + size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", dd->port->flags); size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", @@ -2911,9 +2940,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, *offset = size <= len ? size : len; size = copy_to_user(ubuf, buf, *offset); if (size) - return -EFAULT; + rv = -EFAULT; - return *offset; + kfree(buf); + return rv ? rv : *offset; } static const struct file_operations mtip_device_status_fops = { @@ -3006,6 +3036,46 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd) debugfs_remove_recursive(dd->dfs_node); } +static int mtip_free_orphan(struct driver_data *dd) +{ + struct kobject *kobj; + + if (dd->bdev) { + if (dd->bdev->bd_holders >= 1) + return -2; + + bdput(dd->bdev); + dd->bdev = NULL; + } + + mtip_hw_debugfs_exit(dd); + + spin_lock(&rssd_index_lock); + ida_remove(&rssd_index_ida, dd->index); + spin_unlock(&rssd_index_lock); + + if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) && + test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { + put_disk(dd->disk); + } else { + if (dd->disk) { + kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); + if (kobj) { + mtip_hw_sysfs_exit(dd, kobj); + kobject_put(kobj); + } + del_gendisk(dd->disk); + dd->disk = NULL; + } + if (dd->queue) { + dd->queue->queuedata = NULL; + blk_cleanup_queue(dd->queue); + dd->queue = NULL; + } + } + kfree(dd); + return 0; +} /* * Perform any init/resume time hardware setup @@ -3154,6 +3224,7 @@ static int mtip_service_thread(void *data) unsigned long slot, slot_start, slot_wrap; unsigned int num_cmd_slots = dd->slot_groups * 32; struct mtip_port *port = dd->port; + int ret; while (1) { /* @@ -3164,13 +3235,18 @@ static int mtip_service_thread(void *data) !(port->flags & MTIP_PF_PAUSE_IO)); if (kthread_should_stop()) + goto st_out; + + set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); + + /* If I am an orphan, start self cleanup */ + if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags)) break; if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) - break; + goto st_out; - set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { slot = 1; /* used to restrict the loop to one iteration */ @@ -3201,7 +3277,7 @@ static int mtip_service_thread(void *data) clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { - if (!mtip_ftl_rebuild_poll(dd)) + if (mtip_ftl_rebuild_poll(dd) < 0) set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag); clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); @@ -3209,8 +3285,30 @@ static int mtip_service_thread(void *data) clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) + goto st_out; + } + + /* wait for pci remove to exit */ + while (1) { + if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag)) break; + msleep_interruptible(1000); + if (kthread_should_stop()) + goto st_out; + } + + while (1) { 
+ ret = mtip_free_orphan(dd); + if (!ret) { + /* NOTE: All data structures are invalid, do not + * access any here */ + return 0; + } + msleep_interruptible(1000); + if (kthread_should_stop()) + goto st_out; } +st_out: return 0; } @@ -3437,13 +3535,13 @@ static int mtip_hw_init(struct driver_data *dd) rv = -EFAULT; goto out3; } + mtip_dump_identify(dd->port); if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == MTIP_FTL_REBUILD_MAGIC) { set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); return MTIP_FTL_REBUILD_MAGIC; } - mtip_dump_identify(dd->port); /* check write protect, over temp and rebuild statuses */ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, @@ -3467,8 +3565,8 @@ static int mtip_hw_init(struct driver_data *dd) } if (buf[288] == 0xBF) { dev_info(&dd->pdev->dev, - "Drive indicates rebuild has failed.\n"); - /* TODO */ + "Drive is in security locked state.\n"); + set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); } } @@ -3523,9 +3621,8 @@ static int mtip_hw_exit(struct driver_data *dd) * Send standby immediate (E0h) to the drive so that it * saves its state. */ - if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { - - if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) + if (!dd->sr) { + if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) if (mtip_standby_immediate(dd->port)) dev_warn(&dd->pdev->dev, "STANDBY IMMEDIATE failed\n"); @@ -3551,6 +3648,7 @@ static int mtip_hw_exit(struct driver_data *dd) dd->port->command_list_dma); /* Free the memory allocated for the for structure. */ kfree(dd->port); + dd->port = NULL; return 0; } @@ -3572,7 +3670,8 @@ static int mtip_hw_shutdown(struct driver_data *dd) * Send standby immediate (E0h) to the drive so that it * saves its state. */ - mtip_standby_immediate(dd->port); + if (!dd->sr && dd->port) + mtip_standby_immediate(dd->port); return 0; } @@ -3887,6 +3986,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) bio_endio(bio, -ENODATA); return; } + if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { + bio_endio(bio, -ENXIO); + return; + } } if (unlikely(bio->bi_rw & REQ_DISCARD)) { @@ -4010,6 +4113,8 @@ static int mtip_block_initialize(struct driver_data *dd) dd->disk->private_data = dd; dd->index = index; + mtip_hw_debugfs_init(dd); + /* * if rebuild pending, start the service thread, and delay the block * queue creation and add_disk() @@ -4068,6 +4173,7 @@ skip_create_disk: /* Enable the block device and add it to /dev */ add_disk(dd->disk); + dd->bdev = bdget_disk(dd->disk, 0); /* * Now that the disk is active, initialize any sysfs attributes * managed by the protocol layer. @@ -4077,7 +4183,6 @@ skip_create_disk: mtip_hw_sysfs_init(dd, kobj); kobject_put(kobj); } - mtip_hw_debugfs_init(dd); if (dd->mtip_svc_handler) { set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); @@ -4103,7 +4208,8 @@ start_service_thread: return rv; kthread_run_error: - mtip_hw_debugfs_exit(dd); + bdput(dd->bdev); + dd->bdev = NULL; /* Delete our gendisk. 
This also removes the device from /dev */ del_gendisk(dd->disk); @@ -4112,6 +4218,7 @@ read_capacity_error: blk_cleanup_queue(dd->queue); block_queue_alloc_init_error: + mtip_hw_debugfs_exit(dd); disk_index_error: spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, index); @@ -4141,40 +4248,48 @@ static int mtip_block_remove(struct driver_data *dd) { struct kobject *kobj; - if (dd->mtip_svc_handler) { - set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); - wake_up_interruptible(&dd->port->svc_wait); - kthread_stop(dd->mtip_svc_handler); - } + if (!dd->sr) { + mtip_hw_debugfs_exit(dd); - /* Clean up the sysfs attributes, if created */ - if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { - kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); - if (kobj) { - mtip_hw_sysfs_exit(dd, kobj); - kobject_put(kobj); + if (dd->mtip_svc_handler) { + set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); + wake_up_interruptible(&dd->port->svc_wait); + kthread_stop(dd->mtip_svc_handler); } - } - mtip_hw_debugfs_exit(dd); - /* - * Delete our gendisk structure. This also removes the device - * from /dev - */ - if (dd->disk) { - if (dd->disk->queue) - del_gendisk(dd->disk); - else - put_disk(dd->disk); - } - - spin_lock(&rssd_index_lock); - ida_remove(&rssd_index_ida, dd->index); - spin_unlock(&rssd_index_lock); + /* Clean up the sysfs attributes, if created */ + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { + kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); + if (kobj) { + mtip_hw_sysfs_exit(dd, kobj); + kobject_put(kobj); + } + } + /* + * Delete our gendisk structure. This also removes the device + * from /dev + */ + if (dd->bdev) { + bdput(dd->bdev); + dd->bdev = NULL; + } + if (dd->disk) { + if (dd->disk->queue) { + del_gendisk(dd->disk); + blk_cleanup_queue(dd->queue); + dd->queue = NULL; + } else + put_disk(dd->disk); + } + dd->disk = NULL; - blk_cleanup_queue(dd->queue); - dd->disk = NULL; - dd->queue = NULL; + spin_lock(&rssd_index_lock); + ida_remove(&rssd_index_ida, dd->index); + spin_unlock(&rssd_index_lock); + } else { + dev_info(&dd->pdev->dev, "device %s surprise removal\n", + dd->disk->disk_name); + } /* De-initialize the protocol layer. */ mtip_hw_exit(dd); @@ -4490,8 +4605,7 @@ done: static void mtip_pci_remove(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); - int counter = 0; - unsigned long flags; + unsigned long flags, to; set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); @@ -4500,17 +4614,22 @@ static void mtip_pci_remove(struct pci_dev *pdev) list_add(&dd->remove_list, &removing_list); spin_unlock_irqrestore(&dev_lock, flags); - if (mtip_check_surprise_removal(pdev)) { - while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { - counter++; - msleep(20); - if (counter == 10) { - /* Cleanup the outstanding commands */ - mtip_command_cleanup(dd); - break; - } - } + mtip_check_surprise_removal(pdev); + synchronize_irq(dd->pdev->irq); + + /* Spin until workers are done */ + to = jiffies + msecs_to_jiffies(4000); + do { + msleep(20); + } while (atomic_read(&dd->irq_workers_active) != 0 && + time_before(jiffies, to)); + + if (atomic_read(&dd->irq_workers_active) != 0) { + dev_warn(&dd->pdev->dev, + "Completion workers still active!\n"); } + /* Cleanup the outstanding commands */ + mtip_command_cleanup(dd); /* Clean up the block layer. 
*/ mtip_block_remove(dd); @@ -4529,8 +4648,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) list_del_init(&dd->remove_list); spin_unlock_irqrestore(&dev_lock, flags); - kfree(dd); + if (!dd->sr) + kfree(dd); + else + set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag); + pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); + pci_set_drvdata(pdev, NULL); + pci_dev_put(pdev); + } /* diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 3bb8a295fbe4..9be7a1582ad3 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -140,6 +140,7 @@ enum { MTIP_PF_SVC_THD_ACTIVE_BIT = 4, MTIP_PF_ISSUE_CMDS_BIT = 5, MTIP_PF_REBUILD_BIT = 6, + MTIP_PF_SR_CLEANUP_BIT = 7, MTIP_PF_SVC_THD_STOP_BIT = 8, /* below are bit numbers in 'dd_flag' defined in driver_data */ @@ -147,15 +148,18 @@ enum { MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, - MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | - (1 << MTIP_DDF_SEC_LOCK_BIT) | - (1 << MTIP_DDF_OVER_TEMP_BIT) | - (1 << MTIP_DDF_WRITE_PROTECT_BIT)), - + MTIP_DDF_REMOVE_DONE_BIT = 4, MTIP_DDF_CLEANUP_BIT = 5, MTIP_DDF_RESUME_BIT = 6, MTIP_DDF_INIT_DONE_BIT = 7, MTIP_DDF_REBUILD_FAILED_BIT = 8, + + MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | + (1 << MTIP_DDF_SEC_LOCK_BIT) | + (1 << MTIP_DDF_OVER_TEMP_BIT) | + (1 << MTIP_DDF_WRITE_PROTECT_BIT) | + (1 << MTIP_DDF_REBUILD_FAILED_BIT)), + }; struct smart_attr { @@ -499,6 +503,8 @@ struct driver_data { bool trim_supp; /* flag indicating trim support */ + bool sr; + int numa_node; /* NUMA support */ char workq_name[32]; @@ -511,6 +517,8 @@ struct driver_data { int isr_binding; + struct block_device *bdev; + int unal_qdepth; /* qdepth of unaligned IO queue */ struct list_head online_list; /* linkage for online list */ diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c new file mode 100644 index 000000000000..f370fc13aea5 --- /dev/null +++ b/drivers/block/null_blk.c @@ -0,0 +1,635 @@ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/blk-mq.h> +#include <linux/hrtimer.h> + +struct nullb_cmd { + struct list_head list; + struct llist_node ll_list; + struct call_single_data csd; + struct request *rq; + struct bio *bio; + unsigned int tag; + struct nullb_queue *nq; +}; + +struct nullb_queue { + unsigned long *tag_map; + wait_queue_head_t wait; + unsigned int queue_depth; + + struct nullb_cmd *cmds; +}; + +struct nullb { + struct list_head list; + unsigned int index; + struct request_queue *q; + struct gendisk *disk; + struct hrtimer timer; + unsigned int queue_depth; + spinlock_t lock; + + struct nullb_queue *queues; + unsigned int nr_queues; +}; + +static LIST_HEAD(nullb_list); +static struct mutex lock; +static int null_major; +static int nullb_indexes; + +struct completion_queue { + struct llist_head list; + struct hrtimer timer; +}; + +/* + * These are per-cpu for now, they will need to be configured by the + * complete_queues parameter and appropriately mapped. 
+ */ +static DEFINE_PER_CPU(struct completion_queue, completion_queues); + +enum { + NULL_IRQ_NONE = 0, + NULL_IRQ_SOFTIRQ = 1, + NULL_IRQ_TIMER = 2, + + NULL_Q_BIO = 0, + NULL_Q_RQ = 1, + NULL_Q_MQ = 2, +}; + +static int submit_queues = 1; +module_param(submit_queues, int, S_IRUGO); +MODULE_PARM_DESC(submit_queues, "Number of submission queues"); + +static int home_node = NUMA_NO_NODE; +module_param(home_node, int, S_IRUGO); +MODULE_PARM_DESC(home_node, "Home node for the device"); + +static int queue_mode = NULL_Q_MQ; +module_param(queue_mode, int, S_IRUGO); +MODULE_PARM_DESC(use_mq, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)"); + +static int gb = 250; +module_param(gb, int, S_IRUGO); +MODULE_PARM_DESC(gb, "Size in GB"); + +static int bs = 512; +module_param(bs, int, S_IRUGO); +MODULE_PARM_DESC(bs, "Block size (in bytes)"); + +static int nr_devices = 2; +module_param(nr_devices, int, S_IRUGO); +MODULE_PARM_DESC(nr_devices, "Number of devices to register"); + +static int irqmode = NULL_IRQ_SOFTIRQ; +module_param(irqmode, int, S_IRUGO); +MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); + +static int completion_nsec = 10000; +module_param(completion_nsec, int, S_IRUGO); +MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); + +static int hw_queue_depth = 64; +module_param(hw_queue_depth, int, S_IRUGO); +MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); + +static bool use_per_node_hctx = true; +module_param(use_per_node_hctx, bool, S_IRUGO); +MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); + +static void put_tag(struct nullb_queue *nq, unsigned int tag) +{ + clear_bit_unlock(tag, nq->tag_map); + + if (waitqueue_active(&nq->wait)) + wake_up(&nq->wait); +} + +static unsigned int get_tag(struct nullb_queue *nq) +{ + unsigned int tag; + + do { + tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); + if (tag >= nq->queue_depth) + return -1U; + } while (test_and_set_bit_lock(tag, nq->tag_map)); + + return tag; +} + +static void free_cmd(struct nullb_cmd *cmd) +{ + put_tag(cmd->nq, cmd->tag); +} + +static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) +{ + struct nullb_cmd *cmd; + unsigned int tag; + + tag = get_tag(nq); + if (tag != -1U) { + cmd = &nq->cmds[tag]; + cmd->tag = tag; + cmd->nq = nq; + return cmd; + } + + return NULL; +} + +static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) +{ + struct nullb_cmd *cmd; + DEFINE_WAIT(wait); + + cmd = __alloc_cmd(nq); + if (cmd || !can_wait) + return cmd; + + do { + prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); + cmd = __alloc_cmd(nq); + if (cmd) + break; + + io_schedule(); + } while (1); + + finish_wait(&nq->wait, &wait); + return cmd; +} + +static void end_cmd(struct nullb_cmd *cmd) +{ + if (cmd->rq) { + if (queue_mode == NULL_Q_MQ) + blk_mq_end_io(cmd->rq, 0); + else { + INIT_LIST_HEAD(&cmd->rq->queuelist); + blk_end_request_all(cmd->rq, 0); + } + } else if (cmd->bio) + bio_endio(cmd->bio, 0); + + if (queue_mode != NULL_Q_MQ) + free_cmd(cmd); +} + +static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) +{ + struct completion_queue *cq; + struct llist_node *entry; + struct nullb_cmd *cmd; + + cq = &per_cpu(completion_queues, smp_processor_id()); + + while ((entry = llist_del_all(&cq->list)) != NULL) { + do { + cmd = container_of(entry, struct nullb_cmd, ll_list); + end_cmd(cmd); + entry = entry->next; + } while 
(entry); + } + + return HRTIMER_NORESTART; +} + +static void null_cmd_end_timer(struct nullb_cmd *cmd) +{ + struct completion_queue *cq = &per_cpu(completion_queues, get_cpu()); + + cmd->ll_list.next = NULL; + if (llist_add(&cmd->ll_list, &cq->list)) { + ktime_t kt = ktime_set(0, completion_nsec); + + hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL); + } + + put_cpu(); +} + +static void null_softirq_done_fn(struct request *rq) +{ + blk_end_request_all(rq, 0); +} + +#ifdef CONFIG_SMP + +static void null_ipi_cmd_end_io(void *data) +{ + struct completion_queue *cq; + struct llist_node *entry, *next; + struct nullb_cmd *cmd; + + cq = &per_cpu(completion_queues, smp_processor_id()); + + entry = llist_del_all(&cq->list); + + while (entry) { + next = entry->next; + cmd = llist_entry(entry, struct nullb_cmd, ll_list); + end_cmd(cmd); + entry = next; + } +} + +static void null_cmd_end_ipi(struct nullb_cmd *cmd) +{ + struct call_single_data *data = &cmd->csd; + int cpu = get_cpu(); + struct completion_queue *cq = &per_cpu(completion_queues, cpu); + + cmd->ll_list.next = NULL; + + if (llist_add(&cmd->ll_list, &cq->list)) { + data->func = null_ipi_cmd_end_io; + data->flags = 0; + __smp_call_function_single(cpu, data, 0); + } + + put_cpu(); +} + +#endif /* CONFIG_SMP */ + +static inline void null_handle_cmd(struct nullb_cmd *cmd) +{ + /* Complete IO by inline, softirq or timer */ + switch (irqmode) { + case NULL_IRQ_NONE: + end_cmd(cmd); + break; + case NULL_IRQ_SOFTIRQ: +#ifdef CONFIG_SMP + null_cmd_end_ipi(cmd); +#else + end_cmd(cmd); +#endif + break; + case NULL_IRQ_TIMER: + null_cmd_end_timer(cmd); + break; + } +} + +static struct nullb_queue *nullb_to_queue(struct nullb *nullb) +{ + int index = 0; + + if (nullb->nr_queues != 1) + index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); + + return &nullb->queues[index]; +} + +static void null_queue_bio(struct request_queue *q, struct bio *bio) +{ + struct nullb *nullb = q->queuedata; + struct nullb_queue *nq = nullb_to_queue(nullb); + struct nullb_cmd *cmd; + + cmd = alloc_cmd(nq, 1); + cmd->bio = bio; + + null_handle_cmd(cmd); +} + +static int null_rq_prep_fn(struct request_queue *q, struct request *req) +{ + struct nullb *nullb = q->queuedata; + struct nullb_queue *nq = nullb_to_queue(nullb); + struct nullb_cmd *cmd; + + cmd = alloc_cmd(nq, 0); + if (cmd) { + cmd->rq = req; + req->special = cmd; + return BLKPREP_OK; + } + + return BLKPREP_DEFER; +} + +static void null_request_fn(struct request_queue *q) +{ + struct request *rq; + + while ((rq = blk_fetch_request(q)) != NULL) { + struct nullb_cmd *cmd = rq->special; + + spin_unlock_irq(q->queue_lock); + null_handle_cmd(cmd); + spin_lock_irq(q->queue_lock); + } +} + +static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) +{ + struct nullb_cmd *cmd = rq->special; + + cmd->rq = rq; + cmd->nq = hctx->driver_data; + + null_handle_cmd(cmd); + return BLK_MQ_RQ_QUEUE_OK; +} + +static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) +{ + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, + hctx_index); +} + +static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) +{ + kfree(hctx); +} + +static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int index) +{ + struct nullb *nullb = data; + struct nullb_queue *nq = &nullb->queues[index]; + + init_waitqueue_head(&nq->wait); + nq->queue_depth = nullb->queue_depth; + nullb->nr_queues++; + hctx->driver_data = nq; + + 
return 0; +} + +static struct blk_mq_ops null_mq_ops = { + .queue_rq = null_queue_rq, + .map_queue = blk_mq_map_queue, + .init_hctx = null_init_hctx, +}; + +static struct blk_mq_reg null_mq_reg = { + .ops = &null_mq_ops, + .queue_depth = 64, + .cmd_size = sizeof(struct nullb_cmd), + .flags = BLK_MQ_F_SHOULD_MERGE, +}; + +static void null_del_dev(struct nullb *nullb) +{ + list_del_init(&nullb->list); + + del_gendisk(nullb->disk); + if (queue_mode == NULL_Q_MQ) + blk_mq_free_queue(nullb->q); + else + blk_cleanup_queue(nullb->q); + put_disk(nullb->disk); + kfree(nullb); +} + +static int null_open(struct block_device *bdev, fmode_t mode) +{ + return 0; +} + +static void null_release(struct gendisk *disk, fmode_t mode) +{ +} + +static const struct block_device_operations null_fops = { + .owner = THIS_MODULE, + .open = null_open, + .release = null_release, +}; + +static int setup_commands(struct nullb_queue *nq) +{ + struct nullb_cmd *cmd; + int i, tag_size; + + nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); + if (!nq->cmds) + return 1; + + tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; + nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); + if (!nq->tag_map) { + kfree(nq->cmds); + return 1; + } + + for (i = 0; i < nq->queue_depth; i++) { + cmd = &nq->cmds[i]; + INIT_LIST_HEAD(&cmd->list); + cmd->ll_list.next = NULL; + cmd->tag = -1U; + } + + return 0; +} + +static void cleanup_queue(struct nullb_queue *nq) +{ + kfree(nq->tag_map); + kfree(nq->cmds); +} + +static void cleanup_queues(struct nullb *nullb) +{ + int i; + + for (i = 0; i < nullb->nr_queues; i++) + cleanup_queue(&nullb->queues[i]); + + kfree(nullb->queues); +} + +static int setup_queues(struct nullb *nullb) +{ + struct nullb_queue *nq; + int i; + + nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL); + if (!nullb->queues) + return 1; + + nullb->nr_queues = 0; + nullb->queue_depth = hw_queue_depth; + + if (queue_mode == NULL_Q_MQ) + return 0; + + for (i = 0; i < submit_queues; i++) { + nq = &nullb->queues[i]; + init_waitqueue_head(&nq->wait); + nq->queue_depth = hw_queue_depth; + if (setup_commands(nq)) + break; + nullb->nr_queues++; + } + + if (i == submit_queues) + return 0; + + cleanup_queues(nullb); + return 1; +} + +static int null_add_dev(void) +{ + struct gendisk *disk; + struct nullb *nullb; + sector_t size; + + nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); + if (!nullb) + return -ENOMEM; + + spin_lock_init(&nullb->lock); + + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) + submit_queues = nr_online_nodes; + + if (setup_queues(nullb)) + goto err; + + if (queue_mode == NULL_Q_MQ) { + null_mq_reg.numa_node = home_node; + null_mq_reg.queue_depth = hw_queue_depth; + null_mq_reg.nr_hw_queues = submit_queues; + + if (use_per_node_hctx) { + null_mq_reg.ops->alloc_hctx = null_alloc_hctx; + null_mq_reg.ops->free_hctx = null_free_hctx; + } else { + null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; + null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; + } + + nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); + } else if (queue_mode == NULL_Q_BIO) { + nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); + blk_queue_make_request(nullb->q, null_queue_bio); + } else { + nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); + blk_queue_prep_rq(nullb->q, null_rq_prep_fn); + if (nullb->q) + blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + } + + if (!nullb->q) + goto queue_fail; + + nullb->q->queuedata = nullb; + 
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); + + disk = nullb->disk = alloc_disk_node(1, home_node); + if (!disk) { +queue_fail: + if (queue_mode == NULL_Q_MQ) + blk_mq_free_queue(nullb->q); + else + blk_cleanup_queue(nullb->q); + cleanup_queues(nullb); +err: + kfree(nullb); + return -ENOMEM; + } + + mutex_lock(&lock); + list_add_tail(&nullb->list, &nullb_list); + nullb->index = nullb_indexes++; + mutex_unlock(&lock); + + blk_queue_logical_block_size(nullb->q, bs); + blk_queue_physical_block_size(nullb->q, bs); + + size = gb * 1024 * 1024 * 1024ULL; + sector_div(size, bs); + set_capacity(disk, size); + + disk->flags |= GENHD_FL_EXT_DEVT; + disk->major = null_major; + disk->first_minor = nullb->index; + disk->fops = &null_fops; + disk->private_data = nullb; + disk->queue = nullb->q; + sprintf(disk->disk_name, "nullb%d", nullb->index); + add_disk(disk); + return 0; +} + +static int __init null_init(void) +{ + unsigned int i; + +#if !defined(CONFIG_SMP) + if (irqmode == NULL_IRQ_SOFTIRQ) { + pr_warn("null_blk: softirq completions not available.\n"); + pr_warn("null_blk: using direct completions.\n"); + irqmode = NULL_IRQ_NONE; + } +#endif + + if (submit_queues > nr_cpu_ids) + submit_queues = nr_cpu_ids; + else if (!submit_queues) + submit_queues = 1; + + mutex_init(&lock); + + /* Initialize a separate list for each CPU for issuing softirqs */ + for_each_possible_cpu(i) { + struct completion_queue *cq = &per_cpu(completion_queues, i); + + init_llist_head(&cq->list); + + if (irqmode != NULL_IRQ_TIMER) + continue; + + hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cq->timer.function = null_cmd_timer_expired; + } + + null_major = register_blkdev(0, "nullb"); + if (null_major < 0) + return null_major; + + for (i = 0; i < nr_devices; i++) { + if (null_add_dev()) { + unregister_blkdev(null_major, "nullb"); + return -EINVAL; + } + } + + pr_info("null: module loaded\n"); + return 0; +} + +static void __exit null_exit(void) +{ + struct nullb *nullb; + + unregister_blkdev(null_major, "nullb"); + + mutex_lock(&lock); + while (!list_empty(&nullb_list)) { + nullb = list_entry(nullb_list.next, struct nullb, list); + null_del_dev(nullb); + } + mutex_unlock(&lock); +} + +module_init(null_init); +module_exit(null_exit); + +MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index ce79a590b45b..26d03fa0bf26 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -36,6 +36,7 @@ #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/poison.h> +#include <linux/ptrace.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/types.h> @@ -79,7 +80,9 @@ struct nvme_queue { u16 sq_head; u16 sq_tail; u16 cq_head; - u16 cq_phase; + u8 cq_phase; + u8 cqe_seen; + u8 q_suspended; unsigned long cmdid_data[]; }; @@ -115,6 +118,11 @@ static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)]; } +static unsigned nvme_queue_extra(int depth) +{ + return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info)); +} + /** * alloc_cmdid() - Allocate a Command ID * @nvmeq: The queue that will be used for this command @@ -285,6 +293,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) iod->npages = -1; iod->length = nbytes; iod->nents = 0; + iod->start_time = jiffies; } return iod; @@ -308,6 +317,30 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) kfree(iod); } +static 
void nvme_start_io_acct(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + const int rw = bio_data_dir(bio); + int cpu = part_stat_lock(); + part_round_stats(cpu, &disk->part0); + part_stat_inc(cpu, &disk->part0, ios[rw]); + part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio)); + part_inc_in_flight(&disk->part0, rw); + part_stat_unlock(); +} + +static void nvme_end_io_acct(struct bio *bio, unsigned long start_time) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + const int rw = bio_data_dir(bio); + unsigned long duration = jiffies - start_time; + int cpu = part_stat_lock(); + part_stat_add(cpu, &disk->part0, ticks[rw], duration); + part_round_stats(cpu, &disk->part0); + part_dec_in_flight(&disk->part0, rw); + part_stat_unlock(); +} + static void bio_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) { @@ -315,9 +348,11 @@ static void bio_completion(struct nvme_dev *dev, void *ctx, struct bio *bio = iod->private; u16 status = le16_to_cpup(&cqe->status) >> 1; - if (iod->nents) + if (iod->nents) { dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + nvme_end_io_acct(bio, iod->start_time); + } nvme_free_iod(dev, iod); if (status) bio_endio(bio, -EIO); @@ -422,10 +457,8 @@ static void nvme_bio_pair_endio(struct bio *bio, int err) if (atomic_dec_and_test(&bp->cnt)) { bio_endio(bp->parent, bp->err); - if (bp->bv1) - kfree(bp->bv1); - if (bp->bv2) - kfree(bp->bv2); + kfree(bp->bv1); + kfree(bp->bv2); kfree(bp); } } @@ -695,6 +728,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, cmnd->rw.control = cpu_to_le16(control); cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); + nvme_start_io_acct(bio); if (++nvmeq->sq_tail == nvmeq->q_depth) nvmeq->sq_tail = 0; writel(nvmeq->sq_tail, nvmeq->q_db); @@ -709,26 +743,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, return result; } -static void nvme_make_request(struct request_queue *q, struct bio *bio) -{ - struct nvme_ns *ns = q->queuedata; - struct nvme_queue *nvmeq = get_nvmeq(ns->dev); - int result = -EBUSY; - - spin_lock_irq(&nvmeq->q_lock); - if (bio_list_empty(&nvmeq->sq_cong)) - result = nvme_submit_bio_queue(nvmeq, ns, bio); - if (unlikely(result)) { - if (bio_list_empty(&nvmeq->sq_cong)) - add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); - bio_list_add(&nvmeq->sq_cong, bio); - } - - spin_unlock_irq(&nvmeq->q_lock); - put_nvmeq(nvmeq); -} - -static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) +static int nvme_process_cq(struct nvme_queue *nvmeq) { u16 head, phase; @@ -758,13 +773,40 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) * a big problem. 
*/ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) - return IRQ_NONE; + return 0; writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); nvmeq->cq_head = head; nvmeq->cq_phase = phase; - return IRQ_HANDLED; + nvmeq->cqe_seen = 1; + return 1; +} + +static void nvme_make_request(struct request_queue *q, struct bio *bio) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_queue *nvmeq = get_nvmeq(ns->dev); + int result = -EBUSY; + + if (!nvmeq) { + put_nvmeq(NULL); + bio_endio(bio, -EIO); + return; + } + + spin_lock_irq(&nvmeq->q_lock); + if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong)) + result = nvme_submit_bio_queue(nvmeq, ns, bio); + if (unlikely(result)) { + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); + bio_list_add(&nvmeq->sq_cong, bio); + } + + nvme_process_cq(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); + put_nvmeq(nvmeq); } static irqreturn_t nvme_irq(int irq, void *data) @@ -772,7 +814,9 @@ static irqreturn_t nvme_irq(int irq, void *data) irqreturn_t result; struct nvme_queue *nvmeq = data; spin_lock(&nvmeq->q_lock); - result = nvme_process_cq(nvmeq); + nvme_process_cq(nvmeq); + result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE; + nvmeq->cqe_seen = 0; spin_unlock(&nvmeq->q_lock); return result; } @@ -986,8 +1030,15 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) } } -static void nvme_free_queue_mem(struct nvme_queue *nvmeq) +static void nvme_free_queue(struct nvme_queue *nvmeq) { + spin_lock_irq(&nvmeq->q_lock); + while (bio_list_peek(&nvmeq->sq_cong)) { + struct bio *bio = bio_list_pop(&nvmeq->sq_cong); + bio_endio(bio, -EIO); + } + spin_unlock_irq(&nvmeq->q_lock); + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), @@ -995,17 +1046,28 @@ static void nvme_free_queue_mem(struct nvme_queue *nvmeq) kfree(nvmeq); } -static void nvme_free_queue(struct nvme_dev *dev, int qid) +static void nvme_free_queues(struct nvme_dev *dev) +{ + int i; + + for (i = dev->queue_count - 1; i >= 0; i--) { + nvme_free_queue(dev->queues[i]); + dev->queue_count--; + dev->queues[i] = NULL; + } +} + +static void nvme_disable_queue(struct nvme_dev *dev, int qid) { struct nvme_queue *nvmeq = dev->queues[qid]; int vector = dev->entry[nvmeq->cq_vector].vector; spin_lock_irq(&nvmeq->q_lock); - nvme_cancel_ios(nvmeq, false); - while (bio_list_peek(&nvmeq->sq_cong)) { - struct bio *bio = bio_list_pop(&nvmeq->sq_cong); - bio_endio(bio, -EIO); + if (nvmeq->q_suspended) { + spin_unlock_irq(&nvmeq->q_lock); + return; } + nvmeq->q_suspended = 1; spin_unlock_irq(&nvmeq->q_lock); irq_set_affinity_hint(vector, NULL); @@ -1017,15 +1079,17 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) adapter_delete_cq(dev, qid); } - nvme_free_queue_mem(nvmeq); + spin_lock_irq(&nvmeq->q_lock); + nvme_process_cq(nvmeq); + nvme_cancel_ios(nvmeq, false); + spin_unlock_irq(&nvmeq->q_lock); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth, int vector) { struct device *dmadev = &dev->pci_dev->dev; - unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * - sizeof(struct nvme_cmd_info)); + unsigned extra = nvme_queue_extra(depth); struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); if (!nvmeq) return NULL; @@ -1052,6 +1116,8 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; nvmeq->q_depth = depth; 
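/*
 * [Reader annotation -- not part of the commit being shown.]
 * nvme_process_cq() above detects new completions via the phase tag:
 * the controller flips bit 0 of each CQE's status word every time it
 * wraps the completion ring, so an entry is fresh only while that bit
 * matches nvmeq->cq_phase (now a u8, alongside the new cqe_seen flag).
 * A condensed sketch of that test, using the cqes[]/cq_phase fields
 * seen elsewhere in this file (the helper name is invented here):
 */
static inline int sketch_cqe_is_new(struct nvme_queue *nvmeq, u16 head)
{
	struct nvme_completion cqe = nvmeq->cqes[head];	/* snapshot the entry */

	return (le16_to_cpu(cqe.status) & 1) == nvmeq->cq_phase;
}
/*
 * When the scan wraps past q_depth, cq_head resets to 0 and cq_phase is
 * inverted, which is why one bit per entry is enough bookkeeping.
 */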
nvmeq->cq_vector = vector; + nvmeq->q_suspended = 1; + dev->queue_count++; return nvmeq; @@ -1075,18 +1141,29 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, IRQF_DISABLED | IRQF_SHARED, name, nvmeq); } -static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid, - int cq_size, int vector) +static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) { - int result; - struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); + struct nvme_dev *dev = nvmeq->dev; + unsigned extra = nvme_queue_extra(nvmeq->q_depth); - if (!nvmeq) - return ERR_PTR(-ENOMEM); + nvmeq->sq_tail = 0; + nvmeq->cq_head = 0; + nvmeq->cq_phase = 1; + nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + memset(nvmeq->cmdid_data, 0, extra); + memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); + nvme_cancel_ios(nvmeq, false); + nvmeq->q_suspended = 0; +} + +static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) +{ + struct nvme_dev *dev = nvmeq->dev; + int result; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) - goto free_nvmeq; + return result; result = adapter_alloc_sq(dev, qid, nvmeq); if (result < 0) @@ -1096,19 +1173,17 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid, if (result < 0) goto release_sq; - return nvmeq; + spin_lock(&nvmeq->q_lock); + nvme_init_queue(nvmeq, qid); + spin_unlock(&nvmeq->q_lock); + + return result; release_sq: adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); - free_nvmeq: - dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), - (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), - nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); - return ERR_PTR(result); + return result; } static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled) @@ -1152,6 +1227,30 @@ static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap) return nvme_wait_ready(dev, cap, true); } +static int nvme_shutdown_ctrl(struct nvme_dev *dev) +{ + unsigned long timeout; + u32 cc; + + cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL; + writel(cc, &dev->bar->cc); + + timeout = 2 * HZ + jiffies; + while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) != + NVME_CSTS_SHST_CMPLT) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + dev_err(&dev->pci_dev->dev, + "Device shutdown incomplete; abort shutdown\n"); + return -ENODEV; + } + } + + return 0; +} + static int nvme_configure_admin_queue(struct nvme_dev *dev) { int result; @@ -1159,16 +1258,17 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) u64 cap = readq(&dev->bar->cap); struct nvme_queue *nvmeq; - dev->dbs = ((void __iomem *)dev->bar) + 4096; - dev->db_stride = NVME_CAP_STRIDE(cap); - result = nvme_disable_ctrl(dev, cap); if (result < 0) return result; - nvmeq = nvme_alloc_queue(dev, 0, 64, 0); - if (!nvmeq) - return -ENOMEM; + nvmeq = dev->queues[0]; + if (!nvmeq) { + nvmeq = nvme_alloc_queue(dev, 0, 64, 0); + if (!nvmeq) + return -ENOMEM; + dev->queues[0] = nvmeq; + } aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; @@ -1185,17 +1285,15 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) result = nvme_enable_ctrl(dev, cap); if (result) - goto free_q; + return result; result = queue_request_irq(dev, nvmeq, "nvme admin"); if (result) - goto free_q; - - dev->queues[0] = nvmeq; - return result; + return result; - free_q: - nvme_free_queue_mem(nvmeq); + 
spin_lock(&nvmeq->q_lock); + nvme_init_queue(nvmeq, 0); + spin_unlock(&nvmeq->q_lock); return result; } @@ -1314,7 +1412,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.appmask = cpu_to_le16(io.appmask); if (meta_len) { - meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len); + meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, + meta_len); if (IS_ERR(meta_iod)) { status = PTR_ERR(meta_iod); meta_iod = NULL; @@ -1356,6 +1455,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) put_nvmeq(nvmeq); if (length != (io.nblocks + 1) << ns->lba_shift) status = -ENOMEM; + else if (!nvmeq || nvmeq->q_suspended) + status = -EBUSY; else status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); @@ -1453,6 +1554,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, switch (cmd) { case NVME_IOCTL_ID: + force_successful_syscall_return(); return ns->ns_id; case NVME_IOCTL_ADMIN_CMD: return nvme_user_admin_cmd(ns->dev, (void __user *)arg); @@ -1506,10 +1608,12 @@ static int nvme_kthread(void *data) if (!nvmeq) continue; spin_lock_irq(&nvmeq->q_lock); - if (nvme_process_cq(nvmeq)) - printk("process_cq did something\n"); + if (nvmeq->q_suspended) + goto unlock; + nvme_process_cq(nvmeq); nvme_cancel_ios(nvmeq, true); nvme_resubmit_bios(nvmeq); + unlock: spin_unlock_irq(&nvmeq->q_lock); } } @@ -1556,7 +1660,7 @@ static void nvme_config_discard(struct nvme_ns *ns) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); } -static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, +static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, struct nvme_id_ns *id, struct nvme_lba_range_type *rt) { struct nvme_ns *ns; @@ -1631,14 +1735,19 @@ static int set_queue_count(struct nvme_dev *dev, int count) status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, &result); if (status) - return -EIO; + return status < 0 ? 
-EIO : -EBUSY; return min(result & 0xffff, result >> 16) + 1; } +static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) +{ + return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); +} + static int nvme_setup_io_queues(struct nvme_dev *dev) { struct pci_dev *pdev = dev->pci_dev; - int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count; + int result, cpu, i, vecs, nr_io_queues, size, q_depth; nr_io_queues = num_online_cpus(); result = set_queue_count(dev, nr_io_queues); @@ -1647,53 +1756,80 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (result < nr_io_queues) nr_io_queues = result; - q_count = nr_io_queues; - /* Deregister the admin queue's interrupt */ - free_irq(dev->entry[0].vector, dev->queues[0]); - - db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); - if (db_bar_size > 8192) { + size = db_bar_size(dev, nr_io_queues); + if (size > 8192) { iounmap(dev->bar); - dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size); + do { + dev->bar = ioremap(pci_resource_start(pdev, 0), size); + if (dev->bar) + break; + if (!--nr_io_queues) + return -ENOMEM; + size = db_bar_size(dev, nr_io_queues); + } while (1); dev->dbs = ((void __iomem *)dev->bar) + 4096; dev->queues[0]->q_db = dev->dbs; } - for (i = 0; i < nr_io_queues; i++) + /* Deregister the admin queue's interrupt */ + free_irq(dev->entry[0].vector, dev->queues[0]); + + vecs = nr_io_queues; + for (i = 0; i < vecs; i++) dev->entry[i].entry = i; for (;;) { - result = pci_enable_msix(pdev, dev->entry, nr_io_queues); - if (result == 0) { - break; - } else if (result > 0) { - nr_io_queues = result; - continue; - } else { - nr_io_queues = 0; + result = pci_enable_msix(pdev, dev->entry, vecs); + if (result <= 0) break; - } + vecs = result; } - if (nr_io_queues == 0) { - nr_io_queues = q_count; + if (result < 0) { + vecs = nr_io_queues; + if (vecs > 32) + vecs = 32; for (;;) { - result = pci_enable_msi_block(pdev, nr_io_queues); + result = pci_enable_msi_block(pdev, vecs); if (result == 0) { - for (i = 0; i < nr_io_queues; i++) + for (i = 0; i < vecs; i++) dev->entry[i].vector = i + pdev->irq; break; - } else if (result > 0) { - nr_io_queues = result; - continue; - } else { - nr_io_queues = 1; + } else if (result < 0) { + vecs = 1; break; } + vecs = result; } } + /* + * Should investigate if there's a performance win from allocating + * more queues than interrupt vectors; it might allow the submission + * path to scale better, even if the receive path is limited by the + * number of interrupts. 
+ */ + nr_io_queues = vecs; + result = queue_request_irq(dev, dev->queues[0], "nvme admin"); - /* XXX: handle failure here */ + if (result) { + dev->queues[0]->q_suspended = 1; + goto free_queues; + } + + /* Free previously allocated queues that are no longer usable */ + spin_lock(&dev_list_lock); + for (i = dev->queue_count - 1; i > nr_io_queues; i--) { + struct nvme_queue *nvmeq = dev->queues[i]; + + spin_lock(&nvmeq->q_lock); + nvme_cancel_ios(nvmeq, false); + spin_unlock(&nvmeq->q_lock); + + nvme_free_queue(nvmeq); + dev->queue_count--; + dev->queues[i] = NULL; + } + spin_unlock(&dev_list_lock); cpu = cpumask_first(cpu_online_mask); for (i = 0; i < nr_io_queues; i++) { @@ -1703,11 +1839,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, NVME_Q_DEPTH); - for (i = 0; i < nr_io_queues; i++) { - dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); - if (IS_ERR(dev->queues[i + 1])) - return PTR_ERR(dev->queues[i + 1]); - dev->queue_count++; + for (i = dev->queue_count - 1; i < nr_io_queues; i++) { + dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i); + if (!dev->queues[i + 1]) { + result = -ENOMEM; + goto free_queues; + } } for (; i < num_possible_cpus(); i++) { @@ -1715,15 +1852,20 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) dev->queues[i + 1] = dev->queues[target + 1]; } - return 0; -} + for (i = 1; i < dev->queue_count; i++) { + result = nvme_create_queue(dev->queues[i], i); + if (result) { + for (--i; i > 0; i--) + nvme_disable_queue(dev, i); + goto free_queues; + } + } -static void nvme_free_queues(struct nvme_dev *dev) -{ - int i; + return 0; - for (i = dev->queue_count - 1; i >= 0; i--) - nvme_free_queue(dev, i); + free_queues: + nvme_free_queues(dev); + return result; } /* @@ -1734,7 +1876,8 @@ static void nvme_free_queues(struct nvme_dev *dev) */ static int nvme_dev_add(struct nvme_dev *dev) { - int res, nn, i; + int res; + unsigned nn, i; struct nvme_ns *ns; struct nvme_id_ctrl *ctrl; struct nvme_id_ns *id_ns; @@ -1742,10 +1885,6 @@ static int nvme_dev_add(struct nvme_dev *dev) dma_addr_t dma_addr; int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; - res = nvme_setup_io_queues(dev); - if (res) - return res; - mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, GFP_KERNEL); if (!mem) @@ -1796,23 +1935,83 @@ static int nvme_dev_add(struct nvme_dev *dev) return res; } -static int nvme_dev_remove(struct nvme_dev *dev) +static int nvme_dev_map(struct nvme_dev *dev) { - struct nvme_ns *ns, *next; + int bars, result = -ENOMEM; + struct pci_dev *pdev = dev->pci_dev; + + if (pci_enable_device_mem(pdev)) + return result; + + dev->entry[0].vector = pdev->irq; + pci_set_master(pdev); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_request_selected_regions(pdev, bars, "nvme")) + goto disable_pci; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + goto disable; + + pci_set_drvdata(pdev, dev); + dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); + if (!dev->bar) + goto disable; + + dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap)); + dev->dbs = ((void __iomem *)dev->bar) + 4096; + + return 0; + + disable: + pci_release_regions(pdev); + disable_pci: + pci_disable_device(pdev); + return result; +} + +static void nvme_dev_unmap(struct nvme_dev *dev) +{ + if (dev->pci_dev->msi_enabled) + pci_disable_msi(dev->pci_dev); + else if (dev->pci_dev->msix_enabled) + 
pci_disable_msix(dev->pci_dev); + + if (dev->bar) { + iounmap(dev->bar); + dev->bar = NULL; + } + + pci_release_regions(dev->pci_dev); + if (pci_is_enabled(dev->pci_dev)) + pci_disable_device(dev->pci_dev); +} + +static void nvme_dev_shutdown(struct nvme_dev *dev) +{ + int i; + + for (i = dev->queue_count - 1; i >= 0; i--) + nvme_disable_queue(dev, i); spin_lock(&dev_list_lock); - list_del(&dev->node); + list_del_init(&dev->node); spin_unlock(&dev_list_lock); + if (dev->bar) + nvme_shutdown_ctrl(dev); + nvme_dev_unmap(dev); +} + +static void nvme_dev_remove(struct nvme_dev *dev) +{ + struct nvme_ns *ns, *next; + list_for_each_entry_safe(ns, next, &dev->namespaces, list) { list_del(&ns->list); del_gendisk(ns->disk); nvme_ns_free(ns); } - - nvme_free_queues(dev); - - return 0; } static int nvme_setup_prp_pools(struct nvme_dev *dev) @@ -1872,15 +2071,10 @@ static void nvme_free_dev(struct kref *kref) { struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); nvme_dev_remove(dev); - if (dev->pci_dev->msi_enabled) - pci_disable_msi(dev->pci_dev); - else if (dev->pci_dev->msix_enabled) - pci_disable_msix(dev->pci_dev); - iounmap(dev->bar); + nvme_dev_shutdown(dev); + nvme_free_queues(dev); nvme_release_instance(dev); nvme_release_prp_pools(dev); - pci_disable_device(dev->pci_dev); - pci_release_regions(dev->pci_dev); kfree(dev->queues); kfree(dev->entry); kfree(dev); @@ -1921,9 +2115,40 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; +static int nvme_dev_start(struct nvme_dev *dev) +{ + int result; + + result = nvme_dev_map(dev); + if (result) + return result; + + result = nvme_configure_admin_queue(dev); + if (result) + goto unmap; + + spin_lock(&dev_list_lock); + list_add(&dev->node, &dev_list); + spin_unlock(&dev_list_lock); + + result = nvme_setup_io_queues(dev); + if (result && result != -EBUSY) + goto disable; + + return result; + + disable: + spin_lock(&dev_list_lock); + list_del_init(&dev->node); + spin_unlock(&dev_list_lock); + unmap: + nvme_dev_unmap(dev); + return result; +} + static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - int bars, result = -ENOMEM; + int result = -ENOMEM; struct nvme_dev *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1938,53 +2163,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!dev->queues) goto free; - if (pci_enable_device_mem(pdev)) - goto free; - pci_set_master(pdev); - bars = pci_select_bars(pdev, IORESOURCE_MEM); - if (pci_request_selected_regions(pdev, bars, "nvme")) - goto disable; - INIT_LIST_HEAD(&dev->namespaces); dev->pci_dev = pdev; - pci_set_drvdata(pdev, dev); - - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - else - goto disable; result = nvme_set_instance(dev); if (result) - goto disable; - - dev->entry[0].vector = pdev->irq; + goto free; result = nvme_setup_prp_pools(dev); if (result) - goto disable_msix; + goto release; - dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); - if (!dev->bar) { - result = -ENOMEM; - goto disable_msix; + result = nvme_dev_start(dev); + if (result) { + if (result == -EBUSY) + goto create_cdev; + goto release_pools; } - result = nvme_configure_admin_queue(dev); - if (result) - goto unmap; - dev->queue_count++; - - spin_lock(&dev_list_lock); - list_add(&dev->node, &dev_list); - spin_unlock(&dev_list_lock); - result = 
nvme_dev_add(dev); if (result) - goto delete; + goto shutdown; + create_cdev: scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance); dev->miscdev.minor = MISC_DYNAMIC_MINOR; dev->miscdev.parent = &pdev->dev; @@ -1999,24 +2200,13 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) remove: nvme_dev_remove(dev); - delete: - spin_lock(&dev_list_lock); - list_del(&dev->node); - spin_unlock(&dev_list_lock); - + shutdown: + nvme_dev_shutdown(dev); + release_pools: nvme_free_queues(dev); - unmap: - iounmap(dev->bar); - disable_msix: - if (dev->pci_dev->msi_enabled) - pci_disable_msi(dev->pci_dev); - else if (dev->pci_dev->msix_enabled) - pci_disable_msix(dev->pci_dev); - nvme_release_instance(dev); nvme_release_prp_pools(dev); - disable: - pci_disable_device(pdev); - pci_release_regions(pdev); + release: + nvme_release_instance(dev); free: kfree(dev->queues); kfree(dev->entry); @@ -2037,8 +2227,30 @@ static void nvme_remove(struct pci_dev *pdev) #define nvme_link_reset NULL #define nvme_slot_reset NULL #define nvme_error_resume NULL -#define nvme_suspend NULL -#define nvme_resume NULL + +static int nvme_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct nvme_dev *ndev = pci_get_drvdata(pdev); + + nvme_dev_shutdown(ndev); + return 0; +} + +static int nvme_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct nvme_dev *ndev = pci_get_drvdata(pdev); + int ret; + + ret = nvme_dev_start(ndev); + /* XXX: should remove gendisks if resume fails */ + if (ret) + nvme_free_queues(ndev); + return ret; +} + +static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); static const struct pci_error_handlers nvme_err_handler = { .error_detected = nvme_error_detected, @@ -2062,8 +2274,9 @@ static struct pci_driver nvme_driver = { .id_table = nvme_id_table, .probe = nvme_probe, .remove = nvme_remove, - .suspend = nvme_suspend, - .resume = nvme_resume, + .driver = { + .pm = &nvme_dev_pm_ops, + }, .err_handler = &nvme_err_handler, }; diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 102de2f52b5c..4a4ff4eb8e23 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -933,13 +933,12 @@ static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, int res = SNTI_TRANSLATION_SUCCESS; int xfer_len; - inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); + inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); if (inq_response == NULL) { res = -ENOMEM; goto out_mem; } - memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */ inq_response[2] = 0x00; /* Page Length MSB */ inq_response[3] = 0x3C; /* Page Length LSB */ @@ -964,12 +963,11 @@ static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, int xfer_len; u8 *log_response; - log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL); + log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL); if (log_response == NULL) { res = -ENOMEM; goto out_mem; } - memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; /* Subpage=0x00, Page Length MSB=0 */ @@ -1000,12 +998,11 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, u8 temp_c; u16 temp_k; - log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL); + log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL); if (log_response == NULL) 
{ res = -ENOMEM; goto out_mem; } - memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH); mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), @@ -1069,12 +1066,11 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 temp_c_cur, temp_c_thresh; u16 temp_k; - log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL); + log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL); if (log_response == NULL) { res = -ENOMEM; goto out_mem; } - memset(log_response, 0, LOG_TEMP_PAGE_LENGTH); mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), @@ -1380,12 +1376,11 @@ static int nvme_trans_mode_page_create(struct nvme_ns *ns, blk_desc_offset = mph_size; mode_pages_offset_1 = blk_desc_offset + blk_desc_len; - response = kmalloc(resp_size, GFP_KERNEL); + response = kzalloc(resp_size, GFP_KERNEL); if (response == NULL) { res = -ENOMEM; goto out_mem; } - memset(response, 0, resp_size); res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10, llbaa, mode_data_length, blk_desc_len); @@ -2480,12 +2475,11 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, } id_ns = mem; - response = kmalloc(resp_size, GFP_KERNEL); + response = kzalloc(resp_size, GFP_KERNEL); if (response == NULL) { res = -ENOMEM; goto out_dma; } - memset(response, 0, resp_size); nvme_trans_fill_read_cap(response, id_ns, cdb16); xfer_len = min(alloc_len, resp_size); @@ -2554,12 +2548,11 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, goto out_dma; } - response = kmalloc(resp_size, GFP_KERNEL); + response = kzalloc(resp_size, GFP_KERNEL); if (response == NULL) { res = -ENOMEM; goto out_dma; } - memset(response, 0, resp_size); /* The first LUN ID will always be 0 per the SAM spec */ for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) { @@ -2600,12 +2593,11 @@ static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) : (FIXED_FMT_SENSE_DATA_SIZE)); - response = kmalloc(resp_size, GFP_KERNEL); + response = kzalloc(resp_size, GFP_KERNEL); if (response == NULL) { res = -ENOMEM; goto out; } - memset(response, 0, resp_size); if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) { /* Descriptor Format Sense Data */ diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 1bbc681688e4..79aa179305b5 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c @@ -598,7 +598,7 @@ static ssize_t class_osdblk_remove(struct class *c, unsigned long ul; struct list_head *tmp; - rc = strict_strtoul(buf, 10, &ul); + rc = kstrtoul(buf, 10, &ul); if (rc) return rc; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index f5d0ea11d9fd..ff8668c5efb1 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -44,6 +44,8 @@ * *************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/pktcdvd.h> #include <linux/module.h> #include <linux/types.h> @@ -69,23 +71,24 @@ #define DRIVER_NAME "pktcdvd" -#if PACKET_DEBUG -#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) -#else -#define DPRINTK(fmt, args...) -#endif - -#if PACKET_DEBUG > 1 -#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) -#else -#define VPRINTK(fmt, args...) -#endif +#define pkt_err(pd, fmt, ...) \ + pr_err("%s: " fmt, pd->name, ##__VA_ARGS__) +#define pkt_notice(pd, fmt, ...) 
\ + pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__) +#define pkt_info(pd, fmt, ...) \ + pr_info("%s: " fmt, pd->name, ##__VA_ARGS__) + +#define pkt_dbg(level, pd, fmt, ...) \ +do { \ + if (level == 2 && PACKET_DEBUG >= 2) \ + pr_notice("%s: %s():" fmt, \ + pd->name, __func__, ##__VA_ARGS__); \ + else if (level == 1 && PACKET_DEBUG >= 1) \ + pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \ +} while (0) #define MAX_SPEED 0xffff -#define ZONE(sector, pd) (((sector) + (pd)->offset) & \ - ~(sector_t)((pd)->settings.size - 1)) - static DEFINE_MUTEX(pktcdvd_mutex); static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; static struct proc_dir_entry *pkt_proc; @@ -103,7 +106,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev); static int pkt_remove_dev(dev_t pkt_dev); static int pkt_seq_show(struct seq_file *m, void *p); - +static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) +{ + return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); +} /* * create and register a pktcdvd kernel object. @@ -424,7 +430,7 @@ static int pkt_sysfs_init(void) if (ret) { kfree(class_pktcdvd); class_pktcdvd = NULL; - printk(DRIVER_NAME": failed to create class pktcdvd\n"); + pr_err("failed to create class pktcdvd\n"); return ret; } return 0; @@ -467,45 +473,31 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) { if (!pkt_debugfs_root) return; - pd->dfs_f_info = NULL; pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); - if (IS_ERR(pd->dfs_d_root)) { - pd->dfs_d_root = NULL; + if (!pd->dfs_d_root) return; - } + pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, pd->dfs_d_root, pd, &debug_fops); - if (IS_ERR(pd->dfs_f_info)) { - pd->dfs_f_info = NULL; - return; - } } static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) { if (!pkt_debugfs_root) return; - if (pd->dfs_f_info) - debugfs_remove(pd->dfs_f_info); + debugfs_remove(pd->dfs_f_info); + debugfs_remove(pd->dfs_d_root); pd->dfs_f_info = NULL; - if (pd->dfs_d_root) - debugfs_remove(pd->dfs_d_root); pd->dfs_d_root = NULL; } static void pkt_debugfs_init(void) { pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL); - if (IS_ERR(pkt_debugfs_root)) { - pkt_debugfs_root = NULL; - return; - } } static void pkt_debugfs_cleanup(void) { - if (!pkt_debugfs_root) - return; debugfs_remove(pkt_debugfs_root); pkt_debugfs_root = NULL; } @@ -517,7 +509,7 @@ static void pkt_bio_finished(struct pktcdvd_device *pd) { BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { - VPRINTK(DRIVER_NAME": queue empty\n"); + pkt_dbg(2, pd, "queue empty\n"); atomic_set(&pd->iosched.attention, 1); wake_up(&pd->wqueue); } @@ -734,36 +726,33 @@ out: return ret; } +static const char *sense_key_string(__u8 index) +{ + static const char * const info[] = { + "No sense", "Recovered error", "Not ready", + "Medium error", "Hardware error", "Illegal request", + "Unit attention", "Data protect", "Blank check", + }; + + return index < ARRAY_SIZE(info) ? info[index] : "INVALID"; +} + /* * A generic sense dump / resolve mechanism should be implemented across * all ATAPI + SCSI devices. 
*/ -static void pkt_dump_sense(struct packet_command *cgc) +static void pkt_dump_sense(struct pktcdvd_device *pd, + struct packet_command *cgc) { - static char *info[9] = { "No sense", "Recovered error", "Not ready", - "Medium error", "Hardware error", "Illegal request", - "Unit attention", "Data protect", "Blank check" }; - int i; struct request_sense *sense = cgc->sense; - printk(DRIVER_NAME":"); - for (i = 0; i < CDROM_PACKET_SIZE; i++) - printk(" %02x", cgc->cmd[i]); - printk(" - "); - - if (sense == NULL) { - printk("no sense\n"); - return; - } - - printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq); - - if (sense->sense_key > 8) { - printk(" (INVALID)\n"); - return; - } - - printk(" (%s)\n", info[sense->sense_key]); + if (sense) + pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", + CDROM_PACKET_SIZE, cgc->cmd, + sense->sense_key, sense->asc, sense->ascq, + sense_key_string(sense->sense_key)); + else + pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); } /* @@ -806,7 +795,7 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, cgc.cmd[5] = write_speed & 0xff; if ((ret = pkt_generic_packet(pd, &cgc))) - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } @@ -872,7 +861,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) need_write_seek = 0; if (need_write_seek && reads_queued) { if (atomic_read(&pd->cdrw.pending_bios) > 0) { - VPRINTK(DRIVER_NAME": write, waiting\n"); + pkt_dbg(2, pd, "write, waiting\n"); break; } pkt_flush_cache(pd); @@ -881,7 +870,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) } else { if (!reads_queued && writes_queued) { if (atomic_read(&pd->cdrw.pending_bios) > 0) { - VPRINTK(DRIVER_NAME": read, waiting\n"); + pkt_dbg(2, pd, "read, waiting\n"); break; } pd->iosched.writing = 1; @@ -943,7 +932,7 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que set_bit(PACKET_MERGE_SEGS, &pd->flags); return 0; } else { - printk(DRIVER_NAME": cdrom max_phys_segments too small\n"); + pkt_err(pd, "cdrom max_phys_segments too small\n"); return -EIO; } } @@ -987,8 +976,9 @@ static void pkt_end_io_read(struct bio *bio, int err) struct pktcdvd_device *pd = pkt->pd; BUG_ON(!pd); - VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio, - (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err); + pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", + bio, (unsigned long long)pkt->sector, + (unsigned long long)bio->bi_sector, err); if (err) atomic_inc(&pkt->io_errors); @@ -1005,7 +995,7 @@ static void pkt_end_io_packet_write(struct bio *bio, int err) struct pktcdvd_device *pd = pkt->pd; BUG_ON(!pd); - VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err); + pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err); pd->stats.pkt_ended++; @@ -1047,7 +1037,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) spin_unlock(&pkt->lock); if (pkt->cache_valid) { - VPRINTK("pkt_gather_data: zone %llx cached\n", + pkt_dbg(2, pd, "zone %llx cached\n", (unsigned long long)pkt->sector); goto out_account; } @@ -1070,7 +1060,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) p = (f * CD_FRAMESIZE) / PAGE_SIZE; offset = (f * CD_FRAMESIZE) % PAGE_SIZE; - VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n", + pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n", f, pkt->pages[p], offset); if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) BUG(); @@ -1082,7 
+1072,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) } out_account: - VPRINTK("pkt_gather_data: need %d frames for zone %llx\n", + pkt_dbg(2, pd, "need %d frames for zone %llx\n", frames_read, (unsigned long long)pkt->sector); pd->stats.pkt_started++; pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); @@ -1183,7 +1173,8 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED" }; enum packet_data_state old_state = pkt->state; - VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector, + pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n", + pkt->id, (unsigned long long)pkt->sector, state_name[old_state], state_name[state]); #endif pkt->state = state; @@ -1202,12 +1193,10 @@ static int pkt_handle_queue(struct pktcdvd_device *pd) struct rb_node *n; int wakeup; - VPRINTK("handle_queue\n"); - atomic_set(&pd->scan_queue, 0); if (list_empty(&pd->cdrw.pkt_free_list)) { - VPRINTK("handle_queue: no pkt\n"); + pkt_dbg(2, pd, "no pkt\n"); return 0; } @@ -1224,7 +1213,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd) node = first_node; while (node) { bio = node->bio; - zone = ZONE(bio->bi_sector, pd); + zone = get_zone(bio->bi_sector, pd); list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { if (p->sector == zone) { bio = NULL; @@ -1244,7 +1233,7 @@ try_next_bio: } spin_unlock(&pd->lock); if (!bio) { - VPRINTK("handle_queue: no bio\n"); + pkt_dbg(2, pd, "no bio\n"); return 0; } @@ -1260,12 +1249,12 @@ try_next_bio: * to this packet. */ spin_lock(&pd->lock); - VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone); + pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); while ((node = pkt_rbtree_find(pd, zone)) != NULL) { bio = node->bio; - VPRINTK("pkt_handle_queue: found zone=%llx\n", - (unsigned long long)ZONE(bio->bi_sector, pd)); - if (ZONE(bio->bi_sector, pd) != zone) + pkt_dbg(2, pd, "found zone=%llx\n", + (unsigned long long)get_zone(bio->bi_sector, pd)); + if (get_zone(bio->bi_sector, pd) != zone) break; pkt_rbtree_erase(pd, node); spin_lock(&pkt->lock); @@ -1316,7 +1305,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) BUG(); } - VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt); + pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt); /* * Fill-in bvec with data from orig_bios. 
@@ -1327,7 +1316,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE); spin_unlock(&pkt->lock); - VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n", + pkt_dbg(2, pd, "Writing %d frames for zone %llx\n", pkt->write_size, (unsigned long long)pkt->sector); if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { @@ -1359,7 +1348,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data { int uptodate; - VPRINTK("run_state_machine: pkt %d\n", pkt->id); + pkt_dbg(2, pd, "pkt %d\n", pkt->id); for (;;) { switch (pkt->state) { @@ -1398,7 +1387,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data if (pkt_start_recovery(pkt)) { pkt_start_write(pd, pkt); } else { - VPRINTK("No recovery possible\n"); + pkt_dbg(2, pd, "No recovery possible\n"); pkt_set_state(pkt, PACKET_FINISHED_STATE); } break; @@ -1419,8 +1408,6 @@ static void pkt_handle_packets(struct pktcdvd_device *pd) { struct packet_data *pkt, *next; - VPRINTK("pkt_handle_packets\n"); - /* * Run state machine for active packets */ @@ -1502,9 +1489,9 @@ static int kcdrwd(void *foobar) if (PACKET_DEBUG > 1) { int states[PACKET_NUM_STATES]; pkt_count_states(pd, states); - VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", - states[0], states[1], states[2], states[3], - states[4], states[5]); + pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], + states[3], states[4], states[5]); } min_sleep_time = MAX_SCHEDULE_TIMEOUT; @@ -1513,9 +1500,9 @@ static int kcdrwd(void *foobar) min_sleep_time = pkt->sleep_time; } - VPRINTK("kcdrwd: sleeping\n"); + pkt_dbg(2, pd, "sleeping\n"); residue = schedule_timeout(min_sleep_time); - VPRINTK("kcdrwd: wake up\n"); + pkt_dbg(2, pd, "wake up\n"); /* make swsusp happy with our thread */ try_to_freeze(); @@ -1563,9 +1550,10 @@ work_to_do: static void pkt_print_settings(struct pktcdvd_device *pd) { - printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable"); - printk("%u blocks, ", pd->settings.size >> 2); - printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2'); + pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n", + pd->settings.fp ? "Fixed" : "Variable", + pd->settings.size >> 2, + pd->settings.block_mode == 8 ? 
'1' : '2'); } static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control) @@ -1699,7 +1687,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ); cgc.sense = &sense; if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } @@ -1714,7 +1702,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ); cgc.sense = &sense; if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } @@ -1749,14 +1737,14 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) /* * paranoia */ - printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type); + pkt_err(pd, "write mode wrong %d\n", wp->data_block_type); return 1; } wp->packet_size = cpu_to_be32(pd->settings.size >> 2); cgc.buflen = cgc.cmd[8] = size; if ((ret = pkt_mode_select(pd, &cgc))) { - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } @@ -1793,7 +1781,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) if (ti->rt == 1 && ti->blank == 0) return 1; - printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); + pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); return 0; } @@ -1811,7 +1799,8 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) case 0x12: /* DVD-RAM */ return 1; default: - VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile); + pkt_dbg(2, pd, "Wrong disc profile (%x)\n", + pd->mmc3_profile); return 0; } @@ -1820,22 +1809,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) * but i'm not sure, should we leave this to user apps? probably. */ if (di->disc_type == 0xff) { - printk(DRIVER_NAME": Unknown disc. 
No track?\n"); + pkt_notice(pd, "unknown disc - no track?\n"); return 0; } if (di->disc_type != 0x20 && di->disc_type != 0) { - printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type); + pkt_err(pd, "wrong disc type (%x)\n", di->disc_type); return 0; } if (di->erasable == 0) { - printk(DRIVER_NAME": Disc not erasable\n"); + pkt_notice(pd, "disc not erasable\n"); return 0; } if (di->border_status == PACKET_SESSION_RESERVED) { - printk(DRIVER_NAME": Can't write to last track (reserved)\n"); + pkt_err(pd, "can't write to last track (reserved)\n"); return 0; } @@ -1860,7 +1849,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) memset(&ti, 0, sizeof(track_information)); if ((ret = pkt_get_disc_info(pd, &di))) { - printk("failed get_disc\n"); + pkt_err(pd, "failed get_disc\n"); return ret; } @@ -1871,12 +1860,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ if ((ret = pkt_get_track_info(pd, track, 1, &ti))) { - printk(DRIVER_NAME": failed get_track\n"); + pkt_err(pd, "failed get_track\n"); return ret; } if (!pkt_writable_track(pd, &ti)) { - printk(DRIVER_NAME": can't write to this track\n"); + pkt_err(pd, "can't write to this track\n"); return -EROFS; } @@ -1886,11 +1875,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) */ pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; if (pd->settings.size == 0) { - printk(DRIVER_NAME": detected zero packet size!\n"); + pkt_notice(pd, "detected zero packet size!\n"); return -ENXIO; } if (pd->settings.size > PACKET_MAX_SECTORS) { - printk(DRIVER_NAME": packet size is too big\n"); + pkt_err(pd, "packet size is too big\n"); return -EROFS; } pd->settings.fp = ti.fp; @@ -1932,7 +1921,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) pd->settings.block_mode = PACKET_BLOCK_MODE2; break; default: - printk(DRIVER_NAME": unknown data mode\n"); + pkt_err(pd, "unknown data mode\n"); return -EROFS; } return 0; @@ -1966,10 +1955,10 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); ret = pkt_mode_select(pd, &cgc); if (ret) { - printk(DRIVER_NAME": write caching control failed\n"); - pkt_dump_sense(&cgc); + pkt_err(pd, "write caching control failed\n"); + pkt_dump_sense(pd, &cgc); } else if (!ret && set) - printk(DRIVER_NAME": enabled write caching on %s\n", pd->name); + pkt_notice(pd, "enabled write caching\n"); return ret; } @@ -2005,7 +1994,7 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, sizeof(struct mode_page_header); ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); if (ret) { - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } } @@ -2064,7 +2053,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, cgc.cmd[8] = 2; ret = pkt_generic_packet(pd, &cgc); if (ret) { - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } size = ((unsigned int) buf[0]<<8) + buf[1] + 2; @@ -2079,16 +2068,16 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, cgc.cmd[8] = size; ret = pkt_generic_packet(pd, &cgc); if (ret) { - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } if (!(buf[6] & 0x40)) { - printk(DRIVER_NAME": Disc type is not CD-RW\n"); + pkt_notice(pd, "disc type is not CD-RW\n"); return 1; } if (!(buf[6] & 0x4)) { - printk(DRIVER_NAME": A1 values on media are 
not valid, maybe not CDRW?\n"); + pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n"); return 1; } @@ -2108,14 +2097,14 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, *speed = us_clv_to_speed[sp]; break; default: - printk(DRIVER_NAME": Unknown disc sub-type %d\n",st); + pkt_notice(pd, "unknown disc sub-type %d\n", st); return 1; } if (*speed) { - printk(DRIVER_NAME": Max. media speed: %d\n",*speed); + pkt_info(pd, "maximum media speed: %d\n", *speed); return 0; } else { - printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st); + pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st); return 1; } } @@ -2126,7 +2115,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) struct request_sense sense; int ret; - VPRINTK(DRIVER_NAME": Performing OPC\n"); + pkt_dbg(2, pd, "Performing OPC\n"); init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.sense = &sense; @@ -2134,7 +2123,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) cgc.cmd[0] = GPCMD_SEND_OPC; cgc.cmd[1] = 1; if ((ret = pkt_generic_packet(pd, &cgc))) - pkt_dump_sense(&cgc); + pkt_dump_sense(pd, &cgc); return ret; } @@ -2144,12 +2133,12 @@ static int pkt_open_write(struct pktcdvd_device *pd) unsigned int write_speed, media_write_speed, read_speed; if ((ret = pkt_probe_settings(pd))) { - VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name); + pkt_dbg(2, pd, "failed probe\n"); return ret; } if ((ret = pkt_set_write_settings(pd))) { - DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name); + pkt_dbg(1, pd, "failed saving write settings\n"); return -EIO; } @@ -2161,26 +2150,26 @@ static int pkt_open_write(struct pktcdvd_device *pd) case 0x13: /* DVD-RW */ case 0x1a: /* DVD+RW */ case 0x12: /* DVD-RAM */ - DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed); + pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed); break; default: if ((ret = pkt_media_speed(pd, &media_write_speed))) media_write_speed = 16; write_speed = min(write_speed, media_write_speed * 177); - DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176); + pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); break; } read_speed = write_speed; if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { - DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name); + pkt_dbg(1, pd, "couldn't set write speed\n"); return -EIO; } pd->write_speed = write_speed; pd->read_speed = read_speed; if ((ret = pkt_perform_opc(pd))) { - DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name); + pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); } return 0; @@ -2205,7 +2194,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) goto out; if ((ret = pkt_get_last_written(pd, &lba))) { - printk(DRIVER_NAME": pkt_get_last_written failed\n"); + pkt_err(pd, "pkt_get_last_written failed\n"); goto out_putdev; } @@ -2235,11 +2224,11 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) if (write) { if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { - printk(DRIVER_NAME": not enough memory for buffers\n"); + pkt_err(pd, "not enough memory for buffers\n"); ret = -ENOMEM; goto out_putdev; } - printk(DRIVER_NAME": %lukB available on disc\n", lba << 1); + pkt_info(pd, "%lukB available on disc\n", lba << 1); } return 0; @@ -2257,7 +2246,7 @@ out: static void pkt_release_dev(struct pktcdvd_device *pd, int flush) { if (flush && pkt_flush_cache(pd)) - DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name); + 
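/*
 * [Reader annotation -- not part of the commit being shown.]
 * The hunks around this point convert the old DRIVER_NAME-prefixed
 * DPRINTK/VPRINTK calls to the pkt_dbg()/pkt_info()/pkt_notice()/
 * pkt_err() helpers introduced earlier in this patch, which prefix
 * pd->name automatically and, at debug level 2, add the calling
 * function. A hypothetical call site (sketch_log_examples is not a
 * real driver function) just to show how the levels map:
 */
static void sketch_log_examples(struct pktcdvd_device *pd)
{
	pkt_dbg(1, pd, "write speed %ux\n", 4U);	/* printed when PACKET_DEBUG >= 1 */
	pkt_dbg(2, pd, "queue empty\n");		/* PACKET_DEBUG >= 2, adds __func__ */
	pkt_notice(pd, "disc not erasable\n");		/* always, via pr_notice */
	pkt_err(pd, "packet size is too big\n");	/* always, via pr_err */
}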
pkt_dbg(1, pd, "not flushing cache\n"); pkt_lock_door(pd, 0); @@ -2279,8 +2268,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) struct pktcdvd_device *pd = NULL; int ret; - VPRINTK(DRIVER_NAME": entering open\n"); - mutex_lock(&pktcdvd_mutex); mutex_lock(&ctl_mutex); pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); @@ -2315,7 +2302,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) out_dec: pd->refcnt--; out: - VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); mutex_unlock(&ctl_mutex); mutex_unlock(&pktcdvd_mutex); return ret; @@ -2360,7 +2346,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) pd = q->queuedata; if (!pd) { - printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b)); + pr_err("%s incorrect request queue\n", + bdevname(bio->bi_bdev, b)); goto end_io; } @@ -2382,20 +2369,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) } if (!test_bit(PACKET_WRITABLE, &pd->flags)) { - printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n", - pd->name, (unsigned long long)bio->bi_sector); + pkt_notice(pd, "WRITE for ro device (%llu)\n", + (unsigned long long)bio->bi_sector); goto end_io; } if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { - printk(DRIVER_NAME": wrong bio size\n"); + pkt_err(pd, "wrong bio size\n"); goto end_io; } blk_queue_bounce(q, &bio); - zone = ZONE(bio->bi_sector, pd); - VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n", + zone = get_zone(bio->bi_sector, pd); + pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", (unsigned long long)bio->bi_sector, (unsigned long long)bio_end_sector(bio)); @@ -2405,7 +2392,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) sector_t last_zone; int first_sectors; - last_zone = ZONE(bio_end_sector(bio) - 1, pd); + last_zone = get_zone(bio_end_sector(bio) - 1, pd); if (last_zone != zone) { BUG_ON(last_zone != zone + pd->settings.size); first_sectors = last_zone - bio->bi_sector; @@ -2500,7 +2487,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, struct bio_vec *bvec) { struct pktcdvd_device *pd = q->queuedata; - sector_t zone = ZONE(bmd->bi_sector, pd); + sector_t zone = get_zone(bmd->bi_sector, pd); int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size; int remaining = (pd->settings.size << 9) - used; int remaining2; @@ -2609,7 +2596,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) struct block_device *bdev; if (pd->pkt_dev == dev) { - printk(DRIVER_NAME": Recursive setup not allowed\n"); + pkt_err(pd, "recursive setup not allowed\n"); return -EBUSY; } for (i = 0; i < MAX_WRITERS; i++) { @@ -2617,11 +2604,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) if (!pd2) continue; if (pd2->bdev->bd_dev == dev) { - printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b)); + pkt_err(pd, "%s already setup\n", + bdevname(pd2->bdev, b)); return -EBUSY; } if (pd2->pkt_dev == dev) { - printk(DRIVER_NAME": Can't chain pktcdvd devices\n"); + pkt_err(pd, "can't chain pktcdvd devices\n"); return -EBUSY; } } @@ -2644,13 +2632,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) atomic_set(&pd->cdrw.pending_bios, 0); pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); if (IS_ERR(pd->cdrw.thread)) { - printk(DRIVER_NAME": can't start kernel thread\n"); + pkt_err(pd, "can't start kernel thread\n"); ret = -ENOMEM; goto out_mem; } proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); - DPRINTK(DRIVER_NAME": writer %s mapped 
to %s\n", pd->name, bdevname(bdev, b)); + pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b)); return 0; out_mem: @@ -2665,8 +2653,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, struct pktcdvd_device *pd = bdev->bd_disk->private_data; int ret; - VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, - MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); + pkt_dbg(2, pd, "cmd %x, dev %d:%d\n", + cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); mutex_lock(&pktcdvd_mutex); switch (cmd) { @@ -2690,7 +2678,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, break; default: - VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); + pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd); ret = -ENOTTY; } mutex_unlock(&pktcdvd_mutex); @@ -2743,7 +2731,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) if (!pkt_devs[idx]) break; if (idx == MAX_WRITERS) { - printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS); + pr_err("max %d writers supported\n", MAX_WRITERS); ret = -EBUSY; goto out_mutex; } @@ -2818,7 +2806,7 @@ out_mem: kfree(pd); out_mutex: mutex_unlock(&ctl_mutex); - printk(DRIVER_NAME": setup of pktcdvd device failed\n"); + pr_err("setup of pktcdvd device failed\n"); return ret; } @@ -2839,7 +2827,7 @@ static int pkt_remove_dev(dev_t pkt_dev) break; } if (idx == MAX_WRITERS) { - DPRINTK(DRIVER_NAME": dev not setup\n"); + pr_debug("dev not setup\n"); ret = -ENXIO; goto out; } @@ -2859,7 +2847,7 @@ static int pkt_remove_dev(dev_t pkt_dev) blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); remove_proc_entry(pd->name, pkt_proc); - DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name); + pkt_dbg(1, pd, "writer unmapped\n"); del_gendisk(pd->disk); blk_cleanup_queue(pd->disk->queue); @@ -2969,7 +2957,7 @@ static int __init pkt_init(void) ret = register_blkdev(pktdev_major, DRIVER_NAME); if (ret < 0) { - printk(DRIVER_NAME": Unable to register block device\n"); + pr_err("unable to register block device\n"); goto out2; } if (!pktdev_major) @@ -2983,7 +2971,7 @@ static int __init pkt_init(void) ret = misc_register(&pkt_misc); if (ret) { - printk(DRIVER_NAME": Unable to register misc device\n"); + pr_err("unable to register misc device\n"); goto out_misc; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 191cd177fef2..cb1db2979d3d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -931,12 +931,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u64 snap_id) { u32 which; + const char *snap_name; which = rbd_dev_snap_index(rbd_dev, snap_id); if (which == BAD_SNAP_INDEX) - return NULL; + return ERR_PTR(-ENOENT); - return _rbd_dev_v1_snap_name(rbd_dev, which); + snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); + return snap_name ? snap_name : ERR_PTR(-ENOMEM); } static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) @@ -1561,11 +1563,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) obj_request, obj_request->img_request, obj_request->result, xferred, length); /* - * ENOENT means a hole in the image. We zero-fill the - * entire length of the request. A short read also implies - * zero-fill to the end of the request. Either way we - * update the xferred count to indicate the whole request - * was satisfied. + * ENOENT means a hole in the image. We zero-fill the entire + * length of the request. A short read also implies zero-fill + * to the end of the request. 
An error requires the whole + * length of the request to be reported finished with an error + * to the block layer. In each case we update the xferred + * count to indicate the whole request was satisfied. */ rbd_assert(obj_request->type != OBJ_REQUEST_NODATA); if (obj_request->result == -ENOENT) { @@ -1574,14 +1577,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) else zero_pages(obj_request->pages, 0, length); obj_request->result = 0; - obj_request->xferred = length; } else if (xferred < length && !obj_request->result) { if (obj_request->type == OBJ_REQUEST_BIO) zero_bio_chain(obj_request->bio_list, xferred); else zero_pages(obj_request->pages, xferred, length); - obj_request->xferred = length; } + obj_request->xferred = length; obj_request_done_set(obj_request); } @@ -2167,9 +2169,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, struct rbd_obj_request *obj_request = NULL; struct rbd_obj_request *next_obj_request; bool write_request = img_request_write_test(img_request); - struct bio *bio_list = 0; + struct bio *bio_list = NULL; unsigned int bio_offset = 0; - struct page **pages = 0; + struct page **pages = NULL; u64 img_offset; u64 resid; u16 opcode; @@ -2207,6 +2209,11 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, rbd_segment_name_free(object_name); if (!obj_request) goto out_unwind; + /* + * set obj_request->img_request before creating the + * osd_request so that it gets the right snapc + */ + rbd_img_obj_request_add(img_request, obj_request); if (type == OBJ_REQUEST_BIO) { unsigned int clone_size; @@ -2248,11 +2255,6 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, obj_request->pages, length, offset & ~PAGE_MASK, false, false); - /* - * set obj_request->img_request before formatting - * the osd_request so that it gets the right snapc - */ - rbd_img_obj_request_add(img_request, obj_request); if (write_request) rbd_osd_req_format_write(obj_request); else @@ -2812,7 +2814,7 @@ out_err: obj_request_done_set(obj_request); } -static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) +static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id) { struct rbd_obj_request *obj_request; struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; @@ -2827,16 +2829,17 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); if (!obj_request->osd_req) goto out; - obj_request->callback = rbd_obj_request_put; osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, notify_id, 0, 0); rbd_osd_req_format_read(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); -out: if (ret) - rbd_obj_request_put(obj_request); + goto out; + ret = rbd_obj_request_wait(obj_request); +out: + rbd_obj_request_put(obj_request); return ret; } @@ -2856,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) if (ret) rbd_warn(rbd_dev, "header refresh error (%d)\n", ret); - rbd_obj_notify_ack(rbd_dev, notify_id); + rbd_obj_notify_ack_sync(rbd_dev, notify_id); } /* @@ -3328,6 +3331,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev) clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); } +static void rbd_dev_update_size(struct rbd_device *rbd_dev) +{ + sector_t size; + bool removing; + + /* + * Don't hold the lock while doing disk operations, + * or lock ordering will conflict with the bdev mutex via: + * rbd_add() -> blkdev_get() -> 
rbd_open() + */ + spin_lock_irq(&rbd_dev->lock); + removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); + spin_unlock_irq(&rbd_dev->lock); + /* + * If the device is being removed, rbd_dev->disk has + * been destroyed, so don't try to update its size + */ + if (!removing) { + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; + dout("setting size to %llu sectors", (unsigned long long)size); + set_capacity(rbd_dev->disk, size); + revalidate_disk(rbd_dev->disk); + } +} + static int rbd_dev_refresh(struct rbd_device *rbd_dev) { u64 mapping_size; @@ -3347,12 +3375,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev) up_write(&rbd_dev->header_rwsem); if (mapping_size != rbd_dev->mapping.size) { - sector_t size; - - size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; - dout("setting size to %llu sectors", (unsigned long long)size); - set_capacity(rbd_dev->disk, size); - revalidate_disk(rbd_dev->disk); + rbd_dev_update_size(rbd_dev); } return ret; @@ -3706,12 +3729,14 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, if (ret < sizeof (size_buf)) return -ERANGE; - if (order) + if (order) { *order = size_buf.order; + dout(" order %u", (unsigned int)*order); + } *snap_size = le64_to_cpu(size_buf.size); - dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n", - (unsigned long long)snap_id, (unsigned int)*order, + dout(" snap_id 0x%016llx snap_size = %llu\n", + (unsigned long long)snap_id, (unsigned long long)*snap_size); return 0; @@ -4059,8 +4084,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) snap_id = snapc->snaps[which]; snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); - if (IS_ERR(snap_name)) - break; + if (IS_ERR(snap_name)) { + /* ignore no-longer existing snapshots */ + if (PTR_ERR(snap_name) == -ENOENT) + continue; + else + break; + } found = !strcmp(name, snap_name); kfree(snap_name); } @@ -4139,8 +4169,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) /* Look up the snapshot name, and make a copy */ snap_name = rbd_snap_name(rbd_dev, spec->snap_id); - if (!snap_name) { - ret = -ENOMEM; + if (IS_ERR(snap_name)) { + ret = PTR_ERR(snap_name); goto out_err; } @@ -5130,7 +5160,7 @@ static ssize_t rbd_remove(struct bus_type *bus, bool already = false; int ret; - ret = strict_strtoul(buf, 10, &ul); + ret = kstrtoul(buf, 10, &ul); if (ret) return ret; @@ -5161,10 +5191,23 @@ static ssize_t rbd_remove(struct bus_type *bus, if (ret < 0 || already) return ret; - rbd_bus_del_dev(rbd_dev); ret = rbd_dev_header_watch_sync(rbd_dev, false); if (ret) rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); + + /* + * flush remaining watch callbacks - these must be complete + * before the osd_client is shutdown + */ + dout("%s: flushing notifies", __func__); + ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); + /* + * Don't free anything from rbd_dev->disk until after all + * notifies are completely processed. Otherwise + * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting + * in a potential use after free of rbd_dev->disk or rbd_dev. 
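
The rbd snapshot-name paths in this hunk move from returning NULL to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention so that a vanished snapshot (-ENOENT) can be told apart from an allocation failure (-ENOMEM). Below is a minimal userspace model of that convention; err_ptr/is_err/ptr_err and snap_name are illustrative stand-ins, not the real kernel macros or rbd code.

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_ERRNO 4095	/* same bound the kernel uses for encoded errnos */

/* Encode a small negative errno in a pointer value. */
static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup standing in for rbd_dev_v1_snap_name(). */
static const char *snap_name(int which)
{
	static const char *names[] = { "snap1", "snap2" };

	if (which < 0)
		return err_ptr(-ENOENT);	/* snapshot no longer exists */
	if (which >= 2)
		return err_ptr(-ENOMEM);	/* pretend the copy failed */
	return names[which];
}

int main(void)
{
	int i;

	for (i = -1; i <= 2; i++) {
		const char *name = snap_name(i);

		if (is_err(name))
			printf("which=%d -> error %ld (%s)\n",
			       i, ptr_err(name), strerror((int)-ptr_err(name)));
		else
			printf("which=%d -> %s\n", i, name);
	}
	return 0;
}

The point of the change is visible in the caller: rbd_dev_spec_update() can now propagate PTR_ERR(snap_name) instead of mapping every failure to -ENOMEM.
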
+ */ + rbd_bus_del_dev(rbd_dev); rbd_dev_image_release(rbd_dev); module_put(THIS_MODULE); diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 6e85e21445eb..a8de2eec6ff3 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev) for (i = 0; i < card->n_targets; i++) { spin_lock_bh(&card->ctrl[i].queue_lock); cnt = rsxx_cleanup_dma_queue(&card->ctrl[i], - &card->ctrl[i].queue); + &card->ctrl[i].queue, + COMPLETE_DMA); spin_unlock_bh(&card->ctrl[i].queue_lock); cnt += rsxx_dma_cancel(&card->ctrl[i]); @@ -748,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) card->eeh_state = 0; - st = rsxx_eeh_remap_dmas(card); - if (st) - goto failed_remap_dmas; - spin_lock_irqsave(&card->irq_lock, flags); if (card->n_targets & RSXX_MAX_TARGETS) rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G); @@ -778,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) return PCI_ERS_RESULT_RECOVERED; failed_hw_buffers_init: -failed_remap_dmas: for (i = 0; i < card->n_targets; i++) { if (card->ctrl[i].status.buf) pci_free_consistent(card->dev, diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index d7af441880be..2284f5d3a54a 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -295,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) return -ENOMEM; } - blk_size = card->config.data.block_size; + if (card->config_valid) { + blk_size = card->config.data.block_size; + blk_queue_dma_alignment(card->queue, blk_size - 1); + blk_queue_logical_block_size(card->queue, blk_size); + } blk_queue_make_request(card->queue, rsxx_make_request); blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); - blk_queue_dma_alignment(card->queue, blk_size - 1); blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); - blk_queue_logical_block_size(card->queue, blk_size); blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index bed32f16b084..fc88ba3e1bd2 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) } /*----------------- RSXX DMA Handling -------------------*/ +static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma) +{ + if (dma->cmd != HW_CMD_BLK_DISCARD) { + if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) { + pci_unmap_page(ctrl->card->dev, dma->dma_addr, + get_dma_size(dma), + dma->cmd == HW_CMD_BLK_WRITE ? + PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE); + } + } + + kmem_cache_free(rsxx_dma_pool, dma); +} + static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma, unsigned int status) @@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, if (status & DMA_CANCELLED) ctrl->stats.dma_cancelled++; - if (dma->dma_addr) - pci_unmap_page(ctrl->card->dev, dma->dma_addr, - get_dma_size(dma), - dma->cmd == HW_CMD_BLK_WRITE ? - PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE); - if (dma->cb) dma->cb(ctrl->card, dma->cb_data, status ? 
1 : 0); - kmem_cache_free(rsxx_dma_pool, dma); + rsxx_free_dma(ctrl, dma); } int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, - struct list_head *q) + struct list_head *q, unsigned int done) { struct rsxx_dma *dma; struct rsxx_dma *tmp; @@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, list_for_each_entry_safe(dma, tmp, q, list) { list_del(&dma->list); - rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); + if (done & COMPLETE_DMA) + rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); + else + rsxx_free_dma(ctrl, dma); cnt++; } @@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data) /* Clean up the DMA queue */ spin_lock(&ctrl->queue_lock); - cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); + cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); spin_unlock(&ctrl->queue_lock); cnt += rsxx_dma_cancel(ctrl); @@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl) int tag; int cmds_pending = 0; struct hw_cmd *hw_cmd_buf; + int dir; hw_cmd_buf = ctrl->cmd.buf; @@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl) continue; } + if (dma->cmd != HW_CMD_BLK_DISCARD) { + if (dma->cmd == HW_CMD_BLK_WRITE) + dir = PCI_DMA_TODEVICE; + else + dir = PCI_DMA_FROMDEVICE; + + /* + * The function pci_map_page is placed here because we + * can only, by design, issue up to 255 commands to the + * hardware at one time per DMA channel. So the maximum + * amount of mapped memory would be 255 * 4 channels * + * 4096 Bytes which is less than 2GB, the limit of a x8 + * Non-HWWD PCIe slot. This way the pci_map_page + * function should never fail because of a lack of + * mappable memory. + */ + dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page, + dma->pg_off, dma->sub_page.cnt << 9, dir); + if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) { + push_tracker(ctrl->trackers, tag); + rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); + continue; + } + } + set_tracker_dma(ctrl->trackers, tag, dma); hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd; hw_cmd_buf[ctrl->cmd.idx].tag = tag; @@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card, if (!dma) return -ENOMEM; - dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len, - dir ? PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE); - if (!dma->dma_addr) { - kmem_cache_free(rsxx_dma_pool, dma); - return -ENOMEM; - } - dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; dma->laddr = laddr; dma->sub_page.off = (dma_off >> 9); @@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, return 0; bvec_err: - for (i = 0; i < card->n_targets; i++) { - spin_lock_bh(&card->ctrl[i].queue_lock); - rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]); - spin_unlock_bh(&card->ctrl[i].queue_lock); - } + for (i = 0; i < card->n_targets; i++) + rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], + FREE_DMA); return st; } @@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card) /* Clean up the DMA queue */ spin_lock_bh(&ctrl->queue_lock); - rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); + rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); spin_unlock_bh(&ctrl->queue_lock); rsxx_dma_cancel(ctrl); @@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) else card->ctrl[i].stats.reads_issued--; + if (dma->cmd != HW_CMD_BLK_DISCARD) { + pci_unmap_page(card->dev, dma->dma_addr, + get_dma_size(dma), + dma->cmd == HW_CMD_BLK_WRITE ? 
+ PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE); + } + list_add_tail(&dma->list, &issued_dmas[i]); push_tracker(card->ctrl[i].trackers, j); cnt++; @@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); card->ctrl[i].stats.sw_q_depth += cnt; card->ctrl[i].e_cnt = 0; - - list_for_each_entry(dma, &card->ctrl[i].queue, list) { - if (dma->dma_addr) - pci_unmap_page(card->dev, dma->dma_addr, - get_dma_size(dma), - dma->cmd == HW_CMD_BLK_WRITE ? - PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE); - } spin_unlock_bh(&card->ctrl[i].queue_lock); } @@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) return 0; } -int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) -{ - struct rsxx_dma *dma; - int i; - - for (i = 0; i < card->n_targets; i++) { - spin_lock_bh(&card->ctrl[i].queue_lock); - list_for_each_entry(dma, &card->ctrl[i].queue, list) { - dma->dma_addr = pci_map_page(card->dev, dma->page, - dma->pg_off, get_dma_size(dma), - dma->cmd == HW_CMD_BLK_WRITE ? - PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE); - if (!dma->dma_addr) { - spin_unlock_bh(&card->ctrl[i].queue_lock); - kmem_cache_free(rsxx_dma_pool, dma); - return -ENOMEM; - } - } - spin_unlock_bh(&card->ctrl[i].queue_lock); - } - - return 0; -} - int rsxx_dma_init(void) { rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN); diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index 5ad5055a4104..6bbc64d0f690 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h @@ -52,7 +52,7 @@ struct proc_cmd; #define RS70_PCI_REV_SUPPORTED 4 #define DRIVER_NAME "rsxx" -#define DRIVER_VERSION "4.0" +#define DRIVER_VERSION "4.0.3.2516" /* Block size is 4096 */ #define RSXX_HW_BLK_SHIFT 12 @@ -345,6 +345,11 @@ enum rsxx_creg_stat { CREG_STAT_TAG_MASK = 0x0000ff00, }; +enum rsxx_dma_finish { + FREE_DMA = 0x0, + COMPLETE_DMA = 0x1, +}; + static inline unsigned int CREG_DATA(int N) { return CREG_DATA0 + (N << 2); @@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card, int rsxx_dma_setup(struct rsxx_cardinfo *card); void rsxx_dma_destroy(struct rsxx_cardinfo *card); int rsxx_dma_init(void); -int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q); +int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, + struct list_head *q, + unsigned int done); int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); void rsxx_dma_cleanup(void); void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c new file mode 100644 index 000000000000..9199c93be926 --- /dev/null +++ b/drivers/block/skd_main.c @@ -0,0 +1,5432 @@ +/* Copyright 2012 STEC, Inc. + * + * This file is licensed under the terms of the 3-clause + * BSD License (http://opensource.org/licenses/BSD-3-Clause) + * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), + * at your option. Both licenses are also available in the LICENSE file + * distributed with this project. This file may not be copied, modified, + * or distributed except in accordance with those terms. + * Gordoni Waidhofer <gwaidhofer@stec-inc.com> + * Initial Driver Design! + * Thomas Swann <tswann@stec-inc.com> + * Interrupt handling. + * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com> + * biomode implementation. + * Akhil Bhansali <abhansali@stec-inc.com> + * Added support for DISCARD / FLUSH and FUA. 
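
The rsxx hunks above give rsxx_cleanup_dma_queue() a flag choosing between completing queued DMAs back to their callers (COMPLETE_DMA) and simply freeing descriptors that no caller is waiting on (FREE_DMA). Here is a small userspace sketch of that cleanup-with-a-flag control flow, using made-up types and a toy linked list rather than the driver's real structures.

#include <stdio.h>
#include <stdlib.h>

enum dma_finish { FREE_DMA = 0, COMPLETE_DMA = 1 };	/* mirrors enum rsxx_dma_finish */

struct fake_dma {
	struct fake_dma *next;
	int id;
	void (*cb)(int id, int status);	/* completion callback, may be NULL */
};

static void demo_cb(int id, int status)
{
	printf("dma %d completed with status %d (cancelled)\n", id, status);
}

/* Cancel everything on the queue; 'done' picks complete-vs-free, the same
 * decision rsxx_cleanup_dma_queue() now makes. Returns the entry count. */
static int cleanup_queue(struct fake_dma **head, enum dma_finish done)
{
	int cnt = 0;

	while (*head) {
		struct fake_dma *dma = *head;

		*head = dma->next;
		if (done == COMPLETE_DMA && dma->cb)
			dma->cb(dma->id, 1 /* cancelled */);
		free(dma);
		cnt++;
	}
	return cnt;
}

int main(void)
{
	struct fake_dma *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct fake_dma *dma = calloc(1, sizeof(*dma));

		if (!dma)
			return 1;
		dma->id = i;
		dma->cb = demo_cb;
		dma->next = head;
		head = dma;
	}
	printf("cleaned %d entries\n", cleanup_queue(&head, COMPLETE_DMA));
	return 0;
}

In the driver, FREE_DMA is used on the bio error path where the DMAs were never issued, while error recovery paths complete them back to the block layer as cancelled.
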
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/compiler.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/time.h> +#include <linux/hdreg.h> +#include <linux/dma-mapping.h> +#include <linux/completion.h> +#include <linux/scatterlist.h> +#include <linux/version.h> +#include <linux/err.h> +#include <linux/scatterlist.h> +#include <linux/aer.h> +#include <linux/ctype.h> +#include <linux/wait.h> +#include <linux/uio.h> +#include <scsi/scsi.h> +#include <scsi/sg.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <asm/unaligned.h> + +#include "skd_s1120.h" + +static int skd_dbg_level; +static int skd_isr_comp_limit = 4; + +enum { + STEC_LINK_2_5GTS = 0, + STEC_LINK_5GTS = 1, + STEC_LINK_8GTS = 2, + STEC_LINK_UNKNOWN = 0xFF +}; + +enum { + SKD_FLUSH_INITIALIZER, + SKD_FLUSH_ZERO_SIZE_FIRST, + SKD_FLUSH_DATA_SECOND, +}; + +#define SKD_ASSERT(expr) \ + do { \ + if (unlikely(!(expr))) { \ + pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ + # expr, __FILE__, __func__, __LINE__); \ + } \ + } while (0) + +#define DRV_NAME "skd" +#define DRV_VERSION "2.2.1" +#define DRV_BUILD_ID "0260" +#define PFX DRV_NAME ": " +#define DRV_BIN_VERSION 0x100 +#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID + +MODULE_AUTHOR("bug-reports: support@stec-inc.com"); +MODULE_LICENSE("Dual BSD/GPL"); + +MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")"); +MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); + +#define PCI_VENDOR_ID_STEC 0x1B39 +#define PCI_DEVICE_ID_S1120 0x0001 + +#define SKD_FUA_NV (1 << 1) +#define SKD_MINORS_PER_DEVICE 16 + +#define SKD_MAX_QUEUE_DEPTH 200u + +#define SKD_PAUSE_TIMEOUT (5 * 1000) + +#define SKD_N_FITMSG_BYTES (512u) + +#define SKD_N_SPECIAL_CONTEXT 32u +#define SKD_N_SPECIAL_FITMSG_BYTES (128u) + +/* SG elements are 32 bytes, so we can make this 4096 and still be under the + * 128KB limit. 
That allows 4096*4K = 16M xfer size + */ +#define SKD_N_SG_PER_REQ_DEFAULT 256u +#define SKD_N_SG_PER_SPECIAL 256u + +#define SKD_N_COMPLETION_ENTRY 256u +#define SKD_N_READ_CAP_BYTES (8u) + +#define SKD_N_INTERNAL_BYTES (512u) + +/* 5 bits of uniqifier, 0xF800 */ +#define SKD_ID_INCR (0x400) +#define SKD_ID_TABLE_MASK (3u << 8u) +#define SKD_ID_RW_REQUEST (0u << 8u) +#define SKD_ID_INTERNAL (1u << 8u) +#define SKD_ID_SPECIAL_REQUEST (2u << 8u) +#define SKD_ID_FIT_MSG (3u << 8u) +#define SKD_ID_SLOT_MASK 0x00FFu +#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu + +#define SKD_N_TIMEOUT_SLOT 4u +#define SKD_TIMEOUT_SLOT_MASK 3u + +#define SKD_N_MAX_SECTORS 2048u + +#define SKD_MAX_RETRIES 2u + +#define SKD_TIMER_SECONDS(seconds) (seconds) +#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) + +#define INQ_STD_NBYTES 36 +#define SKD_DISCARD_CDB_LENGTH 24 + +enum skd_drvr_state { + SKD_DRVR_STATE_LOAD, + SKD_DRVR_STATE_IDLE, + SKD_DRVR_STATE_BUSY, + SKD_DRVR_STATE_STARTING, + SKD_DRVR_STATE_ONLINE, + SKD_DRVR_STATE_PAUSING, + SKD_DRVR_STATE_PAUSED, + SKD_DRVR_STATE_DRAINING_TIMEOUT, + SKD_DRVR_STATE_RESTARTING, + SKD_DRVR_STATE_RESUMING, + SKD_DRVR_STATE_STOPPING, + SKD_DRVR_STATE_FAULT, + SKD_DRVR_STATE_DISAPPEARED, + SKD_DRVR_STATE_PROTOCOL_MISMATCH, + SKD_DRVR_STATE_BUSY_ERASE, + SKD_DRVR_STATE_BUSY_SANITIZE, + SKD_DRVR_STATE_BUSY_IMMINENT, + SKD_DRVR_STATE_WAIT_BOOT, + SKD_DRVR_STATE_SYNCING, +}; + +#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) +#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) +#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) +#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) +#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) +#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) +#define SKD_START_WAIT_SECONDS 90u + +enum skd_req_state { + SKD_REQ_STATE_IDLE, + SKD_REQ_STATE_SETUP, + SKD_REQ_STATE_BUSY, + SKD_REQ_STATE_COMPLETED, + SKD_REQ_STATE_TIMEOUT, + SKD_REQ_STATE_ABORTED, +}; + +enum skd_fit_msg_state { + SKD_MSG_STATE_IDLE, + SKD_MSG_STATE_BUSY, +}; + +enum skd_check_status_action { + SKD_CHECK_STATUS_REPORT_GOOD, + SKD_CHECK_STATUS_REPORT_SMART_ALERT, + SKD_CHECK_STATUS_REQUEUE_REQUEST, + SKD_CHECK_STATUS_REPORT_ERROR, + SKD_CHECK_STATUS_BUSY_IMMINENT, +}; + +struct skd_fitmsg_context { + enum skd_fit_msg_state state; + + struct skd_fitmsg_context *next; + + u32 id; + u16 outstanding; + + u32 length; + u32 offset; + + u8 *msg_buf; + dma_addr_t mb_dma_address; +}; + +struct skd_request_context { + enum skd_req_state state; + + struct skd_request_context *next; + + u16 id; + u32 fitmsg_id; + + struct request *req; + u8 flush_cmd; + u8 discard_page; + + u32 timeout_stamp; + u8 sg_data_dir; + struct scatterlist *sg; + u32 n_sg; + u32 sg_byte_count; + + struct fit_sg_descriptor *sksg_list; + dma_addr_t sksg_dma_address; + + struct fit_completion_entry_v1 completion; + + struct fit_comp_error_info err_info; + +}; +#define SKD_DATA_DIR_HOST_TO_CARD 1 +#define SKD_DATA_DIR_CARD_TO_HOST 2 +#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. 
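
The sizing comment above works out as follows: at 32 bytes per SG descriptor, even the 4096-entry ceiling costs 4096 * 32 = 128 KiB of descriptor space, and 4096 page-sized (4 KiB) segments cap a single transfer at 16 MiB; the shipped default is the far smaller SKD_N_SG_PER_REQ_DEFAULT of 256. A throwaway check of that arithmetic (the 32-byte and 4 KiB figures are taken from the comment, not measured here):

#include <stdio.h>

int main(void)
{
	const unsigned sgd_bytes  = 32;		/* per-descriptor size from the comment */
	const unsigned max_sg     = 4096;	/* upper bound discussed in the comment */
	const unsigned default_sg = 256;	/* SKD_N_SG_PER_REQ_DEFAULT */
	const unsigned page_bytes = 4096;	/* 4 KiB pages assumed */

	printf("descriptor space at the ceiling: %u KiB\n",
	       max_sg * sgd_bytes / 1024);		/* 128 KiB */
	printf("max transfer at the ceiling:    %u MiB\n",
	       max_sg / 1024 * page_bytes / 1024);	/* 16 MiB */
	printf("descriptor space at default:    %u KiB\n",
	       default_sg * sgd_bytes / 1024);		/* 8 KiB */
	return 0;
}
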
*/ + +struct skd_special_context { + struct skd_request_context req; + + u8 orphaned; + + void *data_buf; + dma_addr_t db_dma_address; + + u8 *msg_buf; + dma_addr_t mb_dma_address; +}; + +struct skd_sg_io { + fmode_t mode; + void __user *argp; + + struct sg_io_hdr sg; + + u8 cdb[16]; + + u32 dxfer_len; + u32 iovcnt; + struct sg_iovec *iov; + struct sg_iovec no_iov_iov; + + struct skd_special_context *skspcl; +}; + +typedef enum skd_irq_type { + SKD_IRQ_LEGACY, + SKD_IRQ_MSI, + SKD_IRQ_MSIX +} skd_irq_type_t; + +#define SKD_MAX_BARS 2 + +struct skd_device { + volatile void __iomem *mem_map[SKD_MAX_BARS]; + resource_size_t mem_phys[SKD_MAX_BARS]; + u32 mem_size[SKD_MAX_BARS]; + + skd_irq_type_t irq_type; + u32 msix_count; + struct skd_msix_entry *msix_entries; + + struct pci_dev *pdev; + int pcie_error_reporting_is_enabled; + + spinlock_t lock; + struct gendisk *disk; + struct request_queue *queue; + struct device *class_dev; + int gendisk_on; + int sync_done; + + atomic_t device_count; + u32 devno; + u32 major; + char name[32]; + char isr_name[30]; + + enum skd_drvr_state state; + u32 drive_state; + + u32 in_flight; + u32 cur_max_queue_depth; + u32 queue_low_water_mark; + u32 dev_max_queue_depth; + + u32 num_fitmsg_context; + u32 num_req_context; + + u32 timeout_slot[SKD_N_TIMEOUT_SLOT]; + u32 timeout_stamp; + struct skd_fitmsg_context *skmsg_free_list; + struct skd_fitmsg_context *skmsg_table; + + struct skd_request_context *skreq_free_list; + struct skd_request_context *skreq_table; + + struct skd_special_context *skspcl_free_list; + struct skd_special_context *skspcl_table; + + struct skd_special_context internal_skspcl; + u32 read_cap_blocksize; + u32 read_cap_last_lba; + int read_cap_is_valid; + int inquiry_is_valid; + u8 inq_serial_num[13]; /*12 chars plus null term */ + u8 id_str[80]; /* holds a composite name (pci + sernum) */ + + u8 skcomp_cycle; + u32 skcomp_ix; + struct fit_completion_entry_v1 *skcomp_table; + struct fit_comp_error_info *skerr_table; + dma_addr_t cq_dma_address; + + wait_queue_head_t waitq; + + struct timer_list timer; + u32 timer_countdown; + u32 timer_substate; + + int n_special; + int sgs_per_request; + u32 last_mtd; + + u32 proto_ver; + + int dbg_level; + u32 connect_time_stamp; + int connect_retries; +#define SKD_MAX_CONNECT_RETRIES 16 + u32 drive_jiffies; + + u32 timo_slot; + + + struct work_struct completion_worker; +}; + +#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) +#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) +#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) + +static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) +{ + u32 val; + + if (likely(skdev->dbg_level < 2)) + return readl(skdev->mem_map[1] + offset); + else { + barrier(); + val = readl(skdev->mem_map[1] + offset); + barrier(); + pr_debug("%s:%s:%d offset %x = %x\n", + skdev->name, __func__, __LINE__, offset, val); + return val; + } + +} + +static inline void skd_reg_write32(struct skd_device *skdev, u32 val, + u32 offset) +{ + if (likely(skdev->dbg_level < 2)) { + writel(val, skdev->mem_map[1] + offset); + barrier(); + } else { + barrier(); + writel(val, skdev->mem_map[1] + offset); + barrier(); + pr_debug("%s:%s:%d offset %x = %x\n", + skdev->name, __func__, __LINE__, offset, val); + } +} + +static inline void skd_reg_write64(struct skd_device *skdev, u64 val, + u32 offset) +{ + if (likely(skdev->dbg_level < 2)) { + writeq(val, skdev->mem_map[1] + offset); + barrier(); + } else { + barrier(); + writeq(val, skdev->mem_map[1] + offset); 
+ barrier(); + pr_debug("%s:%s:%d offset %x = %016llx\n", + skdev->name, __func__, __LINE__, offset, val); + } +} + + +#define SKD_IRQ_DEFAULT SKD_IRQ_MSI +static int skd_isr_type = SKD_IRQ_DEFAULT; + +module_param(skd_isr_type, int, 0444); +MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability." + " (0==legacy, 1==MSI, 2==MSI-X, default==1)"); + +#define SKD_MAX_REQ_PER_MSG_DEFAULT 1 +static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; + +module_param(skd_max_req_per_msg, int, 0444); +MODULE_PARM_DESC(skd_max_req_per_msg, + "Maximum SCSI requests packed in a single message." + " (1-14, default==1)"); + +#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 +#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" +static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; + +module_param(skd_max_queue_depth, int, 0444); +MODULE_PARM_DESC(skd_max_queue_depth, + "Maximum SCSI requests issued to s1120." + " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")"); + +static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; +module_param(skd_sgs_per_request, int, 0444); +MODULE_PARM_DESC(skd_sgs_per_request, + "Maximum SG elements per block request." + " (1-4096, default==256)"); + +static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; +module_param(skd_max_pass_thru, int, 0444); +MODULE_PARM_DESC(skd_max_pass_thru, + "Maximum SCSI pass-thru at a time." " (1-50, default==32)"); + +module_param(skd_dbg_level, int, 0444); +MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); + +module_param(skd_isr_comp_limit, int, 0444); +MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4"); + +/* Major device number dynamically assigned. */ +static u32 skd_major; + +static void skd_destruct(struct skd_device *skdev); +static const struct block_device_operations skd_blockdev_ops; +static void skd_send_fitmsg(struct skd_device *skdev, + struct skd_fitmsg_context *skmsg); +static void skd_send_special_fitmsg(struct skd_device *skdev, + struct skd_special_context *skspcl); +static void skd_request_fn(struct request_queue *rq); +static void skd_end_request(struct skd_device *skdev, + struct skd_request_context *skreq, int error); +static int skd_preop_sg_list(struct skd_device *skdev, + struct skd_request_context *skreq); +static void skd_postop_sg_list(struct skd_device *skdev, + struct skd_request_context *skreq); + +static void skd_restart_device(struct skd_device *skdev); +static int skd_quiesce_dev(struct skd_device *skdev); +static int skd_unquiesce_dev(struct skd_device *skdev); +static void skd_release_special(struct skd_device *skdev, + struct skd_special_context *skspcl); +static void skd_disable_interrupts(struct skd_device *skdev); +static void skd_isr_fwstate(struct skd_device *skdev); +static void skd_recover_requests(struct skd_device *skdev, int requeue); +static void skd_soft_reset(struct skd_device *skdev); + +static const char *skd_name(struct skd_device *skdev); +const char *skd_drive_state_to_str(int state); +const char *skd_skdev_state_to_str(enum skd_drvr_state state); +static void skd_log_skdev(struct skd_device *skdev, const char *event); +static void skd_log_skmsg(struct skd_device *skdev, + struct skd_fitmsg_context *skmsg, const char *event); +static void skd_log_skreq(struct skd_device *skdev, + struct skd_request_context *skreq, const char *event); + +/* + ***************************************************************************** + * READ/WRITE REQUESTS + ***************************************************************************** + */ +static void 
skd_fail_all_pending(struct skd_device *skdev) +{ + struct request_queue *q = skdev->queue; + struct request *req; + + for (;; ) { + req = blk_peek_request(q); + if (req == NULL) + break; + blk_start_request(req); + __blk_end_request_all(req, -EIO); + } +} + +static void +skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, + int data_dir, unsigned lba, + unsigned count) +{ + if (data_dir == READ) + scsi_req->cdb[0] = 0x28; + else + scsi_req->cdb[0] = 0x2a; + + scsi_req->cdb[1] = 0; + scsi_req->cdb[2] = (lba & 0xff000000) >> 24; + scsi_req->cdb[3] = (lba & 0xff0000) >> 16; + scsi_req->cdb[4] = (lba & 0xff00) >> 8; + scsi_req->cdb[5] = (lba & 0xff); + scsi_req->cdb[6] = 0; + scsi_req->cdb[7] = (count & 0xff00) >> 8; + scsi_req->cdb[8] = count & 0xff; + scsi_req->cdb[9] = 0; +} + +static void +skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, + struct skd_request_context *skreq) +{ + skreq->flush_cmd = 1; + + scsi_req->cdb[0] = 0x35; + scsi_req->cdb[1] = 0; + scsi_req->cdb[2] = 0; + scsi_req->cdb[3] = 0; + scsi_req->cdb[4] = 0; + scsi_req->cdb[5] = 0; + scsi_req->cdb[6] = 0; + scsi_req->cdb[7] = 0; + scsi_req->cdb[8] = 0; + scsi_req->cdb[9] = 0; +} + +static void +skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, + struct skd_request_context *skreq, + struct page *page, + u32 lba, u32 count) +{ + char *buf; + unsigned long len; + struct request *req; + + buf = page_address(page); + len = SKD_DISCARD_CDB_LENGTH; + + scsi_req->cdb[0] = UNMAP; + scsi_req->cdb[8] = len; + + put_unaligned_be16(6 + 16, &buf[0]); + put_unaligned_be16(16, &buf[2]); + put_unaligned_be64(lba, &buf[8]); + put_unaligned_be32(count, &buf[16]); + + req = skreq->req; + blk_add_request_payload(req, page, len); + req->buffer = buf; +} + +static void skd_request_fn_not_online(struct request_queue *q); + +static void skd_request_fn(struct request_queue *q) +{ + struct skd_device *skdev = q->queuedata; + struct skd_fitmsg_context *skmsg = NULL; + struct fit_msg_hdr *fmh = NULL; + struct skd_request_context *skreq; + struct request *req = NULL; + struct skd_scsi_request *scsi_req; + struct page *page; + unsigned long io_flags; + int error; + u32 lba; + u32 count; + int data_dir; + u32 be_lba; + u32 be_count; + u64 be_dmaa; + u64 cmdctxt; + u32 timo_slot; + void *cmd_ptr; + int flush, fua; + + if (skdev->state != SKD_DRVR_STATE_ONLINE) { + skd_request_fn_not_online(q); + return; + } + + if (blk_queue_stopped(skdev->queue)) { + if (skdev->skmsg_free_list == NULL || + skdev->skreq_free_list == NULL || + skdev->in_flight >= skdev->queue_low_water_mark) + /* There is still some kind of shortage */ + return; + + queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); + } + + /* + * Stop conditions: + * - There are no more native requests + * - There are already the maximum number of requests in progress + * - There are no more skd_request_context entries + * - There are no more FIT msg buffers + */ + for (;; ) { + + flush = fua = 0; + + req = blk_peek_request(q); + + /* Are there any native requests to start? */ + if (req == NULL) + break; + + lba = (u32)blk_rq_pos(req); + count = blk_rq_sectors(req); + data_dir = rq_data_dir(req); + io_flags = req->cmd_flags; + + if (io_flags & REQ_FLUSH) + flush++; + + if (io_flags & REQ_FUA) + fua++; + + pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) " + "count=%u(0x%x) dir=%d\n", + skdev->name, __func__, __LINE__, + req, lba, lba, count, count, data_dir); + + /* At this point we know there is a request */ + + /* Are too many requets already in progress? 
*/ + if (skdev->in_flight >= skdev->cur_max_queue_depth) { + pr_debug("%s:%s:%d qdepth %d, limit %d\n", + skdev->name, __func__, __LINE__, + skdev->in_flight, skdev->cur_max_queue_depth); + break; + } + + /* Is a skd_request_context available? */ + skreq = skdev->skreq_free_list; + if (skreq == NULL) { + pr_debug("%s:%s:%d Out of req=%p\n", + skdev->name, __func__, __LINE__, q); + break; + } + SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); + SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0); + + /* Now we check to see if we can get a fit msg */ + if (skmsg == NULL) { + if (skdev->skmsg_free_list == NULL) { + pr_debug("%s:%s:%d Out of msg\n", + skdev->name, __func__, __LINE__); + break; + } + } + + skreq->flush_cmd = 0; + skreq->n_sg = 0; + skreq->sg_byte_count = 0; + skreq->discard_page = 0; + + /* + * OK to now dequeue request from q. + * + * At this point we are comitted to either start or reject + * the native request. Note that skd_request_context is + * available but is still at the head of the free list. + */ + blk_start_request(req); + skreq->req = req; + skreq->fitmsg_id = 0; + + /* Either a FIT msg is in progress or we have to start one. */ + if (skmsg == NULL) { + /* Are there any FIT msg buffers available? */ + skmsg = skdev->skmsg_free_list; + if (skmsg == NULL) { + pr_debug("%s:%s:%d Out of msg skdev=%p\n", + skdev->name, __func__, __LINE__, + skdev); + break; + } + SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); + SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); + + skdev->skmsg_free_list = skmsg->next; + + skmsg->state = SKD_MSG_STATE_BUSY; + skmsg->id += SKD_ID_INCR; + + /* Initialize the FIT msg header */ + fmh = (struct fit_msg_hdr *)skmsg->msg_buf; + memset(fmh, 0, sizeof(*fmh)); + fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; + skmsg->length = sizeof(*fmh); + } + + skreq->fitmsg_id = skmsg->id; + + /* + * Note that a FIT msg may have just been started + * but contains no SoFIT requests yet. + */ + + /* + * Transcode the request, checking as we go. The outcome of + * the transcoding is represented by the error variable. + */ + cmd_ptr = &skmsg->msg_buf[skmsg->length]; + memset(cmd_ptr, 0, 32); + + be_lba = cpu_to_be32(lba); + be_count = cpu_to_be32(count); + be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); + cmdctxt = skreq->id + SKD_ID_INCR; + + scsi_req = cmd_ptr; + scsi_req->hdr.tag = cmdctxt; + scsi_req->hdr.sg_list_dma_address = be_dmaa; + + if (data_dir == READ) + skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; + else + skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; + + if (io_flags & REQ_DISCARD) { + page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + if (!page) { + pr_err("request_fn:Page allocation failed.\n"); + skd_end_request(skdev, skreq, -ENOMEM); + break; + } + skreq->discard_page = 1; + skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); + + } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { + skd_prep_zerosize_flush_cdb(scsi_req, skreq); + SKD_ASSERT(skreq->flush_cmd == 1); + + } else { + skd_prep_rw_cdb(scsi_req, data_dir, lba, count); + } + + if (fua) + scsi_req->cdb[1] |= SKD_FUA_NV; + + if (!req->bio) + goto skip_sg; + + error = skd_preop_sg_list(skdev, skreq); + + if (error != 0) { + /* + * Complete the native request with error. + * Note that the request context is still at the + * head of the free list, and that the SoFIT request + * was encoded into the FIT msg buffer but the FIT + * msg length has not been updated. In short, the + * only resource that has been allocated but might + * not be used is that the FIT msg could be empty. 
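
skd_prep_rw_cdb() above builds an ordinary 10-byte READ(10)/WRITE(10) CDB: opcode 0x28 or 0x2a, the LBA big-endian in bytes 2-5 and the sector count in bytes 7-8 (FUA is OR-ed into byte 1 separately in the request path). The same packing in a standalone form, with a round-trip check of the layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pack lba/count the way skd_prep_rw_cdb() does. */
static void prep_rw_cdb(uint8_t cdb[10], int is_write, uint32_t lba, uint16_t count)
{
	memset(cdb, 0, 10);
	cdb[0] = is_write ? 0x2a : 0x28;	/* WRITE(10) : READ(10) */
	cdb[2] = (lba >> 24) & 0xff;
	cdb[3] = (lba >> 16) & 0xff;
	cdb[4] = (lba >> 8) & 0xff;
	cdb[5] = lba & 0xff;
	cdb[7] = (count >> 8) & 0xff;
	cdb[8] = count & 0xff;
}

int main(void)
{
	uint8_t cdb[10];
	uint32_t lba;
	uint16_t count;
	int i;

	prep_rw_cdb(cdb, 1, 0x12345678u, 0x0102u);

	printf("cdb:");
	for (i = 0; i < 10; i++)
		printf(" %02x", cdb[i]);
	printf("\n");

	/* Round-trip the fields to show the layout really is big-endian. */
	lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	      ((uint32_t)cdb[4] << 8) | cdb[5];
	count = (uint16_t)(((uint16_t)cdb[7] << 8) | cdb[8]);
	printf("lba=0x%x count=0x%x\n", lba, count);
	return 0;
}
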
+ */ + pr_debug("%s:%s:%d error Out\n", + skdev->name, __func__, __LINE__); + skd_end_request(skdev, skreq, error); + continue; + } + +skip_sg: + scsi_req->hdr.sg_list_len_bytes = + cpu_to_be32(skreq->sg_byte_count); + + /* Complete resource allocations. */ + skdev->skreq_free_list = skreq->next; + skreq->state = SKD_REQ_STATE_BUSY; + skreq->id += SKD_ID_INCR; + + skmsg->length += sizeof(struct skd_scsi_request); + fmh->num_protocol_cmds_coalesced++; + + /* + * Update the active request counts. + * Capture the timeout timestamp. + */ + skreq->timeout_stamp = skdev->timeout_stamp; + timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; + skdev->timeout_slot[timo_slot]++; + skdev->in_flight++; + pr_debug("%s:%s:%d req=0x%x busy=%d\n", + skdev->name, __func__, __LINE__, + skreq->id, skdev->in_flight); + + /* + * If the FIT msg buffer is full send it. + */ + if (skmsg->length >= SKD_N_FITMSG_BYTES || + fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { + skd_send_fitmsg(skdev, skmsg); + skmsg = NULL; + fmh = NULL; + } + } + + /* + * Is a FIT msg in progress? If it is empty put the buffer back + * on the free list. If it is non-empty send what we got. + * This minimizes latency when there are fewer requests than + * what fits in a FIT msg. + */ + if (skmsg != NULL) { + /* Bigger than just a FIT msg header? */ + if (skmsg->length > sizeof(struct fit_msg_hdr)) { + pr_debug("%s:%s:%d sending msg=%p, len %d\n", + skdev->name, __func__, __LINE__, + skmsg, skmsg->length); + skd_send_fitmsg(skdev, skmsg); + } else { + /* + * The FIT msg is empty. It means we got started + * on the msg, but the requests were rejected. + */ + skmsg->state = SKD_MSG_STATE_IDLE; + skmsg->id += SKD_ID_INCR; + skmsg->next = skdev->skmsg_free_list; + skdev->skmsg_free_list = skmsg; + } + skmsg = NULL; + fmh = NULL; + } + + /* + * If req is non-NULL it means there is something to do but + * we are out of a resource. + */ + if (req) + blk_stop_queue(skdev->queue); +} + +static void skd_end_request(struct skd_device *skdev, + struct skd_request_context *skreq, int error) +{ + struct request *req = skreq->req; + unsigned int io_flags = req->cmd_flags; + + if ((io_flags & REQ_DISCARD) && + (skreq->discard_page == 1)) { + pr_debug("%s:%s:%d, free the page!", + skdev->name, __func__, __LINE__); + free_page((unsigned long)req->buffer); + req->buffer = NULL; + } + + if (unlikely(error)) { + struct request *req = skreq->req; + char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; + u32 lba = (u32)blk_rq_pos(req); + u32 count = blk_rq_sectors(req); + + pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", + skd_name(skdev), cmd, lba, count, skreq->id); + } else + pr_debug("%s:%s:%d id=0x%x error=%d\n", + skdev->name, __func__, __LINE__, skreq->id, error); + + __blk_end_request_all(skreq->req, error); +} + +static int skd_preop_sg_list(struct skd_device *skdev, + struct skd_request_context *skreq) +{ + struct request *req = skreq->req; + int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; + int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; + struct scatterlist *sg = &skreq->sg[0]; + int n_sg; + int i; + + skreq->sg_byte_count = 0; + + /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || + skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ + + n_sg = blk_rq_map_sg(skdev->queue, req, sg); + if (n_sg <= 0) + return -EINVAL; + + /* + * Map scatterlist to PCI bus addresses. + * Note PCI might change the number of entries. 
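
skd_preop_sg_list() above converts the block layer's scatterlist into the card's descriptor format: each entry carries a byte count and a bus address, entries point at the next descriptor, and the final one is marked LAST with a null next pointer (skd_postop_sg_list() later restores that link so the table can be reused). A toy version of the fill loop over plain (addr, len) pairs follows; unlike the driver, it also writes the next links that the real code precomputes when the table is allocated.

#include <stdio.h>
#include <stdint.h>

#define CTRL_NOT_LAST 0
#define CTRL_LAST     1

/* Shaped after fit_sg_descriptor, keeping only the fields the sketch needs. */
struct sg_desc {
	uint8_t  control;
	uint32_t byte_count;
	uint64_t host_addr;
	uint64_t next_desc;
};

/* Fill a pre-allocated descriptor table from (addr, len) pairs and return
 * the total byte count, mimicking the loop in skd_preop_sg_list(). */
static uint32_t fill_sg_table(struct sg_desc *tbl, uint64_t tbl_bus_addr,
			      const uint64_t *addr, const uint32_t *len, int n)
{
	uint32_t total = 0;
	int i;

	if (n <= 0)
		return 0;

	for (i = 0; i < n; i++) {
		tbl[i].control = CTRL_NOT_LAST;
		tbl[i].byte_count = len[i];
		tbl[i].host_addr = addr[i];
		tbl[i].next_desc = tbl_bus_addr + (uint64_t)(i + 1) * sizeof(*tbl);
		total += len[i];
	}
	tbl[n - 1].control = CTRL_LAST;		/* terminate the chain */
	tbl[n - 1].next_desc = 0;
	return total;
}

int main(void)
{
	struct sg_desc tbl[3];
	uint64_t addr[3] = { 0x1000, 0x3000, 0x8000 };
	uint32_t len[3]  = { 4096, 4096, 512 };
	uint32_t total = fill_sg_table(tbl, 0xd0000000ull, addr, len, 3);
	int i;

	for (i = 0; i < 3; i++)
		printf("sg[%d] addr=0x%llx len=%u ctrl=%u next=0x%llx\n", i,
		       (unsigned long long)tbl[i].host_addr, tbl[i].byte_count,
		       tbl[i].control, (unsigned long long)tbl[i].next_desc);
	printf("total bytes=%u\n", total);
	return 0;
}
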
+ */ + n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); + if (n_sg <= 0) + return -EINVAL; + + SKD_ASSERT(n_sg <= skdev->sgs_per_request); + + skreq->n_sg = n_sg; + + for (i = 0; i < n_sg; i++) { + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; + u32 cnt = sg_dma_len(&sg[i]); + uint64_t dma_addr = sg_dma_address(&sg[i]); + + sgd->control = FIT_SGD_CONTROL_NOT_LAST; + sgd->byte_count = cnt; + skreq->sg_byte_count += cnt; + sgd->host_side_addr = dma_addr; + sgd->dev_side_addr = 0; + } + + skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; + skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; + + if (unlikely(skdev->dbg_level > 1)) { + pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", + skdev->name, __func__, __LINE__, + skreq->id, skreq->sksg_list, skreq->sksg_dma_address); + for (i = 0; i < n_sg; i++) { + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; + pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " + "addr=0x%llx next=0x%llx\n", + skdev->name, __func__, __LINE__, + i, sgd->byte_count, sgd->control, + sgd->host_side_addr, sgd->next_desc_ptr); + } + } + + return 0; +} + +static void skd_postop_sg_list(struct skd_device *skdev, + struct skd_request_context *skreq) +{ + int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; + int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; + + /* + * restore the next ptr for next IO request so we + * don't have to set it every time. + */ + skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = + skreq->sksg_dma_address + + ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); + pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); +} + +static void skd_request_fn_not_online(struct request_queue *q) +{ + struct skd_device *skdev = q->queuedata; + int error; + + SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); + + skd_log_skdev(skdev, "req_not_online"); + switch (skdev->state) { + case SKD_DRVR_STATE_PAUSING: + case SKD_DRVR_STATE_PAUSED: + case SKD_DRVR_STATE_STARTING: + case SKD_DRVR_STATE_RESTARTING: + case SKD_DRVR_STATE_WAIT_BOOT: + /* In case of starting, we haven't started the queue, + * so we can't get here... but requests are + * possibly hanging out waiting for us because we + * reported the dev/skd0 already. They'll wait + * forever if connect doesn't complete. + * What to do??? delay dev/skd0 ?? + */ + case SKD_DRVR_STATE_BUSY: + case SKD_DRVR_STATE_BUSY_IMMINENT: + case SKD_DRVR_STATE_BUSY_ERASE: + case SKD_DRVR_STATE_DRAINING_TIMEOUT: + return; + + case SKD_DRVR_STATE_BUSY_SANITIZE: + case SKD_DRVR_STATE_STOPPING: + case SKD_DRVR_STATE_SYNCING: + case SKD_DRVR_STATE_FAULT: + case SKD_DRVR_STATE_DISAPPEARED: + default: + error = -EIO; + break; + } + + /* If we get here, terminate all pending block requeusts + * with EIO and any scsi pass thru with appropriate sense + */ + + skd_fail_all_pending(skdev); +} + +/* + ***************************************************************************** + * TIMER + ***************************************************************************** + */ + +static void skd_timer_tick_not_online(struct skd_device *skdev); + +static void skd_timer_tick(ulong arg) +{ + struct skd_device *skdev = (struct skd_device *)arg; + + u32 timo_slot; + u32 overdue_timestamp; + unsigned long reqflags; + u32 state; + + if (skdev->state == SKD_DRVR_STATE_FAULT) + /* The driver has declared fault, and we want it to + * stay that way until driver is reloaded. 
+ */ + return; + + spin_lock_irqsave(&skdev->lock, reqflags); + + state = SKD_READL(skdev, FIT_STATUS); + state &= FIT_SR_DRIVE_STATE_MASK; + if (state != skdev->drive_state) + skd_isr_fwstate(skdev); + + if (skdev->state != SKD_DRVR_STATE_ONLINE) { + skd_timer_tick_not_online(skdev); + goto timer_func_out; + } + skdev->timeout_stamp++; + timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; + + /* + * All requests that happened during the previous use of + * this slot should be done by now. The previous use was + * over 7 seconds ago. + */ + if (skdev->timeout_slot[timo_slot] == 0) + goto timer_func_out; + + /* Something is overdue */ + overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; + + pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n", + skdev->name, __func__, __LINE__, + skdev->timeout_slot[timo_slot], skdev->in_flight); + pr_err("(%s): Overdue IOs (%d), busy %d\n", + skd_name(skdev), skdev->timeout_slot[timo_slot], + skdev->in_flight); + + skdev->timer_countdown = SKD_DRAINING_TIMO; + skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; + skdev->timo_slot = timo_slot; + blk_stop_queue(skdev->queue); + +timer_func_out: + mod_timer(&skdev->timer, (jiffies + HZ)); + + spin_unlock_irqrestore(&skdev->lock, reqflags); +} + +static void skd_timer_tick_not_online(struct skd_device *skdev) +{ + switch (skdev->state) { + case SKD_DRVR_STATE_IDLE: + case SKD_DRVR_STATE_LOAD: + break; + case SKD_DRVR_STATE_BUSY_SANITIZE: + pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n", + skdev->name, __func__, __LINE__, + skdev->drive_state, skdev->state); + /* If we've been in sanitize for 3 seconds, we figure we're not + * going to get anymore completions, so recover requests now + */ + if (skdev->timer_countdown > 0) { + skdev->timer_countdown--; + return; + } + skd_recover_requests(skdev, 0); + break; + + case SKD_DRVR_STATE_BUSY: + case SKD_DRVR_STATE_BUSY_IMMINENT: + case SKD_DRVR_STATE_BUSY_ERASE: + pr_debug("%s:%s:%d busy[%x], countdown=%d\n", + skdev->name, __func__, __LINE__, + skdev->state, skdev->timer_countdown); + if (skdev->timer_countdown > 0) { + skdev->timer_countdown--; + return; + } + pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.", + skdev->name, __func__, __LINE__, + skdev->state, skdev->timer_countdown); + skd_restart_device(skdev); + break; + + case SKD_DRVR_STATE_WAIT_BOOT: + case SKD_DRVR_STATE_STARTING: + if (skdev->timer_countdown > 0) { + skdev->timer_countdown--; + return; + } + /* For now, we fault the drive. Could attempt resets to + * revcover at some point. */ + skdev->state = SKD_DRVR_STATE_FAULT; + + pr_err("(%s): DriveFault Connect Timeout (%x)\n", + skd_name(skdev), skdev->drive_state); + + /*start the queue so we can respond with error to requests */ + /* wakeup anyone waiting for startup complete */ + blk_start_queue(skdev->queue); + skdev->gendisk_on = -1; + wake_up_interruptible(&skdev->waitq); + break; + + case SKD_DRVR_STATE_ONLINE: + /* shouldn't get here. 
*/ + break; + + case SKD_DRVR_STATE_PAUSING: + case SKD_DRVR_STATE_PAUSED: + break; + + case SKD_DRVR_STATE_DRAINING_TIMEOUT: + pr_debug("%s:%s:%d " + "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", + skdev->name, __func__, __LINE__, + skdev->timo_slot, + skdev->timer_countdown, + skdev->in_flight, + skdev->timeout_slot[skdev->timo_slot]); + /* if the slot has cleared we can let the I/O continue */ + if (skdev->timeout_slot[skdev->timo_slot] == 0) { + pr_debug("%s:%s:%d Slot drained, starting queue.\n", + skdev->name, __func__, __LINE__); + skdev->state = SKD_DRVR_STATE_ONLINE; + blk_start_queue(skdev->queue); + return; + } + if (skdev->timer_countdown > 0) { + skdev->timer_countdown--; + return; + } + skd_restart_device(skdev); + break; + + case SKD_DRVR_STATE_RESTARTING: + if (skdev->timer_countdown > 0) { + skdev->timer_countdown--; + return; + } + /* For now, we fault the drive. Could attempt resets to + * revcover at some point. */ + skdev->state = SKD_DRVR_STATE_FAULT; + pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", + skd_name(skdev), skdev->drive_state); + + /* + * Recovering does two things: + * 1. completes IO with error + * 2. reclaims dma resources + * When is it safe to recover requests? + * - if the drive state is faulted + * - if the state is still soft reset after out timeout + * - if the drive registers are dead (state = FF) + * If it is "unsafe", we still need to recover, so we will + * disable pci bus mastering and disable our interrupts. + */ + + if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || + (skdev->drive_state == FIT_SR_DRIVE_FAULT) || + (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) + /* It never came out of soft reset. Try to + * recover the requests and then let them + * fail. This is to mitigate hung processes. 
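
The timer path above spreads in-flight requests across SKD_N_TIMEOUT_SLOT (4) buckets keyed by the low bits of a per-tick timeout_stamp; when the counter wraps back onto a bucket that is still non-empty, everything left in it predates the previous visit and is treated as overdue. A tiny model of that bookkeeping with plain counters and no real I/O:

#include <stdio.h>

#define N_TIMEOUT_SLOT    4u
#define TIMEOUT_SLOT_MASK 3u

static unsigned timeout_stamp;			/* bumped once per timer tick */
static unsigned timeout_slot[N_TIMEOUT_SLOT];	/* in-flight count per bucket */

/* Issue: the request is counted in the current stamp's bucket. */
static unsigned issue_request(void)
{
	unsigned slot = timeout_stamp & TIMEOUT_SLOT_MASK;

	timeout_slot[slot]++;
	return slot;
}

/* Complete: drop the request out of the bucket it was issued into. */
static void complete_request(unsigned slot)
{
	timeout_slot[slot]--;
}

/* Timer tick: advance the stamp; if the bucket we just wrapped onto is
 * non-empty, its requests have been in flight for a full trip around the
 * ring (the driver then drains and, if needed, restarts the device). */
static void timer_tick(void)
{
	unsigned slot;

	timeout_stamp++;
	slot = timeout_stamp & TIMEOUT_SLOT_MASK;
	if (timeout_slot[slot])
		printf("tick %u: %u overdue request(s) in slot %u\n",
		       timeout_stamp, timeout_slot[slot], slot);
}

int main(void)
{
	unsigned fast, stuck;
	int i;

	fast = issue_request();
	stuck = issue_request();
	complete_request(fast);		/* finishes promptly */
	(void)stuck;			/* deliberately never completed */

	for (i = 0; i < 6; i++)
		timer_tick();		/* reports the stuck request after one wrap */
	return 0;
}
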
*/ + skd_recover_requests(skdev, 0); + else { + pr_err("(%s): Disable BusMaster (%x)\n", + skd_name(skdev), skdev->drive_state); + pci_disable_device(skdev->pdev); + skd_disable_interrupts(skdev); + skd_recover_requests(skdev, 0); + } + + /*start the queue so we can respond with error to requests */ + /* wakeup anyone waiting for startup complete */ + blk_start_queue(skdev->queue); + skdev->gendisk_on = -1; + wake_up_interruptible(&skdev->waitq); + break; + + case SKD_DRVR_STATE_RESUMING: + case SKD_DRVR_STATE_STOPPING: + case SKD_DRVR_STATE_SYNCING: + case SKD_DRVR_STATE_FAULT: + case SKD_DRVR_STATE_DISAPPEARED: + default: + break; + } +} + +static int skd_start_timer(struct skd_device *skdev) +{ + int rc; + + init_timer(&skdev->timer); + setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); + + rc = mod_timer(&skdev->timer, (jiffies + HZ)); + if (rc) + pr_err("%s: failed to start timer %d\n", + __func__, rc); + return rc; +} + +static void skd_kill_timer(struct skd_device *skdev) +{ + del_timer_sync(&skdev->timer); +} + +/* + ***************************************************************************** + * IOCTL + ***************************************************************************** + */ +static int skd_ioctl_sg_io(struct skd_device *skdev, + fmode_t mode, void __user *argp); +static int skd_sg_io_get_and_check_args(struct skd_device *skdev, + struct skd_sg_io *sksgio); +static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, + struct skd_sg_io *sksgio); +static int skd_sg_io_prep_buffering(struct skd_device *skdev, + struct skd_sg_io *sksgio); +static int skd_sg_io_copy_buffer(struct skd_device *skdev, + struct skd_sg_io *sksgio, int dxfer_dir); +static int skd_sg_io_send_fitmsg(struct skd_device *skdev, + struct skd_sg_io *sksgio); +static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); +static int skd_sg_io_release_skspcl(struct skd_device *skdev, + struct skd_sg_io *sksgio); +static int skd_sg_io_put_status(struct skd_device *skdev, + struct skd_sg_io *sksgio); + +static void skd_complete_special(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 + *skcomp, + volatile struct fit_comp_error_info *skerr, + struct skd_special_context *skspcl); + +static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, + uint cmd_in, ulong arg) +{ + int rc = 0; + struct gendisk *disk = bdev->bd_disk; + struct skd_device *skdev = disk->private_data; + void __user *p = (void *)arg; + + pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", + skdev->name, __func__, __LINE__, + disk->disk_name, current->comm, mode, cmd_in, arg); + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + switch (cmd_in) { + case SG_SET_TIMEOUT: + case SG_GET_TIMEOUT: + case SG_GET_VERSION_NUM: + rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); + break; + case SG_IO: + rc = skd_ioctl_sg_io(skdev, mode, p); + break; + + default: + rc = -ENOTTY; + break; + } + + pr_debug("%s:%s:%d %s: completion rc %d\n", + skdev->name, __func__, __LINE__, disk->disk_name, rc); + return rc; +} + +static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, + void __user *argp) +{ + int rc; + struct skd_sg_io sksgio; + + memset(&sksgio, 0, sizeof(sksgio)); + sksgio.mode = mode; + sksgio.argp = argp; + sksgio.iov = &sksgio.no_iov_iov; + + switch (skdev->state) { + case SKD_DRVR_STATE_ONLINE: + case SKD_DRVR_STATE_BUSY_IMMINENT: + break; + + default: + pr_debug("%s:%s:%d drive not online\n", + skdev->name, __func__, __LINE__); + rc = -ENXIO; + goto out; + 
} + + rc = skd_sg_io_get_and_check_args(skdev, &sksgio); + if (rc) + goto out; + + rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); + if (rc) + goto out; + + rc = skd_sg_io_prep_buffering(skdev, &sksgio); + if (rc) + goto out; + + rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); + if (rc) + goto out; + + rc = skd_sg_io_send_fitmsg(skdev, &sksgio); + if (rc) + goto out; + + rc = skd_sg_io_await(skdev, &sksgio); + if (rc) + goto out; + + rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); + if (rc) + goto out; + + rc = skd_sg_io_put_status(skdev, &sksgio); + if (rc) + goto out; + + rc = 0; + +out: + skd_sg_io_release_skspcl(skdev, &sksgio); + + if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) + kfree(sksgio.iov); + return rc; +} + +static int skd_sg_io_get_and_check_args(struct skd_device *skdev, + struct skd_sg_io *sksgio) +{ + struct sg_io_hdr *sgp = &sksgio->sg; + int i, acc; + + if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { + pr_debug("%s:%s:%d access sg failed %p\n", + skdev->name, __func__, __LINE__, sksgio->argp); + return -EFAULT; + } + + if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { + pr_debug("%s:%s:%d copy_from_user sg failed %p\n", + skdev->name, __func__, __LINE__, sksgio->argp); + return -EFAULT; + } + + if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { + pr_debug("%s:%s:%d interface_id invalid 0x%x\n", + skdev->name, __func__, __LINE__, sgp->interface_id); + return -EINVAL; + } + + if (sgp->cmd_len > sizeof(sksgio->cdb)) { + pr_debug("%s:%s:%d cmd_len invalid %d\n", + skdev->name, __func__, __LINE__, sgp->cmd_len); + return -EINVAL; + } + + if (sgp->iovec_count > 256) { + pr_debug("%s:%s:%d iovec_count invalid %d\n", + skdev->name, __func__, __LINE__, sgp->iovec_count); + return -EINVAL; + } + + if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { + pr_debug("%s:%s:%d dxfer_len invalid %d\n", + skdev->name, __func__, __LINE__, sgp->dxfer_len); + return -EINVAL; + } + + switch (sgp->dxfer_direction) { + case SG_DXFER_NONE: + acc = -1; + break; + + case SG_DXFER_TO_DEV: + acc = VERIFY_READ; + break; + + case SG_DXFER_FROM_DEV: + case SG_DXFER_TO_FROM_DEV: + acc = VERIFY_WRITE; + break; + + default: + pr_debug("%s:%s:%d dxfer_dir invalid %d\n", + skdev->name, __func__, __LINE__, sgp->dxfer_direction); + return -EINVAL; + } + + if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { + pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", + skdev->name, __func__, __LINE__, sgp->cmdp); + return -EFAULT; + } + + if (sgp->mx_sb_len != 0) { + if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { + pr_debug("%s:%s:%d access sbp failed %p\n", + skdev->name, __func__, __LINE__, sgp->sbp); + return -EFAULT; + } + } + + if (sgp->iovec_count == 0) { + sksgio->iov[0].iov_base = sgp->dxferp; + sksgio->iov[0].iov_len = sgp->dxfer_len; + sksgio->iovcnt = 1; + sksgio->dxfer_len = sgp->dxfer_len; + } else { + struct sg_iovec *iov; + uint nbytes = sizeof(*iov) * sgp->iovec_count; + size_t iov_data_len; + + iov = kmalloc(nbytes, GFP_KERNEL); + if (iov == NULL) { + pr_debug("%s:%s:%d alloc iovec failed %d\n", + skdev->name, __func__, __LINE__, + sgp->iovec_count); + return -ENOMEM; + } + sksgio->iov = iov; + sksgio->iovcnt = sgp->iovec_count; + + if (copy_from_user(iov, sgp->dxferp, nbytes)) { + pr_debug("%s:%s:%d copy_from_user iovec failed %p\n", + skdev->name, __func__, __LINE__, sgp->dxferp); + return -EFAULT; + } + + /* + * Sum up the vecs, making sure they don't overflow + */ + iov_data_len = 0; + for (i = 0; i < 
sgp->iovec_count; i++) { + if (iov_data_len + iov[i].iov_len < iov_data_len) + return -EINVAL; + iov_data_len += iov[i].iov_len; + } + + /* SG_IO howto says that the shorter of the two wins */ + if (sgp->dxfer_len < iov_data_len) { + sksgio->iovcnt = iov_shorten((struct iovec *)iov, + sgp->iovec_count, + sgp->dxfer_len); + sksgio->dxfer_len = sgp->dxfer_len; + } else + sksgio->dxfer_len = iov_data_len; + } + + if (sgp->dxfer_direction != SG_DXFER_NONE) { + struct sg_iovec *iov = sksgio->iov; + for (i = 0; i < sksgio->iovcnt; i++, iov++) { + if (!access_ok(acc, iov->iov_base, iov->iov_len)) { + pr_debug("%s:%s:%d access data failed %p/%d\n", + skdev->name, __func__, __LINE__, + iov->iov_base, (int)iov->iov_len); + return -EFAULT; + } + } + } + + return 0; +} + +static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, + struct skd_sg_io *sksgio) +{ + struct skd_special_context *skspcl = NULL; + int rc; + + for (;;) { + ulong flags; + + spin_lock_irqsave(&skdev->lock, flags); + skspcl = skdev->skspcl_free_list; + if (skspcl != NULL) { + skdev->skspcl_free_list = + (struct skd_special_context *)skspcl->req.next; + skspcl->req.id += SKD_ID_INCR; + skspcl->req.state = SKD_REQ_STATE_SETUP; + skspcl->orphaned = 0; + skspcl->req.n_sg = 0; + } + spin_unlock_irqrestore(&skdev->lock, flags); + + if (skspcl != NULL) { + rc = 0; + break; + } + + pr_debug("%s:%s:%d blocking\n", + skdev->name, __func__, __LINE__); + + rc = wait_event_interruptible_timeout( + skdev->waitq, + (skdev->skspcl_free_list != NULL), + msecs_to_jiffies(sksgio->sg.timeout)); + + pr_debug("%s:%s:%d unblocking, rc=%d\n", + skdev->name, __func__, __LINE__, rc); + + if (rc <= 0) { + if (rc == 0) + rc = -ETIMEDOUT; + else + rc = -EINTR; + break; + } + /* + * If we get here rc > 0 meaning the timeout to + * wait_event_interruptible_timeout() had time left, hence the + * sought event -- non-empty free list -- happened. + * Retry the allocation. + */ + } + sksgio->skspcl = skspcl; + + return rc; +} + +static int skd_skreq_prep_buffering(struct skd_device *skdev, + struct skd_request_context *skreq, + u32 dxfer_len) +{ + u32 resid = dxfer_len; + + /* + * The DMA engine must have aligned addresses and byte counts. + */ + resid += (-resid) & 3; + skreq->sg_byte_count = resid; + + skreq->n_sg = 0; + + while (resid > 0) { + u32 nbytes = PAGE_SIZE; + u32 ix = skreq->n_sg; + struct scatterlist *sg = &skreq->sg[ix]; + struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; + struct page *page; + + if (nbytes > resid) + nbytes = resid; + + page = alloc_page(GFP_KERNEL); + if (page == NULL) + return -ENOMEM; + + sg_set_page(sg, page, nbytes, 0); + + /* TODO: This should be going through a pci_???() + * routine to do proper mapping. 
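
Two small idioms in the SG_IO path above deserve a note: the iovec walk rejects user vectors whose lengths would overflow by checking that the running sum never shrinks after an addition, and skd_skreq_prep_buffering() rounds the transfer length up to a 4-byte multiple with resid += (-resid) & 3. Both are shown standalone below.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Sum iovec-style lengths, failing if the total would wrap, the same way
 * skd_sg_io_get_and_check_args() validates user-supplied vectors. */
static int sum_lengths(const size_t *len, int n, size_t *total)
{
	size_t sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (sum + len[i] < sum)	/* wrapped around: reject */
			return -1;
		sum += len[i];
	}
	*total = sum;
	return 0;
}

/* Round up to the next multiple of 4: (-x) & 3 is the gap to that multiple. */
static uint32_t align4(uint32_t x)
{
	return x + ((-x) & 3);
}

int main(void)
{
	size_t ok[]  = { 4096, 512, 13 };
	size_t bad[] = { SIZE_MAX, 16 };
	size_t total = 0;
	uint32_t v;
	int rc;

	rc = sum_lengths(ok, 3, &total);
	printf("sum ok:  %d (total=%zu)\n", rc, total);
	rc = sum_lengths(bad, 2, &total);
	printf("sum bad: %d\n", rc);

	for (v = 0; v < 6; v++)
		printf("align4(%u) = %u\n", v, align4(v));
	return 0;
}
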
*/ + sksg->control = FIT_SGD_CONTROL_NOT_LAST; + sksg->byte_count = nbytes; + + sksg->host_side_addr = sg_phys(sg); + + sksg->dev_side_addr = 0; + sksg->next_desc_ptr = skreq->sksg_dma_address + + (ix + 1) * sizeof(*sksg); + + skreq->n_sg++; + resid -= nbytes; + } + + if (skreq->n_sg > 0) { + u32 ix = skreq->n_sg - 1; + struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; + + sksg->control = FIT_SGD_CONTROL_LAST; + sksg->next_desc_ptr = 0; + } + + if (unlikely(skdev->dbg_level > 1)) { + u32 i; + + pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", + skdev->name, __func__, __LINE__, + skreq->id, skreq->sksg_list, skreq->sksg_dma_address); + for (i = 0; i < skreq->n_sg; i++) { + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; + + pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " + "addr=0x%llx next=0x%llx\n", + skdev->name, __func__, __LINE__, + i, sgd->byte_count, sgd->control, + sgd->host_side_addr, sgd->next_desc_ptr); + } + } + + return 0; +} + +static int skd_sg_io_prep_buffering(struct skd_device *skdev, + struct skd_sg_io *sksgio) +{ + struct skd_special_context *skspcl = sksgio->skspcl; + struct skd_request_context *skreq = &skspcl->req; + u32 dxfer_len = sksgio->dxfer_len; + int rc; + + rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); + /* + * Eventually, errors or not, skd_release_special() is called + * to recover allocations including partial allocations. + */ + return rc; +} + +static int skd_sg_io_copy_buffer(struct skd_device *skdev, + struct skd_sg_io *sksgio, int dxfer_dir) +{ + struct skd_special_context *skspcl = sksgio->skspcl; + u32 iov_ix = 0; + struct sg_iovec curiov; + u32 sksg_ix = 0; + u8 *bufp = NULL; + u32 buf_len = 0; + u32 resid = sksgio->dxfer_len; + int rc; + + curiov.iov_len = 0; + curiov.iov_base = NULL; + + if (dxfer_dir != sksgio->sg.dxfer_direction) { + if (dxfer_dir != SG_DXFER_TO_DEV || + sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) + return 0; + } + + while (resid > 0) { + u32 nbytes = PAGE_SIZE; + + if (curiov.iov_len == 0) { + curiov = sksgio->iov[iov_ix++]; + continue; + } + + if (buf_len == 0) { + struct page *page; + page = sg_page(&skspcl->req.sg[sksg_ix++]); + bufp = page_address(page); + buf_len = PAGE_SIZE; + } + + nbytes = min_t(u32, nbytes, resid); + nbytes = min_t(u32, nbytes, curiov.iov_len); + nbytes = min_t(u32, nbytes, buf_len); + + if (dxfer_dir == SG_DXFER_TO_DEV) + rc = __copy_from_user(bufp, curiov.iov_base, nbytes); + else + rc = __copy_to_user(curiov.iov_base, bufp, nbytes); + + if (rc) + return -EFAULT; + + resid -= nbytes; + curiov.iov_len -= nbytes; + curiov.iov_base += nbytes; + buf_len -= nbytes; + } + + return 0; +} + +static int skd_sg_io_send_fitmsg(struct skd_device *skdev, + struct skd_sg_io *sksgio) +{ + struct skd_special_context *skspcl = sksgio->skspcl; + struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; + struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; + + memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); + + /* Initialize the FIT msg header */ + fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; + fmh->num_protocol_cmds_coalesced = 1; + + /* Initialize the SCSI request */ + if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) + scsi_req->hdr.sg_list_dma_address = + cpu_to_be64(skspcl->req.sksg_dma_address); + scsi_req->hdr.tag = skspcl->req.id; + scsi_req->hdr.sg_list_len_bytes = + cpu_to_be32(skspcl->req.sg_byte_count); + memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); + + skspcl->req.state = SKD_REQ_STATE_BUSY; + skd_send_special_fitmsg(skdev, 
skspcl); + + return 0; +} + +static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) +{ + unsigned long flags; + int rc; + + rc = wait_event_interruptible_timeout(skdev->waitq, + (sksgio->skspcl->req.state != + SKD_REQ_STATE_BUSY), + msecs_to_jiffies(sksgio->sg. + timeout)); + + spin_lock_irqsave(&skdev->lock, flags); + + if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { + pr_debug("%s:%s:%d skspcl %p aborted\n", + skdev->name, __func__, __LINE__, sksgio->skspcl); + + /* Build check cond, sense and let command finish. */ + /* For a timeout, we must fabricate completion and sense + * data to complete the command */ + sksgio->skspcl->req.completion.status = + SAM_STAT_CHECK_CONDITION; + + memset(&sksgio->skspcl->req.err_info, 0, + sizeof(sksgio->skspcl->req.err_info)); + sksgio->skspcl->req.err_info.type = 0x70; + sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; + sksgio->skspcl->req.err_info.code = 0x44; + sksgio->skspcl->req.err_info.qual = 0; + rc = 0; + } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) + /* No longer on the adapter. We finish. */ + rc = 0; + else { + /* Something's gone wrong. Still busy. Timeout or + * user interrupted (control-C). Mark as an orphan + * so it will be disposed when completed. */ + sksgio->skspcl->orphaned = 1; + sksgio->skspcl = NULL; + if (rc == 0) { + pr_debug("%s:%s:%d timed out %p (%u ms)\n", + skdev->name, __func__, __LINE__, + sksgio, sksgio->sg.timeout); + rc = -ETIMEDOUT; + } else { + pr_debug("%s:%s:%d cntlc %p\n", + skdev->name, __func__, __LINE__, sksgio); + rc = -EINTR; + } + } + + spin_unlock_irqrestore(&skdev->lock, flags); + + return rc; +} + +static int skd_sg_io_put_status(struct skd_device *skdev, + struct skd_sg_io *sksgio) +{ + struct sg_io_hdr *sgp = &sksgio->sg; + struct skd_special_context *skspcl = sksgio->skspcl; + int resid = 0; + + u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); + + sgp->status = skspcl->req.completion.status; + resid = sksgio->dxfer_len - nb; + + sgp->masked_status = sgp->status & STATUS_MASK; + sgp->msg_status = 0; + sgp->host_status = 0; + sgp->driver_status = 0; + sgp->resid = resid; + if (sgp->masked_status || sgp->host_status || sgp->driver_status) + sgp->info |= SG_INFO_CHECK; + + pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n", + skdev->name, __func__, __LINE__, + sgp->status, sgp->masked_status, sgp->resid); + + if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { + if (sgp->mx_sb_len > 0) { + struct fit_comp_error_info *ei = &skspcl->req.err_info; + u32 nbytes = sizeof(*ei); + + nbytes = min_t(u32, nbytes, sgp->mx_sb_len); + + sgp->sb_len_wr = nbytes; + + if (__copy_to_user(sgp->sbp, ei, nbytes)) { + pr_debug("%s:%s:%d copy_to_user sense failed %p\n", + skdev->name, __func__, __LINE__, + sgp->sbp); + return -EFAULT; + } + } + } + + if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { + pr_debug("%s:%s:%d copy_to_user sg failed %p\n", + skdev->name, __func__, __LINE__, sksgio->argp); + return -EFAULT; + } + + return 0; +} + +static int skd_sg_io_release_skspcl(struct skd_device *skdev, + struct skd_sg_io *sksgio) +{ + struct skd_special_context *skspcl = sksgio->skspcl; + + if (skspcl != NULL) { + ulong flags; + + sksgio->skspcl = NULL; + + spin_lock_irqsave(&skdev->lock, flags); + skd_release_special(skdev, skspcl); + spin_unlock_irqrestore(&skdev->lock, flags); + } + + return 0; +} + +/* + ***************************************************************************** + * INTERNAL REQUESTS -- generated by driver itself + 
***************************************************************************** + */ + +static int skd_format_internal_skspcl(struct skd_device *skdev) +{ + struct skd_special_context *skspcl = &skdev->internal_skspcl; + struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; + struct fit_msg_hdr *fmh; + uint64_t dma_address; + struct skd_scsi_request *scsi; + + fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; + fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; + fmh->num_protocol_cmds_coalesced = 1; + + scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; + memset(scsi, 0, sizeof(*scsi)); + dma_address = skspcl->req.sksg_dma_address; + scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); + sgd->control = FIT_SGD_CONTROL_LAST; + sgd->byte_count = 0; + sgd->host_side_addr = skspcl->db_dma_address; + sgd->dev_side_addr = 0; + sgd->next_desc_ptr = 0LL; + + return 1; +} + +#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES + +static void skd_send_internal_skspcl(struct skd_device *skdev, + struct skd_special_context *skspcl, + u8 opcode) +{ + struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; + struct skd_scsi_request *scsi; + unsigned char *buf = skspcl->data_buf; + int i; + + if (skspcl->req.state != SKD_REQ_STATE_IDLE) + /* + * A refresh is already in progress. + * Just wait for it to finish. + */ + return; + + SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); + skspcl->req.state = SKD_REQ_STATE_BUSY; + skspcl->req.id += SKD_ID_INCR; + + scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; + scsi->hdr.tag = skspcl->req.id; + + memset(scsi->cdb, 0, sizeof(scsi->cdb)); + + switch (opcode) { + case TEST_UNIT_READY: + scsi->cdb[0] = TEST_UNIT_READY; + sgd->byte_count = 0; + scsi->hdr.sg_list_len_bytes = 0; + break; + + case READ_CAPACITY: + scsi->cdb[0] = READ_CAPACITY; + sgd->byte_count = SKD_N_READ_CAP_BYTES; + scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); + break; + + case INQUIRY: + scsi->cdb[0] = INQUIRY; + scsi->cdb[1] = 0x01; /* evpd */ + scsi->cdb[2] = 0x80; /* serial number page */ + scsi->cdb[4] = 0x10; + sgd->byte_count = 16; + scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); + break; + + case SYNCHRONIZE_CACHE: + scsi->cdb[0] = SYNCHRONIZE_CACHE; + sgd->byte_count = 0; + scsi->hdr.sg_list_len_bytes = 0; + break; + + case WRITE_BUFFER: + scsi->cdb[0] = WRITE_BUFFER; + scsi->cdb[1] = 0x02; + scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; + scsi->cdb[8] = WR_BUF_SIZE & 0xFF; + sgd->byte_count = WR_BUF_SIZE; + scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); + /* fill incrementing byte pattern */ + for (i = 0; i < sgd->byte_count; i++) + buf[i] = i & 0xFF; + break; + + case READ_BUFFER: + scsi->cdb[0] = READ_BUFFER; + scsi->cdb[1] = 0x02; + scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; + scsi->cdb[8] = WR_BUF_SIZE & 0xFF; + sgd->byte_count = WR_BUF_SIZE; + scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); + memset(skspcl->data_buf, 0, sgd->byte_count); + break; + + default: + SKD_ASSERT("Don't know what to send"); + return; + + } + skd_send_special_fitmsg(skdev, skspcl); +} + +static void skd_refresh_device_data(struct skd_device *skdev) +{ + struct skd_special_context *skspcl = &skdev->internal_skspcl; + + skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); +} + +static int skd_chk_read_buf(struct skd_device *skdev, + struct skd_special_context *skspcl) +{ + unsigned char *buf = skspcl->data_buf; + int i; + + /* check for incrementing byte pattern */ + for (i = 0; i < WR_BUF_SIZE; i++) + if (buf[i] != (i & 0xFF)) + return 1; + + return 0; 
+} + +static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, + u8 code, u8 qual, u8 fruc) +{ + /* If the check condition is of special interest, log a message */ + if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) + && (code == 0x04) && (qual == 0x06)) { + pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" + "ascq/fruc %02x/%02x/%02x/%02x\n", + skd_name(skdev), key, code, qual, fruc); + } +} + +static void skd_complete_internal(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 + *skcomp, + volatile struct fit_comp_error_info *skerr, + struct skd_special_context *skspcl) +{ + u8 *buf = skspcl->data_buf; + u8 status; + int i; + struct skd_scsi_request *scsi = + (struct skd_scsi_request *)&skspcl->msg_buf[64]; + + SKD_ASSERT(skspcl == &skdev->internal_skspcl); + + pr_debug("%s:%s:%d complete internal %x\n", + skdev->name, __func__, __LINE__, scsi->cdb[0]); + + skspcl->req.completion = *skcomp; + skspcl->req.state = SKD_REQ_STATE_IDLE; + skspcl->req.id += SKD_ID_INCR; + + status = skspcl->req.completion.status; + + skd_log_check_status(skdev, status, skerr->key, skerr->code, + skerr->qual, skerr->fruc); + + switch (scsi->cdb[0]) { + case TEST_UNIT_READY: + if (status == SAM_STAT_GOOD) + skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); + else if ((status == SAM_STAT_CHECK_CONDITION) && + (skerr->key == MEDIUM_ERROR)) + skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); + else { + if (skdev->state == SKD_DRVR_STATE_STOPPING) { + pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n", + skdev->name, __func__, __LINE__, + skdev->state); + return; + } + pr_debug("%s:%s:%d **** TUR failed, retry skerr\n", + skdev->name, __func__, __LINE__); + skd_send_internal_skspcl(skdev, skspcl, 0x00); + } + break; + + case WRITE_BUFFER: + if (status == SAM_STAT_GOOD) + skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); + else { + if (skdev->state == SKD_DRVR_STATE_STOPPING) { + pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n", + skdev->name, __func__, __LINE__, + skdev->state); + return; + } + pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n", + skdev->name, __func__, __LINE__); + skd_send_internal_skspcl(skdev, skspcl, 0x00); + } + break; + + case READ_BUFFER: + if (status == SAM_STAT_GOOD) { + if (skd_chk_read_buf(skdev, skspcl) == 0) + skd_send_internal_skspcl(skdev, skspcl, + READ_CAPACITY); + else { + pr_err( + "(%s):*** W/R Buffer mismatch %d ***\n", + skd_name(skdev), skdev->connect_retries); + if (skdev->connect_retries < + SKD_MAX_CONNECT_RETRIES) { + skdev->connect_retries++; + skd_soft_reset(skdev); + } else { + pr_err( + "(%s): W/R Buffer Connect Error\n", + skd_name(skdev)); + return; + } + } + + } else { + if (skdev->state == SKD_DRVR_STATE_STOPPING) { + pr_debug("%s:%s:%d " + "read buffer failed, don't send anymore state 0x%x\n", + skdev->name, __func__, __LINE__, + skdev->state); + return; + } + pr_debug("%s:%s:%d " + "**** read buffer failed, retry skerr\n", + skdev->name, __func__, __LINE__); + skd_send_internal_skspcl(skdev, skspcl, 0x00); + } + break; + + case READ_CAPACITY: + skdev->read_cap_is_valid = 0; + if (status == SAM_STAT_GOOD) { + skdev->read_cap_last_lba = + (buf[0] << 24) | (buf[1] << 16) | + (buf[2] << 8) | buf[3]; + skdev->read_cap_blocksize = + (buf[4] << 24) | (buf[5] << 16) | + (buf[6] << 8) | buf[7]; + + pr_debug("%s:%s:%d last lba %d, bs %d\n", + skdev->name, __func__, __LINE__, + skdev->read_cap_last_lba, + skdev->read_cap_blocksize); + + set_capacity(skdev->disk, 
skdev->read_cap_last_lba + 1); + + skdev->read_cap_is_valid = 1; + + skd_send_internal_skspcl(skdev, skspcl, INQUIRY); + } else if ((status == SAM_STAT_CHECK_CONDITION) && + (skerr->key == MEDIUM_ERROR)) { + skdev->read_cap_last_lba = ~0; + set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); + pr_debug("%s:%s:%d " + "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n", + skdev->name, __func__, __LINE__); + skd_send_internal_skspcl(skdev, skspcl, INQUIRY); + } else { + pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", + skdev->name, __func__, __LINE__); + skd_send_internal_skspcl(skdev, skspcl, + TEST_UNIT_READY); + } + break; + + case INQUIRY: + skdev->inquiry_is_valid = 0; + if (status == SAM_STAT_GOOD) { + skdev->inquiry_is_valid = 1; + + for (i = 0; i < 12; i++) + skdev->inq_serial_num[i] = buf[i + 4]; + skdev->inq_serial_num[12] = 0; + } + + if (skd_unquiesce_dev(skdev) < 0) + pr_debug("%s:%s:%d **** failed, to ONLINE device\n", + skdev->name, __func__, __LINE__); + /* connection is complete */ + skdev->connect_retries = 0; + break; + + case SYNCHRONIZE_CACHE: + if (status == SAM_STAT_GOOD) + skdev->sync_done = 1; + else + skdev->sync_done = -1; + wake_up_interruptible(&skdev->waitq); + break; + + default: + SKD_ASSERT("we didn't send this"); + } +} + +/* + ***************************************************************************** + * FIT MESSAGES + ***************************************************************************** + */ + +static void skd_send_fitmsg(struct skd_device *skdev, + struct skd_fitmsg_context *skmsg) +{ + u64 qcmd; + struct fit_msg_hdr *fmh; + + pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", + skdev->name, __func__, __LINE__, + skmsg->mb_dma_address, skdev->in_flight); + pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", + skdev->name, __func__, __LINE__, + skmsg->msg_buf, skmsg->offset); + + qcmd = skmsg->mb_dma_address; + qcmd |= FIT_QCMD_QID_NORMAL; + + fmh = (struct fit_msg_hdr *)skmsg->msg_buf; + skmsg->outstanding = fmh->num_protocol_cmds_coalesced; + + if (unlikely(skdev->dbg_level > 1)) { + u8 *bp = (u8 *)skmsg->msg_buf; + int i; + for (i = 0; i < skmsg->length; i += 8) { + pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x " + "%02x %02x %02x %02x\n", + skdev->name, __func__, __LINE__, + i, bp[i + 0], bp[i + 1], bp[i + 2], + bp[i + 3], bp[i + 4], bp[i + 5], + bp[i + 6], bp[i + 7]); + if (i == 0) + i = 64 - 8; + } + } + + if (skmsg->length > 256) + qcmd |= FIT_QCMD_MSGSIZE_512; + else if (skmsg->length > 128) + qcmd |= FIT_QCMD_MSGSIZE_256; + else if (skmsg->length > 64) + qcmd |= FIT_QCMD_MSGSIZE_128; + else + /* + * This makes no sense because the FIT msg header is + * 64 bytes. If the msg is only 64 bytes long it has + * no payload. 
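+		 * (The size class appears to be carried in the low
+		 * address bits of qcmd, which skd_cons_skmsg() leaves
+		 * clear by rounding the message buffer up to
+		 * FIT_QCMD_BASE_ADDRESS_MASK alignment.)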
+ */ + qcmd |= FIT_QCMD_MSGSIZE_64; + + SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); + +} + +static void skd_send_special_fitmsg(struct skd_device *skdev, + struct skd_special_context *skspcl) +{ + u64 qcmd; + + if (unlikely(skdev->dbg_level > 1)) { + u8 *bp = (u8 *)skspcl->msg_buf; + int i; + + for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { + pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x " + "%02x %02x %02x %02x\n", + skdev->name, __func__, __LINE__, i, + bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], + bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); + if (i == 0) + i = 64 - 8; + } + + pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", + skdev->name, __func__, __LINE__, + skspcl, skspcl->req.id, skspcl->req.sksg_list, + skspcl->req.sksg_dma_address); + for (i = 0; i < skspcl->req.n_sg; i++) { + struct fit_sg_descriptor *sgd = + &skspcl->req.sksg_list[i]; + + pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " + "addr=0x%llx next=0x%llx\n", + skdev->name, __func__, __LINE__, + i, sgd->byte_count, sgd->control, + sgd->host_side_addr, sgd->next_desc_ptr); + } + } + + /* + * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr + * and one 64-byte SSDI command. + */ + qcmd = skspcl->mb_dma_address; + qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; + + SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); +} + +/* + ***************************************************************************** + * COMPLETION QUEUE + ***************************************************************************** + */ + +static void skd_complete_other(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 *skcomp, + volatile struct fit_comp_error_info *skerr); + +struct sns_info { + u8 type; + u8 stat; + u8 key; + u8 asc; + u8 ascq; + u8 mask; + enum skd_check_status_action action; +}; + +static struct sns_info skd_chkstat_table[] = { + /* Good */ + { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, + SKD_CHECK_STATUS_REPORT_GOOD }, + + /* Smart alerts */ + { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ + SKD_CHECK_STATUS_REPORT_SMART_ALERT }, + { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ + SKD_CHECK_STATUS_REPORT_SMART_ALERT }, + { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ + SKD_CHECK_STATUS_REPORT_SMART_ALERT }, + + /* Retry (with limits) */ + { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ + SKD_CHECK_STATUS_REQUEUE_REQUEST }, + { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ + SKD_CHECK_STATUS_REQUEUE_REQUEST }, + { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ + SKD_CHECK_STATUS_REQUEUE_REQUEST }, + { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ + SKD_CHECK_STATUS_REQUEUE_REQUEST }, + + /* Busy (or about to be) */ + { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ + SKD_CHECK_STATUS_BUSY_IMMINENT }, +}; + +/* + * Look up status and sense data to decide how to handle the error + * from the device. + * mask says which fields must match e.g., mask=0x18 means check + * type and stat, ignore key, asc, ascq. 
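+ * For example, mask=0x1c means type, stat and key must all match
+ * (0x10=type, 0x08=stat, 0x04=key, 0x02=asc, 0x01=ascq), and
+ * mask=0x1f requires every field to match.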
+ */ + +static enum skd_check_status_action +skd_check_status(struct skd_device *skdev, + u8 cmp_status, volatile struct fit_comp_error_info *skerr) +{ + int i, n; + + pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", + skd_name(skdev), skerr->key, skerr->code, skerr->qual, + skerr->fruc); + + pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", + skdev->name, __func__, __LINE__, skerr->type, cmp_status, + skerr->key, skerr->code, skerr->qual, skerr->fruc); + + /* Does the info match an entry in the good category? */ + n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); + for (i = 0; i < n; i++) { + struct sns_info *sns = &skd_chkstat_table[i]; + + if (sns->mask & 0x10) + if (skerr->type != sns->type) + continue; + + if (sns->mask & 0x08) + if (cmp_status != sns->stat) + continue; + + if (sns->mask & 0x04) + if (skerr->key != sns->key) + continue; + + if (sns->mask & 0x02) + if (skerr->code != sns->asc) + continue; + + if (sns->mask & 0x01) + if (skerr->qual != sns->ascq) + continue; + + if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { + pr_err("(%s): SMART Alert: sense key/asc/ascq " + "%02x/%02x/%02x\n", + skd_name(skdev), skerr->key, + skerr->code, skerr->qual); + } + return sns->action; + } + + /* No other match, so nonzero status means error, + * zero status means good + */ + if (cmp_status) { + pr_debug("%s:%s:%d status check: error\n", + skdev->name, __func__, __LINE__); + return SKD_CHECK_STATUS_REPORT_ERROR; + } + + pr_debug("%s:%s:%d status check good default\n", + skdev->name, __func__, __LINE__); + return SKD_CHECK_STATUS_REPORT_GOOD; +} + +static void skd_resolve_req_exception(struct skd_device *skdev, + struct skd_request_context *skreq) +{ + u8 cmp_status = skreq->completion.status; + + switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { + case SKD_CHECK_STATUS_REPORT_GOOD: + case SKD_CHECK_STATUS_REPORT_SMART_ALERT: + skd_end_request(skdev, skreq, 0); + break; + + case SKD_CHECK_STATUS_BUSY_IMMINENT: + skd_log_skreq(skdev, skreq, "retry(busy)"); + blk_requeue_request(skdev->queue, skreq->req); + pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); + skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; + skdev->timer_countdown = SKD_TIMER_MINUTES(20); + skd_quiesce_dev(skdev); + break; + + case SKD_CHECK_STATUS_REQUEUE_REQUEST: + if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { + skd_log_skreq(skdev, skreq, "retry"); + blk_requeue_request(skdev->queue, skreq->req); + break; + } + /* fall through to report error */ + + case SKD_CHECK_STATUS_REPORT_ERROR: + default: + skd_end_request(skdev, skreq, -EIO); + break; + } +} + +/* assume spinlock is already held */ +static void skd_release_skreq(struct skd_device *skdev, + struct skd_request_context *skreq) +{ + u32 msg_slot; + struct skd_fitmsg_context *skmsg; + + u32 timo_slot; + + /* + * Reclaim the FIT msg buffer if this is + * the first of the requests it carried to + * be completed. The FIT msg buffer used to + * send this request cannot be reused until + * we are sure the s1120 card has copied + * it to its memory. The FIT msg might have + * contained several requests. As soon as + * any of them are completed we know that + * the entire FIT msg was transferred. + * Only the first completed request will + * match the FIT msg buffer id. The FIT + * msg buffer id is immediately updated. + * When subsequent requests complete the FIT + * msg buffer id won't match, so we know + * quite cheaply that it is already done. 
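+	 * (Each id carries a generation count that is bumped by
+	 * SKD_ID_INCR whenever the slot is reused, which is what makes
+	 * the compare below unambiguous.)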
+ */ + msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; + SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); + + skmsg = &skdev->skmsg_table[msg_slot]; + if (skmsg->id == skreq->fitmsg_id) { + SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); + SKD_ASSERT(skmsg->outstanding > 0); + skmsg->outstanding--; + if (skmsg->outstanding == 0) { + skmsg->state = SKD_MSG_STATE_IDLE; + skmsg->id += SKD_ID_INCR; + skmsg->next = skdev->skmsg_free_list; + skdev->skmsg_free_list = skmsg; + } + } + + /* + * Decrease the number of active requests. + * Also decrements the count in the timeout slot. + */ + SKD_ASSERT(skdev->in_flight > 0); + skdev->in_flight -= 1; + + timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; + SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); + skdev->timeout_slot[timo_slot] -= 1; + + /* + * Reset backpointer + */ + skreq->req = NULL; + + /* + * Reclaim the skd_request_context + */ + skreq->state = SKD_REQ_STATE_IDLE; + skreq->id += SKD_ID_INCR; + skreq->next = skdev->skreq_free_list; + skdev->skreq_free_list = skreq; +} + +#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA + +static void skd_do_inq_page_00(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 *skcomp, + volatile struct fit_comp_error_info *skerr, + uint8_t *cdb, uint8_t *buf) +{ + uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; + + /* Caller requested "supported pages". The driver needs to insert + * its page. + */ + pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", + skdev->name, __func__, __LINE__); + + /* If the device rejected the request because the CDB was + * improperly formed, then just leave. + */ + if (skcomp->status == SAM_STAT_CHECK_CONDITION && + skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) + return; + + /* Get the amount of space the caller allocated */ + max_bytes = (cdb[3] << 8) | cdb[4]; + + /* Get the number of pages actually returned by the device */ + drive_pages = (buf[2] << 8) | buf[3]; + drive_bytes = drive_pages + 4; + new_size = drive_pages + 1; + + /* Supported pages must be in numerical order, so find where + * the driver page needs to be inserted into the list of + * pages returned by the device. + */ + for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { + if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) + return; /* Device using this page code. abort */ + else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) + break; + } + + if (insert_pt < max_bytes) { + uint16_t u; + + /* Shift everything up one byte to make room. 
*/ + for (u = new_size + 3; u > insert_pt; u--) + buf[u] = buf[u - 1]; + buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; + + /* SCSI byte order increment of num_returned_bytes by 1 */ + skcomp->num_returned_bytes = + be32_to_cpu(skcomp->num_returned_bytes) + 1; + skcomp->num_returned_bytes = + be32_to_cpu(skcomp->num_returned_bytes); + } + + /* update page length field to reflect the driver's page too */ + buf[2] = (uint8_t)((new_size >> 8) & 0xFF); + buf[3] = (uint8_t)((new_size >> 0) & 0xFF); +} + +static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) +{ + int pcie_reg; + u16 pci_bus_speed; + u8 pci_lanes; + + pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_reg) { + u16 linksta; + pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); + + pci_bus_speed = linksta & 0xF; + pci_lanes = (linksta & 0x3F0) >> 4; + } else { + *speed = STEC_LINK_UNKNOWN; + *width = 0xFF; + return; + } + + switch (pci_bus_speed) { + case 1: + *speed = STEC_LINK_2_5GTS; + break; + case 2: + *speed = STEC_LINK_5GTS; + break; + case 3: + *speed = STEC_LINK_8GTS; + break; + default: + *speed = STEC_LINK_UNKNOWN; + break; + } + + if (pci_lanes <= 0x20) + *width = pci_lanes; + else + *width = 0xFF; +} + +static void skd_do_inq_page_da(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 *skcomp, + volatile struct fit_comp_error_info *skerr, + uint8_t *cdb, uint8_t *buf) +{ + struct pci_dev *pdev = skdev->pdev; + unsigned max_bytes; + struct driver_inquiry_data inq; + u16 val; + + pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", + skdev->name, __func__, __LINE__); + + memset(&inq, 0, sizeof(inq)); + + inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; + + skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); + inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); + inq.pcie_device_number = PCI_SLOT(pdev->devfn); + inq.pcie_function_number = PCI_FUNC(pdev->devfn); + + pci_read_config_word(pdev, PCI_VENDOR_ID, &val); + inq.pcie_vendor_id = cpu_to_be16(val); + + pci_read_config_word(pdev, PCI_DEVICE_ID, &val); + inq.pcie_device_id = cpu_to_be16(val); + + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); + inq.pcie_subsystem_vendor_id = cpu_to_be16(val); + + pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); + inq.pcie_subsystem_device_id = cpu_to_be16(val); + + /* Driver version, fixed lenth, padded with spaces on the right */ + inq.driver_version_length = sizeof(inq.driver_version); + memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); + memcpy(inq.driver_version, DRV_VER_COMPL, + min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); + + inq.page_length = cpu_to_be16((sizeof(inq) - 4)); + + /* Clear the error set by the device */ + skcomp->status = SAM_STAT_GOOD; + memset((void *)skerr, 0, sizeof(*skerr)); + + /* copy response into output buffer */ + max_bytes = (cdb[3] << 8) | cdb[4]; + memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); + + skcomp->num_returned_bytes = + be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq))); +} + +static void skd_do_driver_inq(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 *skcomp, + volatile struct fit_comp_error_info *skerr, + uint8_t *cdb, uint8_t *buf) +{ + if (!buf) + return; + else if (cdb[0] != INQUIRY) + return; /* Not an INQUIRY */ + else if ((cdb[1] & 1) == 0) + return; /* EVPD not set */ + else if (cdb[2] == 0) + /* Need to add driver's page to supported pages list */ + skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); + else if (cdb[2] == 
DRIVER_INQ_EVPD_PAGE_CODE) + /* Caller requested driver's page */ + skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); +} + +static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) +{ + if (!sg) + return NULL; + if (!sg_page(sg)) + return NULL; + return sg_virt(sg); +} + +static void skd_process_scsi_inq(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 + *skcomp, + volatile struct fit_comp_error_info *skerr, + struct skd_special_context *skspcl) +{ + uint8_t *buf; + struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; + struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; + + dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, + skspcl->req.sg_data_dir); + buf = skd_sg_1st_page_ptr(skspcl->req.sg); + + if (buf) + skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); +} + + +static int skd_isr_completion_posted(struct skd_device *skdev, + int limit, int *enqueued) +{ + volatile struct fit_completion_entry_v1 *skcmp = NULL; + volatile struct fit_comp_error_info *skerr; + u16 req_id; + u32 req_slot; + struct skd_request_context *skreq; + u16 cmp_cntxt = 0; + u8 cmp_status = 0; + u8 cmp_cycle = 0; + u32 cmp_bytes = 0; + int rc = 0; + int processed = 0; + + for (;; ) { + SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); + + skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; + cmp_cycle = skcmp->cycle; + cmp_cntxt = skcmp->tag; + cmp_status = skcmp->status; + cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); + + skerr = &skdev->skerr_table[skdev->skcomp_ix]; + + pr_debug("%s:%s:%d " + "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " + "busy=%d rbytes=0x%x proto=%d\n", + skdev->name, __func__, __LINE__, skdev->skcomp_cycle, + skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, + skdev->in_flight, cmp_bytes, skdev->proto_ver); + + if (cmp_cycle != skdev->skcomp_cycle) { + pr_debug("%s:%s:%d end of completions\n", + skdev->name, __func__, __LINE__); + break; + } + /* + * Update the completion queue head index and possibly + * the completion cycle count. 8-bit wrap-around. + */ + skdev->skcomp_ix++; + if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { + skdev->skcomp_ix = 0; + skdev->skcomp_cycle++; + } + + /* + * The command context is a unique 32-bit ID. The low order + * bits help locate the request. The request is usually a + * r/w request (see skd_start() above) or a special request. + */ + req_id = cmp_cntxt; + req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; + + /* Is this other than a r/w request? */ + if (req_slot >= skdev->num_req_context) { + /* + * This is not a completion for a r/w request. + */ + skd_complete_other(skdev, skcmp, skerr); + continue; + } + + skreq = &skdev->skreq_table[req_slot]; + + /* + * Make sure the request ID for the slot matches. 
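+		 * A mismatch means the slot was recycled (its id was
+		 * bumped by SKD_ID_INCR when the earlier request was
+		 * released), so the completion is stale; it is logged
+		 * and skipped.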
+ */ + if (skreq->id != req_id) { + pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", + skdev->name, __func__, __LINE__, + req_id, skreq->id); + { + u16 new_id = cmp_cntxt; + pr_err("(%s): Completion mismatch " + "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", + skd_name(skdev), req_id, + skreq->id, new_id); + + continue; + } + } + + SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); + + if (skreq->state == SKD_REQ_STATE_ABORTED) { + pr_debug("%s:%s:%d reclaim req %p id=%04x\n", + skdev->name, __func__, __LINE__, + skreq, skreq->id); + /* a previously timed out command can + * now be cleaned up */ + skd_release_skreq(skdev, skreq); + continue; + } + + skreq->completion = *skcmp; + if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { + skreq->err_info = *skerr; + skd_log_check_status(skdev, cmp_status, skerr->key, + skerr->code, skerr->qual, + skerr->fruc); + } + /* Release DMA resources for the request. */ + if (skreq->n_sg > 0) + skd_postop_sg_list(skdev, skreq); + + if (!skreq->req) { + pr_debug("%s:%s:%d NULL backptr skdreq %p, " + "req=0x%x req_id=0x%x\n", + skdev->name, __func__, __LINE__, + skreq, skreq->id, req_id); + } else { + /* + * Capture the outcome and post it back to the + * native request. + */ + if (likely(cmp_status == SAM_STAT_GOOD)) + skd_end_request(skdev, skreq, 0); + else + skd_resolve_req_exception(skdev, skreq); + } + + /* + * Release the skreq, its FIT msg (if one), timeout slot, + * and queue depth. + */ + skd_release_skreq(skdev, skreq); + + /* skd_isr_comp_limit equal zero means no limit */ + if (limit) { + if (++processed >= limit) { + rc = 1; + break; + } + } + } + + if ((skdev->state == SKD_DRVR_STATE_PAUSING) + && (skdev->in_flight) == 0) { + skdev->state = SKD_DRVR_STATE_PAUSED; + wake_up_interruptible(&skdev->waitq); + } + + return rc; +} + +static void skd_complete_other(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 *skcomp, + volatile struct fit_comp_error_info *skerr) +{ + u32 req_id = 0; + u32 req_table; + u32 req_slot; + struct skd_special_context *skspcl; + + req_id = skcomp->tag; + req_table = req_id & SKD_ID_TABLE_MASK; + req_slot = req_id & SKD_ID_SLOT_MASK; + + pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", + skdev->name, __func__, __LINE__, + req_table, req_id, req_slot); + + /* + * Based on the request id, determine how to dispatch this completion. + * This swich/case is finding the good cases and forwarding the + * completion entry. Errors are reported below the switch. + */ + switch (req_table) { + case SKD_ID_RW_REQUEST: + /* + * The caller, skd_completion_posted_isr() above, + * handles r/w requests. The only way we get here + * is if the req_slot is out of bounds. + */ + break; + + case SKD_ID_SPECIAL_REQUEST: + /* + * Make sure the req_slot is in bounds and that the id + * matches. + */ + if (req_slot < skdev->n_special) { + skspcl = &skdev->skspcl_table[req_slot]; + if (skspcl->req.id == req_id && + skspcl->req.state == SKD_REQ_STATE_BUSY) { + skd_complete_special(skdev, + skcomp, skerr, skspcl); + return; + } + } + break; + + case SKD_ID_INTERNAL: + if (req_slot == 0) { + skspcl = &skdev->internal_skspcl; + if (skspcl->req.id == req_id && + skspcl->req.state == SKD_REQ_STATE_BUSY) { + skd_complete_internal(skdev, + skcomp, skerr, skspcl); + return; + } + } + break; + + case SKD_ID_FIT_MSG: + /* + * These id's should never appear in a completion record. + */ + break; + + default: + /* + * These id's should never appear anywhere; + */ + break; + } + + /* + * If we get here it is a bad or stale id. 
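+	 * Nothing references the tag any more; beyond the debug trace
+	 * at the top of this function the entry is simply dropped.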
+ */ +} + +static void skd_complete_special(struct skd_device *skdev, + volatile struct fit_completion_entry_v1 + *skcomp, + volatile struct fit_comp_error_info *skerr, + struct skd_special_context *skspcl) +{ + pr_debug("%s:%s:%d completing special request %p\n", + skdev->name, __func__, __LINE__, skspcl); + if (skspcl->orphaned) { + /* Discard orphaned request */ + /* ?: Can this release directly or does it need + * to use a worker? */ + pr_debug("%s:%s:%d release orphaned %p\n", + skdev->name, __func__, __LINE__, skspcl); + skd_release_special(skdev, skspcl); + return; + } + + skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); + + skspcl->req.state = SKD_REQ_STATE_COMPLETED; + skspcl->req.completion = *skcomp; + skspcl->req.err_info = *skerr; + + skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, + skerr->code, skerr->qual, skerr->fruc); + + wake_up_interruptible(&skdev->waitq); +} + +/* assume spinlock is already held */ +static void skd_release_special(struct skd_device *skdev, + struct skd_special_context *skspcl) +{ + int i, was_depleted; + + for (i = 0; i < skspcl->req.n_sg; i++) { + struct page *page = sg_page(&skspcl->req.sg[i]); + __free_page(page); + } + + was_depleted = (skdev->skspcl_free_list == NULL); + + skspcl->req.state = SKD_REQ_STATE_IDLE; + skspcl->req.id += SKD_ID_INCR; + skspcl->req.next = + (struct skd_request_context *)skdev->skspcl_free_list; + skdev->skspcl_free_list = (struct skd_special_context *)skspcl; + + if (was_depleted) { + pr_debug("%s:%s:%d skspcl was depleted\n", + skdev->name, __func__, __LINE__); + /* Free list was depleted. Their might be waiters. */ + wake_up_interruptible(&skdev->waitq); + } +} + +static void skd_reset_skcomp(struct skd_device *skdev) +{ + u32 nbytes; + struct fit_completion_entry_v1 *skcomp; + + nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; + nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; + + memset(skdev->skcomp_table, 0, nbytes); + + skdev->skcomp_ix = 0; + skdev->skcomp_cycle = 1; +} + +/* + ***************************************************************************** + * INTERRUPTS + ***************************************************************************** + */ +static void skd_completion_worker(struct work_struct *work) +{ + struct skd_device *skdev = + container_of(work, struct skd_device, completion_worker); + unsigned long flags; + int flush_enqueued = 0; + + spin_lock_irqsave(&skdev->lock, flags); + + /* + * pass in limit=0, which means no limit.. + * process everything in compq + */ + skd_isr_completion_posted(skdev, 0, &flush_enqueued); + skd_request_fn(skdev->queue); + + spin_unlock_irqrestore(&skdev->lock, flags); +} + +static void skd_isr_msg_from_dev(struct skd_device *skdev); + +irqreturn_t +static skd_isr(int irq, void *ptr) +{ + struct skd_device *skdev; + u32 intstat; + u32 ack; + int rc = 0; + int deferred = 0; + int flush_enqueued = 0; + + skdev = (struct skd_device *)ptr; + spin_lock(&skdev->lock); + + for (;; ) { + intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); + + ack = FIT_INT_DEF_MASK; + ack &= intstat; + + pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", + skdev->name, __func__, __LINE__, intstat, ack); + + /* As long as there is an int pending on device, keep + * running loop. When none, get out, but if we've never + * done any processing, call completion handler? + */ + if (ack == 0) { + /* No interrupts on device, but run the completion + * processor anyway? 
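+			 * If nothing was handled on this pass (rc == 0)
+			 * and the drive is online, a single deferred pass
+			 * is scheduled via the completion worker below.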
+ */ + if (rc == 0) + if (likely (skdev->state + == SKD_DRVR_STATE_ONLINE)) + deferred = 1; + break; + } + + rc = IRQ_HANDLED; + + SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); + + if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && + (skdev->state != SKD_DRVR_STATE_STOPPING))) { + if (intstat & FIT_ISH_COMPLETION_POSTED) { + /* + * If we have already deferred completion + * processing, don't bother running it again + */ + if (deferred == 0) + deferred = + skd_isr_completion_posted(skdev, + skd_isr_comp_limit, &flush_enqueued); + } + + if (intstat & FIT_ISH_FW_STATE_CHANGE) { + skd_isr_fwstate(skdev); + if (skdev->state == SKD_DRVR_STATE_FAULT || + skdev->state == + SKD_DRVR_STATE_DISAPPEARED) { + spin_unlock(&skdev->lock); + return rc; + } + } + + if (intstat & FIT_ISH_MSG_FROM_DEV) + skd_isr_msg_from_dev(skdev); + } + } + + if (unlikely(flush_enqueued)) + skd_request_fn(skdev->queue); + + if (deferred) + schedule_work(&skdev->completion_worker); + else if (!flush_enqueued) + skd_request_fn(skdev->queue); + + spin_unlock(&skdev->lock); + + return rc; +} + +static void skd_drive_fault(struct skd_device *skdev) +{ + skdev->state = SKD_DRVR_STATE_FAULT; + pr_err("(%s): Drive FAULT\n", skd_name(skdev)); +} + +static void skd_drive_disappeared(struct skd_device *skdev) +{ + skdev->state = SKD_DRVR_STATE_DISAPPEARED; + pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); +} + +static void skd_isr_fwstate(struct skd_device *skdev) +{ + u32 sense; + u32 state; + u32 mtd; + int prev_driver_state = skdev->state; + + sense = SKD_READL(skdev, FIT_STATUS); + state = sense & FIT_SR_DRIVE_STATE_MASK; + + pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", + skd_name(skdev), + skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, + skd_drive_state_to_str(state), state); + + skdev->drive_state = state; + + switch (skdev->drive_state) { + case FIT_SR_DRIVE_INIT: + if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { + skd_disable_interrupts(skdev); + break; + } + if (skdev->state == SKD_DRVR_STATE_RESTARTING) + skd_recover_requests(skdev, 0); + if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { + skdev->timer_countdown = SKD_STARTING_TIMO; + skdev->state = SKD_DRVR_STATE_STARTING; + skd_soft_reset(skdev); + break; + } + mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_SR_DRIVE_ONLINE: + skdev->cur_max_queue_depth = skd_max_queue_depth; + if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) + skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; + + skdev->queue_low_water_mark = + skdev->cur_max_queue_depth * 2 / 3 + 1; + if (skdev->queue_low_water_mark < 1) + skdev->queue_low_water_mark = 1; + pr_info( + "(%s): Queue depth limit=%d dev=%d lowat=%d\n", + skd_name(skdev), + skdev->cur_max_queue_depth, + skdev->dev_max_queue_depth, skdev->queue_low_water_mark); + + skd_refresh_device_data(skdev); + break; + + case FIT_SR_DRIVE_BUSY: + skdev->state = SKD_DRVR_STATE_BUSY; + skdev->timer_countdown = SKD_BUSY_TIMO; + skd_quiesce_dev(skdev); + break; + case FIT_SR_DRIVE_BUSY_SANITIZE: + /* set timer for 3 seconds, we'll abort any unfinished + * commands after that expires + */ + skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; + skdev->timer_countdown = SKD_TIMER_SECONDS(3); + blk_start_queue(skdev->queue); + break; + case FIT_SR_DRIVE_BUSY_ERASE: + skdev->state = SKD_DRVR_STATE_BUSY_ERASE; + skdev->timer_countdown = SKD_BUSY_TIMO; + break; + case FIT_SR_DRIVE_OFFLINE: + skdev->state = SKD_DRVR_STATE_IDLE; + break; + 
case FIT_SR_DRIVE_SOFT_RESET: + switch (skdev->state) { + case SKD_DRVR_STATE_STARTING: + case SKD_DRVR_STATE_RESTARTING: + /* Expected by a caller of skd_soft_reset() */ + break; + default: + skdev->state = SKD_DRVR_STATE_RESTARTING; + break; + } + break; + case FIT_SR_DRIVE_FW_BOOTING: + pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", + skdev->name, __func__, __LINE__, skdev->name); + skdev->state = SKD_DRVR_STATE_WAIT_BOOT; + skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; + break; + + case FIT_SR_DRIVE_DEGRADED: + case FIT_SR_PCIE_LINK_DOWN: + case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: + break; + + case FIT_SR_DRIVE_FAULT: + skd_drive_fault(skdev); + skd_recover_requests(skdev, 0); + blk_start_queue(skdev->queue); + break; + + /* PCIe bus returned all Fs? */ + case 0xFF: + pr_info("(%s): state=0x%x sense=0x%x\n", + skd_name(skdev), state, sense); + skd_drive_disappeared(skdev); + skd_recover_requests(skdev, 0); + blk_start_queue(skdev->queue); + break; + default: + /* + * Uknown FW State. Wait for a state we recognize. + */ + break; + } + pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", + skd_name(skdev), + skd_skdev_state_to_str(prev_driver_state), prev_driver_state, + skd_skdev_state_to_str(skdev->state), skdev->state); +} + +static void skd_recover_requests(struct skd_device *skdev, int requeue) +{ + int i; + + for (i = 0; i < skdev->num_req_context; i++) { + struct skd_request_context *skreq = &skdev->skreq_table[i]; + + if (skreq->state == SKD_REQ_STATE_BUSY) { + skd_log_skreq(skdev, skreq, "recover"); + + SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); + SKD_ASSERT(skreq->req != NULL); + + /* Release DMA resources for the request. */ + if (skreq->n_sg > 0) + skd_postop_sg_list(skdev, skreq); + + if (requeue && + (unsigned long) ++skreq->req->special < + SKD_MAX_RETRIES) + blk_requeue_request(skdev->queue, skreq->req); + else + skd_end_request(skdev, skreq, -EIO); + + skreq->req = NULL; + + skreq->state = SKD_REQ_STATE_IDLE; + skreq->id += SKD_ID_INCR; + } + if (i > 0) + skreq[-1].next = skreq; + skreq->next = NULL; + } + skdev->skreq_free_list = skdev->skreq_table; + + for (i = 0; i < skdev->num_fitmsg_context; i++) { + struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; + + if (skmsg->state == SKD_MSG_STATE_BUSY) { + skd_log_skmsg(skdev, skmsg, "salvaged"); + SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); + skmsg->state = SKD_MSG_STATE_IDLE; + skmsg->id += SKD_ID_INCR; + } + if (i > 0) + skmsg[-1].next = skmsg; + skmsg->next = NULL; + } + skdev->skmsg_free_list = skdev->skmsg_table; + + for (i = 0; i < skdev->n_special; i++) { + struct skd_special_context *skspcl = &skdev->skspcl_table[i]; + + /* If orphaned, reclaim it because it has already been reported + * to the process as an error (it was just waiting for + * a completion that didn't come, and now it will never come) + * If busy, change to a state that will cause it to error + * out in the wait routine and let it do the normal + * reporting and reclaiming + */ + if (skspcl->req.state == SKD_REQ_STATE_BUSY) { + if (skspcl->orphaned) { + pr_debug("%s:%s:%d orphaned %p\n", + skdev->name, __func__, __LINE__, + skspcl); + skd_release_special(skdev, skspcl); + } else { + pr_debug("%s:%s:%d not orphaned %p\n", + skdev->name, __func__, __LINE__, + skspcl); + skspcl->req.state = SKD_REQ_STATE_ABORTED; + } + } + } + skdev->skspcl_free_list = skdev->skspcl_table; + + for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) + skdev->timeout_slot[i] = 0; + + skdev->in_flight = 0; +} + +static void skd_isr_msg_from_dev(struct skd_device *skdev) +{ + u32 mfd; 
+ u32 mtd; + u32 data; + + mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); + + pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", + skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); + + /* ignore any mtd that is an ack for something we didn't send */ + if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) + return; + + switch (FIT_MXD_TYPE(mfd)) { + case FIT_MTD_FITFW_INIT: + skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); + + if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { + pr_err("(%s): protocol mismatch\n", + skdev->name); + pr_err("(%s): got=%d support=%d\n", + skdev->name, skdev->proto_ver, + FIT_PROTOCOL_VERSION_1); + pr_err("(%s): please upgrade driver\n", + skdev->name); + skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; + skd_soft_reset(skdev); + break; + } + mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_MTD_GET_CMDQ_DEPTH: + skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); + mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, + SKD_N_COMPLETION_ENTRY); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_MTD_SET_COMPQ_DEPTH: + SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); + mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_MTD_SET_COMPQ_ADDR: + skd_reset_skcomp(skdev); + mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_MTD_CMD_LOG_HOST_ID: + skdev->connect_time_stamp = get_seconds(); + data = skdev->connect_time_stamp & 0xFFFF; + mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_MTD_CMD_LOG_TIME_STAMP_LO: + skdev->drive_jiffies = FIT_MXD_DATA(mfd); + data = (skdev->connect_time_stamp >> 16) & 0xFFFF; + mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + break; + + case FIT_MTD_CMD_LOG_TIME_STAMP_HI: + skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); + mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); + SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); + skdev->last_mtd = mtd; + + pr_err("(%s): Time sync driver=0x%x device=0x%x\n", + skd_name(skdev), + skdev->connect_time_stamp, skdev->drive_jiffies); + break; + + case FIT_MTD_ARM_QUEUE: + skdev->last_mtd = 0; + /* + * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. + */ + break; + + default: + break; + } +} + +static void skd_disable_interrupts(struct skd_device *skdev) +{ + u32 sense; + + sense = SKD_READL(skdev, FIT_CONTROL); + sense &= ~FIT_CR_ENABLE_INTERRUPTS; + SKD_WRITEL(skdev, sense, FIT_CONTROL); + pr_debug("%s:%s:%d sense 0x%x\n", + skdev->name, __func__, __LINE__, sense); + + /* Note that the 1s is written. A 1-bit means + * disable, a 0 means enable. + */ + SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); +} + +static void skd_enable_interrupts(struct skd_device *skdev) +{ + u32 val; + + /* unmask interrupts first */ + val = FIT_ISH_FW_STATE_CHANGE + + FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; + + /* Note that the compliment of mask is written. A 1-bit means + * disable, a 0 means enable. 
*/ + SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); + pr_debug("%s:%s:%d interrupt mask=0x%x\n", + skdev->name, __func__, __LINE__, ~val); + + val = SKD_READL(skdev, FIT_CONTROL); + val |= FIT_CR_ENABLE_INTERRUPTS; + pr_debug("%s:%s:%d control=0x%x\n", + skdev->name, __func__, __LINE__, val); + SKD_WRITEL(skdev, val, FIT_CONTROL); +} + +/* + ***************************************************************************** + * START, STOP, RESTART, QUIESCE, UNQUIESCE + ***************************************************************************** + */ + +static void skd_soft_reset(struct skd_device *skdev) +{ + u32 val; + + val = SKD_READL(skdev, FIT_CONTROL); + val |= (FIT_CR_SOFT_RESET); + pr_debug("%s:%s:%d control=0x%x\n", + skdev->name, __func__, __LINE__, val); + SKD_WRITEL(skdev, val, FIT_CONTROL); +} + +static void skd_start_device(struct skd_device *skdev) +{ + unsigned long flags; + u32 sense; + u32 state; + + spin_lock_irqsave(&skdev->lock, flags); + + /* ack all ghost interrupts */ + SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); + + sense = SKD_READL(skdev, FIT_STATUS); + + pr_debug("%s:%s:%d initial status=0x%x\n", + skdev->name, __func__, __LINE__, sense); + + state = sense & FIT_SR_DRIVE_STATE_MASK; + skdev->drive_state = state; + skdev->last_mtd = 0; + + skdev->state = SKD_DRVR_STATE_STARTING; + skdev->timer_countdown = SKD_STARTING_TIMO; + + skd_enable_interrupts(skdev); + + switch (skdev->drive_state) { + case FIT_SR_DRIVE_OFFLINE: + pr_err("(%s): Drive offline...\n", skd_name(skdev)); + break; + + case FIT_SR_DRIVE_FW_BOOTING: + pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", + skdev->name, __func__, __LINE__, skdev->name); + skdev->state = SKD_DRVR_STATE_WAIT_BOOT; + skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; + break; + + case FIT_SR_DRIVE_BUSY_SANITIZE: + pr_info("(%s): Start: BUSY_SANITIZE\n", + skd_name(skdev)); + skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; + skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; + break; + + case FIT_SR_DRIVE_BUSY_ERASE: + pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); + skdev->state = SKD_DRVR_STATE_BUSY_ERASE; + skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; + break; + + case FIT_SR_DRIVE_INIT: + case FIT_SR_DRIVE_ONLINE: + skd_soft_reset(skdev); + break; + + case FIT_SR_DRIVE_BUSY: + pr_err("(%s): Drive Busy...\n", skd_name(skdev)); + skdev->state = SKD_DRVR_STATE_BUSY; + skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; + break; + + case FIT_SR_DRIVE_SOFT_RESET: + pr_err("(%s) drive soft reset in prog\n", + skd_name(skdev)); + break; + + case FIT_SR_DRIVE_FAULT: + /* Fault state is bad...soft reset won't do it... + * Hard reset, maybe, but does it work on device? + * For now, just fault so the system doesn't hang. + */ + skd_drive_fault(skdev); + /*start the queue so we can respond with error to requests */ + pr_debug("%s:%s:%d starting %s queue\n", + skdev->name, __func__, __LINE__, skdev->name); + blk_start_queue(skdev->queue); + skdev->gendisk_on = -1; + wake_up_interruptible(&skdev->waitq); + break; + + case 0xFF: + /* Most likely the device isn't there or isn't responding + * to the BAR1 addresses. 
*/ + skd_drive_disappeared(skdev); + /*start the queue so we can respond with error to requests */ + pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", + skdev->name, __func__, __LINE__, skdev->name); + blk_start_queue(skdev->queue); + skdev->gendisk_on = -1; + wake_up_interruptible(&skdev->waitq); + break; + + default: + pr_err("(%s) Start: unknown state %x\n", + skd_name(skdev), skdev->drive_state); + break; + } + + state = SKD_READL(skdev, FIT_CONTROL); + pr_debug("%s:%s:%d FIT Control Status=0x%x\n", + skdev->name, __func__, __LINE__, state); + + state = SKD_READL(skdev, FIT_INT_STATUS_HOST); + pr_debug("%s:%s:%d Intr Status=0x%x\n", + skdev->name, __func__, __LINE__, state); + + state = SKD_READL(skdev, FIT_INT_MASK_HOST); + pr_debug("%s:%s:%d Intr Mask=0x%x\n", + skdev->name, __func__, __LINE__, state); + + state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); + pr_debug("%s:%s:%d Msg from Dev=0x%x\n", + skdev->name, __func__, __LINE__, state); + + state = SKD_READL(skdev, FIT_HW_VERSION); + pr_debug("%s:%s:%d HW version=0x%x\n", + skdev->name, __func__, __LINE__, state); + + spin_unlock_irqrestore(&skdev->lock, flags); +} + +static void skd_stop_device(struct skd_device *skdev) +{ + unsigned long flags; + struct skd_special_context *skspcl = &skdev->internal_skspcl; + u32 dev_state; + int i; + + spin_lock_irqsave(&skdev->lock, flags); + + if (skdev->state != SKD_DRVR_STATE_ONLINE) { + pr_err("(%s): skd_stop_device not online no sync\n", + skd_name(skdev)); + goto stop_out; + } + + if (skspcl->req.state != SKD_REQ_STATE_IDLE) { + pr_err("(%s): skd_stop_device no special\n", + skd_name(skdev)); + goto stop_out; + } + + skdev->state = SKD_DRVR_STATE_SYNCING; + skdev->sync_done = 0; + + skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); + + spin_unlock_irqrestore(&skdev->lock, flags); + + wait_event_interruptible_timeout(skdev->waitq, + (skdev->sync_done), (10 * HZ)); + + spin_lock_irqsave(&skdev->lock, flags); + + switch (skdev->sync_done) { + case 0: + pr_err("(%s): skd_stop_device no sync\n", + skd_name(skdev)); + break; + case 1: + pr_err("(%s): skd_stop_device sync done\n", + skd_name(skdev)); + break; + default: + pr_err("(%s): skd_stop_device sync error\n", + skd_name(skdev)); + } + +stop_out: + skdev->state = SKD_DRVR_STATE_STOPPING; + spin_unlock_irqrestore(&skdev->lock, flags); + + skd_kill_timer(skdev); + + spin_lock_irqsave(&skdev->lock, flags); + skd_disable_interrupts(skdev); + + /* ensure all ints on device are cleared */ + /* soft reset the device to unload with a clean slate */ + SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); + SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); + + spin_unlock_irqrestore(&skdev->lock, flags); + + /* poll every 100ms, 1 second timeout */ + for (i = 0; i < 10; i++) { + dev_state = + SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; + if (dev_state == FIT_SR_DRIVE_INIT) + break; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(100)); + } + + if (dev_state != FIT_SR_DRIVE_INIT) + pr_err("(%s): skd_stop_device state error 0x%02x\n", + skd_name(skdev), dev_state); +} + +/* assume spinlock is held */ +static void skd_restart_device(struct skd_device *skdev) +{ + u32 state; + + /* ack all ghost interrupts */ + SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); + + state = SKD_READL(skdev, FIT_STATUS); + + pr_debug("%s:%s:%d drive status=0x%x\n", + skdev->name, __func__, __LINE__, state); + + state &= FIT_SR_DRIVE_STATE_MASK; + skdev->drive_state = state; + skdev->last_mtd = 0; + + 
skdev->state = SKD_DRVR_STATE_RESTARTING; + skdev->timer_countdown = SKD_RESTARTING_TIMO; + + skd_soft_reset(skdev); +} + +/* assume spinlock is held */ +static int skd_quiesce_dev(struct skd_device *skdev) +{ + int rc = 0; + + switch (skdev->state) { + case SKD_DRVR_STATE_BUSY: + case SKD_DRVR_STATE_BUSY_IMMINENT: + pr_debug("%s:%s:%d stopping %s queue\n", + skdev->name, __func__, __LINE__, skdev->name); + blk_stop_queue(skdev->queue); + break; + case SKD_DRVR_STATE_ONLINE: + case SKD_DRVR_STATE_STOPPING: + case SKD_DRVR_STATE_SYNCING: + case SKD_DRVR_STATE_PAUSING: + case SKD_DRVR_STATE_PAUSED: + case SKD_DRVR_STATE_STARTING: + case SKD_DRVR_STATE_RESTARTING: + case SKD_DRVR_STATE_RESUMING: + default: + rc = -EINVAL; + pr_debug("%s:%s:%d state [%d] not implemented\n", + skdev->name, __func__, __LINE__, skdev->state); + } + return rc; +} + +/* assume spinlock is held */ +static int skd_unquiesce_dev(struct skd_device *skdev) +{ + int prev_driver_state = skdev->state; + + skd_log_skdev(skdev, "unquiesce"); + if (skdev->state == SKD_DRVR_STATE_ONLINE) { + pr_debug("%s:%s:%d **** device already ONLINE\n", + skdev->name, __func__, __LINE__); + return 0; + } + if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { + /* + * If there has been an state change to other than + * ONLINE, we will rely on controller state change + * to come back online and restart the queue. + * The BUSY state means that driver is ready to + * continue normal processing but waiting for controller + * to become available. + */ + skdev->state = SKD_DRVR_STATE_BUSY; + pr_debug("%s:%s:%d drive BUSY state\n", + skdev->name, __func__, __LINE__); + return 0; + } + + /* + * Drive has just come online, driver is either in startup, + * paused performing a task, or bust waiting for hardware. + */ + switch (skdev->state) { + case SKD_DRVR_STATE_PAUSED: + case SKD_DRVR_STATE_BUSY: + case SKD_DRVR_STATE_BUSY_IMMINENT: + case SKD_DRVR_STATE_BUSY_ERASE: + case SKD_DRVR_STATE_STARTING: + case SKD_DRVR_STATE_RESTARTING: + case SKD_DRVR_STATE_FAULT: + case SKD_DRVR_STATE_IDLE: + case SKD_DRVR_STATE_LOAD: + skdev->state = SKD_DRVR_STATE_ONLINE; + pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", + skd_name(skdev), + skd_skdev_state_to_str(prev_driver_state), + prev_driver_state, skd_skdev_state_to_str(skdev->state), + skdev->state); + pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", + skdev->name, __func__, __LINE__); + pr_debug("%s:%s:%d starting %s queue\n", + skdev->name, __func__, __LINE__, skdev->name); + pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); + blk_start_queue(skdev->queue); + skdev->gendisk_on = 1; + wake_up_interruptible(&skdev->waitq); + break; + + case SKD_DRVR_STATE_DISAPPEARED: + default: + pr_debug("%s:%s:%d **** driver state %d, not implemented \n", + skdev->name, __func__, __LINE__, + skdev->state); + return -EBUSY; + } + return 0; +} + +/* + ***************************************************************************** + * PCIe MSI/MSI-X INTERRUPT HANDLERS + ***************************************************************************** + */ + +static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) +{ + struct skd_device *skdev = skd_host_data; + unsigned long flags; + + spin_lock_irqsave(&skdev->lock, flags); + pr_debug("%s:%s:%d MSIX = 0x%x\n", + skdev->name, __func__, __LINE__, + SKD_READL(skdev, FIT_INT_STATUS_HOST)); + pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), + irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); + SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, 
FIT_INT_STATUS_HOST); + spin_unlock_irqrestore(&skdev->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) +{ + struct skd_device *skdev = skd_host_data; + unsigned long flags; + + spin_lock_irqsave(&skdev->lock, flags); + pr_debug("%s:%s:%d MSIX = 0x%x\n", + skdev->name, __func__, __LINE__, + SKD_READL(skdev, FIT_INT_STATUS_HOST)); + SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); + skd_isr_fwstate(skdev); + spin_unlock_irqrestore(&skdev->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t skd_comp_q(int irq, void *skd_host_data) +{ + struct skd_device *skdev = skd_host_data; + unsigned long flags; + int flush_enqueued = 0; + int deferred; + + spin_lock_irqsave(&skdev->lock, flags); + pr_debug("%s:%s:%d MSIX = 0x%x\n", + skdev->name, __func__, __LINE__, + SKD_READL(skdev, FIT_INT_STATUS_HOST)); + SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); + deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, + &flush_enqueued); + if (flush_enqueued) + skd_request_fn(skdev->queue); + + if (deferred) + schedule_work(&skdev->completion_worker); + else if (!flush_enqueued) + skd_request_fn(skdev->queue); + + spin_unlock_irqrestore(&skdev->lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) +{ + struct skd_device *skdev = skd_host_data; + unsigned long flags; + + spin_lock_irqsave(&skdev->lock, flags); + pr_debug("%s:%s:%d MSIX = 0x%x\n", + skdev->name, __func__, __LINE__, + SKD_READL(skdev, FIT_INT_STATUS_HOST)); + SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); + skd_isr_msg_from_dev(skdev); + spin_unlock_irqrestore(&skdev->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) +{ + struct skd_device *skdev = skd_host_data; + unsigned long flags; + + spin_lock_irqsave(&skdev->lock, flags); + pr_debug("%s:%s:%d MSIX = 0x%x\n", + skdev->name, __func__, __LINE__, + SKD_READL(skdev, FIT_INT_STATUS_HOST)); + SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); + spin_unlock_irqrestore(&skdev->lock, flags); + return IRQ_HANDLED; +} + +/* + ***************************************************************************** + * PCIe MSI/MSI-X SETUP + ***************************************************************************** + */ + +struct skd_msix_entry { + int have_irq; + u32 vector; + u32 entry; + struct skd_device *rsp; + char isr_name[30]; +}; + +struct skd_init_msix_entry { + const char *name; + irq_handler_t handler; +}; + +#define SKD_MAX_MSIX_COUNT 13 +#define SKD_MIN_MSIX_COUNT 7 +#define SKD_BASE_MSIX_IRQ 4 + +static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { + { "(DMA 0)", skd_reserved_isr }, + { "(DMA 1)", skd_reserved_isr }, + { "(DMA 2)", skd_reserved_isr }, + { "(DMA 3)", skd_reserved_isr }, + { "(State Change)", skd_statec_isr }, + { "(COMPL_Q)", skd_comp_q }, + { "(MSG)", skd_msg_isr }, + { "(Reserved)", skd_reserved_isr }, + { "(Reserved)", skd_reserved_isr }, + { "(Queue Full 0)", skd_qfull_isr }, + { "(Queue Full 1)", skd_qfull_isr }, + { "(Queue Full 2)", skd_qfull_isr }, + { "(Queue Full 3)", skd_qfull_isr }, +}; + +static void skd_release_msix(struct skd_device *skdev) +{ + struct skd_msix_entry *qentry; + int i; + + if (skdev->msix_entries == NULL) + return; + for (i = 0; i < skdev->msix_count; i++) { + qentry = &skdev->msix_entries[i]; + skdev = qentry->rsp; + + if (qentry->have_irq) + devm_free_irq(&skdev->pdev->dev, + qentry->vector, 
qentry->rsp); + } + pci_disable_msix(skdev->pdev); + kfree(skdev->msix_entries); + skdev->msix_count = 0; + skdev->msix_entries = NULL; +} + +static int skd_acquire_msix(struct skd_device *skdev) +{ + int i, rc; + struct pci_dev *pdev; + struct msix_entry *entries = NULL; + struct skd_msix_entry *qentry; + + pdev = skdev->pdev; + skdev->msix_count = SKD_MAX_MSIX_COUNT; + entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT, + GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) + entries[i].entry = i; + + rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT); + if (rc < 0) + goto msix_out; + if (rc) { + if (rc < SKD_MIN_MSIX_COUNT) { + pr_err("(%s): failed to enable MSI-X %d\n", + skd_name(skdev), rc); + goto msix_out; + } + pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n", + skdev->name, __func__, __LINE__, + pci_name(pdev), skdev->name, rc); + + skdev->msix_count = rc; + rc = pci_enable_msix(pdev, entries, skdev->msix_count); + if (rc) { + pr_err("(%s): failed to enable MSI-X " + "support (%d) %d\n", + skd_name(skdev), skdev->msix_count, rc); + goto msix_out; + } + } + skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * + skdev->msix_count, GFP_KERNEL); + if (!skdev->msix_entries) { + rc = -ENOMEM; + skdev->msix_count = 0; + pr_err("(%s): msix table allocation error\n", + skd_name(skdev)); + goto msix_out; + } + + qentry = skdev->msix_entries; + for (i = 0; i < skdev->msix_count; i++) { + qentry->vector = entries[i].vector; + qentry->entry = entries[i].entry; + qentry->rsp = NULL; + qentry->have_irq = 0; + pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n", + skdev->name, __func__, __LINE__, + pci_name(pdev), skdev->name, + i, qentry->vector, qentry->entry); + qentry++; + } + + /* Enable MSI-X vectors for the base queue */ + for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { + qentry = &skdev->msix_entries[i]; + snprintf(qentry->isr_name, sizeof(qentry->isr_name), + "%s%d-msix %s", DRV_NAME, skdev->devno, + msix_entries[i].name); + rc = devm_request_irq(&skdev->pdev->dev, qentry->vector, + msix_entries[i].handler, 0, + qentry->isr_name, skdev); + if (rc) { + pr_err("(%s): Unable to register(%d) MSI-X " + "handler %d: %s\n", + skd_name(skdev), rc, i, qentry->isr_name); + goto msix_out; + } else { + qentry->have_irq = 1; + qentry->rsp = skdev; + } + } + pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", + skdev->name, __func__, __LINE__, + pci_name(pdev), skdev->name, skdev->msix_count); + return 0; + +msix_out: + if (entries) + kfree(entries); + skd_release_msix(skdev); + return rc; +} + +static int skd_acquire_irq(struct skd_device *skdev) +{ + int rc; + struct pci_dev *pdev; + + pdev = skdev->pdev; + skdev->msix_count = 0; + +RETRY_IRQ_TYPE: + switch (skdev->irq_type) { + case SKD_IRQ_MSIX: + rc = skd_acquire_msix(skdev); + if (!rc) + pr_info("(%s): MSI-X %d irqs enabled\n", + skd_name(skdev), skdev->msix_count); + else { + pr_err( + "(%s): failed to enable MSI-X, re-trying with MSI %d\n", + skd_name(skdev), rc); + skdev->irq_type = SKD_IRQ_MSI; + goto RETRY_IRQ_TYPE; + } + break; + case SKD_IRQ_MSI: + snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi", + DRV_NAME, skdev->devno); + rc = pci_enable_msi(pdev); + if (!rc) { + rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0, + skdev->isr_name, skdev); + if (rc) { + pci_disable_msi(pdev); + pr_err( + "(%s): failed to allocate the MSI interrupt %d\n", + skd_name(skdev), rc); + goto RETRY_IRQ_LEGACY; + } + pr_info("(%s): MSI irq %d enabled\n", + 
skd_name(skdev), pdev->irq); + } else { +RETRY_IRQ_LEGACY: + pr_err( + "(%s): failed to enable MSI, re-trying with LEGACY %d\n", + skd_name(skdev), rc); + skdev->irq_type = SKD_IRQ_LEGACY; + goto RETRY_IRQ_TYPE; + } + break; + case SKD_IRQ_LEGACY: + snprintf(skdev->isr_name, sizeof(skdev->isr_name), + "%s%d-legacy", DRV_NAME, skdev->devno); + rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, + IRQF_SHARED, skdev->isr_name, skdev); + if (!rc) + pr_info("(%s): LEGACY irq %d enabled\n", + skd_name(skdev), pdev->irq); + else + pr_err("(%s): request LEGACY irq error %d\n", + skd_name(skdev), rc); + break; + default: + pr_info("(%s): irq_type %d invalid, re-set to %d\n", + skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT); + skdev->irq_type = SKD_IRQ_LEGACY; + goto RETRY_IRQ_TYPE; + } + return rc; +} + +static void skd_release_irq(struct skd_device *skdev) +{ + switch (skdev->irq_type) { + case SKD_IRQ_MSIX: + skd_release_msix(skdev); + break; + case SKD_IRQ_MSI: + devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); + pci_disable_msi(skdev->pdev); + break; + case SKD_IRQ_LEGACY: + devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); + break; + default: + pr_err("(%s): wrong irq type %d!", + skd_name(skdev), skdev->irq_type); + break; + } +} + +/* + ***************************************************************************** + * CONSTRUCT + ***************************************************************************** + */ + +static int skd_cons_skcomp(struct skd_device *skdev) +{ + int rc = 0; + struct fit_completion_entry_v1 *skcomp; + u32 nbytes; + + nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; + nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; + + pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", + skdev->name, __func__, __LINE__, + nbytes, SKD_N_COMPLETION_ENTRY); + + skcomp = pci_alloc_consistent(skdev->pdev, nbytes, + &skdev->cq_dma_address); + + if (skcomp == NULL) { + rc = -ENOMEM; + goto err_out; + } + + memset(skcomp, 0, nbytes); + + skdev->skcomp_table = skcomp; + skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + + sizeof(*skcomp) * + SKD_N_COMPLETION_ENTRY); + +err_out: + return rc; +} + +static int skd_cons_skmsg(struct skd_device *skdev) +{ + int rc = 0; + u32 i; + + pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", + skdev->name, __func__, __LINE__, + sizeof(struct skd_fitmsg_context), + skdev->num_fitmsg_context, + sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); + + skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) + *skdev->num_fitmsg_context, GFP_KERNEL); + if (skdev->skmsg_table == NULL) { + rc = -ENOMEM; + goto err_out; + } + + for (i = 0; i < skdev->num_fitmsg_context; i++) { + struct skd_fitmsg_context *skmsg; + + skmsg = &skdev->skmsg_table[i]; + + skmsg->id = i + SKD_ID_FIT_MSG; + + skmsg->state = SKD_MSG_STATE_IDLE; + skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, + SKD_N_FITMSG_BYTES + 64, + &skmsg->mb_dma_address); + + if (skmsg->msg_buf == NULL) { + rc = -ENOMEM; + goto err_out; + } + + skmsg->offset = (u32)((u64)skmsg->msg_buf & + (~FIT_QCMD_BASE_ADDRESS_MASK)); + skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; + skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & + FIT_QCMD_BASE_ADDRESS_MASK); + skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; + skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; + memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); + + skmsg->next = &skmsg[1]; + } + + /* Free list is in order starting with the 0th 
entry. */ + skdev->skmsg_table[i - 1].next = NULL; + skdev->skmsg_free_list = skdev->skmsg_table; + +err_out: + return rc; +} + +static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, + u32 n_sg, + dma_addr_t *ret_dma_addr) +{ + struct fit_sg_descriptor *sg_list; + u32 nbytes; + + nbytes = sizeof(*sg_list) * n_sg; + + sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); + + if (sg_list != NULL) { + uint64_t dma_address = *ret_dma_addr; + u32 i; + + memset(sg_list, 0, nbytes); + + for (i = 0; i < n_sg - 1; i++) { + uint64_t ndp_off; + ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); + + sg_list[i].next_desc_ptr = dma_address + ndp_off; + } + sg_list[i].next_desc_ptr = 0LL; + } + + return sg_list; +} + +static int skd_cons_skreq(struct skd_device *skdev) +{ + int rc = 0; + u32 i; + + pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", + skdev->name, __func__, __LINE__, + sizeof(struct skd_request_context), + skdev->num_req_context, + sizeof(struct skd_request_context) * skdev->num_req_context); + + skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) + * skdev->num_req_context, GFP_KERNEL); + if (skdev->skreq_table == NULL) { + rc = -ENOMEM; + goto err_out; + } + + pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", + skdev->name, __func__, __LINE__, + skdev->sgs_per_request, sizeof(struct scatterlist), + skdev->sgs_per_request * sizeof(struct scatterlist)); + + for (i = 0; i < skdev->num_req_context; i++) { + struct skd_request_context *skreq; + + skreq = &skdev->skreq_table[i]; + + skreq->id = i + SKD_ID_RW_REQUEST; + skreq->state = SKD_REQ_STATE_IDLE; + + skreq->sg = kzalloc(sizeof(struct scatterlist) * + skdev->sgs_per_request, GFP_KERNEL); + if (skreq->sg == NULL) { + rc = -ENOMEM; + goto err_out; + } + sg_init_table(skreq->sg, skdev->sgs_per_request); + + skreq->sksg_list = skd_cons_sg_list(skdev, + skdev->sgs_per_request, + &skreq->sksg_dma_address); + + if (skreq->sksg_list == NULL) { + rc = -ENOMEM; + goto err_out; + } + + skreq->next = &skreq[1]; + } + + /* Free list is in order starting with the 0th entry. */ + skdev->skreq_table[i - 1].next = NULL; + skdev->skreq_free_list = skdev->skreq_table; + +err_out: + return rc; +} + +static int skd_cons_skspcl(struct skd_device *skdev) +{ + int rc = 0; + u32 i, nbytes; + + pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", + skdev->name, __func__, __LINE__, + sizeof(struct skd_special_context), + skdev->n_special, + sizeof(struct skd_special_context) * skdev->n_special); + + skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) + * skdev->n_special, GFP_KERNEL); + if (skdev->skspcl_table == NULL) { + rc = -ENOMEM; + goto err_out; + } + + for (i = 0; i < skdev->n_special; i++) { + struct skd_special_context *skspcl; + + skspcl = &skdev->skspcl_table[i]; + + skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; + skspcl->req.state = SKD_REQ_STATE_IDLE; + + skspcl->req.next = &skspcl[1].req; + + nbytes = SKD_N_SPECIAL_FITMSG_BYTES; + + skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, + &skspcl->mb_dma_address); + if (skspcl->msg_buf == NULL) { + rc = -ENOMEM; + goto err_out; + } + + memset(skspcl->msg_buf, 0, nbytes); + + skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * + SKD_N_SG_PER_SPECIAL, GFP_KERNEL); + if (skspcl->req.sg == NULL) { + rc = -ENOMEM; + goto err_out; + } + + skspcl->req.sksg_list = skd_cons_sg_list(skdev, + SKD_N_SG_PER_SPECIAL, + &skspcl->req. 
+ sksg_dma_address); + if (skspcl->req.sksg_list == NULL) { + rc = -ENOMEM; + goto err_out; + } + } + + /* Free list is in order starting with the 0th entry. */ + skdev->skspcl_table[i - 1].req.next = NULL; + skdev->skspcl_free_list = skdev->skspcl_table; + + return rc; + +err_out: + return rc; +} + +static int skd_cons_sksb(struct skd_device *skdev) +{ + int rc = 0; + struct skd_special_context *skspcl; + u32 nbytes; + + skspcl = &skdev->internal_skspcl; + + skspcl->req.id = 0 + SKD_ID_INTERNAL; + skspcl->req.state = SKD_REQ_STATE_IDLE; + + nbytes = SKD_N_INTERNAL_BYTES; + + skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes, + &skspcl->db_dma_address); + if (skspcl->data_buf == NULL) { + rc = -ENOMEM; + goto err_out; + } + + memset(skspcl->data_buf, 0, nbytes); + + nbytes = SKD_N_SPECIAL_FITMSG_BYTES; + skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, + &skspcl->mb_dma_address); + if (skspcl->msg_buf == NULL) { + rc = -ENOMEM; + goto err_out; + } + + memset(skspcl->msg_buf, 0, nbytes); + + skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, + &skspcl->req.sksg_dma_address); + if (skspcl->req.sksg_list == NULL) { + rc = -ENOMEM; + goto err_out; + } + + if (!skd_format_internal_skspcl(skdev)) { + rc = -EINVAL; + goto err_out; + } + +err_out: + return rc; +} + +static int skd_cons_disk(struct skd_device *skdev) +{ + int rc = 0; + struct gendisk *disk; + struct request_queue *q; + unsigned long flags; + + disk = alloc_disk(SKD_MINORS_PER_DEVICE); + if (!disk) { + rc = -ENOMEM; + goto err_out; + } + + skdev->disk = disk; + sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); + + disk->major = skdev->major; + disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; + disk->fops = &skd_blockdev_ops; + disk->private_data = skdev; + + q = blk_init_queue(skd_request_fn, &skdev->lock); + if (!q) { + rc = -ENOMEM; + goto err_out; + } + + skdev->queue = q; + disk->queue = q; + q->queuedata = skdev; + + blk_queue_flush(q, REQ_FLUSH | REQ_FUA); + blk_queue_max_segments(q, skdev->sgs_per_request); + blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); + + /* set sysfs ptimal_io_size to 8K */ + blk_queue_io_opt(q, 8192); + + /* DISCARD Flag initialization. 
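Editorial note on the limits set just below: discard requests are advertised with an 8 KiB granularity (matching the 8 KiB optimal I/O size set above), are capped at UINT_MAX >> 9 sectors per request (just under 4 GiB of 512-byte sectors), and discarded ranges are reported to read back as zeroes.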
*/ + q->limits.discard_granularity = 8192; + q->limits.discard_alignment = 0; + q->limits.max_discard_sectors = UINT_MAX >> 9; + q->limits.discard_zeroes_data = 1; + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + + spin_lock_irqsave(&skdev->lock, flags); + pr_debug("%s:%s:%d stopping %s queue\n", + skdev->name, __func__, __LINE__, skdev->name); + blk_stop_queue(skdev->queue); + spin_unlock_irqrestore(&skdev->lock, flags); + +err_out: + return rc; +} + +#define SKD_N_DEV_TABLE 16u +static u32 skd_next_devno; + +static struct skd_device *skd_construct(struct pci_dev *pdev) +{ + struct skd_device *skdev; + int blk_major = skd_major; + int rc; + + skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); + + if (!skdev) { + pr_err(PFX "(%s): memory alloc failure\n", + pci_name(pdev)); + return NULL; + } + + skdev->state = SKD_DRVR_STATE_LOAD; + skdev->pdev = pdev; + skdev->devno = skd_next_devno++; + skdev->major = blk_major; + skdev->irq_type = skd_isr_type; + sprintf(skdev->name, DRV_NAME "%d", skdev->devno); + skdev->dev_max_queue_depth = 0; + + skdev->num_req_context = skd_max_queue_depth; + skdev->num_fitmsg_context = skd_max_queue_depth; + skdev->n_special = skd_max_pass_thru; + skdev->cur_max_queue_depth = 1; + skdev->queue_low_water_mark = 1; + skdev->proto_ver = 99; + skdev->sgs_per_request = skd_sgs_per_request; + skdev->dbg_level = skd_dbg_level; + + atomic_set(&skdev->device_count, 0); + + spin_lock_init(&skdev->lock); + + INIT_WORK(&skdev->completion_worker, skd_completion_worker); + + pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); + rc = skd_cons_skcomp(skdev); + if (rc < 0) + goto err_out; + + pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); + rc = skd_cons_skmsg(skdev); + if (rc < 0) + goto err_out; + + pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); + rc = skd_cons_skreq(skdev); + if (rc < 0) + goto err_out; + + pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); + rc = skd_cons_skspcl(skdev); + if (rc < 0) + goto err_out; + + pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); + rc = skd_cons_sksb(skdev); + if (rc < 0) + goto err_out; + + pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); + rc = skd_cons_disk(skdev); + if (rc < 0) + goto err_out; + + pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); + return skdev; + +err_out: + pr_debug("%s:%s:%d construct failed\n", + skdev->name, __func__, __LINE__); + skd_destruct(skdev); + return NULL; +} + +/* + ***************************************************************************** + * DESTRUCT (FREE) + ***************************************************************************** + */ + +static void skd_free_skcomp(struct skd_device *skdev) +{ + if (skdev->skcomp_table != NULL) { + u32 nbytes; + + nbytes = sizeof(skdev->skcomp_table[0]) * + SKD_N_COMPLETION_ENTRY; + pci_free_consistent(skdev->pdev, nbytes, + skdev->skcomp_table, skdev->cq_dma_address); + } + + skdev->skcomp_table = NULL; + skdev->cq_dma_address = 0; +} + +static void skd_free_skmsg(struct skd_device *skdev) +{ + u32 i; + + if (skdev->skmsg_table == NULL) + return; + + for (i = 0; i < skdev->num_fitmsg_context; i++) { + struct skd_fitmsg_context *skmsg; + + skmsg = &skdev->skmsg_table[i]; + + if (skmsg->msg_buf != NULL) { + skmsg->msg_buf += skmsg->offset; + skmsg->mb_dma_address += skmsg->offset; + pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, + skmsg->msg_buf, + skmsg->mb_dma_address); + } + skmsg->msg_buf = NULL; + 
skmsg->mb_dma_address = 0; + } + + kfree(skdev->skmsg_table); + skdev->skmsg_table = NULL; +} + +static void skd_free_sg_list(struct skd_device *skdev, + struct fit_sg_descriptor *sg_list, + u32 n_sg, dma_addr_t dma_addr) +{ + if (sg_list != NULL) { + u32 nbytes; + + nbytes = sizeof(*sg_list) * n_sg; + + pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); + } +} + +static void skd_free_skreq(struct skd_device *skdev) +{ + u32 i; + + if (skdev->skreq_table == NULL) + return; + + for (i = 0; i < skdev->num_req_context; i++) { + struct skd_request_context *skreq; + + skreq = &skdev->skreq_table[i]; + + skd_free_sg_list(skdev, skreq->sksg_list, + skdev->sgs_per_request, + skreq->sksg_dma_address); + + skreq->sksg_list = NULL; + skreq->sksg_dma_address = 0; + + kfree(skreq->sg); + } + + kfree(skdev->skreq_table); + skdev->skreq_table = NULL; +} + +static void skd_free_skspcl(struct skd_device *skdev) +{ + u32 i; + u32 nbytes; + + if (skdev->skspcl_table == NULL) + return; + + for (i = 0; i < skdev->n_special; i++) { + struct skd_special_context *skspcl; + + skspcl = &skdev->skspcl_table[i]; + + if (skspcl->msg_buf != NULL) { + nbytes = SKD_N_SPECIAL_FITMSG_BYTES; + pci_free_consistent(skdev->pdev, nbytes, + skspcl->msg_buf, + skspcl->mb_dma_address); + } + + skspcl->msg_buf = NULL; + skspcl->mb_dma_address = 0; + + skd_free_sg_list(skdev, skspcl->req.sksg_list, + SKD_N_SG_PER_SPECIAL, + skspcl->req.sksg_dma_address); + + skspcl->req.sksg_list = NULL; + skspcl->req.sksg_dma_address = 0; + + kfree(skspcl->req.sg); + } + + kfree(skdev->skspcl_table); + skdev->skspcl_table = NULL; +} + +static void skd_free_sksb(struct skd_device *skdev) +{ + struct skd_special_context *skspcl; + u32 nbytes; + + skspcl = &skdev->internal_skspcl; + + if (skspcl->data_buf != NULL) { + nbytes = SKD_N_INTERNAL_BYTES; + + pci_free_consistent(skdev->pdev, nbytes, + skspcl->data_buf, skspcl->db_dma_address); + } + + skspcl->data_buf = NULL; + skspcl->db_dma_address = 0; + + if (skspcl->msg_buf != NULL) { + nbytes = SKD_N_SPECIAL_FITMSG_BYTES; + pci_free_consistent(skdev->pdev, nbytes, + skspcl->msg_buf, skspcl->mb_dma_address); + } + + skspcl->msg_buf = NULL; + skspcl->mb_dma_address = 0; + + skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, + skspcl->req.sksg_dma_address); + + skspcl->req.sksg_list = NULL; + skspcl->req.sksg_dma_address = 0; +} + +static void skd_free_disk(struct skd_device *skdev) +{ + struct gendisk *disk = skdev->disk; + + if (disk != NULL) { + struct request_queue *q = disk->queue; + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (q) + blk_cleanup_queue(q); + put_disk(disk); + } + skdev->disk = NULL; +} + +static void skd_destruct(struct skd_device *skdev) +{ + if (skdev == NULL) + return; + + + pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); + skd_free_disk(skdev); + + pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); + skd_free_sksb(skdev); + + pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); + skd_free_skspcl(skdev); + + pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); + skd_free_skreq(skdev); + + pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); + skd_free_skmsg(skdev); + + pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); + skd_free_skcomp(skdev); + + pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); + kfree(skdev); +} + +/* + ***************************************************************************** + * BLOCK DEVICE (BDEV) GLUE + 
***************************************************************************** + */ + +static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct skd_device *skdev; + u64 capacity; + + skdev = bdev->bd_disk->private_data; + + pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", + skdev->name, __func__, __LINE__, + bdev->bd_disk->disk_name, current->comm); + + if (skdev->read_cap_is_valid) { + capacity = get_capacity(skdev->disk); + geo->heads = 64; + geo->sectors = 255; + geo->cylinders = (capacity) / (255 * 64); + + return 0; + } + return -EIO; +} + +static int skd_bdev_attach(struct skd_device *skdev) +{ + pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); + add_disk(skdev->disk); + return 0; +} + +static const struct block_device_operations skd_blockdev_ops = { + .owner = THIS_MODULE, + .ioctl = skd_bdev_ioctl, + .getgeo = skd_bdev_getgeo, +}; + + +/* + ***************************************************************************** + * PCIe DRIVER GLUE + ***************************************************************************** + */ + +static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = { + { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, + { 0 } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, skd_pci_tbl); + +static char *skd_pci_info(struct skd_device *skdev, char *str) +{ + int pcie_reg; + + strcpy(str, "PCIe ("); + pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); + + if (pcie_reg) { + + char lwstr[6]; + uint16_t pcie_lstat, lspeed, lwidth; + + pcie_reg += 0x12; + pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); + lspeed = pcie_lstat & (0xF); + lwidth = (pcie_lstat & 0x3F0) >> 4; + + if (lspeed == 1) + strcat(str, "2.5GT/s "); + else if (lspeed == 2) + strcat(str, "5.0GT/s "); + else + strcat(str, "<unknown> "); + snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); + strcat(str, lwstr); + } + return str; +} + +static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int i; + int rc = 0; + char pci_str[32]; + struct skd_device *skdev; + + pr_info("STEC s1120 Driver(%s) version %s-b%s\n", + DRV_NAME, DRV_VERSION, DRV_BUILD_ID); + pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", + pci_name(pdev), pdev->vendor, pdev->device); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!rc) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + + pr_err("(%s): consistent DMA mask error %d\n", + pci_name(pdev), rc); + } + } else { + (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))); + if (rc) { + + pr_err("(%s): DMA mask error %d\n", + pci_name(pdev), rc); + goto err_out_regions; + } + } + + if (!skd_major) { + rc = register_blkdev(0, DRV_NAME); + if (rc < 0) + goto err_out_regions; + BUG_ON(!rc); + skd_major = rc; + } + + skdev = skd_construct(pdev); + if (skdev == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + skd_pci_info(skdev, pci_str); + pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); + + pci_set_master(pdev); + rc = pci_enable_pcie_error_reporting(pdev); + if (rc) { + pr_err( + "(%s): bad enable of PCIe error reporting rc=%d\n", + skd_name(skdev), rc); + skdev->pcie_error_reporting_is_enabled = 0; + } else + skdev->pcie_error_reporting_is_enabled = 1; + + + pci_set_drvdata(pdev, skdev); + + skdev->disk->driverfs_dev = &pdev->dev; + + for (i = 0; i < SKD_MAX_BARS; i++) { + skdev->mem_phys[i] = pci_resource_start(pdev, i); 
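/* Editorial note: this loop records each BAR's physical address and length and maps it into kernel virtual address space with ioremap(); the mappings are undone with iounmap() on the error paths below and in skd_pci_remove() and skd_pci_suspend(). */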
+ skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); + skdev->mem_map[i] = ioremap(skdev->mem_phys[i], + skdev->mem_size[i]); + if (!skdev->mem_map[i]) { + pr_err("(%s): Unable to map adapter memory!\n", + skd_name(skdev)); + rc = -ENODEV; + goto err_out_iounmap; + } + pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", + skdev->name, __func__, __LINE__, + skdev->mem_map[i], + (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); + } + + rc = skd_acquire_irq(skdev); + if (rc) { + pr_err("(%s): interrupt resource error %d\n", + skd_name(skdev), rc); + goto err_out_iounmap; + } + + rc = skd_start_timer(skdev); + if (rc) + goto err_out_timer; + + init_waitqueue_head(&skdev->waitq); + + skd_start_device(skdev); + + rc = wait_event_interruptible_timeout(skdev->waitq, + (skdev->gendisk_on), + (SKD_START_WAIT_SECONDS * HZ)); + if (skdev->gendisk_on > 0) { + /* device came on-line after reset */ + skd_bdev_attach(skdev); + rc = 0; + } else { + /* we timed out, something is wrong with the device, + don't add the disk structure */ + pr_err( + "(%s): error: waiting for s1120 timed out %d!\n", + skd_name(skdev), rc); + /* in case of no error; we timeout with ENXIO */ + if (!rc) + rc = -ENXIO; + goto err_out_timer; + } + + +#ifdef SKD_VMK_POLL_HANDLER + if (skdev->irq_type == SKD_IRQ_MSIX) { + /* MSIX completion handler is being used for coredump */ + vmklnx_scsi_register_poll_handler(skdev->scsi_host, + skdev->msix_entries[5].vector, + skd_comp_q, skdev); + } else { + vmklnx_scsi_register_poll_handler(skdev->scsi_host, + skdev->pdev->irq, skd_isr, + skdev); + } +#endif /* SKD_VMK_POLL_HANDLER */ + + return rc; + +err_out_timer: + skd_stop_device(skdev); + skd_release_irq(skdev); + +err_out_iounmap: + for (i = 0; i < SKD_MAX_BARS; i++) + if (skdev->mem_map[i]) + iounmap(skdev->mem_map[i]); + + if (skdev->pcie_error_reporting_is_enabled) + pci_disable_pcie_error_reporting(pdev); + + skd_destruct(skdev); + +err_out_regions: + pci_release_regions(pdev); + +err_out: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return rc; +} + +static void skd_pci_remove(struct pci_dev *pdev) +{ + int i; + struct skd_device *skdev; + + skdev = pci_get_drvdata(pdev); + if (!skdev) { + pr_err("%s: no device data for PCI\n", pci_name(pdev)); + return; + } + skd_stop_device(skdev); + skd_release_irq(skdev); + + for (i = 0; i < SKD_MAX_BARS; i++) + if (skdev->mem_map[i]) + iounmap((u32 *)skdev->mem_map[i]); + + if (skdev->pcie_error_reporting_is_enabled) + pci_disable_pcie_error_reporting(pdev); + + skd_destruct(skdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + return; +} + +static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int i; + struct skd_device *skdev; + + skdev = pci_get_drvdata(pdev); + if (!skdev) { + pr_err("%s: no device data for PCI\n", pci_name(pdev)); + return -EIO; + } + + skd_stop_device(skdev); + + skd_release_irq(skdev); + + for (i = 0; i < SKD_MAX_BARS; i++) + if (skdev->mem_map[i]) + iounmap((u32 *)skdev->mem_map[i]); + + if (skdev->pcie_error_reporting_is_enabled) + pci_disable_pcie_error_reporting(pdev); + + pci_release_regions(pdev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int skd_pci_resume(struct pci_dev *pdev) +{ + int i; + int rc = 0; + struct skd_device *skdev; + + skdev = pci_get_drvdata(pdev); + if (!skdev) { + pr_err("%s: no device data for PCI\n", pci_name(pdev)); + return -1; + } + + 
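/* Editorial note: resume mirrors the probe path above: the device is powered back to D0 and its PCI state restored, the DMA masks and BAR mappings are re-established, the interrupt (MSI-X, MSI or legacy) is re-acquired, and the timer and device are restarted; the existing gendisk is reused rather than re-created. */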
pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!rc) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + + pr_err("(%s): consistent DMA mask error %d\n", + pci_name(pdev), rc); + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + + pr_err("(%s): DMA mask error %d\n", + pci_name(pdev), rc); + goto err_out_regions; + } + } + + pci_set_master(pdev); + rc = pci_enable_pcie_error_reporting(pdev); + if (rc) { + pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", + skdev->name, rc); + skdev->pcie_error_reporting_is_enabled = 0; + } else + skdev->pcie_error_reporting_is_enabled = 1; + + for (i = 0; i < SKD_MAX_BARS; i++) { + + skdev->mem_phys[i] = pci_resource_start(pdev, i); + skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); + skdev->mem_map[i] = ioremap(skdev->mem_phys[i], + skdev->mem_size[i]); + if (!skdev->mem_map[i]) { + pr_err("(%s): Unable to map adapter memory!\n", + skd_name(skdev)); + rc = -ENODEV; + goto err_out_iounmap; + } + pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", + skdev->name, __func__, __LINE__, + skdev->mem_map[i], + (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); + } + rc = skd_acquire_irq(skdev); + if (rc) { + + pr_err("(%s): interrupt resource error %d\n", + pci_name(pdev), rc); + goto err_out_iounmap; + } + + rc = skd_start_timer(skdev); + if (rc) + goto err_out_timer; + + init_waitqueue_head(&skdev->waitq); + + skd_start_device(skdev); + + return rc; + +err_out_timer: + skd_stop_device(skdev); + skd_release_irq(skdev); + +err_out_iounmap: + for (i = 0; i < SKD_MAX_BARS; i++) + if (skdev->mem_map[i]) + iounmap(skdev->mem_map[i]); + + if (skdev->pcie_error_reporting_is_enabled) + pci_disable_pcie_error_reporting(pdev); + +err_out_regions: + pci_release_regions(pdev); + +err_out: + pci_disable_device(pdev); + return rc; +} + +static void skd_pci_shutdown(struct pci_dev *pdev) +{ + struct skd_device *skdev; + + pr_err("skd_pci_shutdown called\n"); + + skdev = pci_get_drvdata(pdev); + if (!skdev) { + pr_err("%s: no device data for PCI\n", pci_name(pdev)); + return; + } + + pr_err("%s: calling stop\n", skd_name(skdev)); + skd_stop_device(skdev); +} + +static struct pci_driver skd_driver = { + .name = DRV_NAME, + .id_table = skd_pci_tbl, + .probe = skd_pci_probe, + .remove = skd_pci_remove, + .suspend = skd_pci_suspend, + .resume = skd_pci_resume, + .shutdown = skd_pci_shutdown, +}; + +/* + ***************************************************************************** + * LOGGING SUPPORT + ***************************************************************************** + */ + +static const char *skd_name(struct skd_device *skdev) +{ + memset(skdev->id_str, 0, sizeof(skdev->id_str)); + + if (skdev->inquiry_is_valid) + snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", + skdev->name, skdev->inq_serial_num, + pci_name(skdev->pdev)); + else + snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", + skdev->name, pci_name(skdev->pdev)); + + return skdev->id_str; +} + +const char *skd_drive_state_to_str(int state) +{ + switch (state) { + case FIT_SR_DRIVE_OFFLINE: + return "OFFLINE"; + case FIT_SR_DRIVE_INIT: + return "INIT"; + case FIT_SR_DRIVE_ONLINE: + return "ONLINE"; + case FIT_SR_DRIVE_BUSY: + return "BUSY"; + case FIT_SR_DRIVE_FAULT: + return "FAULT"; + case 
FIT_SR_DRIVE_DEGRADED: + return "DEGRADED"; + case FIT_SR_PCIE_LINK_DOWN: + return "LINK_DOWN"; + case FIT_SR_DRIVE_SOFT_RESET: + return "SOFT_RESET"; + case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: + return "NEED_FW"; + case FIT_SR_DRIVE_INIT_FAULT: + return "INIT_FAULT"; + case FIT_SR_DRIVE_BUSY_SANITIZE: + return "BUSY_SANITIZE"; + case FIT_SR_DRIVE_BUSY_ERASE: + return "BUSY_ERASE"; + case FIT_SR_DRIVE_FW_BOOTING: + return "FW_BOOTING"; + default: + return "???"; + } +} + +const char *skd_skdev_state_to_str(enum skd_drvr_state state) +{ + switch (state) { + case SKD_DRVR_STATE_LOAD: + return "LOAD"; + case SKD_DRVR_STATE_IDLE: + return "IDLE"; + case SKD_DRVR_STATE_BUSY: + return "BUSY"; + case SKD_DRVR_STATE_STARTING: + return "STARTING"; + case SKD_DRVR_STATE_ONLINE: + return "ONLINE"; + case SKD_DRVR_STATE_PAUSING: + return "PAUSING"; + case SKD_DRVR_STATE_PAUSED: + return "PAUSED"; + case SKD_DRVR_STATE_DRAINING_TIMEOUT: + return "DRAINING_TIMEOUT"; + case SKD_DRVR_STATE_RESTARTING: + return "RESTARTING"; + case SKD_DRVR_STATE_RESUMING: + return "RESUMING"; + case SKD_DRVR_STATE_STOPPING: + return "STOPPING"; + case SKD_DRVR_STATE_SYNCING: + return "SYNCING"; + case SKD_DRVR_STATE_FAULT: + return "FAULT"; + case SKD_DRVR_STATE_DISAPPEARED: + return "DISAPPEARED"; + case SKD_DRVR_STATE_BUSY_ERASE: + return "BUSY_ERASE"; + case SKD_DRVR_STATE_BUSY_SANITIZE: + return "BUSY_SANITIZE"; + case SKD_DRVR_STATE_BUSY_IMMINENT: + return "BUSY_IMMINENT"; + case SKD_DRVR_STATE_WAIT_BOOT: + return "WAIT_BOOT"; + + default: + return "???"; + } +} + +const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) +{ + switch (state) { + case SKD_MSG_STATE_IDLE: + return "IDLE"; + case SKD_MSG_STATE_BUSY: + return "BUSY"; + default: + return "???"; + } +} + +const char *skd_skreq_state_to_str(enum skd_req_state state) +{ + switch (state) { + case SKD_REQ_STATE_IDLE: + return "IDLE"; + case SKD_REQ_STATE_SETUP: + return "SETUP"; + case SKD_REQ_STATE_BUSY: + return "BUSY"; + case SKD_REQ_STATE_COMPLETED: + return "COMPLETED"; + case SKD_REQ_STATE_TIMEOUT: + return "TIMEOUT"; + case SKD_REQ_STATE_ABORTED: + return "ABORTED"; + default: + return "???"; + } +} + +static void skd_log_skdev(struct skd_device *skdev, const char *event) +{ + pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n", + skdev->name, __func__, __LINE__, skdev->name, skdev, event); + pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", + skdev->name, __func__, __LINE__, + skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, + skd_skdev_state_to_str(skdev->state), skdev->state); + pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", + skdev->name, __func__, __LINE__, + skdev->in_flight, skdev->cur_max_queue_depth, + skdev->dev_max_queue_depth, skdev->queue_low_water_mark); + pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", + skdev->name, __func__, __LINE__, + skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); +} + +static void skd_log_skmsg(struct skd_device *skdev, + struct skd_fitmsg_context *skmsg, const char *event) +{ + pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", + skdev->name, __func__, __LINE__, skdev->name, skmsg, event); + pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", + skdev->name, __func__, __LINE__, + skd_skmsg_state_to_str(skmsg->state), skmsg->state, + skmsg->id, skmsg->length); +} + +static void skd_log_skreq(struct skd_device *skdev, + struct skd_request_context *skreq, const char *event) +{ + pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", + skdev->name, __func__, __LINE__, 
skdev->name, skreq, event); + pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", + skdev->name, __func__, __LINE__, + skd_skreq_state_to_str(skreq->state), skreq->state, + skreq->id, skreq->fitmsg_id); + pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", + skdev->name, __func__, __LINE__, + skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); + + if (skreq->req != NULL) { + struct request *req = skreq->req; + u32 lba = (u32)blk_rq_pos(req); + u32 count = blk_rq_sectors(req); + + pr_debug("%s:%s:%d " + "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", + skdev->name, __func__, __LINE__, + req, lba, lba, count, count, + (int)rq_data_dir(req)); + } else + pr_debug("%s:%s:%d req=NULL\n", + skdev->name, __func__, __LINE__); +} + +/* + ***************************************************************************** + * MODULE GLUE + ***************************************************************************** + */ + +static int __init skd_init(void) +{ + pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); + + switch (skd_isr_type) { + case SKD_IRQ_LEGACY: + case SKD_IRQ_MSI: + case SKD_IRQ_MSIX: + break; + default: + pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", + skd_isr_type, SKD_IRQ_DEFAULT); + skd_isr_type = SKD_IRQ_DEFAULT; + } + + if (skd_max_queue_depth < 1 || + skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { + pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", + skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); + skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; + } + + if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { + pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", + skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); + skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; + } + + if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { + pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", + skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); + skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; + } + + if (skd_dbg_level < 0 || skd_dbg_level > 2) { + pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", + skd_dbg_level, 0); + skd_dbg_level = 0; + } + + if (skd_isr_comp_limit < 0) { + pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", + skd_isr_comp_limit, 0); + skd_isr_comp_limit = 0; + } + + if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { + pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", + skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); + skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; + } + + return pci_register_driver(&skd_driver); +} + +static void __exit skd_exit(void) +{ + pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); + + pci_unregister_driver(&skd_driver); + + if (skd_major) + unregister_blkdev(skd_major, DRV_NAME); +} + +module_init(skd_init); +module_exit(skd_exit); diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h new file mode 100644 index 000000000000..61c757ff0161 --- /dev/null +++ b/drivers/block/skd_s1120.h @@ -0,0 +1,330 @@ +/* Copyright 2012 STEC, Inc. + * + * This file is licensed under the terms of the 3-clause + * BSD License (http://opensource.org/licenses/BSD-3-Clause) + * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), + * at your option. Both licenses are also available in the LICENSE file + * distributed with this project. This file may not be copied, modified, + * or distributed except in accordance with those terms. 
+ */ + + +#ifndef SKD_S1120_H +#define SKD_S1120_H + +#pragma pack(push, s1120_h, 1) + +/* + * Q-channel, 64-bit r/w + */ +#define FIT_Q_COMMAND 0x400u +#define FIT_QCMD_QID_MASK (0x3 << 1) +#define FIT_QCMD_QID0 (0x0 << 1) +#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0 +#define FIT_QCMD_QID1 (0x1 << 1) +#define FIT_QCMD_QID2 (0x2 << 1) +#define FIT_QCMD_QID3 (0x3 << 1) +#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */ +#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4) +#define FIT_QCMD_MSGSIZE_64 (0x0 << 4) +#define FIT_QCMD_MSGSIZE_128 (0x1 << 4) +#define FIT_QCMD_MSGSIZE_256 (0x2 << 4) +#define FIT_QCMD_MSGSIZE_512 (0x3 << 4) +#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull) + +/* + * Control, 32-bit r/w + */ +#define FIT_CONTROL 0x500u +#define FIT_CR_HARD_RESET (1u << 0u) +#define FIT_CR_SOFT_RESET (1u << 1u) +#define FIT_CR_DIS_TIMESTAMPS (1u << 6u) +#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u) + +/* + * Status, 32-bit, r/o + */ +#define FIT_STATUS 0x510u +#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu +#define FIT_SR_SIGNATURE (0xFF << 8) +#define FIT_SR_PIO_DMA (1 << 16) +#define FIT_SR_DRIVE_OFFLINE 0x00 +#define FIT_SR_DRIVE_INIT 0x01 +/* #define FIT_SR_DRIVE_READY 0x02 */ +#define FIT_SR_DRIVE_ONLINE 0x03 +#define FIT_SR_DRIVE_BUSY 0x04 +#define FIT_SR_DRIVE_FAULT 0x05 +#define FIT_SR_DRIVE_DEGRADED 0x06 +#define FIT_SR_PCIE_LINK_DOWN 0x07 +#define FIT_SR_DRIVE_SOFT_RESET 0x08 +#define FIT_SR_DRIVE_INIT_FAULT 0x09 +#define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A +#define FIT_SR_DRIVE_BUSY_ERASE 0x0B +#define FIT_SR_DRIVE_FW_BOOTING 0x0C +#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE +#define FIT_SR_DEVICE_MISSING 0xFF +#define FIT_SR__RESERVED 0xFFFFFF00u + +/* + * FIT_STATUS - Status register data definition + */ +#define FIT_SR_STATE_MASK (0xFF << 0) +#define FIT_SR_SIGNATURE (0xFF << 8) +#define FIT_SR_PIO_DMA (1 << 16) + +/* + * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear) + */ +#define FIT_INT_STATUS_HOST 0x520u +#define FIT_ISH_FW_STATE_CHANGE (1u << 0u) +#define FIT_ISH_COMPLETION_POSTED (1u << 1u) +#define FIT_ISH_MSG_FROM_DEV (1u << 2u) +#define FIT_ISH_UNDEFINED_3 (1u << 3u) +#define FIT_ISH_UNDEFINED_4 (1u << 4u) +#define FIT_ISH_Q0_FULL (1u << 5u) +#define FIT_ISH_Q1_FULL (1u << 6u) +#define FIT_ISH_Q2_FULL (1u << 7u) +#define FIT_ISH_Q3_FULL (1u << 8u) +#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u) +#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u) + +#define FIT_INT_DEF_MASK \ + (FIT_ISH_FW_STATE_CHANGE | \ + FIT_ISH_COMPLETION_POSTED | \ + FIT_ISH_MSG_FROM_DEV | \ + FIT_ISH_Q0_FULL | \ + FIT_ISH_Q1_FULL | \ + FIT_ISH_Q2_FULL | \ + FIT_ISH_Q3_FULL | \ + FIT_ISH_QCMD_FIFO_OVERRUN | \ + FIT_ISH_BAD_EXP_ROM_READ) + +#define FIT_INT_QUEUE_FULL \ + (FIT_ISH_Q0_FULL | \ + FIT_ISH_Q1_FULL | \ + FIT_ISH_Q2_FULL | \ + FIT_ISH_Q3_FULL) + +#define MSI_MSG_NWL_ERROR_0 0x00000000 +#define MSI_MSG_NWL_ERROR_1 0x00000001 +#define MSI_MSG_NWL_ERROR_2 0x00000002 +#define MSI_MSG_NWL_ERROR_3 0x00000003 +#define MSI_MSG_STATE_CHANGE 0x00000004 +#define MSI_MSG_COMPLETION_POSTED 0x00000005 +#define MSI_MSG_MSG_FROM_DEV 0x00000006 +#define MSI_MSG_RESERVED_0 0x00000007 +#define MSI_MSG_RESERVED_1 0x00000008 +#define MSI_MSG_QUEUE_0_FULL 0x00000009 +#define MSI_MSG_QUEUE_1_FULL 0x0000000A +#define MSI_MSG_QUEUE_2_FULL 0x0000000B +#define MSI_MSG_QUEUE_3_FULL 0x0000000C + +#define FIT_INT_RESERVED_MASK \ + (FIT_ISH_UNDEFINED_3 | \ + FIT_ISH_UNDEFINED_4) + +/* + * Interrupt mask, 32-bit r/w + * Bit definitions are the same as FIT_INT_STATUS_HOST + */ +#define FIT_INT_MASK_HOST 0x528u + +/* + * 
Message to device, 32-bit r/w + */ +#define FIT_MSG_TO_DEVICE 0x540u + +/* + * Message from device, 32-bit, r/o + */ +#define FIT_MSG_FROM_DEVICE 0x548u + +/* + * 32-bit messages to/from device, composition/extraction macros + */ +#define FIT_MXD_CONS(TYPE, PARAM, DATA) \ + ((((TYPE) & 0xFFu) << 24u) | \ + (((PARAM) & 0xFFu) << 16u) | \ + (((DATA) & 0xFFFFu) << 0u)) +#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu) +#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu) +#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu) + +/* + * Types of messages to/from device + */ +#define FIT_MTD_FITFW_INIT 0x01u +#define FIT_MTD_GET_CMDQ_DEPTH 0x02u +#define FIT_MTD_SET_COMPQ_DEPTH 0x03u +#define FIT_MTD_SET_COMPQ_ADDR 0x04u +#define FIT_MTD_ARM_QUEUE 0x05u +#define FIT_MTD_CMD_LOG_HOST_ID 0x07u +#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u +#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u +#define FIT_MFD_SMART_EXCEEDED 0x10u +#define FIT_MFD_POWER_DOWN 0x11u +#define FIT_MFD_OFFLINE 0x12u +#define FIT_MFD_ONLINE 0x13u +#define FIT_MFD_FW_RESTARTING 0x14u +#define FIT_MFD_PM_ACTIVE 0x15u +#define FIT_MFD_PM_STANDBY 0x16u +#define FIT_MFD_PM_SLEEP 0x17u +#define FIT_MFD_CMD_PROGRESS 0x18u + +#define FIT_MTD_DEBUG 0xFEu +#define FIT_MFD_DEBUG 0xFFu + +#define FIT_MFD_MASK (0xFFu) +#define FIT_MFD_DATA_MASK (0xFFu) +#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK) +#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK) + +/* + * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w + * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR) + * (was Response buffer in docs) + */ +#define FIT_MSG_TO_DEVICE_ARG 0x580u + +/* + * Hardware (ASIC) version, 32-bit r/o + */ +#define FIT_HW_VERSION 0x588u + +/* + * Scatter/gather list descriptor. + * 32-bytes and must be aligned on a 32-byte boundary. + * All fields are in little endian order. + */ +struct fit_sg_descriptor { + uint32_t control; + uint32_t byte_count; + uint64_t host_side_addr; + uint64_t dev_side_addr; + uint64_t next_desc_ptr; +}; + +#define FIT_SGD_CONTROL_NOT_LAST 0x000u +#define FIT_SGD_CONTROL_LAST 0x40Eu + +/* + * Header at the beginning of a FIT message. The header + * is followed by SSDI requests each 64 bytes. + * A FIT message can be up to 512 bytes long and must start + * on a 64-byte boundary. + */ +struct fit_msg_hdr { + uint8_t protocol_id; + uint8_t num_protocol_cmds_coalesced; + uint8_t _reserved[62]; +}; + +#define FIT_PROTOCOL_ID_FIT 1 +#define FIT_PROTOCOL_ID_SSDI 2 +#define FIT_PROTOCOL_ID_SOFIT 3 + + +#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF) +#define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF) + +/* + * Format of a completion entry. The completion queue is circular + * and must have at least as many entries as the maximum number + * of commands that may be issued to the device. + * + * There are no head/tail pointers. The cycle value is used to + * infer the presence of new completion records. + * Initially the cycle in all entries is 0, the index is 0, and + * the cycle value to expect is 1. When completions are added + * their cycle values are set to 1. When the index wraps the + * cycle value to expect is incremented. + * + * Command_context is opaque and taken verbatim from the SSDI command. + * All other fields are big endian. + */ +#define FIT_PROTOCOL_VERSION_0 0 + +/* + * Protocol major version 1 completion entry. + * The major protocol version is found in bits + * 20-23 of the FIT_MTD_FITFW_INIT response. 
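 *
 * Editorial illustration (an addition, not part of the original header):
 * given the cycle/index rules described earlier, a consumer typically
 * polls the ring along these lines, with "ix" starting at 0 and the
 * expected cycle "expect" starting at 1:
 *
 *	ent = &comp_table[ix];
 *	if (ent->cycle != expect)
 *		return;			(nothing new has been posted)
 *	consume(ent);			(placeholder for completion handling)
 *	if (++ix == nr_entries) {
 *		ix = 0;
 *		expect++;		(expected cycle bumps on each wrap)
 *	}
 *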
+ */ +struct fit_completion_entry_v1 { + uint32_t num_returned_bytes; + uint16_t tag; + uint8_t status; /* SCSI status */ + uint8_t cycle; +}; +#define FIT_PROTOCOL_VERSION_1 1 +#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1 + +struct fit_comp_error_info { + uint8_t type:7; /* 00: Bits0-6 indicates the type of sense data. */ + uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */ + uint8_t reserved0; /* 01: Obsolete field */ + uint8_t key:4; /* 02: Bits0-3 indicate the sense key. */ + uint8_t reserved2:1; /* 02: Reserved bit. */ + uint8_t bad_length:1; /* 02: Incorrect Length Indicator */ + uint8_t end_medium:1; /* 02: End of Medium */ + uint8_t file_mark:1; /* 02: Filemark */ + uint8_t info[4]; /* 03: */ + uint8_t reserved1; /* 07: Additional Sense Length */ + uint8_t cmd_spec[4]; /* 08: Command Specific Information */ + uint8_t code; /* 0C: Additional Sense Code */ + uint8_t qual; /* 0D: Additional Sense Code Qualifier */ + uint8_t fruc; /* 0E: Field Replaceable Unit Code */ + uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */ + uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */ + uint16_t sks_low; /* 10: Sense Key Specific (LSW) */ + uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */ + uint16_t uec; /* 14: Additional Sense Bytes */ + uint64_t per; /* 16: Additional Sense Bytes */ + uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */ +}; + + +/* Task management constants */ +#define SOFT_TASK_SIMPLE 0x00 +#define SOFT_TASK_HEAD_OF_QUEUE 0x01 +#define SOFT_TASK_ORDERED 0x02 + +/* Version zero has the last 32 bits reserved, + * Version one has the last 32 bits sg_list_len_bytes; + */ +struct skd_command_header { + uint64_t sg_list_dma_address; + uint16_t tag; + uint8_t attribute; + uint8_t add_cdb_len; /* In 32 bit words */ + uint32_t sg_list_len_bytes; +}; + +struct skd_scsi_request { + struct skd_command_header hdr; + unsigned char cdb[16]; +/* unsigned char _reserved[16]; */ +}; + +struct driver_inquiry_data { + uint8_t peripheral_device_type:5; + uint8_t qualifier:3; + uint8_t page_code; + uint16_t page_length; + uint16_t pcie_bus_number; + uint8_t pcie_device_number; + uint8_t pcie_function_number; + uint8_t pcie_link_speed; + uint8_t pcie_link_lanes; + uint16_t pcie_vendor_id; + uint16_t pcie_device_id; + uint16_t pcie_subsystem_vendor_id; + uint16_t pcie_subsystem_device_id; + uint8_t reserved1[2]; + uint8_t reserved2[3]; + uint8_t driver_version_length; + uint8_t driver_version[0x14]; +}; + +#pragma pack(pop, s1120_h) + +#endif /* SKD_S1120_H */ diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 8ed6ccb748cf..b02d53a399f3 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -924,7 +924,6 @@ static int swim_probe(struct platform_device *dev) return 0; out_kfree: - platform_set_drvdata(dev, NULL); kfree(swd); out_iounmap: iounmap(swim_base); @@ -962,7 +961,6 @@ static int swim_remove(struct platform_device *dev) if (res) release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(dev, NULL); kfree(swd); return 0; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5cdf88b7ad9e..6a680d4de7f1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -11,12 +11,11 @@ #include <linux/string_helpers.h> #include <scsi/scsi_cmnd.h> #include <linux/idr.h> +#include <linux/blk-mq.h> +#include <linux/numa.h> #define PART_BITS 4 -static bool use_bio; -module_param(use_bio, bool, S_IRUGO); - static int major; static DEFINE_IDA(vd_index_ida); @@ -26,13 
+25,11 @@ struct virtio_blk { struct virtio_device *vdev; struct virtqueue *vq; - wait_queue_head_t queue_wait; + spinlock_t vq_lock; /* The disk structure for the kernel. */ struct gendisk *disk; - mempool_t *pool; - /* Process context for config space updates */ struct work_struct config_work; @@ -47,31 +44,17 @@ struct virtio_blk /* Ida index - used to track minor number allocations. */ int index; - - /* Scatterlist: can be too big for stack. */ - struct scatterlist sg[/*sg_elems*/]; }; struct virtblk_req { struct request *req; - struct bio *bio; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; - struct work_struct work; - struct virtio_blk *vblk; - int flags; u8 status; struct scatterlist sg[]; }; -enum { - VBLK_IS_FLUSH = 1, - VBLK_REQ_FLUSH = 2, - VBLK_REQ_DATA = 4, - VBLK_REQ_FUA = 8, -}; - static inline int virtblk_result(struct virtblk_req *vbr) { switch (vbr->status) { @@ -84,22 +67,6 @@ static inline int virtblk_result(struct virtblk_req *vbr) } } -static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk, - gfp_t gfp_mask) -{ - struct virtblk_req *vbr; - - vbr = mempool_alloc(vblk->pool, gfp_mask); - if (!vbr) - return NULL; - - vbr->vblk = vblk; - if (use_bio) - sg_init_table(vbr->sg, vblk->sg_elems); - - return vbr; -} - static int __virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, struct scatterlist *data_sg, @@ -143,83 +110,8 @@ static int __virtblk_add_req(struct virtqueue *vq, return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } -static void virtblk_add_req(struct virtblk_req *vbr, bool have_data) -{ - struct virtio_blk *vblk = vbr->vblk; - DEFINE_WAIT(wait); - int ret; - - spin_lock_irq(vblk->disk->queue->queue_lock); - while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg, - have_data)) < 0)) { - prepare_to_wait_exclusive(&vblk->queue_wait, &wait, - TASK_UNINTERRUPTIBLE); - - spin_unlock_irq(vblk->disk->queue->queue_lock); - io_schedule(); - spin_lock_irq(vblk->disk->queue->queue_lock); - - finish_wait(&vblk->queue_wait, &wait); - } - - virtqueue_kick(vblk->vq); - spin_unlock_irq(vblk->disk->queue->queue_lock); -} - -static void virtblk_bio_send_flush(struct virtblk_req *vbr) -{ - vbr->flags |= VBLK_IS_FLUSH; - vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; - vbr->out_hdr.sector = 0; - vbr->out_hdr.ioprio = 0; - - virtblk_add_req(vbr, false); -} - -static void virtblk_bio_send_data(struct virtblk_req *vbr) -{ - struct virtio_blk *vblk = vbr->vblk; - struct bio *bio = vbr->bio; - bool have_data; - - vbr->flags &= ~VBLK_IS_FLUSH; - vbr->out_hdr.type = 0; - vbr->out_hdr.sector = bio->bi_sector; - vbr->out_hdr.ioprio = bio_prio(bio); - - if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) { - have_data = true; - if (bio->bi_rw & REQ_WRITE) - vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; - else - vbr->out_hdr.type |= VIRTIO_BLK_T_IN; - } else - have_data = false; - - virtblk_add_req(vbr, have_data); -} - -static void virtblk_bio_send_data_work(struct work_struct *work) -{ - struct virtblk_req *vbr; - - vbr = container_of(work, struct virtblk_req, work); - - virtblk_bio_send_data(vbr); -} - -static void virtblk_bio_send_flush_work(struct work_struct *work) -{ - struct virtblk_req *vbr; - - vbr = container_of(work, struct virtblk_req, work); - - virtblk_bio_send_flush(vbr); -} - static inline void virtblk_request_done(struct virtblk_req *vbr) { - struct virtio_blk *vblk = vbr->vblk; struct request *req = vbr->req; int error = virtblk_result(vbr); @@ -231,90 +123,45 @@ static inline void 
virtblk_request_done(struct virtblk_req *vbr) req->errors = (error != 0); } - __blk_end_request_all(req, error); - mempool_free(vbr, vblk->pool); -} - -static inline void virtblk_bio_flush_done(struct virtblk_req *vbr) -{ - struct virtio_blk *vblk = vbr->vblk; - - if (vbr->flags & VBLK_REQ_DATA) { - /* Send out the actual write data */ - INIT_WORK(&vbr->work, virtblk_bio_send_data_work); - queue_work(virtblk_wq, &vbr->work); - } else { - bio_endio(vbr->bio, virtblk_result(vbr)); - mempool_free(vbr, vblk->pool); - } -} - -static inline void virtblk_bio_data_done(struct virtblk_req *vbr) -{ - struct virtio_blk *vblk = vbr->vblk; - - if (unlikely(vbr->flags & VBLK_REQ_FUA)) { - /* Send out a flush before end the bio */ - vbr->flags &= ~VBLK_REQ_DATA; - INIT_WORK(&vbr->work, virtblk_bio_send_flush_work); - queue_work(virtblk_wq, &vbr->work); - } else { - bio_endio(vbr->bio, virtblk_result(vbr)); - mempool_free(vbr, vblk->pool); - } -} - -static inline void virtblk_bio_done(struct virtblk_req *vbr) -{ - if (unlikely(vbr->flags & VBLK_IS_FLUSH)) - virtblk_bio_flush_done(vbr); - else - virtblk_bio_data_done(vbr); + blk_mq_end_io(req, error); } static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; - bool bio_done = false, req_done = false; + bool req_done = false; struct virtblk_req *vbr; unsigned long flags; unsigned int len; - spin_lock_irqsave(vblk->disk->queue->queue_lock, flags); + spin_lock_irqsave(&vblk->vq_lock, flags); do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { - if (vbr->bio) { - virtblk_bio_done(vbr); - bio_done = true; - } else { - virtblk_request_done(vbr); - req_done = true; - } + virtblk_request_done(vbr); + req_done = true; } + if (unlikely(virtqueue_is_broken(vq))) + break; } while (!virtqueue_enable_cb(vq)); + spin_unlock_irqrestore(&vblk->vq_lock, flags); + /* In case queue is stopped waiting for more buffers. */ if (req_done) - blk_start_queue(vblk->disk->queue); - spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags); - - if (bio_done) - wake_up(&vblk->queue_wait); + blk_mq_start_stopped_hw_queues(vblk->disk->queue); } -static bool do_req(struct request_queue *q, struct virtio_blk *vblk, - struct request *req) +static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) { + struct virtio_blk *vblk = hctx->queue->queuedata; + struct virtblk_req *vbr = req->special; + unsigned long flags; unsigned int num; - struct virtblk_req *vbr; + const bool last = (req->cmd_flags & REQ_END) != 0; - vbr = virtblk_alloc_req(vblk, GFP_ATOMIC); - if (!vbr) - /* When another request finishes we'll try again. 
*/ - return false; + BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); vbr->req = req; - vbr->bio = NULL; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; @@ -342,7 +189,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, } } - num = blk_rq_map_sg(q, vbr->req, vblk->sg); + num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); if (num) { if (rq_data_dir(vbr->req) == WRITE) vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; @@ -350,63 +197,19 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, vbr->out_hdr.type |= VIRTIO_BLK_T_IN; } - if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) { - mempool_free(vbr, vblk->pool); - return false; - } - - return true; -} - -static void virtblk_request(struct request_queue *q) -{ - struct virtio_blk *vblk = q->queuedata; - struct request *req; - unsigned int issued = 0; - - while ((req = blk_peek_request(q)) != NULL) { - BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); - - /* If this request fails, stop queue and wait for something to - finish to restart it. */ - if (!do_req(q, vblk, req)) { - blk_stop_queue(q); - break; - } - blk_start_request(req); - issued++; + spin_lock_irqsave(&vblk->vq_lock, flags); + if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { + virtqueue_kick(vblk->vq); + spin_unlock_irqrestore(&vblk->vq_lock, flags); + blk_mq_stop_hw_queue(hctx); + return BLK_MQ_RQ_QUEUE_BUSY; } - if (issued) + if (last) virtqueue_kick(vblk->vq); -} -static void virtblk_make_request(struct request_queue *q, struct bio *bio) -{ - struct virtio_blk *vblk = q->queuedata; - struct virtblk_req *vbr; - - BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems); - - vbr = virtblk_alloc_req(vblk, GFP_NOIO); - if (!vbr) { - bio_endio(bio, -ENOMEM); - return; - } - - vbr->bio = bio; - vbr->flags = 0; - if (bio->bi_rw & REQ_FLUSH) - vbr->flags |= VBLK_REQ_FLUSH; - if (bio->bi_rw & REQ_FUA) - vbr->flags |= VBLK_REQ_FUA; - if (bio->bi_size) - vbr->flags |= VBLK_REQ_DATA; - - if (unlikely(vbr->flags & VBLK_REQ_FLUSH)) - virtblk_bio_send_flush(vbr); - else - virtblk_bio_send_data(vbr); + spin_unlock_irqrestore(&vblk->vq_lock, flags); + return BLK_MQ_RQ_QUEUE_OK; } /* return id (s/n) string for *disk to *id_str @@ -456,18 +259,15 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; - struct virtio_blk_geometry vgeo; - int err; /* see if the host passed in geometry config */ - err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY, - offsetof(struct virtio_blk_config, geometry), - &vgeo); - - if (!err) { - geo->heads = vgeo.heads; - geo->sectors = vgeo.sectors; - geo->cylinders = vgeo.cylinders; + if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { + virtio_cread(vblk->vdev, struct virtio_blk_config, + geometry.cylinders, &geo->cylinders); + virtio_cread(vblk->vdev, struct virtio_blk_config, + geometry.heads, &geo->heads); + virtio_cread(vblk->vdev, struct virtio_blk_config, + geometry.sectors, &geo->sectors); } else { /* some standard values, similar to sd */ geo->heads = 1 << 6; @@ -529,8 +329,7 @@ static void virtblk_config_changed_work(struct work_struct *work) goto done; /* Host must always specify the capacity. 
*/ - vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), - &capacity, sizeof(capacity)); + virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity); /* If capacity is too big, truncate with warning. */ if ((sector_t)capacity != capacity) { @@ -608,9 +407,9 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev) u8 writeback; int err; - err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE, - offsetof(struct virtio_blk_config, wce), - &writeback); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE, + struct virtio_blk_config, wce, + &writeback); if (err) writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE); @@ -642,7 +441,6 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, struct virtio_blk *vblk = disk->private_data; struct virtio_device *vdev = vblk->vdev; int i; - u8 writeback; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) @@ -652,11 +450,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, if (i < 0) return -EINVAL; - writeback = i; - vdev->config->set(vdev, - offsetof(struct virtio_blk_config, wce), - &writeback, sizeof(writeback)); - + virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); virtblk_update_cache_mode(vdev); return count; } @@ -680,12 +474,35 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); +static struct blk_mq_ops virtio_mq_ops = { + .queue_rq = virtio_queue_rq, + .map_queue = blk_mq_map_queue, + .alloc_hctx = blk_mq_alloc_single_hw_queue, + .free_hctx = blk_mq_free_single_hw_queue, +}; + +static struct blk_mq_reg virtio_mq_reg = { + .ops = &virtio_mq_ops, + .nr_hw_queues = 1, + .queue_depth = 64, + .numa_node = NUMA_NO_NODE, + .flags = BLK_MQ_F_SHOULD_MERGE, +}; + +static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, + struct request *rq, unsigned int nr) +{ + struct virtio_blk *vblk = data; + struct virtblk_req *vbr = rq->special; + + sg_init_table(vbr->sg, vblk->sg_elems); +} + static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; struct request_queue *q; int err, index; - int pool_size; u64 cap; u32 v, blk_size, sg_elems, opt_io_size; @@ -699,9 +516,9 @@ static int virtblk_probe(struct virtio_device *vdev) index = err; /* We need to know how many segments before we allocate. */ - err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, - offsetof(struct virtio_blk_config, seg_max), - &sg_elems); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX, + struct virtio_blk_config, seg_max, + &sg_elems); /* We need at least one SG element, whatever they say. */ if (err || !sg_elems) @@ -709,17 +526,14 @@ static int virtblk_probe(struct virtio_device *vdev) /* We need an extra sg elements at head and tail. 
*/ sg_elems += 2; - vdev->priv = vblk = kmalloc(sizeof(*vblk) + - sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); + vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); if (!vblk) { err = -ENOMEM; goto out_free_index; } - init_waitqueue_head(&vblk->queue_wait); vblk->vdev = vdev; vblk->sg_elems = sg_elems; - sg_init_table(vblk->sg, vblk->sg_elems); mutex_init(&vblk->config_lock); INIT_WORK(&vblk->config_work, virtblk_config_changed_work); @@ -728,31 +542,27 @@ static int virtblk_probe(struct virtio_device *vdev) err = init_vq(vblk); if (err) goto out_free_vblk; - - pool_size = sizeof(struct virtblk_req); - if (use_bio) - pool_size += sizeof(struct scatterlist) * sg_elems; - vblk->pool = mempool_create_kmalloc_pool(1, pool_size); - if (!vblk->pool) { - err = -ENOMEM; - goto out_free_vq; - } + spin_lock_init(&vblk->vq_lock); /* FIXME: How many partitions? How long is a piece of string? */ vblk->disk = alloc_disk(1 << PART_BITS); if (!vblk->disk) { err = -ENOMEM; - goto out_mempool; + goto out_free_vq; } - q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL); + virtio_mq_reg.cmd_size = + sizeof(struct virtblk_req) + + sizeof(struct scatterlist) * sg_elems; + + q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); if (!q) { err = -ENOMEM; goto out_put_disk; } - if (use_bio) - blk_queue_make_request(q, virtblk_make_request); + blk_mq_init_commands(q, virtblk_init_vbr, vblk); + q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); @@ -772,8 +582,7 @@ static int virtblk_probe(struct virtio_device *vdev) set_disk_ro(vblk->disk, 1); /* Host must always specify the capacity. */ - vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), - &cap, sizeof(cap)); + virtio_cread(vdev, struct virtio_blk_config, capacity, &cap); /* If capacity is too big, truncate with warning. */ if ((sector_t)cap != cap) { @@ -794,46 +603,45 @@ static int virtblk_probe(struct virtio_device *vdev) /* Host can optionally specify maximum segment size and number of * segments. 
*/ - err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, - offsetof(struct virtio_blk_config, size_max), - &v); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX, + struct virtio_blk_config, size_max, &v); if (!err) blk_queue_max_segment_size(q, v); else blk_queue_max_segment_size(q, -1U); /* Host can optionally specify the block size of the device */ - err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, - offsetof(struct virtio_blk_config, blk_size), - &blk_size); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, + struct virtio_blk_config, blk_size, + &blk_size); if (!err) blk_queue_logical_block_size(q, blk_size); else blk_size = queue_logical_block_size(q); /* Use topology information if available */ - err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, - offsetof(struct virtio_blk_config, physical_block_exp), - &physical_block_exp); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, + struct virtio_blk_config, physical_block_exp, + &physical_block_exp); if (!err && physical_block_exp) blk_queue_physical_block_size(q, blk_size * (1 << physical_block_exp)); - err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, - offsetof(struct virtio_blk_config, alignment_offset), - &alignment_offset); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, + struct virtio_blk_config, alignment_offset, + &alignment_offset); if (!err && alignment_offset) blk_queue_alignment_offset(q, blk_size * alignment_offset); - err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, - offsetof(struct virtio_blk_config, min_io_size), - &min_io_size); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, + struct virtio_blk_config, min_io_size, + &min_io_size); if (!err && min_io_size) blk_queue_io_min(q, blk_size * min_io_size); - err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, - offsetof(struct virtio_blk_config, opt_io_size), - &opt_io_size); + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, + struct virtio_blk_config, opt_io_size, + &opt_io_size); if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); @@ -857,8 +665,6 @@ out_del_disk: blk_cleanup_queue(vblk->disk->queue); out_put_disk: put_disk(vblk->disk); -out_mempool: - mempool_destroy(vblk->pool); out_free_vq: vdev->config->del_vqs(vdev); out_free_vblk: @@ -890,7 +696,6 @@ static void virtblk_remove(struct virtio_device *vdev) refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); put_disk(vblk->disk); - mempool_destroy(vblk->pool); vdev->config->del_vqs(vdev); kfree(vblk); @@ -899,7 +704,7 @@ static void virtblk_remove(struct virtio_device *vdev) ida_simple_remove(&vd_index_ida, index); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; @@ -914,10 +719,7 @@ static int virtblk_freeze(struct virtio_device *vdev) flush_work(&vblk->config_work); - spin_lock_irq(vblk->disk->queue->queue_lock); - blk_stop_queue(vblk->disk->queue); - spin_unlock_irq(vblk->disk->queue->queue_lock); - blk_sync_queue(vblk->disk->queue); + blk_mq_stop_hw_queues(vblk->disk->queue); vdev->config->del_vqs(vdev); return 0; @@ -930,11 +732,9 @@ static int virtblk_restore(struct virtio_device *vdev) vblk->config_enable = true; ret = init_vq(vdev->priv); - if (!ret) { - spin_lock_irq(vblk->disk->queue->queue_lock); - blk_start_queue(vblk->disk->queue); - spin_unlock_irq(vblk->disk->queue->queue_lock); - } + if (!ret) + blk_mq_start_stopped_hw_queues(vblk->disk->queue); + return ret; } #endif @@ -959,7 +759,7 @@ static struct virtio_driver 
virtio_blk = { .probe = virtblk_probe, .remove = virtblk_remove, .config_changed = virtblk_config_changed, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .freeze = virtblk_freeze, .restore = virtblk_restore, #endif diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index bf4b9d282c04..6620b73d0490 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -887,6 +887,8 @@ static int dispatch_discard_io(struct xen_blkif *blkif, unsigned long secure; struct phys_req preq; + xen_blkif_get(blkif); + preq.sector_number = req->u.discard.sector_number; preq.nr_sects = req->u.discard.nr_sectors; @@ -899,7 +901,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif, } blkif->st_ds_req++; - xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? BLKDEV_DISCARD_SECURE : 0; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index fe5c3cd10c34..c2014a0aa206 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -620,7 +620,7 @@ static void backend_changed(struct xenbus_watch *watch, } /* Front end dir is a number, which is used as the handle. */ - err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); + err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); if (err) return; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a4660bbee8a6..c4a4c9006288 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -121,7 +121,8 @@ struct blkfront_info struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; - struct list_head persistent_gnts; + struct list_head grants; + struct list_head indirect_pages; unsigned int persistent_gnts_c; unsigned long shadow_free; unsigned int feature_flush; @@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num) if (!gnt_list_entry) goto out_of_memory; - granted_page = alloc_page(GFP_NOIO); - if (!granted_page) { - kfree(gnt_list_entry); - goto out_of_memory; + if (info->feature_persistent) { + granted_page = alloc_page(GFP_NOIO); + if (!granted_page) { + kfree(gnt_list_entry); + goto out_of_memory; + } + gnt_list_entry->pfn = page_to_pfn(granted_page); } - gnt_list_entry->pfn = page_to_pfn(granted_page); gnt_list_entry->gref = GRANT_INVALID_REF; - list_add(&gnt_list_entry->node, &info->persistent_gnts); + list_add(&gnt_list_entry->node, &info->grants); i++; } @@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num) out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, - &info->persistent_gnts, node) { + &info->grants, node) { list_del(&gnt_list_entry->node); - __free_page(pfn_to_page(gnt_list_entry->pfn)); + if (info->feature_persistent) + __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } @@ -227,13 +231,14 @@ out_of_memory: } static struct grant *get_grant(grant_ref_t *gref_head, + unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; - BUG_ON(list_empty(&info->persistent_gnts)); - gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, + BUG_ON(list_empty(&info->grants)); + gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); @@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = 
gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); + if (!info->feature_persistent) { + BUG_ON(!pfn); + gnt_list_entry->pfn = pfn; + } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, @@ -400,10 +409,13 @@ static int blkif_queue_request(struct request *req) if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; - max_grefs = info->max_indirect_segments ? - info->max_indirect_segments + - INDIRECT_GREFS(info->max_indirect_segments) : - BLKIF_MAX_SEGMENTS_PER_REQUEST; + max_grefs = req->nr_phys_segments; + if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST) + /* + * If we are using indirect segments we need to account + * for the indirect grefs used in the request. + */ + max_grefs += INDIRECT_GREFS(req->nr_phys_segments); /* Check if we have enough grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { @@ -477,22 +489,34 @@ static int blkif_queue_request(struct request *req) if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { + unsigned long uninitialized_var(pfn); + if (segments) kunmap_atomic(segments); n = i / SEGS_PER_INDIRECT_FRAME; - gnt_list_entry = get_grant(&gref_head, info); + if (!info->feature_persistent) { + struct page *indirect_page; + + /* Fetch a pre-allocated page to use for indirect grefs */ + BUG_ON(list_empty(&info->indirect_pages)); + indirect_page = list_first_entry(&info->indirect_pages, + struct page, lru); + list_del(&indirect_page->lru); + pfn = page_to_pfn(indirect_page); + } + gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } - gnt_list_entry = get_grant(&gref_head, info); + gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; - if (rq_data_dir(req)) { + if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; @@ -904,21 +928,36 @@ static void blkif_free(struct blkfront_info *info, int suspend) blk_stop_queue(info->rq); /* Remove all persistent grants */ - if (!list_empty(&info->persistent_gnts)) { + if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, - &info->persistent_gnts, node) { + &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); info->persistent_gnts_c--; } - __free_page(pfn_to_page(persistent_gnt->pfn)); + if (info->feature_persistent) + __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); + /* + * Remove indirect pages, this only happens when using indirect + * descriptors but not persistent grants + */ + if (!list_empty(&info->indirect_pages)) { + struct page *indirect_page, *n; + + BUG_ON(info->feature_persistent); + list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { + list_del(&indirect_page->lru); + __free_page(indirect_page); + } + } + for (i = 0; i < BLK_RING_SIZE; i++) { /* * Clear persistent grants present in requests already @@ -933,7 +972,8 @@ static void blkif_free(struct blkfront_info *info, int suspend) for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); - 
__free_page(pfn_to_page(persistent_gnt->pfn)); + if (info->feature_persistent) + __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } @@ -992,7 +1032,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; - if (bret->operation == BLKIF_OP_READ) { + if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different @@ -1013,13 +1053,51 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { - list_add(&s->grants_used[i]->node, &info->persistent_gnts); - info->persistent_gnts_c++; + if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { + /* + * If the grant is still mapped by the backend (the + * backend has chosen to make this grant persistent) + * we add it at the head of the list, so it will be + * reused first. + */ + if (!info->feature_persistent) + pr_alert_ratelimited("backed has not unmapped grant: %u\n", + s->grants_used[i]->gref); + list_add(&s->grants_used[i]->node, &info->grants); + info->persistent_gnts_c++; + } else { + /* + * If the grant is not mapped by the backend we end the + * foreign access and add it to the tail of the list, + * so it will not be picked again unless we run out of + * persistent grants. + */ + gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); + s->grants_used[i]->gref = GRANT_INVALID_REF; + list_add_tail(&s->grants_used[i]->node, &info->grants); + } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { - list_add(&s->indirect_grants[i]->node, &info->persistent_gnts); - info->persistent_gnts_c++; + if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { + if (!info->feature_persistent) + pr_alert_ratelimited("backed has not unmapped grant: %u\n", + s->indirect_grants[i]->gref); + list_add(&s->indirect_grants[i]->node, &info->grants); + info->persistent_gnts_c++; + } else { + struct page *indirect_page; + + gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); + /* + * Add the used indirect page back to the list of + * available pages for indirect grefs. + */ + indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); + list_add(&indirect_page->lru, &info->indirect_pages); + s->indirect_grants[i]->gref = GRANT_INVALID_REF; + list_add_tail(&s->indirect_grants[i]->node, &info->grants); + } } } } @@ -1313,7 +1391,8 @@ static int blkfront_probe(struct xenbus_device *dev, spin_lock_init(&info->io_lock); info->xbdev = dev; info->vdevice = vdevice; - INIT_LIST_HEAD(&info->persistent_gnts); + INIT_LIST_HEAD(&info->grants); + INIT_LIST_HEAD(&info->indirect_pages); info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); @@ -1336,57 +1415,6 @@ static int blkfront_probe(struct xenbus_device *dev, return 0; } -/* - * This is a clone of md_trim_bio, used to split a bio into smaller ones - */ -static void trim_bio(struct bio *bio, int offset, int size) -{ - /* 'bio' is a cloned bio which we need to trim to match - * the given offset and size. 
- * This requires adjusting bi_sector, bi_size, and bi_io_vec - */ - int i; - struct bio_vec *bvec; - int sofar = 0; - - size <<= 9; - if (offset == 0 && size == bio->bi_size) - return; - - bio->bi_sector += offset; - bio->bi_size = size; - offset <<= 9; - clear_bit(BIO_SEG_VALID, &bio->bi_flags); - - while (bio->bi_idx < bio->bi_vcnt && - bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { - /* remove this whole bio_vec */ - offset -= bio->bi_io_vec[bio->bi_idx].bv_len; - bio->bi_idx++; - } - if (bio->bi_idx < bio->bi_vcnt) { - bio->bi_io_vec[bio->bi_idx].bv_offset += offset; - bio->bi_io_vec[bio->bi_idx].bv_len -= offset; - } - /* avoid any complications with bi_idx being non-zero*/ - if (bio->bi_idx) { - memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, - (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); - bio->bi_vcnt -= bio->bi_idx; - bio->bi_idx = 0; - } - /* Make sure vcnt and last bv are not too big */ - bio_for_each_segment(bvec, bio, i) { - if (sofar + bvec->bv_len > size) - bvec->bv_len = size - sofar; - if (bvec->bv_len == 0) { - bio->bi_vcnt = i; - break; - } - sofar += bvec->bv_len; - } -} - static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; @@ -1522,7 +1550,7 @@ static int blkif_recover(struct blkfront_info *info) (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); - trim_bio(cloned_bio, offset, size); + bio_trim(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); @@ -1660,6 +1688,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info) if (err) goto out_of_memory; + if (!info->feature_persistent && info->max_indirect_segments) { + /* + * We are using indirect descriptors but not persistent + * grants, we need to allocate a set of pages that can be + * used for mapping indirect grefs + */ + int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE; + + BUG_ON(!list_empty(&info->indirect_pages)); + for (i = 0; i < num; i++) { + struct page *indirect_page = alloc_page(GFP_NOIO); + if (!indirect_page) + goto out_of_memory; + list_add(&indirect_page->lru, &info->indirect_pages); + } + } + for (i = 0; i < BLK_RING_SIZE; i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, @@ -1690,6 +1735,13 @@ out_of_memory: kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } + if (!list_empty(&info->indirect_pages)) { + struct page *indirect_page, *n; + list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { + list_del(&indirect_page->lru); + __free_page(indirect_page); + } + } return -ENOMEM; } @@ -1959,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) bdev = bdget_disk(disk, 0); + if (!bdev) { + WARN(1, "Block device %s yanked out from us!\n", disk->disk_name); + goto out_mutex; + } if (bdev->bd_openers) goto out; @@ -1989,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) out: bdput(bdev); +out_mutex: mutex_unlock(&blkfront_mutex); } |
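
The virtio-blk hunks above replace the old request_fn and bio submission paths with a single blk-mq ->queue_rq handler registered through struct blk_mq_reg. The skeleton below pulls that registration pattern together into one minimal stand-alone sketch; it is not part of the patch, it assumes the 3.13-era blk-mq interface shown in the diff (struct blk_mq_reg, blk_mq_init_queue(), per-request driver data reached via rq->special), and all sketch_* names are invented for illustration.

/*
 * Minimal blk-mq skeleton against the 3.13-era API used by this patch.
 * Illustration only; not taken from virtio-blk.
 */
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static struct request_queue *sketch_q;

static int sketch_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* Per-request driver data sized by ->cmd_size lives in rq->special. */
	void *cmd = rq->special;

	(void)cmd;
	/*
	 * A real driver maps the request into a scatterlist and hands it to
	 * the device here; when the device ring is full it calls
	 * blk_mq_stop_hw_queue(hctx) and returns BLK_MQ_RQ_QUEUE_BUSY, then
	 * restarts the queue from its completion handler with
	 * blk_mq_start_stopped_hw_queues().  This skeleton just completes
	 * the request immediately.
	 */
	blk_mq_end_io(rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
};

static struct blk_mq_reg sketch_mq_reg = {
	.ops		= &sketch_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.cmd_size	= 64,	/* per-request space, as virtio-blk reserves for virtblk_req + sg */
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

static int __init sketch_init(void)
{
	sketch_q = blk_mq_init_queue(&sketch_mq_reg, NULL);
	if (!sketch_q)		/* error handling mirrors the check used in the patch */
		return -ENOMEM;
	return 0;
}

static void __exit sketch_exit(void)
{
	blk_cleanup_queue(sketch_q);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

As in virtio_queue_rq() above, the stop/BUSY/restart sequence replaces the old blk_stop_queue()/blk_start_queue() calls that the request_fn path made under queue_lock, and the patch accordingly switches virtblk_done() from the block queue_lock to the driver-private vq_lock.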