From ba6abf1352dc83e500a71e3ad9b39de0337f0c6b Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Sat, 30 Jul 2005 22:38:30 -0700 Subject: [PATCH] USB: ub 1/3: Axboe's quasi-S/G This is the quasi-S/G patch for ub as suggested by Jens Axboe at OLS and implemented that night before 4 a.m. Surprisingly, it worked right away... Alas, I had to skip some OLS partying, but it was for the good cause. Now the speed of ub is quite acceptable even on partitions with small block size. The ub does not really support S/G. Instead, it just tells the block layer that it does. Then, most of the time, the block layer merges requests and passes single-segment requests down to ub; everything works as before. Very rarely ub gets an unmerged S/G request. In such a case, it issues several commands to the device. I added a small array of counters to monitor the merging (sg_stat). This may be dropped later. Signed-off-by: Pete Zaitcev Signed-off-by: Greg Kroah-Hartman --- drivers/block/ub.c | 185 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 126 insertions(+), 59 deletions(-) (limited to 'drivers/block') diff --git a/drivers/block/ub.c b/drivers/block/ub.c index a026567f5d18..fb3d1e9bc407 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -16,9 +16,10 @@ * -- verify the 13 conditions and do bulk resets * -- kill last_pipe and simply do two-state clearing on both pipes * -- verify protocol (bulk) from USB descriptors (maybe...) - * -- highmem and sg + * -- highmem * -- move top_sense and work_bcs into separate allocations (if they survive) * for cache purists and esoteric architectures. + * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? * -- prune comments, they are too volumnous * -- Exterminate P3 printks * -- Resove XXX's @@ -171,7 +172,7 @@ struct bulk_cs_wrap { */ struct ub_dev; -#define UB_MAX_REQ_SG 1 +#define UB_MAX_REQ_SG 4 #define UB_MAX_SECTORS 64 /* @@ -240,13 +241,21 @@ struct ub_scsi_cmd { */ char *data; /* Requested buffer */ unsigned int len; /* Requested length */ - // struct scatterlist sgv[UB_MAX_REQ_SG]; struct ub_lun *lun; void (*done)(struct ub_dev *, struct ub_scsi_cmd *); void *back; }; +struct ub_request { + struct request *rq; + unsigned char dir; + unsigned int current_block; + unsigned int current_sg; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; +}; + /* */ struct ub_capacity { @@ -342,6 +351,8 @@ struct ub_lun { int readonly; int first_open; /* Kludge. See ub_bd_open. */ + struct ub_request urq; + /* Use Ingo's mempool if or when we have more than one command. */ /* * Currently we never need more than one command for the whole device.
@@ -389,6 +400,7 @@ struct ub_dev { struct bulk_cs_wrap work_bcs; struct usb_ctrlrequest work_cr; + int sg_stat[UB_MAX_REQ_SG+1]; struct ub_scsi_trace tr; }; @@ -398,10 +410,14 @@ static void ub_cleanup(struct ub_dev *sc); static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq); static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq); -static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq); +static void ub_scsi_build_block(struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq); +static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct request *rq); static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_end_rq(struct request *rq, int uptodate); +static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun, + struct ub_request *urq, struct ub_scsi_cmd *cmd); static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); static void ub_scsi_action(unsigned long _dev); @@ -523,6 +539,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, c cnt += sprintf(page + cnt, "qlen %d qmax %d\n", sc->cmd_queue.qlen, sc->cmd_queue.qmax); + cnt += sprintf(page + cnt, + "sg %d %d %d %d %d\n", + sc->sg_stat[0], + sc->sg_stat[1], + sc->sg_stat[2], + sc->sg_stat[3], + sc->sg_stat[4]); list_for_each (p, &sc->luns) { lun = list_entry(p, struct ub_lun, link); @@ -769,14 +792,15 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) return 0; } + if (lun->urq.rq != NULL) + return -1; if ((cmd = ub_get_cmd(lun)) == NULL) return -1; memset(cmd, 0, sizeof(struct ub_scsi_cmd)); blkdev_dequeue_request(rq); - if (blk_pc_request(rq)) { - rc = ub_cmd_build_packet(sc, cmd, rq); + rc = ub_cmd_build_packet(sc, lun, cmd, rq); } else { rc = ub_cmd_build_block(sc, lun, cmd, rq); } @@ -788,10 +812,10 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) cmd->state = UB_CMDST_INIT; cmd->lun = lun; cmd->done = ub_rw_cmd_done; - cmd->back = rq; + cmd->back = &lun->urq; cmd->tag = sc->tagcnt++; - if ((rc = ub_submit_scsi(sc, cmd)) != 0) { + if (ub_submit_scsi(sc, cmd) != 0) { ub_put_cmd(lun, cmd); ub_end_rq(rq, 0); return 0; @@ -803,12 +827,12 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq) { + struct ub_request *urq; int ub_dir; -#if 0 /* We use rq->buffer for now */ - struct scatterlist *sg; int n_elem; -#endif - unsigned int block, nblks; + + urq = &lun->urq; + memset(urq, 0, sizeof(struct ub_request)); if (rq_data_dir(rq) == WRITE) ub_dir = UB_DIR_WRITE; @@ -818,44 +842,19 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, /* * get scatterlist from block layer */ -#if 0 /* We use rq->buffer for now */ - sg = &cmd->sgv[0]; - n_elem = blk_rq_map_sg(q, rq, sg); + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); if (n_elem <= 0) { - ub_put_cmd(lun, cmd); - ub_end_rq(rq, 0); - blk_start_queue(q); - return 0; /* request with no s/g entries? */ + printk(KERN_INFO "%s: failed request map (%d)\n", + sc->name, n_elem); /* P3 */ + return -1; /* request with no s/g entries? 
*/ } - - if (n_elem != 1) { /* Paranoia */ + if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ printk(KERN_WARNING "%s: request with %d segments\n", sc->name, n_elem); - ub_put_cmd(lun, cmd); - ub_end_rq(rq, 0); - blk_start_queue(q); - return 0; - } -#endif - - /* - * XXX Unfortunately, this check does not work. It is quite possible - * to get bogus non-null rq->buffer if you allow sg by mistake. - */ - if (rq->buffer == NULL) { - /* - * This must not happen if we set the queue right. - * The block level must create bounce buffers for us. - */ - static int do_print = 1; - if (do_print) { - printk(KERN_WARNING "%s: unmapped block request" - " flags 0x%lx sectors %lu\n", - sc->name, rq->flags, rq->nr_sectors); - do_print = 0; - } return -1; } + urq->nsg = n_elem; + sc->sg_stat[n_elem]++; /* * build the command @@ -863,10 +862,29 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, * The call to blk_queue_hardsect_size() guarantees that request * is aligned, but it is given in terms of 512 byte units, always. */ - block = rq->sector >> lun->capacity.bshift; - nblks = rq->nr_sectors >> lun->capacity.bshift; + urq->current_block = rq->sector >> lun->capacity.bshift; + // nblks = rq->nr_sectors >> lun->capacity.bshift; + + urq->rq = rq; + urq->current_sg = 0; + urq->dir = ub_dir; + + ub_scsi_build_block(lun, cmd, urq); + return 0; +} + +static void ub_scsi_build_block(struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq) +{ + struct scatterlist *sg; + unsigned int block, nblks; + + sg = &urq->sgv[urq->current_sg]; + + block = urq->current_block; + nblks = sg->length >> (lun->capacity.bshift + 9); - cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; + cmd->cdb[0] = (urq->dir == UB_DIR_READ)? READ_10: WRITE_10; /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ cmd->cdb[2] = block >> 24; cmd->cdb[3] = block >> 16; @@ -876,16 +894,20 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, cmd->cdb[8] = nblks; cmd->cdb_len = 10; - cmd->dir = ub_dir; - cmd->data = rq->buffer; - cmd->len = rq->nr_sectors * 512; - - return 0; + cmd->dir = urq->dir; + cmd->data = page_address(sg->page) + sg->offset; + cmd->len = sg->length; } -static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - struct request *rq) +static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct request *rq) { + struct ub_request *urq; + + urq = &lun->urq; + memset(urq, 0, sizeof(struct ub_request)); + urq->rq = rq; + sc->sg_stat[0]++; if (rq->data_len != 0 && rq->data == NULL) { static int do_print = 1; @@ -917,12 +939,13 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { - struct request *rq = cmd->back; struct ub_lun *lun = cmd->lun; - struct gendisk *disk = lun->disk; - request_queue_t *q = disk->queue; + struct ub_request *urq = cmd->back; + struct request *rq; int uptodate; + rq = urq->rq; + if (blk_pc_request(rq)) { /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); @@ -934,9 +957,19 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) else uptodate = 0; + if (cmd->error == 0 && urq->current_sg+1 < urq->nsg) { + if (ub_request_advance(sc, lun, urq, cmd) == 0) { + /* Stay on target... 
*/ + return; + } + uptodate = 0; + } + + urq->rq = NULL; + ub_put_cmd(lun, cmd); ub_end_rq(rq, uptodate); - blk_start_queue(q); + blk_start_queue(lun->disk->queue); } static void ub_end_rq(struct request *rq, int uptodate) @@ -948,6 +981,40 @@ static void ub_end_rq(struct request *rq, int uptodate) end_that_request_last(rq); } +static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun, + struct ub_request *urq, struct ub_scsi_cmd *cmd) +{ + struct scatterlist *sg; + unsigned int nblks; + + /* XXX This is temporary, until we sort out S/G in packet requests. */ + if (blk_pc_request(urq->rq)) { + printk(KERN_WARNING + "2-segment packet request completed\n"); /* P3 */ + return -1; + } + + sg = &urq->sgv[urq->current_sg]; + nblks = sg->length >> (lun->capacity.bshift + 9); + urq->current_block += nblks; + urq->current_sg++; + sg++; + + memset(cmd, 0, sizeof(struct ub_scsi_cmd)); + ub_scsi_build_block(lun, cmd, urq); + cmd->state = UB_CMDST_INIT; + cmd->lun = lun; + cmd->done = ub_rw_cmd_done; + cmd->back = &lun->urq; + + cmd->tag = sc->tagcnt++; + if (ub_submit_scsi(sc, cmd) != 0) { + return -1; + } + + return 0; +} + /* * Submit a regular SCSI operation (not an auto-sense). * -- cgit v1.2.3 From 07d4fd2566ddbf2a91ff3cde80ddf449ab82c381 Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Sat, 30 Jul 2005 22:51:45 -0700 Subject: [PATCH] USB: ub 2/3: Fold one line Evidently, Yani Ioannou's display is wider than mine. Signed-off-by: Pete Zaitcev Signed-off-by: Greg Kroah-Hartman --- drivers/block/ub.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/block') diff --git a/drivers/block/ub.c b/drivers/block/ub.c index fb3d1e9bc407..fe81d2febb63 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -516,7 +516,8 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd, } } -static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page) +static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, + char *page) { struct usb_interface *intf; struct ub_dev *sc; -- cgit v1.2.3 From 6c1eb8c1c3ec2df00b629ab4fe7fe04a95129f08 Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Sat, 30 Jul 2005 22:51:52 -0700 Subject: [PATCH] USB: ub 3/3: death to ub_bd_rq_fn_1 When Al Viro saw the ub.c, he observed that it was a proof positive of Linus not reading patches anymore: names like fo_ob_ar_ba_2 used to cause serious fireworks. In my defence, any good scheme can be pushed to the realm of absurd if pushed far enough. 
Signed-off-by: Pete Zaitcev Signed-off-by: Greg Kroah-Hartman --- drivers/block/ub.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/block') diff --git a/drivers/block/ub.c b/drivers/block/ub.c index fe81d2febb63..692553270401 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -407,7 +407,7 @@ struct ub_dev { /* */ static void ub_cleanup(struct ub_dev *sc); -static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq); +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq); static void ub_scsi_build_block(struct ub_lun *lun, @@ -768,20 +768,20 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) * The request function is our main entry point */ -static void ub_bd_rq_fn(request_queue_t *q) +static void ub_request_fn(request_queue_t *q) { struct ub_lun *lun = q->queuedata; struct request *rq; while ((rq = elv_next_request(q)) != NULL) { - if (ub_bd_rq_fn_1(lun, rq) != 0) { + if (ub_request_fn_1(lun, rq) != 0) { blk_stop_queue(q); break; } } } -static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) { struct ub_dev *sc = lun->udev; struct ub_scsi_cmd *cmd; @@ -2357,7 +2357,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */ rc = -ENOMEM; - if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL) + if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL) goto err_blkqinit; disk->queue = q; -- cgit v1.2.3 From a1cf96efbabac2f8af6f75286ffcefd40b0a466c Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Sun, 14 Aug 2005 21:16:03 -0700 Subject: [PATCH] USB: ub 4: Zaitcev's quasi-S/G Back out Axboe-style quasi-S/G and replace it with one command and repeated URBs. This is similar to what usb-storage does, only instead of a few URBs allocated together, one URB is reused. Jens's idea was very nice, but it collapsed when I had to support packet commads for CD burning. I cannot issue two or more packet commands where application expected only one. However, burning does not work completely yet. The cdrecord starts, recognizes the device, then aborts without writing a TOC. Signed-off-by: Pete Zaitcev Signed-off-by: Greg Kroah-Hartman --- drivers/block/ub.c | 287 +++++++++++++++++++++++------------------------------ 1 file changed, 123 insertions(+), 164 deletions(-) (limited to 'drivers/block') diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 692553270401..57d3279a8815 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -235,27 +235,16 @@ struct ub_scsi_cmd { int stat_count; /* Retries getting status. */ - /* - * We do not support transfers from highmem pages - * because the underlying USB framework does not do what we need. - */ - char *data; /* Requested buffer */ unsigned int len; /* Requested length */ + unsigned int current_sg; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; struct ub_lun *lun; void (*done)(struct ub_dev *, struct ub_scsi_cmd *); void *back; }; -struct ub_request { - struct request *rq; - unsigned char dir; - unsigned int current_block; - unsigned int current_sg; - unsigned int nsg; /* sgv[nsg] */ - struct scatterlist sgv[UB_MAX_REQ_SG]; -}; - /* */ struct ub_capacity { @@ -351,8 +340,6 @@ struct ub_lun { int readonly; int first_open; /* Kludge. See ub_bd_open. 
*/ - struct ub_request urq; - /* Use Ingo's mempool if or when we have more than one command. */ /* * Currently we never need more than one command for the whole device. @@ -410,19 +397,16 @@ static void ub_cleanup(struct ub_dev *sc); static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq); -static void ub_scsi_build_block(struct ub_lun *lun, - struct ub_scsi_cmd *cmd, struct ub_request *urq); static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq); static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_end_rq(struct request *rq, int uptodate); -static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun, - struct ub_request *urq, struct ub_scsi_cmd *cmd); static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); static void ub_scsi_action(unsigned long _dev); static void ub_scsi_dispatch(struct ub_dev *sc); static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); @@ -793,8 +777,6 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) return 0; } - if (lun->urq.rq != NULL) - return -1; if ((cmd = ub_get_cmd(lun)) == NULL) return -1; memset(cmd, 0, sizeof(struct ub_scsi_cmd)); @@ -813,7 +795,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) cmd->state = UB_CMDST_INIT; cmd->lun = lun; cmd->done = ub_rw_cmd_done; - cmd->back = &lun->urq; + cmd->back = rq; cmd->tag = sc->tagcnt++; if (ub_submit_scsi(sc, cmd) != 0) { @@ -828,22 +810,20 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq) { - struct ub_request *urq; int ub_dir; int n_elem; - - urq = &lun->urq; - memset(urq, 0, sizeof(struct ub_request)); + unsigned int block, nblks; if (rq_data_dir(rq) == WRITE) ub_dir = UB_DIR_WRITE; else ub_dir = UB_DIR_READ; + cmd->dir = ub_dir; /* * get scatterlist from block layer */ - n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); if (n_elem <= 0) { printk(KERN_INFO "%s: failed request map (%d)\n", sc->name, n_elem); /* P3 */ @@ -854,7 +834,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, sc->name, n_elem); return -1; } - urq->nsg = n_elem; + cmd->nsg = n_elem; sc->sg_stat[n_elem]++; /* @@ -863,29 +843,10 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, * The call to blk_queue_hardsect_size() guarantees that request * is aligned, but it is given in terms of 512 byte units, always. 
*/ - urq->current_block = rq->sector >> lun->capacity.bshift; - // nblks = rq->nr_sectors >> lun->capacity.bshift; - - urq->rq = rq; - urq->current_sg = 0; - urq->dir = ub_dir; + block = rq->sector >> lun->capacity.bshift; + nblks = rq->nr_sectors >> lun->capacity.bshift; - ub_scsi_build_block(lun, cmd, urq); - return 0; -} - -static void ub_scsi_build_block(struct ub_lun *lun, - struct ub_scsi_cmd *cmd, struct ub_request *urq) -{ - struct scatterlist *sg; - unsigned int block, nblks; - - sg = &urq->sgv[urq->current_sg]; - - block = urq->current_block; - nblks = sg->length >> (lun->capacity.bshift + 9); - - cmd->cdb[0] = (urq->dir == UB_DIR_READ)? READ_10: WRITE_10; + cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ cmd->cdb[2] = block >> 24; cmd->cdb[3] = block >> 16; @@ -895,34 +856,15 @@ static void ub_scsi_build_block(struct ub_lun *lun, cmd->cdb[8] = nblks; cmd->cdb_len = 10; - cmd->dir = urq->dir; - cmd->data = page_address(sg->page) + sg->offset; - cmd->len = sg->length; + cmd->len = rq->nr_sectors * 512; + + return 0; } static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, struct ub_scsi_cmd *cmd, struct request *rq) { - struct ub_request *urq; - - urq = &lun->urq; - memset(urq, 0, sizeof(struct ub_request)); - urq->rq = rq; - sc->sg_stat[0]++; - - if (rq->data_len != 0 && rq->data == NULL) { - static int do_print = 1; - if (do_print) { - printk(KERN_WARNING "%s: unmapped packet request" - " flags 0x%lx length %d\n", - sc->name, rq->flags, rq->data_len); - do_print = 0; - } - return -1; - } - - memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); - cmd->cdb_len = rq->cmd_len; + int n_elem; if (rq->data_len == 0) { cmd->dir = UB_DIR_NONE; @@ -931,8 +873,29 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, cmd->dir = UB_DIR_WRITE; else cmd->dir = UB_DIR_READ; + } - cmd->data = rq->data; + + /* + * get scatterlist from block layer + */ + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); + if (n_elem < 0) { + printk(KERN_INFO "%s: failed request map (%d)\n", + sc->name, n_elem); /* P3 */ + return -1; + } + if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ + printk(KERN_WARNING "%s: request with %d segments\n", + sc->name, n_elem); + return -1; + } + cmd->nsg = n_elem; + sc->sg_stat[n_elem]++; + + memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); + cmd->cdb_len = rq->cmd_len; + cmd->len = rq->data_len; return 0; @@ -940,33 +903,32 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { + struct request *rq = cmd->back; struct ub_lun *lun = cmd->lun; - struct ub_request *urq = cmd->back; - struct request *rq; int uptodate; - rq = urq->rq; - - if (blk_pc_request(rq)) { - /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ - memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); - rq->sense_len = UB_SENSE_SIZE; - } - - if (cmd->error == 0) + if (cmd->error == 0) { uptodate = 1; - else - uptodate = 0; - if (cmd->error == 0 && urq->current_sg+1 < urq->nsg) { - if (ub_request_advance(sc, lun, urq, cmd) == 0) { - /* Stay on target... 
*/ - return; + if (blk_pc_request(rq)) { + if (cmd->act_len >= rq->data_len) + rq->data_len = 0; + else + rq->data_len -= cmd->act_len; } + } else { uptodate = 0; - } - urq->rq = NULL; + if (blk_pc_request(rq)) { + /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ + memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); + rq->sense_len = UB_SENSE_SIZE; + if (sc->top_sense[0] != 0) + rq->errors = SAM_STAT_CHECK_CONDITION; + else + rq->errors = DID_ERROR << 16; + } + } ub_put_cmd(lun, cmd); ub_end_rq(rq, uptodate); @@ -982,40 +944,6 @@ static void ub_end_rq(struct request *rq, int uptodate) end_that_request_last(rq); } -static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun, - struct ub_request *urq, struct ub_scsi_cmd *cmd) -{ - struct scatterlist *sg; - unsigned int nblks; - - /* XXX This is temporary, until we sort out S/G in packet requests. */ - if (blk_pc_request(urq->rq)) { - printk(KERN_WARNING - "2-segment packet request completed\n"); /* P3 */ - return -1; - } - - sg = &urq->sgv[urq->current_sg]; - nblks = sg->length >> (lun->capacity.bshift + 9); - urq->current_block += nblks; - urq->current_sg++; - sg++; - - memset(cmd, 0, sizeof(struct ub_scsi_cmd)); - ub_scsi_build_block(lun, cmd, urq); - cmd->state = UB_CMDST_INIT; - cmd->lun = lun; - cmd->done = ub_rw_cmd_done; - cmd->back = &lun->urq; - - cmd->tag = sc->tagcnt++; - if (ub_submit_scsi(sc, cmd) != 0) { - return -1; - } - - return 0; -} - /* * Submit a regular SCSI operation (not an auto-sense). * @@ -1171,7 +1099,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct urb *urb = &sc->work_urb; struct bulk_cs_wrap *bcs; - int pipe; int rc; if (atomic_read(&sc->poison)) { @@ -1272,38 +1199,13 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) goto Bad_End; } - if (cmd->dir == UB_DIR_NONE) { + if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { ub_state_stat(sc, cmd); return; } - UB_INIT_COMPLETION(sc->work_done); - - if (cmd->dir == UB_DIR_READ) - pipe = sc->recv_bulk_pipe; - else - pipe = sc->send_bulk_pipe; - sc->last_pipe = pipe; - usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, - cmd->data, cmd->len, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; - sc->work_urb.actual_length = 0; - sc->work_urb.error_count = 0; - sc->work_urb.status = 0; - - if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { - /* XXX Clear stalls */ - printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */ - ub_complete(&sc->work_done); - ub_state_done(sc, cmd, rc); - return; - } - - sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; - add_timer(&sc->work_timer); - - cmd->state = UB_CMDST_DATA; - ub_cmdtr_state(sc, cmd); + // udelay(125); // usb-storage has this + ub_data_start(sc, cmd); } else if (cmd->state == UB_CMDST_DATA) { if (urb->status == -EPIPE) { @@ -1325,16 +1227,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) if (urb->status == -EOVERFLOW) { /* * A babble? Failure, but we must transfer CSW now. + * XXX This is going to end in perpetual babble. Reset. */ cmd->error = -EOVERFLOW; /* A cheap trick... 
*/ - } else { - if (urb->status != 0) - goto Bad_End; + ub_state_stat(sc, cmd); + return; } + if (urb->status != 0) + goto Bad_End; - cmd->act_len = urb->actual_length; + cmd->act_len += urb->actual_length; ub_cmdtr_act_len(sc, cmd); + if (++cmd->current_sg < cmd->nsg) { + ub_data_start(sc, cmd); + return; + } ub_state_stat(sc, cmd); } else if (cmd->state == UB_CMDST_STAT) { @@ -1467,6 +1375,46 @@ Bad_End: /* Little Excel is dead */ ub_state_done(sc, cmd, -EIO); } +/* + * Factorization helper for the command state machine: + * Initiate a data segment transfer. + */ +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; + int pipe; + int rc; + + UB_INIT_COMPLETION(sc->work_done); + + if (cmd->dir == UB_DIR_READ) + pipe = sc->recv_bulk_pipe; + else + pipe = sc->send_bulk_pipe; + sc->last_pipe = pipe; + usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, + page_address(sg->page) + sg->offset, sg->length, + ub_urb_complete, sc); + sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; + sc->work_urb.actual_length = 0; + sc->work_urb.error_count = 0; + sc->work_urb.status = 0; + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { + /* XXX Clear stalls */ + printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */ + ub_complete(&sc->work_done); + ub_state_done(sc, cmd, rc); + return; + } + + sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; + add_timer(&sc->work_timer); + + cmd->state = UB_CMDST_DATA; + ub_cmdtr_state(sc, cmd); +} + /* * Factorization helper for the command state machine: * Finish the command. @@ -1552,6 +1500,7 @@ static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) { struct ub_scsi_cmd *scmd; + struct scatterlist *sg; int rc; if (cmd->cdb[0] == REQUEST_SENSE) { @@ -1560,12 +1509,17 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) } scmd = &sc->top_rqs_cmd; + memset(scmd, 0, sizeof(struct ub_scsi_cmd)); scmd->cdb[0] = REQUEST_SENSE; scmd->cdb[4] = UB_SENSE_SIZE; scmd->cdb_len = 6; scmd->dir = UB_DIR_READ; scmd->state = UB_CMDST_INIT; - scmd->data = sc->top_sense; + scmd->nsg = 1; + sg = &scmd->sgv[0]; + sg->page = virt_to_page(sc->top_sense); + sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1); + sg->length = UB_SENSE_SIZE; scmd->len = UB_SENSE_SIZE; scmd->lun = cmd->lun; scmd->done = ub_top_sense_done; @@ -1628,7 +1582,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, */ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) { - unsigned char *sense = scmd->data; + unsigned char *sense = sc->top_sense; struct ub_scsi_cmd *cmd; /* @@ -1920,6 +1874,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, struct ub_capacity *ret) { struct ub_scsi_cmd *cmd; + struct scatterlist *sg; char *p; enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; unsigned long flags; @@ -1940,7 +1895,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, cmd->cdb_len = 10; cmd->dir = UB_DIR_READ; cmd->state = UB_CMDST_INIT; - cmd->data = p; + cmd->nsg = 1; + sg = &cmd->sgv[0]; + sg->page = virt_to_page(p); + sg->offset = (unsigned int)p & (PAGE_SIZE-1); + sg->length = 8; cmd->len = 8; cmd->lun = lun; cmd->done = ub_probe_done; -- cgit v1.2.3 From b375a0495fd622037560c73c05f23ae6f127bb0c Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 29 Jul 2005 16:11:07 -0400 Subject: [PATCH] USB: URB_ASYNC_UNLINK 
flag removed from the kernel 29 July 2005, Cambridge, MA: This afternoon Alan Stern submitted a patch to remove the URB_ASYNC_UNLINK flag from the Linux kernel. Mr. Stern explained, "This flag is a relic from an earlier, less-well-designed system. For over a year it hasn't been used for anything other than printing warning messages." An anonymous spokesman for the Linux kernel development community commented, "This is exactly the sort of thing we see happening all the time. As the kernel evolves, support for old techniques and old code can be jettisoned and replaced by newer, better approaches. Proprietary operating systems do not have the freedom or flexibility to change so quickly." Mr. Stern, a staff member at Harvard University's Rowland Institute who works on Linux only as a hobby, noted that the patch (labelled as548) did not update two files, keyspan.c and option.c, in the USB drivers' "serial" subdirectory. "Those files need more extensive changes," he remarked. "They examine the status field of several URBs at times when they're not supposed to. That will need to be fixed before the URB_ASYNC_UNLINK flag is removed." Greg Kroah-Hartman, the kernel maintainer responsible for overseeing all of Linux's USB drivers, did not respond to our inquiries or return our calls. His only comment was "Applied, thanks." Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/block/ub.c | 8 ++++---- drivers/net/irda/irda-usb.c | 13 ++----------- drivers/usb/atm/cxacru.c | 2 -- drivers/usb/core/message.c | 4 +--- drivers/usb/core/urb.c | 26 ++++---------------------- drivers/usb/input/hid-core.c | 6 +++--- drivers/usb/misc/auerswald.c | 3 +-- drivers/usb/misc/sisusbvga/sisusb.c | 4 ++-- drivers/usb/misc/usbtest.c | 2 -- drivers/usb/net/catc.c | 2 -- drivers/usb/net/kaweth.c | 1 - drivers/usb/net/pegasus.c | 1 - drivers/usb/net/rtl8150.c | 1 - drivers/usb/net/usbnet.c | 2 -- drivers/usb/net/zd1201.c | 1 - drivers/usb/storage/transport.c | 7 +++---- include/linux/usb.h | 9 +-------- sound/usb/usbaudio.c | 10 ++++------ 18 files changed, 25 insertions(+), 77 deletions(-) (limited to 'drivers/block') diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 57d3279a8815..aa0bf7ee008d 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -1010,7 +1010,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->send_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; + sc->work_urb.transfer_flags = 0; /* Fill what we shouldn't be filling, because usb-storage did so. 
*/ sc->work_urb.actual_length = 0; @@ -1395,7 +1395,7 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, page_address(sg->page) + sg->offset, sg->length, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; + sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -1442,7 +1442,7 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->recv_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; + sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -1563,7 +1563,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); - sc->work_urb.transfer_flags = URB_ASYNC_UNLINK; + sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 46e0022d3258..6c766fdc51a6 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -267,7 +267,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self) frame, IRDA_USB_SPEED_MTU, speed_bulk_callback, self); urb->transfer_buffer_length = USB_IRDA_HEADER; - urb->transfer_flags = URB_ASYNC_UNLINK; + urb->transfer_flags = 0; /* Irq disabled -> GFP_ATOMIC */ if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) { @@ -401,15 +401,12 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev) skb->data, IRDA_SKB_MAX_MTU, write_bulk_callback, skb); urb->transfer_buffer_length = skb->len; - /* Note : unlink *must* be Asynchronous because of the code in - * irda_usb_net_timeout() -> call in irq - Jean II */ - urb->transfer_flags = URB_ASYNC_UNLINK; /* This flag (URB_ZERO_PACKET) indicates that what we send is not * a continuous stream of data but separate packets. * In this case, the USB layer will insert an empty USB frame (TD) * after each of our packets that is exact multiple of the frame size. * This is how the dongle will detect the end of packet - Jean II */ - urb->transfer_flags |= URB_ZERO_PACKET; + urb->transfer_flags = URB_ZERO_PACKET; /* Generate min turn time. FIXME: can we do better than this? */ /* Trying to a turnaround time at this level is trying to measure @@ -630,8 +627,6 @@ static void irda_usb_net_timeout(struct net_device *netdev) * in completion handler, because urb->status will * be -ENOENT. We will fix that at the next watchdog, * leaving more time to USB to recover... - * Also, we are in interrupt, so we need to have - * URB_ASYNC_UNLINK to work properly... * Jean II */ done = 1; break; @@ -1008,9 +1003,7 @@ static int irda_usb_net_close(struct net_device *netdev) } } /* Cancel Tx and speed URB - need to be synchronous to avoid races */ - self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK; usb_kill_urb(self->tx_urb); - self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK; usb_kill_urb(self->speed_urb); /* Stop and remove instance of IrLAP */ @@ -1521,9 +1514,7 @@ static void irda_usb_disconnect(struct usb_interface *intf) usb_kill_urb(self->rx_urb[i]); /* Cancel Tx and speed URB. * Toggle flags to make sure it's synchronous. 
*/ - self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK; usb_kill_urb(self->tx_urb); - self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK; usb_kill_urb(self->speed_urb); } diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 8e184e2641cb..79861ee12a29 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -715,13 +715,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done, 1); - instance->rcv_urb->transfer_flags |= URB_ASYNC_UNLINK; usb_fill_int_urb(instance->snd_urb, usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done, 4); - instance->snd_urb->transfer_flags |= URB_ASYNC_UNLINK; init_MUTEX(&instance->cm_serialize); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 88d1b376f67c..74197249c245 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -48,7 +48,6 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length) init_completion(&done); urb->context = &done; - urb->transfer_flags |= URB_ASYNC_UNLINK; urb->actual_length = 0; status = usb_submit_urb(urb, GFP_NOIO); @@ -357,8 +356,7 @@ int usb_sg_init ( if (!io->urbs) goto nomem; - urb_flags = URB_ASYNC_UNLINK | URB_NO_TRANSFER_DMA_MAP - | URB_NO_INTERRUPT; + urb_flags = URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT; if (usb_pipein (pipe)) urb_flags |= URB_SHORT_NOT_OK; diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index c0feee25ff0a..c846fefb7386 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -309,9 +309,8 @@ int usb_submit_urb(struct urb *urb, unsigned mem_flags) unsigned int allowed; /* enforce simple/standard policy */ - allowed = URB_ASYNC_UNLINK; // affects later unlinks - allowed |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); - allowed |= URB_NO_INTERRUPT; + allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | + URB_NO_INTERRUPT); switch (temp) { case PIPE_BULK: if (is_out) @@ -400,14 +399,8 @@ int usb_submit_urb(struct urb *urb, unsigned mem_flags) * canceled (rather than any other code) and will quickly be removed * from host controller data structures. * - * In the past, clearing the URB_ASYNC_UNLINK transfer flag for the - * URB indicated that the request was synchronous. This usage is now - * deprecated; if the flag is clear the call will be forwarded to - * usb_kill_urb() and the return value will be 0. In the future, drivers - * should call usb_kill_urb() directly for synchronous unlinking. - * - * When the URB_ASYNC_UNLINK transfer flag for the URB is set, this - * request is asynchronous. Success is indicated by returning -EINPROGRESS, + * This request is always asynchronous. + * Success is indicated by returning -EINPROGRESS, * at which time the URB will normally have been unlinked but not yet * given back to the device driver. When it is called, the completion * function will see urb->status == -ECONNRESET. Failure is indicated @@ -453,17 +446,6 @@ int usb_unlink_urb(struct urb *urb) { if (!urb) return -EINVAL; - if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) { -#ifdef CONFIG_DEBUG_KERNEL - if (printk_ratelimit()) { - printk(KERN_NOTICE "usb_unlink_urb() is deprecated for " - "synchronous unlinks. 
Use usb_kill_urb() instead.\n"); - WARN_ON(1); - } -#endif - usb_kill_urb(urb); - return 0; - } if (!(urb->dev && urb->dev->bus && urb->dev->bus->op)) return -ENODEV; return urb->dev->bus->op->unlink_urb(urb, -ECONNRESET); diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c index 719c0316cc39..1ab95d24c5e2 100644 --- a/drivers/usb/input/hid-core.c +++ b/drivers/usb/input/hid-core.c @@ -1688,7 +1688,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) usb_fill_int_urb(hid->urbin, dev, pipe, hid->inbuf, 0, hid_irq_in, hid, interval); hid->urbin->transfer_dma = hid->inbuf_dma; - hid->urbin->transfer_flags |=(URB_NO_TRANSFER_DMA_MAP | URB_ASYNC_UNLINK); + hid->urbin->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } else { if (hid->urbout) continue; @@ -1698,7 +1698,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) usb_fill_int_urb(hid->urbout, dev, pipe, hid->outbuf, 0, hid_irq_out, hid, interval); hid->urbout->transfer_dma = hid->outbuf_dma; - hid->urbout->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_ASYNC_UNLINK); + hid->urbout->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } } @@ -1750,7 +1750,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) hid->ctrlbuf, 1, hid_ctrl, hid); hid->urbctrl->setup_dma = hid->cr_dma; hid->urbctrl->transfer_dma = hid->ctrlbuf_dma; - hid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | URB_ASYNC_UNLINK); + hid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); return hid; diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c index 6f7994f5a714..ae4681f9f0ea 100644 --- a/drivers/usb/misc/auerswald.c +++ b/drivers/usb/misc/auerswald.c @@ -426,7 +426,7 @@ static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb) /* cancel an urb which is submitted to the chain the result is 0 if the urb is cancelled, or -EINPROGRESS if - URB_ASYNC_UNLINK is set and the function is successfully started. + the function is successfully started. 
*/ static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb) { @@ -515,7 +515,6 @@ static void auerchain_unlink_all (pauerchain_t acp) acep = acp->active; if (acep) { urbp = acep->urbp; - urbp->transfer_flags &= ~URB_ASYNC_UNLINK; dbg ("unlink active urb"); usb_kill_urb (urbp); } diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 2fd12264fd53..d63ce6c030f3 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -229,7 +229,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe, usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len, sisusb_bulk_completeout, &sisusb->urbout_context[index]); - urb->transfer_flags |= (tflags | URB_ASYNC_UNLINK); + urb->transfer_flags |= tflags; urb->actual_length = 0; if ((urb->transfer_dma = transfer_dma)) @@ -295,7 +295,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data, usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len, sisusb_bulk_completein, sisusb); - urb->transfer_flags |= (tflags | URB_ASYNC_UNLINK); + urb->transfer_flags |= tflags; urb->actual_length = 0; if ((urb->transfer_dma = transfer_dma)) diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index fd7fb98e4b20..54799eb0bc60 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -986,7 +986,6 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param) u->context = &context; u->complete = ctrl_complete; - u->transfer_flags |= URB_ASYNC_UNLINK; } /* queue the urbs */ @@ -1052,7 +1051,6 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async) urb = simple_alloc_urb (testdev_to_usbdev (dev), pipe, size); if (!urb) return -ENOMEM; - urb->transfer_flags |= URB_ASYNC_UNLINK; urb->context = &completion; urb->complete = unlink1_callback; diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c index c8be912f24e1..37ef365a2472 100644 --- a/drivers/usb/net/catc.c +++ b/drivers/usb/net/catc.c @@ -383,7 +383,6 @@ static void catc_tx_done(struct urb *urb, struct pt_regs *regs) if (urb->status == -ECONNRESET) { dbg("Tx Reset."); - urb->transfer_flags &= ~URB_ASYNC_UNLINK; urb->status = 0; catc->netdev->trans_start = jiffies; catc->stats.tx_errors++; @@ -445,7 +444,6 @@ static void catc_tx_timeout(struct net_device *netdev) struct catc *catc = netdev_priv(netdev); warn("Transmit timed out."); - catc->tx_urb->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(catc->tx_urb); } diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c index 7ffa99b9760f..e04b0ce3611a 100644 --- a/drivers/usb/net/kaweth.c +++ b/drivers/usb/net/kaweth.c @@ -787,7 +787,6 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) kaweth_usb_transmit_complete, kaweth); kaweth->end = 0; - kaweth->tx_urb->transfer_flags |= URB_ASYNC_UNLINK; if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC))) { diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index fcd6d3ccef44..7484d34780fc 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c @@ -825,7 +825,6 @@ static void pegasus_tx_timeout(struct net_device *net) pegasus_t *pegasus = netdev_priv(net); if (netif_msg_timer(pegasus)) printk(KERN_WARNING "%s: tx timeout\n", net->name); - pegasus->tx_urb->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(pegasus->tx_urb); pegasus->stats.tx_errors++; } diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c index 59ab40ebb394..c3d4e3589e30 
100644 --- a/drivers/usb/net/rtl8150.c +++ b/drivers/usb/net/rtl8150.c @@ -653,7 +653,6 @@ static void rtl8150_tx_timeout(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); warn("%s: Tx timeout.", netdev->name); - dev->tx_urb->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(dev->tx_urb); dev->stats.tx_errors++; } diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index 4682696450db..3c6eef4168e5 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c @@ -2987,7 +2987,6 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, unsigned flags) usb_fill_bulk_urb (urb, dev->udev, dev->in, skb->data, size, rx_complete, skb); - urb->transfer_flags |= URB_ASYNC_UNLINK; spin_lock_irqsave (&dev->rxq.lock, lockflags); @@ -3561,7 +3560,6 @@ static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) usb_fill_bulk_urb (urb, dev->udev, dev->out, skb->data, skb->len, tx_complete, skb); - urb->transfer_flags |= URB_ASYNC_UNLINK; /* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c index fc013978837e..c4e479ee926a 100644 --- a/drivers/usb/net/zd1201.c +++ b/drivers/usb/net/zd1201.c @@ -847,7 +847,6 @@ static void zd1201_tx_timeout(struct net_device *dev) return; dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n", dev->name); - zd->tx_urb->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(zd->tx_urb); zd->stats.tx_errors++; /* Restart the timeout to quiet the watchdog: */ diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index e42875152c34..c1ba5301ebfc 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -96,8 +96,8 @@ * or before the URB_ACTIVE bit was set. If so, it's essential to cancel * the URB if it hasn't been cancelled already (i.e., if the URB_ACTIVE bit * is still set). Either way, the function must then wait for the URB to - * finish. Note that because the URB_ASYNC_UNLINK flag is set, the URB can - * still be in progress even after a call to usb_unlink_urb() returns. + * finish. Note that the URB can still be in progress even after a call to + * usb_unlink_urb() returns. * * The idea is that (1) once the ABORTING or DISCONNECTING bit is set, * either the stop_transport() function or the submitting function @@ -158,8 +158,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout) * hasn't been mapped for DMA. Yes, this is clunky, but it's * easier than always having the caller tell us whether the * transfer buffer has already been mapped. 
*/ - us->current_urb->transfer_flags = - URB_ASYNC_UNLINK | URB_NO_SETUP_DMA_MAP; + us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP; if (us->current_urb->transfer_buffer == us->iobuf) us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; us->current_urb->transfer_dma = us->iobuf_dma; diff --git a/include/linux/usb.h b/include/linux/usb.h index 434e35120c65..4dbe580f9335 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -616,7 +616,6 @@ extern int usb_disabled(void); #define URB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame ignored */ #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ #define URB_NO_SETUP_DMA_MAP 0x0008 /* urb->setup_dma valid on submit */ -#define URB_ASYNC_UNLINK 0x0010 /* usb_unlink_urb() returns asap */ #define URB_NO_FSBR 0x0020 /* UHCI-specific */ #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUTs with short packet */ #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt needed */ @@ -724,13 +723,7 @@ typedef void (*usb_complete_t)(struct urb *, struct pt_regs *); * Initialization: * * All URBs submitted must initialize the dev, pipe, transfer_flags (may be - * zero), and complete fields. - * The URB_ASYNC_UNLINK transfer flag affects later invocations of - * the usb_unlink_urb() routine. Note: Failure to set URB_ASYNC_UNLINK - * with usb_unlink_urb() is deprecated. For synchronous unlinks use - * usb_kill_urb() instead. - * - * All URBs must also initialize + * zero), and complete fields. All URBs must also initialize * transfer_buffer and transfer_buffer_length. They may provide the * URB_SHORT_NOT_OK transfer flag, indicating that short reads are * to be treated as errors; that flag is invalid for write requests. diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c index 5aa5fe651a8a..bfbec5876659 100644 --- a/sound/usb/usbaudio.c +++ b/sound/usb/usbaudio.c @@ -735,10 +735,9 @@ static int deactivate_urbs(snd_usb_substream_t *subs, int force, int can_sleep) if (test_bit(i, &subs->active_mask)) { if (! test_and_set_bit(i, &subs->unlink_mask)) { struct urb *u = subs->dataurb[i].urb; - if (async) { - u->transfer_flags |= URB_ASYNC_UNLINK; + if (async) usb_unlink_urb(u); - } else + else usb_kill_urb(u); } } @@ -748,10 +747,9 @@ static int deactivate_urbs(snd_usb_substream_t *subs, int force, int can_sleep) if (test_bit(i+16, &subs->active_mask)) { if (! test_and_set_bit(i+16, &subs->unlink_mask)) { struct urb *u = subs->syncurb[i].urb; - if (async) { - u->transfer_flags |= URB_ASYNC_UNLINK; + if (async) usb_unlink_urb(u); - } else + else usb_kill_urb(u); } } -- cgit v1.2.3
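
The quasi-S/G scheme described in the "ub 1/3" message above is easy to model outside the kernel. The sketch below is a plain userspace approximation, not the driver code itself: struct fake_request, struct fake_seg and issue_one() are invented stand-ins for struct ub_request, the scatterlist and ub_submit_scsi(), but the current_block/current_sg arithmetic, the nsg check from ub_rw_cmd_done() and the sg_stat merge histogram follow the patch.

/*
 * Userspace model of "ub 1/3": advertise up to UB_MAX_REQ_SG segments
 * to the block layer, then issue one READ(10)/WRITE(10)-style command
 * per segment of a rare unmerged request.  Names are illustrative.
 */
#include <stdio.h>

#define UB_MAX_REQ_SG 4

struct fake_seg { unsigned int length; };       /* stands in for scatterlist */

struct fake_request {
        unsigned int current_block;             /* in device blocks */
        unsigned int current_sg;
        unsigned int nsg;
        struct fake_seg sgv[UB_MAX_REQ_SG];
};

static int sg_stat[UB_MAX_REQ_SG + 1];          /* merge-quality counters */

/* Issue one "command" covering the current segment only. */
static void issue_one(const struct fake_request *urq, unsigned int bshift)
{
        unsigned int nblks = urq->sgv[urq->current_sg].length >> (bshift + 9);
        printf("cmd: block %u, %u blocks (segment %u of %u)\n",
               urq->current_block, nblks, urq->current_sg + 1, urq->nsg);
}

int main(void)
{
        /* A rare unmerged request: three segments, 2048-byte blocks (bshift = 2). */
        struct fake_request urq = {
                .current_block = 100, .current_sg = 0, .nsg = 3,
                .sgv = { { 8192 }, { 4096 }, { 16384 } },
        };
        unsigned int bshift = 2;

        sg_stat[urq.nsg]++;                     /* as ub_cmd_build_block() counts it */

        for (;;) {
                issue_one(&urq, bshift);
                if (urq.current_sg + 1 >= urq.nsg)      /* ub_rw_cmd_done() test */
                        break;
                /* ub_request_advance(): step past the segment just transferred. */
                urq.current_block += urq.sgv[urq.current_sg].length >> (bshift + 9);
                urq.current_sg++;
        }
        printf("sg_stat[3] = %d\n", sg_stat[3]);
        return 0;
}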
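
The "ub 4" patch backs that out in favor of one command and repeated URBs: the scatterlist moves into struct ub_scsi_cmd (cmd->sgv[], cmd->nsg) and the data phase re-submits one bulk transfer per segment, the way ub_data_start() and ub_scsi_urb_compl() walk cmd->current_sg and add each urb->actual_length into cmd->act_len. A similarly hedged userspace model, with invented names standing in for the URB machinery:

/*
 * Userspace model of "ub 4": a single command carries the whole
 * scatterlist; the data phase loops over the segments, accumulating
 * the actual length, before moving on to the status (CSW) phase.
 */
#include <stdio.h>

#define UB_MAX_REQ_SG 4

struct fake_seg { unsigned int length; };

struct fake_cmd {
        unsigned int len;               /* requested total, rq->nr_sectors * 512 */
        unsigned int act_len;           /* accumulated over all segments */
        unsigned int current_sg;
        unsigned int nsg;
        struct fake_seg sgv[UB_MAX_REQ_SG];
};

/* Stands in for one URB completing with a full transfer of the segment. */
static unsigned int transfer_segment(const struct fake_seg *sg)
{
        printf("urb: %u bytes\n", sg->length);
        return sg->length;
}

int main(void)
{
        struct fake_cmd cmd = {
                .len = 28672, .nsg = 3,
                .sgv = { { 8192 }, { 4096 }, { 16384 } },
        };

        /* ub_data_start() submits the first segment; each completion either
         * starts the next segment or proceeds to the status phase. */
        do {
                cmd.act_len += transfer_segment(&cmd.sgv[cmd.current_sg]);
        } while (++cmd.current_sg < cmd.nsg);

        printf("act_len %u of %u requested\n", cmd.act_len, cmd.len);
        return 0;
}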
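
Both versions build the same 10-byte READ/WRITE CDB in ub_cmd_build_block(): a 32-bit big-endian LBA in bytes 2-5 and a 16-bit big-endian block count in bytes 7-8, after shifting the 512-byte-sector quantities right by capacity.bshift. A standalone check of that encoding; the opcodes are the standard SCSI values and the sample geometry (2048-byte blocks) is an assumption for illustration:

#include <stdio.h>

#define READ_10  0x28
#define WRITE_10 0x2a

static void build_rw10(unsigned char cdb[10], int is_read,
                       unsigned int block, unsigned int nblks)
{
        cdb[0] = is_read ? READ_10 : WRITE_10;
        /* 10-byte CDB uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
        cdb[2] = block >> 24;
        cdb[3] = block >> 16;
        cdb[4] = block >> 8;
        cdb[5] = block;
        cdb[7] = nblks >> 8;
        cdb[8] = nblks;
}

int main(void)
{
        unsigned char cdb[10] = { 0 };
        /* rq->sector and rq->nr_sectors are in 512-byte units; with 2048-byte
         * device blocks (bshift = 2) a request at sector 4096 for 64 sectors
         * becomes block 1024 for 16 blocks. */
        unsigned int bshift = 2;
        unsigned int block = 4096 >> bshift, nblks = 64 >> bshift;
        int i;

        build_rw10(cdb, 1, block, nblks);
        for (i = 0; i < 10; i++)
                printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
}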