Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r-- | drivers/usb/host/xhci-hcd.c | 139
1 file changed, 94 insertions, 45 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 932f99938481..5e92c72df642 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -67,22 +67,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
 }
 
 /*
- * Force HC into halt state.
- *
- * Disable any IRQs and clear the run/stop bit.
- * HC will complete any current and actively pipelined transactions, and
- * should halt within 16 microframes of the run/stop bit being cleared.
- * Read HC Halted bit in the status register to see when the HC is finished.
- * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ * Disable interrupts and begin the xHCI halting process.
  */
-int xhci_halt(struct xhci_hcd *xhci)
+void xhci_quiesce(struct xhci_hcd *xhci)
 {
 	u32 halted;
 	u32 cmd;
 	u32 mask;
 
-	xhci_dbg(xhci, "// Halt the HC\n");
-	/* Disable all interrupts from the host controller */
 	mask = ~(XHCI_IRQS);
 	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
 	if (!halted)
@@ -91,6 +83,21 @@ int xhci_halt(struct xhci_hcd *xhci)
 	cmd = xhci_readl(xhci, &xhci->op_regs->command);
 	cmd &= mask;
 	xhci_writel(xhci, cmd, &xhci->op_regs->command);
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+	xhci_dbg(xhci, "// Halt the HC\n");
+	xhci_quiesce(xhci);
 
 	return handshake(xhci, &xhci->op_regs->status,
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
@@ -124,28 +131,6 @@ int xhci_reset(struct xhci_hcd *xhci)
 	return handshake(xhci, &xhci->op_regs->command,
 			CMD_RESET, 0, 250 * 1000);
 }
-/*
- * Stop the HC from processing the endpoint queues.
- */
-static void xhci_quiesce(struct xhci_hcd *xhci)
-{
-	/*
-	 * Queues are per endpoint, so we need to disable an endpoint or slot.
-	 *
-	 * To disable a slot, we need to insert a disable slot command on the
-	 * command ring and ring the doorbell.  This will also free any internal
-	 * resources associated with the slot (which might not be what we want).
-	 *
-	 * A Release Endpoint command sounds better - doesn't free internal HC
-	 * memory, but removes the endpoints from the schedule and releases the
-	 * bandwidth, disables the doorbells, and clears the endpoint enable
-	 * flag.  Usually used prior to a set interface command.
-	 *
-	 * TODO: Implement after command ring code is done.
-	 */
-	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
-	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
-}
 
 #if 0
 /* Set up MSI-X table for entry 0 (may claim other entries later) */
@@ -261,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci)
 	/* Flush posted writes */
 	xhci_readl(xhci, &xhci->ir_set->irq_pending);
 
-	/* FIXME this should be a delayed service routine that clears the EHB */
-	xhci_handle_event(xhci);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+				"Shouldn't IRQs be disabled?\n");
+	else
+		/* FIXME this should be a delayed service routine
+		 * that clears the EHB.
+		 */
+		xhci_handle_event(xhci);
 
 	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -335,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg)
 	spin_lock_irqsave(&xhci->lock, flags);
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
-	if (temp == 0xffffffff) {
+	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
 		xhci_dbg(xhci, "HW died, polling stopped.\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
@@ -490,8 +481,6 @@ void xhci_stop(struct usb_hcd *hcd)
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
 	spin_lock_irq(&xhci->lock);
-	if (HC_IS_RUNNING(hcd->state))
-		xhci_quiesce(xhci);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
 	spin_unlock_irq(&xhci->lock);
@@ -727,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		 * atomic context to this function, which may allocate memory.
 		 */
 		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
 		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -745,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	}
 exit:
 	return ret;
+dying:
+	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+			"non-responsive xHCI host.\n",
+			urb->ep->desc.bEndpointAddress, urb);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return -ESHUTDOWN;
 }
 
 /*
@@ -806,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		kfree(td);
 		return ret;
 	}
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
+				"non-responsive xHCI host.\n",
+				urb->ep->desc.bEndpointAddress, urb);
+		/* Let the stop endpoint command watchdog timer (which set this
+		 * state) finish cleaning up the endpoint TD lists.  We must
+		 * have caught it in the middle of dropping a lock and giving
+		 * back an URB.
+		 */
+		goto done;
+	}
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	xhci_dbg(xhci, "Event ring:\n");
@@ -817,12 +829,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
 
-	ep->cancels_pending++;
 	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
-	if (ep->cancels_pending == 1) {
+	if (!(ep->ep_state & EP_HALT_PENDING)) {
+		ep->ep_state |= EP_HALT_PENDING;
+		ep->stop_cmds_pending++;
+		ep->stop_cmd_timer.expires = jiffies +
+			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+		add_timer(&ep->stop_cmd_timer);
 		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
 		xhci_ring_cmd_db(xhci);
 	}
@@ -1246,13 +1262,35 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 	xhci_zero_in_ctx(xhci, virt_dev);
 
-	/* Free any old rings */
+	/* Install new rings and free or cache any old rings */
 	for (i = 1; i < 31; ++i) {
-		if (virt_dev->eps[i].new_ring) {
-			xhci_ring_free(xhci, virt_dev->eps[i].ring);
-			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
-			virt_dev->eps[i].new_ring = NULL;
+		int rings_cached;
+
+		if (!virt_dev->eps[i].new_ring)
+			continue;
+		/* Only cache or free the old ring if it exists.
+		 * It may not if this is the first add of an endpoint.
+		 */
+		if (virt_dev->eps[i].ring) {
+			rings_cached = virt_dev->num_rings_cached;
+			if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+				virt_dev->num_rings_cached++;
+				rings_cached = virt_dev->num_rings_cached;
+				virt_dev->ring_cache[rings_cached] =
+					virt_dev->eps[i].ring;
+				xhci_dbg(xhci, "Cached old ring, "
+						"%d ring%s cached\n",
+						rings_cached,
+						(rings_cached > 1) ? "s" : "");
+			} else {
+				xhci_ring_free(xhci, virt_dev->eps[i].ring);
+				xhci_dbg(xhci, "Ring cache full (%d rings), "
+						"freeing ring\n",
+						virt_dev->num_rings_cached);
+			}
 		}
+		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+		virt_dev->eps[i].new_ring = NULL;
 	}
 
 	return ret;
@@ -1427,16 +1465,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_virt_device *virt_dev;
 	unsigned long flags;
 	u32 state;
+	int i;
 
 	if (udev->slot_id == 0)
 		return;
+	virt_dev = xhci->devs[udev->slot_id];
+	if (!virt_dev)
+		return;
+
+	/* Stop any wayward timer functions (which may grab the lock) */
+	for (i = 0; i < 31; ++i) {
+		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
+		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* Don't disable the slot if the host controller is dead. */
 	state = xhci_readl(xhci, &xhci->op_regs->status);
-	if (state == 0xffffffff) {
+	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
 		xhci_free_virt_device(xhci, udev->slot_id);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;