author | Sarah Sharp <sarah.a.sharp@linux.intel.com> | 2009-04-28 04:57:38 +0200 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-06-16 06:44:49 +0200 |
commit | 3ffbba9511b4148cbe1f6b6238686adaeaca8feb (patch) | |
tree | f69e42d07d596039e049fe2b14b720ddc6be2694 | |
parent | USB: Support for addressing a USB device under xHCI (diff) | |
download | linux-3ffbba9511b4148cbe1f6b6238686adaeaca8feb.tar.xz linux-3ffbba9511b4148cbe1f6b6238686adaeaca8feb.zip |
USB: xhci: Allocate and address USB devices
xHCI needs to get a "Slot ID" from the host controller and allocate other
data structures for every USB device. Make usb_alloc_dev() and
usb_release_dev() allocate and free these device structures.
usb_alloc_dev() must wait for the hardware to respond to an Enable Slot
command before it can set up the xHC device structures, while
usb_release_dev() fires off a Disable Slot command and does not wait for
it to complete.
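
As a rough illustration, here is a condensed sketch of the Enable Slot path this patch adds in xhci_alloc_dev(); locking detail, debug output, and the disable-slot error path are trimmed relative to the full version in the xhci-hcd.c hunk below:

int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;

	/* Queue an Enable Slot command TRB and ring the command doorbell */
	spin_lock_irqsave(&xhci->lock, flags);
	if (queue_slot_control(xhci, TRB_ENABLE_SLOT, 0)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;	/* no room on the command ring */
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Sleep until handle_cmd_completion() signals the completion */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0 || !xhci->slot_id)
		return 0;	/* timeout, signal, or no free device slots */

	/* Allocate the input/output contexts and the endpoint 0 ring */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL))
		return 0;

	udev->slot_id = xhci->slot_id;
	return 1;	/* the USB core expects 1 on success */
}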
When the USB core wants to choose an address for the device, the xHCI
driver must issue a Set Address command and wait for the completion event
for that command.
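
The Set Address side follows the same queue-command-and-wait pattern. A condensed sketch of the xhci_address_device() path added below, with locking, debug output, and most completion-code handling trimmed:

int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
	int timeleft;
	int ret;

	if (!udev->slot_id)
		return -EINVAL;	/* no slot was ever enabled for this device */

	/* For an unconfigured device, fill in the input slot and ep0 contexts */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);

	/* Queue an Address Device command pointing at the input context */
	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
	if (ret)
		return ret;	/* no room on the command ring */
	ring_cmd_db(xhci);

	/* Wait for the command completion event to report a status */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0)
		return -ETIME;
	if (virt_dev->cmd_status != COMP_SUCCESS)
		return -EINVAL;	/* the full version maps each completion code */

	/*
	 * Read back the address the xHC assigned from the output slot
	 * context; add one because the USB core uses address 1 for root hubs.
	 */
	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
	set_bit(udev->devnum, udev->bus->devmap.devicemap);
	return 0;
}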
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r-- | drivers/usb/host/xhci-dbg.c | 79
-rw-r--r-- | drivers/usb/host/xhci-hcd.c | 201
-rw-r--r-- | drivers/usb/host/xhci-mem.c | 204
-rw-r--r-- | drivers/usb/host/xhci-pci.c | 7
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 34
-rw-r--r-- | drivers/usb/host/xhci.h | 94
6 files changed, 590 insertions, 29 deletions
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 570cd4820458..16ef42a0fe85 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -410,3 +410,82 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); } + +void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) +{ + int i, j; + int last_ep_ctx = 31; + /* Fields are 32 bits wide, DMA addresses are in bytes */ + int field_size = 32 / 8; + + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - drop flags\n", + (unsigned int) &ctx->drop_flags, + dma, ctx->drop_flags); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - add flags\n", + (unsigned int) &ctx->add_flags, + dma, ctx->add_flags); + dma += field_size; + for (i = 0; i > 6; ++i) { + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n", + (unsigned int) &ctx->rsvd[i], + dma, ctx->rsvd[i], i); + dma += field_size; + } + + xhci_dbg(xhci, "Slot Context:\n"); + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info\n", + (unsigned int) &ctx->slot.dev_info, + dma, ctx->slot.dev_info); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info2\n", + (unsigned int) &ctx->slot.dev_info2, + dma, ctx->slot.dev_info2); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tt_info\n", + (unsigned int) &ctx->slot.tt_info, + dma, ctx->slot.tt_info); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_state\n", + (unsigned int) &ctx->slot.dev_state, + dma, ctx->slot.dev_state); + dma += field_size; + for (i = 0; i > 4; ++i) { + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n", + (unsigned int) &ctx->slot.reserved[i], + dma, ctx->slot.reserved[i], i); + dma += field_size; + } + + if (last_ep < 31) + last_ep_ctx = last_ep + 1; + for (i = 0; i < last_ep_ctx; ++i) { + xhci_dbg(xhci, "Endpoint %02d Context:\n", i); + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info\n", + (unsigned int) &ctx->ep[i].ep_info, + dma, ctx->ep[i].ep_info); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info2\n", + (unsigned int) &ctx->ep[i].ep_info2, + dma, ctx->ep[i].ep_info2); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[0]\n", + (unsigned int) &ctx->ep[i].deq[0], + dma, ctx->ep[i].deq[0]); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[1]\n", + (unsigned int) &ctx->ep[i].deq[1], + dma, ctx->ep[i].deq[1]); + dma += field_size; + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tx_info\n", + (unsigned int) &ctx->ep[i].tx_info, + dma, ctx->ep[i].tx_info); + dma += field_size; + for (j = 0; j < 3; ++j) { + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n", + (unsigned int) &ctx->ep[i].reserved[j], + dma, ctx->ep[i].reserved[j], j); + dma += field_size; + } + } +} diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index d7c2fed55978..a01d2ee7435a 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c @@ -318,6 +318,16 @@ void event_ring_work(unsigned long arg) xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); xhci_dbg_cmd_ptrs(xhci); + for (i = 0; i < MAX_HC_SLOTS; ++i) { + if (xhci->devs[i]) { + for (j = 0; j < 31; ++j) { + if (xhci->devs[i]->ep_rings[j]) { + xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); + 
xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg); + } + } + } + } if (xhci->noops_submitted != NUM_TEST_NOOPS) if (setup_one_noop(xhci)) @@ -499,6 +509,197 @@ void xhci_shutdown(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ +/* + * At this point, the struct usb_device is about to go away, the device has + * disconnected, and all traffic has been stopped and the endpoints have been + * disabled. Free any HC data structures associated with that device. + */ +void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + + if (udev->slot_id == 0) + return; + + spin_lock_irqsave(&xhci->lock, flags); + if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + return; + } + ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + /* + * Event command completion handler will free any data structures + * associated with the slot + */ +} + +/* + * Returns 0 if the xHC ran out of device slots, the Enable Slot command + * timed out, or allocating memory failed. Returns 1 on success. + */ +int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + int timeleft; + int ret; + + spin_lock_irqsave(&xhci->lock, flags); + ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + return 0; + } + ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* XXX: how much time for xHC slot assignment? */ + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, + USB_CTRL_SET_TIMEOUT); + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for a slot\n", + timeleft == 0 ? "Timeout" : "Signal"); + /* FIXME cancel the enable slot request */ + return 0; + } + + spin_lock_irqsave(&xhci->lock, flags); + if (!xhci->slot_id) { + xhci_err(xhci, "Error while assigning device slot ID\n"); + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; + } + if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { + /* Disable slot, if we can do it without mem alloc */ + xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); + if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) + ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; + } + udev->slot_id = xhci->slot_id; + /* Is this a LS or FS device under a HS hub? */ + /* Hub or peripherial? */ + spin_unlock_irqrestore(&xhci->lock, flags); + return 1; +} + +/* + * Issue an Address Device command (which will issue a SetAddress request to + * the device). + * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so + * we should only issue and wait on one address command at the same time. + * + * We add one to the device address issued by the hardware because the USB core + * uses address 1 for the root hubs (even though they're not really devices). 
+ */ +int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + unsigned long flags; + int timeleft; + struct xhci_virt_device *virt_dev; + int ret = 0; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + u32 temp; + + if (!udev->slot_id) { + xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); + return -EINVAL; + } + + spin_lock_irqsave(&xhci->lock, flags); + virt_dev = xhci->devs[udev->slot_id]; + + /* If this is a Set Address to an unconfigured device, setup ep 0 */ + if (!udev->config) + xhci_setup_addressable_virt_dev(xhci, udev); + /* Otherwise, assume the core has the device configured how it wants */ + + ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + return ret; + } + ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, + USB_CTRL_SET_TIMEOUT); + /* FIXME: From section 4.3.4: "Software shall be responsible for timing + * the SetAddress() "recovery interval" required by USB and aborting the + * command on a timeout. + */ + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for a slot\n", + timeleft == 0 ? "Timeout" : "Signal"); + /* FIXME cancel the address device command */ + return -ETIME; + } + + spin_lock_irqsave(&xhci->lock, flags); + switch (virt_dev->cmd_status) { + case COMP_CTX_STATE: + case COMP_EBADSLT: + xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", + udev->slot_id); + ret = -EINVAL; + break; + case COMP_TX_ERR: + dev_warn(&udev->dev, "Device not responding to set address.\n"); + ret = -EPROTO; + break; + case COMP_SUCCESS: + xhci_dbg(xhci, "Successful Address Device command\n"); + break; + default: + xhci_err(xhci, "ERROR: unexpected command completion " + "code 0x%x.\n", virt_dev->cmd_status); + ret = -EINVAL; + break; + } + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; + } + temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); + xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); + temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); + xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp); + xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n", + udev->slot_id, + (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id], + xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]); + xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n", + udev->slot_id, + (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], + xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); + xhci_dbg(xhci, "Output Context DMA address = %#08x\n", + virt_dev->out_ctx_dma); + xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); + xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); + /* + * USB core uses address 1 for the roothubs, so we add one to the + * address given back to us by the HC. + */ + udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; + /* FIXME: Zero the input context control for later use? */ + spin_unlock_irqrestore(&xhci->lock, flags); + + xhci_dbg(xhci, "Device address = %d\n", udev->devnum); + /* XXX Meh, not sure if anyone else but choose_address uses this. 
*/ + set_bit(udev->devnum, udev->bus->devmap.devicemap); + + return 0; +} + int xhci_get_frame(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 005d44641d81..d34b91a135a1 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -188,12 +188,187 @@ fail: return 0; } +void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *dev; + int i; + + /* Slot ID 0 is reserved */ + if (slot_id == 0 || !xhci->devs[slot_id]) + return; + + dev = xhci->devs[slot_id]; + xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; + xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; + if (!dev) + return; + + for (i = 0; i < 31; ++i) + if (dev->ep_rings[i]) + xhci_ring_free(xhci, dev->ep_rings[i]); + + if (dev->in_ctx) + dma_pool_free(xhci->device_pool, + dev->in_ctx, dev->in_ctx_dma); + if (dev->out_ctx) + dma_pool_free(xhci->device_pool, + dev->out_ctx, dev->out_ctx_dma); + kfree(xhci->devs[slot_id]); + xhci->devs[slot_id] = 0; +} + +int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + struct usb_device *udev, gfp_t flags) +{ + dma_addr_t dma; + struct xhci_virt_device *dev; + + /* Slot ID 0 is reserved */ + if (slot_id == 0 || xhci->devs[slot_id]) { + xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); + return 0; + } + + xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); + if (!xhci->devs[slot_id]) + return 0; + dev = xhci->devs[slot_id]; + + /* Allocate the (output) device context that will be used in the HC */ + dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); + if (!dev->out_ctx) + goto fail; + dev->out_ctx_dma = dma; + xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma); + memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); + + /* Allocate the (input) device context for address device command */ + dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); + if (!dev->in_ctx) + goto fail; + dev->in_ctx_dma = dma; + xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma); + memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); + + /* Allocate endpoint 0 ring */ + dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); + if (!dev->ep_rings[0]) + goto fail; + + /* + * Point to output device context in dcbaa; skip the output control + * context, which is eight 32 bit fields (or 32 bytes long) + */ + xhci->dcbaa->dev_context_ptrs[2*slot_id] = + (u32) dev->out_ctx_dma + (32); + xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n", + slot_id, + (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id], + dev->out_ctx_dma); + xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; + + return 1; +fail: + xhci_free_virt_device(xhci, slot_id); + return 0; +} + +/* Setup an xHCI virtual device for a Set Address command */ +int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) +{ + struct xhci_virt_device *dev; + struct xhci_ep_ctx *ep0_ctx; + struct usb_device *top_dev; + + dev = xhci->devs[udev->slot_id]; + /* Slot ID 0 is reserved */ + if (udev->slot_id == 0 || !dev) { + xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", + udev->slot_id); + return -EINVAL; + } + ep0_ctx = &dev->in_ctx->ep[0]; + + /* 2) New slot context and endpoint 0 context are valid*/ + dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; + + /* 3) Only the control endpoint is valid - one endpoint context */ + dev->in_ctx->slot.dev_info |= LAST_CTX(1); + + switch (udev->speed) { + case USB_SPEED_SUPER: + 
dev->in_ctx->slot.dev_info |= (u32) udev->route; + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; + break; + case USB_SPEED_HIGH: + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; + break; + case USB_SPEED_FULL: + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; + break; + case USB_SPEED_LOW: + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; + break; + case USB_SPEED_VARIABLE: + xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); + return -EINVAL; + break; + default: + /* Speed was set earlier, this shouldn't happen. */ + BUG(); + } + /* Find the root hub port this device is under */ + for (top_dev = udev; top_dev->parent && top_dev->parent->parent; + top_dev = top_dev->parent) + /* Found device below root hub */; + dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); + xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); + + /* Is this a LS/FS device under a HS hub? */ + /* + * FIXME: I don't think this is right, where does the TT info for the + * roothub or parent hub come from? + */ + if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && + udev->tt) { + dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; + dev->in_ctx->slot.tt_info |= udev->ttport << 8; + } + xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt); + xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); + + /* Step 4 - ring already allocated */ + /* Step 5 */ + ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); + /* + * See section 4.3 bullet 6: + * The default Max Packet size for ep0 is "8 bytes for a USB2 + * LS/FS/HS device or 512 bytes for a USB3 SS device" + * XXX: Not sure about wireless USB devices. + */ + if (udev->speed == USB_SPEED_SUPER) + ep0_ctx->ep_info2 |= MAX_PACKET(512); + else + ep0_ctx->ep_info2 |= MAX_PACKET(8); + /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ + ep0_ctx->ep_info2 |= MAX_BURST(0); + ep0_ctx->ep_info2 |= ERROR_COUNT(3); + + ep0_ctx->deq[0] = + dev->ep_rings[0]->first_seg->dma; + ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; + ep0_ctx->deq[1] = 0; + + /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ + + return 0; +} + void xhci_mem_cleanup(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); int size; - - /* XXX: Free all the segments in the various rings */ + int i; /* Free the Event Ring Segment Table and the actual Event Ring */ xhci_writel(xhci, 0, &xhci->ir_set->erst_size); @@ -218,16 +393,27 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; xhci_dbg(xhci, "Freed command ring\n"); + + for (i = 1; i < MAX_HC_SLOTS; ++i) + xhci_free_virt_device(xhci, i); + if (xhci->segment_pool) dma_pool_destroy(xhci->segment_pool); xhci->segment_pool = NULL; xhci_dbg(xhci, "Freed segment pool\n"); + + if (xhci->device_pool) + dma_pool_destroy(xhci->device_pool); + xhci->device_pool = NULL; + xhci_dbg(xhci, "Freed device context pool\n"); + xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); if (xhci->dcbaa) pci_free_consistent(pdev, sizeof(*xhci->dcbaa), xhci->dcbaa, xhci->dcbaa->dma); xhci->dcbaa = NULL; + xhci->page_size = 0; xhci->page_shift = 0; } @@ -280,8 +466,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); xhci->dcbaa->dma = dma; - xhci_dbg(xhci, "// Setting device context base array address to 0x%x\n", - xhci->dcbaa->dma); + xhci_dbg(xhci, "// Device context base array 
address = 0x%x (DMA), 0x%x (virt)\n", + xhci->dcbaa->dma, (unsigned int) xhci->dcbaa); xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); @@ -293,7 +479,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, SEGMENT_SIZE, 64, xhci->page_size); - if (!xhci->segment_pool) + /* See Table 46 and Note on Figure 55 */ + /* FIXME support 64-byte contexts */ + xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, + sizeof(struct xhci_device_control), + 64, xhci->page_size); + if (!xhci->segment_pool || !xhci->device_pool) goto fail; /* Set up the command ring to have one segments for now. */ @@ -385,6 +576,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * something other than the default (~1ms minimum between interrupts). * See section 5.5.1.2. */ + init_completion(&xhci->addr_dev); + for (i = 0; i < MAX_HC_SLOTS; ++i) + xhci->devs[i] = 0; return 0; fail: diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 005c5b264a7c..7ac12b4ffe86 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -109,6 +109,13 @@ static const struct hc_driver xhci_pci_hc_driver = { .shutdown = xhci_shutdown, /* + * managing i/o requests and associated device resources + */ + .alloc_dev = xhci_alloc_dev, + .free_dev = xhci_free_dev, + .address_device = xhci_address_device, + + /* * scheduling support */ .get_frame_number = xhci_get_frame, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9d6bb3d730c4..901ce70b30b8 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -252,13 +252,10 @@ void ring_cmd_db(struct xhci_hcd *xhci) static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { + int slot_id = TRB_TO_SLOT_ID(event->flags); u64 cmd_dma; dma_addr_t cmd_dequeue_dma; - /* Check completion code */ - if (GET_COMP_CODE(event->status) != COMP_SUCCESS) - xhci_dbg(xhci, "WARN: unsuccessful no-op command\n"); - cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); @@ -273,6 +270,21 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, return; } switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { + case TRB_TYPE(TRB_ENABLE_SLOT): + if (GET_COMP_CODE(event->status) == COMP_SUCCESS) + xhci->slot_id = slot_id; + else + xhci->slot_id = 0; + complete(&xhci->addr_dev); + break; + case TRB_TYPE(TRB_DISABLE_SLOT): + if (xhci->devs[slot_id]) + xhci_free_virt_device(xhci, slot_id); + break; + case TRB_TYPE(TRB_ADDR_DEV): + xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); + complete(&xhci->addr_dev); + break; case TRB_TYPE(TRB_CMD_NOOP): ++xhci->noops_handled; break; @@ -400,3 +412,17 @@ void *setup_one_noop(struct xhci_hcd *xhci) xhci->noops_submitted++; return ring_cmd_db; } + +/* Queue a slot enable or disable request on the command ring */ +int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) +{ + return queue_command(xhci, 0, 0, 0, + TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id)); +} + +/* Queue an address device command TRB */ +int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id) +{ + return queue_command(xhci, in_ctx_ptr, 0, 0, + TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); +} diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 
059c659d3f39..4ef6b9e88504 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -285,12 +285,21 @@ struct xhci_op_regs { * 4 - super speed * 5-15 reserved */ -#define DEV_SPEED_MASK (0xf<<10) +#define DEV_SPEED_MASK (0xf << 10) +#define XDEV_FS (0x1 << 10) +#define XDEV_LS (0x2 << 10) +#define XDEV_HS (0x3 << 10) +#define XDEV_SS (0x4 << 10) #define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10)) -#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == (0x1<<10)) -#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == (0x2<<10)) -#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == (0x3<<10)) -#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == (0x4<<10)) +#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) +#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS) +#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) +#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS) +/* Bits 20:23 in the Slot Context are the speed for the device */ +#define SLOT_SPEED_FS (XDEV_FS << 10) +#define SLOT_SPEED_LS (XDEV_LS << 10) +#define SLOT_SPEED_HS (XDEV_HS << 10) +#define SLOT_SPEED_SS (XDEV_SS << 10) /* Port Indicator Control */ #define PORT_LED_OFF (0 << 14) #define PORT_LED_AMBER (1 << 14) @@ -471,14 +480,19 @@ struct xhci_slot_ctx { /* Set if the device is a hub - bit 26 */ #define DEV_HUB (0x1 << 26) /* Index of the last valid endpoint context in this device context - 27:31 */ -#define LAST_EP_MASK (0x1f << 27) -#define LAST_EP(p) ((p) << 27) +#define LAST_CTX_MASK (0x1f << 27) +#define LAST_CTX(p) ((p) << 27) +#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1) +/* Plus one for the slot context flag */ +#define EPI_TO_FLAG(p) (1 << ((p) + 1)) +#define SLOT_FLAG (1 << 0) +#define EP0_FLAG (1 << 1) /* dev_info2 bitmasks */ /* Max Exit Latency (ms) - worst case time to wake up all links in dev path */ #define MAX_EXIT (0xffff) /* Root hub port number that is needed to access the USB device */ -#define ROOT_HUB_PORT (0xff << 16) +#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16) /* tt_info bitmasks */ /* @@ -495,7 +509,7 @@ struct xhci_slot_ctx { /* dev_state bitmasks */ /* USB device address - assigned by the HC */ -#define DEV_ADDR (0xff) +#define DEV_ADDR_MASK (0xff) /* bits 8:26 reserved */ /* Slot state */ #define SLOT_STATE (0x1f << 27) @@ -507,12 +521,13 @@ struct xhci_slot_ctx { * @ep_info2: information on endpoint type, max packet size, max burst size, * error count, and whether the HC will force an event for all * transactions. - * @ep_ring: 64-bit ring address. If the endpoint only defines one flow, - * this points to the endpoint transfer ring. Otherwise, it points - * to a flow context array, which has a ring pointer for each flow. - * @intr_target: - * 64-bit address of the Interrupter Target that will receive - * events from this endpoint. + * @deq: 64-bit ring dequeue pointer address. If the endpoint only + * defines one stream, this points to the endpoint transfer ring. + * Otherwise, it points to a stream context array, which has a + * ring pointer for each flow. + * @tx_info: + * Average TRB lengths for the endpoint ring and + * max payload within an Endpoint Service Interval Time (ESIT). * * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context * structures. 
If the HC uses 64-byte contexts, there is an additional 32 bytes @@ -521,12 +536,10 @@ struct xhci_slot_ctx { struct xhci_ep_ctx { u32 ep_info; u32 ep_info2; - /* 64-bit endpoint ring address */ - u32 ep_ring[2]; - /* 64-bit address of the interrupter target */ - u32 intr_target[2]; + u32 deq[2]; + u32 tx_info; /* offset 0x14 - 0x1f reserved for HC internal use */ - u32 reserved[2]; + u32 reserved[3]; } __attribute__ ((packed)); /* ep_info bitmasks */ @@ -589,6 +602,28 @@ struct xhci_device_control { #define ADD_EP(x) (0x1 << x) +struct xhci_virt_device { + /* + * Commands to the hardware are passed an "input context" that + * tells the hardware what to change in its data structures. + * The hardware will return changes in an "output context" that + * software must allocate for the hardware. We need to keep + * track of input and output contexts separately because + * these commands might fail and we don't trust the hardware. + */ + struct xhci_device_control *out_ctx; + dma_addr_t out_ctx_dma; + /* Used for addressing devices and configuration changes */ + struct xhci_device_control *in_ctx; + dma_addr_t in_ctx_dma; + /* FIXME when stream support is added */ + struct xhci_ring *ep_rings[31]; + dma_addr_t ep_dma[31]; + /* Status of the last command issued for this device */ + u32 cmd_status; +}; + + /** * struct xhci_device_context_array * @dev_context_ptr array of 64-bit DMA addresses for device contexts @@ -711,6 +746,11 @@ struct xhci_event_cmd { u32 flags; } __attribute__ ((packed)); +/* flags bitmasks */ +/* bits 16:23 are the virtual function ID */ +/* bits 24:31 are the slot ID */ +#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24) +#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24) /* Port Status Change Event TRB fields */ /* Port ID - bits 31:24 */ @@ -931,6 +971,11 @@ struct xhci_hcd { struct xhci_ring *cmd_ring; struct xhci_ring *event_ring; struct xhci_erst erst; + /* slot enabling and address device helpers */ + struct completion addr_dev; + int slot_id; + /* Internal mirror of the HW's dcbaa */ + struct xhci_virt_device *devs[MAX_HC_SLOTS]; /* DMA pools */ struct dma_pool *device_pool; @@ -1002,10 +1047,14 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); +void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); /* xHCI memory managment */ void xhci_mem_cleanup(struct xhci_hcd *xhci); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); +void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); +int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); +int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); #ifdef CONFIG_PCI /* xHCI PCI glue */ @@ -1022,6 +1071,9 @@ void xhci_stop(struct usb_hcd *hcd); void xhci_shutdown(struct usb_hcd *hcd); int xhci_get_frame(struct usb_hcd *hcd); irqreturn_t xhci_irq(struct usb_hcd *hcd); +int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); +int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); @@ -1029,6 +1081,8 @@ void ring_cmd_db(struct xhci_hcd *xhci); void *setup_one_noop(struct 
xhci_hcd *xhci); void handle_event(struct xhci_hcd *xhci); void set_hc_event_deq(struct xhci_hcd *xhci); +int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id); +int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); /* xHCI roothub code */ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |