Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	302
1 file changed, 178 insertions(+), 124 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 928b93ad1ee8..4cf5363875c7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -52,6 +52,7 @@
* endpoint rings; it generates events on the event ring for these.
*/
+#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -145,10 +146,8 @@ static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
*/
-static void next_trb(struct xhci_hcd *xhci,
- struct xhci_ring *ring,
- struct xhci_segment **seg,
- union xhci_trb **trb)
+static void next_trb(struct xhci_segment **seg,
+ union xhci_trb **trb)
{
if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
*seg = (*seg)->next;
@@ -169,13 +168,16 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
- goto out;
+ return;
}
if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring->cycle_state ^= 1;
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
- goto out;
+
+ trace_xhci_inc_deq(ring);
+
+ return;
}
/* All other rings have link trbs */
@@ -190,14 +192,13 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
+ trace_xhci_inc_deq(ring);
+
if (link_trb_count++ > ring->num_segs) {
xhci_warn(xhci, "Ring is an endless link TRB loop\n");
break;
}
}
-out:
- trace_xhci_inc_deq(ring);
-
return;
}
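For reference, the event-ring branch of inc_deq() above advances the consumer pointer one TRB at a time and flips the consumer cycle state only when the walk wraps from the last segment back to the first. A stand-alone toy sketch of that bookkeeping (invented toy_* types, not the driver's structs):

#include <stdbool.h>

#define TOY_TRBS_PER_SEG 4	/* the real driver uses 256 TRBs per segment */

struct toy_seg {
	int trbs[TOY_TRBS_PER_SEG];
	struct toy_seg *next;		/* segments form a circular list */
	bool last;			/* true for the final segment of the ring */
};

struct toy_ring {
	struct toy_seg *deq_seg;
	int *dequeue;
	unsigned int cycle_state;	/* consumer cycle state, 0 or 1 */
};

/*
 * Advance the consumer past one event TRB: step within the segment if
 * possible, otherwise move to the next segment and flip the cycle state
 * when wrapping from the last segment back to the first.
 */
static void toy_inc_deq(struct toy_ring *ring)
{
	int *seg_end = &ring->deq_seg->trbs[TOY_TRBS_PER_SEG - 1];

	if (ring->dequeue != seg_end) {
		ring->dequeue++;
		return;
	}
	if (ring->deq_seg->last)	/* wrapping around the whole ring */
		ring->cycle_state ^= 1;
	ring->deq_seg = ring->deq_seg->next;
	ring->dequeue = ring->deq_seg->trbs;
}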
@@ -266,13 +267,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
+ trace_xhci_inc_enq(ring);
+
if (link_trb_count++ > ring->num_segs) {
xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
break;
}
}
-
- trace_xhci_inc_enq(ring);
}
/*
@@ -426,7 +427,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
}
}
-/* Must be called with xhci->lock held, releases and aquires lock back */
+/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
@@ -446,9 +447,9 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
* avoiding corrupting the command ring pointer in case the command ring
* is stopped by the time the upper dword is written.
*/
- next_trb(xhci, NULL, &new_seg, &new_deq);
+ next_trb(&new_seg, &new_deq);
if (trb_is_link(new_deq))
- next_trb(xhci, NULL, &new_seg, &new_deq);
+ next_trb(&new_seg, &new_deq);
crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
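The "upper dword" concern in the comment above exists because xhci_write_64() may be carried out as two 32-bit MMIO stores on hosts without atomic 64-bit writes (low dword first, then high), so CRCR can briefly hold a mix of new low bits and old high bits between the halves. A minimal illustration of that split, using a plain pointer as a stand-in for the MMIO register:

#include <stdint.h>

/*
 * Toy stand-in for a 64-bit MMIO register write that hardware may observe
 * as two 32-bit stores; the real driver goes through writel()-style
 * accessors with the appropriate barriers.
 */
static void toy_write_64(uint64_t val, volatile uint32_t *reg)
{
	reg[0] = (uint32_t)val;		/* low dword lands first ... */
	reg[1] = (uint32_t)(val >> 32);	/* ... high dword second */
}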
@@ -660,8 +661,8 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
/*
* We want to find the pointer, segment and cycle state of the new trb
- * (the one after current TD's last_trb). We know the cycle state at
- * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+ * (the one after current TD's end_trb). We know the cycle state at
+ * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are
* found.
*/
do {
@@ -671,14 +672,14 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
if (td_last_trb_found)
break;
}
- if (new_deq == td->last_trb)
+ if (new_deq == td->end_trb)
td_last_trb_found = true;
if (cycle_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
new_cycle ^= 0x1;
- next_trb(xhci, ep_ring, &new_seg, &new_deq);
+ next_trb(&new_seg, &new_deq);
/* Search wrapped around, bail out */
if (new_deq == ep->ring->dequeue) {
@@ -740,23 +741,22 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
* (The last TRB actually points to the ring enqueue pointer, which is not part
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
-static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- struct xhci_td *td, bool flip_cycle)
+static void td_to_noop(struct xhci_td *td, bool flip_cycle)
{
struct xhci_segment *seg = td->start_seg;
- union xhci_trb *trb = td->first_trb;
+ union xhci_trb *trb = td->start_trb;
while (1) {
trb_to_noop(trb, TRB_TR_NOOP);
/* flip cycle if asked to */
- if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
- if (trb == td->last_trb)
+ if (trb == td->end_trb)
break;
- next_trb(xhci, ep_ring, &seg, &trb);
+ next_trb(&seg, &trb);
}
}
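td_to_noop() above walks from the TD's first TRB to its last, rewriting each one as a no-op and, when flip_cycle is set, flipping the cycle bit on every TRB except the first and last so the hardware never starts executing a partially overwritten TD. A toy sketch of the same shape over a flat array (the real walk crosses segment boundaries via next_trb(), and trb_to_noop() rewrites the TRB type while preserving the cycle bit):

#include <stdbool.h>
#include <stdint.h>

#define TOY_CYCLE_BIT	(1u << 0)	/* pretend the cycle bit lives in bit 0 */

struct toy_trb {
	uint32_t control;
};

static void toy_td_to_noop(struct toy_trb *start, struct toy_trb *end, bool flip_cycle)
{
	struct toy_trb *trb = start;

	for (;;) {
		/* toy "convert to no-op": drop everything but the cycle bit */
		trb->control &= TOY_CYCLE_BIT;
		/* flip cycle on the interior TRBs only, as in the real code */
		if (flip_cycle && trb != start && trb != end)
			trb->control ^= TOY_CYCLE_BIT;
		if (trb == end)
			break;
		trb++;
	}
}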
@@ -799,7 +799,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
- /* for in tranfers we need to copy the data from bounce to sg */
+ /* for in transfers we need to copy the data from bounce to sg */
if (urb->num_sgs) {
len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
seg->bounce_len, seg->bounce_offs);
@@ -814,8 +814,8 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
seg->bounce_offs = 0;
}
-static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_ring *ep_ring, int status)
+static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_ring *ep_ring, int status)
{
struct urb *urb = NULL;
@@ -858,10 +858,18 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
status = 0;
xhci_giveback_urb_in_irq(xhci, td, status);
}
-
- return 0;
}
+/* Give back previous TD and move on to the next TD. */
+static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring,
+ u32 status)
+{
+ ring->dequeue = td->end_trb;
+ ring->deq_seg = td->end_seg;
+ inc_deq(xhci, ring);
+
+ xhci_td_cleanup(xhci, td, ring, status);
+}
/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
@@ -972,13 +980,20 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
unsigned int slot_id = ep->vdev->slot_id;
int err;
+ /*
+ * This is not going to work if the hardware is changing its dequeue
+ * pointers as we look at them. Completion handler will call us later.
+ */
+ if (ep->ep_state & SET_DEQ_PENDING)
+ return 0;
+
xhci = ep->xhci;
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
(unsigned long long)xhci_trb_virt_to_dma(
- td->start_seg, td->first_trb),
+ td->start_seg, td->start_trb),
td->urb->stream_id, td->urb);
list_del_init(&td->td_list);
ring = xhci_urb_to_transfer_ring(xhci, td->urb);
@@ -1020,16 +1035,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
"Found multiple active URBs %p and %p in stream %u?\n",
td->urb, cached_td->urb,
td->urb->stream_id);
- td_to_noop(xhci, ring, cached_td, false);
+ td_to_noop(cached_td, false);
cached_td->cancel_status = TD_CLEARED;
}
- td_to_noop(xhci, ring, td, false);
+ td_to_noop(td, false);
td->cancel_status = TD_CLEARING_CACHE;
cached_td = td;
break;
}
} else {
- td_to_noop(xhci, ring, td, false);
+ td_to_noop(td, false);
td->cancel_status = TD_CLEARED;
}
}
@@ -1054,7 +1069,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
continue;
xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
td->urb);
- td_to_noop(xhci, ring, td, false);
+ td_to_noop(td, false);
td->cancel_status = TD_CLEARED;
}
}
@@ -1062,6 +1077,19 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
}
/*
+ * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
+ * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
+ * stopped on. Those will be given back later when the commands complete.
+ *
+ * Call under xhci->lock on a stopped endpoint.
+ */
+void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
+{
+ xhci_invalidate_cancelled_tds(ep);
+ xhci_giveback_invalidated_tds(ep);
+}
+
+/*
* Returns the TD the endpoint ring halted on.
* Only call for non-running rings without streams.
*/
@@ -1151,16 +1179,35 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
return;
case EP_STATE_STOPPED:
/*
- * NEC uPD720200 sometimes sets this state and fails with
- * Context Error while continuing to process TRBs.
- * Be conservative and trust EP_CTX_STATE on other chips.
+ * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
+ * EP is a Context State Error, and EP stays Stopped.
+ *
+ * But maybe it failed on Halted, and somebody ran Reset
+ * Endpoint later. EP state is now Stopped and EP_HALTED
+ * still set because Reset EP handler will run after us.
+ */
+ if (ep->ep_state & EP_HALTED)
+ break;
+ /*
+ * On some HCs EP state remains Stopped for some tens of
+ * us to a few ms or more after a doorbell ring, and any
+ * new Stop Endpoint fails without aborting the restart.
+ * This handler may run quickly enough to still see this
+ * Stopped state, but it will soon change to Running.
+ *
+ * Assume this bug on unexpected Stop Endpoint failures.
+ * Keep retrying until the EP starts and stops again, on
+ * chips where this is known to help. Wait for 100ms.
*/
if (!(xhci->quirks & XHCI_NEC_HOST))
break;
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
- xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
+ xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
+ GET_EP_CTX_STATE(ep_ctx));
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
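The 100 ms retry window added above leans on the kernel's wraparound-safe jiffies comparisons: time_is_before_jiffies(t) becomes true once the current jiffies count has moved past t, computed with a signed subtraction so a wrapping tick counter does not break the test. A stand-alone model of that check with a toy 32-bit counter (toy_* names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t toy_jiffies_t;

/*
 * Wrap-safe "deadline is already in the past" test: the signed view of
 * (now - deadline) is positive as long as the two values are less than
 * half the counter range apart, even across a counter wraparound.
 */
static bool toy_time_is_before(toy_jiffies_t now, toy_jiffies_t deadline)
{
	return (int32_t)(now - deadline) > 0;
}

/* Usage mirroring the hunk above: stop retrying the Stop Endpoint command
 * once more than 100 ms worth of ticks have passed since stop_time. */
static bool toy_retry_window_closed(toy_jiffies_t now, toy_jiffies_t stop_time,
				    toy_jiffies_t ticks_per_100ms)
{
	return toy_time_is_before(now, stop_time + ticks_per_100ms);
}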
@@ -1338,8 +1385,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
+ struct xhci_stream_ctx *stream_ctx;
struct xhci_td *td, *tmp_td;
- bool deferred = false;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1360,6 +1407,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
trace_xhci_handle_cmd_set_deq(slot_ctx);
trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ stream_ctx = &ep->stream_info->stream_ctx_array[stream_id];
+ trace_xhci_handle_cmd_set_deq_stream(ep->stream_info, stream_id);
+ }
+
if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
@@ -1396,9 +1448,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
u64 deq;
/* 4.6.10 deq ptr is written to the stream ctx for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
- struct xhci_stream_ctx *ctx =
- &ep->stream_info->stream_ctx_array[stream_id];
- deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK;
/*
* Cadence xHCI controllers store some endpoint state
@@ -1410,8 +1460,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
* To fix this issue driver must clear Rsvd0 field.
*/
if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
- ctx->reserved[0] = 0;
- ctx->reserved[1] = 0;
+ stream_ctx->reserved[0] = 0;
+ stream_ctx->reserved[1] = 0;
}
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
@@ -1440,8 +1490,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
- } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
- deferred = true;
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
@@ -1452,11 +1500,15 @@ cleanup:
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- if (deferred) {
- /* We have more streams to clear */
+ /* Check for deferred or newly cancelled TDs */
+ if (!list_empty(&ep->cancelled_td_list)) {
xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
__func__);
xhci_invalidate_cancelled_tds(ep);
+ /* Try to restart the endpoint if all is done */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ /* Start giving back any TDs invalidated above */
+ xhci_giveback_invalidated_tds(ep);
} else {
/* Restart any rings with pending URBs */
xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
@@ -1716,7 +1768,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_trb = xhci->cmd_ring->dequeue;
- trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
+ trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, cmd_dma);
cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
@@ -2074,7 +2126,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_ad
dma_addr_t end_trb_dma;
struct xhci_segment *cur_seg;
- start_dma = xhci_trb_virt_to_dma(td->start_seg, td->first_trb);
+ start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
cur_seg = td->start_seg;
do {
@@ -2084,7 +2136,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_ad
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
/* If the end TRB isn't in this segment, this is set to 0 */
- end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->last_trb);
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
if (debug)
xhci_warn(xhci,
@@ -2184,9 +2236,9 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
return 0;
}
-static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- u32 trb_comp_code)
+static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ u32 trb_comp_code)
{
struct xhci_ep_ctx *ep_ctx;
@@ -2201,7 +2253,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
- return 0;
+ return;
case COMP_USB_TRANSACTION_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_SPLIT_TRANSACTION_ERROR:
@@ -2226,8 +2278,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
!list_empty(&td->cancelled_td_list)) {
xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
(unsigned long long)xhci_trb_virt_to_dma(
- td->start_seg, td->first_trb));
- return 0;
+ td->start_seg, td->start_trb));
+ return;
}
/* endpoint not halted, don't reset it */
break;
@@ -2235,7 +2287,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
/* Almost same procedure as for STALL_ERROR below */
xhci_clear_hub_tt_buffer(xhci, td, ep);
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
- return 0;
+ return;
case COMP_STALL_ERROR:
/*
* xhci internal endpoint state will go to a "halt" state for
@@ -2252,28 +2304,22 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
- return 0; /* xhci_handle_halted_endpoint marked td cancelled */
+ return; /* xhci_handle_halted_endpoint marked td cancelled */
default:
break;
}
- /* Update ring dequeue pointer */
- ep_ring->dequeue = td->last_trb;
- ep_ring->deq_seg = td->last_trb_seg;
- inc_deq(xhci, ep_ring);
-
- return xhci_td_cleanup(xhci, td, ep_ring, td->status);
+ xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
-/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
-static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
- union xhci_trb *stop_trb)
+/* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */
+static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb)
{
u32 sum;
- union xhci_trb *trb = ring->dequeue;
- struct xhci_segment *seg = ring->deq_seg;
+ union xhci_trb *trb = td->start_trb;
+ struct xhci_segment *seg = td->start_seg;
- for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
+ for (sum = 0; trb != stop_trb; next_trb(&seg, &trb)) {
if (!trb_is_noop(trb) && !trb_is_link(trb))
sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
}
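The reworked sum_trb_lengths() now starts at the TD's own start_trb/start_seg instead of the ring dequeue pointer, summing the transfer length of every TRB strictly before stop_trb while skipping link and no-op TRBs. A toy version of that accumulation over a contiguous array (the real walk follows segment links with next_trb()):

#include <stdbool.h>
#include <stdint.h>

struct toy_xfer_trb {
	uint32_t len;		/* transfer length, like TRB_LEN(field[2]) */
	bool is_noop;
	bool is_link;
};

static uint32_t toy_sum_trb_lengths(const struct toy_xfer_trb *start,
				    const struct toy_xfer_trb *stop)
{
	const struct toy_xfer_trb *trb;
	uint32_t sum = 0;

	/* [start, stop): stop_trb itself is excluded, as in the driver */
	for (trb = start; trb != stop; trb++)
		if (!trb->is_noop && !trb->is_link)
			sum += trb->len;
	return sum;
}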
@@ -2283,9 +2329,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
/*
* Process control tds, update urb status and actual_length.
*/
-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
@@ -2364,7 +2410,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb_length_set = true;
td->urb->actual_length = requested - remaining;
xhci_dbg(xhci, "Waiting for status stage event\n");
- return 0;
+ return;
}
/* at status stage */
@@ -2372,15 +2418,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length = requested;
finish_td:
- return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
/*
* Process isochronous tds, update urb packet status and actual_length.
*/
-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct urb_priv *urb_priv;
int idx;
@@ -2425,7 +2471,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
fallthrough;
case COMP_ISOCH_BUFFER_OVERRUN:
frame->status = -EOVERFLOW;
- if (ep_trb != td->last_trb)
+ if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
@@ -2435,14 +2481,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_USB_TRANSACTION_ERROR:
frame->status = -EPROTO;
sum_trbs_for_length = true;
- if (ep_trb != td->last_trb)
+ if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
break;
case COMP_STOPPED_SHORT_PACKET:
- /* field normally containing residue now contains tranferred */
+ /* field normally containing residue now contains transferred */
frame->status = short_framestatus;
requested = remaining;
break;
@@ -2462,7 +2508,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
if (sum_trbs_for_length)
- frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
+ frame->actual_length = sum_trb_lengths(td, ep_trb) +
ep_trb_len - remaining;
else
frame->actual_length = requested;
@@ -2471,17 +2517,16 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
finish_td:
/* Don't give back TD yet if we encountered an error mid TD */
- if (td->error_mid_td && ep_trb != td->last_trb) {
+ if (td->error_mid_td && ep_trb != td->end_trb) {
xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
td->urb_length_set = true;
- return 0;
+ return;
}
-
- return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
-static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_virt_ep *ep, int status)
+static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_virt_ep *ep, int status)
{
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
@@ -2497,20 +2542,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* calc actual length */
frame->actual_length = 0;
- /* Update ring dequeue pointer */
- ep->ring->dequeue = td->last_trb;
- ep->ring->deq_seg = td->last_trb_seg;
- inc_deq(xhci, ep->ring);
-
- return xhci_td_cleanup(xhci, td, ep->ring, status);
+ xhci_dequeue_td(xhci, td, ep->ring, status);
}
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct xhci_slot_ctx *slot_ctx;
u32 trb_comp_code;
@@ -2526,7 +2566,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_SUCCESS:
ep->err_count = 0;
/* handle success with untransferred data as short packet */
- if (ep_trb != td->last_trb || remaining) {
+ if (ep_trb != td->end_trb || remaining) {
xhci_warn(xhci, "WARN Successful completion on short TX\n");
xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
@@ -2542,7 +2582,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+ td->urb->actual_length = sum_trb_lengths(td, ep_trb);
goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
@@ -2553,17 +2593,17 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->status = 0;
xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
- return 0;
+ return;
default:
/* do nothing */
break;
}
- if (ep_trb == td->last_trb)
+ if (ep_trb == td->end_trb)
td->urb->actual_length = requested - remaining;
else
td->urb->actual_length =
- sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ sum_trb_lengths(td, ep_trb) +
ep_trb_len - remaining;
finish_td:
if (remaining > requested) {
@@ -2572,7 +2612,7 @@ finish_td:
td->urb->actual_length = 0;
}
- return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
@@ -2792,10 +2832,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
- ep_ring->dequeue = td->last_trb;
- ep_ring->deq_seg = td->last_trb_seg;
- inc_deq(xhci, ep_ring);
- xhci_td_cleanup(xhci, td, ep_ring, td->status);
+ xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
if (list_empty(&ep_ring->td_list)) {
@@ -2889,7 +2926,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_ring->last_td_was_short = false;
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
- trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb);
+ trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
/*
* No-op TRB could trigger interrupts in a case where a URB was killed
@@ -2939,7 +2976,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter
{
u32 trb_type;
- trace_xhci_handle_event(ir->event_ring, &event->generic);
+ trace_xhci_handle_event(ir->event_ring, &event->generic,
+ xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue));
/*
* Barrier between reading the TRB_CYCLE (valid) flag before, and any
@@ -3162,7 +3201,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
wmb();
trb->field[3] = cpu_to_le32(field4);
- trace_xhci_queue_trb(ring, trb);
+ trace_xhci_queue_trb(ring, trb,
+ xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue));
inc_enq(xhci, ring, more_trbs_coming);
}
@@ -3302,7 +3342,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
/* Add this TD to the tail of the endpoint ring's TD list */
list_add_tail(&td->td_list, &ep_ring->td_list);
td->start_seg = ep_ring->enq_seg;
- td->first_trb = ep_ring->enqueue;
+ td->start_trb = ep_ring->enqueue;
return 0;
}
@@ -3641,8 +3681,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field &= ~TRB_CHAIN;
field |= TRB_IOC;
more_trbs_coming = false;
- td->last_trb = ring->enqueue;
- td->last_trb_seg = ring->enq_seg;
+ td->end_trb = ring->enqueue;
+ td->end_seg = ring->enq_seg;
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&send_addr, urb->transfer_buffer,
trb_buff_len);
@@ -3690,8 +3730,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
1, urb, 1, mem_flags);
- urb_priv->td[1].last_trb = ring->enqueue;
- urb_priv->td[1].last_trb_seg = ring->enq_seg;
+ urb_priv->td[1].end_trb = ring->enqueue;
+ urb_priv->td[1].end_seg = ring->enq_seg;
field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
}
@@ -3727,6 +3767,20 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (!urb->setup_packet)
return -EINVAL;
+ if ((xhci->quirks & XHCI_ETRON_HOST) &&
+ urb->dev->speed >= USB_SPEED_SUPER) {
+ /*
+ * If the next available TRB is the Link TRB in the ring segment, then
+ * enqueue a No-Op TRB; this prevents the Setup and Data Stage
+ * TRBs from being split by the Link TRB.
+ */
+ if (trb_is_link(ep_ring->enqueue + 1)) {
+ field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
+ queue_trb(xhci, ep_ring, false, 0, 0,
+ TRB_INTR_TARGET(0), field);
+ }
+ }
+
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
@@ -3815,8 +3869,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* Save the DMA address of the last TRB in the TD */
- td->last_trb = ep_ring->enqueue;
- td->last_trb_seg = ep_ring->enq_seg;
+ td->end_trb = ep_ring->enqueue;
+ td->end_seg = ep_ring->enq_seg;
/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
/* If the device sent data, the status stage is an OUT transfer */
@@ -4101,8 +4155,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_CHAIN;
} else {
more_trbs_coming = false;
- td->last_trb = ep_ring->enqueue;
- td->last_trb_seg = ep_ring->enq_seg;
+ td->end_trb = ep_ring->enqueue;
+ td->end_seg = ep_ring->enq_seg;
field |= TRB_IOC;
if (trb_block_event_intr(xhci, num_tds, i, ir))
field |= TRB_BEI;
@@ -4168,14 +4222,14 @@ cleanup:
/* Use the first TD as a temporary variable to turn the TDs we've queued
* into No-ops with a software-owned cycle bit. That way the hardware
* won't accidentally start executing bogus TDs when we partially
- * overwrite them. td->first_trb and td->start_seg are already set.
+ * overwrite them. td->start_trb and td->start_seg are already set.
*/
- urb_priv->td[0].last_trb = ep_ring->enqueue;
+ urb_priv->td[0].end_trb = ep_ring->enqueue;
/* Every TRB except the first & last will have its cycle bit flipped. */
- td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
+ td_to_noop(&urb_priv->td[0], true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
- ep_ring->enqueue = urb_priv->td[0].first_trb;
+ ep_ring->enqueue = urb_priv->td[0].start_trb;
ep_ring->enq_seg = urb_priv->td[0].start_seg;
ep_ring->cycle_state = start_cycle;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);