Diffstat (limited to 'drivers/usb'):
 drivers/usb/host/xhci-ring.c | 62 ++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 46 insertions(+), 16 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9012098add6b..94e6934edb09 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -182,8 +182,12 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
  * set, but other sections talk about dealing with the chain bit set.  This was
  * fixed in the 0.96 specification errata, but we have to assume that all 0.95
  * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
  */
-static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool consumer, bool more_trbs_coming)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -199,15 +203,28 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
 			if (ring != xhci->event_ring) {
-				if (chain) {
-					next->link.control |= TRB_CHAIN;
-
-					/* Give this link TRB to the hardware */
-					wmb();
-					next->link.control ^= TRB_CYCLE;
-				} else {
+				/*
+				 * If the caller doesn't plan on enqueueing more
+				 * TDs before ringing the doorbell, then we
+				 * don't want to give the link TRB to the
+				 * hardware just yet.  We'll give the link TRB
+				 * back in prepare_ring() just before we enqueue
+				 * the TD at the top of the ring.
+				 */
+				if (!chain && !more_trbs_coming)
 					break;
+
+				/* If we're not dealing with 0.95 hardware,
+				 * carry over the chain bit of the previous TRB
+				 * (which may mean the chain bit is cleared).
+				 */
+				if (!xhci_link_trb_quirk(xhci)) {
+					next->link.control &= ~TRB_CHAIN;
+					next->link.control |= chain;
 				}
+				/* Give this link TRB to the hardware */
+				wmb();
+				next->link.control ^= TRB_CYCLE;
 			}
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
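
For readability, here is how the link-TRB handling in inc_enq() reads once the
hunk above is applied (reconstructed from the diff itself; the enclosing loop
and declarations are elided):

	if (ring != xhci->event_ring) {
		/*
		 * If the caller won't enqueue more TDs before ringing the
		 * doorbell, don't hand this link TRB to the hardware yet;
		 * prepare_ring() gives it back just before the next TD is
		 * enqueued at the top of the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/*
		 * On non-0.95 hardware, carry over the chain bit of the
		 * previous TRB (which may mean the chain bit is cleared).
		 */
		if (!xhci_link_trb_quirk(xhci)) {
			next->link.control &= ~TRB_CHAIN;
			next->link.control |= chain;
		}
		/* Only now may the hardware consume the link TRB. */
		wmb();
		next->link.control ^= TRB_CYCLE;
	}
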
@@ -1707,9 +1724,12 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 /*
  * Generic function for queueing a TRB on a ring.
  * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer,
+		bool consumer, bool more_trbs_coming,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -1719,7 +1739,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = field2;
 	trb->field[2] = field3;
 	trb->field[3] = field4;
-	inc_enq(xhci, ring, consumer);
+	inc_enq(xhci, ring, consumer, more_trbs_coming);
 }
 
 /*
@@ -1988,6 +2008,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int trb_buff_len, this_sg_len, running_total;
 	bool first_trb;
 	u64 addr;
+	bool more_trbs_coming;
 
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
@@ -2073,7 +2094,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
 			TRB_INTR_TARGET(0);
-		queue_trb(xhci, ep_ring, false,
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
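
The two-branch assignment above amounts to more_trbs_coming = (num_trbs > 1):
only the last TRB of the scatter-gather TD is queued with the flag clear. A
minimal sketch of the loop shape (the identifiers are the ones in the hunk,
but the address and length bookkeeping of the real loop is elided, so this is
illustrative rather than the exact kernel code):

	do {
		/* Every TRB but the final one promises that more are coming. */
		more_trbs_coming = (num_trbs > 1);
		queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
		--num_trbs;
		/* ... advance addr and recompute length_field ... */
	} while (num_trbs > 0);

xhci_queue_bulk_tx() repeats the same pattern in the hunks below.
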
@@ -2124,6 +2149,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
+	bool more_trbs_coming;
 	int start_cycle;
 	u32 field, length_field;
 
@@ -2212,7 +2238,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		length_field = TRB_LEN(trb_buff_len) |
 			remainder |
 			TRB_INTR_TARGET(0);
-		queue_trb(xhci, ep_ring, false,
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2291,7 +2321,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Queue setup TRB - see section 6.4.1.2.1 */
 	/* FIXME better way to translate setup_packet into two u32 fields? */
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
-	queue_trb(xhci, ep_ring, false,
+	queue_trb(xhci, ep_ring, false, true,
 			/* FIXME endianness is probably going to bite my ass here. */
 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
 			setup->wIndex | setup->wLength << 16,
@@ -2307,7 +2337,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false,
+		queue_trb(xhci, ep_ring, false, true,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -2324,7 +2354,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false,
+	queue_trb(xhci, ep_ring, false, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
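
Taken together, the three xhci_queue_ctrl_tx() hunks map more_trbs_coming onto
the stages of a control transfer: the setup stage and the optional data stage
are each followed by at least one more TRB, while the status stage is the last
TRB queued before the doorbell rings. Schematically (field1..field4 stand in
for the stage-specific values shown in the hunks above):

	/* Setup stage: a data and/or status stage always follows. */
	queue_trb(xhci, ep_ring, false, true, field1, field2, field3, field4);

	/* Optional data stage: the status stage still follows. */
	if (urb->transfer_buffer_length > 0)
		queue_trb(xhci, ep_ring, false, true, field1, field2, field3, field4);

	/* Status stage: nothing more is queued before the doorbell. */
	queue_trb(xhci, ep_ring, false, false, field1, field2, field3, field4);
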
@@ -2361,7 +2391,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return -ENOMEM;
 	}
-	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
 			field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
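
Commands are the simple case: queue_command() emits a single TRB and its
callers ring the command ring doorbell right away, so more_trbs_coming is
hard-coded to false. A sketch of a typical caller (the command_must_succeed
argument and the pairing with xhci_ring_cmd_db() are assumptions based on the
surrounding xhci code of this era, not part of this patch):

	/* Queue one command TRB; nothing else arrives before the doorbell. */
	ret = queue_command(xhci, field1, field2, field3, field4,
			false /* command_must_succeed */);
	if (ret)
		return ret;
	xhci_ring_cmd_db(xhci);		/* let the hardware see the command */
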