Diffstat (limited to 'drivers/usb/musb/musb_gadget.c')
-rw-r--r--  drivers/usb/musb/musb_gadget.c  172
1 file changed, 62 insertions, 110 deletions
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f7194cf65aba..d0b87e7b4abf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -373,7 +373,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
request_size = min_t(size_t, request->length - request->actual,
musb_ep->dma->max_len);
- use_dma = (request->dma != DMA_ADDR_INVALID);
+ use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
/* MUSB_TXCSR_P_ISO is still set correctly */
@@ -644,8 +644,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
struct usb_request *request = &req->request;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
- unsigned fifo_count = 0;
- u16 len;
+ unsigned len = 0;
+ u16 fifo_count;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
u8 use_mode_1;
@@ -655,7 +655,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
else
musb_ep = &hw_ep->ep_out;
- len = musb_ep->packet_sz;
+ fifo_count = musb_ep->packet_sz;
/* Check if EP is disabled */
if (!musb_ep->desc) {
@@ -704,15 +704,14 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
if (csr & MUSB_RXCSR_RXPKTRDY) {
- len = musb_readw(epio, MUSB_RXCOUNT);
+ fifo_count = musb_readw(epio, MUSB_RXCOUNT);
/*
- * Enable Mode 1 on RX transfers only when short_not_ok flag
- * is set. Currently short_not_ok flag is set only from
- * file_storage and f_mass_storage drivers
+ * use mode 1 only if we expect data of at least ep packet_sz
+ * and have not yet received a short packet
*/
-
- if (request->short_not_ok && len == musb_ep->packet_sz)
+ if ((request->length - request->actual >= musb_ep->packet_sz) &&
+ (fifo_count >= musb_ep->packet_sz))
use_mode_1 = 1;
else
use_mode_1 = 0;
@@ -723,31 +722,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
+ int transfer_size;
c = musb->dma_controller;
channel = musb_ep->dma;
- /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
- * mode 0 only. So we do not get endpoint interrupts due to DMA
- * completion. We only get interrupts from DMA controller.
- *
- * We could operate in DMA mode 1 if we knew the size of the tranfer
- * in advance. For mass storage class, request->length = what the host
- * sends, so that'd work. But for pretty much everything else,
- * request->length is routinely more than what the host sends. For
- * most these gadgets, end of is signified either by a short packet,
- * or filling the last byte of the buffer. (Sending extra data in
- * that last pckate should trigger an overflow fault.) But in mode 1,
- * we don't get DMA completion interrupt for short packets.
- *
- * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
- * to get endpoint interrupt on every DMA req, but that didn't seem
- * to work reliably.
- *
- * REVISIT an updated g_file_storage can set req->short_not_ok, which
- * then becomes usable as a runtime "use mode 1" hint...
- */
-
/* Experimental: Mode1 works with mass storage use cases */
if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
@@ -764,35 +743,30 @@ static void rxstate(struct musb *musb, struct musb_request *req)
csr | MUSB_RXCSR_DMAMODE);
musb_writew(epio, MUSB_RXCSR, csr);
+ transfer_size = min(request->length - request->actual,
+ channel->max_len);
+ musb_ep->dma->desired_mode = 1;
+
} else {
if (!musb_ep->hb_mult &&
musb_ep->hw_ep->rx_double_buffered)
csr |= MUSB_RXCSR_AUTOCLEAR;
csr |= MUSB_RXCSR_DMAENAB;
musb_writew(epio, MUSB_RXCSR, csr);
- }
- if (request->actual < request->length) {
- int transfer_size = 0;
- if (use_mode_1) {
- transfer_size = min(request->length - request->actual,
- channel->max_len);
- musb_ep->dma->desired_mode = 1;
- } else {
- transfer_size = min(request->length - request->actual,
- (unsigned)len);
- musb_ep->dma->desired_mode = 0;
- }
-
- use_dma = c->channel_program(
- channel,
- musb_ep->packet_sz,
- channel->desired_mode,
- request->dma
- + request->actual,
- transfer_size);
+ transfer_size = min(request->length - request->actual,
+ (unsigned)fifo_count);
+ musb_ep->dma->desired_mode = 0;
}
+ use_dma = c->channel_program(
+ channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size);
+
if (use_dma)
return;
}
@@ -808,8 +782,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
channel = musb_ep->dma;
/* In case first packet is short */
- if (len < musb_ep->packet_sz)
- transfer_size = len;
+ if (fifo_count < musb_ep->packet_sz)
+ transfer_size = fifo_count;
else if (request->short_not_ok)
transfer_size = min(request->length -
request->actual,
@@ -817,7 +791,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
else
transfer_size = min(request->length -
request->actual,
- (unsigned)len);
+ (unsigned)fifo_count);
csr &= ~MUSB_RXCSR_DMAMODE;
csr |= (MUSB_RXCSR_DMAENAB |
@@ -845,10 +819,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
#endif /* Mentor's DMA */
- fifo_count = request->length - request->actual;
+ len = request->length - request->actual;
dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
musb_ep->end_point.name,
- len, fifo_count,
+ fifo_count, len,
musb_ep->packet_sz);
fifo_count = min_t(unsigned, len, fifo_count);
@@ -901,7 +875,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
/* reach the end or short packet detected */
- if (request->actual == request->length || len < musb_ep->packet_sz)
+ if (request->actual == request->length ||
+ fifo_count < musb_ep->packet_sz)
musb_g_giveback(musb_ep, request, 0);
}
@@ -1885,8 +1860,7 @@ int __devinit musb_gadget_setup(struct musb *musb)
musb->g.dev.release = musb_gadget_release;
musb->g.name = musb_driver_name;
- if (is_otg_enabled(musb))
- musb->g.is_otg = 1;
+ musb->g.is_otg = 1;
musb_g_init_endpoints(musb);
@@ -1932,11 +1906,14 @@ static int musb_gadget_start(struct usb_gadget *g,
{
struct musb *musb = gadget_to_musb(g);
struct usb_otg *otg = musb->xceiv->otg;
+ struct usb_hcd *hcd = musb_to_hcd(musb);
unsigned long flags;
- int retval = -EINVAL;
+ int retval = 0;
- if (driver->max_speed < USB_SPEED_HIGH)
- goto err0;
+ if (driver->max_speed < USB_SPEED_HIGH) {
+ retval = -EINVAL;
+ goto err;
+ }
pm_runtime_get_sync(musb->controller);
@@ -1950,49 +1927,30 @@ static int musb_gadget_start(struct usb_gadget *g,
otg_set_peripheral(otg, &musb->g);
musb->xceiv->state = OTG_STATE_B_IDLE;
-
- /*
- * FIXME this ignores the softconnect flag. Drivers are
- * allowed hold the peripheral inactive until for example
- * userspace hooks up printer hardware or DSP codecs, so
- * hosts only see fully functional devices.
- */
-
- if (!is_otg_enabled(musb))
- musb_start(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
- if (is_otg_enabled(musb)) {
- struct usb_hcd *hcd = musb_to_hcd(musb);
-
- dev_dbg(musb->controller, "OTG startup...\n");
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ retval = usb_add_hcd(hcd, 0, 0);
+ if (retval < 0) {
+ dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
+ goto err;
+ }
- /* REVISIT: funcall to other code, which also
- * handles power budgeting ... this way also
- * ensures HdrcStart is indirectly called.
- */
- retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
- if (retval < 0) {
- dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
- goto err2;
- }
+ if ((musb->xceiv->last_event == USB_EVENT_ID)
+ && otg->set_vbus)
+ otg_set_vbus(otg, 1);
- if ((musb->xceiv->last_event == USB_EVENT_ID)
- && otg->set_vbus)
- otg_set_vbus(otg, 1);
+ hcd->self.uses_pio_for_control = 1;
- hcd->self.uses_pio_for_control = 1;
- }
if (musb->xceiv->last_event == USB_EVENT_NONE)
pm_runtime_put(musb->controller);
return 0;
-err2:
- if (!is_otg_enabled(musb))
- musb_stop(musb);
-err0:
+err:
return retval;
}
@@ -2070,16 +2028,12 @@ static int musb_gadget_stop(struct usb_gadget *g,
musb_platform_try_idle(musb, 0);
spin_unlock_irqrestore(&musb->lock, flags);
- if (is_otg_enabled(musb)) {
- usb_remove_hcd(musb_to_hcd(musb));
- /* FIXME we need to be able to register another
- * gadget driver here and have everything work;
- * that currently misbehaves.
- */
- }
-
- if (!is_otg_enabled(musb))
- musb_stop(musb);
+ usb_remove_hcd(musb_to_hcd(musb));
+ /*
+ * FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
pm_runtime_put(musb->controller);
@@ -2241,13 +2195,11 @@ __acquires(musb->lock)
if (devctl & MUSB_DEVCTL_BDEVICE) {
musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->g.is_a_peripheral = 0;
- } else if (is_otg_enabled(musb)) {
+ } else {
musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
musb->g.is_a_peripheral = 1;
- } else
- WARN_ON(1);
+ }
/* start with default limits on VBUS power draw */
- (void) musb_gadget_vbus_draw(&musb->g,
- is_otg_enabled(musb) ? 8 : 100);
+ (void) musb_gadget_vbus_draw(&musb->g, 8);
}
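
For reference, a minimal standalone sketch of the RX DMA mode-1 selection rule this patch introduces: mode 1 is chosen only when the remaining request length is at least one max-packet and the FIFO already holds a full packet, instead of keying off the gadget driver's short_not_ok flag. This is a hypothetical illustration, not driver code; the names want_mode_1 and struct rx_req are made up for the example, and only the condition mirrors the patch.

/* Hypothetical, self-contained illustration of the mode-1 decision. */
#include <stdio.h>

struct rx_req {
	unsigned length;	/* total bytes the gadget expects */
	unsigned actual;	/* bytes received so far */
};

static int want_mode_1(const struct rx_req *r, unsigned packet_sz,
		       unsigned fifo_count)
{
	/* Use DMA mode 1 only if at least one more full packet is
	 * expected and the FIFO already holds a full packet, i.e.
	 * no short packet has been received yet. */
	return (r->length - r->actual >= packet_sz) &&
	       (fifo_count >= packet_sz);
}

int main(void)
{
	struct rx_req r = { .length = 1024, .actual = 0 };

	printf("%d\n", want_mode_1(&r, 512, 512));	/* 1: full packet pending, use mode 1 */
	printf("%d\n", want_mode_1(&r, 512, 100));	/* 0: short packet in FIFO, use mode 0 */
	return 0;
}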