Diffstat (limited to 'drivers/usb/musb')
 -rw-r--r--  drivers/usb/musb/Kconfig         |   6
 -rw-r--r--  drivers/usb/musb/davinci.c       |  63
 -rw-r--r--  drivers/usb/musb/davinci.h       |  23
 -rw-r--r--  drivers/usb/musb/musb_core.c     |   2
 -rw-r--r--  drivers/usb/musb/musb_core.h     |   5
 -rw-r--r--  drivers/usb/musb/musb_host.c     | 140
 -rw-r--r--  drivers/usb/musb/musb_virthub.c  |   2
 7 files changed, 163 insertions(+), 78 deletions(-)
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 9985db08e7db..b66e8544d8b9 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -20,8 +20,8 @@ config USB_MUSB_HDRC
it's being used with, including the USB peripheral role,
or the USB host role, or both.
- Texas Instruments parts using this IP include DaVinci 644x,
- OMAP 243x, OMAP 343x, and TUSB 6010.
+ Texas Instruments families using this IP include DaVinci
+ (35x, 644x ...), OMAP 243x, OMAP 3, and TUSB 6010.
Analog Devices parts using this IP include Blackfin BF54x,
BF525 and BF527.
@@ -40,7 +40,7 @@ config USB_MUSB_SOC
default y if (BF54x && !BF544)
default y if (BF52x && !BF522 && !BF523)
-comment "DaVinci 644x USB support"
+comment "DaVinci 35x and 644x USB support"
depends on USB_MUSB_HDRC && ARCH_DAVINCI
comment "OMAP 243x high speed USB support"
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 2dc7606f319c..10d11ab113ab 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -48,6 +48,9 @@
#include "cppi_dma.h"
+#define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR)
+#define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR)
+
/* REVISIT (PM) we should be able to keep the PHY in low power mode most
* of the time (24 MHz oscillator and PLL off, etc) by setting POWER.D0
* and, when in host mode, autosuspending idle root ports... PHYPLLON
@@ -56,20 +59,26 @@
static inline void phy_on(void)
{
- /* start the on-chip PHY and its PLL */
- __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
- (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
- while ((__raw_readl((void __force __iomem *)
- IO_ADDRESS(USBPHY_CTL_PADDR))
- & USBPHY_PHYCLKGD) == 0)
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ /* power everything up; start the on-chip PHY and its PLL */
+ phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN);
+ phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+
+ /* wait for PLL to lock before proceeding */
+ while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0)
cpu_relax();
}
static inline void phy_off(void)
{
- /* powerdown the on-chip PHY and its oscillator */
- __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
- IO_ADDRESS(USBPHY_CTL_PADDR));
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ /* powerdown the on-chip PHY, its PLL, and the OTG block */
+ phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON);
+ phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
}
static int dma_off = 1;
@@ -126,10 +135,6 @@ void musb_platform_disable(struct musb *musb)
}
-/* REVISIT it's not clear whether DaVinci can support full OTG. */
-
-static int vbus_state = -1;
-
#ifdef CONFIG_USB_MUSB_HDRC_HCD
#define portstate(stmt) stmt
#else
@@ -137,10 +142,19 @@ static int vbus_state = -1;
#endif
-/* VBUS SWITCHING IS BOARD-SPECIFIC */
+/*
+ * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM,
+ * which doesn't wire DRVVBUS to the FET that switches it. Unclear
+ * if that's a problem with the DM6446 chip or just with that board.
+ *
+ * In either case, the DM355 EVM automates DRVVBUS the normal way,
+ * when J10 is out, and TI documents it as handling OTG.
+ */
#ifdef CONFIG_MACH_DAVINCI_EVM
+static int vbus_state = -1;
+
/* I2C operations are always synchronous, and require a task context.
* With unloaded systems, using the shared workqueue seems to suffice
* to satisfy the 100msec A_WAIT_VRISE timeout...
@@ -150,12 +164,12 @@ static void evm_deferred_drvvbus(struct work_struct *ignored)
gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
vbus_state = !vbus_state;
}
-static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
#endif /* EVM */
static void davinci_source_power(struct musb *musb, int is_on, int immediate)
{
+#ifdef CONFIG_MACH_DAVINCI_EVM
if (is_on)
is_on = 1;
@@ -163,16 +177,17 @@ static void davinci_source_power(struct musb *musb, int is_on, int immediate)
return;
vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
-#ifdef CONFIG_MACH_DAVINCI_EVM
if (machine_is_davinci_evm()) {
+ static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
if (immediate)
gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
else
schedule_work(&evm_vbus_work);
}
-#endif
if (immediate)
vbus_state = is_on;
+#endif
}
static void davinci_set_vbus(struct musb *musb, int is_on)
@@ -391,6 +406,17 @@ int __init musb_platform_init(struct musb *musb)
musb->board_set_vbus = davinci_set_vbus;
davinci_source_power(musb, 0, 1);
+ /* dm355 EVM swaps D+/D- for signal integrity, and
+ * is clocked from the main 24 MHz crystal.
+ */
+ if (machine_is_davinci_dm355_evm()) {
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ phy_ctrl &= ~(3 << 9);
+ phy_ctrl |= USBPHY_DATAPOL;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+ }
+
/* reset the controller */
musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
@@ -401,8 +427,7 @@ int __init musb_platform_init(struct musb *musb)
/* NOTE: irqs are in mixed mode, not bypass to pure-musb */
pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
- revision, __raw_readl((void __force __iomem *)
- IO_ADDRESS(USBPHY_CTL_PADDR)),
+ revision, __raw_readl(USB_PHY_CTRL),
musb_readb(tibase, DAVINCI_USB_CTRL_REG));
musb->isr = davinci_interrupt;
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 7fb6238e270f..046c84433cad 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,14 +15,21 @@
*/
/* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
-#define USBPHY_PHYCLKGD (1 << 8)
-#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
-#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
-#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
-#define USBPHY_CLKO1SEL (1 << 3)
-#define USBPHY_OSCPDWN (1 << 2)
-#define USBPHY_PHYPDWN (1 << 0)
+#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
+#define USBPHY_PHYCLKGD BIT(8)
+#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN BIT(6) /* v(bus) comparator */
+#define USBPHY_VBUSSENS BIT(5) /* (dm355,ro) is vbus > 0.5V */
+#define USBPHY_PHYPLLON BIT(4) /* override pll suspend */
+#define USBPHY_CLKO1SEL BIT(3)
+#define USBPHY_OSCPDWN BIT(2)
+#define USBPHY_OTGPDWN BIT(1)
+#define USBPHY_PHYPDWN BIT(0)
+
+#define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DRVVBUS_FORCE BIT(2)
+#define DRVVBUS_OVERRIDE BIT(1)
/* For now include usb OTG module registers here */
#define DAVINCI_USB_VERSION_REG 0x00
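Note that this patch only defines the DM355_DEEPSLEEP register and its DRVVBUS_FORCE / DRVVBUS_OVERRIDE bits; none of the hunks shown here use them yet. As a rough, hypothetical sketch (assuming DRVVBUS_OVERRIDE masks the controller's DRVVBUS output and DRVVBUS_FORCE supplies the forced level), board code switching VBUS on a DM355 might look something like this:

/* Hypothetical sketch, not part of this patch: switch VBUS on DM355
 * via the DEEPSLEEP register override bits defined in davinci.h.
 */
static void dm355_drvvbus(int is_on)
{
	u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);

	if (is_on) {
		/* let the controller's DRVVBUS output reach the FET */
		deepsleep &= ~DRVVBUS_OVERRIDE;
	} else {
		/* override DRVVBUS and force it low */
		deepsleep &= ~DRVVBUS_FORCE;
		deepsleep |= DRVVBUS_OVERRIDE;
	}
	__raw_writel(deepsleep, DM355_DEEPSLEEP);
}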
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index af77e4659006..338cd1611ab3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -769,7 +769,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_A_SUSPEND:
usb_hcd_resume_root_hub(musb_to_hcd(musb));
musb_root_disconnect(musb);
- if (musb->a_wait_bcon != 0)
+ if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
musb_platform_try_idle(musb, jiffies
+ msecs_to_jiffies(musb->a_wait_bcon));
break;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 630946a2d9fc..efb39b5e55b5 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -331,7 +331,6 @@ struct musb {
struct list_head control; /* of musb_qh */
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
- struct musb_qh *periodic[32]; /* tree of interrupt+iso */
#endif
/* called with IRQs blocked; ON/nonzero implies starting a session,
@@ -479,10 +478,11 @@ static inline void musb_configure_ep0(struct musb *musb)
static inline int musb_read_fifosize(struct musb *musb,
struct musb_hw_ep *hw_ep, u8 epnum)
{
+ void *mbase = musb->mregs;
u8 reg = 0;
/* read from core using indexed model */
- reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+ reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE));
/* 0's returned when no more endpoints */
if (!reg)
return -ENODEV;
@@ -509,6 +509,7 @@ static inline void musb_configure_ep0(struct musb *musb)
{
musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].is_shared_fifo = true;
}
#endif /* CONFIG_BLACKFIN */
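The hunk above shows only the start of musb_read_fifosize(); the rest of the helper (unchanged, and not part of this diff) decodes the FIFOSIZE byte into per-endpoint packet limits. For context, a simplified sketch of that decoding, assuming the usual MUSB encoding (low nibble = log2 of the TX FIFO size, high nibble = log2 of the RX FIFO size, 0xf marking a shared TX/RX FIFO):

/* Sketch only, not quoted from the driver: decode a MUSB_FIFOSIZE byte. */
static void sketch_decode_fifosize(struct musb_hw_ep *hw_ep, u8 reg)
{
	hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);

	if ((reg & 0xf0) == 0xf0) {
		/* shared FIFO: RX limit mirrors TX */
		hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
		hw_ep->is_shared_fifo = true;
	} else {
		hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
		hw_ep->is_shared_fifo = false;
	}
}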
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 6dbbd0786a6a..499c431a6d62 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -64,11 +64,8 @@
*
* - DMA (Mentor/OMAP) ...has at least toggle update problems
*
- * - Still no traffic scheduling code to make NAKing for bulk or control
- * transfers unable to starve other requests; or to make efficient use
- * of hardware with periodic transfers. (Note that network drivers
- * commonly post bulk reads that stay pending for a long time; these
- * would make very visible trouble.)
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ * starvation ... nothing yet for TX, interrupt, or bulk.
*
* - Not tested with HNP, but some SRP paths seem to behave.
*
@@ -88,11 +85,8 @@
*
* CONTROL transfers all go through ep0. BULK ones go through dedicated IN
* and OUT endpoints ... hardware is dedicated for those "async" queue(s).
- *
* (Yes, bulk _could_ use more of the endpoints than that, and would even
- * benefit from it ... one remote device may easily be NAKing while others
- * need to perform transfers in that same direction. The same thing could
- * be done in software though, assuming dma cooperates.)
+ * benefit from it.)
*
* INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
* So far that scheduling is both dumb and optimistic: the endpoint will be
@@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
len = urb->iso_frame_desc[0].length;
break;
default: /* bulk, interrupt */
- buf = urb->transfer_buffer;
- len = urb->transfer_buffer_length;
+ /* actual_length may be nonzero on retry paths */
+ buf = urb->transfer_buffer + urb->actual_length;
+ len = urb->transfer_buffer_length - urb->actual_length;
}
DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
@@ -395,7 +390,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
* de-allocated if it's tracked and allocated;
* and where we'd update the schedule tree...
*/
- musb->periodic[ep->epnum] = NULL;
kfree(qh);
qh = NULL;
break;
@@ -1045,7 +1039,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
- * ep0 is more likely to stay busy.
+ * ep0 is more likely to stay busy. That's already done
+ * for bulk RX transfers.
*
* if (qh->ring.next != &musb->control), then
* we have a candidate... NAKing is *NOT* an error
@@ -1197,6 +1192,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
/* NOTE: this code path would be a good place to PAUSE a
* transfer, if there's some other (nonperiodic) tx urb
* that could use this fifo. (dma complicates it...)
+ * That's already done for bulk RX transfers.
*
* if (bulk && qh->ring.next != &musb->out_bulk), then
* we have a candidate... NAKing is *NOT* an error
@@ -1358,6 +1354,50 @@ finish:
#endif
+/* Schedule next QH from musb->in_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
+{
+ struct dma_channel *dma;
+ struct urb *urb;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *cur_qh, *next_qh;
+ u16 rx_csr;
+
+ musb_ep_select(mbase, ep->epnum);
+ dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+ /* clear nak timeout bit */
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ cur_qh = first_qh(&musb->in_bulk);
+ if (cur_qh) {
+ urb = next_urb(cur_qh);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ urb->actual_length += dma->actual_len;
+ dma->actual_len = 0L;
+ }
+ musb_save_toggle(ep, 1, urb);
+
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+ /* get the next qh from musb->in_bulk */
+ next_qh = first_qh(&musb->in_bulk);
+
+ /* set rx_reinit and schedule the next qh */
+ ep->rx_reinit = 1;
+ musb_start_urb(musb, 1, next_qh);
+ }
+}
+
/*
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
* and high-bandwidth IN transfer cases.
@@ -1421,18 +1461,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
- /* NOTE this code path would be a good place to PAUSE a
- * transfer, if there's some other (nonperiodic) rx urb
- * that could use this fifo. (dma complicates it...)
+ DBG(6, "RX end %d NAK timeout\n", epnum);
+
+ /* NOTE: NAKing is *NOT* an error, so we want to
+ * continue. Except ... if there's a request for
+ * another QH, use that instead of starving it.
*
- * if (bulk && qh->ring.next != &musb->in_bulk), then
- * we have a candidate... NAKing is *NOT* an error
+ * Devices like Ethernet and serial adapters keep
+ * reads posted at all times, which will starve
+ * other devices without this logic.
*/
- DBG(6, "RX end %d NAK timeout\n", epnum);
+ if (usb_pipebulk(urb->pipe)
+ && qh->mux == 1
+ && !list_is_singular(&musb->in_bulk)) {
+ musb_bulk_rx_nak_timeout(musb, hw_ep);
+ return;
+ }
musb_ep_select(mbase, epnum);
- musb_writew(epio, MUSB_RXCSR,
- MUSB_RXCSR_H_WZC_BITS
- | MUSB_RXCSR_H_REQPKT);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
goto finish;
} else {
@@ -1711,31 +1759,27 @@ static int musb_schedule(
/* else, periodic transfers get muxed to other endpoints */
- /* FIXME this doesn't consider direction, so it can only
- * work for one half of the endpoint hardware, and assumes
- * the previous cases handled all non-shared endpoints...
- */
-
- /* we know this qh hasn't been scheduled, so all we need to do
+ /*
+ * We know this qh hasn't been scheduled, so all we need to do
* is choose which hardware endpoint to put it on ...
*
* REVISIT what we really want here is a regular schedule tree
- * like e.g. OHCI uses, but for now musb->periodic is just an
- * array of the _single_ logical endpoint associated with a
- * given physical one (identity mapping logical->physical).
- *
- * that simplistic approach makes TT scheduling a lot simpler;
- * there is none, and thus none of its complexity...
+ * like e.g. OHCI uses.
*/
best_diff = 4096;
best_end = -1;
- for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+ for (epnum = 1, hw_ep = musb->endpoints + 1;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
int diff;
- if (musb->periodic[epnum])
+ if (is_in || hw_ep->is_shared_fifo) {
+ if (hw_ep->in_qh != NULL)
+ continue;
+ } else if (hw_ep->out_qh != NULL)
continue;
- hw_ep = &musb->endpoints[epnum];
+
if (hw_ep == musb->bulk_ep)
continue;
@@ -1756,6 +1800,17 @@ static int musb_schedule(
head = &musb->in_bulk;
else
head = &musb->out_bulk;
+
+ /* Enable bulk RX NAK timeout scheme when bulk requests are
+ * multiplexed. This scheme doesn't work in the high speed to full
+ * speed scenario, as NAK interrupts do not arrive for a
+ * full speed device connected to a high speed device.
+ * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
+ * 4 (8 frames or 8 ms) for FS devices.
+ */
+ if (is_in && qh->dev)
+ qh->intv_reg =
+ (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
goto success;
} else if (best_end < 0) {
return -ENOSPC;
@@ -1764,7 +1819,6 @@ static int musb_schedule(
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
- musb->periodic[best_end] = qh;
DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
if (head) {
@@ -1888,13 +1942,11 @@ static int musb_urb_enqueue(
*
* The downside of disabling this is that transfer scheduling
* gets VERY unfair for nonperiodic transfers; a misbehaving
- * peripheral could make that hurt. Or for reads, one that's
- * perfectly normal: network and other drivers keep reads
- * posted at all times, having one pending for a week should
- * be perfectly safe.
+ * peripheral could make that hurt. That's perfectly normal
+ * for reads from network or serial adapters ... so we have
+ * partial NAKlimit support for bulk RX.
*
- * The upside of disabling it is avoidng transfer scheduling
- * code to put this aside for while.
+ * The upside of disabling it is simpler transfer scheduling.
*/
interval = 0;
}
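The interval values chosen in the scheduling hunk above (8 for high speed, 4 for full speed) follow the MUSB NAKLIMIT encoding, where a register value m gives a timeout of 2^(m-1) frames (1 ms each at full speed) or microframes (125 us each at high speed). A small sketch, assuming that encoding, shows how the 16 ms and 8 ms figures in the comment fall out:

/* Sketch: map a NAKLIMIT register value to a timeout in microseconds,
 * assuming the 2^(m-1) (micro)frame encoding described above.
 */
static unsigned int nak_timeout_us(unsigned int m, int highspeed)
{
	unsigned int frames = 1u << (m - 1);		/* 2^(m-1) */
	unsigned int frame_us = highspeed ? 125 : 1000;	/* uframe vs. frame */

	return frames * frame_us;
}

/* nak_timeout_us(8, 1) == 128 * 125  == 16000 us (16 ms, high speed)
 * nak_timeout_us(4, 0) ==   8 * 1000 ==  8000 us ( 8 ms, full speed)
 */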
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index e0e9ce584175..bf677acc83db 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -285,7 +285,7 @@ int musb_hub_control(
desc->bDescLength = 9;
desc->bDescriptorType = 0x29;
desc->bNbrPorts = 1;
- desc->wHubCharacteristics = __constant_cpu_to_le16(
+ desc->wHubCharacteristics = cpu_to_le16(
0x0001 /* per-port power switching */
| 0x0010 /* no overcurrent reporting */
);