author		Jiri Kosina <jkosina@suse.cz>	2010-08-11 09:36:51 +0200
committer	Jiri Kosina <jkosina@suse.cz>	2010-08-11 09:36:51 +0200
commit		6396fc3b3ff3f6b942992b653a62df11dcef9bea (patch)
tree		db3c7cbe833b43c653adc99f70941431c5ff7c4e /drivers/usb/host
parent		4785879e4d340e24e54f6de2ccfc42728b912808 (diff)
parent		3d30701b58970425e1d45994d6cb82f828924fdd (diff)
download	blackbird-op-linux-6396fc3b3ff3f6b942992b653a62df11dcef9bea.tar.gz
		blackbird-op-linux-6396fc3b3ff3f6b942992b653a62df11dcef9bea.zip
Merge branch 'master' into for-next
Conflicts:
	fs/exofs/inode.c
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig         |   11
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c   |    2
-rw-r--r--  drivers/usb/host/ehci-dbg.c      |  196
-rw-r--r--  drivers/usb/host/ehci-fsl.c      |    3
-rw-r--r--  drivers/usb/host/ehci-hcd.c      |   49
-rw-r--r--  drivers/usb/host/ehci-hub.c      |   25
-rw-r--r--  drivers/usb/host/ehci-lpm.c      |   83
-rw-r--r--  drivers/usb/host/ehci-omap.c     |   36
-rw-r--r--  drivers/usb/host/ehci-pci.c      |   26
-rw-r--r--  drivers/usb/host/ehci-q.c        |    3
-rw-r--r--  drivers/usb/host/ehci-sched.c    |  182
-rw-r--r--  drivers/usb/host/ehci.h          |   18
-rw-r--r--  drivers/usb/host/hwa-hc.c        |    4
-rw-r--r--  drivers/usb/host/imx21-hcd.c     |    2
-rw-r--r--  drivers/usb/host/isp1362.h       |   24
-rw-r--r--  drivers/usb/host/isp1760-hcd.c   |    3
-rw-r--r--  drivers/usb/host/ohci-dbg.c      |    4
-rw-r--r--  drivers/usb/host/ohci-hcd.c      |    6
-rw-r--r--  drivers/usb/host/ohci-hub.c      |   23
-rw-r--r--  drivers/usb/host/ohci-pci.c      |    2
-rw-r--r--  drivers/usb/host/ohci-ssb.c      |   52
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c  |    7
-rw-r--r--  drivers/usb/host/sl811-hcd.c     |    5
-rw-r--r--  drivers/usb/host/uhci-debug.c    |   23
-rw-r--r--  drivers/usb/host/uhci-hcd.c      |   87
-rw-r--r--  drivers/usb/host/uhci-hcd.h      |    7
-rw-r--r--  drivers/usb/host/uhci-hub.c      |    6
-rw-r--r--  drivers/usb/host/uhci-q.c        |    4
-rw-r--r--  drivers/usb/host/whci/hcd.c      |    2
-rw-r--r--  drivers/usb/host/whci/qset.c     |    2
-rw-r--r--  drivers/usb/host/xhci-mem.c      |  101
-rw-r--r--  drivers/usb/host/xhci-pci.c      |    9
-rw-r--r--  drivers/usb/host/xhci-ring.c     | 1332
-rw-r--r--  drivers/usb/host/xhci.c          |  344
-rw-r--r--  drivers/usb/host/xhci.h          |   30
35 files changed, 1887 insertions(+), 826 deletions(-)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f865be2276d4..2d926cec0725 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -72,8 +72,9 @@ config USB_EHCI_ROOT_HUB_TT
from ARC, and has since changed hands a few times.
config USB_EHCI_TT_NEWSCHED
- bool "Improved Transaction Translator scheduling (EXPERIMENTAL)"
- depends on USB_EHCI_HCD && EXPERIMENTAL
+ bool "Improved Transaction Translator scheduling"
+ depends on USB_EHCI_HCD
+ default y
---help---
This changes the periodic scheduling code to fill more of the low
and full speed bandwidth available from the Transaction Translator
@@ -84,9 +85,11 @@ config USB_EHCI_TT_NEWSCHED
If you have multiple periodic low/fullspeed devices connected to a
highspeed USB hub which is connected to a highspeed USB Host
Controller, and some of those devices will not work correctly
- (possibly due to "ENOSPC" or "-28" errors), say Y.
+ (possibly due to "ENOSPC" or "-28" errors), say Y. Conversely, if
+ you have only one such device and it doesn't work, you could try
+ saying N.
- If unsure, say N.
+ If unsure, say Y.
config USB_EHCI_BIG_ENDIAN_MMIO
bool
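
For reference, the "-28" mentioned in the help text above is simply the numeric value of ENOSPC on Linux, the error the periodic scheduler returns when it cannot fit another low/full-speed transfer through the Transaction Translator. A trivial, illustrative check:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* ENOSPC is 28, so failed URB submissions surface as "-28" */
		printf("ENOSPC = %d (%s)\n", ENOSPC, strerror(ENOSPC));
		return 0;
	}
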
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index faa61748db70..2baf8a849086 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -228,7 +228,7 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
* the root hub is either suspended or stopped.
*/
spin_lock_irqsave(&ehci->lock, flags);
- ehci_prepare_ports_for_controller_suspend(ehci);
+ ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 874d2000bf92..76b7fd2d838a 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -98,13 +98,18 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
} else {
ehci_dbg (ehci,
- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
+ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
label,
params,
HCC_ISOC_THRES(params),
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "",
- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
+ HCC_LPM(params) ? " LPM" : "",
+ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
+ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
+ HCC_32FRAME_PERIODIC_LIST(params) ?
+ " 32 peridic list" : "");
}
}
#else
@@ -191,8 +196,9 @@ static int __maybe_unused
dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf (buf, len,
- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
label, label [0] ? " " : "", status,
+ (status & STS_PPCE_MASK) ? " PPCE" : "",
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
@@ -210,8 +216,9 @@ static int __maybe_unused
dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf (buf, len,
- "%s%sintrenable %02x%s%s%s%s%s%s",
+ "%s%sintrenable %02x%s%s%s%s%s%s%s",
label, label [0] ? " " : "", enable,
+ (enable & STS_PPCE_MASK) ? " PPCE" : "",
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
@@ -228,9 +235,15 @@ static int
dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
{
return scnprintf (buf, len,
- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
+ "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
+ "period=%s%s %s",
label, label [0] ? " " : "", command,
- (command & CMD_PARK) ? "park" : "(park)",
+ (command & CMD_HIRD) ? " HIRD" : "",
+ (command & CMD_PPCEE) ? " PPCEE" : "",
+ (command & CMD_FSP) ? " FSP" : "",
+ (command & CMD_ASPE) ? " ASPE" : "",
+ (command & CMD_PSPE) ? " PSPE" : "",
+ (command & CMD_PARK) ? " park" : "(park)",
CMD_PARK_CNT (command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
@@ -257,11 +270,22 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
}
return scnprintf (buf, len,
- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
+ "%s%sport:%d status %06x %d %s%s%s%s%s%s "
+ "sig=%s%s%s%s%s%s%s%s%s%s%s",
label, label [0] ? " " : "", port, status,
+ status>>25,/*device address */
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
+ " ACK" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
+ " NYET" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
+ " STALL" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
+ " ERR" : "",
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
+ (status & PORT_LPM) ? " LPM" : "",
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
@@ -330,6 +354,13 @@ static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
static int debug_async_open(struct inode *, struct file *);
+static int debug_lpm_open(struct inode *, struct file *);
+static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static int debug_lpm_close(struct inode *inode, struct file *file);
+
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
@@ -351,6 +382,13 @@ static const struct file_operations debug_registers_fops = {
.read = debug_output,
.release = debug_close,
};
+static const struct file_operations debug_lpm_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_lpm_open,
+ .read = debug_lpm_read,
+ .write = debug_lpm_write,
+ .release = debug_lpm_close,
+};
static struct dentry *ehci_debug_root;
@@ -674,7 +712,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
spin_lock_irqsave (&ehci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
size = scnprintf (next, size,
"bus %s, device %s\n"
"%s\n"
@@ -917,51 +955,127 @@ static int debug_registers_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
+static int debug_lpm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int debug_lpm_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ /* TODO: show lpm stats */
+ return 0;
+}
+
+static ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ char buf[50];
+ size_t len;
+ u32 temp;
+ unsigned long port;
+ u32 __iomem *portsc;
+ u32 params;
+
+ hcd = bus_to_hcd(file->private_data);
+ ehci = hcd_to_ehci(hcd);
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ if (strncmp(buf, "enable", 5) == 0) {
+ if (strict_strtoul(buf + 7, 10, &port))
+ return -EINVAL;
+ params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ if (port > HCS_N_PORTS(params)) {
+ ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
+ return -ENODEV;
+ }
+ portsc = &ehci->regs->port_status[port-1];
+ temp = ehci_readl(ehci, portsc);
+ if (!(temp & PORT_DEV_ADDR)) {
+ ehci_dbg(ehci, "LPM: no device attached\n");
+ return -ENODEV;
+ }
+ temp |= PORT_LPM;
+ ehci_writel(ehci, temp, portsc);
+ printk(KERN_INFO "force enable LPM for port %lu\n", port);
+ } else if (strncmp(buf, "hird=", 5) == 0) {
+ unsigned long hird;
+ if (strict_strtoul(buf + 5, 16, &hird))
+ return -EINVAL;
+ printk(KERN_INFO "setting hird %s %lu\n", buf + 6, hird);
+ temp = ehci_readl(ehci, &ehci->regs->command);
+ temp &= ~CMD_HIRD;
+ temp |= hird << 24;
+ ehci_writel(ehci, temp, &ehci->regs->command);
+ } else if (strncmp(buf, "disable", 7) == 0) {
+ if (strict_strtoul(buf + 8, 10, &port))
+ return -EINVAL;
+ params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ if (port > HCS_N_PORTS(params)) {
+ ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
+ return -ENODEV;
+ }
+ portsc = &ehci->regs->port_status[port-1];
+ temp = ehci_readl(ehci, portsc);
+ if (!(temp & PORT_DEV_ADDR)) {
+ ehci_dbg(ehci, "ERR: no device attached\n");
+ return -ENODEV;
+ }
+ temp &= ~PORT_LPM;
+ ehci_writel(ehci, temp, portsc);
+ printk(KERN_INFO "disabled LPM for port %lu\n", port);
+ } else
+ return -EOPNOTSUPP;
+ return count;
+}
+
static inline void create_debug_files (struct ehci_hcd *ehci)
{
struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root);
if (!ehci->debug_dir)
- goto dir_error;
-
- ehci->debug_async = debugfs_create_file("async", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_async_fops);
- if (!ehci->debug_async)
- goto async_error;
-
- ehci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_periodic_fops);
- if (!ehci->debug_periodic)
- goto periodic_error;
-
- ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_registers_fops);
- if (!ehci->debug_registers)
- goto registers_error;
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus,
+ &debug_lpm_fops))
+ goto file_error;
+
return;
-registers_error:
- debugfs_remove(ehci->debug_periodic);
-periodic_error:
- debugfs_remove(ehci->debug_async);
-async_error:
- debugfs_remove(ehci->debug_dir);
-dir_error:
- ehci->debug_periodic = NULL;
- ehci->debug_async = NULL;
- ehci->debug_dir = NULL;
+file_error:
+ debugfs_remove_recursive(ehci->debug_dir);
}
static inline void remove_debug_files (struct ehci_hcd *ehci)
{
- debugfs_remove(ehci->debug_registers);
- debugfs_remove(ehci->debug_periodic);
- debugfs_remove(ehci->debug_async);
- debugfs_remove(ehci->debug_dir);
+ debugfs_remove_recursive(ehci->debug_dir);
}
#endif /* STUB_DEBUG_FILES */
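
The new "lpm" debugfs entry created above accepts plain-text commands: "enable <port>" and "disable <port>" (decimal root-hub port numbers) toggle PORT_LPM on a port, and "hird=<value>" (hexadecimal) rewrites the HIRD field of the command register. A minimal user-space sketch of driving it; the debugfs mount point and bus name below are assumptions, not taken from the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Hypothetical path: <debugfs>/usb/ehci/<bus name>/lpm */
	#define LPM_FILE "/sys/kernel/debug/usb/ehci/0000:00:1d.7/lpm"

	static int lpm_cmd(const char *cmd)
	{
		int fd = open(LPM_FILE, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return -1;
		}
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("write");
		close(fd);
		return 0;
	}

	int main(void)
	{
		lpm_cmd("hird=3");	/* HIRD field, hexadecimal */
		lpm_cmd("enable 1");	/* force LPM on root-hub port 1 */
		return 0;
	}
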
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 5cd967d28938..a416421abfa2 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -313,7 +313,8 @@ static int ehci_fsl_drv_suspend(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;
- ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+ device_may_wakeup(dev));
if (!fsl_deep_sleep())
return 0;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6fcffe15a005..ac0f7a4b0341 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -36,6 +36,7 @@
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -78,7 +79,13 @@ static const char hcd_name [] = "ehci_hcd";
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
-#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
#define EHCI_IAA_MSECS 10 /* arbitrary */
#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
@@ -100,6 +107,11 @@ static int ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
+
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
/*-------------------------------------------------------------------------*/
@@ -304,6 +316,7 @@ static void end_unlink_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
#include "ehci-hub.c"
+#include "ehci-lpm.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
@@ -577,6 +590,11 @@ static int ehci_init(struct usb_hcd *hcd)
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
+ ehci->has_ppcd = 1;
+ ehci_dbg(ehci, "enable per-port change event\n");
+ temp |= CMD_PPCEE;
+ }
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
@@ -603,10 +621,22 @@ static int ehci_init(struct usb_hcd *hcd)
default: BUG();
}
}
+ if (HCC_LPM(hcc_params)) {
+ /* support link power management EHCI 1.1 addendum */
+ ehci_dbg(ehci, "support lpm\n");
+ ehci->has_lpm = 1;
+ if (hird > 0xf) {
+ ehci_dbg(ehci, "hird %d invalid, use default 0",
+ hird);
+ hird = 0;
+ }
+ temp |= hird << 24;
+ }
ehci->command = temp;
/* Accept arbitrarily long scatter-gather lists */
- hcd->self.sg_tablesize = ~0;
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
return 0;
}
@@ -619,7 +649,6 @@ static int ehci_run (struct usb_hcd *hcd)
u32 hcc_params;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
/* EHCI spec section 4.1 */
if ((retval = ehci_reset(ehci)) != 0) {
@@ -764,6 +793,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
+ u32 ppcd = 0;
/* kick root hub later */
pcd_status = status;
@@ -772,9 +802,18 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
if (!(cmd & CMD_RUN))
usb_hcd_resume_root_hub(hcd);
+ /* get per-port change detect bits */
+ if (ehci->has_ppcd)
+ ppcd = status >> 16;
+
while (i--) {
- int pstatus = ehci_readl(ehci,
- &ehci->regs->port_status [i]);
+ int pstatus;
+
+ /* leverage per-port change bits feature */
+ if (ehci->has_ppcd && !(ppcd & (1 << i)))
+ continue;
+ pstatus = ehci_readl(ehci,
+ &ehci->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
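
ehci_init() above folds the new "hird" module parameter into bits 27:24 of the command register (the shift by 24, with values above 0xf rejected), and the parameter description reads "+1 for each 75us". A small sketch of that encoding; it deliberately ignores any base duration the EHCI 1.1 LPM addendum may add on top, which is an assumption of this illustration:

	#include <stdio.h>

	#define CMD_HIRD_SHIFT	24
	#define CMD_HIRD_MASK	(0xfu << CMD_HIRD_SHIFT)	/* bits 27:24 */

	/* each HIRD step is documented above as another 75 microseconds */
	static unsigned int hird_to_us(unsigned int hird)
	{
		return hird * 75;
	}

	static unsigned int set_cmd_hird(unsigned int command, unsigned int hird)
	{
		return (command & ~CMD_HIRD_MASK) | ((hird & 0xf) << CMD_HIRD_SHIFT);
	}

	int main(void)
	{
		unsigned int hird = 4;
		unsigned int command = set_cmd_hird(0, hird);

		printf("HIRD=%u -> ~%u us, command=0x%08x\n",
		       hird, hird_to_us(hird), command);
		return 0;
	}
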
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index e7d3d8def282..796ea0c8900f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -107,7 +107,7 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
}
static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
- bool suspending)
+ bool suspending, bool do_wakeup)
{
int port;
u32 temp;
@@ -117,8 +117,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
* when the controller is suspended or resumed. In all other
* cases they don't need to be changed.
*/
- if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
- device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
+ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
return;
/* clear phy low-power mode before changing wakeup flags */
@@ -167,6 +166,10 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
}
}
+
+ /* Does the root hub have a port wakeup pending? */
+ if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
+ usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
@@ -316,7 +319,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
spin_unlock_irq(&ehci->lock);
return -ESHUTDOWN;
}
@@ -603,6 +606,7 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
+ u32 ppcd = 0;
/* if !USB_SUSPEND, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
@@ -632,7 +636,15 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
/* port N changes (bit N)? */
spin_lock_irqsave (&ehci->lock, flags);
+
+ /* get per-port change detect bits */
+ if (ehci->has_ppcd)
+ ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
+
for (i = 0; i < ports; i++) {
+ /* leverage per-port change bits feature */
+ if (ehci->has_ppcd && !(ppcd & (1 << i)))
+ continue;
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
/*
@@ -790,6 +802,11 @@ static int ehci_hub_control (
status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
+ if (ehci->has_lpm) {
+ /* clear PORTSC bits on disconnect */
+ temp &= ~PORT_LPM;
+ temp &= ~PORT_DEV_ADDR;
+ }
ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
status_reg);
break;
diff --git a/drivers/usb/host/ehci-lpm.c b/drivers/usb/host/ehci-lpm.c
new file mode 100644
index 000000000000..b4d4d63c13ed
--- /dev/null
+++ b/drivers/usb/host/ehci-lpm.c
@@ -0,0 +1,83 @@
+/* ehci-lpm.c EHCI HCD LPM support code
+ * Copyright (c) 2008 - 2010, Intel Corporation.
+ * Author: Jacob Pan <jacob.jun.pan@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/* this file is part of ehci-hcd.c */
+static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
+{
+ u32 portsc;
+
+ ehci_dbg(ehci, "set dev address %d for port %d\n", dev_addr, port_num);
+ if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
+ ehci_dbg(ehci, "invalid port number %d\n", port_num);
+ return -ENODEV;
+ }
+ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
+ portsc &= ~PORT_DEV_ADDR;
+ portsc |= dev_addr<<25;
+ ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
+ return 0;
+}
+
+/*
+ * This function checks whether the attached device supports LPM.
+ * If it does, the PORTSC register is marked with the PORT_LPM bit.
+ */
+static int ehci_lpm_check(struct ehci_hcd *ehci, int port)
+{
+ u32 __iomem *portsc;
+ u32 val32;
+ int retval;
+
+ portsc = &ehci->regs->port_status[port-1];
+ val32 = ehci_readl(ehci, portsc);
+ if (!(val32 & PORT_DEV_ADDR)) {
+ ehci_dbg(ehci, "LPM: no device attached\n");
+ return -ENODEV;
+ }
+ val32 |= PORT_LPM;
+ ehci_writel(ehci, val32, portsc);
+ msleep(5);
+ val32 |= PORT_SUSPEND;
+ ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
+ ehci_writel(ehci, val32, portsc);
+ /* wait for ACK */
+ msleep(10);
+ retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
+ PORTSC_SUSPEND_STS_ACK, 125);
+ dbg_port(ehci, "LPM", port, val32);
+ if (retval != -ETIMEDOUT) {
+ ehci_dbg(ehci, "LPM: device ACK for LPM\n");
+ val32 |= PORT_LPM;
+ /*
+ * now device should be in L1 sleep, let's wake up the device
+ * so that we can complete enumeration.
+ */
+ ehci_writel(ehci, val32, portsc);
+ msleep(10);
+ val32 |= PORT_RESUME;
+ ehci_writel(ehci, val32, portsc);
+ } else {
+ ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
+ retval);
+ val32 &= ~PORT_LPM;
+ retval = -ETIMEDOUT;
+ ehci_writel(ehci, val32, portsc);
+ }
+
+ return retval;
+}
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 5450e628157f..116ae280053a 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -38,6 +38,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/usb/ulpi.h>
#include <plat/usb.h>
/*
@@ -236,6 +237,35 @@ static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
/*-------------------------------------------------------------------------*/
+static void omap_ehci_soft_phy_reset(struct ehci_hcd_omap *omap, u8 port)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned reg = 0;
+
+ reg = ULPI_FUNC_CTRL_RESET
+ /* FUNCTION_CTRL_SET register */
+ | (ULPI_SET(ULPI_FUNC_CTRL) << EHCI_INSNREG05_ULPI_REGADD_SHIFT)
+ /* Write */
+ | (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT)
+ /* PORTn */
+ | ((port + 1) << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT)
+ /* start ULPI access*/
+ | (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT);
+
+ ehci_omap_writel(omap->ehci_base, EHCI_INSNREG05_ULPI, reg);
+
+ /* Wait for ULPI access completion */
+ while ((ehci_omap_readl(omap->ehci_base, EHCI_INSNREG05_ULPI)
+ & (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(omap->dev, "phy reset operation timed out\n");
+ break;
+ }
+ }
+}
+
/* omap_start_ehc
* - Start the TI USBHOST controller
*/
@@ -425,6 +455,12 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
gpio_set_value(omap->reset_gpio_port[1], 1);
}
+ /* Soft reset the PHY using PHY reset command over ULPI */
+ if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
+ omap_ehci_soft_phy_reset(omap, 0);
+ if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
+ omap_ehci_soft_phy_reset(omap, 1);
+
return 0;
err_sys_status:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index d43d176161aa..58b72d741d93 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -114,6 +114,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_INTEL:
ehci->need_io_watchdog = 0;
+ ehci->fs_i_thresh = 1;
if (pdev->device == 0x27cc) {
ehci->broken_periodic = 1;
ehci_info(ehci, "using broken periodic workaround\n");
@@ -277,7 +278,7 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_suspend(struct usb_hcd *hcd)
+static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
unsigned long flags;
@@ -291,7 +292,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
* the root hub is either suspended or stopped.
*/
spin_lock_irqsave (&ehci->lock, flags);
- ehci_prepare_ports_for_controller_suspend(ehci);
+ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
@@ -361,6 +362,22 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
}
#endif
+static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int rc = 0;
+
+ if (!udev->parent) /* udev is root hub itself, impossible */
+ rc = -1;
+ /* so far we only support LPM for devices connected directly to the root hub */
+ if (ehci->has_lpm && !udev->parent->parent) {
+ rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum);
+ if (!rc)
+ rc = ehci_lpm_check(ehci, udev->portnum);
+ }
+ return rc;
+}
+
static const struct hc_driver ehci_pci_hc_driver = {
.description = hcd_name,
.product_desc = "EHCI Host Controller",
@@ -407,6 +424,11 @@ static const struct hc_driver ehci_pci_hc_driver = {
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+ /*
+ * call back when device connected and addressed
+ */
+ .update_device = ehci_update_device,
+
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 11a79c4f4a9d..233c288e3f93 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1126,8 +1126,7 @@ submit_async (
#endif
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
rc = -ESHUTDOWN;
goto done;
}
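
Several hunks in this series replace open-coded test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) checks with the HCD_HW_ACCESSIBLE() helper, and likewise use HCD_POLL_RH() plus set_bit()/clear_bit() on HCD_FLAG_POLL_RH instead of the old hcd->poll_rh field. The sketch below models that accessor pattern; the bit numbers and macro bodies are assumptions for illustration only, the authoritative definitions live in include/linux/usb/hcd.h:

	#include <stdio.h>

	struct toy_hcd {
		unsigned long flags;
	};

	/* bit positions chosen for the sketch only */
	#define HCD_FLAG_HW_ACCESSIBLE	0
	#define HCD_FLAG_POLL_RH	1

	#define HCD_HW_ACCESSIBLE(hcd)	((hcd)->flags & (1UL << HCD_FLAG_HW_ACCESSIBLE))
	#define HCD_POLL_RH(hcd)	((hcd)->flags & (1UL << HCD_FLAG_POLL_RH))

	int main(void)
	{
		struct toy_hcd hcd = { .flags = 1UL << HCD_FLAG_HW_ACCESSIBLE };

		/* reads the same way the converted call sites above do */
		if (HCD_HW_ACCESSIBLE(&hcd) && !HCD_POLL_RH(&hcd))
			printf("hardware accessible, root hub not polled\n");
		return 0;
	}
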
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 805ec633a652..a92526d6e5ae 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -880,8 +880,7 @@ static int intr_submit (
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -1075,15 +1074,6 @@ iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
if (stream->ep)
stream->ep->hcpriv = NULL;
- if (stream->rescheduled) {
- ehci_info (ehci, "ep%d%s-iso rescheduled "
- "%lu times in %lu seconds\n",
- stream->bEndpointAddress, is_in ? "in" : "out",
- stream->rescheduled,
- ((jiffies - stream->start)/HZ)
- );
- }
-
kfree(stream);
}
}
@@ -1396,30 +1386,25 @@ iso_stream_schedule (
struct ehci_iso_stream *stream
)
{
- u32 now, next, start, period;
+ u32 now, next, start, period, span;
int status;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
- struct pci_dev *pdev;
- if (sched->span > (mod - SCHEDULE_SLOP)) {
- ehci_dbg (ehci, "iso request %p too long\n", urb);
- status = -EFBIG;
- goto fail;
+ period = urb->interval;
+ span = sched->span;
+ if (!stream->highspeed) {
+ period <<= 3;
+ span <<= 3;
}
- if ((stream->depth + sched->span) > mod) {
- ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
- urb, stream->depth, sched->span, mod);
+ if (span > mod - SCHEDULE_SLOP) {
+ ehci_dbg (ehci, "iso request %p too long\n", urb);
status = -EFBIG;
goto fail;
}
- period = urb->interval;
- if (!stream->highspeed)
- period <<= 3;
-
- now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
+ now = ehci_readl(ehci, &ehci->regs->frame_index) & (mod - 1);
/* Typical case: reuse current schedule, stream is still active.
* Hopefully there are no gaps from the host falling behind
@@ -1427,34 +1412,35 @@ iso_stream_schedule (
* slot in the schedule, implicitly assuming URB_ISO_ASAP.
*/
if (likely (!list_empty (&stream->td_list))) {
- pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
- start = stream->next_uframe;
+ u32 excess;
/* For high speed devices, allow scheduling within the
- * isochronous scheduling threshold. For full speed devices,
- * don't. (Work around for Intel ICH9 bug.)
+ * isochronous scheduling threshold. For full speed devices
+ * and Intel PCI-based controllers, don't (work around for
+ * Intel ICH9 bug).
*/
- if (!stream->highspeed &&
- pdev->vendor == PCI_VENDOR_ID_INTEL)
+ if (!stream->highspeed && ehci->fs_i_thresh)
next = now + ehci->i_thresh;
else
next = now;
- /* Fell behind (by up to twice the slop amount)? */
- if (((start - next) & (mod - 1)) >=
- mod - 2 * SCHEDULE_SLOP)
- start += period * DIV_ROUND_UP(
- (next - start) & (mod - 1),
- period);
-
- /* Tried to schedule too far into the future? */
- if (unlikely(((start - now) & (mod - 1)) + sched->span
- >= mod - 2 * SCHEDULE_SLOP)) {
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
status = -EFBIG;
goto fail;
}
- stream->next_uframe = start;
- goto ready;
}
/* need to schedule; when's the next (u)frame we could start?
@@ -1463,51 +1449,60 @@ iso_stream_schedule (
* can also help high bandwidth if the dma and irq loads don't
* jump until after the queue is primed.
*/
- start = SCHEDULE_SLOP + (now & ~0x07);
- start %= mod;
- stream->next_uframe = start;
-
- /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
-
- /* find a uframe slot with enough bandwidth */
- for (; start < (stream->next_uframe + period); start++) {
- int enough_space;
-
- /* check schedule: enough space? */
- if (stream->highspeed)
- enough_space = itd_slot_ok (ehci, mod, start,
- stream->usecs, period);
- else {
- if ((start % 8) >= 6)
- continue;
- enough_space = sitd_slot_ok (ehci, mod, stream,
- start, sched, period);
+ else {
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth */
+ next = start + period;
+ for (; start < next; start++) {
+
+ /* check schedule: enough space? */
+ if (stream->highspeed) {
+ if (itd_slot_ok(ehci, mod, start,
+ stream->usecs, period))
+ break;
+ } else {
+ if ((start % 8) >= 6)
+ continue;
+ if (sitd_slot_ok(ehci, mod, stream,
+ start, sched, period))
+ break;
+ }
}
- /* schedule it here if there's enough bandwidth */
- if (enough_space) {
- stream->next_uframe = start % mod;
- goto ready;
+ /* no room in the schedule */
+ if (start == next) {
+ ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
}
}
- /* no room in the schedule */
- ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
- list_empty (&stream->td_list) ? "" : "re",
- urb, now, now + mod);
- status = -ENOSPC;
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ ehci_dbg(ehci, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
-fail:
- iso_sched_free (stream, sched);
- urb->hcpriv = NULL;
- return status;
+ stream->next_uframe = start & (mod - 1);
-ready:
/* report high speed start in uframes; full speed, in frames */
urb->start_frame = stream->next_uframe;
if (!stream->highspeed)
urb->start_frame >>= 3;
return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -1602,7 +1597,7 @@ itd_link_urb (
struct ehci_iso_sched *iso_sched = urb->hcpriv;
struct ehci_itd *itd;
- next_uframe = stream->next_uframe % mod;
+ next_uframe = stream->next_uframe & (mod - 1);
if (unlikely (list_empty(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
@@ -1613,7 +1608,6 @@ itd_link_urb (
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
urb->interval,
next_uframe >> 3, next_uframe & 0x7);
- stream->start = jiffies;
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -1639,14 +1633,13 @@ itd_link_urb (
itd_patch(ehci, itd, iso_sched, packet, uframe);
next_uframe += stream->interval;
- stream->depth += stream->interval;
- next_uframe %= mod;
+ next_uframe &= mod - 1;
packet++;
/* link completed itds into the schedule */
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
- itd_link (ehci, frame % ehci->periodic_size, itd);
+ itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
itd = NULL;
}
}
@@ -1695,7 +1688,6 @@ itd_complete (
t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
itd->hw_transaction [uframe] = 0;
- stream->depth -= stream->interval;
/* report transfer status */
if (unlikely (t & ISO_ERRS)) {
@@ -1815,8 +1807,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -2024,9 +2015,8 @@ sitd_link_urb (
"sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
urb->dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- (next_uframe >> 3) % ehci->periodic_size,
+ (next_uframe >> 3) & (ehci->periodic_size - 1),
stream->interval, hc32_to_cpu(ehci, stream->splits));
- stream->start = jiffies;
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -2047,13 +2037,12 @@ sitd_link_urb (
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
- sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
+ sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
next_uframe += stream->interval << 3;
- stream->depth += stream->interval << 3;
}
- stream->next_uframe = next_uframe % mod;
+ stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
iso_sched_free (stream, sched);
@@ -2111,7 +2100,6 @@ sitd_complete (
desc->actual_length = desc->length - SITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
- stream->depth -= stream->interval << 3;
/* handle completion now? */
if ((urb_index + 1) != urb->number_of_packets)
@@ -2201,8 +2189,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -2263,7 +2250,7 @@ scan_periodic (struct ehci_hcd *ehci)
now_uframe = ehci->next_uframe;
if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
clock = ehci_readl(ehci, &ehci->regs->frame_index);
- clock_frame = (clock >> 3) % ehci->periodic_size;
+ clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
} else {
clock = now_uframe + mod - 1;
clock_frame = -1;
@@ -2272,7 +2259,7 @@ scan_periodic (struct ehci_hcd *ehci)
free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
- clock %= mod;
+ clock &= mod - 1;
clock_frame = clock >> 3;
for (;;) {
@@ -2361,7 +2348,7 @@ restart:
* frame is current.
*/
if (((frame == clock_frame) ||
- (((frame + 1) % ehci->periodic_size)
+ (((frame + 1) & (ehci->periodic_size - 1))
== clock_frame))
&& live
&& (q.sitd->hw_results &
@@ -2428,7 +2415,8 @@ restart:
|| ehci->periodic_sched == 0)
break;
ehci->next_uframe = now_uframe;
- now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
+ now = ehci_readl(ehci, &ehci->regs->frame_index) &
+ (mod - 1);
if (now_uframe == now)
break;
@@ -2441,7 +2429,7 @@ restart:
}
} else {
now_uframe++;
- now_uframe %= mod;
+ now_uframe &= mod - 1;
}
}
}
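
A recurring change in ehci-sched.c above is replacing "% mod" and "% ehci->periodic_size" with masks of the form "& (mod - 1)". That rewrite is only valid because the EHCI periodic schedule length is a power of two (1024, 512 or 256 frames, so mod = periodic_size << 3 uframes), which makes the mask and the modulo agree for unsigned values. A minimal demonstration:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int periodic_size = 512;	/* EHCI_TUNE_FLS == 1 above */
		unsigned int mod = periodic_size << 3;	/* schedule length in uframes */
		unsigned int uf;

		assert((mod & (mod - 1)) == 0);		/* power of two, mask is safe */

		for (uf = 0; uf < 3 * mod; uf += 1023)
			assert((uf % mod) == (uf & (mod - 1)));

		printf("masking with 0x%x matches %% %u\n", mod - 1, mod);
		return 0;
	}
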
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 650a687f2854..bde823f704e9 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -130,6 +130,7 @@ struct ehci_hcd { /* one per controller */
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -140,7 +141,8 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
unsigned has_hostpc:1;
-
+ unsigned has_lpm:1; /* support link power management */
+ unsigned has_ppcd:1; /* support per-port change bits */
u8 sbrn; /* packed release number */
/* irq statistics */
@@ -154,9 +156,6 @@ struct ehci_hcd { /* one per controller */
/* debug files */
#ifdef DEBUG
struct dentry *debug_dir;
- struct dentry *debug_async;
- struct dentry *debug_periodic;
- struct dentry *debug_registers;
#endif
};
@@ -401,15 +400,12 @@ struct ehci_iso_stream {
u32 refcount;
u8 bEndpointAddress;
u8 highspeed;
- u16 depth; /* depth in uframes */
struct list_head td_list; /* queued itds/sitds */
struct list_head free_list; /* list of unused itds/sitds */
struct usb_device *udev;
struct usb_host_endpoint *ep;
/* output of (re)scheduling */
- unsigned long start; /* jiffies */
- unsigned long rescheduled;
int next_uframe;
__hc32 splits;
@@ -538,11 +534,11 @@ struct ehci_fstn {
/* Prepare the PORTSC wakeup flags during controller suspend/resume */
-#define ehci_prepare_ports_for_controller_suspend(ehci) \
- ehci_adjust_port_wakeup_flags(ehci, true);
+#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \
+ ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);
-#define ehci_prepare_ports_for_controller_resume(ehci) \
- ehci_adjust_port_wakeup_flags(ehci, false);
+#define ehci_prepare_ports_for_controller_resume(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, false, false);
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 35742f8c7cda..9bfac657572e 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -159,7 +159,7 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
goto error_set_cluster_id;
usb_hcd->uses_new_polling = 1;
- usb_hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
result = 0;
out:
@@ -776,7 +776,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
+ set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index caf116c09376..d0abb9b0e673 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1521,7 +1521,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd)
return -ETIMEDOUT;
}
spin_unlock_irq(&imx21->lock);
- schedule_timeout(1);
+ schedule_timeout_uninterruptible(1);
spin_lock_irq(&imx21->lock);
}
spin_unlock_irqrestore(&imx21->lock, flags);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index d995351f9bed..0f97820e65be 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -8,29 +8,7 @@
/*
* Platform specific compile time options
*/
-#if defined(CONFIG_ARCH_KARO)
-#include <asm/arch/hardware.h>
-#include <asm/arch/pxa-regs.h>
-#include <asm/arch/karo.h>
-
-#define USE_32BIT 1
-
-
-/* These options are mutually eclusive */
-#define USE_PLATFORM_DELAY 1
-#define USE_NDELAY 0
-/*
- * MAX_ROOT_PORTS: Number of downstream ports
- *
- * The chip has two USB ports, one of which can be configured as
- * an USB device port, so the value of this constant is implementation
- * specific.
- */
-#define MAX_ROOT_PORTS 2
-#define DUMMY_DELAY_ACCESS do {} while (0)
-
-/* insert platform specific definitions for other machines here */
-#elif defined(CONFIG_BLACKFIN)
+#if defined(CONFIG_BLACKFIN)
#include <linux/io.h>
#define USE_32BIT 0
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index dbcafa29c775..d1a3dfc9a408 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -482,7 +482,6 @@ static int isp1760_run(struct usb_hcd *hcd)
u32 chipid;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
hcd->state = HC_STATE_RUNNING;
isp1760_enable_interrupts(hcd);
@@ -1450,7 +1449,7 @@ static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&priv->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &priv_to_hcd(priv)->flags)) {
+ if (!HCD_HW_ACCESSIBLE(priv_to_hcd(priv))) {
rc = -ESHUTDOWN;
goto done;
}
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 8ad2441b0284..36abd2baa3ea 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -645,7 +645,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
hcd->product_desc,
hcd_name);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
size -= scnprintf (next, size,
"SUSPENDED (no register access)\n");
goto done;
@@ -687,7 +687,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
temp = scnprintf (next, size, "hub poll timer %s\n",
- ohci_to_hcd(ohci)->poll_rh ? "ON" : "off");
+ HCD_POLL_RH(ohci_to_hcd(ohci)) ? "ON" : "off");
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 3ceb097e165a..15ae39d6cc24 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -212,7 +212,7 @@ static int ohci_urb_enqueue (
spin_lock_irqsave (&ohci->lock, flags);
/* don't submit to a dead HC */
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
retval = -ENODEV;
goto fail;
}
@@ -684,7 +684,7 @@ retry:
}
/* use rhsc irqs after khubd is fully initialized */
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
hcd->uses_new_polling = 1;
/* start controller operations */
@@ -821,7 +821,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
else if (ints & OHCI_INTR_RD) {
ohci_vdbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
spin_lock (&ohci->lock);
ohci_rh_resume (ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 65cac8cc8921..cddcda95b579 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -284,7 +284,7 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_suspend (ohci, 0);
@@ -302,7 +302,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_resume (ohci);
@@ -355,6 +355,11 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
ohci_readl(ohci, &ohci->regs->intrenable);
msleep(20);
}
+
+ /* Does the root hub have a port wakeup pending? */
+ if (ohci_readl(ohci, &ohci->regs->intrstatus) &
+ (OHCI_INTR_RD | OHCI_INTR_RHSC))
+ usb_hcd_resume_root_hub(hcd);
}
/* Carry out polling-, autostop-, and autoresume-related state changes */
@@ -364,7 +369,7 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int poll_rh = 1;
int rhsc_enable;
- /* Some broken controllers never turn off RHCS in the interrupt
+ /* Some broken controllers never turn off RHSC in the interrupt
* status register. For their sake we won't re-enable RHSC
* interrupts if the interrupt bit is already active.
*/
@@ -489,7 +494,7 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
unsigned long flags;
spin_lock_irqsave (&ohci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
/* undocumented erratum seen on at least rev D */
@@ -533,8 +538,12 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
}
}
- hcd->poll_rh = ohci_root_hub_state_changes(ohci, changed,
- any_connected, rhsc_status);
+ if (ohci_root_hub_state_changes(ohci, changed,
+ any_connected, rhsc_status))
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
done:
spin_unlock_irqrestore (&ohci->lock, flags);
@@ -701,7 +710,7 @@ static int ohci_hub_control (
u32 temp;
int retval = 0;
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
return -ESHUTDOWN;
switch (typeReq) {
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index b8a1148f248e..6bdc8b25a6a1 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -392,7 +392,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
#ifdef CONFIG_PM
-static int ohci_pci_suspend(struct usb_hcd *hcd)
+static int ohci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
index 23fd6a886bdd..48ee6943bf35 100644
--- a/drivers/usb/host/ohci-ssb.c
+++ b/drivers/usb/host/ohci-ssb.c
@@ -93,8 +93,11 @@ static void ssb_ohci_detach(struct ssb_device *dev)
{
struct usb_hcd *hcd = ssb_get_drvdata(dev);
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
usb_remove_hcd(hcd);
iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
ssb_device_disable(dev, 0);
}
@@ -106,10 +109,52 @@ static int ssb_ohci_attach(struct ssb_device *dev)
int err = -ENOMEM;
u32 tmp, flags = 0;
- if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV)
- flags |= SSB_OHCI_TMSLOW_HOSTMODE;
+ if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+ return -EOPNOTSUPP;
- ssb_device_enable(dev, flags);
+ if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV) {
+ /* Put the device into host-mode. */
+ flags |= SSB_OHCI_TMSLOW_HOSTMODE;
+ ssb_device_enable(dev, flags);
+ } else if (dev->id.coreid == SSB_DEV_USB20_HOST) {
+ /*
+ * USB 2.0 special considerations:
+ *
+ * In addition to the standard SSB reset sequence, the Host
+ * Control Register must be programmed to bring the USB core
+ * and various phy components out of reset.
+ */
+ ssb_device_enable(dev, 0);
+ ssb_write32(dev, 0x200, 0x7ff);
+
+ /* Change Flush control reg */
+ tmp = ssb_read32(dev, 0x400);
+ tmp &= ~8;
+ ssb_write32(dev, 0x400, tmp);
+ tmp = ssb_read32(dev, 0x400);
+
+ /* Change Shim control reg */
+ tmp = ssb_read32(dev, 0x304);
+ tmp &= ~0x100;
+ ssb_write32(dev, 0x304, tmp);
+ tmp = ssb_read32(dev, 0x304);
+
+ udelay(1);
+
+ /* Work around for 5354 failures */
+ if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
+ /* Change syn01 reg */
+ tmp = 0x00fe00fe;
+ ssb_write32(dev, 0x894, tmp);
+
+ /* Change syn03 reg */
+ tmp = ssb_read32(dev, 0x89c);
+ tmp |= 0x1;
+ ssb_write32(dev, 0x89c, tmp);
+ }
+ } else
+ ssb_device_enable(dev, 0);
hcd = usb_create_hcd(&ssb_ohci_hc_driver, dev->dev,
dev_name(dev->dev));
@@ -200,6 +245,7 @@ static int ssb_ohci_resume(struct ssb_device *dev)
static const struct ssb_device_id ssb_ohci_table[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, ssb_ohci_table);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index de9e1c35da45..8026dc85996c 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1633,8 +1633,7 @@ static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
#endif
spin_lock_irqsave(&oxu->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &oxu_to_hcd(oxu)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
rc = -ESHUTDOWN;
goto done;
}
@@ -2201,8 +2200,7 @@ static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
spin_lock_irqsave(&oxu->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &oxu_to_hcd(oxu)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
status = -ESHUTDOWN;
goto done;
}
@@ -2707,7 +2705,6 @@ static int oxu_run(struct usb_hcd *hcd)
u32 temp, hcc_params;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
/* EHCI spec section 4.1 */
retval = ehci_reset(oxu);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index bcf9f0e809de..990f06b89eaa 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -813,8 +813,11 @@ static int sl811h_urb_enqueue(
#endif
/* avoid all allocations within spinlocks */
- if (!hep->hcpriv)
+ if (!hep->hcpriv) {
ep = kzalloc(sizeof *ep, mem_flags);
+ if (ep == NULL)
+ return -ENOMEM;
+ }
spin_lock_irqsave(&sl811->lock, flags);
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 98cf0b26b968..6e7fb5f38db6 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -17,7 +17,6 @@
#include "uhci-hcd.h"
-#define uhci_debug_operations (* (const struct file_operations *) NULL)
static struct dentry *uhci_debugfs_root;
#ifdef DEBUG
@@ -495,18 +494,16 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
{
struct uhci_hcd *uhci = inode->i_private;
struct uhci_debug *up;
- int ret = -ENOMEM;
unsigned long flags;
- lock_kernel();
up = kmalloc(sizeof(*up), GFP_KERNEL);
if (!up)
- goto out;
+ return -ENOMEM;
up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
if (!up->data) {
kfree(up);
- goto out;
+ return -ENOMEM;
}
up->size = 0;
@@ -517,10 +514,7 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
file->private_data = up;
- ret = 0;
-out:
- unlock_kernel();
- return ret;
+ return 0;
}
static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
@@ -528,9 +522,9 @@ static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
struct uhci_debug *up;
loff_t new = -1;
- lock_kernel();
up = file->private_data;
+ /* XXX: atomic 64bit seek access, but that needs to be fixed in the VFS */
switch (whence) {
case 0:
new = off;
@@ -539,11 +533,10 @@ static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
new = file->f_pos + off;
break;
}
- if (new < 0 || new > up->size) {
- unlock_kernel();
+
+ if (new < 0 || new > up->size)
return -EINVAL;
- }
- unlock_kernel();
+
return (file->f_pos = new);
}
@@ -564,7 +557,6 @@ static int uhci_debug_release(struct inode *inode, struct file *file)
return 0;
}
-#undef uhci_debug_operations
static const struct file_operations uhci_debug_operations = {
.owner = THIS_MODULE,
.open = uhci_debug_open,
@@ -572,6 +564,7 @@ static const struct file_operations uhci_debug_operations = {
.read = uhci_debug_read,
.release = uhci_debug_release,
};
+#define UHCI_DEBUG_OPS
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6637e52736dd..f52d04db28f4 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -140,7 +140,7 @@ static void finish_reset(struct uhci_hcd *uhci)
uhci->rh_state = UHCI_RH_RESET;
uhci->is_stopped = UHCI_IS_STOPPED;
uhci_to_hcd(uhci)->state = HC_STATE_HALT;
- uhci_to_hcd(uhci)->poll_rh = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
uhci->dead = 0; /* Full reset resurrects the controller */
}
@@ -176,6 +176,8 @@ static void check_and_reset_hc(struct uhci_hcd *uhci)
*/
static void configure_hc(struct uhci_hcd *uhci)
{
+ struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
+
/* Set the frame length to the default: 1 ms exactly */
outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);
@@ -191,8 +193,11 @@ static void configure_hc(struct uhci_hcd *uhci)
mb();
/* Enable PIRQ */
- pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
- USBLEGSUP_DEFAULT);
+ pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
+
+ /* Disable platform-specific non-PME# wakeup */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ pci_write_config_byte(pdev, USBRES_INTEL, 0);
}
@@ -344,7 +349,10 @@ __acquires(uhci->lock)
/* If interrupts don't work and remote wakeup is enabled then
* the suspended root hub needs to be polled.
*/
- uhci_to_hcd(uhci)->poll_rh = (!int_enable && wakeup_enable);
+ if (!int_enable && wakeup_enable)
+ set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
+ else
+ clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
uhci_scan_schedule(uhci);
uhci_fsbr_off(uhci);
@@ -363,7 +371,7 @@ static void start_rh(struct uhci_hcd *uhci)
uhci->io_addr + USBINTR);
mb();
uhci->rh_state = UHCI_RH_RUNNING;
- uhci_to_hcd(uhci)->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
}
static void wakeup_rh(struct uhci_hcd *uhci)
@@ -589,7 +597,7 @@ static int uhci_start(struct usb_hcd *hcd)
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int retval = -EBUSY;
int i;
- struct dentry *dentry;
+ struct dentry __maybe_unused *dentry;
hcd->uses_new_polling = 1;
@@ -599,18 +607,16 @@ static int uhci_start(struct usb_hcd *hcd)
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
- if (DEBUG_CONFIGURED) {
- dentry = debugfs_create_file(hcd->self.bus_name,
- S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
- uhci, &uhci_debug_operations);
- if (!dentry) {
- dev_err(uhci_dev(uhci), "couldn't create uhci "
- "debugfs entry\n");
- retval = -ENOMEM;
- goto err_create_debug_entry;
- }
- uhci->dentry = dentry;
+#ifdef UHCI_DEBUG_OPS
+ dentry = debugfs_create_file(hcd->self.bus_name,
+ S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
+ uhci, &uhci_debug_operations);
+ if (!dentry) {
+ dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n");
+ return -ENOMEM;
}
+ uhci->dentry = dentry;
+#endif
uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
UHCI_NUMFRAMES * sizeof(*uhci->frame),
@@ -691,7 +697,9 @@ static int uhci_start(struct usb_hcd *hcd)
configure_hc(uhci);
uhci->is_initialized = 1;
+ spin_lock_irq(&uhci->lock);
start_rh(uhci);
+ spin_unlock_irq(&uhci->lock);
return 0;
/*
@@ -722,7 +730,6 @@ err_alloc_frame_cpu:
err_alloc_frame:
debugfs_remove(uhci->dentry);
-err_create_debug_entry:
return retval;
}
@@ -731,7 +738,7 @@ static void uhci_stop(struct usb_hcd *hcd)
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
spin_lock_irq(&uhci->lock);
- if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) && !uhci->dead)
+ if (HCD_HW_ACCESSIBLE(hcd) && !uhci->dead)
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
@@ -748,7 +755,7 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
int rc = 0;
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else if (uhci->dead)
; /* Dead controllers tell no tales */
@@ -775,7 +782,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
int rc = 0;
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else if (!uhci->dead)
wakeup_rh(uhci);
@@ -783,15 +790,16 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
return rc;
}
-static int uhci_pci_suspend(struct usb_hcd *hcd)
+static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
int rc = 0;
dev_dbg(uhci_dev(uhci), "%s\n", __func__);
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done_okay; /* Already suspended or dead */
if (uhci->rh_state > UHCI_RH_SUSPENDED) {
@@ -803,11 +811,15 @@ static int uhci_pci_suspend(struct usb_hcd *hcd)
/* All PCI host controllers are required to disable IRQ generation
* at the source, so we must turn off PIRQ.
*/
- pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
- mb();
- hcd->poll_rh = 0;
-
- /* FIXME: Enable non-PME# remote wakeup? */
+ pci_write_config_word(pdev, USBLEGSUP, 0);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* Enable platform-specific non-PME# wakeup */
+ if (do_wakeup) {
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ pci_write_config_byte(pdev, USBRES_INTEL,
+ USBPORT1EN | USBPORT2EN);
+ }
done_okay:
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -826,7 +838,6 @@ static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
* even if the controller was dead.
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- mb();
spin_lock_irq(&uhci->lock);
@@ -834,8 +845,6 @@ static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
if (hibernated)
uhci_hc_died(uhci);
- /* FIXME: Disable non-PME# remote wakeup? */
-
/* The firmware or a boot kernel may have changed the controller
* settings during a system wakeup. Check it and reconfigure
* to avoid problems.
@@ -845,22 +854,20 @@ static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
/* If the controller was dead before, it's back alive now */
configure_hc(uhci);
- if (uhci->rh_state == UHCI_RH_RESET) {
-
- /* The controller had to be reset */
+ /* Tell the core if the controller had to be reset */
+ if (uhci->rh_state == UHCI_RH_RESET)
usb_root_hub_lost_power(hcd->self.root_hub);
- suspend_rh(uhci, UHCI_RH_SUSPENDED);
- }
spin_unlock_irq(&uhci->lock);
/* If interrupts don't work and remote wakeup is enabled then
* the suspended root hub needs to be polled.
*/
- if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup) {
- hcd->poll_rh = 1;
- usb_hcd_poll_rh_status(hcd);
- }
+ if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup)
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* Does the root hub have a port wakeup pending? */
+ usb_hcd_poll_rh_status(hcd);
return 0;
}
#endif
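The uhci-hcd.c hunks above replace direct writes to hcd->poll_rh and open-coded test_bit() checks with the HCD_FLAG_POLL_RH bit and the HCD_HW_ACCESSIBLE() helper. A minimal sketch of that idiom, assuming the declarations from <linux/usb/hcd.h>; quiesce_rh() is a hypothetical name used only for illustration, not part of this patch:

/*
 * Sketch only: restates the flag-based idiom used in the hunks above.
 */
static void quiesce_rh(struct usb_hcd *hcd, bool need_polling)
{
        if (!HCD_HW_ACCESSIBLE(hcd))    /* registers unreachable, e.g. suspended */
                return;

        if (need_polling) {
                set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
                /* Kick usbcore so a pending root-hub change is noticed promptly */
                usb_hcd_poll_rh_status(hcd);
        } else {
                clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        }
}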
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 26bd1b2bcbfc..49bf2790f9c2 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -67,12 +67,17 @@
#define USBPORTSC_RES3 0x4000 /* reserved, write zeroes */
#define USBPORTSC_RES4 0x8000 /* reserved, write zeroes */
-/* Legacy support register */
+/* PCI legacy support register */
#define USBLEGSUP 0xc0
#define USBLEGSUP_DEFAULT 0x2000 /* only PIRQ enable set */
#define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
#define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
+/* PCI Intel-specific resume-enable register */
+#define USBRES_INTEL 0xc4
+#define USBPORT1EN 0x01
+#define USBPORT2EN 0x02
+
#define UHCI_PTR_BITS cpu_to_le32(0x000F)
#define UHCI_PTR_TERM cpu_to_le32(0x0001)
#define UHCI_PTR_QH cpu_to_le32(0x0002)
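The USBRES_INTEL definitions added above back the vendor-specific wakeup handling elsewhere in this patch: uhci_pci_suspend() arms both port-wakeup bits when do_wakeup is set, and another hunk (visible at the top of this diff) writes the register back to 0. Condensed into one hedged sketch for illustration — in the patch the two writes live in different functions:

if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
        if (do_wakeup)          /* suspend path: arm both ports for wakeup */
                pci_write_config_byte(pdev, USBRES_INTEL,
                                USBPORT1EN | USBPORT2EN);
        else                    /* otherwise the register is cleared again */
                pci_write_config_byte(pdev, USBRES_INTEL, 0);
}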
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 8270055848ca..6d59c0f77f25 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -190,7 +190,7 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
spin_lock_irqsave(&uhci->lock, flags);
uhci_scan_schedule(uhci);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done;
uhci_check_ports(uhci);
@@ -200,7 +200,7 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
case UHCI_RH_SUSPENDING:
case UHCI_RH_SUSPENDED:
/* if port change, ask to be resumed */
- if (status)
+ if (status || uhci->resuming_ports)
usb_hcd_resume_root_hub(hcd);
break;
@@ -246,7 +246,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wPortChange, wPortStatus;
unsigned long flags;
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) || uhci->dead)
+ if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
return -ETIMEDOUT;
spin_lock_irqsave(&uhci->lock, flags);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index acd582c02802..d3ade4018487 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -565,7 +565,7 @@ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
qh->unlink_frame = uhci->frame_number;
/* Force an interrupt so we know when the QH is fully unlinked */
- if (list_empty(&uhci->skel_unlink_qh->node))
+ if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
uhci_set_next_interrupt(uhci);
/* Move the QH from its old list to the end of the unlinking list */
@@ -1667,7 +1667,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
qh->advance_jiffies = jiffies;
goto done;
}
- ret = 0;
+ ret = uhci->is_stopped;
}
/* The queue hasn't advanced; check for timeout */
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index e0d3401285c8..72b6892fda67 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -68,7 +68,7 @@ static int whc_start(struct usb_hcd *usb_hcd)
whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
usb_hcd->uses_new_polling = 1;
- usb_hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
out:
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index ab5a14fbfeeb..dc0ab8382f5d 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -475,7 +475,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
|| (prev_end & (WHCI_PAGE_SIZE-1))
|| (dma_addr & (WHCI_PAGE_SIZE-1))
|| std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
- if (std->len % qset->max_packet != 0)
+ if (std && std->len % qset->max_packet != 0)
return -EINVAL;
std = qset_new_std(whc, qset, urb, mem_flags);
if (std == NULL) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 2eb658d26394..4e51343ddffc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -391,49 +391,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
return ep->stream_info->stream_rings[stream_id];
}
-struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id)
-{
- struct xhci_virt_ep *ep;
-
- ep = &xhci->devs[slot_id]->eps[ep_index];
- /* Common case: no streams */
- if (!(ep->ep_state & EP_HAS_STREAMS))
- return ep->ring;
-
- if (stream_id == 0) {
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has streams, "
- "but URB has no stream ID.\n",
- slot_id, ep_index);
- return NULL;
- }
-
- if (stream_id < ep->stream_info->num_streams)
- return ep->stream_info->stream_rings[stream_id];
-
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has "
- "stream IDs 1 to %u allocated, "
- "but stream ID %u is requested.\n",
- slot_id, ep_index,
- ep->stream_info->num_streams - 1,
- stream_id);
- return NULL;
-}
-
-/* Get the right ring for the given URB.
- * If the endpoint supports streams, boundary check the URB's stream ID.
- * If the endpoint doesn't support streams, return the singular endpoint ring.
- */
-struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb)
-{
- return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
- xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
-}
-
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
unsigned int num_streams,
@@ -1112,8 +1069,18 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
/* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ /*
+ * An isochronous endpoint ring needs a bigger size because one isoc URB
+ * carries multiple packets and therefore inserts multiple TDs into the
+ * ring.
+ * This should be replaced with dynamic ring resizing in the future.
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 8, true, mem_flags);
+ else
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1124,6 +1091,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
}
+ virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
@@ -1389,6 +1357,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
return command;
}
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+{
+ int last;
+
+ if (!urb_priv)
+ return;
+
+ last = urb_priv->length - 1;
+ if (last >= 0) {
+ int i;
+ for (i = 0; i <= last; i++)
+ kfree(urb_priv->td[i]);
+ }
+ kfree(urb_priv);
+}
+
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
@@ -1588,7 +1572,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
unsigned int num_tests;
int i, ret;
- num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
+ num_tests = ARRAY_SIZE(simple_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
xhci->event_ring->first_seg,
@@ -1601,7 +1585,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
return ret;
}
- num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
+ num_tests = ARRAY_SIZE(complex_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
complex_test_vector[i].input_seg,
@@ -1617,6 +1601,29 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
return 0;
}
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+ u64 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+ /* Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+ xhci_dbg(xhci, "// Write event ring dequeue pointer, "
+ "preserving EHB bit\n");
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ &xhci->ir_set->erst_dequeue);
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
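xhci_urb_free_priv() above tears down the per-URB private data that now carries one xhci_td per isochronous packet. A hedged sketch of the matching allocation; xhci_alloc_urb_priv() is a hypothetical helper, and the struct urb_priv layout (length, td_cnt, trailing td[] array) is inferred from its use in this patch rather than quoted from xhci.h:

/*
 * Sketch only: mirrors xhci_urb_free_priv() above.
 */
static struct urb_priv *xhci_alloc_urb_priv(struct xhci_hcd *xhci,
                struct urb *urb, gfp_t mem_flags)
{
        struct urb_priv *urb_priv;
        int num_tds, i;

        /* One TD per isoc packet, otherwise a single TD for the whole URB */
        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                num_tds = urb->number_of_packets;
        else
                num_tds = 1;

        urb_priv = kzalloc(sizeof(*urb_priv) +
                        num_tds * sizeof(struct xhci_td *), mem_flags);
        if (!urb_priv)
                return NULL;

        for (i = 0; i < num_tds; i++) {
                urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
                if (!urb_priv->td[i]) {
                        /* Free the TDs allocated so far, then the priv itself */
                        urb_priv->length = i;
                        xhci_urb_free_priv(xhci, urb_priv);
                        return NULL;
                }
        }
        urb_priv->length = num_tds;
        urb_priv->td_cnt = 0;
        return urb_priv;
}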
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 11482b6b9381..f7efe025beda 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -53,6 +53,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ u32 temp;
hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
@@ -93,6 +94,14 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
return retval;
xhci_dbg(xhci, "Reset complete\n");
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ if (HCC_64BIT_ADDR(temp)) {
+ xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+ } else {
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ }
+
xhci_dbg(xhci, "Calling HCD init\n");
/* Initialize HCD and host controller data structures. */
retval = xhci_init(hcd);
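The xhci-pci.c hunk above selects the DMA mask from HCC_64BIT_ADDR but ignores the return value of dma_set_mask(), which can fail on some platforms. A more defensive variant might look like the following sketch (an assumption for illustration, not part of the patch; xhci_set_dma_mask() is a hypothetical name):

/*
 * Sketch only: fall back to 32-bit addressing when the 64-bit mask is
 * rejected by the platform.
 */
static void xhci_set_dma_mask(struct usb_hcd *hcd, struct xhci_hcd *xhci)
{
        u32 hcc = xhci_readl(xhci, &xhci->cap_regs->hcc_params);

        if (HCC_64BIT_ADDR(hcc) &&
            !dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64))) {
                xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
                return;
        }
        dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
}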
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bfc99a939455..bc3f4f427065 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -301,28 +301,6 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
return 1;
}
-void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
- u64 temp;
- dma_addr_t deq;
-
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0 && !in_interrupt())
- xhci_warn(xhci, "WARN something wrong with SW event ring "
- "dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp &= ERST_PTR_MASK;
- /* Don't clear the EHB bit (which is RW1C) because
- * there might be more events to service.
- */
- temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
-}
-
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
@@ -359,11 +337,6 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
field = xhci_readl(xhci, db_addr) & DB_MASK;
field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
xhci_writel(xhci, field, db_addr);
- /* Flush PCI posted writes - FIXME Matthew Wilcox says this
- * isn't time-critical and we shouldn't make the CPU wait for
- * the flush.
- */
- xhci_readl(xhci, db_addr);
}
}
@@ -419,6 +392,50 @@ static struct xhci_segment *find_trb_seg(
return cur_seg;
}
+
+static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+ xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
/*
* Move the xHC's endpoint ring dequeue pointer past cur_td.
* Record the new state of the xHC's endpoint ring dequeue segment,
@@ -578,16 +595,24 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status, char *adjective)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct urb *urb;
+ struct urb_priv *urb_priv;
- cur_td->urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+ urb = cur_td->urb;
+ urb_priv = urb->hcpriv;
+ urb_priv->td_cnt++;
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(hcd, cur_td->urb, status);
- kfree(cur_td);
- spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
+ /* Only giveback urb when this is the last td in urb */
+ if (urb_priv->td_cnt == urb_priv->length) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ xhci_urb_free_priv(xhci, urb_priv);
+ spin_lock(&xhci->lock);
+ xhci_dbg(xhci, "%s URB given back\n", adjective);
+ }
}
/*
@@ -1132,7 +1157,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);
spin_unlock(&xhci->lock);
/* Pass this up to the core */
@@ -1258,6 +1282,421 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
}
/*
+ * Finish processing the TD: remove the TD from the endpoint's TD list.
+ * Return 1 if the URB can be given back.
+ */
+static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status, bool skip)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct urb *urb = NULL;
+ struct xhci_ep_ctx *ep_ctx;
+ int ret = 0;
+ struct urb_priv *urb_priv;
+ u32 trb_comp_code;
+
+ slot_id = TRB_TO_SLOT_ID(event->flags);
+ xdev = xhci->devs[slot_id];
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+ if (skip)
+ goto td_cleanup;
+
+ if (trb_comp_code == COMP_STOP_INVAL ||
+ trb_comp_code == COMP_STOP) {
+ /* The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ return 0;
+ } else {
+ if (trb_comp_code == COMP_STALL) {
+ /* The transfer is completed from the driver's
+ * perspective, but we need to issue a set dequeue
+ * command for this stalled endpoint to move the dequeue
+ * pointer past the TD. We can't do that here because
+ * the halt condition must be cleared first. Let the
+ * USB class driver clear the stall later.
+ */
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+ /* Other types of errors halt the endpoint, but the
+ * class driver doesn't call usb_reset_endpoint() unless
+ * the error is -EPIPE. Clear the halted status in the
+ * xHCI hardware manually.
+ */
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, ep_ring->stream_id,
+ td, event_trb);
+ } else {
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+ }
+
+td_cleanup:
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than
+ * the buffer length, urb->actual_length will be a very big
+ * number (since it's unsigned). Play it safe and say we didn't
+ * transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB transfer length is wrong, "
+ "xHC issue? req. len = %u, "
+ "act. len = %u\n",
+ urb->transfer_buffer_length,
+ urb->actual_length);
+ urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ list_del(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del(&td->cancelled_td_list);
+
+ urb_priv->td_cnt++;
+ /* Giveback the urb when all the tds are completed */
+ if (urb_priv->td_cnt == urb_priv->length)
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * Process control tds, update urb status and actual_length.
+ */
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 trb_comp_code;
+
+ slot_id = TRB_TO_SLOT_ID(event->flags);
+ xdev = xhci->devs[slot_id];
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+ xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ if (event_trb == ep_ring->dequeue) {
+ xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
+ "without IOC set??\n");
+ *status = -ESHUTDOWN;
+ } else if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN: Success on ctrl data TRB "
+ "without IOC set??\n");
+ *status = -ESHUTDOWN;
+ } else {
+ xhci_dbg(xhci, "Successful control transfer!\n");
+ *status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ break;
+ default:
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
+ break;
+ xhci_dbg(xhci, "TRB error code %u, "
+ "halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
+ /* else fall through */
+ case COMP_STALL:
+ /* Did we transfer part of the data (middle) phase? */
+ if (event_trb != ep_ring->dequeue &&
+ event_trb != td->last_trb)
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length
+ - TRB_LEN(event->transfer_len);
+ else
+ td->urb->actual_length = 0;
+
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, 0, td, event_trb);
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
+ }
+ /*
+ * Did we transfer any data, despite the errors that might have
+ * happened? I.e. did we get past the setup stage?
+ */
+ if (event_trb != ep_ring->dequeue) {
+ /* The event was for the status stage */
+ if (event_trb == td->last_trb) {
+ if (td->urb->actual_length != 0) {
+ /* Don't overwrite a previously set error code
+ */
+ if ((*status == -EINPROGRESS || *status == 0) &&
+ (td->urb->transfer_flags
+ & URB_SHORT_NOT_OK))
+ /* Did we already see a short data
+ * stage? */
+ *status = -EREMOTEIO;
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ }
+ } else {
+ /* Maybe the event was for the data stage? */
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ /* We didn't stop on a link TRB in the middle */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ xhci_dbg(xhci, "Waiting for status "
+ "stage event\n");
+ return 0;
+ }
+ }
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
+ * Process isochronous tds, update urb packet status and actual_length.
+ */
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ int idx;
+ int len = 0;
+ int skip_td = 0;
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+ u32 trb_comp_code;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ urb_priv = td->urb->hcpriv;
+ idx = urb_priv->td_cnt;
+
+ if (ep->skip) {
+ /* The transfer is partly done */
+ *status = -EXDEV;
+ td->urb->iso_frame_desc[idx].status = -EXDEV;
+ } else {
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ td->urb->iso_frame_desc[idx].status = 0;
+ xhci_dbg(xhci, "Successful isoc transfer!\n");
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ td->urb->iso_frame_desc[idx].status =
+ -EREMOTEIO;
+ else
+ td->urb->iso_frame_desc[idx].status = 0;
+ break;
+ case COMP_BW_OVER:
+ td->urb->iso_frame_desc[idx].status = -ECOMM;
+ skip_td = 1;
+ break;
+ case COMP_BUFF_OVER:
+ case COMP_BABBLE:
+ td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
+ skip_td = 1;
+ break;
+ case COMP_STALL:
+ td->urb->iso_frame_desc[idx].status = -EPROTO;
+ skip_td = 1;
+ break;
+ case COMP_STOP:
+ case COMP_STOP_INVAL:
+ break;
+ default:
+ td->urb->iso_frame_desc[idx].status = -1;
+ break;
+ }
+ }
+
+ /* calc actual length */
+ if (ep->skip) {
+ td->urb->iso_frame_desc[idx].actual_length = 0;
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
+ }
+
+ if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
+ td->urb->iso_frame_desc[idx].actual_length =
+ td->urb->iso_frame_desc[idx].length;
+ td->urb->actual_length +=
+ td->urb->iso_frame_desc[idx].length;
+ } else {
+ for (cur_trb = ep_ring->dequeue,
+ cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ len +=
+ TRB_LEN(cur_trb->generic.field[2]);
+ }
+ len += TRB_LEN(cur_trb->generic.field[2]) -
+ TRB_LEN(event->transfer_len);
+
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ td->urb->iso_frame_desc[idx].actual_length = len;
+ td->urb->actual_length += len;
+ }
+ }
+
+ if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
+ *status = 0;
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
+ * Process bulk and interrupt tds, update urb status and actual_length.
+ */
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+ u32 trb_comp_code;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ } else {
+ if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+ xhci_dbg(xhci, "Successful bulk "
+ "transfer!\n");
+ else
+ xhci_dbg(xhci, "Successful interrupt "
+ "transfer!\n");
+ *status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ dev_dbg(&td->urb->dev->dev,
+ "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(event->transfer_len));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+ if (TRB_LEN(event->transfer_len) != 0) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+ TRB_LEN(event->transfer_len));
+ td->urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ /* Don't overwrite a previously set error code */
+ if (*status == -EINPROGRESS) {
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ /* Ignore a short packet completion if the
+ * untransferred length was zero.
+ */
+ if (*status == -EREMOTEIO)
+ *status = 0;
+ }
+ } else {
+ /* Slow path - walk the list, starting from the dequeue
+ * pointer, to get the actual length transferred.
+ */
+ td->urb->actual_length = 0;
+ for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+ cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]);
+ }
+ /* If the ring didn't stop on a Link or No-op TRB, add
+ * in the actual bytes transferred from the Normal TRB
+ */
+ if (trb_comp_code != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]) -
+ TRB_LEN(event->transfer_len);
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
* At this point, the host controller is probably hosed and should be reset.
@@ -1276,10 +1715,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
union xhci_trb *event_trb;
struct urb *urb = NULL;
int status = -EINPROGRESS;
+ struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ int ret = 0;
- xhci_dbg(xhci, "In %s\n", __func__);
slot_id = TRB_TO_SLOT_ID(event->flags);
xdev = xhci->devs[slot_id];
if (!xdev) {
@@ -1293,51 +1733,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+ if (!ep_ring ||
+ (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
return -ENODEV;
}
event_dma = event->buffer;
- /* This TRB should be in the TD at the head of this ring's TD list */
- xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
- if (list_empty(&ep_ring->td_list)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
- TRB_TO_SLOT_ID(event->flags), ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
- urb = NULL;
- goto cleanup;
- }
- xhci_dbg(xhci, "%s - getting list entry\n", __func__);
- td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
-
- /* Is this a TRB in the currently executing TD? */
- xhci_dbg(xhci, "%s - looking for TD\n", __func__);
- event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
- td->last_trb, event_dma);
- xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
- if (!event_seg) {
- /* HC is busted, give up! */
- xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
- return -ESHUTDOWN;
- }
- event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
- xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
- lower_32_bits(event->buffer));
- xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
- upper_32_bits(event->buffer));
- xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
- (unsigned int) event->transfer_len);
- xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
- (unsigned int) event->flags);
-
- /* Look for common error cases */
trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ /* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
@@ -1373,278 +1778,156 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
status = -ENOSR;
break;
+ case COMP_BW_OVER:
+ xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
+ break;
+ case COMP_BUFF_OVER:
+ xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
+ break;
+ case COMP_UNDERRUN:
+ /*
+ * When the Isoch ring is empty, the xHC will generate
+ * a Ring Overrun Event for IN Isoch endpoint or Ring
+ * Underrun Event for OUT Isoch endpoint.
+ */
+ xhci_dbg(xhci, "underrun event on endpoint\n");
+ if (!list_empty(&ep_ring->td_list))
+ xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
+ "still with TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ goto cleanup;
+ case COMP_OVERRUN:
+ xhci_dbg(xhci, "overrun event on endpoint\n");
+ if (!list_empty(&ep_ring->td_list))
+ xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
+ "still with TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ goto cleanup;
+ case COMP_MISSED_INT:
+ /*
+ * When a Missed Service Error is encountered, one or more isoc TDs
+ * may have been missed by the xHC.
+ * Set the endpoint's skip flag; complete the missed TDs as
+ * short transfers the next time the endpoint ring is processed.
+ */
+ ep->skip = true;
+ xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+ goto cleanup;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
- xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
- urb = NULL;
+ xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
+ "busted\n");
goto cleanup;
}
- /* Now update the urb's actual_length and give back to the core */
- /* Was this a control transfer? */
- if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
- switch (trb_comp_code) {
- case COMP_SUCCESS:
- if (event_trb == ep_ring->dequeue) {
- xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
- status = -ESHUTDOWN;
- } else if (event_trb != td->last_trb) {
- xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
- status = -ESHUTDOWN;
- } else {
- xhci_dbg(xhci, "Successful control transfer!\n");
- status = 0;
- }
- break;
- case COMP_SHORT_TX:
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- break;
- default:
- if (!xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code))
- break;
- xhci_dbg(xhci, "TRB error code %u, "
- "halted endpoint index = %u\n",
- trb_comp_code, ep_index);
- /* else fall through */
- case COMP_STALL:
- /* Did we transfer part of the data (middle) phase? */
- if (event_trb != ep_ring->dequeue &&
- event_trb != td->last_trb)
- td->urb->actual_length =
- td->urb->transfer_buffer_length
- - TRB_LEN(event->transfer_len);
- else
- td->urb->actual_length = 0;
-
- xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, 0, td, event_trb);
- goto td_cleanup;
- }
- /*
- * Did we transfer any data, despite the errors that might have
- * happened? I.e. did we get past the setup stage?
+ do {
+ /* This TRB should be in the TD at the head of this ring's
+ * TD list.
*/
- if (event_trb != ep_ring->dequeue) {
- /* The event was for the status stage */
- if (event_trb == td->last_trb) {
- if (td->urb->actual_length != 0) {
- /* Don't overwrite a previously set error code */
- if ((status == -EINPROGRESS ||
- status == 0) &&
- (td->urb->transfer_flags
- & URB_SHORT_NOT_OK))
- /* Did we already see a short data stage? */
- status = -EREMOTEIO;
- } else {
- td->urb->actual_length =
- td->urb->transfer_buffer_length;
- }
- } else {
- /* Maybe the event was for the data stage? */
- if (trb_comp_code != COMP_STOP_INVAL) {
- /* We didn't stop on a link TRB in the middle */
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- TRB_LEN(event->transfer_len);
- xhci_dbg(xhci, "Waiting for status stage event\n");
- urb = NULL;
- goto cleanup;
- }
+ if (list_empty(&ep_ring->td_list)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+ "with no TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ if (ep->skip) {
+ ep->skip = false;
+ xhci_dbg(xhci, "td_list is empty while skip "
+ "flag set. Clear skip flag.\n");
}
+ ret = 0;
+ goto cleanup;
}
- } else {
- switch (trb_comp_code) {
- case COMP_SUCCESS:
- /* Double check that the HW transferred everything. */
- if (event_trb != td->last_trb) {
- xhci_warn(xhci, "WARN Successful completion "
- "on short TX\n");
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- } else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
- status = 0;
- }
- break;
- case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- break;
- default:
- /* Others already handled above */
- break;
+
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ /* Is this a TRB in the currently executing TD? */
+ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, event_dma);
+ if (event_seg && ep->skip) {
+ xhci_dbg(xhci, "Found td. Clear skip flag.\n");
+ ep->skip = false;
+ }
+ if (!event_seg &&
+ (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
+ /* HC is busted, give up! */
+ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
+ "part of current TD\n");
+ return -ESHUTDOWN;
}
- dev_dbg(&td->urb->dev->dev,
- "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(event->transfer_len));
- /* Fast path - was this the last TRB in the TD for this URB? */
- if (event_trb == td->last_trb) {
- if (TRB_LEN(event->transfer_len) != 0) {
- td->urb->actual_length =
- td->urb->transfer_buffer_length -
- TRB_LEN(event->transfer_len);
- if (td->urb->transfer_buffer_length <
- td->urb->actual_length) {
- xhci_warn(xhci, "HC gave bad length "
- "of %d bytes left\n",
- TRB_LEN(event->transfer_len));
- td->urb->actual_length = 0;
- if (td->urb->transfer_flags &
- URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- }
- /* Don't overwrite a previously set error code */
- if (status == -EINPROGRESS) {
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
- }
- } else {
- td->urb->actual_length = td->urb->transfer_buffer_length;
- /* Ignore a short packet completion if the
- * untransferred length was zero.
- */
- if (status == -EREMOTEIO)
- status = 0;
- }
- } else {
- /* Slow path - walk the list, starting from the dequeue
- * pointer, to get the actual length transferred.
- */
- union xhci_trb *cur_trb;
- struct xhci_segment *cur_seg;
- td->urb->actual_length = 0;
- for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
- cur_trb != event_trb;
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
- (cur_trb->generic.field[3] &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
- td->urb->actual_length +=
- TRB_LEN(cur_trb->generic.field[2]);
- }
- /* If the ring didn't stop on a Link or No-op TRB, add
- * in the actual bytes transferred from the Normal TRB
+ if (event_seg) {
+ event_trb = &event_seg->trbs[(event_dma -
+ event_seg->dma) / sizeof(*event_trb)];
+ /*
+ * No-op TRB should not trigger interrupts.
+ * If event_trb is a no-op TRB, it means the
+ * corresponding TD has been cancelled. Just ignore
+ * the TD.
*/
- if (trb_comp_code != COMP_STOP_INVAL)
- td->urb->actual_length +=
- TRB_LEN(cur_trb->generic.field[2]) -
- TRB_LEN(event->transfer_len);
+ if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+ == TRB_TYPE(TRB_TR_NOOP)) {
+ xhci_dbg(xhci, "event_trb is a no-op TRB. "
+ "Skip it\n");
+ goto cleanup;
+ }
}
- }
- if (trb_comp_code == COMP_STOP_INVAL ||
- trb_comp_code == COMP_STOP) {
- /* The Endpoint Stop Command completion will take care of any
- * stopped TDs. A stopped TD may be restarted, so don't update
- * the ring dequeue pointer or take this TD off any lists yet.
+
+ /* Now update the urb's actual_length and give back to
+ * the core
*/
- ep->stopped_td = td;
- ep->stopped_trb = event_trb;
- } else {
- if (trb_comp_code == COMP_STALL) {
- /* The transfer is completed from the driver's
- * perspective, but we need to issue a set dequeue
- * command for this stalled endpoint to move the dequeue
- * pointer past the TD. We can't do that here because
- * the halt condition must be cleared first. Let the
- * USB class driver clear the stall later.
- */
- ep->stopped_td = td;
- ep->stopped_trb = event_trb;
- ep->stopped_stream = ep_ring->stream_id;
- } else if (xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code)) {
- /* Other types of errors halt the endpoint, but the
- * class driver doesn't call usb_reset_endpoint() unless
- * the error is -EPIPE. Clear the halted status in the
- * xHCI hardware manually.
- */
- xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, ep_ring->stream_id, td, event_trb);
- } else {
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
- }
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+ ret = process_ctrl_td(xhci, td, event_trb, event, ep,
+ &status);
+ else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+ ret = process_isoc_td(xhci, td, event_trb, event, ep,
+ &status);
+ else
+ ret = process_bulk_intr_td(xhci, td, event_trb, event,
+ ep, &status);
-td_cleanup:
- /* Clean up the endpoint's TD list */
- urb = td->urb;
- /* Do one last check of the actual transfer length.
- * If the host controller said we transferred more data than
- * the buffer length, urb->actual_length will be a very big
- * number (since it's unsigned). Play it safe and say we didn't
- * transfer anything.
+cleanup:
+ /*
+ * Do not update the event ring dequeue pointer if ep->skip is set;
+ * we will roll back and continue processing the missed TDs.
*/
- if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB transfer length is wrong, "
- "xHC issue? req. len = %u, "
- "act. len = %u\n",
- urb->transfer_buffer_length,
- urb->actual_length);
- urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- status = -EREMOTEIO;
- else
- status = 0;
+ if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+ inc_deq(xhci, xhci->event_ring, true);
}
- list_del(&td->td_list);
- /* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list))
- list_del(&td->cancelled_td_list);
- /* Leave the TD around for the reset endpoint function to use
- * (but only if it's not a control endpoint, since we already
- * queued the Set TR dequeue pointer command for stalled
- * control endpoints).
- */
- if (usb_endpoint_xfer_control(&urb->ep->desc) ||
- (trb_comp_code != COMP_STALL &&
- trb_comp_code != COMP_BABBLE)) {
- kfree(td);
+ if (ret) {
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+ /* Leave the TD around for the reset endpoint function
+ * to use (but only if it's not a control endpoint,
+ * since we already queued the Set TR dequeue pointer
+ * command for stalled control endpoints).
+ */
+ if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE))
+ xhci_urb_free_priv(xhci, urb_priv);
+
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "status = %d\n",
+ urb, urb->actual_length, status);
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ spin_lock(&xhci->lock);
}
- urb->hcpriv = NULL;
- }
-cleanup:
- inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);
- /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
- if (urb) {
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
- urb, urb->actual_length, status);
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
- spin_lock(&xhci->lock);
- }
+ /*
+ * If ep->skip is set, it means there are missed TDs on the
+ * endpoint ring that still need to be taken care of.
+ * Process them as short transfers until we reach the TD pointed to
+ * by the event.
+ */
+ } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+
return 0;
}
@@ -1652,7 +1935,7 @@ cleanup:
* This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
*/
-void xhci_handle_event(struct xhci_hcd *xhci)
+static void xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
@@ -1710,15 +1993,130 @@ void xhci_handle_event(struct xhci_hcd *xhci)
return;
}
- if (update_ptrs) {
- /* Update SW and HC event ring dequeue pointer */
+ if (update_ptrs)
+ /* Update SW event ring dequeue pointer */
inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);
- }
+
/* Are there more items on the event ring? */
xhci_handle_event(xhci);
}
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 status;
+ union xhci_trb *trb;
+ u64 temp_64;
+ union xhci_trb *event_ring_deq;
+ dma_addr_t deq;
+
+ spin_lock(&xhci->lock);
+ trb = xhci->event_ring->dequeue;
+ /* Check if the xHC generated the interrupt, or the irq is shared */
+ status = xhci_readl(xhci, &xhci->op_regs->status);
+ if (status == 0xffffffff)
+ goto hw_died;
+
+ if (!(status & STS_EINT)) {
+ spin_unlock(&xhci->lock);
+ xhci_warn(xhci, "Spurious interrupt.\n");
+ return IRQ_NONE;
+ }
+ xhci_dbg(xhci, "op reg status = %08x\n", status);
+ xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+ xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+ (unsigned long long)
+ xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+ lower_32_bits(trb->link.segment_ptr),
+ upper_32_bits(trb->link.segment_ptr),
+ (unsigned int) trb->link.intr_target,
+ (unsigned int) trb->link.control);
+
+ if (status & STS_FATAL) {
+ xhci_warn(xhci, "WARNING: Host System Error\n");
+ xhci_halt(xhci);
+hw_died:
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ spin_unlock(&xhci->lock);
+ return -ESHUTDOWN;
+ }
+
+ /*
+ * Clear the op reg interrupt status first,
+ * so we can receive interrupts from other MSI-X interrupters.
+ * Write 1 to clear the interrupt status.
+ */
+ status |= STS_EINT;
+ xhci_writel(xhci, status, &xhci->op_regs->status);
+ /* FIXME when MSI-X is supported and there are multiple vectors */
+ /* Clear the MSI-X event interrupt status */
+
+ if (hcd->irq != -1) {
+ u32 irq_pending;
+ /* Acknowledge the PCI interrupt */
+ irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ irq_pending |= 0x3;
+ xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+ "Shouldn't IRQs be disabled?\n");
+ /* Clear the event handler busy flag (RW1C);
+ * the event ring should be empty.
+ */
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
+ &xhci->ir_set->erst_dequeue);
+ spin_unlock(&xhci->lock);
+
+ return IRQ_HANDLED;
+ }
+
+ event_ring_deq = xhci->event_ring->dequeue;
+ /* FIXME this should be a delayed service routine
+ * that clears the EHB.
+ */
+ xhci_handle_event(xhci);
+
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != xhci->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event "
+ "ring dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C); event ring is empty. */
+ temp_64 |= ERST_EHB;
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+
+ spin_unlock(&xhci->lock);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
+{
+ irqreturn_t ret;
+
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ ret = xhci_irq(hcd);
+
+ return ret;
+}
+
/**** Endpoint Ring Operations ****/
/*
@@ -1827,10 +2225,12 @@ static int prepare_transfer(struct xhci_hcd *xhci,
unsigned int stream_id,
unsigned int num_trbs,
struct urb *urb,
- struct xhci_td **td,
+ unsigned int td_index,
gfp_t mem_flags)
{
int ret;
+ struct urb_priv *urb_priv;
+ struct xhci_td *td;
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -1846,24 +2246,29 @@ static int prepare_transfer(struct xhci_hcd *xhci,
num_trbs, mem_flags);
if (ret)
return ret;
- *td = kzalloc(sizeof(struct xhci_td), mem_flags);
- if (!*td)
- return -ENOMEM;
- INIT_LIST_HEAD(&(*td)->td_list);
- INIT_LIST_HEAD(&(*td)->cancelled_td_list);
- ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
- if (unlikely(ret)) {
- kfree(*td);
- return ret;
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[td_index];
+
+ INIT_LIST_HEAD(&td->td_list);
+ INIT_LIST_HEAD(&td->cancelled_td_list);
+
+ if (td_index == 0) {
+ ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+ if (unlikely(ret)) {
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
+ return ret;
+ }
}
- (*td)->urb = urb;
- urb->hcpriv = (void *) (*td);
+ td->urb = urb;
/* Add this TD to the tail of the endpoint ring's TD list */
- list_add_tail(&(*td)->td_list, &ep_ring->td_list);
- (*td)->start_seg = ep_ring->enq_seg;
- (*td)->first_trb = ep_ring->enqueue;
+ list_add_tail(&td->td_list, &ep_ring->td_list);
+ td->start_seg = ep_ring->enq_seg;
+ td->first_trb = ep_ring->enqueue;
+
+ urb_priv->td[td_index] = td;
return 0;
}
@@ -2002,6 +2407,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
{
struct xhci_ring *ep_ring;
unsigned int num_trbs;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
struct scatterlist *sg;
int num_sgs;
@@ -2022,9 +2428,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, &td, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
+
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[0];
+
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
@@ -2145,6 +2555,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
int num_trbs;
struct xhci_generic_trb *start_trb;
@@ -2190,10 +2601,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, &td, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[0];
+
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
@@ -2279,6 +2693,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_generic_trb *start_trb;
int start_cycle;
u32 field, length_field;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -2306,10 +2721,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, &td, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[0];
+
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
@@ -2366,6 +2784,224 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return 0;
}
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
+ struct urb *urb, int i)
+{
+ int num_trbs = 0;
+ u64 addr, td_len, running_total;
+
+ addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+ td_len = urb->iso_frame_desc[i].length;
+
+ running_total = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (running_total != 0)
+ num_trbs++;
+
+ while (running_total < td_len) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+
+ return num_trbs;
+}
+
+/* This is for isoc transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ struct xhci_td *td;
+ int num_tds, trbs_per_td;
+ struct xhci_generic_trb *start_trb;
+ bool first_trb;
+ int start_cycle;
+ u32 field, length_field;
+ int running_total, trb_buff_len, td_len, td_remain_len, ret;
+ u64 start_addr, addr;
+ int i, j;
+
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+ num_tds = urb->number_of_packets;
+ if (num_tds < 1) {
+ xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+ return -EINVAL;
+ }
+
+ if (!in_interrupt())
+ dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+ " addr = %#llx, num_tds = %d\n",
+ urb->ep->desc.bEndpointAddress,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length,
+ (unsigned long long)urb->transfer_dma,
+ num_tds);
+
+ start_addr = (u64) urb->transfer_dma;
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ /* Queue the first TRB, even if it's zero-length */
+ for (i = 0; i < num_tds; i++) {
+ first_trb = true;
+
+ running_total = 0;
+ addr = start_addr + urb->iso_frame_desc[i].offset;
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+
+ trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+ ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+ urb->stream_id, trbs_per_td, urb, i, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ urb_priv = urb->hcpriv;
+ td = urb_priv->td[i];
+
+ for (j = 0; j < trbs_per_td; j++) {
+ u32 remainder = 0;
+ field = 0;
+
+ if (first_trb) {
+ /* Queue the isoc TRB */
+ field |= TRB_TYPE(TRB_ISOC);
+ /* Assume URB_ISO_ASAP is set */
+ field |= TRB_SIA;
+ if (i > 0)
+ field |= ep_ring->cycle_state;
+ first_trb = false;
+ } else {
+ /* Queue other normal TRBs */
+ field |= TRB_TYPE(TRB_NORMAL);
+ field |= ep_ring->cycle_state;
+ }
+
+ /* Chain all the TRBs together; clear the chain bit in
+ * the last TRB to indicate it's the last TRB in the
+ * chain.
+ */
+ if (j < trbs_per_td - 1) {
+ field |= TRB_CHAIN;
+ } else {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+
+ /* Calculate TRB length */
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (trb_buff_len > td_remain_len)
+ trb_buff_len = td_remain_len;
+
+ remainder = xhci_td_remainder(td_len - running_total);
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+ TRB_INTR_TARGET(0);
+ queue_trb(xhci, ep_ring, false, false,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ length_field,
+ /* We always want to know if the TRB was short,
+ * or we won't get an event when it completes.
+ * (Unless we use event data TRBs, which are a
+ * waste of space and HC resources.)
+ */
+ field | TRB_ISP);
+ running_total += trb_buff_len;
+
+ addr += trb_buff_len;
+ td_remain_len -= trb_buff_len;
+ }
+
+ /* Check TD length */
+ if (running_total != td_len) {
+ xhci_err(xhci, "ISOC TD length mismatch\n");
+ return -EINVAL;
+ }
+ }
+
+ wmb();
+ start_trb->field[3] |= start_cycle;
+
+ ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+ return 0;
+}
+
+/*
+ * Check the transfer ring to guarantee there is enough room for the URB.
+ * Update the ISO URB's start_frame and interval.
+ * Update the interval as xhci_queue_intr_tx does. For now, just use the
+ * xHCI frame_index to update urb->start_frame.
+ * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ struct xhci_ep_ctx *ep_ctx;
+ int start_frame;
+ int xhci_interval;
+ int ep_interval;
+ int num_tds, num_trbs, i;
+ int ret;
+
+ xdev = xhci->devs[slot_id];
+ ep_ring = xdev->eps[ep_index].ring;
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+ num_trbs = 0;
+ num_tds = urb->number_of_packets;
+ for (i = 0; i < num_tds; i++)
+ num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+
+ /* Check the ring to guarantee there is enough room for the whole URB.
+ * Do not insert any TD of the URB into the ring if the check fails.
+ */
+ ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+ num_trbs, mem_flags);
+ if (ret)
+ return ret;
+
+ start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ start_frame &= 0x3fff;
+
+ urb->start_frame = start_frame;
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ urb->start_frame >>= 3;
+
+ xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+ ep_interval = urb->interval;
+ /* Convert to microframes */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ ep_interval *= 8;
+ /* FIXME change this to a warning and a suggestion to use the new API
+ * to set the polling interval (once the API is added).
+ */
+ if (xhci_interval != ep_interval) {
+ if (printk_ratelimit())
+ dev_dbg(&urb->dev->dev, "Driver uses different interval"
+ " (%d microframe%s) than xHCI "
+ "(%d microframe%s)\n",
+ ep_interval,
+ ep_interval == 1 ? "" : "s",
+ xhci_interval,
+ xhci_interval == 1 ? "" : "s");
+ urb->interval = xhci_interval;
+ /* Convert back to frames for LS/FS devices */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ urb->interval /= 8;
+ }
+ return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
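Just above, the URB's interval is reconciled with the interval programmed in the endpoint context: urb->interval is counted in 1 ms frames for LS/FS devices and in microframes otherwise, the endpoint context value is always in microframes, and on a mismatch the context value wins. A minimal sketch of that conversion, not the driver code itself; the function name and parameters are illustrative only:

/* Returns the value urb->interval would be set to. */
static unsigned int reconcile_interval(unsigned int urb_interval,
				       unsigned int xhci_interval,
				       int low_or_full_speed)
{
	unsigned int ep_interval = urb_interval;

	/* LS/FS intervals are counted in 1 ms frames; scale to 125 us uframes */
	if (low_or_full_speed)
		ep_interval *= 8;

	if (ep_interval == xhci_interval)
		return urb_interval;	/* driver and endpoint context agree */

	/* Trust the endpoint context; convert back to frames for LS/FS */
	return low_or_full_speed ? xhci_interval / 8 : xhci_interval;
}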
/**** Command Ring Operations ****/
/* Generic function for queueing a command TRB on the command ring.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3998f72cd0c4..d5c550ea3e68 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
@@ -171,22 +172,84 @@ int xhci_reset(struct xhci_hcd *xhci)
return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
+/*
+ * Free IRQs
+ * free all requested IRQs
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+ int i;
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-#if 0
-/* Set up MSI-X table for entry 0 (may claim other entries later) */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
+ /* return if using legacy interrupt */
+ if (xhci_to_hcd(xhci)->irq >= 0)
+ return;
+
+ if (xhci->msix_entries) {
+ for (i = 0; i < xhci->msix_count; i++)
+ if (xhci->msix_entries[i].vector)
+ free_irq(xhci->msix_entries[i].vector,
+ xhci_to_hcd(xhci));
+ } else if (pdev->irq >= 0)
+ free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+ return;
+}
+
+/*
+ * Set up MSI
+ */
+static int xhci_setup_msi(struct xhci_hcd *xhci)
{
int ret;
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ xhci_err(xhci, "failed to allocate MSI entry\n");
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+ 0, "xhci_hcd", xhci_to_hcd(xhci));
+ if (ret) {
+ xhci_err(xhci, "disable MSI interrupt\n");
+ pci_disable_msi(pdev);
+ }
+
+ return ret;
+}
+
+/*
+ * Set up MSI-X
+ */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+ int i, ret = 0;
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- xhci->msix_count = 0;
- /* XXX: did I do this right? ixgbe does kcalloc for more than one */
- xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+ /*
+ * Calculate the number of MSI-X vectors supported:
+ * - HCS_MAX_INTRS: the maximum number of interrupters the host can
+ * handle, taken from the xHCI HCSPARAMS1 register.
+ * - num_online_cpus: at most one MSI-X vector per online CPU core.
+ * Add one extra vector so an interrupt is always available.
+ */
+ xhci->msix_count = min(num_online_cpus() + 1,
+ HCS_MAX_INTRS(xhci->hcs_params1));
+
+ xhci->msix_entries =
+ kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
+ GFP_KERNEL);
if (!xhci->msix_entries) {
xhci_err(xhci, "Failed to allocate MSI-X entries\n");
return -ENOMEM;
}
- xhci->msix_entries[0].entry = 0;
+
+ for (i = 0; i < xhci->msix_count; i++) {
+ xhci->msix_entries[i].entry = i;
+ xhci->msix_entries[i].vector = 0;
+ }
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
@@ -194,20 +257,19 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
goto free_entries;
}
- /*
- * Pass the xhci pointer value as the request_irq "cookie".
- * If more irqs are added, this will need to be unique for each one.
- */
- ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
- "xHCI", xhci_to_hcd(xhci));
- if (ret) {
- xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
- goto disable_msix;
+ for (i = 0; i < xhci->msix_count; i++) {
+ ret = request_irq(xhci->msix_entries[i].vector,
+ (irq_handler_t)xhci_msi_irq,
+ 0, "xhci_hcd", xhci_to_hcd(xhci));
+ if (ret)
+ goto disable_msix;
}
- xhci_dbg(xhci, "Finished setting up MSI-X\n");
- return 0;
+
+ return ret;
disable_msix:
+ xhci_err(xhci, "disable MSI-X interrupt\n");
+ xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
kfree(xhci->msix_entries);
@@ -215,21 +277,23 @@ free_entries:
return ret;
}
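The msix_count chosen above reduces to a simple clamp: one vector per online CPU plus one spare, limited by the interrupter count advertised in HCSPARAMS1. A minimal sketch with plain parameters standing in for num_online_cpus() and HCS_MAX_INTRS(xhci->hcs_params1):

/* Illustrative only: the vector-count policy used by xhci_setup_msix(). */
static unsigned int msix_vector_count(unsigned int online_cpus,
				      unsigned int hcs_max_intrs)
{
	unsigned int wanted = online_cpus + 1;	/* one per CPU plus a spare */

	return wanted < hcs_max_intrs ? wanted : hcs_max_intrs;
}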
-/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- if (!xhci->msix_entries)
- return;
- free_irq(xhci->msix_entries[0].vector, xhci);
- pci_disable_msix(pdev);
- kfree(xhci->msix_entries);
- xhci->msix_entries = NULL;
- xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+ xhci_free_irq(xhci);
+
+ if (xhci->msix_entries) {
+ pci_disable_msix(pdev);
+ kfree(xhci->msix_entries);
+ xhci->msix_entries = NULL;
+ } else {
+ pci_disable_msi(pdev);
+ }
+
+ return;
}
-#endif
/*
* Initialize memory for HCD and xHC (one-time init).
@@ -257,100 +321,8 @@ int xhci_init(struct usb_hcd *hcd)
return retval;
}
-/*
- * Called in interrupt context when there might be work
- * queued on the event ring
- *
- * xhci->lock must be held by caller.
- */
-static void xhci_work(struct xhci_hcd *xhci)
-{
- u32 temp;
- u64 temp_64;
-
- /*
- * Clear the op reg interrupt status first,
- * so we can receive interrupts from other MSI-X interrupters.
- * Write 1 to clear the interrupt status.
- */
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- temp |= STS_EINT;
- xhci_writel(xhci, temp, &xhci->op_regs->status);
- /* FIXME when MSI-X is supported and there are multiple vectors */
- /* Clear the MSI-X event interrupt status */
-
- /* Acknowledge the interrupt */
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- temp |= 0x3;
- xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
- /* Flush posted writes */
- xhci_readl(xhci, &xhci->ir_set->irq_pending);
-
- if (xhci->xhc_state & XHCI_STATE_DYING)
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- else
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- xhci_handle_event(xhci);
-
- /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
- /* Flush posted writes -- FIXME is this necessary? */
- xhci_readl(xhci, &xhci->ir_set->irq_pending);
-}
-
/*-------------------------------------------------------------------------*/
-/*
- * xHCI spec says we can get an interrupt, and if the HC has an error condition,
- * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
- * indicators of an event TRB error, but we check the status *first* to be safe.
- */
-irqreturn_t xhci_irq(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- u32 temp, temp2;
- union xhci_trb *trb;
-
- spin_lock(&xhci->lock);
- trb = xhci->event_ring->dequeue;
- /* Check if the xHC generated the interrupt, or the irq is shared */
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- if (temp == 0xffffffff && temp2 == 0xffffffff)
- goto hw_died;
-
- if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
- spin_unlock(&xhci->lock);
- return IRQ_NONE;
- }
- xhci_dbg(xhci, "op reg status = %08x\n", temp);
- xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(trb->link.segment_ptr),
- upper_32_bits(trb->link.segment_ptr),
- (unsigned int) trb->link.intr_target,
- (unsigned int) trb->link.control);
-
- if (temp & STS_FATAL) {
- xhci_warn(xhci, "WARNING: Host System Error\n");
- xhci_halt(xhci);
-hw_died:
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
- spin_unlock(&xhci->lock);
- return -ESHUTDOWN;
- }
-
- xhci_work(xhci);
- spin_unlock(&xhci->lock);
-
- return IRQ_HANDLED;
-}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
@@ -423,21 +395,36 @@ int xhci_run(struct usb_hcd *hcd)
{
u32 temp;
u64 temp_64;
+ u32 ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
void (*doorbell)(struct xhci_hcd *) = NULL;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
xhci_dbg(xhci, "xhci_run\n");
-#if 0 /* FIXME: MSI not setup yet */
- /* Do this at the very last minute */
+ /* unregister the legacy interrupt */
+ if (hcd->irq)
+ free_irq(hcd->irq, hcd);
+ hcd->irq = -1;
+
ret = xhci_setup_msix(xhci);
- if (!ret)
- return ret;
+ if (ret)
+ /* fall back to MSI */
+ ret = xhci_setup_msi(xhci);
+
+ if (ret) {
+ /* fall back to legacy interrupt */
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+ if (ret) {
+ xhci_err(xhci, "request interrupt %d failed\n",
+ pdev->irq);
+ return ret;
+ }
+ hcd->irq = pdev->irq;
+ }
- return -ENOSYS;
-#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
init_timer(&xhci->event_ring_timer);
xhci->event_ring_timer.data = (unsigned long) xhci;
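With the hunk above, interrupt setup in xhci_run() falls back in three steps: MSI-X first, then MSI, then a shared legacy IRQ. A condensed sketch of that fallback chain, reusing the helpers introduced by this patch; it is not compilable on its own and the error reporting is trimmed:

static int xhci_try_enable_irqs(struct xhci_hcd *xhci, struct usb_hcd *hcd,
				struct pci_dev *pdev)
{
	int ret = xhci_setup_msix(xhci);	/* preferred: MSI-X */

	if (ret)
		ret = xhci_setup_msi(xhci);	/* fall back to MSI */
	if (ret) {
		/* last resort: shared legacy interrupt line */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
				  hcd->irq_descr, hcd);
		if (!ret)
			hcd->irq = pdev->irq;
	}
	return ret;
}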
@@ -495,7 +482,6 @@ int xhci_run(struct usb_hcd *hcd)
return -ENODEV;
}
- xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
if (doorbell)
(*doorbell)(xhci);
if (xhci->quirks & XHCI_NEC_HOST)
@@ -522,11 +508,9 @@ void xhci_stop(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
xhci_reset(xhci);
+ xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
-#if 0 /* No MSI yet */
- xhci_cleanup_msix(xhci);
-#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Tell the event ring poll function not to reschedule */
xhci->zombie = 1;
@@ -560,11 +544,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
- spin_unlock_irq(&xhci->lock);
-
-#if 0
xhci_cleanup_msix(xhci);
-#endif
+ spin_unlock_irq(&xhci->lock);
xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
xhci_readl(xhci, &xhci->op_regs->status));
@@ -720,7 +701,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
unsigned long flags;
int ret = 0;
unsigned int slot_id, ep_index;
-
+ struct urb_priv *urb_priv;
+ int size, i;
if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
return -EINVAL;
@@ -734,12 +716,36 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = -EINVAL;
goto exit;
}
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
if (!in_interrupt())
xhci_dbg(xhci, "urb submitted during PCI suspend\n");
ret = -ESHUTDOWN;
goto exit;
}
+
+ if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+ size = urb->number_of_packets;
+ else
+ size = 1;
+
+ urb_priv = kzalloc(sizeof(struct urb_priv) +
+ size * sizeof(struct xhci_td *), mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
+ if (!urb_priv->td[i]) {
+ urb_priv->length = i;
+ xhci_urb_free_priv(xhci, urb_priv);
+ return -ENOMEM;
+ }
+ }
+
+ urb_priv->length = size;
+ urb_priv->td_cnt = 0;
+ urb->hcpriv = urb_priv;
+
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
/* Check to see if the max packet size for the default control
* endpoint changed during FS device enumeration
@@ -788,11 +794,18 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
- ret = -EINVAL;
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ goto dying;
+ ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
+ slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
}
exit:
return ret;
dying:
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
@@ -800,6 +813,47 @@ dying:
return -ESHUTDOWN;
}
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ unsigned int stream_id;
+ struct xhci_virt_ep *ep;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ stream_id = urb->stream_id;
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
/*
* Remove the URB's TD from the endpoint ring. This may cause the HC to stop
* USB transfers, potentially stopping in the middle of a TRB buffer. The HC
@@ -834,9 +888,10 @@ dying:
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
- int ret;
+ int ret, i;
u32 temp;
struct xhci_hcd *xhci;
+ struct urb_priv *urb_priv;
struct xhci_td *td;
unsigned int ep_index;
struct xhci_ring *ep_ring;
@@ -851,12 +906,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
temp = xhci_readl(xhci, &xhci->op_regs->status);
if (temp == 0xffffffff) {
xhci_dbg(xhci, "HW died, freeing TD.\n");
- td = (struct xhci_td *) urb->hcpriv;
+ urb_priv = urb->hcpriv;
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
- kfree(td);
+ xhci_urb_free_priv(xhci, urb_priv);
return ret;
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -884,9 +939,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
- td = (struct xhci_td *) urb->hcpriv;
- list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ urb_priv = urb->hcpriv;
+
+ for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ td = urb_priv->td[i];
+ list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ }
+
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
*/
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6c7e3430ec93..34a60d9f056a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -720,6 +720,14 @@ struct xhci_virt_ep {
struct timer_list stop_cmd_timer;
int stop_cmds_pending;
struct xhci_hcd *xhci;
+ /*
+ * Sometimes the xHC cannot process the isochronous endpoint ring quickly
+ * enough; it then misses some isoc TDs on the ring and generates
+ * a Missed Service Error Event.
+ * The skip flag is set when such an event is received, and the missed
+ * TDs on the endpoint ring are then processed.
+ */
+ bool skip;
};
struct xhci_virt_device {
@@ -911,6 +919,9 @@ struct xhci_event_cmd {
/* Control transfer TRB specific fields */
#define TRB_DIR_IN (1<<16)
+/* Isochronous TRB specific fields */
+#define TRB_SIA (1<<31)
+
struct xhci_generic_trb {
u32 field[4];
};
@@ -1082,6 +1093,12 @@ struct xhci_scratchpad {
dma_addr_t *sp_dma_buffers;
};
+struct urb_priv {
+ int length;
+ int td_cnt;
+ struct xhci_td *td[0];
+};
+
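The td[0] member above is the zero-length-array idiom, so a single allocation holds the urb_priv header plus one xhci_td pointer per TD. A minimal sketch of the sizing used by the kzalloc() in xhci_urb_enqueue(); the helper name is illustrative:

/* Size of an urb_priv carrying num_tds TD pointers. */
static size_t urb_priv_size(unsigned int num_tds)
{
	return sizeof(struct urb_priv) + num_tds * sizeof(struct xhci_td *);
}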
/*
* Each segment table entry is 4*32bits long. 1K seems like an ok size:
* (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
@@ -1130,7 +1147,7 @@ struct xhci_hcd {
int page_size;
/* Valid values are 12 to 20, inclusive */
int page_shift;
- /* only one MSI vector for now, but might need more later */
+ /* msi-x vectors */
int msix_count;
struct msix_entry *msix_entries;
/* data structures */
@@ -1327,11 +1344,6 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
-struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb);
-struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id);
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
@@ -1339,6 +1351,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags);
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
@@ -1358,6 +1371,7 @@ void xhci_stop(struct usb_hcd *hcd);
void xhci_shutdown(struct usb_hcd *hcd);
int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
+irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
@@ -1386,8 +1400,6 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
void *xhci_setup_one_noop(struct xhci_hcd *xhci);
-void xhci_handle_event(struct xhci_hcd *xhci);
-void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
@@ -1401,6 +1413,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,