Diffstat (limited to 'drivers/usb/dwc2')
-rw-r--r-- | drivers/usb/dwc2/Kconfig | 2
-rw-r--r-- | drivers/usb/dwc2/core.c | 2105
-rw-r--r-- | drivers/usb/dwc2/core.h | 215
-rw-r--r-- | drivers/usb/dwc2/core_intr.c | 53
-rw-r--r-- | drivers/usb/dwc2/gadget.c | 211
-rw-r--r-- | drivers/usb/dwc2/hcd.c | 2383
-rw-r--r-- | drivers/usb/dwc2/hcd.h | 163
-rw-r--r-- | drivers/usb/dwc2/hcd_ddma.c | 290
-rw-r--r-- | drivers/usb/dwc2/hcd_intr.c | 221
-rw-r--r-- | drivers/usb/dwc2/hcd_queue.c | 1941
-rw-r--r-- | drivers/usb/dwc2/hw.h | 4
-rw-r--r-- | drivers/usb/dwc2/platform.c | 200
12 files changed, 5045 insertions, 2743 deletions
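The largest functional change in core.c below is the reworked dwc2_core_reset(): it now asserts GRSTCTL.CSFTRST first, then polls for the reset bit to clear and for the AHB master to go idle, using a 1 us delay per iteration and a 50-iteration bail-out in place of the old 20-40 ms sleeps. A minimal sketch of that poll-with-timeout idiom, assuming the register names and dwc2_readl() accessor from the patch; poll_grstctl_bit() is a hypothetical helper, not part of the driver:

/*
 * Hypothetical helper illustrating the polling idiom used by the new
 * dwc2_core_reset(): wait for a GRSTCTL bit to become set (or clear),
 * giving up after ~50 iterations as the patch does.
 */
static int poll_grstctl_bit(struct dwc2_hsotg *hsotg, u32 bit, bool want_set)
{
	u32 greset;
	int count = 0;

	do {
		udelay(1);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50)
			return -EBUSY;	/* bit never reached the wanted state */
	} while (want_set ? !(greset & bit) : (greset & bit));

	return 0;
}

With such a helper, the two loops in dwc2_core_reset() would reduce to poll_grstctl_bit(hsotg, GRSTCTL_CSFTRST, false) followed by poll_grstctl_bit(hsotg, GRSTCTL_AHBIDLE, true).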
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig index fd95ba6ec317..c1f29caa8990 100644 --- a/drivers/usb/dwc2/Kconfig +++ b/drivers/usb/dwc2/Kconfig @@ -1,6 +1,8 @@ config USB_DWC2 tristate "DesignWare USB2 DRD Core Support" + depends on HAS_DMA depends on USB || USB_GADGET + depends on HAS_IOMEM help Say Y here if your system has a Dual Role Hi-Speed USB controller based on the DesignWare HSOTG IP Core. diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index ef73e498e98f..4135a5ff67ca 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -56,189 +56,6 @@ #include "core.h" #include "hcd.h" -#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) -/** - * dwc2_backup_host_registers() - Backup controller host registers. - * When suspending usb bus, registers needs to be backuped - * if controller power is disabled once suspended. - * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hregs_backup *hr; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Backup Host regs */ - hr = &hsotg->hr_backup; - hr->hcfg = dwc2_readl(hsotg->regs + HCFG); - hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK); - for (i = 0; i < hsotg->core_params->host_channels; ++i) - hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i)); - - hr->hprt0 = dwc2_read_hprt0(hsotg); - hr->hfir = dwc2_readl(hsotg->regs + HFIR); - hr->valid = true; - - return 0; -} - -/** - * dwc2_restore_host_registers() - Restore controller host registers. - * When resuming usb bus, device registers needs to be restored - * if controller power were disabled. - * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hregs_backup *hr; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Restore host regs */ - hr = &hsotg->hr_backup; - if (!hr->valid) { - dev_err(hsotg->dev, "%s: no host registers to restore\n", - __func__); - return -EINVAL; - } - hr->valid = false; - - dwc2_writel(hr->hcfg, hsotg->regs + HCFG); - dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK); - - for (i = 0; i < hsotg->core_params->host_channels; ++i) - dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i)); - - dwc2_writel(hr->hprt0, hsotg->regs + HPRT0); - dwc2_writel(hr->hfir, hsotg->regs + HFIR); - hsotg->frame_number = 0; - - return 0; -} -#else -static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) -{ return 0; } - -static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) -{ return 0; } -#endif - -#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ - IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) -/** - * dwc2_backup_device_registers() - Backup controller device registers. - * When suspending usb bus, registers needs to be backuped - * if controller power is disabled once suspended. 
- * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_dregs_backup *dr; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Backup dev regs */ - dr = &hsotg->dr_backup; - - dr->dcfg = dwc2_readl(hsotg->regs + DCFG); - dr->dctl = dwc2_readl(hsotg->regs + DCTL); - dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); - dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK); - dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK); - - for (i = 0; i < hsotg->num_of_eps; i++) { - /* Backup IN EPs */ - dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i)); - - /* Ensure DATA PID is correctly configured */ - if (dr->diepctl[i] & DXEPCTL_DPID) - dr->diepctl[i] |= DXEPCTL_SETD1PID; - else - dr->diepctl[i] |= DXEPCTL_SETD0PID; - - dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i)); - dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i)); - - /* Backup OUT EPs */ - dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i)); - - /* Ensure DATA PID is correctly configured */ - if (dr->doepctl[i] & DXEPCTL_DPID) - dr->doepctl[i] |= DXEPCTL_SETD1PID; - else - dr->doepctl[i] |= DXEPCTL_SETD0PID; - - dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i)); - dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i)); - } - dr->valid = true; - return 0; -} - -/** - * dwc2_restore_device_registers() - Restore controller device registers. - * When resuming usb bus, device registers needs to be restored - * if controller power were disabled. - * - * @hsotg: Programming view of the DWC_otg controller - */ -static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) -{ - struct dwc2_dregs_backup *dr; - u32 dctl; - int i; - - dev_dbg(hsotg->dev, "%s\n", __func__); - - /* Restore dev regs */ - dr = &hsotg->dr_backup; - if (!dr->valid) { - dev_err(hsotg->dev, "%s: no device registers to restore\n", - __func__); - return -EINVAL; - } - dr->valid = false; - - dwc2_writel(dr->dcfg, hsotg->regs + DCFG); - dwc2_writel(dr->dctl, hsotg->regs + DCTL); - dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK); - dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK); - dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK); - - for (i = 0; i < hsotg->num_of_eps; i++) { - /* Restore IN EPs */ - dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i)); - dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i)); - dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i)); - - /* Restore OUT EPs */ - dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i)); - dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i)); - dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i)); - } - - /* Set the Power-On Programming done bit */ - dctl = dwc2_readl(hsotg->regs + DCTL); - dctl |= DCTL_PWRONPRGDONE; - dwc2_writel(dctl, hsotg->regs + DCTL); - - return 0; -} -#else -static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) -{ return 0; } - -static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) -{ return 0; } -#endif - /** * dwc2_backup_global_registers() - Backup global controller registers. 
* When suspending usb bus, registers needs to be backuped @@ -421,1744 +238,176 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) return ret; } -/** - * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, - * used in both device and host modes - * - * @hsotg: Programming view of the DWC_otg controller - */ -static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) -{ - u32 intmsk; - - /* Clear any pending OTG Interrupts */ - dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); - - /* Clear any pending interrupts */ - dwc2_writel(0xffffffff, hsotg->regs + GINTSTS); - - /* Enable the interrupts in the GINTMSK */ - intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; - - if (hsotg->core_params->dma_enable <= 0) - intmsk |= GINTSTS_RXFLVL; - if (hsotg->core_params->external_id_pin_ctl <= 0) - intmsk |= GINTSTS_CONIDSTSCHNG; - - intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | - GINTSTS_SESSREQINT; - - dwc2_writel(intmsk, hsotg->regs + GINTMSK); -} - -/* - * Initializes the FSLSPClkSel field of the HCFG register depending on the - * PHY type - */ -static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) -{ - u32 hcfg, val; - - if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) || - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { - /* Full speed PHY */ - val = HCFG_FSLSPCLKSEL_48_MHZ; - } else { - /* High speed PHY running at full speed or high speed */ - val = HCFG_FSLSPCLKSEL_30_60_MHZ; - } - - dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); - hcfg = dwc2_readl(hsotg->regs + HCFG); - hcfg &= ~HCFG_FSLSPCLKSEL_MASK; - hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; - dwc2_writel(hcfg, hsotg->regs + HCFG); -} - /* * Do core a soft reset of the core. Be careful with this because it * resets all the internal state machines of the core. */ -static int dwc2_core_reset(struct dwc2_hsotg *hsotg) +int dwc2_core_reset(struct dwc2_hsotg *hsotg) { u32 greset; int count = 0; - u32 gusbcfg; dev_vdbg(hsotg->dev, "%s()\n", __func__); - /* Wait for AHB master IDLE state */ + /* Core Soft Reset */ + greset = dwc2_readl(hsotg->regs + GRSTCTL); + greset |= GRSTCTL_CSFTRST; + dwc2_writel(greset, hsotg->regs + GRSTCTL); do { - usleep_range(20000, 40000); + udelay(1); greset = dwc2_readl(hsotg->regs + GRSTCTL); if (++count > 50) { dev_warn(hsotg->dev, - "%s() HANG! AHB Idle GRSTCTL=%0x\n", + "%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, greset); return -EBUSY; } - } while (!(greset & GRSTCTL_AHBIDLE)); + } while (greset & GRSTCTL_CSFTRST); - /* Core Soft Reset */ + /* Wait for AHB master IDLE state */ count = 0; - greset |= GRSTCTL_CSFTRST; - dwc2_writel(greset, hsotg->regs + GRSTCTL); do { - usleep_range(20000, 40000); + udelay(1); greset = dwc2_readl(hsotg->regs + GRSTCTL); if (++count > 50) { dev_warn(hsotg->dev, - "%s() HANG! Soft Reset GRSTCTL=%0x\n", + "%s() HANG! 
AHB Idle GRSTCTL=%0x\n", __func__, greset); return -EBUSY; } - } while (greset & GRSTCTL_CSFTRST); - - if (hsotg->dr_mode == USB_DR_MODE_HOST) { - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEDEVMODE; - gusbcfg |= GUSBCFG_FORCEHOSTMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; - gusbcfg |= GUSBCFG_FORCEDEVMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - } else if (hsotg->dr_mode == USB_DR_MODE_OTG) { - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; - gusbcfg &= ~GUSBCFG_FORCEDEVMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - } - - /* - * NOTE: This long sleep is _very_ important, otherwise the core will - * not stay in host mode after a connector ID change! - */ - usleep_range(150000, 200000); - - return 0; -} - -static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) -{ - u32 usbcfg, i2cctl; - int retval = 0; - - /* - * core_init() is now called on every switch so only call the - * following for the first time through - */ - if (select_phy) { - dev_dbg(hsotg->dev, "FS PHY selected\n"); - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg |= GUSBCFG_PHYSEL; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Reset after a PHY select */ - retval = dwc2_core_reset(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s() Reset failed, aborting", - __func__); - return retval; - } - } - - /* - * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also - * do this on HNP Dev/Host mode switches (done in dev_init and - * host_init). - */ - if (dwc2_is_host_mode(hsotg)) - dwc2_init_fs_ls_pclk_sel(hsotg); - - if (hsotg->core_params->i2c_enable > 0) { - dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); - - /* Program GUSBCFG.OtgUtmiFsSel to I2C */ - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Program GI2CCTL.I2CEn */ - i2cctl = dwc2_readl(hsotg->regs + GI2CCTL); - i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; - i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; - i2cctl &= ~GI2CCTL_I2CEN; - dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); - i2cctl |= GI2CCTL_I2CEN; - dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); - } - - return retval; -} - -static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) -{ - u32 usbcfg; - int retval = 0; - - if (!select_phy) - return 0; - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - - /* - * HS PHY parameters. These parameters are preserved during soft reset - * so only program the first time. Do a soft reset immediately after - * setting phyif. 
- */ - switch (hsotg->core_params->phy_type) { - case DWC2_PHY_TYPE_PARAM_ULPI: - /* ULPI interface */ - dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); - usbcfg |= GUSBCFG_ULPI_UTMI_SEL; - usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); - if (hsotg->core_params->phy_ulpi_ddr > 0) - usbcfg |= GUSBCFG_DDRSEL; - break; - case DWC2_PHY_TYPE_PARAM_UTMI: - /* UTMI+ interface */ - dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); - usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); - if (hsotg->core_params->phy_utmi_width == 16) - usbcfg |= GUSBCFG_PHYIF16; - break; - default: - dev_err(hsotg->dev, "FS PHY selected at HS!\n"); - break; - } - - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Reset after setting the PHY parameters */ - retval = dwc2_core_reset(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s() Reset failed, aborting", - __func__); - return retval; - } - - return retval; -} - -static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) -{ - u32 usbcfg; - int retval = 0; - - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { - /* If FS mode with FS PHY */ - retval = dwc2_fs_phy_init(hsotg, select_phy); - if (retval) - return retval; - } else { - /* High speed PHY */ - retval = dwc2_hs_phy_init(hsotg, select_phy); - if (retval) - return retval; - } - - if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) { - dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg |= GUSBCFG_ULPI_FS_LS; - usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - } else { - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg &= ~GUSBCFG_ULPI_FS_LS; - usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M; - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - } - - return retval; -} - -static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) -{ - u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); - - switch (hsotg->hw_params.arch) { - case GHWCFG2_EXT_DMA_ARCH: - dev_err(hsotg->dev, "External DMA Mode not supported\n"); - return -EINVAL; - - case GHWCFG2_INT_DMA_ARCH: - dev_dbg(hsotg->dev, "Internal DMA Mode\n"); - if (hsotg->core_params->ahbcfg != -1) { - ahbcfg &= GAHBCFG_CTRL_MASK; - ahbcfg |= hsotg->core_params->ahbcfg & - ~GAHBCFG_CTRL_MASK; - } - break; - - case GHWCFG2_SLAVE_ONLY_ARCH: - default: - dev_dbg(hsotg->dev, "Slave Only Mode\n"); - break; - } - - dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", - hsotg->core_params->dma_enable, - hsotg->core_params->dma_desc_enable); - - if (hsotg->core_params->dma_enable > 0) { - if (hsotg->core_params->dma_desc_enable > 0) - dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); - else - dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); - } else { - dev_dbg(hsotg->dev, "Using Slave mode\n"); - hsotg->core_params->dma_desc_enable = 0; - } - - if (hsotg->core_params->dma_enable > 0) - ahbcfg |= GAHBCFG_DMA_EN; - - dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); + } while (!(greset & GRSTCTL_AHBIDLE)); return 0; } -static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) -{ - u32 usbcfg; - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); - - switch (hsotg->hw_params.op_mode) { - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - if (hsotg->core_params->otg_cap == - DWC2_CAP_PARAM_HNP_SRP_CAPABLE) - usbcfg |= GUSBCFG_HNPCAP; - if (hsotg->core_params->otg_cap != - 
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) - usbcfg |= GUSBCFG_SRPCAP; - break; - - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - if (hsotg->core_params->otg_cap != - DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) - usbcfg |= GUSBCFG_SRPCAP; - break; - - case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: - case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: - default: - break; - } - - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); -} - -/** - * dwc2_core_init() - Initializes the DWC_otg controller registers and - * prepares the core for device mode or host mode operation +/* + * Force the mode of the controller. * - * @hsotg: Programming view of the DWC_otg controller - * @select_phy: If true then also set the Phy type - * @irq: If >= 0, the irq to register - */ -int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq) -{ - u32 usbcfg, otgctl; - int retval; - - dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - - /* Set ULPI External VBUS bit if needed */ - usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; - if (hsotg->core_params->phy_ulpi_ext_vbus == - DWC2_PHY_ULPI_EXTERNAL_VBUS) - usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; - - /* Set external TS Dline pulsing bit if needed */ - usbcfg &= ~GUSBCFG_TERMSELDLPULSE; - if (hsotg->core_params->ts_dline > 0) - usbcfg |= GUSBCFG_TERMSELDLPULSE; - - dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); - - /* Reset the Controller */ - retval = dwc2_core_reset(hsotg); - if (retval) { - dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", - __func__); - return retval; - } - - /* - * This needs to happen in FS mode before any other programming occurs - */ - retval = dwc2_phy_init(hsotg, select_phy); - if (retval) - return retval; - - /* Program the GAHBCFG Register */ - retval = dwc2_gahbcfg_init(hsotg); - if (retval) - return retval; - - /* Program the GUSBCFG register */ - dwc2_gusbcfg_init(hsotg); - - /* Program the GOTGCTL register */ - otgctl = dwc2_readl(hsotg->regs + GOTGCTL); - otgctl &= ~GOTGCTL_OTGVER; - if (hsotg->core_params->otg_ver > 0) - otgctl |= GOTGCTL_OTGVER; - dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); - - /* Clear the SRP success bit for FS-I2c */ - hsotg->srp_success = 0; - - /* Enable common interrupts */ - dwc2_enable_common_interrupts(hsotg); - - /* - * Do device or host initialization based on mode during PCD and - * HCD initialization - */ - if (dwc2_is_host_mode(hsotg)) { - dev_dbg(hsotg->dev, "Host Mode\n"); - hsotg->op_state = OTG_STATE_A_HOST; - } else { - dev_dbg(hsotg->dev, "Device Mode\n"); - hsotg->op_state = OTG_STATE_B_PERIPHERAL; - } - - return 0; -} - -/** - * dwc2_enable_host_interrupts() - Enables the Host mode interrupts + * Forcing the mode is needed for two cases: * - * @hsotg: Programming view of DWC_otg controller - */ -void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) -{ - u32 intmsk; - - dev_dbg(hsotg->dev, "%s()\n", __func__); - - /* Disable all interrupts */ - dwc2_writel(0, hsotg->regs + GINTMSK); - dwc2_writel(0, hsotg->regs + HAINTMSK); - - /* Enable the common interrupts */ - dwc2_enable_common_interrupts(hsotg); - - /* Enable host mode interrupts without disturbing common interrupts */ - intmsk = dwc2_readl(hsotg->regs + GINTMSK); - intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; - dwc2_writel(intmsk, hsotg->regs + GINTMSK); -} - -/** - * dwc2_disable_host_interrupts() - 
Disables the Host Mode interrupts + * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the + * controller to stay in a particular mode regardless of ID pin + * changes. We do this usually after a core reset. * - * @hsotg: Programming view of DWC_otg controller - */ -void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) -{ - u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK); - - /* Disable host mode interrupts without disturbing common interrupts */ - intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | - GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT); - dwc2_writel(intmsk, hsotg->regs + GINTMSK); -} - -/* - * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size - * For system that have a total fifo depth that is smaller than the default - * RX + TX fifo size. + * 2) During probe we want to read reset values of the hw + * configuration registers that are only available in either host or + * device mode. We may need to force the mode if the current mode does + * not allow us to access the register in the mode that we want. * - * @hsotg: Programming view of DWC_otg controller + * In either case it only makes sense to force the mode if the + * controller hardware is OTG capable. + * + * Checks are done in this function to determine whether doing a force + * would be valid or not. + * + * If a force is done, it requires a 25ms delay to take effect. + * + * Returns true if the mode was forced. */ -static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) +static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) { - struct dwc2_core_params *params = hsotg->core_params; - struct dwc2_hw_params *hw = &hsotg->hw_params; - u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; + u32 gusbcfg; + u32 set; + u32 clear; - total_fifo_size = hw->total_fifo_size; - rxfsiz = params->host_rx_fifo_size; - nptxfsiz = params->host_nperio_tx_fifo_size; - ptxfsiz = params->host_perio_tx_fifo_size; + dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device"); /* - * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth - * allocation with support for high bandwidth endpoints. Synopsys - * defines MPS(Max Packet size) for a periodic EP=1024, and for - * non-periodic as 512. + * Force mode has no effect if the hardware is not OTG. */ - if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { - /* - * For Buffer DMA mode/Scatter Gather DMA mode - * 2 * ((Largest Packet size / 4) + 1 + 1) + n - * with n = number of host channel. - * 2 * ((1024/4) + 2) = 516 - */ - rxfsiz = 516 + hw->host_channels; - - /* - * min non-periodic tx fifo depth - * 2 * (largest non-periodic USB packet used / 4) - * 2 * (512/4) = 256 - */ - nptxfsiz = 256; - - /* - * min periodic tx fifo depth - * (largest packet size*MC)/4 - * (1024 * 3)/4 = 768 - */ - ptxfsiz = 768; - - params->host_rx_fifo_size = rxfsiz; - params->host_nperio_tx_fifo_size = nptxfsiz; - params->host_perio_tx_fifo_size = ptxfsiz; - } + if (!dwc2_hw_is_otg(hsotg)) + return false; /* - * If the summation of RX, NPTX and PTX fifo sizes is still - * bigger than the total_fifo_size, then we have a problem. - * - * We won't be able to allocate as many endpoints. Right now, - * we're just printing an error message, but ideally this FIFO - * allocation algorithm would be improved in the future. - * - * FIXME improve this FIFO allocation algorithm. + * If dr_mode is either peripheral or host only, there is no + * need to ever force the mode to the opposite mode. 
*/ - if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) - dev_err(hsotg->dev, "invalid fifo sizes\n"); -} + if (WARN_ON(host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)) + return false; -static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) -{ - struct dwc2_core_params *params = hsotg->core_params; - u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; + if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST)) + return false; - if (!params->enable_dynamic_fifo) - return; + gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - dwc2_calculate_dynamic_fifo(hsotg); + set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE; + clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE; - /* Rx FIFO */ - grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); - dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); - grxfsiz &= ~GRXFSIZ_DEPTH_MASK; - grxfsiz |= params->host_rx_fifo_size << - GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; - dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ); - dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", - dwc2_readl(hsotg->regs + GRXFSIZ)); - - /* Non-periodic Tx FIFO */ - dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + GNPTXFSIZ)); - nptxfsiz = params->host_nperio_tx_fifo_size << - FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; - nptxfsiz |= params->host_rx_fifo_size << - FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; - dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ); - dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + GNPTXFSIZ)); - - /* Periodic Tx FIFO */ - dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + HPTXFSIZ)); - hptxfsiz = params->host_perio_tx_fifo_size << - FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; - hptxfsiz |= (params->host_rx_fifo_size + - params->host_nperio_tx_fifo_size) << - FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; - dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ); - dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", - dwc2_readl(hsotg->regs + HPTXFSIZ)); - - if (hsotg->core_params->en_multiple_tx_fifo > 0 && - hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { - /* - * Global DFIFOCFG calculation for Host mode - - * include RxFIFO, NPTXFIFO and HPTXFIFO - */ - dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG); - dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; - dfifocfg |= (params->host_rx_fifo_size + - params->host_nperio_tx_fifo_size + - params->host_perio_tx_fifo_size) << - GDFIFOCFG_EPINFOBASE_SHIFT & - GDFIFOCFG_EPINFOBASE_MASK; - dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG); - } + gusbcfg &= ~clear; + gusbcfg |= set; + dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); + + msleep(25); + return true; } -/** - * dwc2_core_host_init() - Initializes the DWC_otg controller registers for - * Host mode - * - * @hsotg: Programming view of DWC_otg controller - * - * This function flushes the Tx and Rx FIFOs and flushes any entries in the - * request queues. Host channels are reset to ensure that they are ready for - * performing transfers. +/* + * Clears the force mode bits. 
*/ -void dwc2_core_host_init(struct dwc2_hsotg *hsotg) +static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) { - u32 hcfg, hfir, otgctl; - - dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); - - /* Restart the Phy Clock */ - dwc2_writel(0, hsotg->regs + PCGCTL); + u32 gusbcfg; - /* Initialize Host Configuration Register */ - dwc2_init_fs_ls_pclk_sel(hsotg); - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { - hcfg = dwc2_readl(hsotg->regs + HCFG); - hcfg |= HCFG_FSLSSUPP; - dwc2_writel(hcfg, hsotg->regs + HCFG); - } + gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; + gusbcfg &= ~GUSBCFG_FORCEDEVMODE; + dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); /* - * This bit allows dynamic reloading of the HFIR register during - * runtime. This bit needs to be programmed during initial configuration - * and its value must not be changed during runtime. + * NOTE: This long sleep is _very_ important, otherwise the core will + * not stay in host mode after a connector ID change! */ - if (hsotg->core_params->reload_ctl > 0) { - hfir = dwc2_readl(hsotg->regs + HFIR); - hfir |= HFIR_RLDCTRL; - dwc2_writel(hfir, hsotg->regs + HFIR); - } - - if (hsotg->core_params->dma_desc_enable > 0) { - u32 op_mode = hsotg->hw_params.op_mode; - if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || - !hsotg->hw_params.dma_desc_enable || - op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || - op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || - op_mode == GHWCFG2_OP_MODE_UNDEFINED) { - dev_err(hsotg->dev, - "Hardware does not support descriptor DMA mode -\n"); - dev_err(hsotg->dev, - "falling back to buffer DMA mode.\n"); - hsotg->core_params->dma_desc_enable = 0; - } else { - hcfg = dwc2_readl(hsotg->regs + HCFG); - hcfg |= HCFG_DESCDMA; - dwc2_writel(hcfg, hsotg->regs + HCFG); - } - } - - /* Configure data FIFO sizes */ - dwc2_config_fifos(hsotg); - - /* TODO - check this */ - /* Clear Host Set HNP Enable in the OTG Control Register */ - otgctl = dwc2_readl(hsotg->regs + GOTGCTL); - otgctl &= ~GOTGCTL_HSTSETHNPEN; - dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - - /* Make sure the FIFOs are flushed */ - dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); - dwc2_flush_rx_fifo(hsotg); - - /* Clear Host Set HNP Enable in the OTG Control Register */ - otgctl = dwc2_readl(hsotg->regs + GOTGCTL); - otgctl &= ~GOTGCTL_HSTSETHNPEN; - dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - - if (hsotg->core_params->dma_desc_enable <= 0) { - int num_channels, i; - u32 hcchar; - - /* Flush out any leftover queued requests */ - num_channels = hsotg->core_params->host_channels; - for (i = 0; i < num_channels; i++) { - hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); - hcchar &= ~HCCHAR_CHENA; - hcchar |= HCCHAR_CHDIS; - hcchar &= ~HCCHAR_EPDIR; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); - } - - /* Halt all channels to put them into a known state */ - for (i = 0; i < num_channels; i++) { - int count = 0; - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); - hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; - hcchar &= ~HCCHAR_EPDIR; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); - dev_dbg(hsotg->dev, "%s: Halt channel %d\n", - __func__, i); - do { - hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); - if (++count > 1000) { - dev_err(hsotg->dev, - "Unable to clear enable on channel %d\n", - i); - break; - } - udelay(1); - } while (hcchar & HCCHAR_CHENA); - } - } - - /* Turn on the vbus power */ - dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); - if (hsotg->op_state == OTG_STATE_A_HOST) { - u32 hprt0 = dwc2_read_hprt0(hsotg); - - dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", - !!(hprt0 & HPRT0_PWR)); - if (!(hprt0 & HPRT0_PWR)) { - hprt0 |= HPRT0_PWR; - dwc2_writel(hprt0, hsotg->regs + HPRT0); - } - } - - dwc2_enable_host_interrupts(hsotg); + msleep(25); } -static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) +/* + * Sets or clears force mode based on the dr_mode parameter. + */ +void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg) { - u32 hcintmsk = HCINTMSK_CHHLTD; - - switch (chan->ep_type) { - case USB_ENDPOINT_XFER_CONTROL: - case USB_ENDPOINT_XFER_BULK: - dev_vdbg(hsotg->dev, "control/bulk\n"); - hcintmsk |= HCINTMSK_XFERCOMPL; - hcintmsk |= HCINTMSK_STALL; - hcintmsk |= HCINTMSK_XACTERR; - hcintmsk |= HCINTMSK_DATATGLERR; - if (chan->ep_is_in) { - hcintmsk |= HCINTMSK_BBLERR; - } else { - hcintmsk |= HCINTMSK_NAK; - hcintmsk |= HCINTMSK_NYET; - if (chan->do_ping) - hcintmsk |= HCINTMSK_ACK; - } - - if (chan->do_split) { - hcintmsk |= HCINTMSK_NAK; - if (chan->complete_split) - hcintmsk |= HCINTMSK_NYET; - else - hcintmsk |= HCINTMSK_ACK; - } - - if (chan->error_state) - hcintmsk |= HCINTMSK_ACK; + switch (hsotg->dr_mode) { + case USB_DR_MODE_HOST: + dwc2_force_mode(hsotg, true); break; - - case USB_ENDPOINT_XFER_INT: - if (dbg_perio()) - dev_vdbg(hsotg->dev, "intr\n"); - hcintmsk |= HCINTMSK_XFERCOMPL; - hcintmsk |= HCINTMSK_NAK; - hcintmsk |= HCINTMSK_STALL; - hcintmsk |= HCINTMSK_XACTERR; - hcintmsk |= HCINTMSK_DATATGLERR; - hcintmsk |= HCINTMSK_FRMOVRUN; - - if (chan->ep_is_in) - hcintmsk |= HCINTMSK_BBLERR; - if (chan->error_state) - hcintmsk |= HCINTMSK_ACK; - if (chan->do_split) { - if (chan->complete_split) - hcintmsk |= HCINTMSK_NYET; - else - hcintmsk |= HCINTMSK_ACK; - } + case USB_DR_MODE_PERIPHERAL: + dwc2_force_mode(hsotg, false); break; - - case USB_ENDPOINT_XFER_ISOC: - if (dbg_perio()) - dev_vdbg(hsotg->dev, "isoc\n"); - hcintmsk |= HCINTMSK_XFERCOMPL; - hcintmsk |= HCINTMSK_FRMOVRUN; - hcintmsk |= HCINTMSK_ACK; - - if (chan->ep_is_in) { - hcintmsk |= HCINTMSK_XACTERR; - hcintmsk |= HCINTMSK_BBLERR; - } + case USB_DR_MODE_OTG: + dwc2_clear_force_mode(hsotg); break; default: - dev_err(hsotg->dev, "## Unknown EP type ##\n"); + dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n", + __func__, hsotg->dr_mode); break; } - dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); -} - -static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 hcintmsk = HCINTMSK_CHHLTD; - /* - * For Descriptor DMA mode core halts the channel on AHB error. - * Interrupt is not required. + * NOTE: This is required for some rockchip soc based + * platforms. 
*/ - if (hsotg->core_params->dma_desc_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "desc DMA disabled\n"); - hcintmsk |= HCINTMSK_AHBERR; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "desc DMA enabled\n"); - if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) - hcintmsk |= HCINTMSK_XFERCOMPL; - } - - if (chan->error_state && !chan->do_split && - chan->ep_type != USB_ENDPOINT_XFER_ISOC) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "setting ACK\n"); - hcintmsk |= HCINTMSK_ACK; - if (chan->ep_is_in) { - hcintmsk |= HCINTMSK_DATATGLERR; - if (chan->ep_type != USB_ENDPOINT_XFER_INT) - hcintmsk |= HCINTMSK_NAK; - } - } - - dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); -} - -static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 intmsk; - - if (hsotg->core_params->dma_enable > 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA enabled\n"); - dwc2_hc_enable_dma_ints(hsotg, chan); - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA disabled\n"); - dwc2_hc_enable_slave_ints(hsotg, chan); - } - - /* Enable the top level host channel interrupt */ - intmsk = dwc2_readl(hsotg->regs + HAINTMSK); - intmsk |= 1 << chan->hc_num; - dwc2_writel(intmsk, hsotg->regs + HAINTMSK); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); - - /* Make sure host channel interrupts are enabled */ - intmsk = dwc2_readl(hsotg->regs + GINTMSK); - intmsk |= GINTSTS_HCHINT; - dwc2_writel(intmsk, hsotg->regs + GINTMSK); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); -} - -/** - * dwc2_hc_init() - Prepares a host channel for transferring packets to/from - * a specific endpoint - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * The HCCHARn register is set up with the characteristics specified in chan. - * Host channel interrupts that may need to be serviced while this transfer is - * in progress are enabled. 
- */ -void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) -{ - u8 hc_num = chan->hc_num; - u32 hcintmsk; - u32 hcchar; - u32 hcsplt = 0; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - /* Clear old interrupt conditions for this host channel */ - hcintmsk = 0xffffffff; - hcintmsk &= ~HCINTMSK_RESERVED14_31; - dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num)); - - /* Enable channel interrupts required for this transfer */ - dwc2_hc_enable_ints(hsotg, chan); - - /* - * Program the HCCHARn register with the endpoint characteristics for - * the current transfer - */ - hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; - hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; - if (chan->ep_is_in) - hcchar |= HCCHAR_EPDIR; - if (chan->speed == USB_SPEED_LOW) - hcchar |= HCCHAR_LSPDDEV; - hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; - hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num)); - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", - hc_num, hcchar); - - dev_vdbg(hsotg->dev, "%s: Channel %d\n", - __func__, hc_num); - dev_vdbg(hsotg->dev, " Dev Addr: %d\n", - chan->dev_addr); - dev_vdbg(hsotg->dev, " Ep Num: %d\n", - chan->ep_num); - dev_vdbg(hsotg->dev, " Is In: %d\n", - chan->ep_is_in); - dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", - chan->speed == USB_SPEED_LOW); - dev_vdbg(hsotg->dev, " Ep Type: %d\n", - chan->ep_type); - dev_vdbg(hsotg->dev, " Max Pkt: %d\n", - chan->max_packet); - } - - /* Program the HCSPLT register for SPLITs */ - if (chan->do_split) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, - "Programming HC %d with split --> %s\n", - hc_num, - chan->complete_split ? "CSPLIT" : "SSPLIT"); - if (chan->complete_split) - hcsplt |= HCSPLT_COMPSPLT; - hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & - HCSPLT_XACTPOS_MASK; - hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & - HCSPLT_HUBADDR_MASK; - hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & - HCSPLT_PRTADDR_MASK; - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, " comp split %d\n", - chan->complete_split); - dev_vdbg(hsotg->dev, " xact pos %d\n", - chan->xact_pos); - dev_vdbg(hsotg->dev, " hub addr %d\n", - chan->hub_addr); - dev_vdbg(hsotg->dev, " hub port %d\n", - chan->hub_port); - dev_vdbg(hsotg->dev, " is_in %d\n", - chan->ep_is_in); - dev_vdbg(hsotg->dev, " Max Pkt %d\n", - chan->max_packet); - dev_vdbg(hsotg->dev, " xferlen %d\n", - chan->xfer_len); - } - } - - dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num)); -} - -/** - * dwc2_hc_halt() - Attempts to halt a host channel - * - * @hsotg: Controller register interface - * @chan: Host channel to halt - * @halt_status: Reason for halting the channel - * - * This function should only be called in Slave mode or to abort a transfer in - * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the - * controller halts the channel when the transfer is complete or a condition - * occurs that requires application intervention. - * - * In slave mode, checks for a free request queue entry, then sets the Channel - * Enable and Channel Disable bits of the Host Channel Characteristics - * register of the specified channel to intiate the halt. If there is no free - * request queue entry, sets only the Channel Disable bit of the HCCHARn - * register to flush requests for this channel. 
In the latter case, sets a - * flag to indicate that the host channel needs to be halted when a request - * queue slot is open. - * - * In DMA mode, always sets the Channel Enable and Channel Disable bits of the - * HCCHARn register. The controller ensures there is space in the request - * queue before submitting the halt request. - * - * Some time may elapse before the core flushes any posted requests for this - * host channel and halts. The Channel Halted interrupt handler completes the - * deactivation of the host channel. - */ -void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, - enum dwc2_halt_status halt_status) -{ - u32 nptxsts, hptxsts, hcchar; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) - dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); - - if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || - halt_status == DWC2_HC_XFER_AHB_ERR) { - /* - * Disable all channel interrupts except Ch Halted. The QTD - * and QH state associated with this transfer has been cleared - * (in the case of URB_DEQUEUE), so the channel needs to be - * shut down carefully to prevent crashes. - */ - u32 hcintmsk = HCINTMSK_CHHLTD; - - dev_vdbg(hsotg->dev, "dequeue/error\n"); - dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); - - /* - * Make sure no other interrupts besides halt are currently - * pending. Handling another interrupt could cause a crash due - * to the QTD and QH state. - */ - dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num)); - - /* - * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR - * even if the channel was already halted for some other - * reason - */ - chan->halt_status = halt_status; - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - if (!(hcchar & HCCHAR_CHENA)) { - /* - * The channel is either already halted or it hasn't - * started yet. In DMA mode, the transfer may halt if - * it finishes normally or a condition occurs that - * requires driver intervention. Don't want to halt - * the channel again. In either Slave or DMA mode, - * it's possible that the transfer has been assigned - * to a channel, but not started yet when an URB is - * dequeued. Don't want to halt a channel that hasn't - * started yet. - */ - return; - } - } - if (chan->halt_pending) { - /* - * A halt has already been issued for this channel. This might - * happen when a transfer is aborted by a higher level in - * the stack. 
- */ - dev_vdbg(hsotg->dev, - "*** %s: Channel %d, chan->halt_pending already set ***\n", - __func__, chan->hc_num); - return; - } - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - - /* No need to set the bit in DDMA for disabling the channel */ - /* TODO check it everywhere channel is disabled */ - if (hsotg->core_params->dma_desc_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "desc DMA disabled\n"); - hcchar |= HCCHAR_CHENA; - } else { - if (dbg_hc(chan)) - dev_dbg(hsotg->dev, "desc DMA enabled\n"); - } - hcchar |= HCCHAR_CHDIS; - - if (hsotg->core_params->dma_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA not enabled\n"); - hcchar |= HCCHAR_CHENA; - - /* Check for space in the request queue to issue the halt */ - if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || - chan->ep_type == USB_ENDPOINT_XFER_BULK) { - dev_vdbg(hsotg->dev, "control/bulk\n"); - nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS); - if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { - dev_vdbg(hsotg->dev, "Disabling channel\n"); - hcchar &= ~HCCHAR_CHENA; - } - } else { - if (dbg_perio()) - dev_vdbg(hsotg->dev, "isoc/intr\n"); - hptxsts = dwc2_readl(hsotg->regs + HPTXSTS); - if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || - hsotg->queuing_high_bandwidth) { - if (dbg_perio()) - dev_vdbg(hsotg->dev, "Disabling channel\n"); - hcchar &= ~HCCHAR_CHENA; - } - } - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "DMA enabled\n"); - } - - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - chan->halt_status = halt_status; - - if (hcchar & HCCHAR_CHENA) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Channel enabled\n"); - chan->halt_pending = 1; - chan->halt_on_queue = 0; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Channel disabled\n"); - chan->halt_on_queue = 1; - } - - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", - hcchar); - dev_vdbg(hsotg->dev, " halt_pending: %d\n", - chan->halt_pending); - dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", - chan->halt_on_queue); - dev_vdbg(hsotg->dev, " halt_status: %d\n", - chan->halt_status); - } -} - -/** - * dwc2_hc_cleanup() - Clears the transfer state for a host channel - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Identifies the host channel to clean up - * - * This function is normally called after a transfer is done and the host - * channel is being released - */ -void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) -{ - u32 hcintmsk; - - chan->xfer_started = 0; - - /* - * Clear channel interrupt enables and any unhandled channel interrupt - * conditions - */ - dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num)); - hcintmsk = 0xffffffff; - hcintmsk &= ~HCINTMSK_RESERVED14_31; - dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num)); -} - -/** - * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in - * which frame a periodic transfer should occur - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Identifies the host channel to set up and its properties - * @hcchar: Current value of the HCCHAR register for the specified host channel - * - * This function has no effect on non-periodic transfers - */ -static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan, u32 *hcchar) -{ - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - /* 1 if _next_ frame is odd, 0 if it's even */ - if 
(!(dwc2_hcd_get_frame_number(hsotg) & 0x1)) - *hcchar |= HCCHAR_ODDFRM; - } -} - -static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) -{ - /* Set up the initial PID for the transfer */ - if (chan->speed == USB_SPEED_HIGH) { - if (chan->ep_is_in) { - if (chan->multi_count == 1) - chan->data_pid_start = DWC2_HC_PID_DATA0; - else if (chan->multi_count == 2) - chan->data_pid_start = DWC2_HC_PID_DATA1; - else - chan->data_pid_start = DWC2_HC_PID_DATA2; - } else { - if (chan->multi_count == 1) - chan->data_pid_start = DWC2_HC_PID_DATA0; - else - chan->data_pid_start = DWC2_HC_PID_MDATA; - } - } else { - chan->data_pid_start = DWC2_HC_PID_DATA0; - } -} - -/** - * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with - * the Host Channel - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * This function should only be called in Slave mode. For a channel associated - * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel - * associated with a periodic EP, the periodic Tx FIFO is written. - * - * Upon return the xfer_buf and xfer_count fields in chan are incremented by - * the number of bytes written to the Tx FIFO. - */ -static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 i; - u32 remaining_count; - u32 byte_count; - u32 dword_count; - u32 __iomem *data_fifo; - u32 *data_buf = (u32 *)chan->xfer_buf; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); - - remaining_count = chan->xfer_len - chan->xfer_count; - if (remaining_count > chan->max_packet) - byte_count = chan->max_packet; - else - byte_count = remaining_count; - - dword_count = (byte_count + 3) / 4; - - if (((unsigned long)data_buf & 0x3) == 0) { - /* xfer_buf is DWORD aligned */ - for (i = 0; i < dword_count; i++, data_buf++) - dwc2_writel(*data_buf, data_fifo); - } else { - /* xfer_buf is not DWORD aligned */ - for (i = 0; i < dword_count; i++, data_buf++) { - u32 data = data_buf[0] | data_buf[1] << 8 | - data_buf[2] << 16 | data_buf[3] << 24; - dwc2_writel(data, data_fifo); - } - } - - chan->xfer_count += byte_count; - chan->xfer_buf += byte_count; -} - -/** - * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host - * channel and starts the transfer - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel. The xfer_len value - * may be reduced to accommodate the max widths of the XferSize and - * PktCnt fields in the HCTSIZn register. The multi_count value may be - * changed to reflect the final xfer_len value. - * - * This function may be called in either Slave mode or DMA mode. In Slave mode, - * the caller must ensure that there is sufficient space in the request queue - * and Tx Data FIFO. - * - * For an OUT transfer in Slave mode, it loads a data packet into the - * appropriate FIFO. If necessary, additional data packets are loaded in the - * Host ISR. - * - * For an IN transfer in Slave mode, a data packet is requested. The data - * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, - * additional data packets are requested in the Host ISR. - * - * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ - * register along with a packet count of 1 and the channel is enabled. This - * causes a single PING transaction to occur. 
Other fields in HCTSIZ are - * simply set to 0 since no data transfer occurs in this case. - * - * For a PING transfer in DMA mode, the HCTSIZ register is initialized with - * all the information required to perform the subsequent data transfer. In - * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the - * controller performs the entire PING protocol, then starts the data - * transfer. - */ -void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; - u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; - u32 hcchar; - u32 hctsiz = 0; - u16 num_packets; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - if (chan->do_ping) { - if (hsotg->core_params->dma_enable <= 0) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "ping, no DMA\n"); - dwc2_hc_do_ping(hsotg, chan); - chan->xfer_started = 1; - return; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "ping, DMA\n"); - hctsiz |= TSIZ_DOPNG; - } - } - - if (chan->do_split) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "split\n"); - num_packets = 1; - - if (chan->complete_split && !chan->ep_is_in) - /* - * For CSPLIT OUT Transfer, set the size to 0 so the - * core doesn't expect any data written to the FIFO - */ - chan->xfer_len = 0; - else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) - chan->xfer_len = chan->max_packet; - else if (!chan->ep_is_in && chan->xfer_len > 188) - chan->xfer_len = 188; - - hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & - TSIZ_XFERSIZE_MASK; - } else { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "no split\n"); - /* - * Ensure that the transfer length and packet count will fit - * in the widths allocated for them in the HCTSIZn register - */ - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - /* - * Make sure the transfer size is no larger than one - * (micro)frame's worth of data. (A check was done - * when the periodic transfer was accepted to ensure - * that a (micro)frame's worth of data can be - * programmed into a channel.) 
- */ - u32 max_periodic_len = - chan->multi_count * chan->max_packet; - - if (chan->xfer_len > max_periodic_len) - chan->xfer_len = max_periodic_len; - } else if (chan->xfer_len > max_hc_xfer_size) { - /* - * Make sure that xfer_len is a multiple of max packet - * size - */ - chan->xfer_len = - max_hc_xfer_size - chan->max_packet + 1; - } - - if (chan->xfer_len > 0) { - num_packets = (chan->xfer_len + chan->max_packet - 1) / - chan->max_packet; - if (num_packets > max_hc_pkt_count) { - num_packets = max_hc_pkt_count; - chan->xfer_len = num_packets * chan->max_packet; - } - } else { - /* Need 1 packet for transfer length of 0 */ - num_packets = 1; - } - - if (chan->ep_is_in) - /* - * Always program an integral # of max packets for IN - * transfers - */ - chan->xfer_len = num_packets * chan->max_packet; - - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) - /* - * Make sure that the multi_count field matches the - * actual transfer length - */ - chan->multi_count = num_packets; - - if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) - dwc2_set_pid_isoc(chan); - - hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & - TSIZ_XFERSIZE_MASK; - } - - chan->start_pkt_count = num_packets; - hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; - hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & - TSIZ_SC_MC_PID_MASK; - dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", - hctsiz, chan->hc_num); - - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - dev_vdbg(hsotg->dev, " Xfer Size: %d\n", - (hctsiz & TSIZ_XFERSIZE_MASK) >> - TSIZ_XFERSIZE_SHIFT); - dev_vdbg(hsotg->dev, " Num Pkts: %d\n", - (hctsiz & TSIZ_PKTCNT_MASK) >> - TSIZ_PKTCNT_SHIFT); - dev_vdbg(hsotg->dev, " Start PID: %d\n", - (hctsiz & TSIZ_SC_MC_PID_MASK) >> - TSIZ_SC_MC_PID_SHIFT); - } - - if (hsotg->core_params->dma_enable > 0) { - dma_addr_t dma_addr; - - if (chan->align_buf) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "align_buf\n"); - dma_addr = chan->align_buf; - } else { - dma_addr = chan->xfer_dma; - } - dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", - (unsigned long)dma_addr, chan->hc_num); - } - - /* Start the split */ - if (chan->do_split) { - u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num)); - - hcsplt |= HCSPLT_SPLTENA; - dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num)); - } - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - hcchar &= ~HCCHAR_MULTICNT_MASK; - hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & - HCCHAR_MULTICNT_MASK; - dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); - - if (hcchar & HCCHAR_CHDIS) - dev_warn(hsotg->dev, - "%s: chdis set, channel %d, hcchar 0x%08x\n", - __func__, chan->hc_num, hcchar); - - /* Set host channel enable after all other setup is complete */ - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", - (hcchar & HCCHAR_MULTICNT_MASK) >> - HCCHAR_MULTICNT_SHIFT); - - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, - chan->hc_num); - - chan->xfer_started = 1; - chan->requests++; - - if (hsotg->core_params->dma_enable <= 0 && - !chan->ep_is_in && chan->xfer_len > 0) - /* Load OUT packet into the appropriate Tx FIFO */ - dwc2_hc_write_packet(hsotg, chan); -} - -/** - * 
dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a - * host channel and starts the transfer in Descriptor DMA mode - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. - * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field - * with micro-frame bitmap. - * - * Initializes HCDMA register with descriptor list address and CTD value then - * starts the transfer via enabling the channel. - */ -void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) -{ - u32 hcchar; - u32 hc_dma; - u32 hctsiz = 0; - - if (chan->do_ping) - hctsiz |= TSIZ_DOPNG; - - if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) - dwc2_set_pid_isoc(chan); - - /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ - hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & - TSIZ_SC_MC_PID_MASK; - - /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ - hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; - - /* Non-zero only for high-speed interrupt endpoints */ - hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; - - if (dbg_hc(chan)) { - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - dev_vdbg(hsotg->dev, " Start PID: %d\n", - chan->data_pid_start); - dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1); - } - - dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - - hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK; - - /* Always start from first descriptor */ - hc_dma &= ~HCDMA_CTD_MASK; - dwc2_writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n", - hc_dma, chan->hc_num); - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - hcchar &= ~HCCHAR_MULTICNT_MASK; - hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & - HCCHAR_MULTICNT_MASK; - - if (hcchar & HCCHAR_CHDIS) - dev_warn(hsotg->dev, - "%s: chdis set, channel %d, hcchar 0x%08x\n", - __func__, chan->hc_num, hcchar); - - /* Set host channel enable after all other setup is complete */ - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", - (hcchar & HCCHAR_MULTICNT_MASK) >> - HCCHAR_MULTICNT_SHIFT); - - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, - chan->hc_num); - - chan->xfer_started = 1; - chan->requests++; + msleep(50); } -/** - * dwc2_hc_continue_transfer() - Continues a data transfer that was started by - * a previous call to dwc2_hc_start_transfer() - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * The caller must ensure there is sufficient space in the request queue and Tx - * Data FIFO. This function should only be called in Slave mode. In DMA mode, - * the controller acts autonomously to complete transfers programmed to a host - * channel. - * - * For an OUT transfer, a new data packet is loaded into the appropriate FIFO - * if there is any data remaining to be queued. For an IN transfer, another - * data packet is always requested. For the SETUP phase of a control transfer, - * this function does nothing. +/* + * Do core a soft reset of the core. Be careful with this because it + * resets all the internal state machines of the core. 
* - * Return: 1 if a new request is queued, 0 if no more requests are required - * for this transfer + * Additionally this will apply force mode as per the hsotg->dr_mode + * parameter. */ -int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan) +int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg) { - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - - if (chan->do_split) - /* SPLITs always queue just once per channel */ - return 0; - - if (chan->data_pid_start == DWC2_HC_PID_SETUP) - /* SETUPs are queued only once since they can't be NAK'd */ - return 0; - - if (chan->ep_is_in) { - /* - * Always queue another request for other IN transfers. If - * back-to-back INs are issued and NAKs are received for both, - * the driver may still be processing the first NAK when the - * second NAK is received. When the interrupt handler clears - * the NAK interrupt for the first NAK, the second NAK will - * not be seen. So we can't depend on the NAK interrupt - * handler to requeue a NAK'd request. Instead, IN requests - * are issued each time this function is called. When the - * transfer completes, the extra requests for the channel will - * be flushed. - */ - u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - - dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", - hcchar); - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); - chan->requests++; - return 1; - } - - /* OUT transfers */ - - if (chan->xfer_count < chan->xfer_len) { - if (chan->ep_type == USB_ENDPOINT_XFER_INT || - chan->ep_type == USB_ENDPOINT_XFER_ISOC) { - u32 hcchar = dwc2_readl(hsotg->regs + - HCCHAR(chan->hc_num)); - - dwc2_hc_set_even_odd_frame(hsotg, chan, - &hcchar); - } + int retval; - /* Load OUT packet into the appropriate Tx FIFO */ - dwc2_hc_write_packet(hsotg, chan); - chan->requests++; - return 1; - } + retval = dwc2_core_reset(hsotg); + if (retval) + return retval; + dwc2_force_dr_mode(hsotg); return 0; } /** - * dwc2_hc_do_ping() - Starts a PING transfer - * - * @hsotg: Programming view of DWC_otg controller - * @chan: Information needed to initialize the host channel - * - * This function should only be called in Slave mode. The Do Ping bit is set in - * the HCTSIZ register, then the channel is enabled. 
- */ -void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) -{ - u32 hcchar; - u32 hctsiz; - - if (dbg_hc(chan)) - dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, - chan->hc_num); - - - hctsiz = TSIZ_DOPNG; - hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; - dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); - - hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); - hcchar |= HCCHAR_CHENA; - hcchar &= ~HCCHAR_CHDIS; - dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); -} - -/** - * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for - * the HFIR register according to PHY type and speed - * - * @hsotg: Programming view of DWC_otg controller - * - * NOTE: The caller can modify the value of the HFIR register only after the - * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) - * has been set - */ -u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) -{ - u32 usbcfg; - u32 hprt0; - int clock = 60; /* default value */ - - usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - hprt0 = dwc2_readl(hsotg->regs + HPRT0); - - if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && - !(usbcfg & GUSBCFG_PHYIF16)) - clock = 60; - if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == - GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) - clock = 48; - if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && - !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) - clock = 30; - if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && - !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) - clock = 60; - if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && - !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) - clock = 48; - if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) - clock = 48; - if ((usbcfg & GUSBCFG_PHYSEL) && - hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) - clock = 48; - - if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) - /* High speed case */ - return 125 * clock; - else - /* FS/LS case */ - return 1000 * clock; -} - -/** - * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination - * buffer - * - * @core_if: Programming view of DWC_otg controller - * @dest: Destination buffer for the packet - * @bytes: Number of bytes to copy to the destination - */ -void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) -{ - u32 __iomem *fifo = hsotg->regs + HCFIFO(0); - u32 *data_buf = (u32 *)dest; - int word_count = (bytes + 3) / 4; - int i; - - /* - * Todo: Account for the case where dest is not dword aligned. This - * requires reading data from the FIFO into a u32 temp buffer, then - * moving it into the data buffer. 
- */ - - dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); - - for (i = 0; i < word_count; i++, data_buf++) - *data_buf = dwc2_readl(fifo); -} - -/** * dwc2_dump_host_registers() - Prints the host registers * * @hsotg: Programming view of DWC_otg controller @@ -2485,6 +734,29 @@ void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) hsotg->core_params->dma_desc_enable = val; } +void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val > 0 && (hsotg->core_params->dma_enable <= 0 || + !hsotg->hw_params.dma_desc_enable)) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", + val); + val = (hsotg->core_params->dma_enable > 0 && + hsotg->hw_params.dma_desc_enable); + } + + hsotg->core_params->dma_desc_fs_enable = val; + dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); +} + void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, int val) { @@ -3016,6 +1288,7 @@ void dwc2_set_parameters(struct dwc2_hsotg *hsotg, dwc2_set_param_otg_cap(hsotg, params->otg_cap); dwc2_set_param_dma_enable(hsotg, params->dma_enable); dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); + dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); dwc2_set_param_host_support_fs_ls_low_power(hsotg, params->host_support_fs_ls_low_power); dwc2_set_param_enable_dynamic_fifo(hsotg, @@ -3052,6 +1325,79 @@ void dwc2_set_parameters(struct dwc2_hsotg *hsotg, dwc2_set_param_hibernation(hsotg, params->hibernation); } +/* + * Forces either host or device mode if the controller is not + * currently in that mode. + * + * Returns true if the mode was forced. + */ +static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) +{ + if (host && dwc2_is_host_mode(hsotg)) + return false; + else if (!host && dwc2_is_device_mode(hsotg)) + return false; + + return dwc2_force_mode(hsotg, host); +} + +/* + * Gets host hardware parameters. Forces host mode if not currently in + * host mode. Should be called immediately after a core soft reset in + * order to get the reset values. + */ +static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + u32 gnptxfsiz; + u32 hptxfsiz; + bool forced; + + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + return; + + forced = dwc2_force_mode_if_needed(hsotg, true); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; + hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + +/* + * Gets device hardware parameters. Forces device mode if not + * currently in device mode. Should be called immediately after a core + * soft reset in order to get the reset values. 
+ */ +static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + bool forced; + u32 gnptxfsiz; + + if (hsotg->dr_mode == USB_DR_MODE_HOST) + return; + + forced = dwc2_force_mode_if_needed(hsotg, false); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + /** * During device initialization, read various hardware configuration * registers and interpret the contents. @@ -3061,8 +1407,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) struct dwc2_hw_params *hw = &hsotg->hw_params; unsigned width; u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; - u32 hptxfsiz, grxfsiz, gnptxfsiz; - u32 gusbcfg; + u32 grxfsiz; /* * Attempt to ensure this device is really a DWC_otg Controller. @@ -3094,20 +1439,16 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); - /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */ - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg |= GUSBCFG_FORCEHOSTMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - usleep_range(100000, 150000); + /* + * Host specific hardware parameters. Reading these parameters + * requires the controller to be in host mode. The mode will + * be forced, if necessary, to read these values. + */ + dwc2_get_host_hwparams(hsotg); + dwc2_get_dev_hwparams(hsotg); - gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); - hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); - dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); - gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG); - gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; - dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); - usleep_range(100000, 150000); + /* hwcfg1 */ + hw->dev_ep_dirs = hwcfg1; /* hwcfg2 */ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> @@ -3137,13 +1478,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; hw->max_transfer_size = (1 << (width + 11)) - 1; - /* - * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates - * coherent buffers with this size, and if it's too large we can - * exhaust the coherent DMA pool. - */ - if (hw->max_transfer_size > 65535) - hw->max_transfer_size = 65535; width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; hw->max_packet_count = (1 << (width + 4)) - 1; @@ -3163,10 +1497,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) /* fifo sizes */ hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> GRXFSIZ_DEPTH_SHIFT; - hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; - hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; dev_dbg(hsotg->dev, "Detected values from hardware:\n"); dev_dbg(hsotg->dev, " op_mode=%d\n", @@ -3275,6 +1605,43 @@ void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); } +/* Returns the controller's GHWCFG2.OTG_MODE. */ +unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg) +{ + u32 ghwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); + + return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >> + GHWCFG2_OP_MODE_SHIFT; +} + +/* Returns true if the controller is capable of DRD. 
*/ +bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg) +{ + unsigned op_mode = dwc2_op_mode(hsotg); + + return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) || + (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) || + (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE); +} + +/* Returns true if the controller is host-only. */ +bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg) +{ + unsigned op_mode = dwc2_op_mode(hsotg); + + return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) || + (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST); +} + +/* Returns true if the controller is device-only. */ +bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg) +{ + unsigned op_mode = dwc2_op_mode(hsotg); + + return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || + (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE); +} + MODULE_DESCRIPTION("DESIGNWARE HS OTG Core"); MODULE_AUTHOR("Synopsys, Inc."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index a66d3cb62b65..3c58d633ce80 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -44,6 +44,26 @@ #include <linux/usb/phy.h> #include "hw.h" +/* + * Suggested defines for tracers: + * - no_printk: Disable tracing + * - pr_info: Print this info to the console + * - trace_printk: Print this info to trace buffer (good for verbose logging) + */ + +#define DWC2_TRACE_SCHEDULER no_printk +#define DWC2_TRACE_SCHEDULER_VB no_printk + +/* Detailed scheduler tracing, but won't overwhelm console */ +#define dwc2_sch_dbg(hsotg, fmt, ...) \ + DWC2_TRACE_SCHEDULER(pr_fmt("%s: SCH: " fmt), \ + dev_name(hsotg->dev), ##__VA_ARGS__) + +/* Verbose scheduler tracing */ +#define dwc2_sch_vdbg(hsotg, fmt, ...) \ + DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ + dev_name(hsotg->dev), ##__VA_ARGS__) + static inline u32 dwc2_readl(const void __iomem *addr) { u32 value = __raw_readl(addr); @@ -246,6 +266,13 @@ enum dwc2_ep0_state { * value for this if none is specified. * 0 - Address DMA * 1 - Descriptor DMA (default, if available) + * @dma_desc_fs_enable: When DMA mode is enabled, specifies whether to use + * address DMA mode or descriptor DMA mode for accessing + * the data FIFOs in Full Speed mode only. The driver + * will automatically detect the value for this if none is + * specified. + * 0 - Address DMA + * 1 - Descriptor DMA in FS (default, if available) * @speed: Specifies the maximum speed of operation in host and * device mode. The actual speed depends on the speed of * the attached device and the value of phy_type. 
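The GHWCFG2 helpers added above read the register directly, so they are usable before hsotg->hw_params has been populated. As a minimal sketch of how probe code might combine them to reject an impossible dr_mode early (the dwc2_check_dr_mode() name is hypothetical, not part of this patch):

static int dwc2_check_dr_mode(struct dwc2_hsotg *hsotg)
{
	/* Host-only silicon cannot honor a peripheral dr_mode */
	if (dwc2_hw_is_host(hsotg) &&
	    hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		return -EINVAL;

	/* Device-only silicon cannot honor a host dr_mode */
	if (dwc2_hw_is_device(hsotg) &&
	    hsotg->dr_mode == USB_DR_MODE_HOST)
		return -EINVAL;

	/* DRD-capable silicon accepts any dr_mode */
	return 0;
}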
@@ -375,6 +402,7 @@ struct dwc2_core_params { int otg_ver; int dma_enable; int dma_desc_enable; + int dma_desc_fs_enable; int speed; int enable_dynamic_fifo; int en_multiple_tx_fifo; @@ -451,15 +479,18 @@ struct dwc2_core_params { * 1 - 16 bits * 2 - 8 or 16 bits * @snpsid: Value from SNPSID register + * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) */ struct dwc2_hw_params { unsigned op_mode:3; unsigned arch:2; unsigned dma_desc_enable:1; + unsigned dma_desc_fs_enable:1; unsigned enable_dynamic_fifo:1; unsigned en_multiple_tx_fifo:1; unsigned host_rx_fifo_size:16; unsigned host_nperio_tx_fifo_size:16; + unsigned dev_nperio_tx_fifo_size:16; unsigned host_perio_tx_fifo_size:16; unsigned nperio_tx_q_depth:3; unsigned host_perio_tx_q_depth:3; @@ -476,6 +507,7 @@ struct dwc2_hw_params { unsigned power_optimized:1; unsigned utmi_phy_data_width:2; u32 snpsid; + u32 dev_ep_dirs; }; /* Size of control and EP0 buffers */ @@ -560,6 +592,84 @@ struct dwc2_hregs_backup { bool valid; }; +/* + * Constants related to high speed periodic scheduling + * + * We have a periodic schedule that is DWC2_HS_SCHEDULE_UFRAMES long. From a + * reservation point of view it's assumed that the schedule goes right back to + * the beginning after the end of the schedule. + * + * What does that mean for scheduling things with a long interval? It means + * we'll reserve time for them in every possible microframe that they could + * ever be scheduled in. ...but we'll still only actually schedule them as + * often as they were requested. + * + * We keep our schedule in a "bitmap" structure. This simplifies having + * to keep track of and merge intervals: we just let the bitmap code do most + * of the heavy lifting. In a way scheduling is much like memory allocation. + * + * We schedule 100us per uframe or 80% of 125us (the maximum amount you're + * supposed to schedule for periodic transfers). That's according to spec. + * + * Note that though we only schedule 80% of each microframe, the bitmap that we + * keep the schedule in is tightly packed (AKA it doesn't have 100us worth of + * space for each uFrame). + * + * Requirements: + * - DWC2_HS_SCHEDULE_UFRAMES must even divide 0x4000 (HFNUM_MAX_FRNUM + 1) + * - DWC2_HS_SCHEDULE_UFRAMES must be 8 times DWC2_LS_SCHEDULE_FRAMES (probably + * could be any multiple of 8 times DWC2_LS_SCHEDULE_FRAMES, but there might + * be bugs). The 8 comes from the USB spec: number of microframes per frame. + */ +#define DWC2_US_PER_UFRAME 125 +#define DWC2_HS_PERIODIC_US_PER_UFRAME 100 + +#define DWC2_HS_SCHEDULE_UFRAMES 8 +#define DWC2_HS_SCHEDULE_US (DWC2_HS_SCHEDULE_UFRAMES * \ + DWC2_HS_PERIODIC_US_PER_UFRAME) + +/* + * Constants related to low speed scheduling + * + * For high speed we schedule every 1us. For low speed that's a bit overkill, + * so we make up a unit called a "slice" that's worth 25us. There are 40 + * slices in a full frame and we can schedule 36 of those (90%) for periodic + * transfers. + * + * Our low speed schedule can be as short as 1 frame or could be longer. When + * we only schedule 1 frame it means that we'll need to reserve a time every + * frame even for things that only transfer very rarely, so something that runs + * every 2048 frames will get time reserved in every frame. Our low speed + * schedule can be longer and we'll be able to handle more overlap, but that + * will come at increased memory cost and increased time to schedule. 
+ * + * Note: one other advantage of a short low speed schedule is that if we mess + * up and miss scheduling we can jump in and use any of the slots that we + * happened to reserve. + * + * With 25 us per slice and 1 frame in the schedule, we only need 4 bytes for + * the schedule. There will be one schedule per TT. + * + * Requirements: + * - DWC2_US_PER_SLICE must evenly divide DWC2_LS_PERIODIC_US_PER_FRAME. + */ +#define DWC2_US_PER_SLICE 25 +#define DWC2_SLICES_PER_UFRAME (DWC2_US_PER_UFRAME / DWC2_US_PER_SLICE) + +#define DWC2_ROUND_US_TO_SLICE(us) \ + (DIV_ROUND_UP((us), DWC2_US_PER_SLICE) * \ + DWC2_US_PER_SLICE) + +#define DWC2_LS_PERIODIC_US_PER_FRAME \ + 900 +#define DWC2_LS_PERIODIC_SLICES_PER_FRAME \ + (DWC2_LS_PERIODIC_US_PER_FRAME / \ + DWC2_US_PER_SLICE) + +#define DWC2_LS_SCHEDULE_FRAMES 1 +#define DWC2_LS_SCHEDULE_SLICES (DWC2_LS_SCHEDULE_FRAMES * \ + DWC2_LS_PERIODIC_SLICES_PER_FRAME) + /** * struct dwc2_hsotg - Holds the state of the driver, including the non-periodic * and periodic schedules @@ -645,11 +755,14 @@ struct dwc2_hregs_backup { * periodic_sched_ready because it must be rescheduled for * the next frame. Otherwise, the item moves to * periodic_sched_inactive. + * @split_order: List keeping track of channels doing splits, in order. * @periodic_usecs: Total bandwidth claimed so far for periodic transfers. * This value is in microseconds per (micro)frame. The * assumption is that all periodic transfers may occur in * the same (micro)frame. - * @frame_usecs: Internal variable used by the microframe scheduler + * @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the + * host is in high speed mode; low speed schedules are + * stored elsewhere since we need one per TT. * @frame_number: Frame number read from the core at SOF. The value ranges * from 0 to HFNUM_MAX_FRNUM. * @periodic_qh_count: Count of periodic QHs, if using several eps. 
Used for @@ -676,6 +789,9 @@ struct dwc2_hregs_backup { * @otg_port: OTG port number * @frame_list: Frame list * @frame_list_dma: Frame list DMA address + * @frame_list_sz: Frame list size + * @desc_gen_cache: Kmem cache for generic descriptors + * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors * * These are for peripheral mode: * @@ -765,15 +881,19 @@ struct dwc2_hsotg { struct list_head periodic_sched_ready; struct list_head periodic_sched_assigned; struct list_head periodic_sched_queued; + struct list_head split_order; u16 periodic_usecs; - u16 frame_usecs[8]; + unsigned long hs_periodic_bitmap[ + DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)]; u16 frame_number; u16 periodic_qh_count; bool bus_suspended; + bool new_connection; + + u16 last_frame_num; #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS #define FRAME_NUM_ARRAY_SIZE 1000 - u16 last_frame_num; u16 *frame_num_array; u16 *last_frame_num_array; int frame_num_idx; @@ -794,6 +914,9 @@ struct dwc2_hsotg { u8 otg_port; u32 *frame_list; dma_addr_t frame_list_dma; + u32 frame_list_sz; + struct kmem_cache *desc_gen_cache; + struct kmem_cache *desc_hsisoc_cache; #ifdef DEBUG u32 frrem_samples; @@ -864,32 +987,13 @@ enum dwc2_halt_status { * The following functions support initialization of the core driver component * and the DWC_otg controller */ -extern void dwc2_core_host_init(struct dwc2_hsotg *hsotg); +extern int dwc2_core_reset(struct dwc2_hsotg *hsotg); +extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); -/* - * Host core Functions. - * The following functions support managing the DWC_otg controller in host - * mode. - */ -extern void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan); -extern void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, - enum dwc2_halt_status halt_status); -extern void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan); -extern void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg); -extern void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg); - -extern u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg); +void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); + extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); /* @@ -901,7 +1005,6 @@ extern void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes); extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num); extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg); -extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq); extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd); extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); @@ -942,6 +1045,16 @@ extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val); extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val); /* + * When DMA mode is enabled, specifies whether to use + * address DMA or DMA Descriptor mode with full speed devices + * for accessing the data FIFOs in host mode. + * 0 - address DMA + * 1 - FS DMA Descriptor (default, if available) + */ +extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, + int val); + +/* * Specifies the maximum speed of operation in host and device mode. * The actual speed depends on the speed of the attached device and * the value of phy_type. @@ -1110,6 +1223,31 @@ extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg); extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg); /* + * The following functions check the controller's OTG operation mode + * capability (GHWCFG2.OTG_MODE). + * + * These functions can be used before the internal hsotg->hw_params + * are read in and cached so they always read directly from the + * GHWCFG2 register. + */ +unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg); +bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg); +bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg); +bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg); + +/* + * Returns the mode of operation, host or device + */ +static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg) +{ + return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0; +} +static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg) +{ + return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0; +} + +/* * Dump core registers and SPRAM */ extern void dwc2_dump_dev_registers(struct dwc2_hsotg *hsotg); @@ -1133,6 +1271,8 @@ extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); +int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg); #else static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2) { return 0; } @@ -1150,20 +1290,37 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) +{ return 0; } +static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) +{ return 0; } #endif #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg); -extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg); +extern int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us); +extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg); +extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force); extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg); +int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg); +int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg); #else static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) { return 0; } -static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) {} +static inline int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, + int us) +{ return 0; } +static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {} +static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {} static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {} static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {} static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) { return 0; } +static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) +{
return 0; } +static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) +{ return 0; } + #endif #endif /* __DWC2_CORE_H__ */ diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 27daa42788b1..d85c5c9f96c1 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -86,9 +86,6 @@ static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg) hprt0 &= ~HPRT0_ENA; dwc2_writel(hprt0, hsotg->regs + HPRT0); } - - /* Clear interrupt */ - dwc2_writel(GINTSTS_PRTINT, hsotg->regs + GINTSTS); } /** @@ -98,11 +95,11 @@ static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg) */ static void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg) { - dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n", - dwc2_is_host_mode(hsotg) ? "Host" : "Device"); - /* Clear interrupt */ dwc2_writel(GINTSTS_MODEMIS, hsotg->regs + GINTSTS); + + dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n", + dwc2_is_host_mode(hsotg) ? "Host" : "Device"); } /** @@ -239,7 +236,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "a_suspend->a_peripheral (%d)\n", hsotg->op_state); spin_unlock(&hsotg->lock); - dwc2_hcd_disconnect(hsotg); + dwc2_hcd_disconnect(hsotg, false); spin_lock(&hsotg->lock); hsotg->op_state = OTG_STATE_A_PERIPHERAL; } else { @@ -276,9 +273,13 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg) */ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg) { - u32 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + u32 gintmsk; + + /* Clear interrupt */ + dwc2_writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS); /* Need to disable SOF interrupt immediately */ + gintmsk = dwc2_readl(hsotg->regs + GINTMSK); gintmsk &= ~GINTSTS_SOF; dwc2_writel(gintmsk, hsotg->regs + GINTMSK); @@ -295,9 +296,6 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg) queue_work(hsotg->wq_otg, &hsotg->wf_otg); spin_lock(&hsotg->lock); } - - /* Clear interrupt */ - dwc2_writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS); } /** @@ -315,12 +313,12 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) { int ret; - dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n", - hsotg->lx_state); - /* Clear interrupt */ dwc2_writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n", + hsotg->lx_state); + if (dwc2_is_device_mode(hsotg)) { if (hsotg->lx_state == DWC2_L2) { ret = dwc2_exit_hibernation(hsotg, true); @@ -347,6 +345,10 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) { int ret; + + /* Clear interrupt */ + dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n"); dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state); @@ -368,10 +370,9 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) /* Change to L0 state */ hsotg->lx_state = DWC2_L0; } else { - if (hsotg->core_params->hibernation) { - dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS); + if (hsotg->core_params->hibernation) return; - } + if (hsotg->lx_state != DWC2_L1) { u32 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL); @@ -385,9 +386,6 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) hsotg->lx_state = DWC2_L0; } } - - /* Clear interrupt */ - dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS); } /* 
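The core_intr.c hunks above all apply the same fix: the write-to-clear of the GINTSTS bit moves from the bottom of each handler to the top, so an edge that re-asserts while the handler is still running stays pending instead of being wiped out by the late clear. A minimal sketch of the resulting handler shape (the handler name and GINTSTS_EXAMPLE bit are placeholders, not definitions from this driver):

static void dwc2_handle_example_intr(struct dwc2_hsotg *hsotg)
{
	/* Ack first: a new edge arriving during handling is not lost */
	dwc2_writel(GINTSTS_EXAMPLE, hsotg->regs + GINTSTS);

	/* ... service the condition ... */
}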
@@ -396,14 +394,14 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) */ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg) { + dwc2_writel(GINTSTS_DISCONNINT, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "++Disconnect Detected Interrupt++ (%s) %s\n", dwc2_is_host_mode(hsotg) ? "Host" : "Device", dwc2_op_state_str(hsotg)); if (hsotg->op_state == OTG_STATE_A_HOST) - dwc2_hcd_disconnect(hsotg); - - dwc2_writel(GINTSTS_DISCONNINT, hsotg->regs + GINTSTS); + dwc2_hcd_disconnect(hsotg, false); } /* @@ -419,6 +417,9 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg) u32 dsts; int ret; + /* Clear interrupt */ + dwc2_writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS); + dev_dbg(hsotg->dev, "USB SUSPEND\n"); if (dwc2_is_device_mode(hsotg)) { @@ -437,7 +438,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg) if (!dwc2_is_device_connected(hsotg)) { dev_dbg(hsotg->dev, "ignore suspend request before enumeration\n"); - goto clear_int; + return; } ret = dwc2_enter_hibernation(hsotg); @@ -476,10 +477,6 @@ skip_power_saving: hsotg->op_state = OTG_STATE_A_HOST; } } - -clear_int: - /* Clear interrupt */ - dwc2_writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS); } #define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \ diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0abf73c91beb..e9940dd004e4 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2095,7 +2095,7 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) */ /* catch both EnumSpd_FS and EnumSpd_FS48 */ - switch (dsts & DSTS_ENUMSPD_MASK) { + switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) { case DSTS_ENUMSPD_FS: case DSTS_ENUMSPD_FS48: hsotg->gadget.speed = USB_SPEED_FULL; @@ -2244,54 +2244,6 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic) GINTSTS_RXFLVL) /** - * dwc2_hsotg_corereset - issue softreset to the core - * @hsotg: The device state - * - * Issue a soft reset to the core, and await the core finishing it. 
- */ -static int dwc2_hsotg_corereset(struct dwc2_hsotg *hsotg) -{ - int timeout; - u32 grstctl; - - dev_dbg(hsotg->dev, "resetting core\n"); - - /* issue soft reset */ - dwc2_writel(GRSTCTL_CSFTRST, hsotg->regs + GRSTCTL); - - timeout = 10000; - do { - grstctl = dwc2_readl(hsotg->regs + GRSTCTL); - } while ((grstctl & GRSTCTL_CSFTRST) && timeout-- > 0); - - if (grstctl & GRSTCTL_CSFTRST) { - dev_err(hsotg->dev, "Failed to get CSftRst asserted\n"); - return -EINVAL; - } - - timeout = 10000; - - while (1) { - u32 grstctl = dwc2_readl(hsotg->regs + GRSTCTL); - - if (timeout-- < 0) { - dev_info(hsotg->dev, - "%s: reset failed, GRSTCTL=%08x\n", - __func__, grstctl); - return -ETIMEDOUT; - } - - if (!(grstctl & GRSTCTL_AHBIDLE)) - continue; - - break; /* reset done */ - } - - dev_dbg(hsotg->dev, "reset successful\n"); - return 0; -} - -/** * dwc2_hsotg_core_init - issue softreset to the core * @hsotg: The device state * @@ -2307,7 +2259,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); if (!is_usb_reset) - if (dwc2_hsotg_corereset(hsotg)) + if (dwc2_core_reset(hsotg)) return; /* @@ -2585,7 +2537,7 @@ irq_retry: if (gintsts & GINTSTS_GOUTNAKEFF) { dev_info(hsotg->dev, "GOUTNakEff triggered\n"); - dwc2_writel(DCTL_CGOUTNAK, hsotg->regs + DCTL); + __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK); dwc2_hsotg_dump(hsotg); } @@ -2593,7 +2545,7 @@ irq_retry: if (gintsts & GINTSTS_GINNAKEFF) { dev_info(hsotg->dev, "GINNakEff triggered\n"); - dwc2_writel(DCTL_CGNPINNAK, hsotg->regs + DCTL); + __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK); dwc2_hsotg_dump(hsotg); } @@ -2911,15 +2863,15 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, "%s: timeout DIEPINT.NAKEFF\n", __func__); } else { /* Clear any pending nak effect interrupt */ - dwc2_writel(GINTSTS_GINNAKEFF, hsotg->regs + GINTSTS); + dwc2_writel(GINTSTS_GOUTNAKEFF, hsotg->regs + GINTSTS); - __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK); + __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK); /* Wait for global nak to take effect */ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, - GINTSTS_GINNAKEFF, 100)) + GINTSTS_GOUTNAKEFF, 100)) dev_warn(hsotg->dev, - "%s: timeout GINTSTS.GINNAKEFF\n", __func__); + "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__); } /* Disable ep */ @@ -2944,7 +2896,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, /* TODO: Flush shared tx fifo */ } else { /* Remove global NAKs */ - __bic32(hsotg->regs + DCTL, DCTL_SGNPINNAK); + __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK); } } @@ -3403,8 +3355,8 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) /* check hardware configuration */ - cfg = dwc2_readl(hsotg->regs + GHWCFG2); - hsotg->num_of_eps = (cfg >> GHWCFG2_NUM_DEV_EP_SHIFT) & 0xF; + hsotg->num_of_eps = hsotg->hw_params.num_dev_ep; + /* Add ep0 */ hsotg->num_of_eps++; @@ -3415,7 +3367,7 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) /* Same dwc2_hsotg_ep is used in both directions for ep0 */ hsotg->eps_out[0] = hsotg->eps_in[0]; - cfg = dwc2_readl(hsotg->regs + GHWCFG1); + cfg = hsotg->hw_params.dev_ep_dirs; for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) { ep_type = cfg & 3; /* Direction in or both */ @@ -3434,11 +3386,8 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) } } - cfg = dwc2_readl(hsotg->regs + GHWCFG3); - hsotg->fifo_mem = (cfg >> GHWCFG3_DFIFO_DEPTH_SHIFT); - - cfg = dwc2_readl(hsotg->regs + GHWCFG4); - hsotg->dedicated_fifos = (cfg >> GHWCFG4_DED_FIFO_SHIFT) & 1; + hsotg->fifo_mem = 
hsotg->hw_params.total_fifo_size; + hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo; dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n", hsotg->num_of_eps, @@ -3563,6 +3512,17 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo)); /* Device tree specific probe */ dwc2_hsotg_of_probe(hsotg); + + /* Check against largest possible value. */ + if (hsotg->g_np_g_tx_fifo_sz > + hsotg->hw_params.dev_nperio_tx_fifo_size) { + dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n", + hsotg->g_np_g_tx_fifo_sz, + hsotg->hw_params.dev_nperio_tx_fifo_size); + hsotg->g_np_g_tx_fifo_sz = + hsotg->hw_params.dev_nperio_tx_fifo_size; + } + /* Dump fifo information */ dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", hsotg->g_np_g_tx_fifo_sz); @@ -3579,31 +3539,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) hsotg->op_state = OTG_STATE_B_PERIPHERAL; - /* - * Force Device mode before initialization. - * This allows correctly configuring fifo for device mode. - */ - __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEHOSTMODE); - __orr32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE); - - /* - * According to Synopsys databook, this sleep is needed for the force - * device mode to take effect. - */ - msleep(25); - - dwc2_hsotg_corereset(hsotg); ret = dwc2_hsotg_hw_cfg(hsotg); if (ret) { dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret); return ret; } - dwc2_hsotg_init(hsotg); - - /* Switch back to default configuration */ - __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE); - hsotg->ctrl_buff = devm_kzalloc(hsotg->dev, DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); if (!hsotg->ctrl_buff) { @@ -3727,3 +3668,105 @@ int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg) return 0; } + +/** + * dwc2_backup_device_registers() - Backup controller device registers. + * When suspending usb bus, registers need to be backed up + * if controller power is disabled once suspended. + * + * @hsotg: Programming view of the DWC_otg controller + */ +int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) +{ + struct dwc2_dregs_backup *dr; + int i; + + dev_dbg(hsotg->dev, "%s\n", __func__); + + /* Backup dev regs */ + dr = &hsotg->dr_backup; + + dr->dcfg = dwc2_readl(hsotg->regs + DCFG); + dr->dctl = dwc2_readl(hsotg->regs + DCTL); + dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK); + dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK); + dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK); + + for (i = 0; i < hsotg->num_of_eps; i++) { + /* Backup IN EPs */ + dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i)); + + /* Ensure DATA PID is correctly configured */ + if (dr->diepctl[i] & DXEPCTL_DPID) + dr->diepctl[i] |= DXEPCTL_SETD1PID; + else + dr->diepctl[i] |= DXEPCTL_SETD0PID; + + dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i)); + dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i)); + + /* Backup OUT EPs */ + dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i)); + + /* Ensure DATA PID is correctly configured */ + if (dr->doepctl[i] & DXEPCTL_DPID) + dr->doepctl[i] |= DXEPCTL_SETD1PID; + else + dr->doepctl[i] |= DXEPCTL_SETD0PID; + + dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i)); + dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i)); + } + dr->valid = true; + return 0; +} + +/** + * dwc2_restore_device_registers() - Restore controller device registers. + * When resuming usb bus, device registers need to be restored + * if controller power was disabled. + * + * @hsotg: Programming view of the DWC_otg controller + */ +int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) +{ + struct dwc2_dregs_backup *dr; + u32 dctl; + int i; + + dev_dbg(hsotg->dev, "%s\n", __func__); + + /* Restore dev regs */ + dr = &hsotg->dr_backup; + if (!dr->valid) { + dev_err(hsotg->dev, "%s: no device registers to restore\n", + __func__); + return -EINVAL; + } + dr->valid = false; + + dwc2_writel(dr->dcfg, hsotg->regs + DCFG); + dwc2_writel(dr->dctl, hsotg->regs + DCTL); + dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK); + dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK); + dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK); + + for (i = 0; i < hsotg->num_of_eps; i++) { + /* Restore IN EPs */ + dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i)); + dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i)); + dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i)); + + /* Restore OUT EPs */ + dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i)); + dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i)); + dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i)); + } + + /* Set the Power-On Programming done bit */ + dctl = dwc2_readl(hsotg->regs + DCTL); + dctl |= DCTL_PWRONPRGDONE; + dwc2_writel(dctl, hsotg->regs + DCTL); + + return 0; +} diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 571c21727ff9..1f6255131857 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -54,6 +54,535 @@ #include "core.h" #include "hcd.h" +/* + * ========================================================================= + * Host Core Layer Functions + * ========================================================================= + */ + +/** + * dwc2_enable_common_interrupts() - Initializes the common interrupts, + * used in both device and host modes + * + * @hsotg: Programming view of the DWC_otg controller + */ +static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) +{ + u32 intmsk; + + /* Clear any pending OTG Interrupts */ + dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); + + /* Clear any pending interrupts */ + dwc2_writel(0xffffffff, hsotg->regs + GINTSTS); + + /* Enable the interrupts in the GINTMSK */ + intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; + + if (hsotg->core_params->dma_enable <= 0) + intmsk |= GINTSTS_RXFLVL; + if (hsotg->core_params->external_id_pin_ctl <= 0) + intmsk |= GINTSTS_CONIDSTSCHNG; + + intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | + GINTSTS_SESSREQINT; + + dwc2_writel(intmsk, hsotg->regs + GINTMSK); +} + +/* + * Initializes the FSLSPClkSel field of the HCFG register depending on the + * PHY type + */ +static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) +{ + u32 hcfg, val; + + if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && + hsotg->core_params->ulpi_fs_ls > 0) || + hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { + /* Full speed PHY */ + val = HCFG_FSLSPCLKSEL_48_MHZ; + } else { + /* High speed PHY running at full speed or high speed */ + val = HCFG_FSLSPCLKSEL_30_60_MHZ; + } + + dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg &= ~HCFG_FSLSPCLKSEL_MASK; + hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; + dwc2_writel(hcfg, hsotg->regs + HCFG); +} + +static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) +{ + u32 usbcfg, i2cctl; + int retval = 0; + + /* + * core_init() is now called on every switch so only call the + * following for the first
time through + */ + if (select_phy) { + dev_dbg(hsotg->dev, "FS PHY selected\n"); + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + if (!(usbcfg & GUSBCFG_PHYSEL)) { + usbcfg |= GUSBCFG_PHYSEL; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* Reset after a PHY select */ + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + + if (retval) { + dev_err(hsotg->dev, + "%s: Reset failed, aborting", __func__); + return retval; + } + } + } + + /* + * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also + * do this on HNP Dev/Host mode switches (done in dev_init and + * host_init). + */ + if (dwc2_is_host_mode(hsotg)) + dwc2_init_fs_ls_pclk_sel(hsotg); + + if (hsotg->core_params->i2c_enable > 0) { + dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); + + /* Program GUSBCFG.OtgUtmiFsSel to I2C */ + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* Program GI2CCTL.I2CEn */ + i2cctl = dwc2_readl(hsotg->regs + GI2CCTL); + i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; + i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; + i2cctl &= ~GI2CCTL_I2CEN; + dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); + i2cctl |= GI2CCTL_I2CEN; + dwc2_writel(i2cctl, hsotg->regs + GI2CCTL); + } + + return retval; +} + +static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) +{ + u32 usbcfg, usbcfg_old; + int retval = 0; + + if (!select_phy) + return 0; + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg_old = usbcfg; + + /* + * HS PHY parameters. These parameters are preserved during soft reset + * so only program the first time. Do a soft reset immediately after + * setting phyif. + */ + switch (hsotg->core_params->phy_type) { + case DWC2_PHY_TYPE_PARAM_ULPI: + /* ULPI interface */ + dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); + usbcfg |= GUSBCFG_ULPI_UTMI_SEL; + usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); + if (hsotg->core_params->phy_ulpi_ddr > 0) + usbcfg |= GUSBCFG_DDRSEL; + break; + case DWC2_PHY_TYPE_PARAM_UTMI: + /* UTMI+ interface */ + dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); + usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); + if (hsotg->core_params->phy_utmi_width == 16) + usbcfg |= GUSBCFG_PHYIF16; + break; + default: + dev_err(hsotg->dev, "FS PHY selected at HS!\n"); + break; + } + + if (usbcfg != usbcfg_old) { + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* Reset after setting the PHY parameters */ + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + if (retval) { + dev_err(hsotg->dev, + "%s: Reset failed, aborting", __func__); + return retval; + } + } + + return retval; +} + +static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) +{ + u32 usbcfg; + int retval = 0; + + if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && + hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { + /* If FS mode with FS PHY */ + retval = dwc2_fs_phy_init(hsotg, select_phy); + if (retval) + return retval; + } else { + /* High speed PHY */ + retval = dwc2_hs_phy_init(hsotg, select_phy); + if (retval) + return retval; + } + + if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && + hsotg->core_params->ulpi_fs_ls > 0) { + dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg |= GUSBCFG_ULPI_FS_LS; + usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + } else { + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg &= ~GUSBCFG_ULPI_FS_LS; + usbcfg &= 
~GUSBCFG_ULPI_CLK_SUSP_M; + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + } + + return retval; +} + +static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) +{ + u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG); + + switch (hsotg->hw_params.arch) { + case GHWCFG2_EXT_DMA_ARCH: + dev_err(hsotg->dev, "External DMA Mode not supported\n"); + return -EINVAL; + + case GHWCFG2_INT_DMA_ARCH: + dev_dbg(hsotg->dev, "Internal DMA Mode\n"); + if (hsotg->core_params->ahbcfg != -1) { + ahbcfg &= GAHBCFG_CTRL_MASK; + ahbcfg |= hsotg->core_params->ahbcfg & + ~GAHBCFG_CTRL_MASK; + } + break; + + case GHWCFG2_SLAVE_ONLY_ARCH: + default: + dev_dbg(hsotg->dev, "Slave Only Mode\n"); + break; + } + + dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", + hsotg->core_params->dma_enable, + hsotg->core_params->dma_desc_enable); + + if (hsotg->core_params->dma_enable > 0) { + if (hsotg->core_params->dma_desc_enable > 0) + dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); + else + dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); + } else { + dev_dbg(hsotg->dev, "Using Slave mode\n"); + hsotg->core_params->dma_desc_enable = 0; + } + + if (hsotg->core_params->dma_enable > 0) + ahbcfg |= GAHBCFG_DMA_EN; + + dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); + + return 0; +} + +static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) +{ + u32 usbcfg; + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); + + switch (hsotg->hw_params.op_mode) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + if (hsotg->core_params->otg_cap == + DWC2_CAP_PARAM_HNP_SRP_CAPABLE) + usbcfg |= GUSBCFG_HNPCAP; + if (hsotg->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) + usbcfg |= GUSBCFG_SRPCAP; + break; + + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + if (hsotg->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) + usbcfg |= GUSBCFG_SRPCAP; + break; + + case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: + case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: + default: + break; + } + + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); +} + +/** + * dwc2_enable_host_interrupts() - Enables the Host mode interrupts + * + * @hsotg: Programming view of DWC_otg controller + */ +static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) +{ + u32 intmsk; + + dev_dbg(hsotg->dev, "%s()\n", __func__); + + /* Disable all interrupts */ + dwc2_writel(0, hsotg->regs + GINTMSK); + dwc2_writel(0, hsotg->regs + HAINTMSK); + + /* Enable the common interrupts */ + dwc2_enable_common_interrupts(hsotg); + + /* Enable host mode interrupts without disturbing common interrupts */ + intmsk = dwc2_readl(hsotg->regs + GINTMSK); + intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; + dwc2_writel(intmsk, hsotg->regs + GINTMSK); +} + +/** + * dwc2_disable_host_interrupts() - Disables the Host mode interrupts + * + * @hsotg: Programming view of DWC_otg controller + */ +static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) +{ + u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK); + + /* Disable host mode interrupts without disturbing common interrupts */ + intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | + GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT); + dwc2_writel(intmsk, hsotg->regs + GINTMSK); +} + +/* + * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size + * For systems that have a total fifo depth that is smaller than the default + * RX + TX fifo size.
+ * + * @hsotg: Programming view of DWC_otg controller + */ +static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_hw_params *hw = &hsotg->hw_params; + u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; + + total_fifo_size = hw->total_fifo_size; + rxfsiz = params->host_rx_fifo_size; + nptxfsiz = params->host_nperio_tx_fifo_size; + ptxfsiz = params->host_perio_tx_fifo_size; + + /* + * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth + * allocation with support for high bandwidth endpoints. Synopsys + * defines MPS(Max Packet size) for a periodic EP=1024, and for + * non-periodic as 512. + */ + if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { + /* + * For Buffer DMA mode/Scatter Gather DMA mode + * 2 * ((Largest Packet size / 4) + 1 + 1) + n + * with n = number of host channel. + * 2 * ((1024/4) + 2) = 516 + */ + rxfsiz = 516 + hw->host_channels; + + /* + * min non-periodic tx fifo depth + * 2 * (largest non-periodic USB packet used / 4) + * 2 * (512/4) = 256 + */ + nptxfsiz = 256; + + /* + * min periodic tx fifo depth + * (largest packet size*MC)/4 + * (1024 * 3)/4 = 768 + */ + ptxfsiz = 768; + + params->host_rx_fifo_size = rxfsiz; + params->host_nperio_tx_fifo_size = nptxfsiz; + params->host_perio_tx_fifo_size = ptxfsiz; + } + + /* + * If the summation of RX, NPTX and PTX fifo sizes is still + * bigger than the total_fifo_size, then we have a problem. + * + * We won't be able to allocate as many endpoints. Right now, + * we're just printing an error message, but ideally this FIFO + * allocation algorithm would be improved in the future. + * + * FIXME improve this FIFO allocation algorithm. + */ + if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) + dev_err(hsotg->dev, "invalid fifo sizes\n"); +} + +static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *params = hsotg->core_params; + u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; + + if (!params->enable_dynamic_fifo) + return; + + dwc2_calculate_dynamic_fifo(hsotg); + + /* Rx FIFO */ + grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); + dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); + grxfsiz &= ~GRXFSIZ_DEPTH_MASK; + grxfsiz |= params->host_rx_fifo_size << + GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; + dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ); + dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", + dwc2_readl(hsotg->regs + GRXFSIZ)); + + /* Non-periodic Tx FIFO */ + dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + GNPTXFSIZ)); + nptxfsiz = params->host_nperio_tx_fifo_size << + FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; + nptxfsiz |= params->host_rx_fifo_size << + FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; + dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ); + dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + GNPTXFSIZ)); + + /* Periodic Tx FIFO */ + dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + HPTXFSIZ)); + hptxfsiz = params->host_perio_tx_fifo_size << + FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; + hptxfsiz |= (params->host_rx_fifo_size + + params->host_nperio_tx_fifo_size) << + FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; + dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ); + dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", + dwc2_readl(hsotg->regs + HPTXFSIZ)); + + if (hsotg->core_params->en_multiple_tx_fifo > 0 && + hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { + /* + * Global DFIFOCFG calculation for Host mode - + * 
include RxFIFO, NPTXFIFO and HPTXFIFO + */ + dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG); + dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; + dfifocfg |= (params->host_rx_fifo_size + + params->host_nperio_tx_fifo_size + + params->host_perio_tx_fifo_size) << + GDFIFOCFG_EPINFOBASE_SHIFT & + GDFIFOCFG_EPINFOBASE_MASK; + dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG); + } +} + +/** + * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for + * the HFIR register according to PHY type and speed + * + * @hsotg: Programming view of DWC_otg controller + * + * NOTE: The caller can modify the value of the HFIR register only after the + * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) + * has been set + */ +u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) +{ + u32 usbcfg; + u32 hprt0; + int clock = 60; /* default value */ + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + hprt0 = dwc2_readl(hsotg->regs + HPRT0); + + if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && + !(usbcfg & GUSBCFG_PHYIF16)) + clock = 60; + if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == + GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) + clock = 48; + if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && + !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) + clock = 30; + if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && + !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) + clock = 60; + if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && + !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) + clock = 48; + if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) + clock = 48; + if ((usbcfg & GUSBCFG_PHYSEL) && + hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) + clock = 48; + + if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) + /* High speed case */ + return 125 * clock - 1; + + /* FS/LS case */ + return 1000 * clock - 1; +} + +/** + * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination + * buffer + * + * @core_if: Programming view of DWC_otg controller + * @dest: Destination buffer for the packet + * @bytes: Number of bytes to copy to the destination + */ +void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) +{ + u32 __iomem *fifo = hsotg->regs + HCFIFO(0); + u32 *data_buf = (u32 *)dest; + int word_count = (bytes + 3) / 4; + int i; + + /* + * Todo: Account for the case where dest is not dword aligned. This + * requires reading data from the FIFO into a u32 temp buffer, then + * moving it into the data buffer. 
+ */ + + dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); + + for (i = 0; i < word_count; i++, data_buf++) + *data_buf = dwc2_readl(fifo); +} + /** * dwc2_dump_channel_info() - Prints the state of a host channel * @@ -77,7 +606,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, u32 hc_dma; int i; - if (chan == NULL) + if (!chan) return; hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); @@ -120,6 +649,1056 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, } /* + * ========================================================================= + * Low Level Host Channel Access Functions + * ========================================================================= + */ + +static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcintmsk = HCINTMSK_CHHLTD; + + switch (chan->ep_type) { + case USB_ENDPOINT_XFER_CONTROL: + case USB_ENDPOINT_XFER_BULK: + dev_vdbg(hsotg->dev, "control/bulk\n"); + hcintmsk |= HCINTMSK_XFERCOMPL; + hcintmsk |= HCINTMSK_STALL; + hcintmsk |= HCINTMSK_XACTERR; + hcintmsk |= HCINTMSK_DATATGLERR; + if (chan->ep_is_in) { + hcintmsk |= HCINTMSK_BBLERR; + } else { + hcintmsk |= HCINTMSK_NAK; + hcintmsk |= HCINTMSK_NYET; + if (chan->do_ping) + hcintmsk |= HCINTMSK_ACK; + } + + if (chan->do_split) { + hcintmsk |= HCINTMSK_NAK; + if (chan->complete_split) + hcintmsk |= HCINTMSK_NYET; + else + hcintmsk |= HCINTMSK_ACK; + } + + if (chan->error_state) + hcintmsk |= HCINTMSK_ACK; + break; + + case USB_ENDPOINT_XFER_INT: + if (dbg_perio()) + dev_vdbg(hsotg->dev, "intr\n"); + hcintmsk |= HCINTMSK_XFERCOMPL; + hcintmsk |= HCINTMSK_NAK; + hcintmsk |= HCINTMSK_STALL; + hcintmsk |= HCINTMSK_XACTERR; + hcintmsk |= HCINTMSK_DATATGLERR; + hcintmsk |= HCINTMSK_FRMOVRUN; + + if (chan->ep_is_in) + hcintmsk |= HCINTMSK_BBLERR; + if (chan->error_state) + hcintmsk |= HCINTMSK_ACK; + if (chan->do_split) { + if (chan->complete_split) + hcintmsk |= HCINTMSK_NYET; + else + hcintmsk |= HCINTMSK_ACK; + } + break; + + case USB_ENDPOINT_XFER_ISOC: + if (dbg_perio()) + dev_vdbg(hsotg->dev, "isoc\n"); + hcintmsk |= HCINTMSK_XFERCOMPL; + hcintmsk |= HCINTMSK_FRMOVRUN; + hcintmsk |= HCINTMSK_ACK; + + if (chan->ep_is_in) { + hcintmsk |= HCINTMSK_XACTERR; + hcintmsk |= HCINTMSK_BBLERR; + } + break; + default: + dev_err(hsotg->dev, "## Unknown EP type ##\n"); + break; + } + + dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); +} + +static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcintmsk = HCINTMSK_CHHLTD; + + /* + * For Descriptor DMA mode core halts the channel on AHB error. + * Interrupt is not required. 
+ */ + if (hsotg->core_params->dma_desc_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "desc DMA disabled\n"); + hcintmsk |= HCINTMSK_AHBERR; + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "desc DMA enabled\n"); + if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) + hcintmsk |= HCINTMSK_XFERCOMPL; + } + + if (chan->error_state && !chan->do_split && + chan->ep_type != USB_ENDPOINT_XFER_ISOC) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "setting ACK\n"); + hcintmsk |= HCINTMSK_ACK; + if (chan->ep_is_in) { + hcintmsk |= HCINTMSK_DATATGLERR; + if (chan->ep_type != USB_ENDPOINT_XFER_INT) + hcintmsk |= HCINTMSK_NAK; + } + } + + dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); +} + +static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 intmsk; + + if (hsotg->core_params->dma_enable > 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA enabled\n"); + dwc2_hc_enable_dma_ints(hsotg, chan); + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA disabled\n"); + dwc2_hc_enable_slave_ints(hsotg, chan); + } + + /* Enable the top level host channel interrupt */ + intmsk = dwc2_readl(hsotg->regs + HAINTMSK); + intmsk |= 1 << chan->hc_num; + dwc2_writel(intmsk, hsotg->regs + HAINTMSK); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); + + /* Make sure host channel interrupts are enabled */ + intmsk = dwc2_readl(hsotg->regs + GINTMSK); + intmsk |= GINTSTS_HCHINT; + dwc2_writel(intmsk, hsotg->regs + GINTMSK); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); +} + +/** + * dwc2_hc_init() - Prepares a host channel for transferring packets to/from + * a specific endpoint + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * The HCCHARn register is set up with the characteristics specified in chan. + * Host channel interrupts that may need to be serviced while this transfer is + * in progress are enabled. 
+ */ +static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) +{ + u8 hc_num = chan->hc_num; + u32 hcintmsk; + u32 hcchar; + u32 hcsplt = 0; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + + /* Clear old interrupt conditions for this host channel */ + hcintmsk = 0xffffffff; + hcintmsk &= ~HCINTMSK_RESERVED14_31; + dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num)); + + /* Enable channel interrupts required for this transfer */ + dwc2_hc_enable_ints(hsotg, chan); + + /* + * Program the HCCHARn register with the endpoint characteristics for + * the current transfer + */ + hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; + hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; + if (chan->ep_is_in) + hcchar |= HCCHAR_EPDIR; + if (chan->speed == USB_SPEED_LOW) + hcchar |= HCCHAR_LSPDDEV; + hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; + hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num)); + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", + hc_num, hcchar); + + dev_vdbg(hsotg->dev, "%s: Channel %d\n", + __func__, hc_num); + dev_vdbg(hsotg->dev, " Dev Addr: %d\n", + chan->dev_addr); + dev_vdbg(hsotg->dev, " Ep Num: %d\n", + chan->ep_num); + dev_vdbg(hsotg->dev, " Is In: %d\n", + chan->ep_is_in); + dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", + chan->speed == USB_SPEED_LOW); + dev_vdbg(hsotg->dev, " Ep Type: %d\n", + chan->ep_type); + dev_vdbg(hsotg->dev, " Max Pkt: %d\n", + chan->max_packet); + }
+ + /* Program the HCSPLT register for SPLITs */ + if (chan->do_split) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, + "Programming HC %d with split --> %s\n", + hc_num, + chan->complete_split ? "CSPLIT" : "SSPLIT"); + if (chan->complete_split) + hcsplt |= HCSPLT_COMPSPLT; + hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & + HCSPLT_XACTPOS_MASK; + hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & + HCSPLT_HUBADDR_MASK; + hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & + HCSPLT_PRTADDR_MASK; + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, " comp split %d\n", + chan->complete_split); + dev_vdbg(hsotg->dev, " xact pos %d\n", + chan->xact_pos); + dev_vdbg(hsotg->dev, " hub addr %d\n", + chan->hub_addr); + dev_vdbg(hsotg->dev, " hub port %d\n", + chan->hub_port); + dev_vdbg(hsotg->dev, " is_in %d\n", + chan->ep_is_in); + dev_vdbg(hsotg->dev, " Max Pkt %d\n", + chan->max_packet); + dev_vdbg(hsotg->dev, " xferlen %d\n", + chan->xfer_len); + } + } + + dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num)); +}
+
+/** + * dwc2_hc_halt() - Attempts to halt a host channel + * + * @hsotg: Controller register interface + * @chan: Host channel to halt + * @halt_status: Reason for halting the channel + * + * This function should only be called in Slave mode or to abort a transfer in + * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the + * controller halts the channel when the transfer is complete or a condition + * occurs that requires application intervention. + * + * In Slave mode, checks for a free request queue entry, then sets the Channel + * Enable and Channel Disable bits of the Host Channel Characteristics + * register of the specified channel to initiate the halt. If there is no free + * request queue entry, sets only the Channel Disable bit of the HCCHARn + * register to flush requests for this channel (see the sketch below).
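For illustration, a compressed sketch of the two Slave-mode cases just described, covering only the non-periodic endpoints; the authoritative logic is in dwc2_hc_halt() below, and the register and field names are the hw.h definitions used throughout this patch:

	u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));

	hcchar |= HCCHAR_CHDIS;			/* always request the disable */
	if (dwc2_readl(hsotg->regs + GNPTXSTS) & TXSTS_QSPCAVAIL_MASK)
		hcchar |= HCCHAR_CHENA;		/* free queue entry: initiate the halt now */
	else
		chan->halt_on_queue = 1;	/* no entry: flush only, halt when a slot opens */
	dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));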
In the latter case, sets a + * flag to indicate that the host channel needs to be halted when a request + * queue slot is open. + * + * In DMA mode, always sets the Channel Enable and Channel Disable bits of the + * HCCHARn register. The controller ensures there is space in the request + * queue before submitting the halt request. + * + * Some time may elapse before the core flushes any posted requests for this + * host channel and halts. The Channel Halted interrupt handler completes the + * deactivation of the host channel. + */ +void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, + enum dwc2_halt_status halt_status) +{ + u32 nptxsts, hptxsts, hcchar; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) + dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); + + if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || + halt_status == DWC2_HC_XFER_AHB_ERR) { + /* + * Disable all channel interrupts except Ch Halted. The QTD + * and QH state associated with this transfer has been cleared + * (in the case of URB_DEQUEUE), so the channel needs to be + * shut down carefully to prevent crashes. + */ + u32 hcintmsk = HCINTMSK_CHHLTD; + + dev_vdbg(hsotg->dev, "dequeue/error\n"); + dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num)); + + /* + * Make sure no other interrupts besides halt are currently + * pending. Handling another interrupt could cause a crash due + * to the QTD and QH state. + */ + dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num)); + + /* + * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR + * even if the channel was already halted for some other + * reason + */ + chan->halt_status = halt_status; + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + if (!(hcchar & HCCHAR_CHENA)) { + /* + * The channel is either already halted or it hasn't + * started yet. In DMA mode, the transfer may halt if + * it finishes normally or a condition occurs that + * requires driver intervention. Don't want to halt + * the channel again. In either Slave or DMA mode, + * it's possible that the transfer has been assigned + * to a channel, but not started yet when an URB is + * dequeued. Don't want to halt a channel that hasn't + * started yet. + */ + return; + } + } + if (chan->halt_pending) { + /* + * A halt has already been issued for this channel. This might + * happen when a transfer is aborted by a higher level in + * the stack. 
+ */ + dev_vdbg(hsotg->dev, + "*** %s: Channel %d, chan->halt_pending already set ***\n", + __func__, chan->hc_num); + return; + } + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + + /* No need to set the bit in DDMA for disabling the channel */ + /* TODO check it everywhere channel is disabled */ + if (hsotg->core_params->dma_desc_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "desc DMA disabled\n"); + hcchar |= HCCHAR_CHENA; + } else { + if (dbg_hc(chan)) + dev_dbg(hsotg->dev, "desc DMA enabled\n"); + } + hcchar |= HCCHAR_CHDIS; + + if (hsotg->core_params->dma_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA not enabled\n"); + hcchar |= HCCHAR_CHENA; + + /* Check for space in the request queue to issue the halt */ + if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || + chan->ep_type == USB_ENDPOINT_XFER_BULK) { + dev_vdbg(hsotg->dev, "control/bulk\n"); + nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS); + if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { + dev_vdbg(hsotg->dev, "Disabling channel\n"); + hcchar &= ~HCCHAR_CHENA; + } + } else { + if (dbg_perio()) + dev_vdbg(hsotg->dev, "isoc/intr\n"); + hptxsts = dwc2_readl(hsotg->regs + HPTXSTS); + if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || + hsotg->queuing_high_bandwidth) { + if (dbg_perio()) + dev_vdbg(hsotg->dev, "Disabling channel\n"); + hcchar &= ~HCCHAR_CHENA; + } + } + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "DMA enabled\n"); + } + + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + chan->halt_status = halt_status; + + if (hcchar & HCCHAR_CHENA) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Channel enabled\n"); + chan->halt_pending = 1; + chan->halt_on_queue = 0; + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Channel disabled\n"); + chan->halt_on_queue = 1; + } + + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", + hcchar); + dev_vdbg(hsotg->dev, " halt_pending: %d\n", + chan->halt_pending); + dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", + chan->halt_on_queue); + dev_vdbg(hsotg->dev, " halt_status: %d\n", + chan->halt_status); + } +} + +/** + * dwc2_hc_cleanup() - Clears the transfer state for a host channel + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Identifies the host channel to clean up + * + * This function is normally called after a transfer is done and the host + * channel is being released + */ +void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) +{ + u32 hcintmsk; + + chan->xfer_started = 0; + + list_del_init(&chan->split_order_list_entry); + + /* + * Clear channel interrupt enables and any unhandled channel interrupt + * conditions + */ + dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num)); + hcintmsk = 0xffffffff; + hcintmsk &= ~HCINTMSK_RESERVED14_31; + dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num)); +} + +/** + * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in + * which frame a periodic transfer should occur + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Identifies the host channel to set up and its properties + * @hcchar: Current value of the HCCHAR register for the specified host channel + * + * This function has no effect on non-periodic transfers + */ +static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan, u32 *hcchar) +{ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) { + int host_speed; + 
int xfer_ns; + int xfer_us; + int bytes_in_fifo; + u16 fifo_space; + u16 frame_number; + u16 wire_frame; + + /* + * Try to figure out if we're an even or odd frame. If we set + * even and the current frame number is even, the transfer + * will happen immediately. Similarly if both are odd. If one is + * even and the other is odd then the transfer will happen when + * the frame number ticks. + * + * There's a bit of a balancing act to get this right. + * Sometimes we may want to send data in the current frame (i.e. + * right away). We might want to do this if the frame number + * _just_ ticked, but we might also want to do this in order + * to continue a split transaction that happened late in a + * microframe (so we didn't know to queue the next transfer + * until the frame number had ticked). The problem is that we + * need a lot of knowledge to know if there's actually still + * time to send things or if it would be better to wait until + * the next frame. + * + * We can look at how much time is left in the current frame + * and make a guess about whether we'll have time to transfer. + * We'll do that. + */ + + /* Get speed host is running at */ + host_speed = (chan->speed != USB_SPEED_HIGH && + !chan->do_split) ? chan->speed : USB_SPEED_HIGH; + + /* See how many bytes are in the periodic FIFO right now */ + fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) & + TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT; + bytes_in_fifo = sizeof(u32) * + (hsotg->core_params->host_perio_tx_fifo_size - + fifo_space); + + /* + * Roughly estimate bus time for everything in the periodic + * queue + our new transfer. This is "rough" because we're + * using a function that takes into account IN/OUT + * and INT/ISO and we're just slamming in one value for all + * transfers. This should be an over-estimate and that should + * be OK, but we can probably tighten it. + */ + xfer_ns = usb_calc_bus_time(host_speed, false, false, + chan->xfer_len + bytes_in_fifo); + xfer_us = NS_TO_US(xfer_ns); + + /* See what frame number we'll be at by the time we finish */ + frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us); + + /* This is when we were scheduled to be on the wire */ + wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1); + + /* + * If we'd finish _after_ the frame we're scheduled in then + * it's hopeless. Just schedule right away and hope for the + * best. Note that it _might_ be wise to call back into the + * scheduler to pick a better frame, but this is better than + * nothing. + */ + if (dwc2_frame_num_gt(frame_number, wire_frame)) { + dwc2_sch_vdbg(hsotg, + "QH=%p EO MISS fr=%04x=>%04x (%+d)\n", + chan->qh, wire_frame, frame_number, + dwc2_frame_num_dec(frame_number, + wire_frame)); + wire_frame = frame_number; + + /* + * We picked a different frame number; communicate this + * back to the scheduler so it doesn't try to schedule + * another in the same frame. + * + * Remember that next_active_frame is 1 before the wire + * frame.
+ */ + chan->qh->next_active_frame = + dwc2_frame_num_dec(frame_number, 1); + } + + if (wire_frame & 1) + *hcchar |= HCCHAR_ODDFRM; + else + *hcchar &= ~HCCHAR_ODDFRM; + } +}
+
+static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) +{ + /* Set up the initial PID for the transfer */ + if (chan->speed == USB_SPEED_HIGH) { + if (chan->ep_is_in) { + if (chan->multi_count == 1) + chan->data_pid_start = DWC2_HC_PID_DATA0; + else if (chan->multi_count == 2) + chan->data_pid_start = DWC2_HC_PID_DATA1; + else + chan->data_pid_start = DWC2_HC_PID_DATA2; + } else { + if (chan->multi_count == 1) + chan->data_pid_start = DWC2_HC_PID_DATA0; + else + chan->data_pid_start = DWC2_HC_PID_MDATA; + } + } else { + chan->data_pid_start = DWC2_HC_PID_DATA0; + } +}
+
+/** + * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with + * the Host Channel + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * This function should only be called in Slave mode. For a channel associated + * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel + * associated with a periodic EP, the periodic Tx FIFO is written. + * + * Upon return the xfer_buf and xfer_count fields in chan are incremented by + * the number of bytes written to the Tx FIFO. + */ +static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 i; + u32 remaining_count; + u32 byte_count; + u32 dword_count; + u32 __iomem *data_fifo; + u32 *data_buf = (u32 *)chan->xfer_buf; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + + data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); + + remaining_count = chan->xfer_len - chan->xfer_count; + if (remaining_count > chan->max_packet) + byte_count = chan->max_packet; + else + byte_count = remaining_count; + + dword_count = (byte_count + 3) / 4; + + if (((unsigned long)data_buf & 0x3) == 0) { + /* xfer_buf is DWORD aligned */ + for (i = 0; i < dword_count; i++, data_buf++) + dwc2_writel(*data_buf, data_fifo); + } else { + /* xfer_buf is not DWORD aligned; assemble each word byte by byte */ + u8 *byte_buf = (u8 *)data_buf; + + for (i = 0; i < dword_count; i++, byte_buf += 4) { + u32 data = byte_buf[0] | byte_buf[1] << 8 | + byte_buf[2] << 16 | byte_buf[3] << 24; + dwc2_writel(data, data_fifo); + } + } + + chan->xfer_count += byte_count; + chan->xfer_buf += byte_count; +}
+
+/** + * dwc2_hc_do_ping() - Starts a PING transfer + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * This function should only be called in Slave mode. The Do Ping bit is set in + * the HCTSIZ register, then the channel is enabled. + */ +static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcchar; + u32 hctsiz; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + + hctsiz = TSIZ_DOPNG; + hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; + dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); +}
+
+/** + * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host + * channel and starts the transfer + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel.
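The frame comparisons in dwc2_hc_set_even_odd_frame() above (and in dwc2_hc_start_transfer() below) lean on the 14-bit HFNUM counter wrapping at HFNUM_MAX_FRNUM (0x3fff). The driver's own dwc2_frame_num_inc/dec/gt helpers live in hcd.h; a minimal sketch of the modulo arithmetic they implement, as an approximation for illustration rather than the in-tree bodies:

	#define FRNUM_MASK	0x3fff	/* HFNUM frame number is 14 bits wide */

	static inline u16 frame_num_inc(u16 frame, u16 inc)
	{
		return (frame + inc) & FRNUM_MASK;
	}

	static inline u16 frame_num_dec(u16 frame, u16 dec)
	{
		return (frame + FRNUM_MASK + 1 - dec) & FRNUM_MASK;
	}

	/* True if a is ahead of b by less than half the wrap range */
	static inline bool frame_num_gt(u16 a, u16 b)
	{
		return a != b && ((a - b) & FRNUM_MASK) < (FRNUM_MASK >> 1);
	}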
The xfer_len value + * may be reduced to accommodate the max widths of the XferSize and + * PktCnt fields in the HCTSIZn register. The multi_count value may be + * changed to reflect the final xfer_len value. + * + * This function may be called in either Slave mode or DMA mode. In Slave mode, + * the caller must ensure that there is sufficient space in the request queue + * and Tx Data FIFO. + * + * For an OUT transfer in Slave mode, it loads a data packet into the + * appropriate FIFO. If necessary, additional data packets are loaded in the + * Host ISR. + * + * For an IN transfer in Slave mode, a data packet is requested. The data + * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, + * additional data packets are requested in the Host ISR. + * + * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ + * register along with a packet count of 1 and the channel is enabled. This + * causes a single PING transaction to occur. Other fields in HCTSIZ are + * simply set to 0 since no data transfer occurs in this case. + * + * For a PING transfer in DMA mode, the HCTSIZ register is initialized with + * all the information required to perform the subsequent data transfer. In + * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the + * controller performs the entire PING protocol, then starts the data + * transfer. + */ +static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; + u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; + u32 hcchar; + u32 hctsiz = 0; + u16 num_packets; + u32 ec_mc; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s()\n", __func__); + + if (chan->do_ping) { + if (hsotg->core_params->dma_enable <= 0) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "ping, no DMA\n"); + dwc2_hc_do_ping(hsotg, chan); + chan->xfer_started = 1; + return; + } + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "ping, DMA\n"); + + hctsiz |= TSIZ_DOPNG; + } + + if (chan->do_split) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "split\n"); + num_packets = 1; + + if (chan->complete_split && !chan->ep_is_in) + /* + * For CSPLIT OUT Transfer, set the size to 0 so the + * core doesn't expect any data written to the FIFO + */ + chan->xfer_len = 0; + else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) + chan->xfer_len = chan->max_packet; + else if (!chan->ep_is_in && chan->xfer_len > 188) + chan->xfer_len = 188; + + hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK; + + /* For split set ec_mc for immediate retries */ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) + ec_mc = 3; + else + ec_mc = 1; + } else { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "no split\n"); + /* + * Ensure that the transfer length and packet count will fit + * in the widths allocated for them in the HCTSIZn register + */ + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) { + /* + * Make sure the transfer size is no larger than one + * (micro)frame's worth of data. (A check was done + * when the periodic transfer was accepted to ensure + * that a (micro)frame's worth of data can be + * programmed into a channel.) 
+ */ + u32 max_periodic_len = + chan->multi_count * chan->max_packet; + + if (chan->xfer_len > max_periodic_len) + chan->xfer_len = max_periodic_len; + } else if (chan->xfer_len > max_hc_xfer_size) { + /* + * Make sure that xfer_len is a multiple of max packet + * size + */ + chan->xfer_len = + max_hc_xfer_size - chan->max_packet + 1; + } + + if (chan->xfer_len > 0) { + num_packets = (chan->xfer_len + chan->max_packet - 1) / + chan->max_packet; + if (num_packets > max_hc_pkt_count) { + num_packets = max_hc_pkt_count; + chan->xfer_len = num_packets * chan->max_packet; + } + } else { + /* Need 1 packet for transfer length of 0 */ + num_packets = 1; + } + + if (chan->ep_is_in) + /* + * Always program an integral # of max packets for IN + * transfers + */ + chan->xfer_len = num_packets * chan->max_packet; + + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) + /* + * Make sure that the multi_count field matches the + * actual transfer length + */ + chan->multi_count = num_packets; + + if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) + dwc2_set_pid_isoc(chan); + + hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK; + + /* The ec_mc gets the multi_count for non-split */ + ec_mc = chan->multi_count; + } + + chan->start_pkt_count = num_packets; + hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; + hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK; + dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", + hctsiz, chan->hc_num); + + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + dev_vdbg(hsotg->dev, " Xfer Size: %d\n", + (hctsiz & TSIZ_XFERSIZE_MASK) >> + TSIZ_XFERSIZE_SHIFT); + dev_vdbg(hsotg->dev, " Num Pkts: %d\n", + (hctsiz & TSIZ_PKTCNT_MASK) >> + TSIZ_PKTCNT_SHIFT); + dev_vdbg(hsotg->dev, " Start PID: %d\n", + (hctsiz & TSIZ_SC_MC_PID_MASK) >> + TSIZ_SC_MC_PID_SHIFT); + } + + if (hsotg->core_params->dma_enable > 0) { + dwc2_writel((u32)chan->xfer_dma, + hsotg->regs + HCDMA(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", + (unsigned long)chan->xfer_dma, chan->hc_num); + } + + /* Start the split */ + if (chan->do_split) { + u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num)); + + hcsplt |= HCSPLT_SPLTENA; + dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num)); + } + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + hcchar &= ~HCCHAR_MULTICNT_MASK; + hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; + dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); + + if (hcchar & HCCHAR_CHDIS) + dev_warn(hsotg->dev, + "%s: chdis set, channel %d, hcchar 0x%08x\n", + __func__, chan->hc_num, hcchar); + + /* Set host channel enable after all other setup is complete */ + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", + (hcchar & HCCHAR_MULTICNT_MASK) >> + HCCHAR_MULTICNT_SHIFT); + + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, + chan->hc_num); + + chan->xfer_started = 1; + chan->requests++; + + if (hsotg->core_params->dma_enable <= 0 && + !chan->ep_is_in && chan->xfer_len > 0) + /* Load OUT packet into the appropriate Tx FIFO */ + dwc2_hc_write_packet(hsotg, chan); +} + +/** + * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a + * host channel and starts the 
transfer in Descriptor DMA mode + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. + * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field + * with micro-frame bitmap. + * + * Initializes HCDMA register with descriptor list address and CTD value then + * starts the transfer via enabling the channel. + */ +void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + u32 hcchar; + u32 hctsiz = 0; + + if (chan->do_ping) + hctsiz |= TSIZ_DOPNG; + + if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) + dwc2_set_pid_isoc(chan); + + /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ + hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK; + + /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ + hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; + + /* Non-zero only for high-speed interrupt endpoints */ + hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; + + if (dbg_hc(chan)) { + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + dev_vdbg(hsotg->dev, " Start PID: %d\n", + chan->data_pid_start); + dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1); + } + + dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num)); + + dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr, + chan->desc_list_sz, DMA_TO_DEVICE); + + dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num)); + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", + &chan->desc_list_addr, chan->hc_num); + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + hcchar &= ~HCCHAR_MULTICNT_MASK; + hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK; + + if (hcchar & HCCHAR_CHDIS) + dev_warn(hsotg->dev, + "%s: chdis set, channel %d, hcchar 0x%08x\n", + __func__, chan->hc_num, hcchar); + + /* Set host channel enable after all other setup is complete */ + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", + (hcchar & HCCHAR_MULTICNT_MASK) >> + HCCHAR_MULTICNT_SHIFT); + + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, + chan->hc_num); + + chan->xfer_started = 1; + chan->requests++; +} + +/** + * dwc2_hc_continue_transfer() - Continues a data transfer that was started by + * a previous call to dwc2_hc_start_transfer() + * + * @hsotg: Programming view of DWC_otg controller + * @chan: Information needed to initialize the host channel + * + * The caller must ensure there is sufficient space in the request queue and Tx + * Data FIFO. This function should only be called in Slave mode. In DMA mode, + * the controller acts autonomously to complete transfers programmed to a host + * channel. + * + * For an OUT transfer, a new data packet is loaded into the appropriate FIFO + * if there is any data remaining to be queued. For an IN transfer, another + * data packet is always requested. For the SETUP phase of a control transfer, + * this function does nothing. 
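As a worked example of the HCTSIZ sizing rules in dwc2_hc_start_transfer() above (hypothetical numbers; buffer DMA, no split): a 1000-byte bulk IN with max_packet = 64 is rounded up so the core always receives whole packets:

	u32 xfer_len = 1000;
	u32 max_packet = 64;
	u16 num_packets = (xfer_len + max_packet - 1) / max_packet;	/* 16 */
	u32 hctsiz;

	/* IN transfers always program an integral number of max packets */
	xfer_len = num_packets * max_packet;				/* 1024 */

	hctsiz = xfer_len << TSIZ_XFERSIZE_SHIFT & TSIZ_XFERSIZE_MASK;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;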
+ * + * Return: 1 if a new request is queued, 0 if no more requests are required + * for this transfer + */ +static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan) +{ + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, + chan->hc_num); + + if (chan->do_split) + /* SPLITs always queue just once per channel */ + return 0; + + if (chan->data_pid_start == DWC2_HC_PID_SETUP) + /* SETUPs are queued only once since they can't be NAK'd */ + return 0; + + if (chan->ep_is_in) { + /* + * Always queue another request for other IN transfers. If + * back-to-back INs are issued and NAKs are received for both, + * the driver may still be processing the first NAK when the + * second NAK is received. When the interrupt handler clears + * the NAK interrupt for the first NAK, the second NAK will + * not be seen. So we can't depend on the NAK interrupt + * handler to requeue a NAK'd request. Instead, IN requests + * are issued each time this function is called. When the + * transfer completes, the extra requests for the channel will + * be flushed. + */ + u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num)); + + dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", + hcchar); + dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num)); + chan->requests++; + return 1; + } + + /* OUT transfers */ + + if (chan->xfer_count < chan->xfer_len) { + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) { + u32 hcchar = dwc2_readl(hsotg->regs + + HCCHAR(chan->hc_num)); + + dwc2_hc_set_even_odd_frame(hsotg, chan, + &hcchar); + } + + /* Load OUT packet into the appropriate Tx FIFO */ + dwc2_hc_write_packet(hsotg, chan); + chan->requests++; + return 1; + } + + return 0; +} + +/* + * ========================================================================= + * HCD + * ========================================================================= + */ + +/* * Processes all the URBs in a single list of QHs. Completes them with * -ETIMEDOUT and frees the QTD. * @@ -164,6 +1743,9 @@ static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg, qtd_list_entry) dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); + if (qh->channel && qh->channel->qh == qh) + qh->channel->qh = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_hcd_qh_free(hsotg, qh); spin_lock_irqsave(&hsotg->lock, flags); @@ -268,15 +1850,33 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) } /** + * dwc2_hcd_connect() - Handles connect of the HCD + * + * @hsotg: Pointer to struct dwc2_hsotg + * + * Must be called with interrupt disabled and spinlock held + */ +void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) +{ + if (hsotg->lx_state != DWC2_L0) + usb_hcd_resume_root_hub(hsotg->priv); + + hsotg->flags.b.port_connect_status_change = 1; + hsotg->flags.b.port_connect_status = 1; +} + +/** * dwc2_hcd_disconnect() - Handles disconnect of the HCD * * @hsotg: Pointer to struct dwc2_hsotg + * @force: If true, we won't try to reconnect even if we see device connected. 
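The @force flag separates the two kinds of caller. Roughly, as representative call sites (the first lives in the interrupt code outside this file, so treat it as a sketch; the second appears verbatim in _dwc2_hcd_stop() later in this patch):

	/* Common interrupt path: normal disconnect; the connect re-check
	 * described below may immediately re-report a connection */
	dwc2_hcd_disconnect(hsotg, false);

	/* HCD teardown: we are going away, never try to reconnect */
	dwc2_hcd_disconnect(hsotg, true);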
* * Must be called with interrupt disabled and spinlock held */ -void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) +void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) { u32 intr; + u32 hprt0; /* Set status flags for the hub driver */ hsotg->flags.b.port_connect_status_change = 1; @@ -315,6 +1915,24 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) dwc2_hcd_cleanup_channels(hsotg); dwc2_host_disconnect(hsotg); + + /* + * Add an extra check here to see if we're actually connected but + * we don't have a detection interrupt pending. This can happen if: + * 1. hardware sees connect + * 2. hardware sees disconnect + * 3. hardware sees connect + * 4. dwc2_port_intr() - clears connect interrupt + * 5. dwc2_handle_common_intr() - calls here + * + * Without the extra check here we will end up calling disconnect + * and won't get any future interrupts to handle the connect. + */ + if (!force) { + hprt0 = dwc2_readl(hsotg->regs + HPRT0); + if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS)) + dwc2_hcd_connect(hsotg); + } }
/** @@ -518,7 +2136,12 @@ static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg, dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); ep->hcpriv = NULL; + + if (qh->channel && qh->channel->qh == qh) + qh->channel->qh = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); + dwc2_hcd_qh_free(hsotg, qh); return 0; @@ -544,6 +2167,224 @@ static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg, return 0; }
+/** + * dwc2_core_init() - Initializes the DWC_otg controller registers and + * prepares the core for device mode or host mode operation + * + * @hsotg: Programming view of the DWC_otg controller + * @initial_setup: If true then this is the first init for this instance. + */ +static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) +{ + u32 usbcfg, otgctl; + int retval; + + dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); + + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + + /* Set ULPI External VBUS bit if needed */ + usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; + if (hsotg->core_params->phy_ulpi_ext_vbus == + DWC2_PHY_ULPI_EXTERNAL_VBUS) + usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; + + /* Set external TS Dline pulsing bit if needed */ + usbcfg &= ~GUSBCFG_TERMSELDLPULSE; + if (hsotg->core_params->ts_dline > 0) + usbcfg |= GUSBCFG_TERMSELDLPULSE; + + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + + /* + * Reset the Controller + * + * We only need to reset the controller if this is a re-init. + * For the first init we know for sure that earlier code reset us (it + * needed to in order to properly detect various parameters).
+ */ + if (!initial_setup) { + retval = dwc2_core_reset_and_force_dr_mode(hsotg); + if (retval) { + dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", + __func__); + return retval; + } + } + + /* + * This needs to happen in FS mode before any other programming occurs + */ + retval = dwc2_phy_init(hsotg, initial_setup); + if (retval) + return retval; + + /* Program the GAHBCFG Register */ + retval = dwc2_gahbcfg_init(hsotg); + if (retval) + return retval; + + /* Program the GUSBCFG register */ + dwc2_gusbcfg_init(hsotg); + + /* Program the GOTGCTL register */ + otgctl = dwc2_readl(hsotg->regs + GOTGCTL); + otgctl &= ~GOTGCTL_OTGVER; + if (hsotg->core_params->otg_ver > 0) + otgctl |= GOTGCTL_OTGVER; + dwc2_writel(otgctl, hsotg->regs + GOTGCTL); + dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); + + /* Clear the SRP success bit for FS-I2c */ + hsotg->srp_success = 0; + + /* Enable common interrupts */ + dwc2_enable_common_interrupts(hsotg); + + /* + * Do device or host initialization based on mode during PCD and + * HCD initialization + */ + if (dwc2_is_host_mode(hsotg)) { + dev_dbg(hsotg->dev, "Host Mode\n"); + hsotg->op_state = OTG_STATE_A_HOST; + } else { + dev_dbg(hsotg->dev, "Device Mode\n"); + hsotg->op_state = OTG_STATE_B_PERIPHERAL; + } + + return 0; +} + +/** + * dwc2_core_host_init() - Initializes the DWC_otg controller registers for + * Host mode + * + * @hsotg: Programming view of DWC_otg controller + * + * This function flushes the Tx and Rx FIFOs and flushes any entries in the + * request queues. Host channels are reset to ensure that they are ready for + * performing transfers. + */ +static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) +{ + u32 hcfg, hfir, otgctl; + + dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); + + /* Restart the Phy Clock */ + dwc2_writel(0, hsotg->regs + PCGCTL); + + /* Initialize Host Configuration Register */ + dwc2_init_fs_ls_pclk_sel(hsotg); + if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg |= HCFG_FSLSSUPP; + dwc2_writel(hcfg, hsotg->regs + HCFG); + } + + /* + * This bit allows dynamic reloading of the HFIR register during + * runtime. This bit needs to be programmed during initial configuration + * and its value must not be changed during runtime. 
+ */ + if (hsotg->core_params->reload_ctl > 0) { + hfir = dwc2_readl(hsotg->regs + HFIR); + hfir |= HFIR_RLDCTRL; + dwc2_writel(hfir, hsotg->regs + HFIR); + } + + if (hsotg->core_params->dma_desc_enable > 0) { + u32 op_mode = hsotg->hw_params.op_mode; + + if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || + !hsotg->hw_params.dma_desc_enable || + op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || + op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || + op_mode == GHWCFG2_OP_MODE_UNDEFINED) { + dev_err(hsotg->dev, + "Hardware does not support descriptor DMA mode -\n"); + dev_err(hsotg->dev, + "falling back to buffer DMA mode.\n"); + hsotg->core_params->dma_desc_enable = 0; + } else { + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg |= HCFG_DESCDMA; + dwc2_writel(hcfg, hsotg->regs + HCFG); + } + } + + /* Configure data FIFO sizes */ + dwc2_config_fifos(hsotg); + + /* TODO - check this */ + /* Clear Host Set HNP Enable in the OTG Control Register */ + otgctl = dwc2_readl(hsotg->regs + GOTGCTL); + otgctl &= ~GOTGCTL_HSTSETHNPEN; + dwc2_writel(otgctl, hsotg->regs + GOTGCTL); + + /* Make sure the FIFOs are flushed */ + dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); + dwc2_flush_rx_fifo(hsotg); + + /* Clear Host Set HNP Enable in the OTG Control Register */ + otgctl = dwc2_readl(hsotg->regs + GOTGCTL); + otgctl &= ~GOTGCTL_HSTSETHNPEN; + dwc2_writel(otgctl, hsotg->regs + GOTGCTL); + + if (hsotg->core_params->dma_desc_enable <= 0) { + int num_channels, i; + u32 hcchar; + + /* Flush out any leftover queued requests */ + num_channels = hsotg->core_params->host_channels; + for (i = 0; i < num_channels; i++) { + hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); + hcchar &= ~HCCHAR_CHENA; + hcchar |= HCCHAR_CHDIS; + hcchar &= ~HCCHAR_EPDIR; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); + } + + /* Halt all channels to put them into a known state */ + for (i = 0; i < num_channels; i++) { + int count = 0; + + hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); + hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; + hcchar &= ~HCCHAR_EPDIR; + dwc2_writel(hcchar, hsotg->regs + HCCHAR(i)); + dev_dbg(hsotg->dev, "%s: Halt channel %d\n", + __func__, i); + do { + hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); + if (++count > 1000) { + dev_err(hsotg->dev, + "Unable to clear enable on channel %d\n", + i); + break; + } + udelay(1); + } while (hcchar & HCCHAR_CHENA); + } + } + + /* Turn on the vbus power */ + dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); + if (hsotg->op_state == OTG_STATE_A_HOST) { + u32 hprt0 = dwc2_read_hprt0(hsotg); + + dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", + !!(hprt0 & HPRT0_PWR)); + if (!(hprt0 & HPRT0_PWR)) { + hprt0 |= HPRT0_PWR; + dwc2_writel(hprt0, hsotg->regs + HPRT0); + } + } + + dwc2_enable_host_interrupts(hsotg); +} + /* * Initializes dynamic portions of the DWC_otg HCD state * @@ -599,9 +2440,9 @@ static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg, chan->hub_port = (u8)hub_port; } -static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, - struct dwc2_host_chan *chan, - struct dwc2_qtd *qtd, void *bufptr) +static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan, + struct dwc2_qtd *qtd) { struct dwc2_hcd_urb *urb = qtd->urb; struct dwc2_hcd_iso_packet_desc *frame_desc; @@ -621,7 +2462,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, else chan->xfer_buf = urb->setup_packet; chan->xfer_len = 8; - bufptr = NULL; break; case DWC2_CONTROL_DATA: @@ -648,7 +2488,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->xfer_dma = hsotg->status_buf_dma; else chan->xfer_buf = hsotg->status_buf; - bufptr = NULL; break; } break; @@ -681,14 +2520,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->xfer_len = frame_desc->length - qtd->isoc_split_offset; - /* For non-dword aligned buffers */ - if (hsotg->core_params->dma_enable > 0 && - (chan->xfer_dma & 0x3)) - bufptr = (u8 *)urb->buf + frame_desc->offset + - qtd->isoc_split_offset; - else - bufptr = NULL; - if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) { if (chan->xfer_len <= 188) chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL; @@ -697,63 +2528,93 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, } break; } +} + +#define DWC2_USB_DMA_ALIGN 4 + +struct dma_aligned_buffer { + void *kmalloc_ptr; + void *old_xfer_buffer; + u8 data[0]; +}; + +static void dwc2_free_dma_aligned_buffer(struct urb *urb) +{ + struct dma_aligned_buffer *temp; + + if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) + return; - return bufptr; + temp = container_of(urb->transfer_buffer, + struct dma_aligned_buffer, data); + + if (usb_urb_dir_in(urb)) + memcpy(temp->old_xfer_buffer, temp->data, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->old_xfer_buffer; + kfree(temp->kmalloc_ptr); + + urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; } -static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, - struct dwc2_host_chan *chan, - struct dwc2_hcd_urb *urb, void *bufptr) +static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) { - u32 buf_size; - struct urb *usb_urb; - struct usb_hcd *hcd; + struct dma_aligned_buffer *temp, *kmalloc_ptr; + size_t kmalloc_size; - if (!qh->dw_align_buf) { - if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) - buf_size = hsotg->core_params->max_transfer_size; - else - /* 3072 = 3 max-size Isoc packets */ - buf_size = 3072; + if (urb->num_sgs || urb->sg || + urb->transfer_buffer_length == 0 || + !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) + return 0; - qh->dw_align_buf = kmalloc(buf_size, GFP_ATOMIC | GFP_DMA); - if (!qh->dw_align_buf) - return -ENOMEM; - qh->dw_align_buf_size = buf_size; - } + /* Allocate a buffer with enough padding for alignment */ + kmalloc_size = urb->transfer_buffer_length + + sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; - if (chan->xfer_len) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); - usb_urb = urb->priv; + kmalloc_ptr = 
kmalloc(kmalloc_size, mem_flags); + if (!kmalloc_ptr) + return -ENOMEM; - if (usb_urb) { - if (usb_urb->transfer_flags & - (URB_SETUP_MAP_SINGLE | URB_DMA_MAP_SG | - URB_DMA_MAP_PAGE | URB_DMA_MAP_SINGLE)) { - hcd = dwc2_hsotg_to_hcd(hsotg); - usb_hcd_unmap_urb_for_dma(hcd, usb_urb); - } - if (!chan->ep_is_in) - memcpy(qh->dw_align_buf, bufptr, - chan->xfer_len); - } else { - dev_warn(hsotg->dev, "no URB in dwc2_urb\n"); - } - } + /* Position our struct dma_aligned_buffer such that data is aligned */ + temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; + temp->kmalloc_ptr = kmalloc_ptr; + temp->old_xfer_buffer = urb->transfer_buffer; + if (usb_urb_dir_out(urb)) + memcpy(temp->data, urb->transfer_buffer, + urb->transfer_buffer_length); + urb->transfer_buffer = temp->data; - qh->dw_align_buf_dma = dma_map_single(hsotg->dev, - qh->dw_align_buf, qh->dw_align_buf_size, - chan->ep_is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) { - dev_err(hsotg->dev, "can't map align_buf\n"); - chan->align_buf = 0; - return -EINVAL; - } + urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; - chan->align_buf = qh->dw_align_buf_dma; return 0; } +static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + int ret; + + /* We assume setup_dma is always aligned; warn if not */ + WARN_ON_ONCE(urb->setup_dma && + (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1))); + + ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags); + if (ret) + return ret; + + ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); + if (ret) + dwc2_free_dma_aligned_buffer(urb); + + return ret; +} + +static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ + usb_hcd_unmap_urb_for_dma(hcd, urb); + dwc2_free_dma_aligned_buffer(urb); +} + /** * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host * channel and initializes the host channel to perform the transactions. 
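The pointer arithmetic in dwc2_alloc_dma_aligned_buffer() above is the standard over-allocate-and-align idiom. A self-contained sketch of just that idiom, with illustrative struct and function names (the in-tree version additionally saves old_xfer_buffer for the copy-back on IN completion):

	#include <linux/kernel.h>	/* PTR_ALIGN() */
	#include <linux/slab.h>

	struct aligned_hdr {
		void *kmalloc_ptr;	/* original allocation, kept for kfree() */
		u8 data[0];		/* payload begins here */
	};

	/* Return a header whose data[] is DWC2_USB_DMA_ALIGN-byte aligned */
	static struct aligned_hdr *alloc_aligned(size_t len, gfp_t flags)
	{
		void *raw = kmalloc(sizeof(struct aligned_hdr) + len +
				    DWC2_USB_DMA_ALIGN - 1, flags);
		struct aligned_hdr *hdr;

		if (!raw)
			return NULL;

		/*
		 * Step past one header, round up to the alignment boundary,
		 * then back up one header: data[] lands exactly on the
		 * boundary and the original pointer stays recoverable.
		 */
		hdr = PTR_ALIGN((struct aligned_hdr *)raw + 1,
				DWC2_USB_DMA_ALIGN) - 1;
		hdr->kmalloc_ptr = raw;
		return hdr;
	}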
The @@ -768,7 +2629,6 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) struct dwc2_host_chan *chan; struct dwc2_hcd_urb *urb; struct dwc2_qtd *qtd; - void *bufptr = NULL; if (dbg_qh(qh)) dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh); @@ -830,16 +2690,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) !dwc2_hcd_is_pipe_in(&urb->pipe_info)) urb->actual_length = urb->length; - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->core_params->dma_enable > 0) chan->xfer_dma = urb->dma + urb->actual_length; - - /* For non-dword aligned case */ - if (hsotg->core_params->dma_desc_enable <= 0 && - (chan->xfer_dma & 0x3)) - bufptr = (u8 *)urb->buf + urb->actual_length; - } else { + else chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; - } chan->xfer_len = urb->length - urb->actual_length; chan->xfer_count = 0; @@ -851,27 +2705,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) chan->do_split = 0; /* Set the transfer attributes */ - bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr); - - /* Non DWORD-aligned buffer case */ - if (bufptr) { - dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); - if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) { - dev_err(hsotg->dev, - "%s: Failed to allocate memory to handle non-dword aligned buffer\n", - __func__); - /* Add channel back to free list */ - chan->align_buf = 0; - chan->multi_count = 0; - list_add_tail(&chan->hc_list_entry, - &hsotg->free_hc_list); - qtd->in_process = 0; - qh->channel = NULL; - return -ENOMEM; - } - } else { - chan->align_buf = 0; - } + dwc2_hc_init_xfer(hsotg, chan, qtd); if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) @@ -881,8 +2715,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) */ chan->multi_count = dwc2_hb_mult(qh->maxp); - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->core_params->dma_desc_enable > 0) { chan->desc_list_addr = qh->desc_list_dma; + chan->desc_list_sz = qh->desc_list_sz; + } dwc2_hc_init(hsotg, chan); chan->qh = qh; @@ -930,7 +2766,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( * periodic assigned schedule */ qh_ptr = qh_ptr->next; - list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned); + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_assigned); ret_val = DWC2_TRANSACTION_PERIODIC; } @@ -963,8 +2800,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( * non-periodic active schedule */ qh_ptr = qh_ptr->next; - list_move(&qh->qh_list_entry, - &hsotg->non_periodic_sched_active); + list_move_tail(&qh->qh_list_entry, + &hsotg->non_periodic_sched_active); if (ret_val == DWC2_TRANSACTION_NONE) ret_val = DWC2_TRANSACTION_NON_PERIODIC; @@ -1005,6 +2842,11 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg, { int retval = 0; + if (chan->do_split) + /* Put ourselves on the list to keep order straight */ + list_move_tail(&chan->split_order_list_entry, + &hsotg->split_order); + if (hsotg->core_params->dma_enable > 0) { if (hsotg->core_params->dma_desc_enable > 0) { if (!chan->xfer_started || @@ -1064,10 +2906,14 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) u32 fspcavail; u32 gintmsk; int status; - int no_queue_space = 0; - int no_fifo_space = 0; + bool no_queue_space = false; + bool no_fifo_space = false; u32 qspcavail; + /* If empty list then just adjust interrupt enables */ + if (list_empty(&hsotg->periodic_sched_assigned)) + 
goto exit; + if (dbg_perio()) dev_vdbg(hsotg->dev, "Queue periodic transactions\n"); @@ -1137,50 +2983,40 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) * Move the QH from the periodic assigned schedule to * the periodic queued schedule */ - list_move(&qh->qh_list_entry, - &hsotg->periodic_sched_queued); + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_queued); /* done queuing high bandwidth */ hsotg->queuing_high_bandwidth = 0; } } - if (hsotg->core_params->dma_enable <= 0) { - tx_status = dwc2_readl(hsotg->regs + HPTXSTS); - qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> - TXSTS_QSPCAVAIL_SHIFT; - fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> - TXSTS_FSPCAVAIL_SHIFT; - if (dbg_perio()) { - dev_vdbg(hsotg->dev, - " P Tx Req Queue Space Avail (after queue): %d\n", - qspcavail); - dev_vdbg(hsotg->dev, - " P Tx FIFO Space Avail (after queue): %d\n", - fspcavail); - } - - if (!list_empty(&hsotg->periodic_sched_assigned) || - no_queue_space || no_fifo_space) { - /* - * May need to queue more transactions as the request - * queue or Tx FIFO empties. Enable the periodic Tx - * FIFO empty interrupt. (Always use the half-empty - * level to ensure that new requests are loaded as - * soon as possible.) - */ - gintmsk = dwc2_readl(hsotg->regs + GINTMSK); +exit: + if (no_queue_space || no_fifo_space || + (hsotg->core_params->dma_enable <= 0 && + !list_empty(&hsotg->periodic_sched_assigned))) { + /* + * May need to queue more transactions as the request + * queue or Tx FIFO empties. Enable the periodic Tx + * FIFO empty interrupt. (Always use the half-empty + * level to ensure that new requests are loaded as + * soon as possible.) + */ + gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + if (!(gintmsk & GINTSTS_PTXFEMP)) { gintmsk |= GINTSTS_PTXFEMP; dwc2_writel(gintmsk, hsotg->regs + GINTMSK); - } else { - /* - * Disable the Tx FIFO empty interrupt since there are - * no more transactions that need to be queued right - * now. This function is called from interrupt - * handlers to queue more transactions as transfer - * states change. - */ - gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + } + } else { + /* + * Disable the Tx FIFO empty interrupt since there are + * no more transactions that need to be queued right + * now. This function is called from interrupt + * handlers to queue more transactions as transfer + * states change. 
+ */ + gintmsk = dwc2_readl(hsotg->regs + GINTMSK); + if (gintmsk & GINTSTS_PTXFEMP) { gintmsk &= ~GINTSTS_PTXFEMP; dwc2_writel(gintmsk, hsotg->regs + GINTMSK); } @@ -1327,9 +3163,8 @@ void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, dev_vdbg(hsotg->dev, "Queue Transactions\n"); #endif /* Process host channels associated with periodic transfers */ - if ((tr_type == DWC2_TRANSACTION_PERIODIC || - tr_type == DWC2_TRANSACTION_ALL) && - !list_empty(&hsotg->periodic_sched_assigned)) + if (tr_type == DWC2_TRANSACTION_PERIODIC || + tr_type == DWC2_TRANSACTION_ALL) dwc2_process_periodic_channels(hsotg); /* Process host channels associated with non-periodic transfers */ @@ -1382,7 +3217,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work) dev_err(hsotg->dev, "Connection id status change timed out\n"); hsotg->op_state = OTG_STATE_B_PERIPHERAL; - dwc2_core_init(hsotg, false, -1); + dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); spin_lock_irqsave(&hsotg->lock, flags); dwc2_hsotg_core_init_disconnected(hsotg, false); @@ -1405,7 +3240,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work) hsotg->op_state = OTG_STATE_A_HOST; /* Initialize the Core for Host mode */ - dwc2_core_init(hsotg, false, -1); + dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); dwc2_hcd_start(hsotg); } @@ -1734,6 +3569,28 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, port_status |= USB_PORT_STAT_TEST; /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ + if (hsotg->core_params->dma_desc_fs_enable) { + /* + * Enable descriptor DMA only if a full speed + * device is connected. + */ + if (hsotg->new_connection && + ((port_status & + (USB_PORT_STAT_CONNECTION | + USB_PORT_STAT_HIGH_SPEED | + USB_PORT_STAT_LOW_SPEED)) == + USB_PORT_STAT_CONNECTION)) { + u32 hcfg; + + dev_info(hsotg->dev, "Enabling descriptor DMA mode\n"); + hsotg->core_params->dma_desc_enable = 1; + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg |= HCFG_DESCDMA; + dwc2_writel(hcfg, hsotg->regs + HCFG); + hsotg->new_connection = false; + } + } + dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status); *(__le32 *)buf = cpu_to_le32(port_status); break; @@ -1887,6 +3744,35 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; } +int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us) +{ + u32 hprt = dwc2_readl(hsotg->regs + HPRT0); + u32 hfir = dwc2_readl(hsotg->regs + HFIR); + u32 hfnum = dwc2_readl(hsotg->regs + HFNUM); + unsigned int us_per_frame; + unsigned int frame_number; + unsigned int remaining; + unsigned int interval; + unsigned int phy_clks; + + /* High speed has 125 us per (micro) frame; others are 1 ms per */ + us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125; + + /* Extract fields */ + frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; + remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT; + interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT; + + /* + * Number of phy clocks since the last tick of the frame number after + * "us" has passed. 
+ */ + phy_clks = (interval - remaining) + + DIV_ROUND_UP(interval * us, us_per_frame); + + return dwc2_frame_num_inc(frame_number, phy_clks / interval); +} + int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg) { return hsotg->op_state == OTG_STATE_B_HOST; @@ -2163,6 +4049,90 @@ void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, *hub_port = urb->dev->ttport; } +/** + * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context + * + * This will get the dwc2_tt structure (and ttport) associated with the given + * context (which is really just a struct urb pointer). + * + * The first time this is called for a given TT we allocate memory for our + * structure. When everyone is done and has called dwc2_host_put_tt_info() + * then the refcount for the structure will go to 0 and we'll free it. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: The QH structure. + * @context: The priv pointer from a struct dwc2_hcd_urb. + * @mem_flags: Flags for allocating memory. + * @ttport: We'll return this device's port number here. That's used to + * reference into the bitmap if we're on a multi_tt hub. + * + * Return: a pointer to a struct dwc2_tt. Don't forget to call + * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure. + */ + +struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context, + gfp_t mem_flags, int *ttport) +{ + struct urb *urb = context; + struct dwc2_tt *dwc_tt = NULL; + + if (urb->dev->tt) { + *ttport = urb->dev->ttport; + + dwc_tt = urb->dev->tt->hcpriv; + if (dwc_tt == NULL) { + size_t bitmap_size; + + /* + * For single_tt we need one schedule. For multi_tt + * we need one per port. + */ + bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP * + sizeof(dwc_tt->periodic_bitmaps[0]); + if (urb->dev->tt->multi) + bitmap_size *= urb->dev->tt->hub->maxchild; + + dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size, + mem_flags); + if (dwc_tt == NULL) + return NULL; + + dwc_tt->usb_tt = urb->dev->tt; + dwc_tt->usb_tt->hcpriv = dwc_tt; + } + + dwc_tt->refcount++; + } + + return dwc_tt; +} + +/** + * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info() + * + * Frees resources allocated by dwc2_host_get_tt_info() if all current holders + * of the structure are done. + * + * It's OK to call this with NULL. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @dwc_tt: The pointer returned by dwc2_host_get_tt_info. 
+ */ +void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt) +{ + /* Model kfree and make put of NULL a no-op */ + if (dwc_tt == NULL) + return; + + WARN_ON(dwc_tt->refcount < 1); + + dwc_tt->refcount--; + if (!dwc_tt->refcount) { + dwc_tt->usb_tt->hcpriv = NULL; + kfree(dwc_tt); + } +} + int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) { struct urb *urb = context; @@ -2274,9 +4244,7 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, kfree(qtd->urb); qtd->urb = NULL; - spin_unlock(&hsotg->lock); usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); - spin_lock(&hsotg->lock); } /* @@ -2298,13 +4266,19 @@ static void dwc2_hcd_reset_func(struct work_struct *work) { struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, reset_work.work); + unsigned long flags; u32 hprt0; dev_dbg(hsotg->dev, "USB RESET function called\n"); + + spin_lock_irqsave(&hsotg->lock, flags); + hprt0 = dwc2_read_hprt0(hsotg); hprt0 &= ~HPRT0_RST; dwc2_writel(hprt0, hsotg->regs + HPRT0); hsotg->flags.b.port_reset_change = 1; + + spin_unlock_irqrestore(&hsotg->lock, flags); } /* @@ -2366,7 +4340,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd) spin_lock_irqsave(&hsotg->lock, flags); /* Ensure hcd is disconnected */ - dwc2_hcd_disconnect(hsotg); + dwc2_hcd_disconnect(hsotg, true); dwc2_hcd_stop(hsotg); hsotg->lx_state = DWC2_L3; hcd->state = HC_STATE_HALT; @@ -2723,6 +4697,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, fail3: dwc2_urb->priv = NULL; usb_hcd_unlink_urb_from_ep(hcd, urb); + if (qh_allocated && qh->channel && qh->channel->qh == qh) + qh->channel->qh = NULL; fail2: spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; @@ -2889,7 +4865,7 @@ static struct hc_driver dwc2_hc_driver = { .hcd_priv_size = sizeof(struct wrapper_priv_data), .irq = _dwc2_hcd_irq, - .flags = HCD_MEMORY | HCD_USB2, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, .start = _dwc2_hcd_start, .stop = _dwc2_hcd_stop, @@ -2905,6 +4881,9 @@ static struct hc_driver dwc2_hc_driver = { .bus_suspend = _dwc2_hcd_suspend, .bus_resume = _dwc2_hcd_resume, + + .map_urb_for_dma = dwc2_map_urb_for_dma, + .unmap_urb_for_dma = dwc2_unmap_urb_for_dma, }; /* @@ -3015,8 +4994,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); if (!hsotg->last_frame_num_array) goto error1; - hsotg->last_frame_num = HFNUM_MAX_FRNUM; #endif + hsotg->last_frame_num = HFNUM_MAX_FRNUM; /* Check if the bus driver or platform code has setup a dma_mask */ if (hsotg->core_params->dma_enable > 0 && @@ -3054,7 +5033,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) dwc2_disable_global_interrupts(hsotg); /* Initialize the DWC_otg core, and select the Phy type */ - retval = dwc2_core_init(hsotg, true, irq); + retval = dwc2_core_init(hsotg, true); if (retval) goto error2; @@ -3080,6 +5059,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); INIT_LIST_HEAD(&hsotg->periodic_sched_queued); + INIT_LIST_HEAD(&hsotg->split_order); + /* * Create a host channel descriptor for each host channel implemented * in the controller. Initialize the channel descriptor array. 
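Typical lifetime of a dwc2_tt reference taken via dwc2_host_get_tt_info() above, as the periodic scheduler would use it (a sketch: the qh->dwc_tt and qh->ttport names are assumptions about the scheduler side of this series, and urb here is a struct dwc2_hcd_urb):

	/* Setting up a QH that sits behind a hub's TT: take a reference */
	qh->dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv, mem_flags,
					   &qh->ttport);

	/* ... consult qh->dwc_tt->periodic_bitmaps[] while scheduling ... */

	/* Tearing the QH down: NULL-safe put; the last put frees the struct */
	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
	qh->dwc_tt = NULL;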
@@ -3093,12 +5074,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 		if (channel == NULL)
 			goto error3;
 		channel->hc_num = i;
+		INIT_LIST_HEAD(&channel->split_order_list_entry);
 		hsotg->hc_ptr_array[i] = channel;
 	}
 
-	if (hsotg->core_params->uframe_sched > 0)
-		dwc2_hcd_init_usecs(hsotg);
-
 	/* Initialize hsotg start work */
 	INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
 
@@ -3122,6 +5101,47 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	if (!hsotg->status_buf)
 		goto error3;
 
+	/*
+	 * Create kmem caches to handle descriptor buffers in descriptor
+	 * DMA mode.
+	 * Alignment must be set to 512 bytes.
+	 */
+	if (hsotg->core_params->dma_desc_enable ||
+	    hsotg->core_params->dma_desc_fs_enable) {
+		hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
+				sizeof(struct dwc2_hcd_dma_desc) *
+				MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
+				NULL);
+		if (!hsotg->desc_gen_cache) {
+			dev_err(hsotg->dev,
+				"unable to create dwc2 generic desc cache\n");
+
+			/*
+			 * Disable descriptor dma mode since it will not be
+			 * usable.
+			 */
+			hsotg->core_params->dma_desc_enable = 0;
+			hsotg->core_params->dma_desc_fs_enable = 0;
+		}
+
+		hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
+				sizeof(struct dwc2_hcd_dma_desc) *
+				MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
+		if (!hsotg->desc_hsisoc_cache) {
+			dev_err(hsotg->dev,
+				"unable to create dwc2 hs isoc desc cache\n");
+
+			kmem_cache_destroy(hsotg->desc_gen_cache);
+
+			/*
+			 * Disable descriptor dma mode since it will not be
+			 * usable.
+			 */
+			hsotg->core_params->dma_desc_enable = 0;
+			hsotg->core_params->dma_desc_fs_enable = 0;
+		}
+	}
+
 	hsotg->otg_port = 1;
 	hsotg->frame_list = NULL;
 	hsotg->frame_list_dma = 0;
@@ -3145,7 +5165,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	 */
 	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (retval < 0)
-		goto error3;
+		goto error4;
 
 	device_wakeup_enable(hcd->self.controller);
 
@@ -3155,6 +5175,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 
 	return 0;
 
+error4:
+	kmem_cache_destroy(hsotg->desc_gen_cache);
+	kmem_cache_destroy(hsotg->desc_hsisoc_cache);
 error3:
 	dwc2_hcd_release(hsotg);
 error2:
@@ -3195,6 +5218,10 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
 
 	usb_remove_hcd(hcd);
 	hsotg->priv = NULL;
+
+	kmem_cache_destroy(hsotg->desc_gen_cache);
+	kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+
 	dwc2_hcd_release(hsotg);
 	usb_put_hcd(hcd);
 
@@ -3203,3 +5230,67 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
 	kfree(hsotg->frame_num_array);
 #endif
 }
+
+/**
+ * dwc2_backup_host_registers() - Backup controller host registers.
+ * When suspending the USB bus, registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_hregs_backup *hr;
+	int i;
+
+	dev_dbg(hsotg->dev, "%s\n", __func__);
+
+	/* Backup Host regs */
+	hr = &hsotg->hr_backup;
+	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
+	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+	for (i = 0; i < hsotg->core_params->host_channels; ++i)
+		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
+
+	hr->hprt0 = dwc2_read_hprt0(hsotg);
+	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
+	hr->valid = true;
+
+	return 0;
+}
+
+/**
+ * dwc2_restore_host_registers() - Restore controller host registers.
+ * When resuming the USB bus, device registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_hregs_backup *hr;
+	int i;
+
+	dev_dbg(hsotg->dev, "%s\n", __func__);
+
+	/* Restore host regs */
+	hr = &hsotg->hr_backup;
+	if (!hr->valid) {
+		dev_err(hsotg->dev, "%s: no host registers to restore\n",
+			__func__);
+		return -EINVAL;
+	}
+	hr->valid = false;
+
+	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
+	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
+
+	for (i = 0; i < hsotg->core_params->host_channels; ++i)
+		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
+
+	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
+	dwc2_writel(hr->hfir, hsotg->regs + HFIR);
+	hsotg->frame_number = 0;
+
+	return 0;
+}
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index f105bada2fd1..89fa26cb25f4 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -75,8 +75,6 @@ struct dwc2_qh;
 *                      (micro)frame
 * @xfer_buf:           Pointer to current transfer buffer position
 * @xfer_dma:           DMA address of xfer_buf
- * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
- *                      DWORD aligned
 * @xfer_len:           Total number of bytes to transfer
 * @xfer_count:         Number of bytes transferred so far
 * @start_pkt_count:    Packet count at start of transfer
@@ -107,6 +105,8 @@ struct dwc2_qh;
 * @qh:                 QH for the transfer being processed by this channel
 * @hc_list_entry:      For linking to list of host channels
 * @desc_list_addr:     Current QH's descriptor list DMA address
+ * @desc_list_sz:       Current QH's descriptor list size
+ * @split_order_list_entry: List entry for keeping track of the order of splits
 *
 * This structure represents the state of a single host channel when acting in
 * host mode. It contains the data items needed to transfer packets to an
@@ -132,7 +132,6 @@ struct dwc2_host_chan {
 
 	u8 *xfer_buf;
 	dma_addr_t xfer_dma;
-	dma_addr_t align_buf;
 	u32 xfer_len;
 	u32 xfer_count;
 	u16 start_pkt_count;
@@ -159,6 +158,8 @@ struct dwc2_host_chan {
 	struct dwc2_qh *qh;
 	struct list_head hc_list_entry;
 	dma_addr_t desc_list_addr;
+	u32 desc_list_sz;
+	struct list_head split_order_list_entry;
 };
 
 struct dwc2_hcd_pipe_info {
@@ -211,9 +212,47 @@ enum dwc2_transaction_type {
 	DWC2_TRANSACTION_ALL,
 };
 
+/* The number of elements per LS bitmap (per port on multi_tt) */
+#define DWC2_ELEMENTS_PER_LS_BITMAP	DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
+						     BITS_PER_LONG)
+
+/**
+ * struct dwc2_tt - dwc2 data associated with a usb_tt
+ *
+ * @refcount:           Number of Queue Heads (QHs) holding a reference.
+ * @usb_tt:             Pointer back to the official usb_tt.
+ * @periodic_bitmaps:   Bitmap for which parts of the 1ms frame are accounted
+ *                      for already. Each is DWC2_ELEMENTS_PER_LS_BITMAP
+ *                      elements (so sizeof(long) times that in bytes).
+ *
+ * This structure is stored in the hcpriv of the official usb_tt.
+ */
+struct dwc2_tt {
+	int refcount;
+	struct usb_tt *usb_tt;
+	unsigned long periodic_bitmaps[];
+};
+
+/**
+ * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
+ *
+ * @start_schedule_us:  The start time on the main bus schedule. Note that
+ *                      the main bus schedule is tightly packed and this
+ *                      time should be interpreted as tightly packed (so
+ *                      uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
+ *                      instead of 125 us).
+ * @duration_us:        How long this transfer takes, in microseconds.
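+ *
+ * A worked example of the packed convention (numbers illustrative, not
+ * from the patch): start_schedule_us = 250 decodes as uFrame 2 plus
+ * 50 us of packed schedule time, since each uFrame owns 100 us of
+ * schedule time rather than its 125 us of wire time.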
+ */
+
+struct dwc2_hs_transfer_time {
+	u32 start_schedule_us;
+	u16 duration_us;
+};
+
 /**
 * struct dwc2_qh - Software queue head structure
 *
+ * @hsotg:              The HCD state structure for the DWC OTG controller
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL
 *                       - USB_ENDPOINT_XFER_BULK
@@ -234,33 +273,59 @@ enum dwc2_transaction_type {
 * @do_split:           Full/low speed endpoint on high-speed hub requires split
 * @td_first:           Index of first activated isochronous transfer descriptor
 * @td_last:            Index of last activated isochronous transfer descriptor
- * @usecs:              Bandwidth in microseconds per (micro)frame
- * @interval:           Interval between transfers in (micro)frames
- * @sched_frame:        (Micro)frame to initialize a periodic transfer.
- *                      The transfer executes in the following (micro)frame.
- * @frame_usecs:        Internal variable used by the microframe scheduler
- * @start_split_frame:  (Micro)frame at which last start split was initialized
+ * @host_us:            Bandwidth in microseconds per transfer as seen by host
+ * @device_us:          Bandwidth in microseconds per transfer as seen by device
+ * @host_interval:      Interval between transfers as seen by the host. If
+ *                      the host is high speed and the device is low speed this
+ *                      will be 8 times device interval.
+ * @device_interval:    Interval between transfers as seen by the device.
+ * @next_active_frame:  (Micro)frame _before_ we next need to put something on
+ *                      the bus. We'll move the qh to active here. If the
+ *                      host is in high speed mode this will be a uframe. If
+ *                      the host is in low speed mode this will be a full frame.
+ * @start_active_frame: If we are partway through a split transfer, this will be
+ *                      what next_active_frame was when we started. Otherwise
+ *                      it should always be the same as next_active_frame.
+ * @num_hs_transfers:   Number of transfers in hs_transfers.
+ *                      Normally this is 1 but can be more than one for splits.
+ *                      Always >= 1 unless the host is in low/full speed mode.
+ * @hs_transfers:       Transfers that are scheduled as seen by the high speed
+ *                      bus. Not used if host is in low or full speed mode (but
+ *                      note that it IS USED if the device is low or full speed
+ *                      as long as the HOST is in high speed mode).
+ * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
+ *                      schedule that's being used by this device. This
+ *                      will be on the periodic_bitmap in a
+ *                      "struct dwc2_tt". Not used if this device is high
+ *                      speed. Note that this is in "schedule slice" which
+ *                      is tightly packed.
+ * @ls_duration_us:     Duration on the low speed bus schedule.
 * @ntd:                Actual number of transfer descriptors in a list
- * @dw_align_buf:       Used instead of original buffer if its physical address
- *                      is not dword-aligned
- * @dw_align_buf_size:  Size of dw_align_buf
- * @dw_align_buf_dma:   DMA address for dw_align_buf
 * @qtd_list:           List of QTDs for this QH
 * @channel:            Host channel currently processing transfers for this QH
 * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
 *                      schedule
 * @desc_list:          List of transfer descriptors
 * @desc_list_dma:      Physical address of desc_list
+ * @desc_list_sz:       Size of descriptors list
 * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
 *                      descriptor and indicates original XferSize value for the
 *                      descriptor
+ * @unreserve_timer:    Timer for releasing periodic reservation.
+ * @dwc_tt:             Pointer to our tt info (or NULL if no tt).
+ * @ttport:             Port number within our tt.
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending + * @unreserve_pending: True if we planned to unreserve but haven't yet. + * @schedule_low_speed: True if we have a low/full speed component (either the + * host is in low/full speed mode or do_split). * * A Queue Head (QH) holds the static characteristics of an endpoint and * maintains a list of transfers (QTDs) for that endpoint. A QH structure may * be entered in either the non-periodic or periodic schedule. */ struct dwc2_qh { + struct dwc2_hsotg *hsotg; u8 ep_type; u8 ep_is_in; u16 maxp; @@ -270,22 +335,29 @@ struct dwc2_qh { u8 do_split; u8 td_first; u8 td_last; - u16 usecs; - u16 interval; - u16 sched_frame; - u16 frame_usecs[8]; - u16 start_split_frame; + u16 host_us; + u16 device_us; + u16 host_interval; + u16 device_interval; + u16 next_active_frame; + u16 start_active_frame; + s16 num_hs_transfers; + struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES]; + u32 ls_start_schedule_slice; u16 ntd; - u8 *dw_align_buf; - int dw_align_buf_size; - dma_addr_t dw_align_buf_dma; struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; struct dwc2_hcd_dma_desc *desc_list; dma_addr_t desc_list_dma; + u32 desc_list_sz; u32 *n_bytes; + struct timer_list unreserve_timer; + struct dwc2_tt *dwc_tt; + int ttport; unsigned tt_buffer_dirty:1; + unsigned unreserve_pending:1; + unsigned schedule_low_speed:1; }; /** @@ -340,6 +412,8 @@ struct dwc2_qtd { u8 isoc_split_pos; u16 isoc_frame_index; u16 isoc_split_offset; + u16 isoc_td_last; + u16 isoc_td_first; u32 ssplit_out_xfer_count; u8 error_count; u8 n_desc; @@ -356,6 +430,8 @@ struct hc_xfer_info { }; #endif +u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg); + /* Gets the struct usb_hcd that contains a struct dwc2_hsotg */ static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg) { @@ -377,17 +453,11 @@ static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr) dwc2_writel(mask, hsotg->regs + HCINTMSK(chnum)); } -/* - * Returns the mode of operation, host or device - */ -static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg) -{ - return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0; -} -static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg) -{ - return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0; -} +void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan); +void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, + enum dwc2_halt_status halt_status); +void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, + struct dwc2_host_chan *chan); /* * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they @@ -462,7 +532,6 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, /* Schedule Queue Functions */ /* Implemented in hcd_queue.c */ -extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, gfp_t mem_flags); @@ -535,6 +604,19 @@ static inline bool dbg_perio(void) { return false; } #define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff) /* + * Returns true if frame1 index is greater than frame2 index. The comparison + * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the + * frame number when the max index frame number is reached. 
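+ *
+ * Worked examples (indices illustrative, not from the patch; with the
+ * 64-entry frame list, FRLISTEN_64_SIZE >> 1 makes bit 5, i.e. 32, the
+ * "sign" bit tested below):
+ *   fr_idx1 = 30, fr_idx2 = 10: diff = 20,     bit 5 clear -> true
+ *   fr_idx1 = 10, fr_idx2 = 30: diff = 0xffec, bit 5 set   -> false
+ *   fr_idx1 = 3,  fr_idx2 = 62: diff = 0xffc5, bit 5 clear -> true (wrapped)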
+ */ +static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2) +{ + u16 diff = fr_idx1 - fr_idx2; + u16 sign = diff & (FRLISTEN_64_SIZE >> 1); + + return diff && !sign; +} + +/* * Returns true if frame1 is less than or equal to frame2. The comparison is * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the * frame number when the max frame number is reached. @@ -564,6 +646,11 @@ static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc) return (frame + inc) & HFNUM_MAX_FRNUM; } +static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec) +{ + return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM; +} + static inline u16 dwc2_full_frame_num(u16 frame) { return (frame & HFNUM_MAX_FRNUM) >> 3; @@ -641,7 +728,7 @@ static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg, return 0; } - return qh->usecs; + return qh->host_us; } extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, @@ -710,6 +797,12 @@ extern void dwc2_host_start(struct dwc2_hsotg *hsotg); extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg); extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, int *hub_port); +extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, + void *context, gfp_t mem_flags, + int *ttport); + +extern void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, + struct dwc2_tt *dwc_tt); extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context); extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, int status); @@ -732,7 +825,7 @@ do { \ _qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd, \ qtd_list_entry); \ if (usb_pipeint(_qtd_->urb->pipe) && \ - (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) { \ + (_qh_)->start_active_frame != 0 && !_qtd_->complete_split) { \ _hfnum_.d32 = dwc2_readl((_hcd_)->regs + HFNUM); \ switch (_hfnum_.b.frnum & 0x7) { \ case 7: \ diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c index 78993aba9335..0e1d42b5dec5 100644 --- a/drivers/usb/dwc2/hcd_ddma.c +++ b/drivers/usb/dwc2/hcd_ddma.c @@ -81,28 +81,37 @@ static u16 dwc2_max_desc_num(struct dwc2_qh *qh) static u16 dwc2_frame_incr_val(struct dwc2_qh *qh) { return qh->dev_speed == USB_SPEED_HIGH ? 
- (qh->interval + 8 - 1) / 8 : qh->interval; + (qh->host_interval + 8 - 1) / 8 : qh->host_interval; } static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, gfp_t flags) { - qh->desc_list = dma_alloc_coherent(hsotg->dev, - sizeof(struct dwc2_hcd_dma_desc) * - dwc2_max_desc_num(qh), &qh->desc_list_dma, - flags); + struct kmem_cache *desc_cache; + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC + && qh->dev_speed == USB_SPEED_HIGH) + desc_cache = hsotg->desc_hsisoc_cache; + else + desc_cache = hsotg->desc_gen_cache; + + qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) * + dwc2_max_desc_num(qh); + + qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA); if (!qh->desc_list) return -ENOMEM; - memset(qh->desc_list, 0, - sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh)); + qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list, + qh->desc_list_sz, + DMA_TO_DEVICE); qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags); if (!qh->n_bytes) { - dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc) - * dwc2_max_desc_num(qh), qh->desc_list, - qh->desc_list_dma); + dma_unmap_single(hsotg->dev, qh->desc_list_dma, + qh->desc_list_sz, + DMA_FROM_DEVICE); + kmem_cache_free(desc_cache, qh->desc_list); qh->desc_list = NULL; return -ENOMEM; } @@ -112,10 +121,18 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { + struct kmem_cache *desc_cache; + + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC + && qh->dev_speed == USB_SPEED_HIGH) + desc_cache = hsotg->desc_hsisoc_cache; + else + desc_cache = hsotg->desc_gen_cache; + if (qh->desc_list) { - dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc) - * dwc2_max_desc_num(qh), qh->desc_list, - qh->desc_list_dma); + dma_unmap_single(hsotg->dev, qh->desc_list_dma, + qh->desc_list_sz, DMA_FROM_DEVICE); + kmem_cache_free(desc_cache, qh->desc_list); qh->desc_list = NULL; } @@ -128,21 +145,20 @@ static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags) if (hsotg->frame_list) return 0; - hsotg->frame_list = dma_alloc_coherent(hsotg->dev, - 4 * FRLISTEN_64_SIZE, - &hsotg->frame_list_dma, - mem_flags); + hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE; + hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA); if (!hsotg->frame_list) return -ENOMEM; - memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE); + hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list, + hsotg->frame_list_sz, + DMA_TO_DEVICE); + return 0; } static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg) { - u32 *frame_list; - dma_addr_t frame_list_dma; unsigned long flags; spin_lock_irqsave(&hsotg->lock, flags); @@ -152,14 +168,14 @@ static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg) return; } - frame_list = hsotg->frame_list; - frame_list_dma = hsotg->frame_list_dma; + dma_unmap_single(hsotg->dev, hsotg->frame_list_dma, + hsotg->frame_list_sz, DMA_FROM_DEVICE); + + kfree(hsotg->frame_list); hsotg->frame_list = NULL; spin_unlock_irqrestore(&hsotg->lock, flags); - dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list, - frame_list_dma); } static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en) @@ -236,7 +252,7 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, chan = qh->channel; inc = dwc2_frame_incr_val(qh); if (qh->ep_type == USB_ENDPOINT_XFER_ISOC) - i = dwc2_frame_list_idx(qh->sched_frame); + i = 
dwc2_frame_list_idx(qh->next_active_frame); else i = 0; @@ -249,17 +265,26 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, j = (j + inc) & (FRLISTEN_64_SIZE - 1); } while (j != i); + /* + * Sync frame list since controller will access it if periodic + * channel is currently enabled. + */ + dma_sync_single_for_device(hsotg->dev, + hsotg->frame_list_dma, + hsotg->frame_list_sz, + DMA_TO_DEVICE); + if (!enable) return; chan->schinfo = 0; - if (chan->speed == USB_SPEED_HIGH && qh->interval) { + if (chan->speed == USB_SPEED_HIGH && qh->host_interval) { j = 1; /* TODO - check this */ - inc = (8 + qh->interval - 1) / qh->interval; + inc = (8 + qh->host_interval - 1) / qh->host_interval; for (i = 0; i < inc; i++) { chan->schinfo |= j; - j = j << qh->interval; + j = j << qh->host_interval; } } else { chan->schinfo = 0xff; @@ -278,6 +303,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg, hsotg->non_periodic_channels--; } else { dwc2_update_frame_list(hsotg, qh, 0); + hsotg->available_host_channels++; } /* @@ -360,6 +386,8 @@ err0: */ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { + unsigned long flags; + dwc2_desc_list_free(hsotg, qh); /* @@ -369,8 +397,10 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) * when it comes here from endpoint disable routine * channel remains assigned. */ + spin_lock_irqsave(&hsotg->lock, flags); if (qh->channel) dwc2_release_channel_ddma(hsotg, qh); + spin_unlock_irqrestore(&hsotg->lock, flags); if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC || qh->ep_type == USB_ENDPOINT_XFER_INT) && @@ -401,7 +431,10 @@ static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg, hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); - /* sched_frame is always frame number (not uFrame) both in FS and HS! */ + /* + * next_active_frame is always frame number (not uFrame) both in FS + * and HS! 
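+	 * (In high speed mode the hardware frame counter advances once per
+	 * microframe, so the low 3 bits are the microframe index and
+	 * dwc2_full_frame_num() in hcd.h shifts right by 3 to recover the
+	 * 1 ms frame.)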
+ */ /* * skip_frames is used to limit activated descriptors number @@ -484,13 +517,13 @@ static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg, */ fr_idx_tmp = dwc2_frame_list_idx(frame); fr_idx = (FRLISTEN_64_SIZE + - dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp) - % dwc2_frame_incr_val(qh); + dwc2_frame_list_idx(qh->next_active_frame) - + fr_idx_tmp) % dwc2_frame_incr_val(qh); fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE; } else { - qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh, + qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames); - fr_idx = dwc2_frame_list_idx(qh->sched_frame); + fr_idx = dwc2_frame_list_idx(qh->next_active_frame); } qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx); @@ -524,14 +557,23 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT & HOST_DMA_ISOC_NBYTES_MASK; + /* Set active bit */ + dma_desc->status |= HOST_DMA_A; + + qh->ntd++; + qtd->isoc_frame_index_last++; + #ifdef ISOC_URB_GIVEBACK_ASAP /* Set IOC for each descriptor corresponding to last frame of URB */ if (qtd->isoc_frame_index_last == qtd->urb->packet_count) dma_desc->status |= HOST_DMA_IOC; #endif - qh->ntd++; - qtd->isoc_frame_index_last++; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + (idx * sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, @@ -539,32 +581,58 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, { struct dwc2_qtd *qtd; u32 max_xfer_size; - u16 idx, inc, n_desc, ntd_max = 0; + u16 idx, inc, n_desc = 0, ntd_max = 0; + u16 cur_idx; + u16 next_idx; idx = qh->td_last; - inc = qh->interval; - n_desc = 0; + inc = qh->host_interval; + hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); + cur_idx = dwc2_frame_list_idx(hsotg->frame_number); + next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed); - if (qh->interval) { - ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) / - qh->interval; + /* + * Ensure current frame number didn't overstep last scheduled + * descriptor. If it happens, the only way to recover is to move + * qh->td_last to current frame number + 1. + * So that next isoc descriptor will be scheduled on frame number + 1 + * and not on a past frame. + */ + if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) { + if (inc < 32) { + dev_vdbg(hsotg->dev, + "current frame number overstep last descriptor\n"); + qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc, + qh->dev_speed); + idx = qh->td_last; + } + } + + if (qh->host_interval) { + ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) / + qh->host_interval; if (skip_frames && !qh->channel) - ntd_max -= skip_frames / qh->interval; + ntd_max -= skip_frames / qh->host_interval; } max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ? 
MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS; list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) { + if (qtd->in_process && + qtd->isoc_frame_index_last == + qtd->urb->packet_count) + continue; + + qtd->isoc_td_first = idx; while (qh->ntd < ntd_max && qtd->isoc_frame_index_last < qtd->urb->packet_count) { - if (n_desc > 1) - qh->desc_list[n_desc - 1].status |= HOST_DMA_A; dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh, max_xfer_size, idx); idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed); n_desc++; } + qtd->isoc_td_last = idx; qtd->in_process = 1; } @@ -575,6 +643,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, if (qh->ntd == ntd_max) { idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed); qh->desc_list[idx].status |= HOST_DMA_IOC; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + (idx * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } #else /* @@ -604,13 +677,12 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed); qh->desc_list[idx].status |= HOST_DMA_IOC; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + (idx * sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); #endif - - if (n_desc) { - qh->desc_list[n_desc - 1].status |= HOST_DMA_A; - if (n_desc > 1) - qh->desc_list[0].status |= HOST_DMA_A; - } } static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, @@ -647,6 +719,12 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, dma_desc->buf = (u32)chan->xfer_dma; + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + (n_desc * sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); + /* * Last (or only) descriptor of IN transfer with actual size less * than MaxPacket @@ -697,6 +775,12 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, "set A bit in desc %d (%p)\n", n_desc - 1, &qh->desc_list[n_desc - 1]); + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + + ((n_desc - 1) * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc); dev_vdbg(hsotg->dev, @@ -722,10 +806,19 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A; dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n", n_desc - 1, &qh->desc_list[n_desc - 1]); + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma + (n_desc - 1) * + sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); if (n_desc > 1) { qh->desc_list[0].status |= HOST_DMA_A; dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n", &qh->desc_list[0]); + dma_sync_single_for_device(hsotg->dev, + qh->desc_list_dma, + sizeof(struct dwc2_hcd_dma_desc), + DMA_TO_DEVICE); } chan->ntd = n_desc; } @@ -800,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, struct dwc2_qh *qh, u16 idx) { - struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx]; + struct dwc2_hcd_dma_desc *dma_desc; struct dwc2_hcd_iso_packet_desc *frame_desc; u16 remain = 0; int rc = 0; @@ -808,6 +901,13 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, if (!qtd->urb) return -EINVAL; + dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx * + sizeof(struct dwc2_hcd_dma_desc)), + sizeof(struct dwc2_hcd_dma_desc), + DMA_FROM_DEVICE); + + dma_desc = 
&qh->desc_list[idx];
+
 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
 	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
 	if (chan->ep_is_in)
@@ -911,17 +1011,51 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
 		if (!qtd->in_process)
 			break;
+
+		/*
+		 * Ensure idx corresponds to descriptor where first urb of this
+		 * qtd was added. In fact, during isoc desc init, dwc2 may skip
+		 * an index if current frame number is already over this index.
+		 */
+		if (idx != qtd->isoc_td_first) {
+			dev_vdbg(hsotg->dev,
+				 "try to complete %d instead of %d\n",
+				 idx, qtd->isoc_td_first);
+			idx = qtd->isoc_td_first;
+		}
+
 		do {
+			struct dwc2_qtd *qtd_next;
+			u16 cur_idx;
+
 			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
 							  idx);
 			if (rc < 0)
 				return;
-			idx = dwc2_desclist_idx_inc(idx, qh->interval,
+			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
 						    chan->speed);
-			if (rc == DWC2_CMPL_STOP)
-				goto stop_scan;
+			if (!rc)
+				continue;
+
 			if (rc == DWC2_CMPL_DONE)
 				break;
+
+			/* rc == DWC2_CMPL_STOP */
+
+			if (qh->host_interval >= 32)
+				goto stop_scan;
+
+			qh->td_first = idx;
+			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+			qtd_next = list_first_entry(&qh->qtd_list,
+						    struct dwc2_qtd,
+						    qtd_list_entry);
+			if (dwc2_frame_idx_num_gt(cur_idx,
+						  qtd_next->isoc_td_last))
+				break;
+
+			goto stop_scan;
+
 		} while (idx != qh->td_first);
 	}
 
@@ -1029,6 +1163,12 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
 	if (!urb)
 		return -EINVAL;
 
+	dma_sync_single_for_cpu(hsotg->dev,
+				qh->desc_list_dma + (desc_num *
+				sizeof(struct dwc2_hcd_dma_desc)),
+				sizeof(struct dwc2_hcd_dma_desc),
+				DMA_FROM_DEVICE);
+
 	dma_desc = &qh->desc_list[desc_num];
 	n_bytes = qh->n_bytes[desc_num];
 	dev_vdbg(hsotg->dev,
@@ -1040,8 +1180,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
 	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
 		dwc2_host_complete(hsotg, qtd, urb->status);
 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
-		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
-			 failed, *xfer_done, urb->status);
+		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+			 failed, *xfer_done);
 		return failed;
 	}
 
@@ -1096,21 +1236,25 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 
 	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
 		int i;
+		int qtd_desc_count;
 
 		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
 		xfer_done = 0;
+		qtd_desc_count = qtd->n_desc;
 
-		for (i = 0; i < qtd->n_desc; i++) {
+		for (i = 0; i < qtd_desc_count; i++) {
 			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
 						       desc_num, halt_status,
 						       &xfer_done)) {
 				qtd = NULL;
-				break;
+				goto stop_scan;
 			}
+
 			desc_num++;
 		}
 	}
 
+stop_scan:
 	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
 		/*
 		 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1118,8 +1262,8 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 		 */
 		if (halt_status == DWC2_HC_XFER_STALL)
 			qh->data_toggle = DWC2_HC_PID_DATA0;
-		else if (qtd)
-			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+		else
+			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
 	}
 
 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
@@ -1165,6 +1309,21 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
 		/* Release the channel if halted or session completed */
 		if (halt_status != DWC2_HC_XFER_COMPLETE ||
 		    list_empty(&qh->qtd_list)) {
+			struct dwc2_qtd *qtd, *qtd_tmp;
+
+			/*
+			 * Kill all remaining QTDs since channel has been
+			 * 
halted.
+			 */
+			list_for_each_entry_safe(qtd, qtd_tmp,
+						 &qh->qtd_list,
+						 qtd_list_entry) {
+				dwc2_host_complete(hsotg, qtd,
+						   -ECONNRESET);
+				dwc2_hcd_qtd_unlink_and_free(hsotg,
+							     qtd, qh);
+			}
+
 			/* Halt the channel if session completed */
 			if (halt_status == DWC2_HC_XFER_COMPLETE)
 				dwc2_hc_halt(hsotg, chan, halt_status);
@@ -1172,9 +1331,14 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
 			dwc2_hcd_qh_unlink(hsotg, qh);
 		} else {
 			/* Keep in assigned schedule to continue transfer */
-			list_move(&qh->qh_list_entry,
-				  &hsotg->periodic_sched_assigned);
-			continue_isoc_xfer = 1;
+			list_move_tail(&qh->qh_list_entry,
+				       &hsotg->periodic_sched_assigned);
+			/*
+			 * If channel has been halted during giveback of urb
+			 * then prevent any new scheduling.
+			 */
+			if (!chan->halt_status)
+				continue_isoc_xfer = 1;
 		}
 		/*
 		 * Todo: Consider the case when period exceeds FrameList size.
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index bda0b21b850f..906f223542ee 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -55,12 +55,16 @@
 /* This function is for debug only */
 static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
 {
-#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
 	u16 curr_frame_number = hsotg->frame_number;
+	u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
+
+	if (expected != curr_frame_number)
+		dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
+			      expected, curr_frame_number);
 
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
 	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
-		if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
-		    curr_frame_number) {
+		if (expected != curr_frame_number) {
 			hsotg->frame_num_array[hsotg->frame_num_idx] =
 					curr_frame_number;
 			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
@@ -79,14 +83,15 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
 		}
 		hsotg->dumped_frame_num_array = 1;
 	}
-	hsotg->last_frame_num = curr_frame_number;
 #endif
+	hsotg->last_frame_num = curr_frame_number;
 }
 
 static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
 				    struct dwc2_host_chan *chan,
 				    struct dwc2_qtd *qtd)
 {
+	struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
 	struct urb *usb_urb;
 
 	if (!chan->qh)
@@ -102,6 +107,15 @@ static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
 	if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
 		return;
 
+	/*
+	 * The root hub doesn't really have a TT, but Linux thinks it
+	 * does because how could you have a "high speed hub" that
+	 * talks directly to low speed devices without a TT?
+	 * It's all lies. Lies, I tell you.
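+	 * Bail out for the root hub's fake TT below so we never ask
+	 * usb_hub_clear_tt_buffer() to clear a buffer that does not exist.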
+ */ + if (usb_urb->dev->tt->hub == root_hub) + return; + if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) { chan->qh->tt_buffer_dirty = 1; if (usb_hub_clear_tt_buffer(usb_urb)) @@ -122,6 +136,9 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg) struct dwc2_qh *qh; enum dwc2_transaction_type tr_type; + /* Clear interrupt */ + dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS); + #ifdef DEBUG_SOF dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n"); #endif @@ -135,20 +152,23 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg) while (qh_entry != &hsotg->periodic_sched_inactive) { qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry); qh_entry = qh_entry->next; - if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) + if (dwc2_frame_num_le(qh->next_active_frame, + hsotg->frame_number)) { + dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n", + qh, hsotg->frame_number, + qh->next_active_frame); + /* * Move QH to the ready list to be executed next * (micro)frame */ - list_move(&qh->qh_list_entry, + list_move_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); + } } tr_type = dwc2_hcd_select_transactions(hsotg); if (tr_type != DWC2_TRANSACTION_NONE) dwc2_hcd_queue_transactions(hsotg, tr_type); - - /* Clear interrupt */ - dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS); } /* @@ -312,6 +332,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0, if (do_reset) { *hprt0_modify |= HPRT0_RST; + dwc2_writel(*hprt0_modify, hsotg->regs + HPRT0); queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work, msecs_to_jiffies(60)); } else { @@ -347,15 +368,12 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg) * Set flag and clear if detected */ if (hprt0 & HPRT0_CONNDET) { + dwc2_writel(hprt0_modify | HPRT0_CONNDET, hsotg->regs + HPRT0); + dev_vdbg(hsotg->dev, "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n", hprt0); - if (hsotg->lx_state != DWC2_L0) - usb_hcd_resume_root_hub(hsotg->priv); - - hsotg->flags.b.port_connect_status_change = 1; - hsotg->flags.b.port_connect_status = 1; - hprt0_modify |= HPRT0_CONNDET; + dwc2_hcd_connect(hsotg); /* * The Hub driver asserts a reset when it sees port connect @@ -368,27 +386,36 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg) * Clear if detected - Set internal flag if disabled */ if (hprt0 & HPRT0_ENACHG) { + dwc2_writel(hprt0_modify | HPRT0_ENACHG, hsotg->regs + HPRT0); dev_vdbg(hsotg->dev, " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n", hprt0, !!(hprt0 & HPRT0_ENA)); - hprt0_modify |= HPRT0_ENACHG; - if (hprt0 & HPRT0_ENA) + if (hprt0 & HPRT0_ENA) { + hsotg->new_connection = true; dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify); - else + } else { hsotg->flags.b.port_enable_change = 1; + if (hsotg->core_params->dma_desc_fs_enable) { + u32 hcfg; + + hsotg->core_params->dma_desc_enable = 0; + hsotg->new_connection = false; + hcfg = dwc2_readl(hsotg->regs + HCFG); + hcfg &= ~HCFG_DESCDMA; + dwc2_writel(hcfg, hsotg->regs + HCFG); + } + } } /* Overcurrent Change Interrupt */ if (hprt0 & HPRT0_OVRCURRCHG) { + dwc2_writel(hprt0_modify | HPRT0_OVRCURRCHG, + hsotg->regs + HPRT0); dev_vdbg(hsotg->dev, " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n", hprt0); hsotg->flags.b.port_over_current_change = 1; - hprt0_modify |= HPRT0_OVRCURRCHG; } - - /* Clear Port Interrupts */ - dwc2_writel(hprt0_modify, hsotg->regs + HPRT0); } /* @@ -465,18 +492,6 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, xfer_length = urb->length - urb->actual_length; } - /* Non DWORD-aligned 
buffer case handling */ - if (chan->align_buf && xfer_length) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + urb->actual_length, - chan->qh->dw_align_buf, xfer_length); - } - dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n", urb->actual_length, xfer_length); urb->actual_length += xfer_length; @@ -518,11 +533,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) { + if (WARN(!chan || !chan->qh, + "chan->qh must be specified for non-control eps\n")) + return; + if (pid == TSIZ_SC_MC_PID_DATA0) chan->qh->data_toggle = DWC2_HC_PID_DATA0; else chan->qh->data_toggle = DWC2_HC_PID_DATA1; } else { + if (WARN(!qtd, + "qtd must be specified for control eps\n")) + return; + if (pid == TSIZ_SC_MC_PID_DATA0) qtd->data_toggle = DWC2_HC_PID_DATA0; else @@ -558,21 +581,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state( frame_desc->status = 0; frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, halt_status, NULL); - - /* Non DWORD-aligned buffer case handling */ - if (chan->align_buf && frame_desc->actual_length) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", - __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + frame_desc->offset + - qtd->isoc_split_offset, - chan->qh->dw_align_buf, - frame_desc->actual_length); - } break; case DWC2_HC_XFER_FRAME_OVERRUN: urb->error_count++; @@ -593,21 +601,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state( frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, halt_status, NULL); - /* Non DWORD-aligned buffer case handling */ - if (chan->align_buf && frame_desc->actual_length) { - dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", - __func__); - dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, - chan->qh->dw_align_buf_size, - chan->ep_is_in ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (chan->ep_is_in) - memcpy(urb->buf + frame_desc->offset + - qtd->isoc_split_offset, - chan->qh->dw_align_buf, - frame_desc->actual_length); - } - /* Skip whole frame */ if (chan->qh->do_split && chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && @@ -673,8 +666,6 @@ static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, } no_qtd: - if (qh->channel) - qh->channel->align_buf = 0; qh->channel = NULL; dwc2_hcd_qh_deactivate(hsotg, qh, continue_split); } @@ -831,7 +822,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg, * halt to be queued when the periodic schedule is * processed. 
*/
-		list_move(&chan->qh->qh_list_entry,
+		list_move_tail(&chan->qh->qh_list_entry,
 			  &hsotg->periodic_sched_assigned);
 
 		/*
@@ -939,14 +930,6 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
 
 	frame_desc->actual_length += len;
 
-	if (chan->align_buf) {
-		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
-		dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
-				 chan->qh->dw_align_buf_size, DMA_FROM_DEVICE);
-		memcpy(qtd->urb->buf + frame_desc->offset +
-		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
-	}
-
 	qtd->isoc_split_offset += len;
 
 	if (frame_desc->actual_length >= frame_desc->length) {
@@ -1169,19 +1152,6 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
 		xfer_length = urb->length - urb->actual_length;
 	}
 
-	/* Non DWORD-aligned buffer case handling */
-	if (chan->align_buf && xfer_length && chan->ep_is_in) {
-		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
-		dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
-				 chan->qh->dw_align_buf_size,
-				 chan->ep_is_in ?
-				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
-		if (chan->ep_is_in)
-			memcpy(urb->buf + urb->actual_length,
-			       chan->qh->dw_align_buf,
-			       xfer_length);
-	}
-
 	urb->actual_length += xfer_length;
 
 	hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
@@ -1401,14 +1371,50 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
 	 */
 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
-		int frnum = dwc2_hcd_get_frame_number(hsotg);
+		struct dwc2_qh *qh = chan->qh;
+		bool past_end;
+
+		if (hsotg->core_params->uframe_sched <= 0) {
+			int frnum = dwc2_hcd_get_frame_number(hsotg);
+
+			/* Don't have num_hs_transfers; simple logic */
+			past_end = dwc2_full_frame_num(frnum) !=
+				   dwc2_full_frame_num(qh->next_active_frame);
+		} else {
+			int end_frnum;
 
-		if (dwc2_full_frame_num(frnum) !=
-		    dwc2_full_frame_num(chan->qh->sched_frame)) {
 			/*
-			 * No longer in the same full speed frame.
-			 * Treat this as a transaction error.
-			 */
+			 * Figure out the end frame based on schedule.
+			 *
+			 * We don't want to go on trying again and again
+			 * forever. Let's stop when we've done all the
+			 * transfers that were scheduled.
+			 *
+			 * We're going to be comparing start_active_frame
+			 * and next_active_frame, both of which are 1
+			 * before the time the packet goes on the wire,
+			 * so that cancels out. Basically if we had 1
+			 * transfer and we saw 1 NYET then we're done.
+			 * We're getting a NYET here so if next >=
+			 * (start + num_transfers) we're done. The
+			 * complexity is that for all but ISOC_OUT we
+			 * skip one slot.
+			 */
+			end_frnum = dwc2_frame_num_inc(
+				qh->start_active_frame,
+				qh->num_hs_transfers);
+
+			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
+			    qh->ep_is_in)
+				end_frnum =
+					dwc2_frame_num_inc(end_frnum, 1);
+
+			past_end = dwc2_frame_num_le(
+				end_frnum, qh->next_active_frame);
+		}
+
+		if (past_end) {
+			/* Treat this as a transaction error. 
*/ #if 0 /* * Todo: Fix system performance so this can @@ -1993,6 +1999,16 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) } dwc2_writel(hcint, hsotg->regs + HCINT(chnum)); + + /* + * If we got an interrupt after someone called + * dwc2_hcd_endpoint_disable() we don't want to crash below + */ + if (!chan->qh) { + dev_warn(hsotg->dev, "Interrupt on disabled channel\n"); + return; + } + chan->hcint = hcint; hcint &= hcintmsk; @@ -2115,6 +2131,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) { u32 haint; int i; + struct dwc2_host_chan *chan, *chan_tmp; haint = dwc2_readl(hsotg->regs + HAINT); if (dbg_perio()) { @@ -2123,6 +2140,22 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint); } + /* + * According to USB 2.0 spec section 11.18.8, a host must + * issue complete-split transactions in a microframe for a + * set of full-/low-speed endpoints in the same relative + * order as the start-splits were issued in a microframe for. + */ + list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order, + split_order_list_entry) { + int hc_num = chan->hc_num; + + if (haint & (1 << hc_num)) { + dwc2_hc_n_intr(hsotg, hc_num); + haint &= ~(1 << hc_num); + } + } + for (i = 0; i < hsotg->core_params->host_channels; i++) { if (haint & (1 << i)) dwc2_hc_n_intr(hsotg, i); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 7d8d06cfe3c1..7f634fd771c7 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -38,6 +38,7 @@ * This file contains the functions to manage Queue Heads and Queue * Transfer Descriptors for Host mode */ +#include <linux/gcd.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> @@ -53,194 +54,8 @@ #include "core.h" #include "hcd.h" -/** - * dwc2_qh_init() - Initializes a QH structure - * - * @hsotg: The HCD state structure for the DWC OTG controller - * @qh: The QH to init - * @urb: Holds the information about the device/endpoint needed to initialize - * the QH - */ -#define SCHEDULE_SLOP 10 -static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, - struct dwc2_hcd_urb *urb) -{ - int dev_speed, hub_addr, hub_port; - char *speed, *type; - - dev_vdbg(hsotg->dev, "%s()\n", __func__); - - /* Initialize QH */ - qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); - qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0; - - qh->data_toggle = DWC2_HC_PID_DATA0; - qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info); - INIT_LIST_HEAD(&qh->qtd_list); - INIT_LIST_HEAD(&qh->qh_list_entry); - - /* FS/LS Endpoint on HS Hub, NOT virtual root hub */ - dev_speed = dwc2_host_get_speed(hsotg, urb->priv); - - dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port); - - if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) && - hub_addr != 0 && hub_addr != 1) { - dev_vdbg(hsotg->dev, - "QH init: EP %d: TT found at hub addr %d, for port %d\n", - dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr, - hub_port); - qh->do_split = 1; - } - - if (qh->ep_type == USB_ENDPOINT_XFER_INT || - qh->ep_type == USB_ENDPOINT_XFER_ISOC) { - /* Compute scheduling parameters once and save them */ - u32 hprt, prtspd; - - /* Todo: Account for split transfers in the bus time */ - int bytecount = - dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); - - qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ? 
- USB_SPEED_HIGH : dev_speed, qh->ep_is_in, - qh->ep_type == USB_ENDPOINT_XFER_ISOC, - bytecount)); - - /* Ensure frame_number corresponds to the reality */ - hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); - /* Start in a slightly future (micro)frame */ - qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number, - SCHEDULE_SLOP); - qh->interval = urb->interval; -#if 0 - /* Increase interrupt polling rate for debugging */ - if (qh->ep_type == USB_ENDPOINT_XFER_INT) - qh->interval = 8; -#endif - hprt = dwc2_readl(hsotg->regs + HPRT0); - prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; - if (prtspd == HPRT0_SPD_HIGH_SPEED && - (dev_speed == USB_SPEED_LOW || - dev_speed == USB_SPEED_FULL)) { - qh->interval *= 8; - qh->sched_frame |= 0x7; - qh->start_split_frame = qh->sched_frame; - } - dev_dbg(hsotg->dev, "interval=%d\n", qh->interval); - } - - dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n"); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n", - dwc2_hcd_get_dev_addr(&urb->pipe_info)); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n", - dwc2_hcd_get_ep_num(&urb->pipe_info), - dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"); - - qh->dev_speed = dev_speed; - - switch (dev_speed) { - case USB_SPEED_LOW: - speed = "low"; - break; - case USB_SPEED_FULL: - speed = "full"; - break; - case USB_SPEED_HIGH: - speed = "high"; - break; - default: - speed = "?"; - break; - } - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed); - - switch (qh->ep_type) { - case USB_ENDPOINT_XFER_ISOC: - type = "isochronous"; - break; - case USB_ENDPOINT_XFER_INT: - type = "interrupt"; - break; - case USB_ENDPOINT_XFER_CONTROL: - type = "control"; - break; - case USB_ENDPOINT_XFER_BULK: - type = "bulk"; - break; - default: - type = "?"; - break; - } - - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type); - - if (qh->ep_type == USB_ENDPOINT_XFER_INT) { - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n", - qh->usecs); - dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n", - qh->interval); - } -} - -/** - * dwc2_hcd_qh_create() - Allocates and initializes a QH - * - * @hsotg: The HCD state structure for the DWC OTG controller - * @urb: Holds the information about the device/endpoint needed - * to initialize the QH - * @atomic_alloc: Flag to do atomic allocation if needed - * - * Return: Pointer to the newly allocated QH, or NULL on error - */ -struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, - struct dwc2_hcd_urb *urb, - gfp_t mem_flags) -{ - struct dwc2_qh *qh; - - if (!urb->priv) - return NULL; - - /* Allocate memory */ - qh = kzalloc(sizeof(*qh), mem_flags); - if (!qh) - return NULL; - - dwc2_qh_init(hsotg, qh, urb); - - if (hsotg->core_params->dma_desc_enable > 0 && - dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) { - dwc2_hcd_qh_free(hsotg, qh); - return NULL; - } - - return qh; -} - -/** - * dwc2_hcd_qh_free() - Frees the QH - * - * @hsotg: HCD instance - * @qh: The QH to free - * - * QH should already be removed from the list. QTD list should already be empty - * if called from URB Dequeue. 
- * - * Must NOT be called with interrupt disabled or spinlock held - */ -void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) -{ - if (hsotg->core_params->dma_desc_enable > 0) { - dwc2_hcd_qh_free_ddma(hsotg, qh); - } else { - /* kfree(NULL) is safe */ - kfree(qh->dw_align_buf); - qh->dw_align_buf_dma = (dma_addr_t)0; - } - kfree(qh); -} +/* Wait this long before releasing periodic reservation */ +#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5)) /** * dwc2_periodic_channel_available() - Checks that a channel is available for a @@ -301,19 +116,19 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg, * High speed mode * Max periodic usecs is 80% x 125 usec = 100 usec */ - max_claimed_usecs = 100 - qh->usecs; + max_claimed_usecs = 100 - qh->host_us; } else { /* * Full speed mode * Max periodic usecs is 90% x 1000 usec = 900 usec */ - max_claimed_usecs = 900 - qh->usecs; + max_claimed_usecs = 900 - qh->host_us; } if (hsotg->periodic_usecs > max_claimed_usecs) { dev_err(hsotg->dev, "%s: already claimed usecs %d, required usecs %d\n", - __func__, hsotg->periodic_usecs, qh->usecs); + __func__, hsotg->periodic_usecs, qh->host_us); status = -ENOSPC; } @@ -321,113 +136,1177 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg, } /** - * Microframe scheduler - * track the total use in hsotg->frame_usecs - * keep each qh use in qh->frame_usecs - * when surrendering the qh then donate the time back + * pmap_schedule() - Schedule time in a periodic bitmap (pmap). + * + * @map: The bitmap representing the schedule; will be updated + * upon success. + * @bits_per_period: The schedule represents several periods. This is how many + * bits are in each period. It's assumed that the beginning + * of the schedule will repeat after its end. + * @periods_in_map: The number of periods in the schedule. + * @num_bits: The number of bits we need per period we want to reserve + * in this function call. + * @interval: How often we need to be scheduled for the reservation this + * time. 1 means every period. 2 means every other period. + * ...you get the picture? + * @start: The bit number to start at. Normally 0. Must be within + * the interval or we return failure right away. + * @only_one_period: Normally we'll allow picking a start anywhere within the + * first interval, since we can still make all repetition + * requirements by doing that. However, if you pass true + * here then we'll return failure if we can't fit within + * the period that "start" is in. + * + * The idea here is that we want to schedule time for repeating events that all + * want the same resource. The resource is divided into fixed-sized periods + * and the events want to repeat every "interval" periods. The schedule + * granularity is one bit. + * + * To keep things "simple", we'll represent our schedule with a bitmap that + * contains a fixed number of periods. This gets rid of a lot of complexity + * but does mean that we need to handle things specially (and non-ideally) if + * the number of the periods in the schedule doesn't match well with the + * intervals that we're trying to schedule. + * + * Here's an explanation of the scheme we'll implement, assuming 8 periods. + * - If interval is 1, we need to take up space in each of the 8 + * periods we're scheduling. Easy. + * - If interval is 2, we need to take up space in half of the + * periods. Again, easy. + * - If interval is 3, we actually need to fall back to interval 1. + * Why? Because we might need time in any period. 
AKA for the + * first 8 periods, we'll be in slot 0, 3, 6. Then we'll be + * in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to + * 0, 3, and 6. Since we could be in any frame we need to reserve + * for all of them. Sucks, but that's what you gotta do. Note that + * if we were instead scheduling 8 * 3 = 24 we'd do much better, but + * then we need more memory and time to do scheduling. + * - If interval is 4, easy. + * - If interval is 5, we again need interval 1. The schedule will be + * 0, 5, 2, 7, 4, 1, 6, 3, 0 + * - If interval is 6, we need interval 2. 0, 6, 4, 2. + * - If interval is 7, we need interval 1. + * - If interval is 8, we need interval 8. + * + * If you do the math, you'll see that we need to pretend that interval is + * equal to the greatest_common_divisor(interval, periods_in_map). + * + * Note that at the moment this function tends to front-pack the schedule. + * In some cases that's really non-ideal (it's hard to schedule things that + * need to repeat every period). In other cases it's perfect (you can easily + * schedule bigger, less often repeating things). + * + * Here's the algorithm in action (8 periods, 5 bits per period): + * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0 + * |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2 + * |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5 + * |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2 + * |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2 + * |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3 + * |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6 + * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4 + * |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1 + * | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0 + * | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5 + * | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2 + * | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3 + * | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6 + * | | | | | | | | | Remv 1 bits, intv 1 at 4 + * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0 + * |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2 + * |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3 + * |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5 + * |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6 + * |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8 + * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12 + * + * This function is pretty generic and could be easily abstracted if anything + * needed similar scheduling. + * + * Returns either -ENOSPC or a >= 0 start bit which should be passed to the + * unschedule routine. The map bitmap will be updated on a non-error result. 
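*
+ * A quick worked example of the gcd rule (numbers illustrative): with
+ * periods_in_map = 8 and a requested interval of 6, gcd(6, 8) = 2, so the
+ * reservation repeats in every other period -- 4 repetitions of num_bits
+ * each, exactly the "0, 6, 4, 2" case walked through above.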
*/
-static const unsigned short max_uframe_usecs[] = {
-	100, 100, 100, 100, 100, 100, 30, 0
-};
+static int pmap_schedule(unsigned long *map, int bits_per_period,
+			 int periods_in_map, int num_bits,
+			 int interval, int start, bool only_one_period)
+{
+	int interval_bits;
+	int to_reserve;
+	int first_end;
+	int i;
+
+	if (num_bits > bits_per_period)
+		return -ENOSPC;
+
+	/* Adjust interval as per description */
+	interval = gcd(interval, periods_in_map);
+
+	interval_bits = bits_per_period * interval;
+	to_reserve = periods_in_map / interval;
+
+	/* If start has gotten us past interval then we can't schedule */
+	if (start >= interval_bits)
+		return -ENOSPC;
+
+	if (only_one_period)
+		/* Must fit within same period as start; end at begin of next */
+		first_end = (start / bits_per_period + 1) * bits_per_period;
+	else
+		/* Can fit anywhere in the first interval */
+		first_end = interval_bits;
+
+	/*
+	 * We'll try to pick the first repetition, then see if that time
+	 * is free for each of the subsequent repetitions. If it's not
+	 * we'll adjust the start time for the next search of the first
+	 * repetition.
+	 */
+	while (start + num_bits <= first_end) {
+		int end;
+
+		/* Need to stay within this period */
+		end = (start / bits_per_period + 1) * bits_per_period;
+
+		/* Look for num_bits us in this microframe starting at start */
+		start = bitmap_find_next_zero_area(map, end, start, num_bits,
+						   0);
+
+		/*
+		 * We should get start >= end if we fail. We might be
+		 * able to check the next microframe depending on the
+		 * interval, so continue on (start already updated).
+		 */
+		if (start >= end) {
+			start = end;
+			continue;
+		}
+
+		/* At this point we have a valid point for first one */
+		for (i = 1; i < to_reserve; i++) {
+			int ith_start = start + interval_bits * i;
+			int ith_end = end + interval_bits * i;
+			int ret;
+
+			/* Use this as a dumb "check if bits are 0" */
+			ret = bitmap_find_next_zero_area(
+				map, ith_start + num_bits, ith_start, num_bits,
+				0);
+
+			/* We got the right place, continue checking */
+			if (ret == ith_start)
+				continue;
+
+			/* Move start up for next time and exit for loop */
+			ith_start = bitmap_find_next_zero_area(
+				map, ith_end, ith_start, num_bits, 0);
+			if (ith_start >= ith_end)
+				/* Need a whole new period next time */
+				start = end;
+			else
+				start = ith_start - interval_bits * i;
+			break;
+		}
+
+		/* If we didn't exit the for loop with a break, we have success */
+		if (i == to_reserve)
+			break;
+	}
 
-void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
+	if (start + num_bits > first_end)
+		return -ENOSPC;
+
+	for (i = 0; i < to_reserve; i++) {
+		int ith_start = start + interval_bits * i;
+
+		bitmap_set(map, ith_start, num_bits);
+	}
+
+	return start;
+}
+
+/**
+ * pmap_unschedule() - Undo work done by pmap_schedule()
+ *
+ * @map:             See pmap_schedule().
+ * @bits_per_period: See pmap_schedule().
+ * @periods_in_map:  See pmap_schedule().
+ * @num_bits:        The number of bits that was passed to schedule.
+ * @interval:        The interval that was passed to schedule.
+ * @start:           The return value from pmap_schedule(). 
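+ *
+ * Example pairing (hypothetical values, matching the "8 periods, 5 bits
+ * per period" diagram above):
+ *
+ *   start = pmap_schedule(map, 5, 8, 2, 2, 0, false);
+ *   if (start >= 0)
+ *           pmap_unschedule(map, 5, 8, 2, 2, start);
+ *
+ * The same num_bits/interval/start triple must be passed back, or bits
+ * belonging to other reservations may be cleared.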
+ */ +static void pmap_unschedule(unsigned long *map, int bits_per_period, + int periods_in_map, int num_bits, + int interval, int start) { + int interval_bits; + int to_release; int i; - for (i = 0; i < 8; i++) - hsotg->frame_usecs[i] = max_uframe_usecs[i]; + /* Adjust interval as per description in pmap_schedule() */ + interval = gcd(interval, periods_in_map); + + interval_bits = bits_per_period * interval; + to_release = periods_in_map / interval; + + for (i = 0; i < to_release; i++) { + int ith_start = start + interval_bits * i; + + bitmap_clear(map, ith_start, num_bits); + } } -static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +/* + * cat_printf() - A printf() + strcat() helper + * + * This is useful for concatenating a bunch of strings where each string is + * constructed using printf. + * + * @buf: The destination buffer; will be updated to point after the printed + * data. + * @size: The number of bytes in the buffer (includes space for '\0'). + * @fmt: The format for printf. + * @...: The args for printf. + */ +static void cat_printf(char **buf, size_t *size, const char *fmt, ...) { - unsigned short utime = qh->usecs; + va_list args; int i; - for (i = 0; i < 8; i++) { - /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */ - if (utime <= hsotg->frame_usecs[i]) { - hsotg->frame_usecs[i] -= utime; - qh->frame_usecs[i] += utime; - return i; - } + if (*size == 0) + return; + + va_start(args, fmt); + i = vsnprintf(*buf, *size, fmt, args); + va_end(args); + + if (i >= *size) { + (*buf)[*size - 1] = '\0'; + *buf += *size; + *size = 0; + } else { + *buf += i; + *size -= i; } - return -ENOSPC; } /* - * use this for FS apps that can span multiple uframes + * pmap_print() - Print the given periodic map + * + * Will attempt to print out the periodic schedule. + * + * @map: See pmap_schedule(). + * @bits_per_period: See pmap_schedule(). + * @periods_in_map: See pmap_schedule(). + * @period_name: The name of 1 period, like "uFrame" + * @units: The name of the units, like "us". + * @print_fn: The function to call for printing. + * @print_data: Opaque data to pass to the print function. + */ +static void pmap_print(unsigned long *map, int bits_per_period, + int periods_in_map, const char *period_name, + const char *units, + void (*print_fn)(const char *str, void *data), + void *print_data) +{ + int period; + + for (period = 0; period < periods_in_map; period++) { + char tmp[64]; + char *buf = tmp; + size_t buf_size = sizeof(tmp); + int period_start = period * bits_per_period; + int period_end = period_start + bits_per_period; + int start = 0; + int count = 0; + bool printed = false; + int i; + + for (i = period_start; i < period_end + 1; i++) { + /* Handle case when ith bit is set */ + if (i < period_end && + bitmap_find_next_zero_area(map, i + 1, + i, 1, 0) != i) { + if (count == 0) + start = i - period_start; + count++; + continue; + } + + /* ith bit isn't set; don't care if count == 0 */ + if (count == 0) + continue; + + if (!printed) + cat_printf(&buf, &buf_size, "%s %d: ", + period_name, period); + else + cat_printf(&buf, &buf_size, ", "); + printed = true; + + cat_printf(&buf, &buf_size, "%d %s -%3d %s", start, + units, start + count - 1, units); + count = 0; + } + + if (printed) + print_fn(tmp, print_data); + } +} + +/** + * dwc2_get_ls_map() - Get the map used for the given qh + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + * + * We'll always get the periodic map out of our TT. 
Note that even if we're + * running the host straight in low speed / full speed mode it appears as if + * a TT is allocated for us, so we'll use it. If that ever changes we can + * add logic here to get a map out of "hsotg" if !qh->do_split. + * + * Returns: the map or NULL if a map couldn't be found. */ -static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) +static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) { - unsigned short utime = qh->usecs; - unsigned short xtime; - int t_left; + unsigned long *map; + + /* Don't expect to be missing a TT and be doing low speed scheduling */ + if (WARN_ON(!qh->dwc_tt)) + return NULL; + + /* Get the map and adjust if this is a multi_tt hub */ + map = qh->dwc_tt->periodic_bitmaps; + if (qh->dwc_tt->usb_tt->multi) + map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + + return map; +} + +struct dwc2_qh_print_data { + struct dwc2_hsotg *hsotg; + struct dwc2_qh *qh; +}; + +/** + * dwc2_qh_print() - Helper function for dwc2_qh_schedule_print() + * + * @str: The string to print + * @data: A pointer to a struct dwc2_qh_print_data + */ +static void dwc2_qh_print(const char *str, void *data) +{ + struct dwc2_qh_print_data *print_data = data; + + dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str); +} + +/** + * dwc2_qh_schedule_print() - Print the periodic schedule + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH to print. + */ +static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + struct dwc2_qh_print_data print_data = { hsotg, qh }; int i; - int j; - int k; - for (i = 0; i < 8; i++) { - if (hsotg->frame_usecs[i] <= 0) + /* + * The printing functions are quite slow and inefficient. + * If we don't have tracing turned on, don't run unless the special + * define is turned on. + */ +#ifndef DWC2_PRINT_SCHEDULE + return; +#endif + + if (qh->schedule_low_speed) { + unsigned long *map = dwc2_get_ls_map(hsotg, qh); + + dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us", + qh, qh->device_us, + DWC2_ROUND_US_TO_SLICE(qh->device_us), + DWC2_US_PER_SLICE * qh->ls_start_schedule_slice); + + if (map) { + dwc2_sch_dbg(hsotg, + "QH=%p Whole low/full speed map %p now:\n", + qh, map); + pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, + DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices", + dwc2_qh_print, &print_data); + } + } + + for (i = 0; i < qh->num_hs_transfers; i++) { + struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i; + int uframe = trans_time->start_schedule_us / + DWC2_HS_PERIODIC_US_PER_UFRAME; + int rel_us = trans_time->start_schedule_us % + DWC2_HS_PERIODIC_US_PER_UFRAME; + + dwc2_sch_dbg(hsotg, + "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n", + qh, i, trans_time->duration_us, uframe, rel_us); + } + if (qh->num_hs_transfers) { + dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh); + pmap_print(hsotg->hs_periodic_bitmap, + DWC2_HS_PERIODIC_US_PER_UFRAME, + DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us", + dwc2_qh_print, &print_data); + } + +} + +/** + * dwc2_ls_pmap_schedule() - Schedule a low speed QH + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + * @search_slice: We'll start trying to schedule at the passed slice. + * Remember that slices are the units of the low speed + * schedule (think 25us or so). + * + * Wraps pmap_schedule() with the right parameters for low speed scheduling. 
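+ *
+ * For instance (assuming the roughly 25 us slices mentioned above), a
+ * transfer with device_us = 300 would ask pmap_schedule() for
+ * DIV_ROUND_UP(300, 25) = 12 slices.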
+ *
+ * Normally we schedule low speed devices on the map associated with the TT.
+ *
+ * Returns: 0 for success or an error code.
+ */
+static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+				 int search_slice)
+{
+	int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
+	unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+	int slice;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	/*
+	 * Schedule on the proper low speed map with our low speed scheduling
+	 * parameters.  Note that we use the "device_interval" here since
+	 * we want the low speed interval and the only way we'd be in this
+	 * function is if the device is low speed.
+	 *
+	 * If we happen to be doing low speed and high speed scheduling for the
+	 * same transaction (AKA we have a split) we always do low speed first.
+	 * That means we can always pass "false" for only_one_period (that
+	 * parameter is only useful when we're trying to get one schedule to
+	 * match what we already planned in the other schedule).
+	 */
+	slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+			      DWC2_LS_SCHEDULE_FRAMES, slices,
+			      qh->device_interval, search_slice, false);
+
+	if (slice < 0)
+		return slice;
+
+	qh->ls_start_schedule_slice = slice;
+	return 0;
+}
+
+/**
+ * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule()
+ *
+ * @hsotg:       The HCD state structure for the DWC OTG controller.
+ * @qh:          QH for the periodic transfer.
+ */
+static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
+				    struct dwc2_qh *qh)
+{
+	int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
+	unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+
+	/* Schedule should have failed, so no worries about no error code */
+	if (map == NULL)
+		return;
+
+	pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+			DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
+			qh->ls_start_schedule_slice);
+}
+
+/**
+ * dwc2_hs_pmap_schedule - Schedule in the main high speed schedule
+ *
+ * This will schedule something on the main dwc2 schedule.
+ *
+ * We'll start looking in qh->hs_transfers[index].start_schedule_us.  We'll
+ * update this with the result upon success.  We also use the duration from
+ * the same structure.
+ *
+ * @hsotg:           The HCD state structure for the DWC OTG controller.
+ * @qh:              QH for the periodic transfer.
+ * @only_one_period: If true we will limit ourselves to just looking at
+ *                   one period (aka one 100us chunk).  This is used if we have
+ *                   already scheduled something on the low speed schedule and
+ *                   need to find something that matches on the high speed one.
+ * @index:           The index into qh->hs_transfers that we're working with.
+ *
+ * Returns: 0 for success or an error code.  Upon success the
+ *          dwc2_hs_transfer_time specified by "index" will be updated.
+ */
+static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+				 bool only_one_period, int index)
+{
+	struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
+	int us;
+
+	us = pmap_schedule(hsotg->hs_periodic_bitmap,
+			   DWC2_HS_PERIODIC_US_PER_UFRAME,
+			   DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
+			   qh->host_interval, trans_time->start_schedule_us,
+			   only_one_period);
+
+	if (us < 0)
+		return us;
+
+	trans_time->start_schedule_us = us;
+	return 0;
+}
+
+/**
+ * dwc2_hs_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
+ *
+ * @hsotg:       The HCD state structure for the DWC OTG controller.
+ * @qh:          QH for the periodic transfer.
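+ * @index:       The index into qh->hs_transfers that we're working with.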
+ */ +static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, int index) +{ + struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index; + + pmap_unschedule(hsotg->hs_periodic_bitmap, + DWC2_HS_PERIODIC_US_PER_UFRAME, + DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us, + qh->host_interval, trans_time->start_schedule_us); +} + +/** + * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer. + * + * This is the most complicated thing in USB. We have to find matching time + * in both the global high speed schedule for the port and the low speed + * schedule for the TT associated with the given device. + * + * Being here means that the host must be running in high speed mode and the + * device is in low or full speed mode (and behind a hub). + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + */ +static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); + int ls_search_slice; + int err = 0; + int host_interval_in_sched; + + /* + * The interval (how often to repeat) in the actual host schedule. + * See pmap_schedule() for gcd() explanation. + */ + host_interval_in_sched = gcd(qh->host_interval, + DWC2_HS_SCHEDULE_UFRAMES); + + /* + * We always try to find space in the low speed schedule first, then + * try to find high speed time that matches. If we don't, we'll bump + * up the place we start searching in the low speed schedule and try + * again. To start we'll look right at the beginning of the low speed + * schedule. + * + * Note that this will tend to front-load the high speed schedule. + * We may eventually want to try to avoid this by either considering + * both schedules together or doing some sort of round robin. + */ + ls_search_slice = 0; + + while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) { + int start_s_uframe; + int ssplit_s_uframe; + int second_s_uframe; + int rel_uframe; + int first_count; + int middle_count; + int end_count; + int first_data_bytes; + int other_data_bytes; + int i; + + if (qh->schedule_low_speed) { + err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice); + + /* + * If we got an error here there's no other magic we + * can do, so bail. All the looping above is only + * helpful to redo things if we got a low speed slot + * and then couldn't find a matching high speed slot. + */ + if (err) + return err; + } else { + /* Must be missing the tt structure? Why? */ + WARN_ON_ONCE(1); + } + + /* + * This will give us a number 0 - 7 if + * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ... + */ + start_s_uframe = qh->ls_start_schedule_slice / + DWC2_SLICES_PER_UFRAME; + + /* Get a number that's always 0 - 7 */ + rel_uframe = (start_s_uframe % 8); + + /* + * If we were going to start in uframe 7 then we would need to + * issue a start split in uframe 6, which spec says is not OK. + * Move on to the next full frame (assuming there is one). + * + * See 11.18.4 Host Split Transaction Scheduling Requirements + * bullet 1. 
+ */ + if (rel_uframe == 7) { + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + ls_search_slice = + (qh->ls_start_schedule_slice / + DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) * + DWC2_LS_PERIODIC_SLICES_PER_FRAME; continue; + } /* - * we need n consecutive slots so use j as a start slot - * j plus j+1 must be enough time (for now) + * For ISOC in: + * - start split (frame -1) + * - complete split w/ data (frame +1) + * - complete split w/ data (frame +2) + * - ... + * - complete split w/ data (frame +num_data_packets) + * - complete split w/ data (frame +num_data_packets+1) + * - complete split w/ data (frame +num_data_packets+2, max 8) + * ...though if frame was "0" then max is 7... + * + * For ISOC out we might need to do: + * - start split w/ data (frame -1) + * - start split w/ data (frame +0) + * - ... + * - start split w/ data (frame +num_data_packets-2) + * + * For INTERRUPT in we might need to do: + * - start split (frame -1) + * - complete split w/ data (frame +1) + * - complete split w/ data (frame +2) + * - complete split w/ data (frame +3, max 8) + * + * For INTERRUPT out we might need to do: + * - start split w/ data (frame -1) + * - complete split (frame +1) + * - complete split (frame +2) + * - complete split (frame +3, max 8) + * + * Start adjusting! */ - xtime = hsotg->frame_usecs[i]; - for (j = i + 1; j < 8; j++) { - /* - * if we add this frame remaining time to xtime we may - * be OK, if not we need to test j for a complete frame - */ - if (xtime + hsotg->frame_usecs[j] < utime) { - if (hsotg->frame_usecs[j] < - max_uframe_usecs[j]) - continue; + ssplit_s_uframe = (start_s_uframe + + host_interval_in_sched - 1) % + host_interval_in_sched; + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in) + second_s_uframe = start_s_uframe; + else + second_s_uframe = start_s_uframe + 1; + + /* First data transfer might not be all 188 bytes. */ + first_data_bytes = 188 - + DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice % + DWC2_SLICES_PER_UFRAME), + DWC2_SLICES_PER_UFRAME); + if (first_data_bytes > bytecount) + first_data_bytes = bytecount; + other_data_bytes = bytecount - first_data_bytes; + + /* + * For now, skip OUT xfers where first xfer is partial + * + * Main dwc2 code assumes: + * - INT transfers never get split in two. + * - ISOC transfers can always transfer 188 bytes the first + * time. + * + * Until that code is fixed, try again if the first transfer + * couldn't transfer everything. + * + * This code can be removed if/when the rest of dwc2 handles + * the above cases. Until it's fixed we just won't be able + * to schedule quite as tightly. + */ + if (!qh->ep_is_in && + (first_data_bytes != min_t(int, 188, bytecount))) { + dwc2_sch_dbg(hsotg, + "QH=%p avoiding broken 1st xfer (%d, %d)\n", + qh, first_data_bytes, bytecount); + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + ls_search_slice = (start_s_uframe + 1) * + DWC2_SLICES_PER_UFRAME; + continue; + } + + /* Start by assuming transfers for the bytes */ + qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188); + + /* + * Everything except ISOC OUT has extra transfers. Rules are + * complicated. See 11.18.4 Host Split Transaction Scheduling + * Requirements bullet 3. + */ + if (qh->ep_type == USB_ENDPOINT_XFER_INT) { + if (rel_uframe == 6) + qh->num_hs_transfers += 2; + else + qh->num_hs_transfers += 3; + + if (qh->ep_is_in) { + /* + * First is start split, middle/end is data. + * Allocate full data bytes for all data. 
+ */ + first_count = 4; + middle_count = bytecount; + end_count = bytecount; + } else { + /* + * First is data, middle/end is complete. + * First transfer and second can have data. + * Rest should just have complete split. + */ + first_count = first_data_bytes; + middle_count = max_t(int, 4, other_data_bytes); + end_count = 4; } - if (xtime >= utime) { - t_left = utime; - for (k = i; k < 8; k++) { - t_left -= hsotg->frame_usecs[k]; - if (t_left <= 0) { - qh->frame_usecs[k] += - hsotg->frame_usecs[k] - + t_left; - hsotg->frame_usecs[k] = -t_left; - return i; - } else { - qh->frame_usecs[k] += - hsotg->frame_usecs[k]; - hsotg->frame_usecs[k] = 0; - } - } + } else { + if (qh->ep_is_in) { + int last; + + /* Account for the start split */ + qh->num_hs_transfers++; + + /* Calculate "L" value from spec */ + last = rel_uframe + qh->num_hs_transfers + 1; + + /* Start with basic case */ + if (last <= 6) + qh->num_hs_transfers += 2; + else + qh->num_hs_transfers += 1; + + /* Adjust downwards */ + if (last >= 6 && rel_uframe == 0) + qh->num_hs_transfers--; + + /* 1st = start; rest can contain data */ + first_count = 4; + middle_count = min_t(int, 188, bytecount); + end_count = middle_count; + } else { + /* All contain data, last might be smaller */ + first_count = first_data_bytes; + middle_count = min_t(int, 188, + other_data_bytes); + end_count = other_data_bytes % 188; } - /* add the frame time to x time */ - xtime += hsotg->frame_usecs[j]; - /* we must have a fully available next frame or break */ - if (xtime < utime && - hsotg->frame_usecs[j] == max_uframe_usecs[j]) - continue; } + + /* Assign durations per uFrame */ + qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count); + for (i = 1; i < qh->num_hs_transfers - 1; i++) + qh->hs_transfers[i].duration_us = + HS_USECS_ISO(middle_count); + if (qh->num_hs_transfers > 1) + qh->hs_transfers[qh->num_hs_transfers - 1].duration_us = + HS_USECS_ISO(end_count); + + /* + * Assign start us. The call below to dwc2_hs_pmap_schedule() + * will start with these numbers but may adjust within the same + * microframe. + */ + qh->hs_transfers[0].start_schedule_us = + ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME; + for (i = 1; i < qh->num_hs_transfers; i++) + qh->hs_transfers[i].start_schedule_us = + ((second_s_uframe + i - 1) % + DWC2_HS_SCHEDULE_UFRAMES) * + DWC2_HS_PERIODIC_US_PER_UFRAME; + + /* Try to schedule with filled in hs_transfers above */ + for (i = 0; i < qh->num_hs_transfers; i++) { + err = dwc2_hs_pmap_schedule(hsotg, qh, true, i); + if (err) + break; + } + + /* If we scheduled all w/out breaking out then we're all good */ + if (i == qh->num_hs_transfers) + break; + + for (; i >= 0; i--) + dwc2_hs_pmap_unschedule(hsotg, qh, i); + + if (qh->schedule_low_speed) + dwc2_ls_pmap_unschedule(hsotg, qh); + + /* Try again starting in the next microframe */ + ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME; } - return -ENOSPC; + + if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES) + return -ENOSPC; + + return 0; +} + +/** + * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer. + * + * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean + * interface. + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. 
+ */
+static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	/* In non-split host and device time are the same */
+	WARN_ON(qh->host_us != qh->device_us);
+	WARN_ON(qh->host_interval != qh->device_interval);
+	WARN_ON(qh->num_hs_transfers != 1);
+
+	/* We'll have one transfer; init start to 0 before calling scheduler */
+	qh->hs_transfers[0].start_schedule_us = 0;
+	qh->hs_transfers[0].duration_us = qh->host_us;
+
+	return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
+}
+
+/**
+ * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
+ *
+ * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
+ * interface.
+ *
+ * @hsotg:       The HCD state structure for the DWC OTG controller.
+ * @qh:          QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	/* In non-split host and device time are the same */
+	WARN_ON(qh->host_us != qh->device_us);
+	WARN_ON(qh->host_interval != qh->device_interval);
+	WARN_ON(!qh->schedule_low_speed);
+
+	/* Run on the main low speed schedule (no split = no hub = no TT) */
+	return dwc2_ls_pmap_schedule(hsotg, qh, 0);
 }
 
-static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+/**
+ * dwc2_uframe_schedule - Schedule a QH for a periodic xfer.
+ *
+ * Calls one of the 3 sub-functions depending on what type of transfer this QH
+ * is for.  Also adds some printing.
+ *
+ * @hsotg:       The HCD state structure for the DWC OTG controller.
+ * @qh:          QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
 	int ret;
 
-	if (qh->dev_speed == USB_SPEED_HIGH) {
-		/* if this is a hs transaction we need a full frame */
-		ret = dwc2_find_single_uframe(hsotg, qh);
+	if (qh->dev_speed == USB_SPEED_HIGH)
+		ret = dwc2_uframe_schedule_hs(hsotg, qh);
+	else if (!qh->do_split)
+		ret = dwc2_uframe_schedule_ls(hsotg, qh);
+	else
+		ret = dwc2_uframe_schedule_split(hsotg, qh);
+
+	if (ret)
+		dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
+	else
+		dwc2_qh_schedule_print(hsotg, qh);
+
+	return ret;
+}
+
+/**
+ * dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule().
+ *
+ * @hsotg:       The HCD state structure for the DWC OTG controller.
+ * @qh:          QH for the periodic transfer.
+ */
+static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	int i;
+
+	for (i = 0; i < qh->num_hs_transfers; i++)
+		dwc2_hs_pmap_unschedule(hsotg, qh, i);
+
+	if (qh->schedule_low_speed)
+		dwc2_ls_pmap_unschedule(hsotg, qh);
+
+	dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
+}
+
+/**
+ * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
+ *
+ * Takes a qh that has already been scheduled (which means we know we have the
+ * bandwidth reserved for us) and sets the next_active_frame and the
+ * start_active_frame.
+ *
+ * This is expected to be called on qh's that weren't previously actively
+ * running.  It just picks the next frame that we can fit into without any
+ * thought about the past.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for a periodic endpoint
+ */
+static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	u16 frame_number;
+	u16 earliest_frame;
+	u16 next_active_frame;
+	u16 relative_frame;
+	u16 interval;
+
+	/*
+	 * Use the real frame number rather than the cached value as of the
+	 * last SOF to give us a little extra slop.
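+	 *
+	 * Note that the frame number wraps at HFNUM_MAX_FRNUM (0x3fff), so
+	 * everything below goes through the wrap-aware dwc2_frame_num_inc(),
+	 * dwc2_frame_num_dec() and dwc2_frame_num_gt() helpers rather than
+	 * plain arithmetic.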
+	 */
+	frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+	/*
+	 * We wouldn't want to start any earlier than the next frame just in
+	 * case the frame number ticks as we're doing this calculation.
+	 *
+	 * NOTE: if we could quantify how long till we actually get scheduled
+	 * we might be able to avoid the "+ 1" by looking at the upper part of
+	 * HFNUM (the FRREM field).  For now we'll just use the + 1 though.
+	 */
+	earliest_frame = dwc2_frame_num_inc(frame_number, 1);
+	next_active_frame = earliest_frame;
+
+	/* Get the "no microframe scheduler" out of the way... */
+	if (hsotg->core_params->uframe_sched <= 0) {
+		if (qh->do_split)
+			/* Splits are active at microframe 0 minus 1 */
+			next_active_frame |= 0x7;
+		goto exit;
+	}
+
+	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
+		/*
+		 * We're either at high speed or we're doing a split (which
+		 * means we're talking high speed to a hub).  In any case
+		 * the first frame should be based on when the first scheduled
+		 * event is.
+		 */
+		WARN_ON(qh->num_hs_transfers < 1);
+
+		relative_frame = qh->hs_transfers[0].start_schedule_us /
+				 DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+		/* Adjust interval as per high speed schedule */
+		interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
+	} else {
 		/*
-		 * if this is a fs transaction we may need a sequence
-		 * of frames
+		 * Low or full speed directly on dwc2.  Just about the same
+		 * as high speed but on a different schedule and with slightly
+		 * different adjustments.  Note that this works because when
+		 * the host and device are both low speed then frames in the
+		 * controller tick at low speed.
 		 */
-		ret = dwc2_find_multi_uframe(hsotg, qh);
+		relative_frame = qh->ls_start_schedule_slice /
+				 DWC2_LS_PERIODIC_SLICES_PER_FRAME;
+		interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
 	}
-	return ret;
+
+	/* Scheduler messed up if frame is past interval */
+	WARN_ON(relative_frame >= interval);
+
+	/*
+	 * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
+	 * done the gcd(), so it's safe to move to the beginning of the current
+	 * interval like this.
+	 *
+	 * After this we might be before earliest_frame, but don't worry,
+	 * we'll fix it...
+	 */
+	next_active_frame = (next_active_frame / interval) * interval;
+
+	/*
+	 * Actually choose to start at the frame number we've been
+	 * scheduled for.
+	 */
+	next_active_frame = dwc2_frame_num_inc(next_active_frame,
+					       relative_frame);
+
+	/*
+	 * We actually need 1 frame before since the next_active_frame is
+	 * the frame number we'll be put on the ready list and we won't be on
+	 * the bus until 1 frame later.
+	 */
+	next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);
+
+	/*
+	 * By now we might actually be before the earliest_frame.  Let's move
+	 * up intervals until we're not.
+	 */
+	while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
+		next_active_frame = dwc2_frame_num_inc(next_active_frame,
+						       interval);
+
+exit:
+	qh->next_active_frame = next_active_frame;
+	qh->start_active_frame = next_active_frame;
+
+	dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
+		      qh, frame_number, qh->next_active_frame);
+}
+
+/**
+ * dwc2_do_reserve() - Make a periodic reservation
+ *
+ * Try to allocate space in the periodic schedule.  Depending on parameters
+ * this might use the microframe scheduler or the dumb scheduler.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for the periodic transfer.
+ *
+ * Returns: 0 upon success; error upon failure.
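+ *
+ * On success this also picks the QH's first active frame via
+ * dwc2_pick_first_frame(), so a successful reservation is ready to be
+ * placed on a schedule list right away.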
+ */
+static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	int status;
+
+	if (hsotg->core_params->uframe_sched > 0) {
+		status = dwc2_uframe_schedule(hsotg, qh);
+	} else {
+		status = dwc2_periodic_channel_available(hsotg);
+		if (status) {
+			dev_info(hsotg->dev,
+				 "%s: No host channel available for periodic transfer\n",
+				 __func__);
+			return status;
+		}
+
+		status = dwc2_check_periodic_bandwidth(hsotg, qh);
+	}
+
+	if (status) {
+		dev_dbg(hsotg->dev,
+			"%s: Insufficient periodic bandwidth for periodic transfer\n",
+			__func__);
+		return status;
+	}
+
+	if (hsotg->core_params->uframe_sched <= 0)
+		/* Reserve periodic channel */
+		hsotg->periodic_channels++;
+
+	/* Update claimed usecs per (micro)frame */
+	hsotg->periodic_usecs += qh->host_us;
+
+	dwc2_pick_first_frame(hsotg, qh);
+
+	return 0;
+}
+
+/**
+ * dwc2_do_unreserve() - Actually release the periodic reservation
+ *
+ * This function actually releases the periodic bandwidth that was reserved
+ * by the given qh.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh:    QH for the periodic transfer.
+ */
+static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	assert_spin_locked(&hsotg->lock);
+
+	WARN_ON(!qh->unreserve_pending);
+
+	/* No more unreserve pending--we're doing it */
+	qh->unreserve_pending = false;
+
+	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
+		list_del_init(&qh->qh_list_entry);
+
+	/* Update claimed usecs per (micro)frame */
+	hsotg->periodic_usecs -= qh->host_us;
+
+	if (hsotg->core_params->uframe_sched > 0) {
+		dwc2_uframe_unschedule(hsotg, qh);
+	} else {
+		/* Release periodic channel reservation */
+		hsotg->periodic_channels--;
+	}
+}
+
+/**
+ * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
+ *
+ * According to the kernel doc for usb_submit_urb() (specifically the part about
+ * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
+ * long as a device driver keeps submitting.  Since we're using HCD_BH to give
+ * back the URB we need to give the driver a little bit of time before we
+ * release the reservation.  This timer function is called after the
+ * appropriate delay.
+ *
+ * @data: A pointer to the QH, cast to unsigned long, as set up with
+ *        setup_timer() in dwc2_qh_init().
+ */
+static void dwc2_unreserve_timer_fn(unsigned long data)
+{
+	struct dwc2_qh *qh = (struct dwc2_qh *)data;
+	struct dwc2_hsotg *hsotg = qh->hsotg;
+	unsigned long flags;
+
+	/*
+	 * Wait for the lock, or for us to be scheduled again.  We
+	 * could be scheduled again if:
+	 *   - We started executing but didn't get the lock yet.
+	 *   - A new reservation came in, but cancel didn't take effect
+	 *     because we already started executing.
+	 *   - The timer has been kicked again.
+	 * In that case cancel and wait for the next call.
+	 */
+	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
+		if (timer_pending(&qh->unreserve_timer))
+			return;
+	}
+
+	/*
+	 * Might be no more unreserve pending if:
+	 *   - We started executing but didn't get the lock yet.
+	 *   - A new reservation came in, but cancel didn't take effect
+	 *     because we already started executing.
+	 *
+	 * We can't put this in the loop above because unreserve_pending needs
+	 * to be accessed under lock, so we can only check it once we got the
+	 * lock.
+	 */
+	if (qh->unreserve_pending)
+		dwc2_do_unreserve(hsotg, qh);
+
+	spin_unlock_irqrestore(&hsotg->lock, flags);
 }
 
 /**
@@ -474,42 +1353,6 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
 	int status;
 
-	if (hsotg->core_params->uframe_sched > 0) {
-		int frame = -1;
-
-		status = dwc2_find_uframe(hsotg, qh);
-		if (status == 0)
-			frame = 7;
-		else if (status > 0)
-			frame = status - 1;
-
-		/* Set the new frame up */
-		if (frame >= 0) {
-			qh->sched_frame &= ~0x7;
-			qh->sched_frame |= (frame & 7);
-		}
-
-		if (status > 0)
-			status = 0;
-	} else {
-		status = dwc2_periodic_channel_available(hsotg);
-		if (status) {
-			dev_info(hsotg->dev,
-				 "%s: No host channel available for periodic transfer\n",
-				 __func__);
-			return status;
-		}
-
-		status = dwc2_check_periodic_bandwidth(hsotg, qh);
-	}
-
-	if (status) {
-		dev_dbg(hsotg->dev,
-			"%s: Insufficient periodic bandwidth for periodic transfer\n",
-			__func__);
-		return status;
-	}
-
 	status = dwc2_check_max_xfer_size(hsotg, qh);
 	if (status) {
 		dev_dbg(hsotg->dev,
@@ -518,6 +1361,35 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		return status;
 	}
 
+	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
+	if (del_timer(&qh->unreserve_timer))
+		WARN_ON(!qh->unreserve_pending);
+
+	/*
+	 * Only need to reserve if there's not an unreserve pending, since if an
+	 * unreserve is pending then by definition our old reservation is still
+	 * valid.  Unreserve might still be pending even if we didn't cancel if
+	 * dwc2_unreserve_timer_fn() already started.  Code in the timer handles
+	 * that case.
+	 */
+	if (!qh->unreserve_pending) {
+		status = dwc2_do_reserve(hsotg, qh);
+		if (status)
+			return status;
+	} else {
+		/*
+		 * It might have been a while, so make sure that frame_number
+		 * is still good.  Note: we could also try to use the similar
+		 * dwc2_next_periodic_start() but that schedules much more
+		 * tightly and we might need to hurry and queue things up.
+		 */
+		if (dwc2_frame_num_le(qh->next_active_frame,
+				      hsotg->frame_number))
+			dwc2_pick_first_frame(hsotg, qh);
+	}
+
+	qh->unreserve_pending = 0;
+
 	if (hsotg->core_params->dma_desc_enable > 0)
 		/* Don't rely on SOF and start in ready schedule */
 		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
@@ -526,14 +1398,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		list_add_tail(&qh->qh_list_entry,
 			      &hsotg->periodic_sched_inactive);
 
-	if (hsotg->core_params->uframe_sched <= 0)
-		/* Reserve periodic channel */
-		hsotg->periodic_channels++;
-
-	/* Update claimed usecs per (micro)frame */
-	hsotg->periodic_usecs += qh->usecs;
-
-	return status;
+	return 0;
 }
 
 /**
@@ -546,25 +1411,231 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
 				     struct dwc2_qh *qh)
 {
-	int i;
+	bool did_modify;
+
+	assert_spin_locked(&hsotg->lock);
+
+	/*
+	 * Schedule the unreserve to happen in a little bit.  Cases here:
+	 *  - Unreserve worker might be sitting there waiting to grab the lock.
+	 *    In this case it will notice it's been scheduled again and will
+	 *    quit.
+	 *  - Unreserve worker might not be scheduled.
+	 *
+	 * We should never already be scheduled since dwc2_schedule_periodic()
+	 * should have canceled the scheduled unreserve timer (hence the
+	 * warning on did_modify).
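+	 *
+	 * DWC2_UNRESERVE_DELAY is the "little bit of time" described above
+	 * dwc2_unreserve_timer_fn(): long enough for a driver that just got
+	 * its URB back via HCD_BH to resubmit and keep its reservation.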
+	 *
+	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
+	 * passed (otherwise the jiffy counter might tick right after we
+	 * read it and we'd get no delay).
+	 */
+	did_modify = mod_timer(&qh->unreserve_timer,
+			       jiffies + DWC2_UNRESERVE_DELAY + 1);
+	WARN_ON(did_modify);
+	qh->unreserve_pending = 1;
 
 	list_del_init(&qh->qh_list_entry);
+}
 
-	/* Update claimed usecs per (micro)frame */
-	hsotg->periodic_usecs -= qh->usecs;
+/**
+ * dwc2_qh_init() - Initializes a QH structure
+ *
+ * @hsotg:     The HCD state structure for the DWC OTG controller
+ * @qh:        The QH to init
+ * @urb:       Holds the information about the device/endpoint needed to
+ *             initialize the QH
+ * @mem_flags: Flags for allocating memory.
+ */
+static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+			 struct dwc2_hcd_urb *urb, gfp_t mem_flags)
+{
+	int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+	u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+	bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
+	bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
+	bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
+	u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
+	u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+	bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
+			 dev_speed != USB_SPEED_HIGH);
+	int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
+	int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
+	char *speed, *type;
 
-	if (hsotg->core_params->uframe_sched > 0) {
-		for (i = 0; i < 8; i++) {
-			hsotg->frame_usecs[i] += qh->frame_usecs[i];
-			qh->frame_usecs[i] = 0;
+	/* Initialize QH */
+	qh->hsotg = hsotg;
+	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
+		    (unsigned long)qh);
+	qh->ep_type = ep_type;
+	qh->ep_is_in = ep_is_in;
+
+	qh->data_toggle = DWC2_HC_PID_DATA0;
+	qh->maxp = maxp;
+	INIT_LIST_HEAD(&qh->qtd_list);
+	INIT_LIST_HEAD(&qh->qh_list_entry);
+
+	qh->do_split = do_split;
+	qh->dev_speed = dev_speed;
+
+	if (ep_is_int || ep_is_isoc) {
+		/* Compute scheduling parameters once and save them */
+		int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
+		struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
+							       mem_flags,
+							       &qh->ttport);
+		int device_ns;
+
+		qh->dwc_tt = dwc_tt;
+
+		qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
+				       ep_is_isoc, bytecount));
+		device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
+					      ep_is_isoc, bytecount);
+
+		if (do_split && dwc_tt)
+			device_ns += dwc_tt->usb_tt->think_time;
+		qh->device_us = NS_TO_US(device_ns);
+
+		qh->device_interval = urb->interval;
+		qh->host_interval = urb->interval * (do_split ? 8 : 1);
+
+		/*
+		 * Schedule low speed if we're running the host in low or
+		 * full speed OR if we've got a "TT" to deal with to access this
+		 * device.
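+		 *
+		 * For example, a full speed device behind a high speed hub
+		 * needs the TT's low speed map even though the root port
+		 * itself is running at high speed.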
+		 */
+		qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
+					 dwc_tt;
+
+		if (do_split) {
+			/* We won't know num transfers until we schedule */
+			qh->num_hs_transfers = -1;
+		} else if (dev_speed == USB_SPEED_HIGH) {
+			qh->num_hs_transfers = 1;
+		} else {
+			qh->num_hs_transfers = 0;
 		}
-	} else {
-		/* Release periodic channel reservation */
-		hsotg->periodic_channels--;
+
+		/* We'll schedule later when we have something to do */
+	}
+
+	switch (dev_speed) {
+	case USB_SPEED_LOW:
+		speed = "low";
+		break;
+	case USB_SPEED_FULL:
+		speed = "full";
+		break;
+	case USB_SPEED_HIGH:
+		speed = "high";
+		break;
+	default:
+		speed = "?";
+		break;
+	}
+
+	switch (qh->ep_type) {
+	case USB_ENDPOINT_XFER_ISOC:
+		type = "isochronous";
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		type = "interrupt";
+		break;
+	case USB_ENDPOINT_XFER_CONTROL:
+		type = "control";
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		type = "bulk";
+		break;
+	default:
+		type = "?";
+		break;
+	}
+
+	dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
+		     speed, bytecount);
+	dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
+		     dwc2_hcd_get_dev_addr(&urb->pipe_info),
+		     dwc2_hcd_get_ep_num(&urb->pipe_info),
+		     ep_is_in ? "IN" : "OUT");
+	if (ep_is_int || ep_is_isoc) {
+		dwc2_sch_dbg(hsotg,
+			     "QH=%p ...duration: host=%d us, device=%d us\n",
+			     qh, qh->host_us, qh->device_us);
+		dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
+			     qh, qh->host_interval, qh->device_interval);
+		if (qh->schedule_low_speed)
+			dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
+				     qh, dwc2_get_ls_map(hsotg, qh));
 	}
 }
 
 /**
+ * dwc2_hcd_qh_create() - Allocates and initializes a QH
+ *
+ * @hsotg:     The HCD state structure for the DWC OTG controller
+ * @urb:       Holds the information about the device/endpoint needed
+ *             to initialize the QH
+ * @mem_flags: Flags for allocating memory.
+ *
+ * Return: Pointer to the newly allocated QH, or NULL on error
+ */
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+				   struct dwc2_hcd_urb *urb,
+				   gfp_t mem_flags)
+{
+	struct dwc2_qh *qh;
+
+	if (!urb->priv)
+		return NULL;
+
+	/* Allocate memory */
+	qh = kzalloc(sizeof(*qh), mem_flags);
+	if (!qh)
+		return NULL;
+
+	dwc2_qh_init(hsotg, qh, urb, mem_flags);
+
+	if (hsotg->core_params->dma_desc_enable > 0 &&
+	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
+		dwc2_hcd_qh_free(hsotg, qh);
+		return NULL;
+	}
+
+	return qh;
+}
+
+/**
+ * dwc2_hcd_qh_free() - Frees the QH
+ *
+ * @hsotg: HCD instance
+ * @qh:    The QH to free
+ *
+ * QH should already be removed from the list.  QTD list should already be empty
+ * if called from URB Dequeue.
+ *
+ * Must NOT be called with interrupts disabled or the spinlock held
+ */
+void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+	/* Make sure any unreserve work is finished. */
+	if (del_timer_sync(&qh->unreserve_timer)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&hsotg->lock, flags);
+		dwc2_do_unreserve(hsotg, qh);
+		spin_unlock_irqrestore(&hsotg->lock, flags);
+	}
+	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
+
+	if (qh->desc_list)
+		dwc2_hcd_qh_free_ddma(hsotg, qh);
+	kfree(qh);
+}
+
+/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule.  If the QH is already in
 * the schedule, no action is taken.
@@ -586,16 +1657,12 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		/* QH already in a schedule */
 		return 0;
 
-	if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
-			!hsotg->frame_number) {
-		dev_dbg(hsotg->dev,
-				"reset frame number counter\n");
-		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
-				SCHEDULE_SLOP);
-	}
-
 	/* Add the new QH to the appropriate schedule */
 	if (dwc2_qh_is_non_per(qh)) {
+		/* Schedule right away */
+		qh->start_active_frame = hsotg->frame_number;
+		qh->next_active_frame = qh->start_active_frame;
+
 		/* Always start in inactive schedule */
 		list_add_tail(&qh->qh_list_entry,
 			      &hsotg->non_periodic_sched_inactive);
@@ -649,39 +1716,164 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	}
 }
 
-/*
- * Schedule the next continuing periodic split transfer
+/**
+ * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
+ *
+ * This is called for setting next_active_frame for periodic splits for all but
+ * the first packet of the split.  Confusing? I thought so...
+ *
+ * Periodic splits are single low/full speed transfers that we end up splitting
+ * up into several high speed transfers.  They always fit into one full (1 ms)
+ * frame but might be split over several microframes (125 us each).  We need to
+ * put each of the parts on a very specific high speed frame.
+ *
+ * This function figures out where the next active uFrame needs to be.
+ *
+ * @hsotg:        The HCD state structure
+ * @qh:           QH for the periodic transfer.
+ * @frame_number: The current frame number.
+ *
+ * Return: number missed by (or 0 if we didn't miss).
 */
-static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
-				      struct dwc2_qh *qh, u16 frame_number,
-				      int sched_next_periodic_split)
+static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
+					struct dwc2_qh *qh, u16 frame_number)
 {
+	u16 old_frame = qh->next_active_frame;
+	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
+	int missed = 0;
 	u16 incr;
 
-	if (sched_next_periodic_split) {
-		qh->sched_frame = frame_number;
-		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
-		if (dwc2_frame_num_le(frame_number, incr)) {
-			/*
-			 * Allow one frame to elapse after start split
-			 * microframe before scheduling complete split, but
-			 * DON'T if we are doing the next start split in the
-			 * same frame for an ISOC out
-			 */
-			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
-			    qh->ep_is_in != 0) {
-				qh->sched_frame =
-					dwc2_frame_num_inc(qh->sched_frame, 1);
-			}
-		}
-	} else {
-		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
-						     qh->interval);
-		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
-			qh->sched_frame = frame_number;
-		qh->sched_frame |= 0x7;
-		qh->start_split_frame = qh->sched_frame;
+	/*
+	 * See dwc2_uframe_schedule_split() for split scheduling.
+	 *
+	 * Basically: increment 1 normally, but 2 right after the start split
+	 * (except for ISOC out).
+	 */
+	if (old_frame == qh->start_active_frame &&
+	    !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
+		incr = 2;
+	else
+		incr = 1;
+
+	qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
+
+	/*
+	 * Note that it's OK for frame_number to be 1 frame past
+	 * next_active_frame.  Remember that next_active_frame is supposed to
+	 * be 1 frame _before_ when we want to be scheduled.  If we're 1 frame
+	 * past it just means schedule ASAP.
+	 *
+	 * It's _not_ OK, however, if we're more than one frame past.
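+	 *
+	 * Example: with next_active_frame = 0x100, frame_number = 0x101 is
+	 * fine (just schedule ASAP), but frame_number = 0x102 means
+	 * prev_frame_number = 0x101 is past 0x100 and counts as a miss.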
+	 */
+	if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
+		/*
+		 * OOPS, we missed.  That's actually pretty bad since
+		 * the hub will be unhappy; try ASAP I guess.
+		 */
+		missed = dwc2_frame_num_dec(prev_frame_number,
+					    qh->next_active_frame);
+		qh->next_active_frame = frame_number;
 	}
+
+	return missed;
+}
+
+/**
+ * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
+ *
+ * This is called for setting next_active_frame for a periodic transfer for
+ * all cases other than midway through a periodic split.  This will also update
+ * start_active_frame.
+ *
+ * Since we _always_ keep start_active_frame as the start of the previous
+ * transfer this is normally pretty easy: we just add our interval to
+ * start_active_frame and we've got our answer.
+ *
+ * The tricks come into play if we miss.  In that case we'll look for the next
+ * slot we can fit into.
+ *
+ * @hsotg:        The HCD state structure
+ * @qh:           QH for the periodic transfer.
+ * @frame_number: The current frame number.
+ *
+ * Return: number missed by (or 0 if we didn't miss).
+ */
+static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
+				    struct dwc2_qh *qh, u16 frame_number)
+{
+	int missed = 0;
+	u16 interval = qh->host_interval;
+	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
+
+	qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
+						    interval);
+
+	/*
+	 * The dwc2_frame_num_gt() function used below won't work terribly well
+	 * if we just incremented by a really large interval since the
+	 * frame counter only goes to 0x3fff.  It's terribly unlikely that we
+	 * will have missed in this case anyway.  Just go to exit.  If we want
+	 * to try to do better we'll need to keep track of a bigger counter
+	 * somewhere in the driver and handle overflows.
+	 */
+	if (interval >= 0x1000)
+		goto exit;
+
+	/*
+	 * Test for misses, which is when it's too late to schedule.
+	 *
+	 * A few things to note:
+	 *  - We compare against prev_frame_number since start_active_frame
+	 *    and next_active_frame are always 1 frame before we want things
+	 *    to be active and we assume we can still get scheduled in the
+	 *    current frame number.
+	 *  - It's possible for start_active_frame (now incremented) to be
+	 *    next_active_frame if we got an EO MISS (even_odd miss) which
+	 *    basically means that we detected there wasn't enough time for
+	 *    the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
+	 *    at the last second.  We want to make sure we don't schedule
+	 *    another transfer for the same frame.  My test webcam doesn't seem
+	 *    terribly upset by missing a transfer but really doesn't like when
+	 *    we do two transfers in the same frame.
+	 *  - Some misses are expected.  Specifically, in order to work
+	 *    perfectly dwc2 really has quite spectacular interrupt latency
+	 *    requirements.  It needs to be able to handle its interrupts
+	 *    completely within 125 us of them being asserted.  That not only
+	 *    means that the dwc2 interrupt handler needs to be fast but also
+	 *    that nothing else in the system can block dwc2 for a long
+	 *    time.  We can help with the dwc2 parts of this, but it's hard to
+	 *    guarantee that a system will have interrupt latency < 125 us, so
+	 *    we have to be robust to some misses.
+	 */
+	if (qh->start_active_frame == qh->next_active_frame ||
+	    dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
+		u16 ideal_start = qh->start_active_frame;
+		int periods_in_map;
+
+		/*
+		 * Adjust interval as per gcd with map size.
+		 * See pmap_schedule() for more details here.
+		 */
+		if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
+			periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
+		else
+			periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
+		interval = gcd(interval, periods_in_map);
+
+		do {
+			qh->start_active_frame = dwc2_frame_num_inc(
+				qh->start_active_frame, interval);
+		} while (dwc2_frame_num_gt(prev_frame_number,
+					   qh->start_active_frame));
+
+		missed = dwc2_frame_num_dec(qh->start_active_frame,
+					    ideal_start);
+	}
+
+exit:
+	qh->next_active_frame = qh->start_active_frame;
+
+	return missed;
 }
 
 /*
@@ -700,7 +1892,9 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
 void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 			    int sched_next_periodic_split)
 {
+	u16 old_frame = qh->next_active_frame;
 	u16 frame_number;
+	int missed;
 
 	if (dbg_qh(qh))
 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -713,33 +1907,44 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 		return;
 	}
 
+	/*
+	 * Use the real frame number rather than the cached value as of the
+	 * last SOF just to get us a little closer to reality.  Note that this
+	 * means we don't actually know if we've already handled the SOF
+	 * interrupt for this frame.
+	 */
 	frame_number = dwc2_hcd_get_frame_number(hsotg);
 
-	if (qh->do_split) {
-		dwc2_sched_periodic_split(hsotg, qh, frame_number,
-					  sched_next_periodic_split);
-	} else {
-		qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
-						     qh->interval);
-		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
-			qh->sched_frame = frame_number;
-	}
+	if (sched_next_periodic_split)
+		missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
+	else
+		missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
+
+	dwc2_sch_vdbg(hsotg,
+		      "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
+		      qh, sched_next_periodic_split, frame_number, old_frame,
+		      qh->next_active_frame,
+		      dwc2_frame_num_dec(qh->next_active_frame, old_frame),
+		      missed, missed ? "MISS" : "");
 
 	if (list_empty(&qh->qtd_list)) {
 		dwc2_hcd_qh_unlink(hsotg, qh);
 		return;
 	}
+
 	/*
 	 * Remove from periodic_sched_queued and move to
 	 * appropriate queue
+	 *
+	 * Note: we purposely use the frame_number from the "hsotg" structure
+	 * since we know SOF interrupt will handle future frames.
*/ - if ((hsotg->core_params->uframe_sched > 0 && - dwc2_frame_num_le(qh->sched_frame, frame_number)) || - (hsotg->core_params->uframe_sched <= 0 && - qh->sched_frame == frame_number)) - list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready); + if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number)) + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_ready); else - list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive); + list_move_tail(&qh->qh_list_entry, + &hsotg->periodic_sched_inactive); } /** diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h index 553f24606c43..281b57b36ab4 100644 --- a/drivers/usb/dwc2/hw.h +++ b/drivers/usb/dwc2/hw.h @@ -769,10 +769,6 @@ #define TSIZ_XFERSIZE_SHIFT 0 #define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch)) -#define HCDMA_DMA_ADDR_MASK (0x1fffff << 11) -#define HCDMA_DMA_ADDR_SHIFT 11 -#define HCDMA_CTD_MASK (0xff << 3) -#define HCDMA_CTD_SHIFT 3 #define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch)) diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 39c1cbf0e75d..88629bed6614 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -54,11 +54,44 @@ static const char dwc2_driver_name[] = "dwc2"; +static const struct dwc2_core_params params_hi6220 = { + .otg_cap = 2, /* No HNP/SRP capable */ + .otg_ver = 0, /* 1.3 */ + .dma_enable = 1, + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = 0, /* High Speed */ + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = 1, + .host_rx_fifo_size = 512, + .host_nperio_tx_fifo_size = 512, + .host_perio_tx_fifo_size = 512, + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = 16, + .phy_type = 1, /* UTMI */ + .phy_utmi_width = 8, + .phy_ulpi_ddr = 0, /* Single */ + .phy_ulpi_ext_vbus = 0, + .i2c_enable = 0, + .ulpi_fs_ls = 0, + .host_support_fs_ls_low_power = 0, + .host_ls_low_power_phy_clk = 0, /* 48 MHz */ + .ts_dline = 0, + .reload_ctl = 0, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + static const struct dwc2_core_params params_bcm2835 = { .otg_cap = 0, /* HNP/SRP capable */ .otg_ver = 0, /* 1.3 */ .dma_enable = 1, .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, .speed = 0, /* High Speed */ .enable_dynamic_fifo = 1, .en_multiple_tx_fifo = 1, @@ -89,13 +122,14 @@ static const struct dwc2_core_params params_rk3066 = { .otg_ver = -1, .dma_enable = -1, .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, .speed = -1, .enable_dynamic_fifo = 1, .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 520, /* 520 DWORDs */ + .host_rx_fifo_size = 525, /* 525 DWORDs */ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ - .max_transfer_size = 65535, + .max_transfer_size = -1, .max_packet_count = -1, .host_channels = -1, .phy_type = -1, @@ -115,6 +149,103 @@ static const struct dwc2_core_params params_rk3066 = { .hibernation = -1, }; +static const struct dwc2_core_params params_ltq = { + .otg_cap = 2, /* non-HNP/non-SRP */ + .otg_ver = -1, + .dma_enable = -1, + .dma_desc_enable = -1, + .dma_desc_fs_enable = -1, + .speed = -1, + .enable_dynamic_fifo = -1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 288, /* 288 DWORDs */ + .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ + .host_perio_tx_fifo_size = 96, /* 96 DWORDs */ + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + 
.phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +/* + * Check the dr_mode against the module configuration and hardware + * capabilities. + * + * The hardware, module, and dr_mode, can each be set to host, device, + * or otg. Check that all these values are compatible and adjust the + * value of dr_mode if possible. + * + * actual + * HW MOD dr_mode dr_mode + * ------------------------------ + * HST HST any : HST + * HST DEV any : --- + * HST OTG any : HST + * + * DEV HST any : --- + * DEV DEV any : DEV + * DEV OTG any : DEV + * + * OTG HST any : HST + * OTG DEV any : DEV + * OTG OTG any : dr_mode + */ +static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg) +{ + enum usb_dr_mode mode; + + hsotg->dr_mode = usb_get_dr_mode(hsotg->dev); + if (hsotg->dr_mode == USB_DR_MODE_UNKNOWN) + hsotg->dr_mode = USB_DR_MODE_OTG; + + mode = hsotg->dr_mode; + + if (dwc2_hw_is_device(hsotg)) { + if (IS_ENABLED(CONFIG_USB_DWC2_HOST)) { + dev_err(hsotg->dev, + "Controller does not support host mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_PERIPHERAL; + } else if (dwc2_hw_is_host(hsotg)) { + if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL)) { + dev_err(hsotg->dev, + "Controller does not support device mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_HOST; + } else { + if (IS_ENABLED(CONFIG_USB_DWC2_HOST)) + mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL)) + mode = USB_DR_MODE_PERIPHERAL; + } + + if (mode != hsotg->dr_mode) { + dev_warn(hsotg->dev, + "Configuration mismatch. dr_mode forced to %s\n", + mode == USB_DR_MODE_HOST ? "host" : "device"); + + hsotg->dr_mode = mode; + } + + return 0; +} + static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) { struct platform_device *pdev = to_platform_device(hsotg->dev); @@ -306,9 +437,31 @@ static int dwc2_driver_remove(struct platform_device *dev) return 0; } +/** + * dwc2_driver_shutdown() - Called on device shutdown + * + * @dev: Platform device + * + * In specific conditions (involving usb hubs) dwc2 devices can create a + * lot of interrupts, even to the point of overwhelming devices running + * at low frequencies. Some devices need to do special clock handling + * at shutdown-time which may bring the system clock below the threshold + * of being able to handle the dwc2 interrupts. Disabling dwc2-irqs + * prevents reboots/poweroffs from getting stuck in such cases. 
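+ *
+ * Note that disable_irq() waits for any handler still running on another
+ * CPU to finish before returning, so the interrupt is fully quiesced by
+ * the time shutdown proceeds.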
+ */
+static void dwc2_driver_shutdown(struct platform_device *dev)
+{
+	struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
+
+	disable_irq(hsotg->irq);
+}
+
 static const struct of_device_id dwc2_of_match_table[] = {
 	{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+	{ .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
 	{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
+	{ .compatible = "lantiq,arx100-usb", .data = &params_ltq },
+	{ .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
 	{ .compatible = "snps,dwc2", .data = NULL },
 	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
 	{},
@@ -335,7 +488,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	struct dwc2_hsotg *hsotg;
 	struct resource *res;
 	int retval;
-	int irq;
 
 	match = of_match_device(dwc2_of_match_table, &dev->dev);
 	if (match && match->data) {
@@ -348,8 +500,10 @@ static int dwc2_driver_probe(struct platform_device *dev)
 		/*
 		 * Disable descriptor dma mode by default as the HW can support
 		 * it, but does not support it for SPLIT transactions.
+		 * Disable it for FS devices as well.
 		 */
 		defparams.dma_desc_enable = 0;
+		defparams.dma_desc_fs_enable = 0;
 	}
 
 	hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
@@ -375,19 +529,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
 		(unsigned long)res->start, hsotg->regs);
 
-	hsotg->dr_mode = usb_get_dr_mode(&dev->dev);
-	if (IS_ENABLED(CONFIG_USB_DWC2_HOST) &&
-	    hsotg->dr_mode != USB_DR_MODE_HOST) {
-		hsotg->dr_mode = USB_DR_MODE_HOST;
-		dev_warn(hsotg->dev,
-			 "Configuration mismatch. Forcing host mode\n");
-	} else if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) &&
-		   hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
-		hsotg->dr_mode = USB_DR_MODE_PERIPHERAL;
-		dev_warn(hsotg->dev,
-			 "Configuration mismatch. Forcing peripheral mode\n");
-	}
-
 	retval = dwc2_lowlevel_hw_init(hsotg);
 	if (retval)
 		return retval;
@@ -401,15 +542,15 @@ static int dwc2_driver_probe(struct platform_device *dev)
 
 	dwc2_set_all_params(hsotg->core_params, -1);
 
-	irq = platform_get_irq(dev, 0);
-	if (irq < 0) {
+	hsotg->irq = platform_get_irq(dev, 0);
+	if (hsotg->irq < 0) {
 		dev_err(&dev->dev, "missing IRQ resource\n");
-		return irq;
+		return hsotg->irq;
 	}
 
 	dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
-		irq);
-	retval = devm_request_irq(hsotg->dev, irq,
+		hsotg->irq);
+	retval = devm_request_irq(hsotg->dev, hsotg->irq,
				  dwc2_handle_common_intr, IRQF_SHARED,
				  dev_name(hsotg->dev), hsotg);
 	if (retval)
@@ -419,6 +560,16 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	if (retval)
 		return retval;
 
+	retval = dwc2_get_dr_mode(hsotg);
+	if (retval)
+		return retval;
+
+	/*
+	 * Reset before dwc2_get_hwparams() so that it reads the true
+	 * power-on reset values from the registers.
+	 */
+	dwc2_core_reset_and_force_dr_mode(hsotg);
+
 	/* Detect config values from hardware */
 	retval = dwc2_get_hwparams(hsotg);
 	if (retval)
@@ -427,15 +578,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	/* Validate parameter values */
 	dwc2_set_parameters(hsotg, params);
 
+	dwc2_force_dr_mode(hsotg);
+
 	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
-		retval = dwc2_gadget_init(hsotg, irq);
+		retval = dwc2_gadget_init(hsotg, hsotg->irq);
 		if (retval)
 			goto error;
 		hsotg->gadget_enabled = 1;
 	}
 
 	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
-		retval = dwc2_hcd_init(hsotg, irq);
+		retval = dwc2_hcd_init(hsotg, hsotg->irq);
 		if (retval) {
 			if (hsotg->gadget_enabled)
 				dwc2_hsotg_remove(hsotg);
@@ -502,6 +655,7 @@ static struct platform_driver dwc2_platform_driver = {
 	},
 	.probe = dwc2_driver_probe,
 	.remove = dwc2_driver_remove,
+	.shutdown = dwc2_driver_shutdown,
 };
 
 module_platform_driver(dwc2_platform_driver);