Diffstat (limited to 'drivers')
-rw-r--r--   drivers/Makefile                         |    2
-rw-r--r--   drivers/amba/Makefile                    |    4
-rw-r--r--   drivers/amba/tegra-ahb.c                 |  293
-rw-r--r--   drivers/dma/ep93xx_dma.c                 |  117
-rw-r--r--   drivers/input/keyboard/pxa27x_keypad.c   |   52
-rw-r--r--   drivers/iommu/Kconfig                    |   21
-rw-r--r--   drivers/iommu/Makefile                   |    1
-rw-r--r--   drivers/iommu/exynos-iommu.c             | 1076
-rw-r--r--   drivers/mmc/host/omap_hsmmc.c            |    2
-rw-r--r--   drivers/mmc/host/sdhci-esdhc-imx.c       |    2
-rw-r--r--   drivers/mmc/host/sdhci-pltfm.c           |    8
-rw-r--r--   drivers/usb/host/ehci-tegra.c            |    5
12 files changed, 1536 insertions, 47 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 0ee98d50f975..2ba29ffef2cb 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_SFI) += sfi/ # PnP must come after ACPI since it will eventually need to check if acpi # was used and do nothing if so obj-$(CONFIG_PNP) += pnp/ -obj-$(CONFIG_ARM_AMBA) += amba/ +obj-y += amba/ # Many drivers will want to use DMA so this has to be made available # really early. obj-$(CONFIG_DMA_ENGINE) += dma/ diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile index 40fe74097be2..66e81c2f1e3c 100644 --- a/drivers/amba/Makefile +++ b/drivers/amba/Makefile @@ -1,2 +1,2 @@ -obj-y += bus.o - +obj-$(CONFIG_ARM_AMBA) += bus.o +obj-$(CONFIG_TEGRA_AHB) += tegra-ahb.o diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c new file mode 100644 index 000000000000..aa0b1f160528 --- /dev/null +++ b/drivers/amba/tegra-ahb.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * Copyright (C) 2011 Google, Inc. + * + * Author: + * Jay Cheng <jacheng@nvidia.com> + * James Wylder <james.wylder@motorola.com> + * Benoit Goby <benoit@android.com> + * Colin Cross <ccross@android.com> + * Hiroshi DOYU <hdoyu@nvidia.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/io.h> + +#define DRV_NAME "tegra-ahb" + +#define AHB_ARBITRATION_DISABLE 0x00 +#define AHB_ARBITRATION_PRIORITY_CTRL 0x04 +#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29) +#define PRIORITY_SELECT_USB BIT(6) +#define PRIORITY_SELECT_USB2 BIT(18) +#define PRIORITY_SELECT_USB3 BIT(17) + +#define AHB_GIZMO_AHB_MEM 0x0c +#define ENB_FAST_REARBITRATE BIT(2) +#define DONT_SPLIT_AHB_WR BIT(7) + +#define AHB_GIZMO_APB_DMA 0x10 +#define AHB_GIZMO_IDE 0x18 +#define AHB_GIZMO_USB 0x1c +#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20 +#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24 +#define AHB_GIZMO_COP_AHB_BRIDGE 0x28 +#define AHB_GIZMO_XBAR_APB_CTLR 0x2c +#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30 +#define AHB_GIZMO_NAND 0x3c +#define AHB_GIZMO_SDMMC4 0x44 +#define AHB_GIZMO_XIO 0x48 +#define AHB_GIZMO_BSEV 0x60 +#define AHB_GIZMO_BSEA 0x70 +#define AHB_GIZMO_NOR 0x74 +#define AHB_GIZMO_USB2 0x78 +#define AHB_GIZMO_USB3 0x7c +#define IMMEDIATE BIT(18) + +#define AHB_GIZMO_SDMMC1 0x80 +#define AHB_GIZMO_SDMMC2 0x84 +#define AHB_GIZMO_SDMMC3 0x88 +#define AHB_MEM_PREFETCH_CFG_X 0xd8 +#define AHB_ARBITRATION_XBAR_CTRL 0xdc +#define AHB_MEM_PREFETCH_CFG3 0xe0 +#define AHB_MEM_PREFETCH_CFG4 0xe4 +#define AHB_MEM_PREFETCH_CFG1 0xec +#define AHB_MEM_PREFETCH_CFG2 0xf0 +#define PREFETCH_ENB BIT(31) +#define MST_ID(x) (((x) & 0x1f) << 26) +#define AHBDMA_MST_ID MST_ID(5) +#define USB_MST_ID MST_ID(6) +#define USB2_MST_ID MST_ID(18) +#define USB3_MST_ID MST_ID(17) +#define ADDR_BNDRY(x) (((x) & 0xf) << 21) +#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0) + +#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8 + +#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17) + +static struct platform_driver tegra_ahb_driver; + +static const u32 
tegra_ahb_gizmo[] = { + AHB_ARBITRATION_DISABLE, + AHB_ARBITRATION_PRIORITY_CTRL, + AHB_GIZMO_AHB_MEM, + AHB_GIZMO_APB_DMA, + AHB_GIZMO_IDE, + AHB_GIZMO_USB, + AHB_GIZMO_AHB_XBAR_BRIDGE, + AHB_GIZMO_CPU_AHB_BRIDGE, + AHB_GIZMO_COP_AHB_BRIDGE, + AHB_GIZMO_XBAR_APB_CTLR, + AHB_GIZMO_VCP_AHB_BRIDGE, + AHB_GIZMO_NAND, + AHB_GIZMO_SDMMC4, + AHB_GIZMO_XIO, + AHB_GIZMO_BSEV, + AHB_GIZMO_BSEA, + AHB_GIZMO_NOR, + AHB_GIZMO_USB2, + AHB_GIZMO_USB3, + AHB_GIZMO_SDMMC1, + AHB_GIZMO_SDMMC2, + AHB_GIZMO_SDMMC3, + AHB_MEM_PREFETCH_CFG_X, + AHB_ARBITRATION_XBAR_CTRL, + AHB_MEM_PREFETCH_CFG3, + AHB_MEM_PREFETCH_CFG4, + AHB_MEM_PREFETCH_CFG1, + AHB_MEM_PREFETCH_CFG2, + AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID, +}; + +struct tegra_ahb { + void __iomem *regs; + struct device *dev; + u32 ctx[0]; +}; + +static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset) +{ + return readl(ahb->regs + offset); +} + +static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset) +{ + writel(value, ahb->regs + offset); +} + +#ifdef CONFIG_ARCH_TEGRA_3x_SOC +static int tegra_ahb_match_by_smmu(struct device *dev, void *data) +{ + struct tegra_ahb *ahb = dev_get_drvdata(dev); + struct device_node *dn = data; + + return (ahb->dev->of_node == dn) ? 1 : 0; +} + +int tegra_ahb_enable_smmu(struct device_node *dn) +{ + struct device *dev; + u32 val; + struct tegra_ahb *ahb; + + dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn, + tegra_ahb_match_by_smmu); + if (!dev) + return -EPROBE_DEFER; + ahb = dev_get_drvdata(dev); + val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL); + val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE; + gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL); + return 0; +} +EXPORT_SYMBOL(tegra_ahb_enable_smmu); +#endif + +static int tegra_ahb_suspend(struct device *dev) +{ + int i; + struct tegra_ahb *ahb = dev_get_drvdata(dev); + + for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++) + ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]); + return 0; +} + +static int tegra_ahb_resume(struct device *dev) +{ + int i; + struct tegra_ahb *ahb = dev_get_drvdata(dev); + + for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++) + gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]); + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm, + tegra_ahb_suspend, + tegra_ahb_resume, NULL); + +static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb) +{ + u32 val; + + val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM); + val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR; + gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM); + + val = gizmo_readl(ahb, AHB_GIZMO_USB); + val |= IMMEDIATE; + gizmo_writel(ahb, val, AHB_GIZMO_USB); + + val = gizmo_readl(ahb, AHB_GIZMO_USB2); + val |= IMMEDIATE; + gizmo_writel(ahb, val, AHB_GIZMO_USB2); + + val = gizmo_readl(ahb, AHB_GIZMO_USB3); + val |= IMMEDIATE; + gizmo_writel(ahb, val, AHB_GIZMO_USB3); + + val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL); + val |= PRIORITY_SELECT_USB | + PRIORITY_SELECT_USB2 | + PRIORITY_SELECT_USB3 | + AHB_PRIORITY_WEIGHT(7); + gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL); + + val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1); + val &= ~MST_ID(~0); + val |= PREFETCH_ENB | + AHBDMA_MST_ID | + ADDR_BNDRY(0xc) | + INACTIVITY_TIMEOUT(0x1000); + gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1); + + val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2); + val &= ~MST_ID(~0); + val |= PREFETCH_ENB | + USB_MST_ID | + ADDR_BNDRY(0xc) | + INACTIVITY_TIMEOUT(0x1000); + gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2); + + val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3); + 
val &= ~MST_ID(~0); + val |= PREFETCH_ENB | + USB3_MST_ID | + ADDR_BNDRY(0xc) | + INACTIVITY_TIMEOUT(0x1000); + gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3); + + val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4); + val &= ~MST_ID(~0); + val |= PREFETCH_ENB | + USB2_MST_ID | + ADDR_BNDRY(0xc) | + INACTIVITY_TIMEOUT(0x1000); + gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4); +} + +static int __devinit tegra_ahb_probe(struct platform_device *pdev) +{ + struct resource *res; + struct tegra_ahb *ahb; + size_t bytes; + + bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo); + ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL); + if (!ahb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + ahb->regs = devm_request_and_ioremap(&pdev->dev, res); + if (!ahb->regs) + return -EBUSY; + + ahb->dev = &pdev->dev; + platform_set_drvdata(pdev, ahb); + tegra_ahb_gizmo_init(ahb); + return 0; +} + +static int __devexit tegra_ahb_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id tegra_ahb_of_match[] __devinitconst = { + { .compatible = "nvidia,tegra30-ahb", }, + { .compatible = "nvidia,tegra20-ahb", }, + {}, +}; + +static struct platform_driver tegra_ahb_driver = { + .probe = tegra_ahb_probe, + .remove = __devexit_p(tegra_ahb_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = tegra_ahb_of_match, + .pm = &tegra_ahb_pm, + }, +}; +module_platform_driver(tegra_ahb_driver); + +MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); +MODULE_DESCRIPTION("Tegra AHB driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index f6e9b572b998..c64917ec313d 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -71,6 +71,7 @@ #define M2M_CONTROL_TM_SHIFT 13 #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) +#define M2M_CONTROL_NFBINT BIT(21) #define M2M_CONTROL_RSS_SHIFT 22 #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) @@ -79,7 +80,22 @@ #define M2M_CONTROL_PWSC_SHIFT 25 #define M2M_INTERRUPT 0x0004 -#define M2M_INTERRUPT_DONEINT BIT(1) +#define M2M_INTERRUPT_MASK 6 + +#define M2M_STATUS 0x000c +#define M2M_STATUS_CTL_SHIFT 1 +#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT) +#define M2M_STATUS_BUF_SHIFT 4 +#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT) +#define M2M_STATUS_DONE BIT(6) #define M2M_BCR0 0x0010 #define M2M_BCR1 0x0014 @@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) /* * M2M DMA implementation - * - * For the M2M transfers we don't use NFB at all. This is because it simply - * doesn't work well with memcpy transfers. When you submit both buffers it is - * extremely unlikely that you get an NFB interrupt, but it instead reports - * DONE interrupt and both buffers are already transferred which means that we - * weren't able to update the next buffer. 
- * - * So for now we "simulate" NFB by just submitting buffer after buffer - * without double buffering. */ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) @@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) m2m_fill_desc(edmac); control |= M2M_CONTROL_DONEINT; + if (ep93xx_dma_advance_active(edmac)) { + m2m_fill_desc(edmac); + control |= M2M_CONTROL_NFBINT; + } + /* * Now we can finally enable the channel. For M2M channel this must be * done _after_ the BCRx registers are programmed. @@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) } } +/* + * According to EP93xx User's Guide, we should receive DONE interrupt when all + * M2M DMA controller transactions complete normally. This is not always the + * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel + * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel + * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation). + * In effect, disabling the channel when only DONE bit is set could stop + * currently running DMA transfer. To avoid this, we use Buffer FSM and + * Control FSM to check current state of DMA channel. + */ static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) { + u32 status = readl(edmac->regs + M2M_STATUS); + u32 ctl_fsm = status & M2M_STATUS_CTL_MASK; + u32 buf_fsm = status & M2M_STATUS_BUF_MASK; + bool done = status & M2M_STATUS_DONE; + bool last_done; u32 control; + struct ep93xx_dma_desc *desc; - if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) + /* Accept only DONE and NFB interrupts */ + if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK)) return INTERRUPT_UNKNOWN; - /* Clear the DONE bit */ - writel(0, edmac->regs + M2M_INTERRUPT); + if (done) { + /* Clear the DONE bit */ + writel(0, edmac->regs + M2M_INTERRUPT); + } - /* Disable interrupts and the channel */ - control = readl(edmac->regs + M2M_CONTROL); - control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); - writel(control, edmac->regs + M2M_CONTROL); + /* + * Check whether we are done with descriptors or not. This, together + * with DMA channel state, determines action to take in interrupt. + */ + desc = ep93xx_dma_get_active(edmac); + last_done = !desc || desc->txd.cookie; /* - * Since we only get DONE interrupt we have to find out ourselves - * whether there still is something to process. So we try to advance - * the chain an see whether it succeeds. + * Use M2M DMA Buffer FSM and Control FSM to check current state of + * DMA channel. Using DONE and NFB bits from channel status register + * or bits from channel interrupt register is not reliable. */ - if (ep93xx_dma_advance_active(edmac)) { - edmac->edma->hw_submit(edmac); - return INTERRUPT_NEXT_BUFFER; + if (!last_done && + (buf_fsm == M2M_STATUS_BUF_NO || + buf_fsm == M2M_STATUS_BUF_ON)) { + /* + * Two buffers are ready for update when Buffer FSM is in + * DMA_NO_BUF state. Only one buffer can be prepared without + * disabling the channel or polling the DONE bit. + * To simplify things, always prepare only one buffer. 
+ */ + if (ep93xx_dma_advance_active(edmac)) { + m2m_fill_desc(edmac); + if (done && !edmac->chan.private) { + /* Software trigger for memcpy channel */ + control = readl(edmac->regs + M2M_CONTROL); + control |= M2M_CONTROL_START; + writel(control, edmac->regs + M2M_CONTROL); + } + return INTERRUPT_NEXT_BUFFER; + } else { + last_done = true; + } + } + + /* + * Disable the channel only when Buffer FSM is in DMA_NO_BUF state + * and Control FSM is in DMA_STALL state. + */ + if (last_done && + buf_fsm == M2M_STATUS_BUF_NO && + ctl_fsm == M2M_STATUS_CTL_STALL) { + /* Disable interrupts and the channel */ + control = readl(edmac->regs + M2M_CONTROL); + control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT + | M2M_CONTROL_ENABLE); + writel(control, edmac->regs + M2M_CONTROL); + return INTERRUPT_DONE; } - return INTERRUPT_DONE; + /* + * Nothing to do this time. + */ + return INTERRUPT_NEXT_BUFFER; } /* diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index 29fe1b2be1c1..7f7b72464a37 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad) if (pdata->enable_rotary0 || pdata->enable_rotary1) pxa27x_keypad_scan_rotary(keypad); - new_state = KPDK_DK(kpdk) & keypad->direct_key_mask; + /* + * The KPDR_DK only output the key pin level, so it relates to board, + * and low level may be active. + */ + if (pdata->direct_key_low_active) + new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask; + else + new_state = KPDK_DK(kpdk) & keypad->direct_key_mask; + bits_changed = keypad->direct_key_state ^ new_state; if (bits_changed == 0) @@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad) if (pdata->direct_key_num > direct_key_num) direct_key_num = pdata->direct_key_num; - keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask; + /* + * Direct keys usage may not start from KP_DKIN0, check the platfrom + * mask data to config the specific. + */ + if (pdata->direct_key_mask) + keypad->direct_key_mask = pdata->direct_key_mask; + else + keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask; /* enable direct key */ if (direct_key_num) @@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev) struct pxa27x_keypad *keypad = input_get_drvdata(dev); /* Enable unit clock */ - clk_enable(keypad->clk); + clk_prepare_enable(keypad->clk); pxa27x_keypad_config(keypad); return 0; @@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev) struct pxa27x_keypad *keypad = input_get_drvdata(dev); /* Disable clock unit */ - clk_disable(keypad->clk); + clk_disable_unprepare(keypad->clk); } #ifdef CONFIG_PM @@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); - clk_disable(keypad->clk); - + /* + * If the keypad is used a wake up source, clock can not be disabled. + * Or it can not detect the key pressing. + */ if (device_may_wakeup(&pdev->dev)) enable_irq_wake(keypad->irq); + else + clk_disable_unprepare(keypad->clk); return 0; } @@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev) struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); struct input_dev *input_dev = keypad->input_dev; - if (device_may_wakeup(&pdev->dev)) + /* + * If the keypad is used as wake up source, the clock is not turned + * off. So do not need configure it again. 
+ */ + if (device_may_wakeup(&pdev->dev)) { disable_irq_wake(keypad->irq); + } else { + mutex_lock(&input_dev->mutex); - mutex_lock(&input_dev->mutex); + if (input_dev->users) { + /* Enable unit clock */ + clk_prepare_enable(keypad->clk); + pxa27x_keypad_config(keypad); + } - if (input_dev->users) { - /* Enable unit clock */ - clk_enable(keypad->clk); - pxa27x_keypad_config(keypad); + mutex_unlock(&input_dev->mutex); } - mutex_unlock(&input_dev->mutex); - return 0; } diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index c69843742bb0..340893727538 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU space through the SMMU (System Memory Management Unit) hardware included on Tegra SoCs. +config EXYNOS_IOMMU + bool "Exynos IOMMU Support" + depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU + select IOMMU_API + help + Support for the IOMMU(System MMU) of Samsung Exynos application + processor family. This enables H/W multimedia accellerators to see + non-linear physical memory chunks as a linear memory in their + address spaces + + If unsure, say N here. + +config EXYNOS_IOMMU_DEBUG + bool "Debugging log for Exynos IOMMU" + depends on EXYNOS_IOMMU + help + Select this to see the detailed log message that shows what + happens in the IOMMU driver + + Say N unless you need kernel log message for IOMMU debugging + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 3e5e82ae9f0d..76e54ef796de 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o +obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c new file mode 100644 index 000000000000..9a114b9ff170 --- /dev/null +++ b/drivers/iommu/exynos-iommu.c @@ -0,0 +1,1076 @@ +/* linux/drivers/iommu/exynos_iommu.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifdef CONFIG_EXYNOS_IOMMU_DEBUG +#define DEBUG +#endif + +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/mm.h> +#include <linux/iommu.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/export.h> + +#include <asm/cacheflush.h> +#include <asm/pgtable.h> + +#include <mach/sysmmu.h> + +/* We does not consider super section mapping (16MB) */ +#define SECT_ORDER 20 +#define LPAGE_ORDER 16 +#define SPAGE_ORDER 12 + +#define SECT_SIZE (1 << SECT_ORDER) +#define LPAGE_SIZE (1 << LPAGE_ORDER) +#define SPAGE_SIZE (1 << SPAGE_ORDER) + +#define SECT_MASK (~(SECT_SIZE - 1)) +#define LPAGE_MASK (~(LPAGE_SIZE - 1)) +#define SPAGE_MASK (~(SPAGE_SIZE - 1)) + +#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) +#define lv1ent_page(sent) ((*(sent) & 3) == 1) +#define lv1ent_section(sent) ((*(sent) & 3) == 2) + +#define lv2ent_fault(pent) ((*(pent) & 3) == 0) +#define lv2ent_small(pent) ((*(pent) & 2) == 2) +#define lv2ent_large(pent) ((*(pent) & 3) == 1) + +#define section_phys(sent) (*(sent) & SECT_MASK) +#define section_offs(iova) ((iova) & 0xFFFFF) +#define lpage_phys(pent) (*(pent) & LPAGE_MASK) +#define lpage_offs(iova) ((iova) & 0xFFFF) +#define spage_phys(pent) (*(pent) & SPAGE_MASK) +#define spage_offs(iova) ((iova) & 0xFFF) + +#define lv1ent_offset(iova) ((iova) >> SECT_ORDER) +#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER) + +#define NUM_LV1ENTRIES 4096 +#define NUM_LV2ENTRIES 256 + +#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long)) + +#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) + +#define lv2table_base(sent) (*(sent) & 0xFFFFFC00) + +#define mk_lv1ent_sect(pa) ((pa) | 2) +#define mk_lv1ent_page(pa) ((pa) | 1) +#define mk_lv2ent_lpage(pa) ((pa) | 1) +#define mk_lv2ent_spage(pa) ((pa) | 2) + +#define CTRL_ENABLE 0x5 +#define CTRL_BLOCK 0x7 +#define CTRL_DISABLE 0x0 + +#define REG_MMU_CTRL 0x000 +#define REG_MMU_CFG 0x004 +#define REG_MMU_STATUS 0x008 +#define REG_MMU_FLUSH 0x00C +#define REG_MMU_FLUSH_ENTRY 0x010 +#define REG_PT_BASE_ADDR 0x014 +#define REG_INT_STATUS 0x018 +#define REG_INT_CLEAR 0x01C + +#define REG_PAGE_FAULT_ADDR 0x024 +#define REG_AW_FAULT_ADDR 0x028 +#define REG_AR_FAULT_ADDR 0x02C +#define REG_DEFAULT_SLAVE_ADDR 0x030 + +#define REG_MMU_VERSION 0x034 + +#define REG_PB0_SADDR 0x04C +#define REG_PB0_EADDR 0x050 +#define REG_PB1_SADDR 0x054 +#define REG_PB1_EADDR 0x058 + +static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova) +{ + return pgtable + lv1ent_offset(iova); +} + +static unsigned long *page_entry(unsigned long *sent, unsigned long iova) +{ + return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova); +} + +enum exynos_sysmmu_inttype { + SYSMMU_PAGEFAULT, + SYSMMU_AR_MULTIHIT, + SYSMMU_AW_MULTIHIT, + SYSMMU_BUSERROR, + SYSMMU_AR_SECURITY, + SYSMMU_AR_ACCESS, + SYSMMU_AW_SECURITY, + SYSMMU_AW_PROTECTION, /* 7 */ + SYSMMU_FAULT_UNKNOWN, + SYSMMU_FAULTS_NUM +}; + +/* + * @itype: type of fault. + * @pgtable_base: the physical address of page table base. This is 0 if @itype + * is SYSMMU_BUSERROR. + * @fault_addr: the device (virtual) address that the System MMU tried to + * translated. This is 0 if @itype is SYSMMU_BUSERROR. 
+ */ +typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype, + unsigned long pgtable_base, unsigned long fault_addr); + +static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = { + REG_PAGE_FAULT_ADDR, + REG_AR_FAULT_ADDR, + REG_AW_FAULT_ADDR, + REG_DEFAULT_SLAVE_ADDR, + REG_AR_FAULT_ADDR, + REG_AR_FAULT_ADDR, + REG_AW_FAULT_ADDR, + REG_AW_FAULT_ADDR +}; + +static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = { + "PAGE FAULT", + "AR MULTI-HIT FAULT", + "AW MULTI-HIT FAULT", + "BUS ERROR", + "AR SECURITY PROTECTION FAULT", + "AR ACCESS PROTECTION FAULT", + "AW SECURITY PROTECTION FAULT", + "AW ACCESS PROTECTION FAULT", + "UNKNOWN FAULT" +}; + +struct exynos_iommu_domain { + struct list_head clients; /* list of sysmmu_drvdata.node */ + unsigned long *pgtable; /* lv1 page table, 16KB */ + short *lv2entcnt; /* free lv2 entry counter for each section */ + spinlock_t lock; /* lock for this structure */ + spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ +}; + +struct sysmmu_drvdata { + struct list_head node; /* entry of exynos_iommu_domain.clients */ + struct device *sysmmu; /* System MMU's device descriptor */ + struct device *dev; /* Owner of system MMU */ + char *dbgname; + int nsfrs; + void __iomem **sfrbases; + struct clk *clk[2]; + int activations; + rwlock_t lock; + struct iommu_domain *domain; + sysmmu_fault_handler_t fault_handler; + unsigned long pgtable; +}; + +static bool set_sysmmu_active(struct sysmmu_drvdata *data) +{ + /* return true if the System MMU was not active previously + and it needs to be initialized */ + return ++data->activations == 1; +} + +static bool set_sysmmu_inactive(struct sysmmu_drvdata *data) +{ + /* return true if the System MMU is needed to be disabled */ + BUG_ON(data->activations < 1); + return --data->activations == 0; +} + +static bool is_sysmmu_active(struct sysmmu_drvdata *data) +{ + return data->activations > 0; +} + +static void sysmmu_unblock(void __iomem *sfrbase) +{ + __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL); +} + +static bool sysmmu_block(void __iomem *sfrbase) +{ + int i = 120; + + __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL); + while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) + --i; + + if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) { + sysmmu_unblock(sfrbase); + return false; + } + + return true; +} + +static void __sysmmu_tlb_invalidate(void __iomem *sfrbase) +{ + __raw_writel(0x1, sfrbase + REG_MMU_FLUSH); +} + +static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, + unsigned long iova) +{ + __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY); +} + +static void __sysmmu_set_ptbase(void __iomem *sfrbase, + unsigned long pgd) +{ + __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */ + __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR); + + __sysmmu_tlb_invalidate(sfrbase); +} + +static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base, + unsigned long size, int idx) +{ + __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8); + __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8); +} + +void exynos_sysmmu_set_prefbuf(struct device *dev, + unsigned long base0, unsigned long size0, + unsigned long base1, unsigned long size1) +{ + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + unsigned long flags; + int i; + + BUG_ON((base0 + size0) <= base0); + BUG_ON((size1 > 0) && ((base1 + size1) <= base1)); + + read_lock_irqsave(&data->lock, flags); + if (!is_sysmmu_active(data)) + goto finish; + + for (i 
= 0; i < data->nsfrs; i++) { + if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) { + if (!sysmmu_block(data->sfrbases[i])) + continue; + + if (size1 == 0) { + if (size0 <= SZ_128K) { + base1 = base0; + size1 = size0; + } else { + size1 = size0 - + ALIGN(size0 / 2, SZ_64K); + size0 = size0 - size1; + base1 = base0 + size0; + } + } + + __sysmmu_set_prefbuf( + data->sfrbases[i], base0, size0, 0); + __sysmmu_set_prefbuf( + data->sfrbases[i], base1, size1, 1); + + sysmmu_unblock(data->sfrbases[i]); + } + } +finish: + read_unlock_irqrestore(&data->lock, flags); +} + +static void __set_fault_handler(struct sysmmu_drvdata *data, + sysmmu_fault_handler_t handler) +{ + unsigned long flags; + + write_lock_irqsave(&data->lock, flags); + data->fault_handler = handler; + write_unlock_irqrestore(&data->lock, flags); +} + +void exynos_sysmmu_set_fault_handler(struct device *dev, + sysmmu_fault_handler_t handler) +{ + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + + __set_fault_handler(data, handler); +} + +static int default_fault_handler(enum exynos_sysmmu_inttype itype, + unsigned long pgtable_base, unsigned long fault_addr) +{ + unsigned long *ent; + + if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT)) + itype = SYSMMU_FAULT_UNKNOWN; + + pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n", + sysmmu_fault_name[itype], fault_addr, pgtable_base); + + ent = section_entry(__va(pgtable_base), fault_addr); + pr_err("\tLv1 entry: 0x%lx\n", *ent); + + if (lv1ent_page(ent)) { + ent = page_entry(ent, fault_addr); + pr_err("\t Lv2 entry: 0x%lx\n", *ent); + } + + pr_err("Generating Kernel OOPS... because it is unrecoverable.\n"); + + BUG(); + + return 0; +} + +static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) +{ + /* SYSMMU is in blocked when interrupt occurred. 
*/ + struct sysmmu_drvdata *data = dev_id; + struct resource *irqres; + struct platform_device *pdev; + enum exynos_sysmmu_inttype itype; + unsigned long addr = -1; + + int i, ret = -ENOSYS; + + read_lock(&data->lock); + + WARN_ON(!is_sysmmu_active(data)); + + pdev = to_platform_device(data->sysmmu); + for (i = 0; i < (pdev->num_resources / 2); i++) { + irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (irqres && ((int)irqres->start == irq)) + break; + } + + if (i == pdev->num_resources) { + itype = SYSMMU_FAULT_UNKNOWN; + } else { + itype = (enum exynos_sysmmu_inttype) + __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS)); + if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN)))) + itype = SYSMMU_FAULT_UNKNOWN; + else + addr = __raw_readl( + data->sfrbases[i] + fault_reg_offset[itype]); + } + + if (data->domain) + ret = report_iommu_fault(data->domain, data->dev, + addr, itype); + + if ((ret == -ENOSYS) && data->fault_handler) { + unsigned long base = data->pgtable; + if (itype != SYSMMU_FAULT_UNKNOWN) + base = __raw_readl( + data->sfrbases[i] + REG_PT_BASE_ADDR); + ret = data->fault_handler(itype, base, addr); + } + + if (!ret && (itype != SYSMMU_FAULT_UNKNOWN)) + __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR); + else + dev_dbg(data->sysmmu, "(%s) %s is not handled.\n", + data->dbgname, sysmmu_fault_name[itype]); + + if (itype != SYSMMU_FAULT_UNKNOWN) + sysmmu_unblock(data->sfrbases[i]); + + read_unlock(&data->lock); + + return IRQ_HANDLED; +} + +static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data) +{ + unsigned long flags; + bool disabled = false; + int i; + + write_lock_irqsave(&data->lock, flags); + + if (!set_sysmmu_inactive(data)) + goto finish; + + for (i = 0; i < data->nsfrs; i++) + __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL); + + if (data->clk[1]) + clk_disable(data->clk[1]); + if (data->clk[0]) + clk_disable(data->clk[0]); + + disabled = true; + data->pgtable = 0; + data->domain = NULL; +finish: + write_unlock_irqrestore(&data->lock, flags); + + if (disabled) + dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname); + else + dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n", + data->dbgname, data->activations); + + return disabled; +} + +/* __exynos_sysmmu_enable: Enables System MMU + * + * returns -error if an error occurred and System MMU is not enabled, + * 0 if the System MMU has been just enabled and 1 if System MMU was already + * enabled before. 
+ */ +static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data, + unsigned long pgtable, struct iommu_domain *domain) +{ + int i, ret = 0; + unsigned long flags; + + write_lock_irqsave(&data->lock, flags); + + if (!set_sysmmu_active(data)) { + if (WARN_ON(pgtable != data->pgtable)) { + ret = -EBUSY; + set_sysmmu_inactive(data); + } else { + ret = 1; + } + + dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname); + goto finish; + } + + if (data->clk[0]) + clk_enable(data->clk[0]); + if (data->clk[1]) + clk_enable(data->clk[1]); + + data->pgtable = pgtable; + + for (i = 0; i < data->nsfrs; i++) { + __sysmmu_set_ptbase(data->sfrbases[i], pgtable); + + if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) { + /* System MMU version is 3.x */ + __raw_writel((1 << 12) | (2 << 28), + data->sfrbases[i] + REG_MMU_CFG); + __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0); + __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1); + } + + __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL); + } + + data->domain = domain; + + dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname); +finish: + write_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable) +{ + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + int ret; + + BUG_ON(!memblock_is_memory(pgtable)); + + ret = pm_runtime_get_sync(data->sysmmu); + if (ret < 0) { + dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname); + return ret; + } + + ret = __exynos_sysmmu_enable(data, pgtable, NULL); + if (WARN_ON(ret < 0)) { + pm_runtime_put(data->sysmmu); + dev_err(data->sysmmu, + "(%s) Already enabled with page table %#lx\n", + data->dbgname, data->pgtable); + } else { + data->dev = dev; + } + + return ret; +} + +bool exynos_sysmmu_disable(struct device *dev) +{ + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + bool disabled; + + disabled = __exynos_sysmmu_disable(data); + pm_runtime_put(data->sysmmu); + + return disabled; +} + +static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova) +{ + unsigned long flags; + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + + read_lock_irqsave(&data->lock, flags); + + if (is_sysmmu_active(data)) { + int i; + for (i = 0; i < data->nsfrs; i++) { + if (sysmmu_block(data->sfrbases[i])) { + __sysmmu_tlb_invalidate_entry( + data->sfrbases[i], iova); + sysmmu_unblock(data->sfrbases[i]); + } + } + } else { + dev_dbg(data->sysmmu, + "(%s) Disabled. Skipping invalidating TLB.\n", + data->dbgname); + } + + read_unlock_irqrestore(&data->lock, flags); +} + +void exynos_sysmmu_tlb_invalidate(struct device *dev) +{ + unsigned long flags; + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + + read_lock_irqsave(&data->lock, flags); + + if (is_sysmmu_active(data)) { + int i; + for (i = 0; i < data->nsfrs; i++) { + if (sysmmu_block(data->sfrbases[i])) { + __sysmmu_tlb_invalidate(data->sfrbases[i]); + sysmmu_unblock(data->sfrbases[i]); + } + } + } else { + dev_dbg(data->sysmmu, + "(%s) Disabled. 
Skipping invalidating TLB.\n", + data->dbgname); + } + + read_unlock_irqrestore(&data->lock, flags); +} + +static int exynos_sysmmu_probe(struct platform_device *pdev) +{ + int i, ret; + struct device *dev; + struct sysmmu_drvdata *data; + + dev = &pdev->dev; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + dev_dbg(dev, "Not enough memory\n"); + ret = -ENOMEM; + goto err_alloc; + } + + ret = dev_set_drvdata(dev, data); + if (ret) { + dev_dbg(dev, "Unabled to initialize driver data\n"); + goto err_init; + } + + data->nsfrs = pdev->num_resources / 2; + data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs, + GFP_KERNEL); + if (data->sfrbases == NULL) { + dev_dbg(dev, "Not enough memory\n"); + ret = -ENOMEM; + goto err_init; + } + + for (i = 0; i < data->nsfrs; i++) { + struct resource *res; + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) { + dev_dbg(dev, "Unable to find IOMEM region\n"); + ret = -ENOENT; + goto err_res; + } + + data->sfrbases[i] = ioremap(res->start, resource_size(res)); + if (!data->sfrbases[i]) { + dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", + res->start); + ret = -ENOENT; + goto err_res; + } + } + + for (i = 0; i < data->nsfrs; i++) { + ret = platform_get_irq(pdev, i); + if (ret <= 0) { + dev_dbg(dev, "Unable to find IRQ resource\n"); + goto err_irq; + } + + ret = request_irq(ret, exynos_sysmmu_irq, 0, + dev_name(dev), data); + if (ret) { + dev_dbg(dev, "Unabled to register interrupt handler\n"); + goto err_irq; + } + } + + if (dev_get_platdata(dev)) { + char *deli, *beg; + struct sysmmu_platform_data *platdata = dev_get_platdata(dev); + + beg = platdata->clockname; + + for (deli = beg; (*deli != '\0') && (*deli != ','); deli++) + /* NOTHING */; + + if (*deli == '\0') + deli = NULL; + else + *deli = '\0'; + + data->clk[0] = clk_get(dev, beg); + if (IS_ERR(data->clk[0])) { + data->clk[0] = NULL; + dev_dbg(dev, "No clock descriptor registered\n"); + } + + if (data->clk[0] && deli) { + *deli = ','; + data->clk[1] = clk_get(dev, deli + 1); + if (IS_ERR(data->clk[1])) + data->clk[1] = NULL; + } + + data->dbgname = platdata->dbgname; + } + + data->sysmmu = dev; + rwlock_init(&data->lock); + INIT_LIST_HEAD(&data->node); + + __set_fault_handler(data, &default_fault_handler); + + if (dev->parent) + pm_runtime_enable(dev); + + dev_dbg(dev, "(%s) Initialized\n", data->dbgname); + return 0; +err_irq: + while (i-- > 0) { + int irq; + + irq = platform_get_irq(pdev, i); + free_irq(irq, data); + } +err_res: + while (data->nsfrs-- > 0) + iounmap(data->sfrbases[data->nsfrs]); + kfree(data->sfrbases); +err_init: + kfree(data); +err_alloc: + dev_err(dev, "Failed to initialize\n"); + return ret; +} + +static struct platform_driver exynos_sysmmu_driver = { + .probe = exynos_sysmmu_probe, + .driver = { + .owner = THIS_MODULE, + .name = "exynos-sysmmu", + } +}; + +static inline void pgtable_flush(void *vastart, void *vaend) +{ + dmac_flush_range(vastart, vaend); + outer_flush_range(virt_to_phys(vastart), + virt_to_phys(vaend)); +} + +static int exynos_iommu_domain_init(struct iommu_domain *domain) +{ + struct exynos_iommu_domain *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pgtable = (unsigned long *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, 2); + if (!priv->pgtable) + goto err_pgtable; + + priv->lv2entcnt = (short *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, 1); + if (!priv->lv2entcnt) + goto err_counter; + + pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES); + + 
spin_lock_init(&priv->lock); + spin_lock_init(&priv->pgtablelock); + INIT_LIST_HEAD(&priv->clients); + + domain->priv = priv; + return 0; + +err_counter: + free_pages((unsigned long)priv->pgtable, 2); +err_pgtable: + kfree(priv); + return -ENOMEM; +} + +static void exynos_iommu_domain_destroy(struct iommu_domain *domain) +{ + struct exynos_iommu_domain *priv = domain->priv; + struct sysmmu_drvdata *data; + unsigned long flags; + int i; + + WARN_ON(!list_empty(&priv->clients)); + + spin_lock_irqsave(&priv->lock, flags); + + list_for_each_entry(data, &priv->clients, node) { + while (!exynos_sysmmu_disable(data->dev)) + ; /* until System MMU is actually disabled */ + } + + spin_unlock_irqrestore(&priv->lock, flags); + + for (i = 0; i < NUM_LV1ENTRIES; i++) + if (lv1ent_page(priv->pgtable + i)) + kfree(__va(lv2table_base(priv->pgtable + i))); + + free_pages((unsigned long)priv->pgtable, 2); + free_pages((unsigned long)priv->lv2entcnt, 1); + kfree(domain->priv); + domain->priv = NULL; +} + +static int exynos_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + struct exynos_iommu_domain *priv = domain->priv; + unsigned long flags; + int ret; + + ret = pm_runtime_get_sync(data->sysmmu); + if (ret < 0) + return ret; + + ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + + ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain); + + if (ret == 0) { + /* 'data->node' must not be appeared in priv->clients */ + BUG_ON(!list_empty(&data->node)); + data->dev = dev; + list_add_tail(&data->node, &priv->clients); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + if (ret < 0) { + dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n", + __func__, __pa(priv->pgtable)); + pm_runtime_put(data->sysmmu); + } else if (ret > 0) { + dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n", + __func__, __pa(priv->pgtable)); + } else { + dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n", + __func__, __pa(priv->pgtable)); + } + + return ret; +} + +static void exynos_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + struct exynos_iommu_domain *priv = domain->priv; + struct list_head *pos; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&priv->lock, flags); + + list_for_each(pos, &priv->clients) { + if (list_entry(pos, struct sysmmu_drvdata, node) == data) { + found = true; + break; + } + } + + if (!found) + goto finish; + + if (__exynos_sysmmu_disable(data)) { + dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n", + __func__, __pa(priv->pgtable)); + list_del(&data->node); + INIT_LIST_HEAD(&data->node); + + } else { + dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed", + __func__, __pa(priv->pgtable)); + } + +finish: + spin_unlock_irqrestore(&priv->lock, flags); + + if (found) + pm_runtime_put(data->sysmmu); +} + +static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, + short *pgcounter) +{ + if (lv1ent_fault(sent)) { + unsigned long *pent; + + pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC); + BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1)); + if (!pent) + return NULL; + + *sent = mk_lv1ent_page(__pa(pent)); + *pgcounter = NUM_LV2ENTRIES; + pgtable_flush(pent, pent + NUM_LV2ENTRIES); + pgtable_flush(sent, sent + 1); + } + + return page_entry(sent, iova); +} + +static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt) +{ + 
if (lv1ent_section(sent)) + return -EADDRINUSE; + + if (lv1ent_page(sent)) { + if (*pgcnt != NUM_LV2ENTRIES) + return -EADDRINUSE; + + kfree(page_entry(sent, 0)); + + *pgcnt = 0; + } + + *sent = mk_lv1ent_sect(paddr); + + pgtable_flush(sent, sent + 1); + + return 0; +} + +static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, + short *pgcnt) +{ + if (size == SPAGE_SIZE) { + if (!lv2ent_fault(pent)) + return -EADDRINUSE; + + *pent = mk_lv2ent_spage(paddr); + pgtable_flush(pent, pent + 1); + *pgcnt -= 1; + } else { /* size == LPAGE_SIZE */ + int i; + for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { + if (!lv2ent_fault(pent)) { + memset(pent, 0, sizeof(*pent) * i); + return -EADDRINUSE; + } + + *pent = mk_lv2ent_lpage(paddr); + } + pgtable_flush(pent - SPAGES_PER_LPAGE, pent); + *pgcnt -= SPAGES_PER_LPAGE; + } + + return 0; +} + +static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct exynos_iommu_domain *priv = domain->priv; + unsigned long *entry; + unsigned long flags; + int ret = -ENOMEM; + + BUG_ON(priv->pgtable == NULL); + + spin_lock_irqsave(&priv->pgtablelock, flags); + + entry = section_entry(priv->pgtable, iova); + + if (size == SECT_SIZE) { + ret = lv1set_section(entry, paddr, + &priv->lv2entcnt[lv1ent_offset(iova)]); + } else { + unsigned long *pent; + + pent = alloc_lv2entry(entry, iova, + &priv->lv2entcnt[lv1ent_offset(iova)]); + + if (!pent) + ret = -ENOMEM; + else + ret = lv2set_page(pent, paddr, size, + &priv->lv2entcnt[lv1ent_offset(iova)]); + } + + if (ret) { + pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n", + __func__, iova, size); + } + + spin_unlock_irqrestore(&priv->pgtablelock, flags); + + return ret; +} + +static size_t exynos_iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + struct exynos_iommu_domain *priv = domain->priv; + struct sysmmu_drvdata *data; + unsigned long flags; + unsigned long *ent; + + BUG_ON(priv->pgtable == NULL); + + spin_lock_irqsave(&priv->pgtablelock, flags); + + ent = section_entry(priv->pgtable, iova); + + if (lv1ent_section(ent)) { + BUG_ON(size < SECT_SIZE); + + *ent = 0; + pgtable_flush(ent, ent + 1); + size = SECT_SIZE; + goto done; + } + + if (unlikely(lv1ent_fault(ent))) { + if (size > SECT_SIZE) + size = SECT_SIZE; + goto done; + } + + /* lv1ent_page(sent) == true here */ + + ent = page_entry(ent, iova); + + if (unlikely(lv2ent_fault(ent))) { + size = SPAGE_SIZE; + goto done; + } + + if (lv2ent_small(ent)) { + *ent = 0; + size = SPAGE_SIZE; + priv->lv2entcnt[lv1ent_offset(iova)] += 1; + goto done; + } + + /* lv1ent_large(ent) == true here */ + BUG_ON(size < LPAGE_SIZE); + + memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); + + size = LPAGE_SIZE; + priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; +done: + spin_unlock_irqrestore(&priv->pgtablelock, flags); + + spin_lock_irqsave(&priv->lock, flags); + list_for_each_entry(data, &priv->clients, node) + sysmmu_tlb_invalidate_entry(data->dev, iova); + spin_unlock_irqrestore(&priv->lock, flags); + + + return size; +} + +static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) +{ + struct exynos_iommu_domain *priv = domain->priv; + unsigned long *entry; + unsigned long flags; + phys_addr_t phys = 0; + + spin_lock_irqsave(&priv->pgtablelock, flags); + + entry = section_entry(priv->pgtable, iova); + + if (lv1ent_section(entry)) { + phys = section_phys(entry) + section_offs(iova); + } else if (lv1ent_page(entry)) { + 
entry = page_entry(entry, iova); + + if (lv2ent_large(entry)) + phys = lpage_phys(entry) + lpage_offs(iova); + else if (lv2ent_small(entry)) + phys = spage_phys(entry) + spage_offs(iova); + } + + spin_unlock_irqrestore(&priv->pgtablelock, flags); + + return phys; +} + +static struct iommu_ops exynos_iommu_ops = { + .domain_init = &exynos_iommu_domain_init, + .domain_destroy = &exynos_iommu_domain_destroy, + .attach_dev = &exynos_iommu_attach_device, + .detach_dev = &exynos_iommu_detach_device, + .map = &exynos_iommu_map, + .unmap = &exynos_iommu_unmap, + .iova_to_phys = &exynos_iommu_iova_to_phys, + .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, +}; + +static int __init exynos_iommu_init(void) +{ + int ret; + + ret = platform_driver_register(&exynos_sysmmu_driver); + + if (ret == 0) + bus_set_iommu(&platform_bus_type, &exynos_iommu_ops); + + return ret; +} +subsys_initcall(exynos_iommu_init); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index a9fc714fb38d..9a7a60aeb19e 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1781,7 +1781,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) pdata->slots[0].nonremovable = true; pdata->slots[0].no_regulator_off_init = true; } - of_property_read_u32(np, "ti,bus-width", &bus_width); + of_property_read_u32(np, "bus-width", &bus_width); if (bus_width == 4) pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; else if (bus_width == 8) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d190d04636a7..365b16c230f8 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -404,7 +404,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, if (!np) return -ENODEV; - if (of_get_property(np, "fsl,card-wired", NULL)) + if (of_get_property(np, "non-removable", NULL)) boarddata->cd_type = ESDHC_CD_PERMANENT; if (of_get_property(np, "fsl,cd-controller", NULL)) diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index c5c2a48bdd94..d9a4ef4f1ed0 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = { #ifdef CONFIG_OF static bool sdhci_of_wp_inverted(struct device_node *np) { - if (of_get_property(np, "sdhci,wp-inverted", NULL)) + if (of_get_property(np, "sdhci,wp-inverted", NULL) || + of_get_property(np, "wp-inverted", NULL)) return true; /* Old device trees don't have the wp-inverted property. 
*/ @@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); const __be32 *clk; + u32 bus_width; int size; if (of_device_is_available(np)) { if (of_get_property(np, "sdhci,auto-cmd12", NULL)) host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; - if (of_get_property(np, "sdhci,1-bit-only", NULL)) + if (of_get_property(np, "sdhci,1-bit-only", NULL) || + (of_property_read_u32(np, "bus-width", &bus_width) == 0 && + bus_width == 1)) host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; if (sdhci_of_wp_inverted(np)) diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 4a44bf833611..68548236ec42 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev) } } - tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config, - TEGRA_USB_PHY_MODE_HOST); + tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs, + pdata->phy_config, + TEGRA_USB_PHY_MODE_HOST); if (IS_ERR(tegra->phy)) { dev_err(&pdev->dev, "Failed to open USB phy\n"); err = -ENXIO; |
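
Several of the MMC hunks above replace driver-specific device-tree properties ("ti,bus-width", "sdhci,1-bit-only", "fsl,card-wired") with the generic "bus-width" and "non-removable" bindings. The fragment below is a minimal sketch of that parsing pattern for a platform MMC driver; the helper name foo_mmc_parse_dt() and the surrounding driver are hypothetical, and only the of_* accessors and MMC_CAP_* flags are the real kernel interfaces the patches above rely on.

#include <linux/of.h>
#include <linux/mmc/host.h>

/*
 * Sketch only: parse the generic "bus-width" and "non-removable" bindings
 * that the hunks above switch to.  foo_mmc_parse_dt() is a hypothetical
 * helper, not part of the patch.
 */
static void foo_mmc_parse_dt(struct device_node *np, struct mmc_host *mmc)
{
	u32 bus_width = 0;

	if (of_get_property(np, "non-removable", NULL))
		mmc->caps |= MMC_CAP_NONREMOVABLE;

	if (of_property_read_u32(np, "bus-width", &bus_width))
		return;		/* property absent: keep the 1-bit default */

	if (bus_width == 8)
		mmc->caps |= MMC_CAP_8_BIT_DATA;
	else if (bus_width == 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
}

A driver's probe would call such a helper after allocating the mmc_host, leaving driver-specific behaviour (for example SDHCI_QUIRK_FORCE_1_BIT_DATA in sdhci-pltfm.c above) to per-driver code.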