From 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 5 Jul 2018 13:29:55 -0600 Subject: Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()" This commit may cause a less than required dma mask to be used for some allocations, which apparently leads to module load failures for iwlwifi sometimes. This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a. Signed-off-by: Christoph Hellwig Reported-by: Fabio Coatti Tested-by: Fabio Coatti --- drivers/iommu/Kconfig | 1 - drivers/iommu/intel-iommu.c | 62 +++++++++++++++++++++++++++++++++------------ 2 files changed, 46 insertions(+), 17 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e055d228bfb9..689ffe538370 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -142,7 +142,6 @@ config DMAR_TABLE config INTEL_IOMMU bool "Support for Intel IOMMU using DMA Remapping Devices" depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) - select DMA_DIRECT_OPS select IOMMU_API select IOMMU_IOVA select NEED_DMA_MAP_STATE diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 14e4b3722428..b344a883f116 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { - void *vaddr; + struct page *page = NULL; + int order; - vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs); - if (iommu_no_mapping(dev) || !vaddr) - return vaddr; + size = PAGE_ALIGN(size); + order = get_order(size); - *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr), - PAGE_ALIGN(size), DMA_BIDIRECTIONAL, - dev->coherent_dma_mask); - if (!*dma_handle) - goto out_free_pages; - return vaddr; + if (!iommu_no_mapping(dev)) + flags &= ~(GFP_DMA | GFP_DMA32); + else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { + if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) + flags |= GFP_DMA; + else + flags |= GFP_DMA32; + } + + if (gfpflags_allow_blocking(flags)) { + unsigned int count = size >> PAGE_SHIFT; + + page = dma_alloc_from_contiguous(dev, count, order, flags); + if (page && iommu_no_mapping(dev) && + page_to_phys(page) + size > dev->coherent_dma_mask) { + dma_release_from_contiguous(dev, page, count); + page = NULL; + } + } + + if (!page) + page = alloc_pages(flags, order); + if (!page) + return NULL; + memset(page_address(page), 0, size); + + *dma_handle = __intel_map_single(dev, page_to_phys(page), size, + DMA_BIDIRECTIONAL, + dev->coherent_dma_mask); + if (*dma_handle) + return page_address(page); + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) + __free_pages(page, order); -out_free_pages: - dma_direct_free(dev, size, vaddr, *dma_handle, attrs); return NULL; } static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - if (!iommu_no_mapping(dev)) - intel_unmap(dev, dma_handle, PAGE_ALIGN(size)); - dma_direct_free(dev, size, vaddr, dma_handle, attrs); + int order; + struct page *page = virt_to_page(vaddr); + + size = PAGE_ALIGN(size); + order = get_order(size); + + intel_unmap(dev, dma_handle, size); + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) + __free_pages(page, order); } static void intel_unmap_sg(struct device *dev, struct 
scatterlist *sglist, -- cgit v1.2.1 From 4674686d6c8974eb4eebec894c398a0f6c506af3 Mon Sep 17 00:00:00 2001 From: "yzhai003@ucr.edu" Date: Fri, 1 Jun 2018 11:30:14 -0700 Subject: iommu/amd: Argument page_size could be uninitialized Argument "page_size" passed to function "fetch_pte" could be left uninitialized if the function returns NULL. The caller "iommu_unmap_page" checks the return value, but page_size is used outside the if block. Signed-off-by: yzhai003@ucr.edu Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 596b95c50051..e688169721b8 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1404,6 +1404,8 @@ static u64 *fetch_pte(struct protection_domain *domain, int level; u64 *pte; + *page_size = 0; + if (address > PM_LEVEL_SIZE(domain->mode)) return NULL; -- cgit v1.2.1 From 3c120143f584360a13614787e23ae2cdcb5e5ccd Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Wed, 6 Jun 2018 10:18:46 +0800 Subject: iommu/amd: make sure TLB to be flushed before IOVA freed Although the mapping has already been removed from the page table, it may still exist in the TLB. If the freed IOVA is reused by another user before the flush operation has completed, the new user cannot correctly access its memory. Signed-off-by: Zhen Lei Fixes: b1516a14657a ('iommu/amd: Implement flush queue') Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e688169721b8..874e6480e391 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2407,9 +2407,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, } if (amd_iommu_unmap_flush) { - dma_ops_free_iova(dma_dom, dma_addr, pages); domain_flush_tlb(&dma_dom->domain); domain_flush_complete(&dma_dom->domain); + dma_ops_free_iova(dma_dom, dma_addr, pages); } else { pages = __roundup_pow_of_two(pages); queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0); -- cgit v1.2.1 From 0f725561e168485eff7277d683405c05b192f537 Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Thu, 7 Jun 2018 09:56:59 -0700 Subject: iommu/vt-d: Add definitions for PFSID When an SRIOV VF device IOTLB is invalidated, we need to provide the PF source ID so that the IOMMU hardware can gauge the depth of the invalidation queue, which is shared among VFs. This is needed when the device invalidation throttle (DIT) capability is supported. This patch adds bit definitions for checking and tracking the PFSID.
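For reference, the PFSID of a VF is simply the requester ID of its physical function. A minimal sketch of the derivation (the function name is illustrative; the follow-up patch in this series does exactly this using pci_physfn() and PCI_DEVID()):

/* Illustrative sketch only: derive the PF source ID used in a device IOTLB
 * invalidation descriptor. pci_physfn() returns the device itself when it is
 * not a VF, so PFs and VFs can share the same path.
 */
static u16 example_pfsid(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);

	return PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
}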
Signed-off-by: Jacob Pan Cc: stable@vger.kernel.org Cc: "Ashok Raj" Cc: "Lu Baolu" Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 14e4b3722428..7f6194ef48f4 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -422,6 +422,7 @@ struct device_domain_info { struct list_head global; /* link to global list */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ + u16 pfsid; /* SRIOV physical function source ID */ u8 pasid_supported:3; u8 pasid_enabled:1; u8 pri_supported:1; -- cgit v1.2.1 From 1c48db44924298ad0cb5a6386b88017539be8822 Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Thu, 7 Jun 2018 09:57:00 -0700 Subject: iommu/vt-d: Fix dev iotlb pfsid use PFSID should be used in the invalidation descriptor for flushing device IOTLBs on SRIOV VFs. Signed-off-by: Jacob Pan Cc: stable@vger.kernel.org Cc: "Ashok Raj" Cc: "Lu Baolu" Signed-off-by: Joerg Roedel --- drivers/iommu/dmar.c | 6 +++--- drivers/iommu/intel-iommu.c | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 75456b5aa825..d9c748b6f9e4 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1339,8 +1339,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, qi_submit_sync(&desc, iommu); } -void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, - u64 addr, unsigned mask) +void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u16 qdep, u64 addr, unsigned mask) { struct qi_desc desc; @@ -1355,7 +1355,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, qdep = 0; desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | - QI_DIOTLB_TYPE; + QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); qi_submit_sync(&desc, iommu); } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 7f6194ef48f4..497ef94c5a8c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1475,6 +1475,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) return; pdev = to_pci_dev(info->dev); + /* For IOMMU that supports device IOTLB throttling (DIT), we assign + * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge + * queue depth at PF level. If DIT is not set, PFSID will be treated as + * reserved, which should be set to 0. 
+ */ + if (!ecap_dit(info->iommu->ecap)) + info->pfsid = 0; + else { + struct pci_dev *pf_pdev; + + /* pdev will be returned if device is not a vf */ + pf_pdev = pci_physfn(pdev); + info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn); + } #ifdef CONFIG_INTEL_IOMMU_SVM /* The PCIe spec, in its wisdom, declares that the behaviour of @@ -1540,7 +1554,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, sid = info->bus << 8 | info->devfn; qdep = info->ats_qdep; - qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); + qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, + qdep, addr, mask); } spin_unlock_irqrestore(&device_domain_lock, flags); } -- cgit v1.2.1 From 379521462e4add27f3514da8e4ab1fd7a54fe1c7 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 12 Jun 2018 16:06:10 +0200 Subject: iommu/msm: Don't call iommu_device_{,un}link from atomic context Fixes the following splat during boot: BUG: sleeping function called from invalid context at kernel/locking/mutex.c:747 in_atomic(): 1, irqs_disabled(): 128, pid: 77, name: kworker/2:1 4 locks held by kworker/2:1/77: #0: (ptrval) ((wq_completion)"events"){+.+.}, at: process_one_work+0x1fc/0x8fc #1: (ptrval) (deferred_probe_work){+.+.}, at: process_one_work+0x1fc/0x8fc #2: (ptrval) (&dev->mutex){....}, at: __device_attach+0x40/0x178 #3: (ptrval) (msm_iommu_lock){....}, at: msm_iommu_add_device+0x28/0xcc irq event stamp: 348 hardirqs last enabled at (347): [] kfree+0xe0/0x3c0 hardirqs last disabled at (348): [] _raw_spin_lock_irqsave+0x2c/0x68 softirqs last enabled at (0): [] copy_process.part.5+0x280/0x1a68 softirqs last disabled at (0): [<00000000>] (null) Preemption disabled at: [<00000000>] (null) CPU: 2 PID: 77 Comm: kworker/2:1 Not tainted 4.17.0-rc5-wt-ath-01075-gaca0516bb4cf #239 Hardware name: Generic DT based system Workqueue: events deferred_probe_work_func [] (unwind_backtrace) from [] (show_stack+0x20/0x24) [] (show_stack) from [] (dump_stack+0xa0/0xcc) [] (dump_stack) from [] (___might_sleep+0x1f8/0x2d4) ath10k_sdio mmc2:0001:1: Direct firmware load for ath10k/QCA9377/hw1.0/board-2.bin failed with error -2 [] (___might_sleep) from [] (__might_sleep+0x70/0xa8) [] (__might_sleep) from [] (__mutex_lock+0x50/0xb28) [] (__mutex_lock) from [] (mutex_lock_nested+0x2c/0x34) ath10k_sdio mmc2:0001:1: board_file api 1 bmi_id N/A crc32 544289f7 [] (mutex_lock_nested) from [] (kernfs_find_and_get_ns+0x30/0x5c) [] (kernfs_find_and_get_ns) from [] (sysfs_add_link_to_group+0x28/0x58) [] (sysfs_add_link_to_group) from [] (iommu_device_link+0x50/0xb4) [] (iommu_device_link) from [] (msm_iommu_add_device+0xa0/0xcc) [] (msm_iommu_add_device) from [] (add_iommu_group+0x3c/0x64) [] (add_iommu_group) from [] (bus_for_each_dev+0x84/0xc4) [] (bus_for_each_dev) from [] (bus_set_iommu+0xd0/0x10c) [] (bus_set_iommu) from [] (msm_iommu_probe+0x5b8/0x66c) [] (msm_iommu_probe) from [] (platform_drv_probe+0x60/0xbc) [] (platform_drv_probe) from [] (driver_probe_device+0x30c/0x4cc) [] (driver_probe_device) from [] (__device_attach_driver+0xac/0x14c) [] (__device_attach_driver) from [] (bus_for_each_drv+0x68/0xc8) [] (bus_for_each_drv) from [] (__device_attach+0xe4/0x178) [] (__device_attach) from [] (device_initial_probe+0x1c/0x20) [] (device_initial_probe) from [] (bus_probe_device+0x98/0xa0) [] (bus_probe_device) from [] (deferred_probe_work_func+0x74/0x198) [] (deferred_probe_work_func) from [] (process_one_work+0x2c4/0x8fc) [] (process_one_work) from [] (worker_thread+0x2c4/0x5cc) [] (worker_thread) from [] (kthread+0x180/0x188) [] 
(kthread) from [] (ret_from_fork+0x14/0x20) Fixes: 42df43b36163 ("iommu/msm: Make use of iommu_device_register interface") Signed-off-by: Niklas Cassel Reviewed-by: Vivek Gautam Signed-off-by: Joerg Roedel --- drivers/iommu/msm_iommu.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 0d3350463a3f..9a95c9b9d0d8 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -395,20 +395,15 @@ static int msm_iommu_add_device(struct device *dev) struct msm_iommu_dev *iommu; struct iommu_group *group; unsigned long flags; - int ret = 0; spin_lock_irqsave(&msm_iommu_lock, flags); - iommu = find_iommu_for_dev(dev); + spin_unlock_irqrestore(&msm_iommu_lock, flags); + if (iommu) iommu_device_link(&iommu->iommu, dev); else - ret = -ENODEV; - - spin_unlock_irqrestore(&msm_iommu_lock, flags); - - if (ret) - return ret; + return -ENODEV; group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) @@ -425,13 +420,12 @@ static void msm_iommu_remove_device(struct device *dev) unsigned long flags; spin_lock_irqsave(&msm_iommu_lock, flags); - iommu = find_iommu_for_dev(dev); + spin_unlock_irqrestore(&msm_iommu_lock, flags); + if (iommu) iommu_device_unlink(&iommu->iommu, dev); - spin_unlock_irqrestore(&msm_iommu_lock, flags); - iommu_group_remove_device(dev); } -- cgit v1.2.1 From bad614b24293ae463e74d2465685f0e4e229baca Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 12 Jun 2018 16:41:21 -0500 Subject: iommu: Enable debugfs exposure of IOMMU driver internals Provide base enablement for using debugfs to expose internal data of an IOMMU driver. When called, create the /sys/kernel/debug/iommu directory. Emit a strong warning at boot time to indicate that this feature is enabled. This function is called from iommu_init, and creates the initial DebugFS directory. Drivers may then call iommu_debugfs_new_driver_dir() to instantiate a device-specific directory to expose internal data. It will return a pointer to the new dentry structure created in /sys/kernel/debug/iommu, or NULL in the event of a failure. Since the IOMMU driver can not be removed from the running system, there is no need for an "off" function. Signed-off-by: Gary R Hook Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 10 +++++++ drivers/iommu/Makefile | 1 + drivers/iommu/iommu-debugfs.c | 66 +++++++++++++++++++++++++++++++++++++++++++ drivers/iommu/iommu.c | 2 ++ 4 files changed, 79 insertions(+) create mode 100644 drivers/iommu/iommu-debugfs.c (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e055d228bfb9..ab9181d8af3b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -60,6 +60,16 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST endmenu +config IOMMU_DEBUGFS + bool "Export IOMMU internals in DebugFS" + depends on DEBUG_FS + help + Allows exposure of IOMMU device internals. This option enables + the use of debugfs by IOMMU drivers as required. Devices can, + at initialization time, cause the IOMMU code to create a top-level + debug/iommu directory, and then populate a subdirectory with + entries as required. 
+ config IOMMU_IOVA tristate diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 1fb695854809..74cfbc392862 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o +obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c new file mode 100644 index 000000000000..3b1bf88fd1b0 --- /dev/null +++ b/drivers/iommu/iommu-debugfs.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IOMMU debugfs core infrastructure + * + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Author: Gary R Hook + */ + +#include +#include +#include + +struct dentry *iommu_debugfs_dir; + +/** + * iommu_debugfs_setup - create the top-level iommu directory in debugfs + * + * Provide base enablement for using debugfs to expose internal data of an + * IOMMU driver. When called, this function creates the + * /sys/kernel/debug/iommu directory. + * + * Emit a strong warning at boot time to indicate that this feature is + * enabled. + * + * This function is called from iommu_init; drivers may then call + * iommu_debugfs_new_driver_dir() to instantiate a vendor-specific + * directory to be used to expose internal data. + */ +void iommu_debugfs_setup(void) +{ + if (!iommu_debugfs_dir) { + iommu_debugfs_dir = debugfs_create_dir("iommu", NULL); + pr_warn("\n"); + pr_warn("*************************************************************\n"); + pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_warn("** **\n"); + pr_warn("** IOMMU DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); + pr_warn("** **\n"); + pr_warn("** This means that this kernel is built to expose internal **\n"); + pr_warn("** IOMMU data structures, which may compromise security on **\n"); + pr_warn("** your system. **\n"); + pr_warn("** **\n"); + pr_warn("** If you see this message and you are not debugging the **\n"); + pr_warn("** kernel, report this immediately to your vendor! **\n"); + pr_warn("** **\n"); + pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_warn("*************************************************************\n"); + } +} + +/** + * iommu_debugfs_new_driver_dir - create a vendor directory under debugfs/iommu + * @vendor: name of the vendor-specific subdirectory to create + * + * This function is called by an IOMMU driver to create the top-level debugfs + * directory for that driver. + * + * Return: upon success, a pointer to the dentry for the new directory. + * NULL in case of failure. 
+ */ +struct dentry *iommu_debugfs_new_driver_dir(const char *vendor) +{ + return debugfs_create_dir(vendor, iommu_debugfs_dir); +} +EXPORT_SYMBOL_GPL(iommu_debugfs_new_driver_dir); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 63b37563db7e..d227b864a109 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1748,6 +1748,8 @@ static int __init iommu_init(void) NULL, kernel_kobj); BUG_ON(!iommu_group_kset); + iommu_debugfs_setup(); + return 0; } core_initcall(iommu_init); -- cgit v1.2.1 From 7d0f5fd3e4d687424cc2ab68b55472e328e2ee0a Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 12 Jun 2018 16:41:30 -0500 Subject: iommu/amd: Add basic debugfs infrastructure for AMD IOMMU Implement a skeleton framework for debugfs support in the AMD IOMMU. Add an AMD-specific Kconfig boolean that depends upon general enablement of DebugFS in the IOMMU. Signed-off-by: Gary R Hook Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 12 ++++++++++++ drivers/iommu/Makefile | 1 + drivers/iommu/amd_iommu_debugfs.c | 33 +++++++++++++++++++++++++++++++++ drivers/iommu/amd_iommu_init.c | 6 ++++-- drivers/iommu/amd_iommu_proto.h | 6 ++++++ drivers/iommu/amd_iommu_types.h | 5 +++++ 6 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 drivers/iommu/amd_iommu_debugfs.c (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index ab9181d8af3b..8d0a2886658f 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -145,6 +145,18 @@ config AMD_IOMMU_V2 hardware. Select this option if you want to use devices that support the PCI PRI and PASID interface. +config AMD_IOMMU_DEBUGFS + bool "Enable AMD IOMMU internals in DebugFS" + depends on AMD_IOMMU && IOMMU_DEBUGFS + ---help--- + !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! + + DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!! + Exposes AMD IOMMU device internals in DebugFS. + + This option is -NOT- intended for production environments, and should + not generally be enabled. + # Intel IOMMU support config DMAR_TABLE bool diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 74cfbc392862..47fd6ea9de2d 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c new file mode 100644 index 000000000000..c6a5c737ef09 --- /dev/null +++ b/drivers/iommu/amd_iommu_debugfs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD IOMMU driver + * + * Copyright (C) 2018 Advanced Micro Devices, Inc. 
+ * + * Author: Gary R Hook + */ + +#include +#include +#include +#include "amd_iommu_proto.h" +#include "amd_iommu_types.h" + +static struct dentry *amd_iommu_debugfs; +static DEFINE_MUTEX(amd_iommu_debugfs_lock); + +#define MAX_NAME_LEN 20 + +void amd_iommu_debugfs_setup(struct amd_iommu *iommu) +{ + char name[MAX_NAME_LEN + 1]; + + mutex_lock(&amd_iommu_debugfs_lock); + if (!amd_iommu_debugfs) + amd_iommu_debugfs = debugfs_create_dir("amd", + iommu_debugfs_dir); + mutex_unlock(&amd_iommu_debugfs_lock); + + snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index); + iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs); +} diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 904c575d1677..031e6dbb8345 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2721,6 +2721,7 @@ int __init amd_iommu_enable_faulting(void) */ static int __init amd_iommu_init(void) { + struct amd_iommu *iommu; int ret; ret = iommu_go_to_state(IOMMU_INITIALIZED); @@ -2730,14 +2731,15 @@ static int __init amd_iommu_init(void) disable_iommus(); free_iommu_resources(); } else { - struct amd_iommu *iommu; - uninit_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); } } + for_each_iommu(iommu) + amd_iommu_debugfs_setup(iommu); + return ret; } diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 640c286a0ab9..a8cd0296fb16 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h @@ -33,6 +33,12 @@ extern void amd_iommu_uninit_devices(void); extern void amd_iommu_init_notifier(void); extern int amd_iommu_init_api(void); +#ifdef CONFIG_AMD_IOMMU_DEBUGFS +void amd_iommu_debugfs_setup(struct amd_iommu *iommu); +#else +static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} +#endif + /* Needed for interrupt remapping */ extern int amd_iommu_prepare(void); extern int amd_iommu_enable(void); diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 986cbe0cc189..cfac9d842b0f 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -594,6 +594,11 @@ struct amd_iommu { u32 flags; volatile u64 __aligned(8) cmd_sem; + +#ifdef CONFIG_AMD_IOMMU_DEBUGFS + /* DebugFS Info */ + struct dentry *debugfs; +#endif }; static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) -- cgit v1.2.1 From ddbbddd76a3fcec12fe59bb1b8b855842e1e7fe8 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 14 Jun 2018 12:48:21 +0200 Subject: iommu/ipmmu-vmsa: Increase maximum micro-TLBS to 48 Bump up the maximum numbers of micro-TLBS to 48. Each IPMMU device instance get micro-TLB assignment via the "iommus" property in DT. Older SoCs tend to use a maximum number of 32 micro-TLBs per IPMMU instance however newer SoCs such as r8a7796 make use of up to 48 micro-TLBs. At this point no SoC specific handling is done to validate the maximum number of micro-TLBs, and because of that the DT information is assumed to be within correct range for each particular SoC. If needed in the future SoC specific feature flags can be added to handle the maximum number of micro-TLBs without requiring DT changes, however at this point this does not seem necessary. 
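As a quick sanity check of the new register layout, the macros added in the diff that follows keep micro-TLBs 0-31 at the original offsets and place micro-TLBs 32-47 in a second block starting at 0x0600; the resulting offsets work out as:

IMUCTR(0)  = 0x0300    IMUASID(0)  = 0x0308
IMUCTR(31) = 0x04f0    IMUASID(31) = 0x04f8
IMUCTR(32) = 0x0600    IMUASID(32) = 0x0608
IMUCTR(47) = 0x06f0    IMUASID(47) = 0x06f8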
Signed-off-by: Magnus Damm Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 40ae6e87cb88..5c1ade7e9ae4 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -194,7 +194,9 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IMPMBA(n) (0x0280 + ((n) * 4)) #define IMPMBD(n) (0x02c0 + ((n) * 4)) -#define IMUCTR(n) (0x0300 + ((n) * 16)) +#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) +#define IMUCTR0(n) (0x0300 + ((n) * 16)) +#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) #define IMUCTR_FIXADDEN (1 << 31) #define IMUCTR_FIXADD_MASK (0xff << 16) #define IMUCTR_FIXADD_SHIFT 16 @@ -204,7 +206,9 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IMUCTR_FLUSH (1 << 1) #define IMUCTR_MMUEN (1 << 0) -#define IMUASID(n) (0x0308 + ((n) * 16)) +#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n)) +#define IMUASID0(n) (0x0308 + ((n) * 16)) +#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) #define IMUASID_ASID8_MASK (0xff << 8) #define IMUASID_ASID8_SHIFT 8 #define IMUASID_ASID0_MASK (0xff << 0) @@ -955,7 +959,7 @@ static int ipmmu_probe(struct platform_device *pdev) } mmu->dev = &pdev->dev; - mmu->num_utlbs = 32; + mmu->num_utlbs = 48; spin_lock_init(&mmu->lock); bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); -- cgit v1.2.1 From 0b8ac1409641e18912e5fe0accd92a5f3e2d2581 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 14 Jun 2018 12:48:22 +0200 Subject: iommu/ipmmu-vmsa: Hook up r8a7796 DT matching code Support the r8a7796 IPMMU by sharing feature flags between r8a7795 and r8a7796. Also update IOMMU_OF_DECLARE to hook up the updated compat string. 
[rebased on v4.17] Signed-off-by: Jacopo Mondi Signed-off-by: Magnus Damm Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 5c1ade7e9ae4..04c801555140 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -760,8 +760,9 @@ static bool ipmmu_slave_whitelist(struct device *dev) return false; } -static const struct soc_device_attribute soc_r8a7795[] = { +static const struct soc_device_attribute soc_rcar_gen3[] = { { .soc_id = "r8a7795", }, + { .soc_id = "r8a7796", }, { /* sentinel */ } }; @@ -769,7 +770,7 @@ static int ipmmu_of_xlate(struct device *dev, struct of_phandle_args *spec) { /* For R-Car Gen3 use a white list to opt-in slave devices */ - if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev)) + if (soc_device_match(soc_rcar_gen3) && !ipmmu_slave_whitelist(dev)) return -ENODEV; iommu_fwspec_add_ids(dev, spec->args, 1); @@ -923,7 +924,7 @@ static const struct ipmmu_features ipmmu_features_default = { .twobit_imttbcr_sl0 = false, }; -static const struct ipmmu_features ipmmu_features_r8a7795 = { +static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .use_ns_alias_offset = false, .has_cache_leaf_nodes = true, .number_of_contexts = 8, @@ -937,7 +938,10 @@ static const struct of_device_id ipmmu_of_ids[] = { .data = &ipmmu_features_default, }, { .compatible = "renesas,ipmmu-r8a7795", - .data = &ipmmu_features_r8a7795, + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a7796", + .data = &ipmmu_features_rcar_gen3, }, { /* Terminator */ }, @@ -1114,6 +1118,7 @@ module_exit(ipmmu_exit); IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa"); IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795"); +IOMMU_OF_DECLARE(ipmmu_r8a7796_iommu_of, "renesas,ipmmu-r8a7796"); MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); MODULE_AUTHOR("Laurent Pinchart "); -- cgit v1.2.1 From 3701c123e1c13cdf258e10b26df7ae4bef6a5a93 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Thu, 14 Jun 2018 12:48:23 +0200 Subject: iommu/ipmmu-vmsa: Hook up r8a779(70|95) DT matching code Support the r8a77970 (R-Car V3M) and r8a77995 (R-Car D3) IPMMUs by sharing feature flags with r8a7795 (R-Car H3) and r8a7796 (R-Car M3-W). Also update IOMMU_OF_DECLARE to hook up the compat strings. 
Based on work for the r8a7796 by Magnus Damm [rebased on v4.17] Signed-off-by: Jacopo Mondi Signed-off-by: Simon Horman Reviewed-by: Geert Uytterhoeven Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 04c801555140..933a3dabe9ef 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -763,6 +763,8 @@ static bool ipmmu_slave_whitelist(struct device *dev) static const struct soc_device_attribute soc_rcar_gen3[] = { { .soc_id = "r8a7795", }, { .soc_id = "r8a7796", }, + { .soc_id = "r8a77970", }, + { .soc_id = "r8a77995", }, { /* sentinel */ } }; @@ -942,6 +944,12 @@ static const struct of_device_id ipmmu_of_ids[] = { }, { .compatible = "renesas,ipmmu-r8a7796", .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77970", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77995", + .data = &ipmmu_features_rcar_gen3, }, { /* Terminator */ }, @@ -1119,6 +1127,8 @@ module_exit(ipmmu_exit); IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa"); IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795"); IOMMU_OF_DECLARE(ipmmu_r8a7796_iommu_of, "renesas,ipmmu-r8a7796"); +IOMMU_OF_DECLARE(ipmmu_r8a77970_iommu_of, "renesas,ipmmu-r8a77970"); +IOMMU_OF_DECLARE(ipmmu_r8a77995_iommu_of, "renesas,ipmmu-r8a77995"); MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); MODULE_AUTHOR("Laurent Pinchart "); -- cgit v1.2.1 From 98dbffd39a6513b9d0b60e9a20265b2d7866af3c Mon Sep 17 00:00:00 2001 From: Jacopo Mondi Date: Thu, 14 Jun 2018 12:48:25 +0200 Subject: iommu/ipmmu-vmsa: Hook up R8A77965 DT matching code Add support for R-Car M3-N (R8A77965) SoC IPMMUs. 
Signed-off-by: Jacopo Mondi Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 933a3dabe9ef..6a0e7142f41b 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -763,6 +763,7 @@ static bool ipmmu_slave_whitelist(struct device *dev) static const struct soc_device_attribute soc_rcar_gen3[] = { { .soc_id = "r8a7795", }, { .soc_id = "r8a7796", }, + { .soc_id = "r8a77965", }, { .soc_id = "r8a77970", }, { .soc_id = "r8a77995", }, { /* sentinel */ } @@ -944,6 +945,9 @@ static const struct of_device_id ipmmu_of_ids[] = { }, { .compatible = "renesas,ipmmu-r8a7796", .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77965", + .data = &ipmmu_features_rcar_gen3, }, { .compatible = "renesas,ipmmu-r8a77970", .data = &ipmmu_features_rcar_gen3, @@ -1127,6 +1131,7 @@ module_exit(ipmmu_exit); IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa"); IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795"); IOMMU_OF_DECLARE(ipmmu_r8a7796_iommu_of, "renesas,ipmmu-r8a7796"); +IOMMU_OF_DECLARE(ipmmu_r8a77965_iommu_of, "renesas,ipmmu-r8a77965"); IOMMU_OF_DECLARE(ipmmu_r8a77970_iommu_of, "renesas,ipmmu-r8a77970"); IOMMU_OF_DECLARE(ipmmu_r8a77995_iommu_of, "renesas,ipmmu-r8a77995"); -- cgit v1.2.1 From 3e781fcafedb6d364f93caa44128d63e975daa6d Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 19 Jun 2018 10:49:19 +0800 Subject: iommu/vt-d: Remove unnecessary WARN_ON() When drivers call intel_svm_available() to check whether the Shared Virtual Memory(SVM) is supported by the IOMMU driver, they will get a warning in the kernel message if the SVM is not supported by the hardware. [ 3.790876] WARNING: CPU: 0 PID: 267 at drivers/iommu/intel-svm.c:334 intel_svm_bind_mm+0x292/0x570 [ 3.790877] Modules linked in: dsa(+) vfio_mdev mdev vfio_iommu_type1 soundcore vfio serio_raw parport_pc ppdev lp parport autofs4 psmouse virtio_net pata_acpi [ 3.790884] CPU: 0 PID: 267 Comm: systemd-udevd Not tainted 4.15.0+ #358 [ 3.790885] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.0-0-g63451fca13-prebuilt.qemu-project.org 04/01/2014 [ 3.790887] RIP: 0010:intel_svm_bind_mm+0x292/0x570 [ 3.790887] RSP: 0000:ffffac72c08a3a70 EFLAGS: 00010246 [ 3.790889] RAX: 0000000000000000 RBX: ffff90447a5160a0 RCX: 0000000000000000 [ 3.790889] RDX: 0000000000000000 RSI: 0000000000000082 RDI: ffff90447fc16550 [ 3.790890] RBP: ffff90447a516000 R08: 0000000000000000 R09: 0000000000000178 [ 3.790891] R10: 0000000000000220 R11: 0000000000aaaaaa R12: 0000000000000000 [ 3.790891] R13: 0000000000000002 R14: ffffac72c08a3b18 R15: ffffac72c08a3eb8 [ 3.790893] FS: 00007fb21e85b8c0(0000) GS:ffff90447fc00000(0000) knlGS:0000000000000000 [ 3.790894] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 3.790894] CR2: 000055c08167d148 CR3: 000000013a6f4000 CR4: 00000000000006f0 [ 3.790903] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 3.790904] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 This is caused by a unnecessary WARN_ON() in intel_svm_bind_mm(). Hence, remove it. 
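The underlying guideline: WARN_ON() is for invariants that should never be violated, not for conditions that legitimately occur at runtime (such as hardware lacking SVM support). A hedged sketch of the distinction, with hypothetical helper names rather than code from this patch:

/* Illustrative only -- not code from the patch. */
int example_bind(struct device *dev)
{
	if (!example_feature_supported(dev))	/* expected on some hardware */
		return -EINVAL;			/* fail quietly */

	if (WARN_ON(!dev->driver))		/* "cannot happen" invariant */
		return -ENODEV;			/* warn loudly, then fail */

	return 0;
}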
Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel --- drivers/iommu/intel-svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 45f6e581cd56..2cc0aac93201 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -309,7 +309,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ int pasid_max; int ret; - if (WARN_ON(!iommu || !iommu->pasid_table)) + if (!iommu || !iommu->pasid_table) return -EINVAL; if (dev_is_pci(dev)) { -- cgit v1.2.1 From e881dbd5d4a6950c9e2e7623c79d9578949365c9 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Wed, 27 Jun 2018 10:31:21 -0500 Subject: iommu/amd: Add support for higher 64-bit IOMMU Control Register Currently, the driver only supports lower 32-bit of IOMMU Control register. However, newer AMD IOMMU specification has extended this register to 64-bit. Therefore, replace the accessing API with the 64-bit version. Cc: Joerg Roedel Signed-off-by: Suravee Suthikulpanit Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu_init.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 904c575d1677..7d494f2c28a1 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -280,9 +280,9 @@ static void clear_translation_pre_enabled(struct amd_iommu *iommu) static void init_translation_status(struct amd_iommu *iommu) { - u32 ctrl; + u64 ctrl; - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); if (ctrl & (1<flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; } @@ -386,30 +386,30 @@ static void iommu_set_device_table(struct amd_iommu *iommu) /* Generic functions to enable/disable certain features of the IOMMU. */ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) { - u32 ctrl; + u64 ctrl; - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl |= (1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl |= (1ULL << bit); + writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) { - u32 ctrl; + u64 ctrl; - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl &= ~(1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl &= ~(1ULL << bit); + writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) { - u32 ctrl; + u64 ctrl; - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl &= ~CTRL_INV_TO_MASK; ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); + writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } /* Function to enable the hardware */ -- cgit v1.2.1 From 90fcffd9cf5e7cc593169f529799f3e3c5437e75 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Wed, 27 Jun 2018 10:31:22 -0500 Subject: iommu/amd: Add support for IOMMU XT mode The AMD IOMMU XT mode enables interrupt remapping with 32-bit destination APIC ID, which is required for x2APIC. 
The feature is available when the XTSup bit is set in the IOMMU Extended Feature register and/or the IVHD Type 10h IOMMU Feature Reporting field. For more information, please see section "IOMMU x2APIC Support" of the AMD I/O Virtualization Technology (IOMMU) Specification. Cc: Joerg Roedel Signed-off-by: Suravee Suthikulpanit Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 21 ++++++++++++++++----- drivers/iommu/amd_iommu_init.c | 25 +++++++++++++++++++++++-- drivers/iommu/amd_iommu_types.h | 17 +++++++++++------ 3 files changed, 50 insertions(+), 13 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 874e6480e391..64cfe854e0f5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3876,7 +3876,8 @@ static void irte_ga_prepare(void *entry, irte->lo.fields_remap.int_type = delivery_mode; irte->lo.fields_remap.dm = dest_mode; irte->hi.fields.vector = vector; - irte->lo.fields_remap.destination = dest_apicid; + irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); + irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); irte->lo.fields_remap.valid = 1; } @@ -3929,7 +3930,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index, if (!irte->lo.fields_remap.guest_mode) { irte->hi.fields.vector = vector; - irte->lo.fields_remap.destination = dest_apicid; + irte->lo.fields_remap.destination = + APICID_TO_IRTE_DEST_LO(dest_apicid); + irte->hi.fields.destination = + APICID_TO_IRTE_DEST_HI(dest_apicid); modify_irte_ga(devid, index, irte, NULL); } } @@ -4346,7 +4350,10 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) irte->lo.val = 0; irte->hi.fields.vector = cfg->vector; irte->lo.fields_remap.guest_mode = 0; - irte->lo.fields_remap.destination = cfg->dest_apicid; + irte->lo.fields_remap.destination = + APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); + irte->hi.fields.destination = + APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); irte->lo.fields_remap.int_type = apic->irq_delivery_mode; irte->lo.fields_remap.dm = apic->irq_dest_mode; @@ -4463,8 +4470,12 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data) raw_spin_lock_irqsave(&table->lock, flags); if (ref->lo.fields_vapic.guest_mode) { - if (cpu >= 0) - ref->lo.fields_vapic.destination = cpu; + if (cpu >= 0) { + ref->lo.fields_vapic.destination = + APICID_TO_IRTE_DEST_LO(cpu); + ref->hi.fields.destination = + APICID_TO_IRTE_DEST_HI(cpu); + } ref->lo.fields_vapic.is_run = is_run; barrier(); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 7d494f2c28a1..f6dd63fed21e 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -153,6 +153,7 @@ bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; +static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; static bool amd_iommu_detected; static bool __initdata amd_iommu_disabled; @@ -827,6 +828,19 @@ static int iommu_init_ga(struct amd_iommu *iommu) return ret; } +static void iommu_enable_xt(struct amd_iommu *iommu) +{ +#ifdef CONFIG_IRQ_REMAP + /* + * XT mode (32-bit APIC destination ID) requires + * GA mode (128-bit IRTE support) as a prerequisite. 
+ */ + if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && + amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) + iommu_feature_enable(iommu, CONTROL_XT_EN); +#endif /* CONFIG_IRQ_REMAP */ +} + static void iommu_enable_gt(struct amd_iommu *iommu) { if (!iommu_feature(iommu, FEATURE_GT)) @@ -1507,6 +1521,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0)) + amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; break; case 0x11: case 0x40: @@ -1516,6 +1532,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0)) + amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; break; default: return -EINVAL; @@ -1832,6 +1850,8 @@ static void print_iommu_info(void) pr_info("AMD-Vi: Interrupt remapping enabled\n"); if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) pr_info("AMD-Vi: virtual APIC enabled\n"); + if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) + pr_info("AMD-Vi: X2APIC enabled\n"); } } @@ -2168,6 +2188,7 @@ static void early_enable_iommu(struct amd_iommu *iommu) iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); iommu_enable_ga(iommu); + iommu_enable_xt(iommu); iommu_enable(iommu); iommu_flush_all_caches(iommu); } @@ -2212,6 +2233,7 @@ static void early_enable_iommus(void) iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_enable_ga(iommu); + iommu_enable_xt(iommu); iommu_set_device_table(iommu); iommu_flush_all_caches(iommu); } @@ -2691,8 +2713,7 @@ int __init amd_iommu_enable(void) return ret; irq_remapping_enabled = 1; - - return 0; + return amd_iommu_xt_mode; } void amd_iommu_disable(void) diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 986cbe0cc189..aa892fdbd158 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -161,6 +161,7 @@ #define CONTROL_GAM_EN 0x19ULL #define CONTROL_GALOG_EN 0x1CULL #define CONTROL_GAINT_EN 0x1DULL +#define CONTROL_XT_EN 0x32ULL #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) #define CTRL_INV_TO_NONE 0 @@ -378,9 +379,11 @@ #define IOMMU_CAP_EFR 27 /* IOMMU Feature Reporting Field (for IVHD type 10h */ +#define IOMMU_FEAT_XTSUP_SHIFT 0 #define IOMMU_FEAT_GASUP_SHIFT 6 /* IOMMU Extended Feature Register (EFR) */ +#define IOMMU_EFR_XTSUP_SHIFT 2 #define IOMMU_EFR_GASUP_SHIFT 7 #define MAX_DOMAIN_ID 65536 @@ -437,7 +440,6 @@ extern struct kmem_cache *amd_iommu_irq_cache; #define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT) #define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL) - /* * This struct is used to pass information about * incoming PPR faults around. 
@@ -810,6 +812,9 @@ union irte { } fields; }; +#define APICID_TO_IRTE_DEST_LO(x) (x & 0xffffff) +#define APICID_TO_IRTE_DEST_HI(x) ((x >> 24) & 0xff) + union irte_ga_lo { u64 val; @@ -823,8 +828,8 @@ union irte_ga_lo { dm : 1, /* ------ */ guest_mode : 1, - destination : 8, - rsvd : 48; + destination : 24, + ga_tag : 32; } fields_remap; /* For guest vAPIC */ @@ -837,8 +842,7 @@ union irte_ga_lo { is_run : 1, /* ------ */ guest_mode : 1, - destination : 8, - rsvd2 : 16, + destination : 24, ga_tag : 32; } fields_vapic; }; @@ -849,7 +853,8 @@ union irte_ga_hi { u64 vector : 8, rsvd_1 : 4, ga_root_ptr : 40, - rsvd_2 : 12; + rsvd_2 : 4, + destination : 8; } fields; }; -- cgit v1.2.1 From 2db1581e1f432ac6b4efe152c57fdfb4de85c154 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sun, 8 Jul 2018 14:23:21 +0800 Subject: Revert "iommu/vt-d: Clean up pasid quirk for pre-production devices" This reverts commit ab96746aaa344fb720a198245a837e266fad3b62. The commit ab96746aaa34 ("iommu/vt-d: Clean up pasid quirk for pre-production devices") triggers ECS mode on some platforms which have broken ECS support. As the result, graphic device will be inoperable on boot. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107017 Cc: Ashok Raj Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b344a883f116..115ff26e9ced 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -484,14 +484,37 @@ static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int intel_iommu_ecs = 1; +static int intel_iommu_pasid28; static int iommu_identity_mapping; #define IDENTMAP_ALL 1 #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 -#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap)) -#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap)) +/* Broadwell and Skylake have broken ECS support — normal so-called "second + * level" translation of DMA requests-without-PASID doesn't actually happen + * unless you also set the NESTE bit in an extended context-entry. Which of + * course means that SVM doesn't work because it's trying to do nested + * translation of the physical addresses it finds in the process page tables, + * through the IOVA->phys mapping found in the "second level" page tables. + * + * The VT-d specification was retroactively changed to change the definition + * of the capability bits and pretend that Broadwell/Skylake never happened... + * but unfortunately the wrong bit was changed. It's ECS which is broken, but + * for some reason it was the PASID capability bit which was redefined (from + * bit 28 on BDW/SKL to bit 40 in future). + * + * So our test for ECS needs to eschew those implementations which set the old + * PASID capabiity bit 28, since those are the ones on which ECS is broken. + * Unless we are working around the 'pasid28' limitations, that is, by putting + * the device into passthrough mode for normal DMA and thus masking the bug. + */ +#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ + (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap))) +/* PASID support is thus enabled if ECS is enabled and *either* of the old + * or new capability bits are set. 
*/ +#define pasid_enabled(iommu) (ecs_enabled(iommu) && \ + (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap))) int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); @@ -554,6 +577,11 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: disable extended context table support\n"); intel_iommu_ecs = 0; + } else if (!strncmp(str, "pasid28", 7)) { + printk(KERN_INFO + "Intel-IOMMU: enable pre-production PASID support\n"); + intel_iommu_pasid28 = 1; + iommu_identity_mapping |= IDENTMAP_GFX; } else if (!strncmp(str, "tboot_noforce", 13)) { printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); -- cgit v1.2.1 From 2ae86955703e9e6a119af4bbe27f6b6dd7a43131 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 9 Jul 2018 11:53:31 +0900 Subject: iommu/ipmmu-vmsa: IMUCTRn.TTSEL needs a special usage on R-Car Gen3 The TTSEL bit of IMUCTRn register of R-Car Gen3 needs to be set unused MMU context number even if uTLBs are disabled (The MMUEN bit of IMUCTRn register = 0). Since initial values of IMUCTRn.TTSEL on all IPMMU-domains are 0, this patch adds a new feature "reserved_context" to reserve IPMMU context number 0 as the unused MMU context. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 6a0e7142f41b..6cbd2bdb92ce 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -47,6 +47,7 @@ struct ipmmu_features { unsigned int number_of_contexts; bool setup_imbuscr; bool twobit_imttbcr_sl0; + bool reserved_context; }; struct ipmmu_vmsa_device { @@ -925,6 +926,7 @@ static const struct ipmmu_features ipmmu_features_default = { .number_of_contexts = 1, /* software only tested with one context */ .setup_imbuscr = true, .twobit_imttbcr_sl0 = false, + .reserved_context = false, }; static const struct ipmmu_features ipmmu_features_rcar_gen3 = { @@ -933,6 +935,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .number_of_contexts = 8, .setup_imbuscr = false, .twobit_imttbcr_sl0 = true, + .reserved_context = true, }; static const struct of_device_id ipmmu_of_ids[] = { @@ -1038,6 +1041,11 @@ static int ipmmu_probe(struct platform_device *pdev) } ipmmu_device_reset(mmu); + + if (mmu->features->reserved_context) { + dev_info(&pdev->dev, "IPMMU context 0 is reserved\n"); + set_bit(0, mmu->ctx); + } } /* -- cgit v1.2.1 From f1a066fcc9725960a73c00267afe7e346cc9fc6f Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Fri, 20 Jul 2018 10:45:45 +0200 Subject: iommu/amd: Remove redundant WARN_ON() The WARN_ON() was introduced in commit 272e4f99e966 ("iommu/amd: WARN when __[attach|detach]_device are called with irqs enabled") to ensure that the domain->lock is taken in proper irqs disabled context. This is required, because the domain->lock is taken as well in irq context. The proper context check by the WARN_ON() is redundant, because it is already covered by LOCKDEP. When working with locks and changing context, a run with LOCKDEP is required anyway and would detect the wrong lock context. Furthermore all callers for those functions are within the same file and all callers acquire another lock which already disables interrupts. 
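If an explicit, self-documenting check were still desired, lockdep already offers one that is compiled out of non-debug builds; a rough sketch against the function signature used in this file:

/* Sketch only: lockdep_assert_irqs_disabled() complains in lockdep-style
 * debug builds and is a no-op otherwise, so it documents the calling context
 * without an unconditional runtime WARN_ON().
 */
static int __attach_device_sketch(struct iommu_dev_data *dev_data,
				  struct protection_domain *domain)
{
	lockdep_assert_irqs_disabled();

	spin_lock(&domain->lock);
	/* ... attach logic elided ... */
	spin_unlock(&domain->lock);

	return 0;
}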
Signed-off-by: Anna-Maria Gleixner Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 64cfe854e0f5..8aaefee98d74 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1946,12 +1946,6 @@ static int __attach_device(struct iommu_dev_data *dev_data, { int ret; - /* - * Must be called with IRQs disabled. Warn here to detect early - * when its not. - */ - WARN_ON(!irqs_disabled()); - /* lock domain */ spin_lock(&domain->lock); @@ -2117,12 +2111,6 @@ static void __detach_device(struct iommu_dev_data *dev_data) { struct protection_domain *domain; - /* - * Must be called with IRQs disabled. Warn here to detect early - * when its not. - */ - WARN_ON(!irqs_disabled()); - domain = dev_data->domain; spin_lock(&domain->lock); -- cgit v1.2.1 From 562831747f6299abd481b5b00bd4fa19d5c8a259 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:46:54 +0800 Subject: iommu/vt-d: Global PASID name space This adds the system wide PASID name space for the PASID allocation. Currently we are using per IOMMU PASID name spaces which are not suitable for some use cases. For an example, one application (associated with a PASID) might talk to two physical devices simultaneously while the two devices could reside behind two different IOMMU units. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Suggested-by: Ashok Raj Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Reviewed-by: Liu Yi L Reviewed-by: Peter Xu Signed-off-by: Joerg Roedel --- drivers/iommu/Makefile | 2 +- drivers/iommu/intel-iommu.c | 13 ++++++++++ drivers/iommu/intel-pasid.c | 60 +++++++++++++++++++++++++++++++++++++++++++++ drivers/iommu/intel-pasid.h | 21 ++++++++++++++++ 4 files changed, 95 insertions(+), 1 deletion(-) create mode 100644 drivers/iommu/intel-pasid.c create mode 100644 drivers/iommu/intel-pasid.h (limited to 'drivers/iommu') diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 1fb695854809..0a190b4b2ada 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -14,7 +14,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_DMAR_TABLE) += dmar.o -obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o +obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 497ef94c5a8c..fa15ed036ddc 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -53,6 +53,7 @@ #include #include "irq_remapping.h" +#include "intel-pasid.h" #define ROOT_SIZE VTD_PAGE_SIZE #define CONTEXT_SIZE VTD_PAGE_SIZE @@ -3293,6 +3294,18 @@ static int __init init_dmars(void) } for_each_active_iommu(iommu, drhd) { + /* + * Find the max pasid size of all IOMMU's in the system. + * We need to ensure the system pasid table is no bigger + * than the smallest supported. 
+ */ + if (pasid_enabled(iommu)) { + u32 temp = 2 << ecap_pss(iommu->ecap); + + intel_pasid_max_id = min_t(u32, temp, + intel_pasid_max_id); + } + g_iommus[iommu->seq_id] = iommu; intel_iommu_init_qi(iommu); diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c new file mode 100644 index 000000000000..e918fe01ce7f --- /dev/null +++ b/drivers/iommu/intel-pasid.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * intel-pasid.c - PASID idr, table and entry manipulation + * + * Copyright (C) 2018 Intel Corporation + * + * Author: Lu Baolu + */ + +#define pr_fmt(fmt) "DMAR: " fmt + +#include +#include +#include +#include +#include + +#include "intel-pasid.h" + +/* + * Intel IOMMU system wide PASID name space: + */ +static DEFINE_SPINLOCK(pasid_lock); +u32 intel_pasid_max_id = PASID_MAX; +static DEFINE_IDR(pasid_idr); + +int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp) +{ + int ret, min, max; + + min = max_t(int, start, PASID_MIN); + max = min_t(int, end, intel_pasid_max_id); + + WARN_ON(in_interrupt()); + idr_preload(gfp); + spin_lock(&pasid_lock); + ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC); + spin_unlock(&pasid_lock); + idr_preload_end(); + + return ret; +} + +void intel_pasid_free_id(int pasid) +{ + spin_lock(&pasid_lock); + idr_remove(&pasid_idr, pasid); + spin_unlock(&pasid_lock); +} + +void *intel_pasid_lookup_id(int pasid) +{ + void *p; + + spin_lock(&pasid_lock); + p = idr_find(&pasid_idr, pasid); + spin_unlock(&pasid_lock); + + return p; +} diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h new file mode 100644 index 000000000000..b1c5296290e5 --- /dev/null +++ b/drivers/iommu/intel-pasid.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * intel-pasid.h - PASID idr, table and entry header + * + * Copyright (C) 2018 Intel Corporation + * + * Author: Lu Baolu + */ + +#ifndef __INTEL_PASID_H +#define __INTEL_PASID_H + +#define PASID_MIN 0x1 +#define PASID_MAX 0x100000 + +extern u32 intel_pasid_max_id; +int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp); +void intel_pasid_free_id(int pasid); +void *intel_pasid_lookup_id(int pasid); + +#endif /* __INTEL_PASID_H */ -- cgit v1.2.1 From 51261aac51a05c791ef880a100ac2ceed201ef72 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:46:55 +0800 Subject: iommu/vt-d: Avoid using idr_for_each_entry() idr_for_each_entry() is used to iteratte over idr elements of a given type. It isn't suitable for the globle pasid idr since the pasid idr consumer could specify different types of pointers to bind with a pasid. 
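The pattern the next patch switches to is the usual one for this situation: keep the ID allocator generic (void * values in the idr) and let each consumer chain its own same-typed objects on a private list, so iteration never sees foreign entry types. A minimal illustration with made-up names:

/* Illustration only: typed iteration over a consumer-private list, while the
 * shared idr is used purely for id <-> pointer lookup.
 */
struct example_svm {
	struct mm_struct *mm;
	int pasid;
	struct list_head list;		/* linked on example_svm_list */
};

static LIST_HEAD(example_svm_list);

static struct example_svm *example_find(struct mm_struct *mm)
{
	struct example_svm *t;

	list_for_each_entry(t, &example_svm_list, list)
		if (t->mm == mm)
			return t;

	return NULL;
}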
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Reviewed-by: Liu Yi L Reviewed-by: Peter Xu Signed-off-by: Joerg Roedel --- drivers/iommu/intel-svm.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 2cc0aac93201..36cc1d1b8afc 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -298,6 +298,7 @@ static const struct mmu_notifier_ops intel_mmuops = { }; static DEFINE_MUTEX(pasid_mutex); +static LIST_HEAD(global_svm_list); int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops) { @@ -329,13 +330,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ mutex_lock(&pasid_mutex); if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) { - int i; + struct intel_svm *t; - idr_for_each_entry(&iommu->pasid_idr, svm, i) { - if (svm->mm != mm || - (svm->flags & SVM_FLAG_PRIVATE_PASID)) + list_for_each_entry(t, &global_svm_list, list) { + if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID)) continue; + svm = t; if (svm->pasid >= pasid_max) { dev_warn(dev, "Limited PASID width. Cannot use existing PASID %d\n", @@ -404,6 +405,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ svm->mm = mm; svm->flags = flags; INIT_LIST_HEAD_RCU(&svm->devs); + INIT_LIST_HEAD(&svm->list); ret = -ENOMEM; if (mm) { ret = mmu_notifier_register(&svm->notifier, mm); @@ -430,6 +432,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ */ if (cap_caching_mode(iommu->cap)) intel_flush_pasid_dev(svm, sdev, svm->pasid); + + list_add_tail(&svm->list, &global_svm_list); } list_add_rcu(&sdev->list, &svm->devs); @@ -485,6 +489,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) if (svm->mm) mmu_notifier_unregister(&svm->notifier, svm->mm); + list_del(&svm->list); + /* We mandate that no page faults may be outstanding * for the PASID when intel_svm_unbind_mm() is called. * If that is not obeyed, subtle errors will happen. -- cgit v1.2.1 From af39507305fb83a5d3c475c2851f4d59545d8a18 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:46:56 +0800 Subject: iommu/vt-d: Apply global PASID in SVA This patch applies the global pasid name space in the shared virtual address (SVA) implementation. 
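Taken together with the interface added two patches earlier, a consumer now allocates from the system-wide name space rather than a per-IOMMU idr; a hedged usage sketch (error paths trimmed, "my_ctx" is illustrative):

/* Sketch of the intel-pasid.h API shown above; not code from the patch. */
static int example_pasid_usage(void *my_ctx)
{
	int pasid;

	pasid = intel_pasid_alloc_id(my_ctx, PASID_MIN,
				     intel_pasid_max_id, GFP_KERNEL);
	if (pasid < 0)
		return pasid;

	/* The same pointer comes back from a lookup anywhere in the system. */
	if (intel_pasid_lookup_id(pasid) != my_ctx)
		pr_err("unexpected pasid mapping\n");

	intel_pasid_free_id(pasid);
	return 0;
}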
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Reviewed-by: Liu Yi L Reviewed-by: Peter Xu Signed-off-by: Joerg Roedel --- drivers/iommu/intel-svm.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 36cc1d1b8afc..56e65d0b1871 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -26,6 +26,8 @@ #include #include +#include "intel-pasid.h" + #define PASID_ENTRY_P BIT_ULL(0) #define PASID_ENTRY_FLPM_5LP BIT_ULL(9) #define PASID_ENTRY_SRE BIT_ULL(11) @@ -85,8 +87,6 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) iommu->name); } - idr_init(&iommu->pasid_idr); - return 0; } @@ -102,7 +102,7 @@ int intel_svm_free_pasid_tables(struct intel_iommu *iommu) free_pages((unsigned long)iommu->pasid_state_table, order); iommu->pasid_state_table = NULL; } - idr_destroy(&iommu->pasid_idr); + return 0; } @@ -392,9 +392,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ pasid_max = iommu->pasid_max; /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ - ret = idr_alloc(&iommu->pasid_idr, svm, - !!cap_caching_mode(iommu->cap), - pasid_max - 1, GFP_KERNEL); + ret = intel_pasid_alloc_id(svm, + !!cap_caching_mode(iommu->cap), + pasid_max - 1, GFP_KERNEL); if (ret < 0) { kfree(svm); kfree(sdev); @@ -410,7 +410,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ if (mm) { ret = mmu_notifier_register(&svm->notifier, mm); if (ret) { - idr_remove(&svm->iommu->pasid_idr, svm->pasid); + intel_pasid_free_id(svm->pasid); kfree(svm); kfree(sdev); goto out; @@ -460,7 +460,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) if (!iommu || !iommu->pasid_table) goto out; - svm = idr_find(&iommu->pasid_idr, pasid); + svm = intel_pasid_lookup_id(pasid); if (!svm) goto out; @@ -485,7 +485,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) svm->iommu->pasid_table[svm->pasid].val = 0; wmb(); - idr_remove(&svm->iommu->pasid_idr, svm->pasid); + intel_pasid_free_id(svm->pasid); if (svm->mm) mmu_notifier_unregister(&svm->notifier, svm->mm); @@ -520,7 +520,7 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid) if (!iommu || !iommu->pasid_table) goto out; - svm = idr_find(&iommu->pasid_idr, pasid); + svm = intel_pasid_lookup_id(pasid); if (!svm) goto out; @@ -618,7 +618,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (!svm || svm->pasid != req->pasid) { rcu_read_lock(); - svm = idr_find(&iommu->pasid_idr, req->pasid); + svm = intel_pasid_lookup_id(req->pasid); /* It *can't* go away, because the driver is not permitted * to unbind the mm while any page faults are outstanding. * So we only need RCU to protect the internal idr code. */ -- cgit v1.2.1 From 9ddbfb42138d84bb326023616c40a3dc30ea2837 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:46:57 +0800 Subject: iommu/vt-d: Move device_domain_info to header This allows the per device iommu data and some helpers to be used in other files. 
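For illustration only, the kind of out-of-file consumer this move enables; the helper is hypothetical and assumes the per-device info is reachable through dev->archdata.iommu, as elsewhere in this series:

/* Hypothetical consumer outside intel-iommu.c */
static bool example_dev_supports_pasid(struct device *dev)
{
        struct device_domain_info *info = dev->archdata.iommu;

        return info && info->pasid_supported;
}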
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Liu Yi L Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 63 +++------------------------------------------ 1 file changed, 4 insertions(+), 59 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index fa15ed036ddc..bb7f2b2a37ee 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -381,61 +381,6 @@ static int hw_pass_through = 1; for (idx = 0; idx < g_num_of_iommus; idx++) \ if (domain->iommu_refcnt[idx]) -struct dmar_domain { - int nid; /* node id */ - - unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED]; - /* Refcount of devices per iommu */ - - - u16 iommu_did[DMAR_UNITS_SUPPORTED]; - /* Domain ids per IOMMU. Use u16 since - * domain ids are 16 bit wide according - * to VT-d spec, section 9.3 */ - - bool has_iotlb_device; - struct list_head devices; /* all devices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct dma_pte *pgd; /* virtual address */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - - int flags; /* flags to find out type of domain */ - - int iommu_coherency;/* indicate coherency of iommu access */ - int iommu_snooping; /* indicate snooping control feature*/ - int iommu_count; /* reference count of iommu */ - int iommu_superpage;/* Level of superpages supported: - 0 == 4KiB (no superpages), 1 == 2MiB, - 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ - u64 max_addr; /* maximum mapped address */ - - struct iommu_domain domain; /* generic domain data structure for - iommu core */ -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - u8 bus; /* PCI bus number */ - u8 devfn; /* PCI devfn number */ - u16 pfsid; /* SRIOV physical function source ID */ - u8 pasid_supported:3; - u8 pasid_enabled:1; - u8 pri_supported:1; - u8 pri_enabled:1; - u8 ats_supported:1; - u8 ats_enabled:1; - u8 ats_qdep; - struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ - struct intel_iommu *iommu; /* IOMMU used by this device */ - struct dmar_domain *domain; /* pointer to domain */ -}; - struct dmar_rmrr_unit { struct list_head list; /* list of rmrr units */ struct acpi_dmar_header *hdr; /* ACPI header */ @@ -604,7 +549,7 @@ static void set_iommu_domain(struct intel_iommu *iommu, u16 did, domains[did & 0xff] = domain; } -static inline void *alloc_pgtable_page(int node) +void *alloc_pgtable_page(int node) { struct page *page; void *vaddr = NULL; @@ -615,7 +560,7 @@ static inline void *alloc_pgtable_page(int node) return vaddr; } -static inline void free_pgtable_page(void *vaddr) +void free_pgtable_page(void *vaddr) { free_page((unsigned long)vaddr); } @@ -698,7 +643,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu) } /* This functionin only returns single iommu in a domain */ -static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) +struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) { int iommu_id; @@ -3528,7 +3473,7 @@ static unsigned long intel_alloc_iova(struct device *dev, return iova_pfn; } -static struct dmar_domain *get_valid_domain_for_dev(struct device *dev) +struct dmar_domain *get_valid_domain_for_dev(struct device *dev) { struct dmar_domain *domain, *tmp; struct dmar_rmrr_unit *rmrr; -- cgit v1.2.1 From 85319dcc8955f8f31828dc8bafff29f6aa011d93 Mon 
Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:46:58 +0800 Subject: iommu/vt-d: Add for_each_device_domain() helper This adds a helper named for_each_device_domain() to iterate over the elements in device_domain_list and invoke a callback against each element. This allows other source files to search the device_domain list. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Liu Yi L Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index bb7f2b2a37ee..f020da439ace 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -448,6 +448,27 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); +/* + * Iterate over elements in device_domain_list and call the specified + * callback @fn against each element. This helper should only be used + * in the context where the device_domain_lock has already been holden. + */ +int for_each_device_domain(int (*fn)(struct device_domain_info *info, + void *data), void *data) +{ + int ret = 0; + struct device_domain_info *info; + + assert_spin_locked(&device_domain_lock); + list_for_each_entry(info, &device_domain_list, global) { + ret = fn(info, data); + if (ret) + return ret; + } + + return 0; +} + const struct iommu_ops intel_iommu_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) -- cgit v1.2.1 From cc580e41260dbf1a46269235f1f2b572137d9d03 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:46:59 +0800 Subject: iommu/vt-d: Per PCI device pasid table interfaces This patch adds the interfaces for per PCI device pasid table management. Currently we allocate one pasid table for all PCI devices under the scope of an IOMMU. It's insecure in some cases where multiple devices under a single IOMMU unit support PASID features. With a per PCI device pasid table, we can achieve finer protection and isolation granularity.
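For illustration only, a hedged sketch of how the new interfaces fit together; the caller is hypothetical, and the real call sites are wired up by later patches in this series:

/* Hypothetical caller of the per-device pasid table interfaces. */
static int example_set_pasid_entry(struct device *dev, int pasid, u64 val)
{
        struct pasid_entry *pte;
        int ret;

        ret = intel_pasid_alloc_table(dev);     /* typically done at attach */
        if (ret)
                return ret;

        pte = intel_pasid_get_entry(dev, pasid);
        if (!pte) {
                intel_pasid_free_table(dev);
                return -ENODEV;
        }

        WRITE_ONCE(pte->val, val);              /* program the entry */
        return 0;
}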
Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Suggested-by: Ashok Raj Signed-off-by: Lu Baolu Reviewed-by: Liu Yi L Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 1 + drivers/iommu/intel-pasid.c | 179 ++++++++++++++++++++++++++++++++++++++++++++ drivers/iommu/intel-pasid.h | 18 +++++ drivers/iommu/intel-svm.c | 4 - 4 files changed, 198 insertions(+), 4 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f020da439ace..211925a75fb4 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2451,6 +2451,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, info->dev = dev; info->domain = domain; info->iommu = iommu; + info->pasid_table = NULL; if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c index e918fe01ce7f..fe95c9bd4d33 100644 --- a/drivers/iommu/intel-pasid.c +++ b/drivers/iommu/intel-pasid.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include "intel-pasid.h" @@ -58,3 +60,180 @@ void *intel_pasid_lookup_id(int pasid) return p; } + +/* + * Per device pasid table management: + */ +static inline void +device_attach_pasid_table(struct device_domain_info *info, + struct pasid_table *pasid_table) +{ + info->pasid_table = pasid_table; + list_add(&info->table, &pasid_table->dev); +} + +static inline void +device_detach_pasid_table(struct device_domain_info *info, + struct pasid_table *pasid_table) +{ + info->pasid_table = NULL; + list_del(&info->table); +} + +struct pasid_table_opaque { + struct pasid_table **pasid_table; + int segment; + int bus; + int devfn; +}; + +static int search_pasid_table(struct device_domain_info *info, void *opaque) +{ + struct pasid_table_opaque *data = opaque; + + if (info->iommu->segment == data->segment && + info->bus == data->bus && + info->devfn == data->devfn && + info->pasid_table) { + *data->pasid_table = info->pasid_table; + return 1; + } + + return 0; +} + +static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct pasid_table_opaque *data = opaque; + + data->segment = pci_domain_nr(pdev->bus); + data->bus = PCI_BUS_NUM(alias); + data->devfn = alias & 0xff; + + return for_each_device_domain(&search_pasid_table, data); +} + +/* + * Allocate a pasid table for @dev. It should be called in a + * single-thread context. 
+ */ +int intel_pasid_alloc_table(struct device *dev) +{ + struct device_domain_info *info; + struct pasid_table *pasid_table; + struct pasid_table_opaque data; + struct page *pages; + size_t size, count; + int ret, order; + + info = dev->archdata.iommu; + if (WARN_ON(!info || !dev_is_pci(dev) || + !info->pasid_supported || info->pasid_table)) + return -EINVAL; + + /* DMA alias device already has a pasid table, use it: */ + data.pasid_table = &pasid_table; + ret = pci_for_each_dma_alias(to_pci_dev(dev), + &get_alias_pasid_table, &data); + if (ret) + goto attach_out; + + pasid_table = kzalloc(sizeof(*pasid_table), GFP_ATOMIC); + if (!pasid_table) + return -ENOMEM; + INIT_LIST_HEAD(&pasid_table->dev); + + size = sizeof(struct pasid_entry); + count = min_t(int, pci_max_pasids(to_pci_dev(dev)), intel_pasid_max_id); + order = get_order(size * count); + pages = alloc_pages_node(info->iommu->node, + GFP_ATOMIC | __GFP_ZERO, + order); + if (!pages) + return -ENOMEM; + + pasid_table->table = page_address(pages); + pasid_table->order = order; + pasid_table->max_pasid = count; + +attach_out: + device_attach_pasid_table(info, pasid_table); + + return 0; +} + +void intel_pasid_free_table(struct device *dev) +{ + struct device_domain_info *info; + struct pasid_table *pasid_table; + + info = dev->archdata.iommu; + if (!info || !dev_is_pci(dev) || + !info->pasid_supported || !info->pasid_table) + return; + + pasid_table = info->pasid_table; + device_detach_pasid_table(info, pasid_table); + + if (!list_empty(&pasid_table->dev)) + return; + + free_pages((unsigned long)pasid_table->table, pasid_table->order); + kfree(pasid_table); +} + +struct pasid_table *intel_pasid_get_table(struct device *dev) +{ + struct device_domain_info *info; + + info = dev->archdata.iommu; + if (!info) + return NULL; + + return info->pasid_table; +} + +int intel_pasid_get_dev_max_id(struct device *dev) +{ + struct device_domain_info *info; + + info = dev->archdata.iommu; + if (!info || !info->pasid_table) + return 0; + + return info->pasid_table->max_pasid; +} + +struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid) +{ + struct pasid_table *pasid_table; + struct pasid_entry *entries; + + pasid_table = intel_pasid_get_table(dev); + if (WARN_ON(!pasid_table || pasid < 0 || + pasid >= intel_pasid_get_dev_max_id(dev))) + return NULL; + + entries = pasid_table->table; + + return &entries[pasid]; +} + +/* + * Interfaces for PASID table entry manipulation: + */ +static inline void pasid_clear_entry(struct pasid_entry *pe) +{ + WRITE_ONCE(pe->val, 0); +} + +void intel_pasid_clear_entry(struct device *dev, int pasid) +{ + struct pasid_entry *pe; + + pe = intel_pasid_get_entry(dev, pasid); + if (WARN_ON(!pe)) + return; + + pasid_clear_entry(pe); +} diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h index b1c5296290e5..1c05ed6fc5a5 100644 --- a/drivers/iommu/intel-pasid.h +++ b/drivers/iommu/intel-pasid.h @@ -13,9 +13,27 @@ #define PASID_MIN 0x1 #define PASID_MAX 0x100000 +struct pasid_entry { + u64 val; +}; + +/* The representative of a PASID table */ +struct pasid_table { + void *table; /* pasid table pointer */ + int order; /* page order of pasid table */ + int max_pasid; /* max pasid */ + struct list_head dev; /* device list */ +}; + extern u32 intel_pasid_max_id; int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp); void intel_pasid_free_id(int pasid); void *intel_pasid_lookup_id(int pasid); +int intel_pasid_alloc_table(struct device *dev); +void intel_pasid_free_table(struct device 
*dev); +struct pasid_table *intel_pasid_get_table(struct device *dev); +int intel_pasid_get_dev_max_id(struct device *dev); +struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid); +void intel_pasid_clear_entry(struct device *dev, int pasid); #endif /* __INTEL_PASID_H */ diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 56e65d0b1871..9b5dc7262677 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -34,10 +34,6 @@ static irqreturn_t prq_event_thread(int irq, void *d); -struct pasid_entry { - u64 val; -}; - struct pasid_state_entry { u64 val; }; -- cgit v1.2.1 From a7fc93fed94b173e2f9815d50f4024161b0a39ae Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:47:00 +0800 Subject: iommu/vt-d: Allocate and free pasid table This patch allocates a PASID table for a PCI device at the time when the dmar dev_info is attached to dev->archdata.iommu, and free it in the opposite case. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Liu Yi L Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 211925a75fb4..2d68e849a892 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2509,6 +2509,15 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, list_add(&info->global, &device_domain_list); if (dev) dev->archdata.iommu = info; + + if (dev && dev_is_pci(dev) && info->pasid_supported) { + ret = intel_pasid_alloc_table(dev); + if (ret) { + __dmar_remove_one_dev_info(info); + spin_unlock_irqrestore(&device_domain_lock, flags); + return NULL; + } + } spin_unlock_irqrestore(&device_domain_lock, flags); if (dev && domain_context_mapping(domain, dev)) { @@ -4843,6 +4852,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) if (info->dev) { iommu_disable_dev_iotlb(info); domain_context_clear(iommu, info->dev); + intel_pasid_free_table(info->dev); } unlink_domain_info(info); -- cgit v1.2.1 From 4774cc5245700b8f4414123908c3a7a1c78e5cbb Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:47:01 +0800 Subject: iommu/vt-d: Apply per pci device pasid table in SVA This patch applies the per pci device pasid table in the Shared Virtual Address (SVA) implementation. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Liu Yi L Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 29 +++++++++-------------------- drivers/iommu/intel-svm.c | 22 ++++++++++------------ 2 files changed, 19 insertions(+), 32 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d68e849a892..f1a3c5e5aff0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5178,22 +5178,16 @@ static void intel_iommu_put_resv_regions(struct device *dev, #ifdef CONFIG_INTEL_IOMMU_SVM #define MAX_NR_PASID_BITS (20) -static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu) +static inline unsigned long intel_iommu_get_pts(struct device *dev) { - /* - * Convert ecap_pss to extend context entry pts encoding, also - * respect the soft pasid_max value set by the iommu. - * - number of PASID bits = ecap_pss + 1 - * - number of PASID table entries = 2^(pts + 5) - * Therefore, pts = ecap_pss - 4 - * e.g. 
KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15 - */ - if (ecap_pss(iommu->ecap) < 5) + int pts, max_pasid; + + max_pasid = intel_pasid_get_dev_max_id(dev); + pts = find_first_bit((unsigned long *)&max_pasid, MAX_NR_PASID_BITS); + if (pts < 5) return 0; - /* pasid_max is encoded as actual number of entries not the bits */ - return find_first_bit((unsigned long *)&iommu->pasid_max, - MAX_NR_PASID_BITS) - 5; + return pts - 5; } int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) @@ -5229,8 +5223,8 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd if (!(ctx_lo & CONTEXT_PASIDE)) { if (iommu->pasid_state_table) context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); - context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | - intel_iommu_get_pts(iommu); + context[1].lo = (u64)virt_to_phys(info->pasid_table->table) | + intel_iommu_get_pts(sdev->dev); wmb(); /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both @@ -5297,11 +5291,6 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) return NULL; } - if (!iommu->pasid_table) { - dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n"); - return NULL; - } - return iommu; } #endif /* CONFIG_INTEL_IOMMU_SVM */ diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 9b5dc7262677..a253cdeabd61 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -274,11 +274,9 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) * page) so that we end up taking a fault that the hardware really * *has* to handle gracefully without affecting other processes. */ - svm->iommu->pasid_table[svm->pasid].val = 0; - wmb(); - rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) { + intel_pasid_clear_entry(sdev->dev, svm->pasid); intel_flush_pasid_dev(svm, sdev, svm->pasid); intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); } @@ -299,6 +297,7 @@ static LIST_HEAD(global_svm_list); int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops) { struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct pasid_entry *entry; struct intel_svm_dev *sdev; struct intel_svm *svm = NULL; struct mm_struct *mm = NULL; @@ -306,7 +305,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ int pasid_max; int ret; - if (!iommu || !iommu->pasid_table) + if (!iommu) return -EINVAL; if (dev_is_pci(dev)) { @@ -384,8 +383,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ } svm->iommu = iommu; - if (pasid_max > iommu->pasid_max) - pasid_max = iommu->pasid_max; + if (pasid_max > intel_pasid_max_id) + pasid_max = intel_pasid_max_id; /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ ret = intel_pasid_alloc_id(svm, @@ -418,7 +417,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ if (cpu_feature_enabled(X86_FEATURE_LA57)) pasid_entry_val |= PASID_ENTRY_FLPM_5LP; - iommu->pasid_table[svm->pasid].val = pasid_entry_val; + entry = intel_pasid_get_entry(dev, svm->pasid); + entry->val = pasid_entry_val; wmb(); @@ -453,7 +453,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) mutex_lock(&pasid_mutex); iommu = intel_svm_device_to_iommu(dev); - if (!iommu || !iommu->pasid_table) + if (!iommu) goto out; svm = intel_pasid_lookup_id(pasid); @@ -476,11 +476,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) intel_flush_pasid_dev(svm, sdev, svm->pasid); 
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); kfree_rcu(sdev, rcu); + intel_pasid_clear_entry(dev, svm->pasid); if (list_empty(&svm->devs)) { - svm->iommu->pasid_table[svm->pasid].val = 0; - wmb(); - intel_pasid_free_id(svm->pasid); if (svm->mm) mmu_notifier_unregister(&svm->notifier, svm->mm); @@ -513,7 +511,7 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid) mutex_lock(&pasid_mutex); iommu = intel_svm_device_to_iommu(dev); - if (!iommu || !iommu->pasid_table) + if (!iommu) goto out; svm = intel_pasid_lookup_id(pasid); -- cgit v1.2.1 From d9737953d85131436b09668b5e8d3389c37c1f28 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 14 Jul 2018 15:47:02 +0800 Subject: iommu/vt-d: Remove the obsolete per iommu pasid tables The obsolete per iommu pasid tables are no longer used. Hence, clean them up. Cc: Ashok Raj Cc: Jacob Pan Cc: Kevin Tian Cc: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Liu Yi L Signed-off-by: Joerg Roedel --- drivers/iommu/intel-iommu.c | 6 +++--- drivers/iommu/intel-svm.c | 17 ++--------------- 2 files changed, 5 insertions(+), 18 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f1a3c5e5aff0..7ed0221dccf8 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1756,7 +1756,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu) if (pasid_enabled(iommu)) { if (ecap_prs(iommu->ecap)) intel_svm_finish_prq(iommu); - intel_svm_free_pasid_tables(iommu); + intel_svm_exit(iommu); } #endif } @@ -3337,7 +3337,7 @@ static int __init init_dmars(void) hw_pass_through = 0; #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu)) - intel_svm_alloc_pasid_tables(iommu); + intel_svm_init(iommu); #endif } @@ -4300,7 +4300,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu)) - intel_svm_alloc_pasid_tables(iommu); + intel_svm_init(iommu); #endif if (dmaru->ignored) { diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index a253cdeabd61..eb308363e541 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -38,7 +38,7 @@ struct pasid_state_entry { u64 val; }; -int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) +int intel_svm_init(struct intel_iommu *iommu) { struct page *pages; int order; @@ -63,15 +63,6 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) iommu->pasid_max = 0x20000; order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); - pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!pages) { - pr_warn("IOMMU: %s: Failed to allocate PASID table\n", - iommu->name); - return -ENOMEM; - } - iommu->pasid_table = page_address(pages); - pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); - if (ecap_dis(iommu->ecap)) { /* Just making it explicit...
*/ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); @@ -86,14 +77,10 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) return 0; } -int intel_svm_free_pasid_tables(struct intel_iommu *iommu) +int intel_svm_exit(struct intel_iommu *iommu) { int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); - if (iommu->pasid_table) { - free_pages((unsigned long)iommu->pasid_table, order); - iommu->pasid_table = NULL; - } if (iommu->pasid_state_table) { free_pages((unsigned long)iommu->pasid_state_table, order); iommu->pasid_state_table = NULL; -- cgit v1.2.1 From fac83d29d95471ad6a104f8c0d21669a3d59097b Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Mon, 18 Jun 2018 12:27:54 +0100 Subject: iommu/io-pgtable-arm: Fix pgtable allocation in selftest Commit 4b123757eeaa ("iommu/io-pgtable-arm: Make allocations NUMA-aware") added a NUMA hint to page table allocation, but the pgtable selftest doesn't provide an SMMU device parameter. Since dev_to_node doesn't accept a NULL argument, add a special case for selftest. Signed-off-by: Jean-Philippe Brucker Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 010a254305dd..88641b4560bc 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -237,7 +237,8 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, void *pages; VM_BUG_ON((gfp & __GFP_HIGHMEM)); - p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order); + p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE, + gfp | __GFP_ZERO, order); if (!p) return NULL; -- cgit v1.2.1 From 29859aeb8a6ea17ba207933a81b6b77b4d4df81a Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Tue, 19 Jun 2018 13:52:24 +0100 Subject: iommu/io-pgtable-arm-v7s: Abort allocation when table address overflows the PTE When run on a 64-bit system in selftest, the v7s driver may obtain page tables with physical addresses larger than 32 bits. Level-2 tables are 1KB and are allocated with slab, which doesn't accept the GFP_DMA32 flag. Currently map() truncates the address written in the PTE, causing iova_to_phys() or unmap() to access invalid memory. Kasan reports it as a use-after-free. To avoid any nasty surprise, test if the physical address fits in a PTE before returning a new table. 32-bit systems, which are the main users of this page table format, shouldn't see any difference.
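For illustration only, a hedged sketch of the truncation being guarded against; arm_v7s_iopte is the driver's 32-bit PTE type and the helper name is hypothetical:

static bool example_table_fits_in_pte(phys_addr_t phys)
{
        /* e.g. a level-2 table at 0x1_2345_6400 narrows to 0x2345_6400,
         * which would make the PTE point at unrelated memory. */
        return phys == (arm_v7s_iopte)phys;
}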
Signed-off-by: Jean-Philippe Brucker Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 50e3a9fcf43e..b5948ba6b3b3 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -192,6 +192,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, { struct io_pgtable_cfg *cfg = &data->iop.cfg; struct device *dev = cfg->iommu_dev; + phys_addr_t phys; dma_addr_t dma; size_t size = ARM_V7S_TABLE_SIZE(lvl); void *table = NULL; @@ -200,6 +201,10 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); else if (lvl == 2) table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); + phys = virt_to_phys(table); + if (phys != (arm_v7s_iopte)phys) + /* Doesn't fit in PTE */ + goto out_free; if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) @@ -209,7 +214,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, * address directly, so if the DMA layer suggests otherwise by * translating or truncating them, that bodes very badly... */ - if (dma != virt_to_phys(table)) + if (dma != phys) goto out_unmap; } kmemleak_ignore(table); -- cgit v1.2.1 From d1e20222d5372e951bbb2fd3f6489ec4a6ea9b11 Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Thu, 19 Jul 2018 23:23:56 +0530 Subject: iommu/arm-smmu: Error out only if not enough context interrupts Currently we check if the number of context banks is not equal to num_context_interrupts. However, there are bootloaders, such as the one on sdm845, that reserve a few context banks, so the kernel sees fewer than the total available context banks. So, although the hardware definition in device tree would mention the correct number of context interrupts, this number can be greater than the number of context banks visible to the SMMU in the kernel. We should therefore error out only when the number of context banks is greater than the available number of context interrupts.
Signed-off-by: Vivek Gautam Suggested-by: Tomasz Figa Cc: Robin Murphy Cc: Will Deacon [will: drop useless printk] Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f7a96bcf94a6..5349e22b5c78 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2103,12 +2103,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; - if (smmu->version == ARM_SMMU_V2 && - smmu->num_context_banks != smmu->num_context_irqs) { - dev_err(dev, - "found only %d context interrupt(s) but %d required\n", - smmu->num_context_irqs, smmu->num_context_banks); - return -ENODEV; + if (smmu->version == ARM_SMMU_V2) { + if (smmu->num_context_banks > smmu->num_context_irqs) { + dev_err(dev, + "found only %d context irq(s) but %d required\n", + smmu->num_context_irqs, smmu->num_context_banks); + return -ENODEV; + } + + /* Ignore superfluous interrupts */ + smmu->num_context_irqs = smmu->num_context_banks; } for (i = 0; i < smmu->num_global_irqs; ++i) { -- cgit v1.2.1 From 0d535967ac658966c6ade8f82b5799092f7d5441 Mon Sep 17 00:00:00 2001 From: Miao Zhong Date: Mon, 23 Jul 2018 20:56:58 +0800 Subject: iommu/arm-smmu-v3: sync the OVACKFLG to PRIQ consumer register When the PRI queue overflows, the driver should update the OVACKFLG in the PRIQ consumer register, otherwise subsequent PRI requests will not be processed. Cc: Will Deacon Cc: Robin Murphy Signed-off-by: Miao Zhong Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 1d647104bccc..deacc152f09f 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1301,6 +1301,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) /* Sync our overflow flag, as we believe we're up to speed */ q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + writel(q->cons, q->cons_reg); return IRQ_HANDLED; } -- cgit v1.2.1 From c52c72d3dee81af893cee0414444818ed91e2e11 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 11 Jul 2018 13:59:36 -0700 Subject: iommu: Add sysfs attribute for domain type While we could print it at setup time, this is an easier way to match each device to its default IOMMU allocation type.
Signed-off-by: Olof Johansson Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d227b864a109..7f50013b0bcf 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -294,11 +294,39 @@ static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, return (str - buf); } +static ssize_t iommu_group_show_type(struct iommu_group *group, + char *buf) +{ + char *type = "unknown\n"; + + if (group->default_domain) { + switch (group->default_domain->type) { + case IOMMU_DOMAIN_BLOCKED: + type = "blocked\n"; + break; + case IOMMU_DOMAIN_IDENTITY: + type = "identity\n"; + break; + case IOMMU_DOMAIN_UNMANAGED: + type = "unmanaged\n"; + break; + case IOMMU_DOMAIN_DMA: + type = "DMA"; + break; + } + } + strcpy(buf, type); + + return strlen(type); +} + static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); static IOMMU_GROUP_ATTR(reserved_regions, 0444, iommu_group_show_resv_regions, NULL); +static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL); + static void iommu_group_release(struct kobject *kobj) { struct iommu_group *group = to_iommu_group(kobj); @@ -380,6 +408,10 @@ struct iommu_group *iommu_group_alloc(void) if (ret) return ERR_PTR(ret); + ret = iommu_group_create_file(group, &iommu_group_attr_type); + if (ret) + return ERR_PTR(ret); + pr_debug("Allocated group %d\n", group->id); return group; -- cgit v1.2.1 From 58d1131777a4b7c228267b809bd88f7be66edcfb Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Fri, 20 Jul 2018 11:02:23 -0700 Subject: iommu: Add config option to set passthrough as default This allows the default behavior to be controlled by a kernel config option instead of changing the commandline for the kernel to include "iommu.passthrough=on" or "iommu=pt" on machines where this is desired. Likewise, for machines where this config option is enabled, it can be disabled at boot time with "iommu.passthrough=off" or "iommu=nopt". Also corrected iommu=pt documentation for IA-64, since it has no code that parses iommu= at all. Signed-off-by: Olof Johansson Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 11 +++++++++++ drivers/iommu/iommu.c | 4 ++++ 2 files changed, 15 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 8d0a2886658f..08c957759e74 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -70,6 +70,17 @@ config IOMMU_DEBUGFS debug/iommu directory, and then populate a subdirectory with entries as required. +config IOMMU_DEFAULT_PASSTHROUGH + bool "IOMMU passthrough by default" + depends on IOMMU_API + help + Enable passthrough by default, removing the need to pass in + iommu.passthrough=on or iommu=pt through command line. If this + is enabled, you can still disable with iommu.passthrough=off + or iommu=nopt depending on the architecture. + + If unsure, say N here. 
+ config IOMMU_IOVA tristate diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7f50013b0bcf..f3698006cb53 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -36,7 +36,11 @@ static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); +#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH +static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; +#else static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; +#endif struct iommu_callback_data { const struct iommu_ops *ops; -- cgit v1.2.1 From 46583e8c48c5a094ba28060615b3a7c8c576690f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 20 Jul 2018 18:16:59 +0200 Subject: iommu/ipmmu-vmsa: Fix allocation in atomic context When attaching a device to an IOMMU group with CONFIG_DEBUG_ATOMIC_SLEEP=y: BUG: sleeping function called from invalid context at mm/slab.h:421 in_atomic(): 1, irqs_disabled(): 128, pid: 61, name: kworker/1:1 ... Call trace: ... arm_lpae_alloc_pgtable+0x114/0x184 arm_64_lpae_alloc_pgtable_s1+0x2c/0x128 arm_32_lpae_alloc_pgtable_s1+0x40/0x6c alloc_io_pgtable_ops+0x60/0x88 ipmmu_attach_device+0x140/0x334 ipmmu_attach_device() takes a spinlock, while arm_lpae_alloc_pgtable() allocates memory using GFP_KERNEL. Originally, the ipmmu-vmsa driver had its own custom page table allocation implementation using GFP_ATOMIC, hence the spinlock was fine. Fix this by replacing the spinlock by a mutex, like the arm-smmu driver does. Fixes: f20ed39f53145e45 ("iommu/ipmmu-vmsa: Use the ARM LPAE page table allocator") Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 6cbd2bdb92ce..41eee3401f05 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -74,7 +74,7 @@ struct ipmmu_vmsa_domain { struct io_pgtable_ops *iop; unsigned int context_id; - spinlock_t lock; /* Protects mappings */ + struct mutex mutex; /* Protects mappings */ }; static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) @@ -600,7 +600,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) if (!domain) return NULL; - spin_lock_init(&domain->lock); + mutex_init(&domain->mutex); return &domain->io_domain; } @@ -646,7 +646,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned long flags; unsigned int i; int ret = 0; @@ -655,7 +654,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, return -ENXIO; } - spin_lock_irqsave(&domain->lock, flags); + mutex_lock(&domain->mutex); if (!domain->mmu) { /* The domain hasn't been used yet, initialize it. */ @@ -679,7 +678,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, } else dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); - spin_unlock_irqrestore(&domain->lock, flags); + mutex_unlock(&domain->mutex); if (ret < 0) return ret; -- cgit v1.2.1 From 15021d364d17b658f489aa654d96a4c81a71f386 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 25 Jul 2018 15:10:29 +0200 Subject: iommu/ipmmu-vmsa: Clarify supported platforms The Renesas IPMMU-VMSA driver supports not just R-Car H2 and M2 SoCs, but also other R-Car Gen2 and R-Car Gen3 SoCs. 
Drop a superfluous "Renesas" while at it. Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Reviewed-by: Simon Horman Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e055d228bfb9..e5401ac90a6f 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -285,8 +285,8 @@ config IPMMU_VMSA select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU help - Support for the Renesas VMSA-compatible IPMMU Renesas found in the - R-Mobile APE6 and R-Car H2/M2 SoCs. + Support for the Renesas VMSA-compatible IPMMU found in the R-Mobile + APE6, R-Car Gen2, and R-Car Gen3 SoCs. If unsure, say N. -- cgit v1.2.1 From 5c5c87411488af3cd082221e567498d813d0fe83 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Fri, 27 Jul 2018 00:19:16 +0300 Subject: iommu/ipmmu-vmsa: Don't register as BUS IOMMU if machine doesn't have IPMMU-VMSA This fixes kernel crashing on NVIDIA Tegra if kernel is compiled in a multiplatform configuration and IPMMU-VMSA driver is enabled. Cc: # v3.20+ Signed-off-by: Dmitry Osipenko Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 41eee3401f05..51af2c528a31 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1108,12 +1108,19 @@ static struct platform_driver ipmmu_driver = { static int __init ipmmu_init(void) { + struct device_node *np; static bool setup_done; int ret; if (setup_done) return 0; + np = of_find_matching_node(NULL, ipmmu_of_ids); + if (!np) + return 0; + + of_node_put(np); + ret = platform_driver_register(&ipmmu_driver); if (ret < 0) return ret; -- cgit v1.2.1 From a71792dee2a33d2e935d4b67dd63924f5ceb203d Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Thu, 12 Jul 2018 17:28:43 +0800 Subject: iommu/arm-smmu-v3: Prevent any devices access to memory without registration Stream bypass is a potential security hole since a malicious device can be hotplugged in without matching any drivers, yet be granted the ability to access all of physical memory. Now that we attach devices to domains by default, we can toggle the disable_bypass default to "on", preventing DMA from unknown devices. 
Signed-off-by: Zhen Lei Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index deacc152f09f..7fb5230cd145 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -366,7 +366,7 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 -static bool disable_bypass; +static bool disable_bypass = 1; module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); -- cgit v1.2.1 From b63b3439b85609338e4faabd5d2588dbda137e5c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 25 Jul 2018 15:58:43 +0100 Subject: iommu/arm-smmu-v3: Abort all transactions if SMMU is enabled in kdump kernel If we find that the SMMU is enabled during probe, we reset it by re-initialising its registers and either enabling translation or placing it into bypass based on the disable_bypass commandline option. In the case of a kdump kernel, the SMMU won't have been shutdown cleanly by the previous kernel and there may be concurrent DMA through the SMMU. Rather than reset the SMMU to bypass, which would likely lead to rampant data corruption, we can instead configure the SMMU to abort all incoming transactions when we find that it is enabled from within a kdump kernel. Reported-by: Sameer Goel Signed-off-by: Will Deacon --- drivers/iommu/arm-smmu-v3.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 7fb5230cd145..446703eeee7a 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -2212,8 +2213,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) reg &= ~clr; reg |= set; writel_relaxed(reg | GBPA_UPDATE, gbpa); - return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), - 1, ARM_SMMU_POLL_TIMEOUT_US); + ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE), + 1, ARM_SMMU_POLL_TIMEOUT_US); + + if (ret) + dev_err(smmu->dev, "GBPA not responding to update\n"); + return ret; } static void arm_smmu_free_msis(void *data) @@ -2393,8 +2398,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Clear CR0 and sync (disables SMMU and queue processing) */ reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); - if (reg & CR0_SMMUEN) + if (reg & CR0_SMMUEN) { + if (is_kdump_kernel()) { + arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); + arm_smmu_device_disable(smmu); + return -EBUSY; + } + dev_warn(smmu->dev, "SMMU currently enabled! 
Resetting...\n"); + } ret = arm_smmu_device_disable(smmu); if (ret) @@ -2492,10 +2504,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) enables |= CR0_SMMUEN; } else { ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); - if (ret) { - dev_err(smmu->dev, "GBPA not responding to update\n"); + if (ret) return ret; - } } ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, ARM_SMMU_CR0ACK); -- cgit v1.2.1 From d88e61faad526a5850e9330c846641b91cf971e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jul 2018 09:36:26 +0200 Subject: iommu: Remove the ->map_sg indirection All iommu drivers use the default_iommu_map_sg implementation, and there is no good reason to ever override it. Just expose it as iommu_map_sg directly and remove the indirection, specially in our post-spectre world where indirect calls are horribly expensive. Signed-off-by: Christoph Hellwig Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 1 - drivers/iommu/arm-smmu-v3.c | 1 - drivers/iommu/arm-smmu.c | 1 - drivers/iommu/exynos-iommu.c | 1 - drivers/iommu/intel-iommu.c | 1 - drivers/iommu/iommu.c | 6 +++--- drivers/iommu/ipmmu-vmsa.c | 1 - drivers/iommu/msm_iommu.c | 1 - drivers/iommu/mtk_iommu.c | 1 - drivers/iommu/mtk_iommu_v1.c | 1 - drivers/iommu/omap-iommu.c | 1 - drivers/iommu/qcom_iommu.c | 1 - drivers/iommu/rockchip-iommu.c | 1 - drivers/iommu/tegra-gart.c | 1 - drivers/iommu/tegra-smmu.c | 1 - 15 files changed, 3 insertions(+), 17 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 596b95c50051..a23c6a4014a5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3192,7 +3192,6 @@ const struct iommu_ops amd_iommu_ops = { .detach_dev = amd_iommu_detach_device, .map = amd_iommu_map, .unmap = amd_iommu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = amd_iommu_iova_to_phys, .add_device = amd_iommu_add_device, .remove_device = amd_iommu_remove_device, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 1d647104bccc..f1dc294f8e08 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1997,7 +1997,6 @@ static struct iommu_ops arm_smmu_ops = { .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, - .map_sg = default_iommu_map_sg, .flush_iotlb_all = arm_smmu_iotlb_sync, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f7a96bcf94a6..644fd7ec8ac7 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1562,7 +1562,6 @@ static struct iommu_ops arm_smmu_ops = { .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, - .map_sg = default_iommu_map_sg, .flush_iotlb_all = arm_smmu_iotlb_sync, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 85879cfec52f..19e55cf6a9dd 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1332,7 +1332,6 @@ static const struct iommu_ops exynos_iommu_ops = { .detach_dev = exynos_iommu_detach_device, .map = exynos_iommu_map, .unmap = exynos_iommu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = exynos_iommu_iova_to_phys, .device_group = generic_device_group, .add_device = exynos_iommu_add_device, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 14e4b3722428..afb6c872f203 100644 --- 
a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5308,7 +5308,6 @@ const struct iommu_ops intel_iommu_ops = { .detach_dev = intel_iommu_detach_device, .map = intel_iommu_map, .unmap = intel_iommu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = intel_iommu_iova_to_phys, .add_device = intel_iommu_add_device, .remove_device = intel_iommu_remove_device, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index f3698006cb53..8c15c5980299 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1673,8 +1673,8 @@ size_t iommu_unmap_fast(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_unmap_fast); -size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, int prot) +size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot) { struct scatterlist *s; size_t mapped = 0; @@ -1714,7 +1714,7 @@ out_err: return 0; } -EXPORT_SYMBOL_GPL(default_iommu_map_sg); +EXPORT_SYMBOL_GPL(iommu_map_sg); int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 40ae6e87cb88..c9ce27cfa7e8 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -889,7 +889,6 @@ static const struct iommu_ops ipmmu_ops = { .unmap = ipmmu_unmap, .flush_iotlb_all = ipmmu_iotlb_sync, .iotlb_sync = ipmmu_iotlb_sync, - .map_sg = default_iommu_map_sg, .iova_to_phys = ipmmu_iova_to_phys, .add_device = ipmmu_add_device, .remove_device = ipmmu_remove_device, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 0d3350463a3f..cdf6ab22ede9 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -708,7 +708,6 @@ static struct iommu_ops msm_iommu_ops = { .detach_dev = msm_iommu_detach_dev, .map = msm_iommu_map, .unmap = msm_iommu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = msm_iommu_iova_to_phys, .add_device = msm_iommu_add_device, .remove_device = msm_iommu_remove_device, diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index f2832a10fcea..f9f69f7111a9 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -495,7 +495,6 @@ static struct iommu_ops mtk_iommu_ops = { .detach_dev = mtk_iommu_detach_device, .map = mtk_iommu_map, .unmap = mtk_iommu_unmap, - .map_sg = default_iommu_map_sg, .flush_iotlb_all = mtk_iommu_iotlb_sync, .iotlb_sync = mtk_iommu_iotlb_sync, .iova_to_phys = mtk_iommu_iova_to_phys, diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index a7c2a973784f..676c029494e4 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -531,7 +531,6 @@ static struct iommu_ops mtk_iommu_ops = { .detach_dev = mtk_iommu_detach_device, .map = mtk_iommu_map, .unmap = mtk_iommu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = mtk_iommu_iova_to_phys, .add_device = mtk_iommu_add_device, .remove_device = mtk_iommu_remove_device, diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index af4a8e7fcd27..86d64ff507fa 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1548,7 +1548,6 @@ static const struct iommu_ops omap_iommu_ops = { .detach_dev = omap_iommu_detach_dev, .map = omap_iommu_map, .unmap = omap_iommu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = omap_iommu_iova_to_phys, .add_device = omap_iommu_add_device, .remove_device = 
omap_iommu_remove_device, diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index fe88a4880d3a..6e914b3a2b4b 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -590,7 +590,6 @@ static const struct iommu_ops qcom_iommu_ops = { .detach_dev = qcom_iommu_detach_dev, .map = qcom_iommu_map, .unmap = qcom_iommu_unmap, - .map_sg = default_iommu_map_sg, .flush_iotlb_all = qcom_iommu_iotlb_sync, .iotlb_sync = qcom_iommu_iotlb_sync, .iova_to_phys = qcom_iommu_iova_to_phys, diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 054cd2c8e9c8..90625cdd3b66 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1110,7 +1110,6 @@ static const struct iommu_ops rk_iommu_ops = { .detach_dev = rk_iommu_detach_device, .map = rk_iommu_map, .unmap = rk_iommu_unmap, - .map_sg = default_iommu_map_sg, .add_device = rk_iommu_add_device, .remove_device = rk_iommu_remove_device, .iova_to_phys = rk_iommu_iova_to_phys, diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index a004f6da35f2..7b1361d57a17 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -377,7 +377,6 @@ static const struct iommu_ops gart_iommu_ops = { .remove_device = gart_iommu_remove_device, .device_group = generic_device_group, .map = gart_iommu_map, - .map_sg = default_iommu_map_sg, .unmap = gart_iommu_unmap, .iova_to_phys = gart_iommu_iova_to_phys, .pgsize_bitmap = GART_IOMMU_PGSIZES, diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 44d40bc771b5..0d03341317c4 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -876,7 +876,6 @@ static const struct iommu_ops tegra_smmu_ops = { .device_group = tegra_smmu_device_group, .map = tegra_smmu_map, .unmap = tegra_smmu_unmap, - .map_sg = default_iommu_map_sg, .iova_to_phys = tegra_smmu_iova_to_phys, .of_xlate = tegra_smmu_of_xlate, .pgsize_bitmap = SZ_4K, -- cgit v1.2.1 From 04c532a1cdc7e423656c07937aa4b5c1c2b064f9 Mon Sep 17 00:00:00 2001 From: Ralf Goebel Date: Mon, 6 Aug 2018 17:00:36 +0200 Subject: iommu/omap: Fix cache flushes on L2 table entries The base address used for DMA operations on the second-level table incorrectly included the offset for the table entry. The offset was then added again, which led to incorrect behavior. Operations on the L1 table are not affected. The calculation of the base address is changed to point to the beginning of the L2 table. Fixes: bfee0cf0ee1d ("iommu/omap: Use DMA-API for performing cache flushes") Acked-by: Suman Anna Signed-off-by: Ralf Goebel Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index af4a8e7fcd27..3b05117118c3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -550,7 +550,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, pte_ready: iopte = iopte_offset(iopgd, da); - *pt_dma = virt_to_phys(iopte); + *pt_dma = iopgd_page_paddr(iopgd); dev_vdbg(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", __func__, da, iopgd, *iopgd, iopte, *iopte); @@ -738,7 +738,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) } bytes *= nent; memset(iopte, 0, nent * sizeof(*iopte)); - pt_dma = virt_to_phys(iopte); + pt_dma = iopgd_page_paddr(iopgd); flush_iopte_range(obj->dev, pt_dma, pt_offset, nent); /* -- cgit v1.2.1