Diffstat (limited to 'drivers/iommu')
44 files changed, 5967 insertions, 3184 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e15cdcd8cb3c..d2fade984999 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -3,6 +3,10 @@ config IOMMU_IOVA tristate +# The IOASID library may also be used by non-IOMMU_API users +config IOASID + tristate + # IOMMU_API always gets selected by whoever wants it. config IOMMU_API bool @@ -78,7 +82,7 @@ config IOMMU_DEBUGFS config IOMMU_DEFAULT_PASSTHROUGH bool "IOMMU passthrough by default" depends on IOMMU_API - help + help Enable passthrough by default, removing the need to pass in iommu.passthrough=on or iommu=pt through command line. If this is enabled, you can still disable with iommu.passthrough=off @@ -87,8 +91,8 @@ config IOMMU_DEFAULT_PASSTHROUGH If unsure, say N here. config OF_IOMMU - def_bool y - depends on OF && IOMMU_API + def_bool y + depends on OF && IOMMU_API # IOMMU-agnostic DMA-mapping layer config IOMMU_DMA @@ -138,6 +142,7 @@ config AMD_IOMMU select PCI_PASID select IOMMU_API select IOMMU_IOVA + select IOMMU_DMA depends on X86_64 && PCI && ACPI ---help--- With this option you can enable support for AMD IOMMU hardware in @@ -177,11 +182,12 @@ config DMAR_TABLE config INTEL_IOMMU bool "Support for Intel IOMMU using DMA Remapping Devices" - depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) + depends on PCI_MSI && ACPI && (X86 || IA64) select IOMMU_API select IOMMU_IOVA select NEED_DMA_MAP_STATE select DMAR_TABLE + select SWIOTLB help DMA remapping (DMAR) devices support enables independent address translations for Direct Memory Access (DMA) from devices. @@ -206,7 +212,9 @@ config INTEL_IOMMU_SVM bool "Support for Shared Virtual Memory with Intel IOMMU" depends on INTEL_IOMMU && X86 select PCI_PASID + select PCI_PRI select MMU_NOTIFIER + select IOASID help Shared Virtual Memory (SVM) provides a facility for devices to access DMA resources through process address space by @@ -241,6 +249,18 @@ config INTEL_IOMMU_FLOPPY_WA workaround will setup a 1:1 mapping for the first 16MiB to make floppy (an ISA device) work. +config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON + bool "Enable Intel IOMMU scalable mode by default" + depends on INTEL_IOMMU + help + Selecting this option will enable by default the scalable mode if + hardware presents the capability. The scalable mode is defined in + VT-d 3.0. The scalable mode capability could be checked by reading + /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option + is not selected, scalable mode support could also be enabled by + passing intel_iommu=sm_on to the kernel. If not sure, please use + the default value. + config IRQ_REMAP bool "Support for Interrupt Remapping" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI @@ -349,7 +369,7 @@ config SPAPR_TCE_IOMMU # ARM IOMMU support config ARM_SMMU - bool "ARM Ltd. System MMU (SMMU) Support" + tristate "ARM Ltd. System MMU (SMMU) Support" depends on (ARM64 || ARM) && MMU select IOMMU_API select IOMMU_IO_PGTABLE_LPAE @@ -361,6 +381,18 @@ config ARM_SMMU Say Y here if your SoC includes an IOMMU device implementing the ARM SMMU architecture. +config ARM_SMMU_LEGACY_DT_BINDINGS + bool "Support the legacy \"mmu-masters\" devicetree bindings" + depends on ARM_SMMU=y && OF + help + Support for the badly designed and deprecated "mmu-masters" + devicetree bindings. This allows some DMA masters to attach + to the SMMU but does not provide any support via the DMA API. + If you're lucky, you might be able to get VFIO up and running. + + If you say Y here then you'll make me very sad. 
Instead, say N + and move your firmware to the utopian future that was 2016. + config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT bool "Default to disabling bypass on ARM SMMU v1 and v2" depends on ARM_SMMU @@ -387,7 +419,7 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT config. config ARM_SMMU_V3 - bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support" + tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support" depends on ARM64 select IOMMU_API select IOMMU_IO_PGTABLE_LPAE @@ -466,7 +498,7 @@ config QCOM_IOMMU config HYPERV_IOMMU bool "Hyper-V x2APIC IRQ Handling" - depends on HYPERV + depends on HYPERV && X86 select IOMMU_API default HYPERV help diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index f13f36ae1af6..2104fb8afc06 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -7,16 +7,19 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o +obj-$(CONFIG_IOASID) += ioasid.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o -obj-$(CONFIG_ARM_SMMU) += arm-smmu.o +obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o +arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o +obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b607a92791d3..aac132bd1ef0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -20,6 +20,7 @@ #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/dma-direct.h> +#include <linux/dma-iommu.h> #include <linux/iommu-helper.h> #include <linux/iommu.h> #include <linux/delay.h> @@ -70,7 +71,6 @@ */ #define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38)) -static DEFINE_SPINLOCK(amd_iommu_devtable_lock); static DEFINE_SPINLOCK(pd_bitmap_lock); /* List of all available dev_data structures */ @@ -89,8 +89,6 @@ const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; -static const struct dma_map_ops amd_iommu_dma_ops; - /* * general struct to manage commands send to an IOMMU */ @@ -103,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); -static void iova_domain_flush_tlb(struct iova_domain *iovad); - -/* - * Data container for a dma_ops specific protection domain - */ -struct dma_ops_domain { - /* generic protection domain information */ - struct protection_domain domain; - - /* IOVA RB-Tree */ - struct iova_domain iovad; -}; - -static struct iova_domain reserved_iova_ranges; -static struct lock_class_key reserved_rbtree_key; /**************************************************************************** * @@ -125,30 +108,6 @@ static struct lock_class_key reserved_rbtree_key; * 
****************************************************************************/ -static inline int match_hid_uid(struct device *dev, - struct acpihid_map_entry *entry) -{ - struct acpi_device *adev = ACPI_COMPANION(dev); - const char *hid, *uid; - - if (!adev) - return -ENODEV; - - hid = acpi_device_hid(adev); - uid = acpi_device_uid(adev); - - if (!hid || !(*hid)) - return -ENODEV; - - if (!uid || !(*uid)) - return strcmp(hid, entry->hid); - - if (!(*entry->uid)) - return strcmp(hid, entry->hid); - - return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid)); -} - static inline u16 get_pci_device_id(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -159,10 +118,14 @@ static inline u16 get_pci_device_id(struct device *dev) static inline int get_acpihid_device_id(struct device *dev, struct acpihid_map_entry **entry) { + struct acpi_device *adev = ACPI_COMPANION(dev); struct acpihid_map_entry *p; + if (!adev) + return -ENODEV; + list_for_each_entry(p, &acpihid_map, list) { - if (!match_hid_uid(dev, p)) { + if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) { if (entry) *entry = p; return p->devid; @@ -188,12 +151,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom) return container_of(dom, struct protection_domain, domain); } -static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain) -{ - BUG_ON(domain->flags != PD_DMA_OPS_MASK); - return container_of(domain, struct dma_ops_domain, domain); -} - static struct iommu_dev_data *alloc_dev_data(u16 devid) { struct iommu_dev_data *dev_data; @@ -202,6 +159,7 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid) if (!dev_data) return NULL; + spin_lock_init(&dev_data->lock); dev_data->devid = devid; ratelimit_default_init(&dev_data->rs); @@ -226,71 +184,58 @@ static struct iommu_dev_data *search_dev_data(u16 devid) return NULL; } -static int __last_alias(struct pci_dev *pdev, u16 alias, void *data) -{ - *(u16 *)data = alias; - return 0; -} - -static u16 get_alias(struct device *dev) +static int clone_alias(struct pci_dev *pdev, u16 alias, void *data) { - struct pci_dev *pdev = to_pci_dev(dev); - u16 devid, ivrs_alias, pci_alias; + u16 devid = pci_dev_id(pdev); - /* The callers make sure that get_device_id() does not fail here */ - devid = get_device_id(dev); - - /* For ACPI HID devices, we simply return the devid as such */ - if (!dev_is_pci(dev)) - return devid; + if (devid == alias) + return 0; - ivrs_alias = amd_iommu_alias_table[devid]; + amd_iommu_rlookup_table[alias] = + amd_iommu_rlookup_table[devid]; + memcpy(amd_iommu_dev_table[alias].data, + amd_iommu_dev_table[devid].data, + sizeof(amd_iommu_dev_table[alias].data)); - pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); + return 0; +} - if (ivrs_alias == pci_alias) - return ivrs_alias; +static void clone_aliases(struct pci_dev *pdev) +{ + if (!pdev) + return; /* - * DMA alias showdown - * - * The IVRS is fairly reliable in telling us about aliases, but it - * can't know about every screwy device. If we don't have an IVRS - * reported alias, use the PCI reported alias. In that case we may - * still need to initialize the rlookup and dev_table entries if the - * alias is to a non-existent device. + * The IVRS alias stored in the alias table may not be + * part of the PCI DMA aliases if it's bus differs + * from the original device. 
*/ - if (ivrs_alias == devid) { - if (!amd_iommu_rlookup_table[pci_alias]) { - amd_iommu_rlookup_table[pci_alias] = - amd_iommu_rlookup_table[devid]; - memcpy(amd_iommu_dev_table[pci_alias].data, - amd_iommu_dev_table[devid].data, - sizeof(amd_iommu_dev_table[pci_alias].data)); - } + clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL); - return pci_alias; - } + pci_for_each_dma_alias(pdev, clone_alias, NULL); +} + +static struct pci_dev *setup_aliases(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + u16 ivrs_alias; - pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d " - "for device [%04x:%04x], kernel reported alias " - "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias), - PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device, - PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias), - PCI_FUNC(pci_alias)); + /* For ACPI HID devices, there are no aliases */ + if (!dev_is_pci(dev)) + return NULL; /* - * If we don't have a PCI DMA alias and the IVRS alias is on the same - * bus, then the IVRS table may know about a quirk that we don't. + * Add the IVRS alias to the pci aliases if it is on the same + * bus. The IVRS table may know about a quirk that we don't. */ - if (pci_alias == devid && - PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { - pci_add_dma_alias(pdev, ivrs_alias & 0xff); - pci_info(pdev, "Added PCI DMA alias %02x.%d\n", - PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias)); - } + ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)]; + if (ivrs_alias != pci_dev_id(pdev) && + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) + pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1); + + clone_aliases(pdev); - return ivrs_alias; + return pdev; } static struct iommu_dev_data *find_dev_data(u16 devid) @@ -428,7 +373,7 @@ static int iommu_init_device(struct device *dev) if (!dev_data) return -ENOMEM; - dev_data->alias = get_alias(dev); + dev_data->pdev = setup_aliases(dev); /* * By default we use passthrough mode for IOMMUv2 capable device. @@ -436,7 +381,7 @@ static int iommu_init_device(struct device *dev) * invalid address), we ignore the capability for the device so * it'll be forced to go into translation mode. 
*/ - if ((iommu_pass_through || !amd_iommu_force_isolation) && + if ((iommu_default_passthrough() || !amd_iommu_force_isolation) && dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { struct amd_iommu *iommu; @@ -453,20 +398,16 @@ static int iommu_init_device(struct device *dev) static void iommu_ignore_device(struct device *dev) { - u16 alias; int devid; devid = get_device_id(dev); if (devid < 0) return; - alias = get_alias(dev); - + amd_iommu_rlookup_table[devid] = NULL; memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); - memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); - amd_iommu_rlookup_table[devid] = NULL; - amd_iommu_rlookup_table[alias] = NULL; + setup_aliases(dev); } static void iommu_uninit_device(struct device *dev) @@ -501,6 +442,29 @@ static void iommu_uninit_device(struct device *dev) */ } +/* + * Helper function to get the first pte of a large mapping + */ +static u64 *first_pte_l7(u64 *pte, unsigned long *page_size, + unsigned long *count) +{ + unsigned long pte_mask, pg_size, cnt; + u64 *fpte; + + pg_size = PTE_PAGE_SIZE(*pte); + cnt = PAGE_SIZE_PTE_COUNT(pg_size); + pte_mask = ~((cnt << 3) - 1); + fpte = (u64 *)(((unsigned long)pte) & pte_mask); + + if (page_size) + *page_size = pg_size; + + if (count) + *count = cnt; + + return fpte; +} + /**************************************************************************** * * Interrupt handling functions @@ -560,7 +524,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) retry: type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; - pasid = PPR_PASID(*(u64 *)&event[0]); + pasid = (event[0] & EVENT_DOMID_MASK_HI) | + (event[1] & EVENT_DOMID_MASK_LO); flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; address = (u64)(((u64)event[3]) << 32) | event[2]; @@ -593,7 +558,7 @@ retry: address, flags); break; case EVENT_TYPE_PAGE_TAB_ERR: - dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n", + dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; @@ -616,8 +581,7 @@ retry: pasid, address, flags); break; case EVENT_TYPE_INV_PPR_REQ: - pasid = ((event[0] >> 16) & 0xFFFF) - | ((event[1] << 6) & 0xF0000); + pasid = PPR_PASID(*((u64 *)__evt)); tag = event[1] & 0x03FF; dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), @@ -852,17 +816,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu, struct iommu_cmd *cmd) { u8 *target; - - target = iommu->cmd_buf + iommu->cmd_buf_tail; - - iommu->cmd_buf_tail += sizeof(*cmd); - iommu->cmd_buf_tail %= CMD_BUFFER_SIZE; + u32 tail; /* Copy command to buffer */ + tail = iommu->cmd_buf_tail; + target = iommu->cmd_buf + tail; memcpy(target, cmd, sizeof(*cmd)); + tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; + iommu->cmd_buf_tail = tail; + /* Tell the IOMMU about it */ - writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); } static void build_completion_wait(struct iommu_cmd *cmd, u64 address) @@ -1143,6 +1108,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) iommu_completion_wait(iommu); } +static void 
amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) +{ + struct iommu_cmd cmd; + + build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + dom_id, 1); + iommu_queue_command(iommu, &cmd); + + iommu_completion_wait(iommu); +} + static void amd_iommu_flush_all(struct amd_iommu *iommu) { struct iommu_cmd cmd; @@ -1201,6 +1177,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data, return iommu_queue_command(iommu, &cmd); } +static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct amd_iommu *iommu = data; + + return iommu_flush_dte(iommu, alias); +} + /* * Command send function for invalidating a device table entry */ @@ -1211,14 +1194,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data) int ret; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; - ret = iommu_flush_dte(iommu, dev_data->devid); - if (!ret && alias != dev_data->devid) - ret = iommu_flush_dte(iommu, alias); + if (dev_data->pdev) + ret = pci_for_each_dma_alias(dev_data->pdev, + device_flush_dte_alias, iommu); + else + ret = iommu_flush_dte(iommu, dev_data->devid); if (ret) return ret; + alias = amd_iommu_alias_table[dev_data->devid]; + if (alias != dev_data->devid) { + ret = iommu_flush_dte(iommu, alias); + if (ret) + return ret; + } + if (dev_data->ats.enabled) ret = device_flush_iotlb(dev_data, 0, ~0UL); @@ -1267,12 +1258,6 @@ static void domain_flush_pages(struct protection_domain *domain, __domain_flush_pages(domain, address, size, 0); } -/* Flush the whole IO/TLB for a given protection domain */ -static void domain_flush_tlb(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); -} - /* Flush the whole IO/TLB for a given protection domain - including PDE */ static void domain_flush_tlb_pde(struct protection_domain *domain) { @@ -1300,8 +1285,12 @@ static void domain_flush_np_cache(struct protection_domain *domain, dma_addr_t iova, size_t size) { if (unlikely(amd_iommu_np_cache)) { + unsigned long flags; + + spin_lock_irqsave(&domain->lock, flags); domain_flush_pages(domain, iova, size); domain_flush_complete(domain); + spin_unlock_irqrestore(&domain->lock, flags); } } @@ -1414,7 +1403,7 @@ static void free_pagetable(struct protection_domain *domain) BUG_ON(domain->mode < PAGE_MODE_NONE || domain->mode > PAGE_MODE_6_LEVEL); - free_sub_pt(root, domain->mode, freelist); + freelist = free_sub_pt(root, domain->mode, freelist); free_page_list(freelist); } @@ -1425,32 +1414,42 @@ static void free_pagetable(struct protection_domain *domain) * to 64 bits. 
*/ static bool increase_address_space(struct protection_domain *domain, + unsigned long address, gfp_t gfp) { + unsigned long flags; + bool ret = false; u64 *pte; - if (domain->mode == PAGE_MODE_6_LEVEL) - /* address space already 64 bit large */ - return false; + spin_lock_irqsave(&domain->lock, flags); + + if (address <= PM_LEVEL_SIZE(domain->mode) || + WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) + goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) - return false; + goto out; *pte = PM_LEVEL_PDE(domain->mode, iommu_virt_to_phys(domain->pt_root)); domain->pt_root = pte; domain->mode += 1; - domain->updated = true; - return true; + ret = true; + +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return ret; } static u64 *alloc_pte(struct protection_domain *domain, unsigned long address, unsigned long page_size, u64 **pte_page, - gfp_t gfp) + gfp_t gfp, + bool *updated) { int level, end_lvl; u64 *pte, *page; @@ -1458,7 +1457,7 @@ static u64 *alloc_pte(struct protection_domain *domain, BUG_ON(!is_power_of_2(page_size)); while (address > PM_LEVEL_SIZE(domain->mode)) - increase_address_space(domain, gfp); + *updated = increase_address_space(domain, address, gfp) || *updated; level = domain->mode - 1; pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; @@ -1472,9 +1471,32 @@ static u64 *alloc_pte(struct protection_domain *domain, __pte = *pte; pte_level = PM_PTE_LEVEL(__pte); - if (!IOMMU_PTE_PRESENT(__pte) || + /* + * If we replace a series of large PTEs, we need + * to tear down all of them. + */ + if (IOMMU_PTE_PRESENT(__pte) && pte_level == PAGE_MODE_7_LEVEL) { + unsigned long count, i; + u64 *lpte; + + lpte = first_pte_l7(pte, NULL, &count); + + /* + * Unmap the replicated PTEs that still match the + * original large mapping + */ + for (i = 0; i < count; ++i) + cmpxchg64(&lpte[i], __pte, 0ULL); + + *updated = true; + continue; + } + + if (!IOMMU_PTE_PRESENT(__pte) || + pte_level == PAGE_MODE_NONE) { page = (u64 *)get_zeroed_page(gfp); + if (!page) return NULL; @@ -1483,8 +1505,8 @@ static u64 *alloc_pte(struct protection_domain *domain, /* pte could have been changed somewhere. */ if (cmpxchg64(pte, __pte, __npte) != __pte) free_page((unsigned long)page); - else if (pte_level == PAGE_MODE_7_LEVEL) - domain->updated = true; + else if (IOMMU_PTE_PRESENT(__pte)) + *updated = true; continue; } @@ -1549,17 +1571,12 @@ static u64 *fetch_pte(struct protection_domain *domain, *page_size = PTE_LEVEL_PAGE_SIZE(level); } - if (PM_PTE_LEVEL(*pte) == 0x07) { - unsigned long pte_mask; - - /* - * If we have a series of large PTEs, make - * sure to return a pointer to the first one. - */ - *page_size = pte_mask = PTE_PAGE_SIZE(*pte); - pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); - pte = (u64 *)(((unsigned long)pte) & pte_mask); - } + /* + * If we have a series of large PTEs, make + * sure to return a pointer to the first one. 
+ */ + if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL) + pte = first_pte_l7(pte, page_size, NULL); return pte; } @@ -1598,26 +1615,29 @@ static int iommu_map_page(struct protection_domain *dom, gfp_t gfp) { struct page *freelist = NULL; + bool updated = false; u64 __pte, *pte; - int i, count; + int ret, i, count; BUG_ON(!IS_ALIGNED(bus_addr, page_size)); BUG_ON(!IS_ALIGNED(phys_addr, page_size)); + ret = -EINVAL; if (!(prot & IOMMU_PROT_MASK)) - return -EINVAL; + goto out; count = PAGE_SIZE_PTE_COUNT(page_size); - pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp); + pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated); + ret = -ENOMEM; if (!pte) - return -ENOMEM; + goto out; for (i = 0; i < count; ++i) freelist = free_clear_pte(&pte[i], pte[i], freelist); if (freelist != NULL) - dom->updated = true; + updated = true; if (count > 1) { __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size); @@ -1633,12 +1653,21 @@ static int iommu_map_page(struct protection_domain *dom, for (i = 0; i < count; ++i) pte[i] = __pte; - update_domain(dom); + ret = 0; + +out: + if (updated) { + unsigned long flags; + + spin_lock_irqsave(&dom->lock, flags); + update_domain(dom); + spin_unlock_irqrestore(&dom->lock, flags); + } /* Everything flushed out, free pages now */ free_page_list(freelist); - return 0; + return ret; } static unsigned long iommu_unmap_page(struct protection_domain *dom, @@ -1676,43 +1705,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, /**************************************************************************** * - * The next functions belong to the address allocator for the dma_ops - * interface functions. - * - ****************************************************************************/ - - -static unsigned long dma_ops_alloc_iova(struct device *dev, - struct dma_ops_domain *dma_dom, - unsigned int pages, u64 dma_mask) -{ - unsigned long pfn = 0; - - pages = __roundup_pow_of_two(pages); - - if (dma_mask > DMA_BIT_MASK(32)) - pfn = alloc_iova_fast(&dma_dom->iovad, pages, - IOVA_PFN(DMA_BIT_MASK(32)), false); - - if (!pfn) - pfn = alloc_iova_fast(&dma_dom->iovad, pages, - IOVA_PFN(dma_mask), true); - - return (pfn << PAGE_SHIFT); -} - -static void dma_ops_free_iova(struct dma_ops_domain *dma_dom, - unsigned long address, - unsigned int pages) -{ - pages = __roundup_pow_of_two(pages); - address >>= PAGE_SHIFT; - - free_iova_fast(&dma_dom->iovad, address, pages); -} - -/**************************************************************************** - * * The next functions belong to the domain allocation. A domain is * allocated for every IOMMU as the default domain. If device isolation * is enabled, every device get its own domain. 
The most important thing @@ -1787,38 +1779,23 @@ static void free_gcr3_table(struct protection_domain *domain) free_page((unsigned long)domain->gcr3_tbl); } -static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom) -{ - domain_flush_tlb(&dom->domain); - domain_flush_complete(&dom->domain); -} - -static void iova_domain_flush_tlb(struct iova_domain *iovad) -{ - struct dma_ops_domain *dom; - - dom = container_of(iovad, struct dma_ops_domain, iovad); - - dma_ops_domain_flush_tlb(dom); -} - /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table */ -static void dma_ops_domain_free(struct dma_ops_domain *dom) +static void dma_ops_domain_free(struct protection_domain *domain) { - if (!dom) + if (!domain) return; - put_iova_domain(&dom->iovad); + iommu_put_dma_cookie(&domain->domain); - free_pagetable(&dom->domain); + free_pagetable(domain); - if (dom->domain.id) - domain_id_free(dom->domain.id); + if (domain->id) + domain_id_free(domain->id); - kfree(dom); + kfree(domain); } /* @@ -1826,35 +1803,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) * It also initializes the page table and the address allocator data * structures required for the dma_ops interface */ -static struct dma_ops_domain *dma_ops_domain_alloc(void) +static struct protection_domain *dma_ops_domain_alloc(void) { - struct dma_ops_domain *dma_dom; + struct protection_domain *domain; - dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); - if (!dma_dom) + domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL); + if (!domain) return NULL; - if (protection_domain_init(&dma_dom->domain)) - goto free_dma_dom; - - dma_dom->domain.mode = PAGE_MODE_3_LEVEL; - dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); - dma_dom->domain.flags = PD_DMA_OPS_MASK; - if (!dma_dom->domain.pt_root) - goto free_dma_dom; - - init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN); + if (protection_domain_init(domain)) + goto free_domain; - if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL)) - goto free_dma_dom; + domain->mode = PAGE_MODE_3_LEVEL; + domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + domain->flags = PD_DMA_OPS_MASK; + if (!domain->pt_root) + goto free_domain; - /* Initialize reserved ranges */ - copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); + if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM) + goto free_domain; - return dma_dom; + return domain; -free_dma_dom: - dma_ops_domain_free(dma_dom); +free_domain: + dma_ops_domain_free(domain); return NULL; } @@ -1873,6 +1845,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, { u64 pte_root = 0; u64 flags = 0; + u32 old_domid; if (domain->mode != PAGE_MODE_NONE) pte_root = iommu_virt_to_phys(domain->pt_root); @@ -1922,8 +1895,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, flags &= ~DEV_DOMID_MASK; flags |= domain->id; + old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK; amd_iommu_dev_table[devid].data[1] = flags; amd_iommu_dev_table[devid].data[0] = pte_root; + + /* + * A kdump kernel might be replacing a domain ID that was copied from + * the previous kernel--if so, it needs to flush the translation cache + * entries for the old domain ID that is being overwritten + */ + if (old_domid) { + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + amd_iommu_flush_tlb_domid(iommu, old_domid); + } } static void clear_dte_entry(u16 devid) @@ -1939,11 
+1924,9 @@ static void do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { struct amd_iommu *iommu; - u16 alias; bool ats; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; ats = dev_data->ats.enabled; /* Update data structures */ @@ -1956,8 +1939,7 @@ static void do_attach(struct iommu_dev_data *dev_data, /* Update device table */ set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2); - if (alias != dev_data->devid) - set_dte_entry(alias, domain, ats, dev_data->iommu_v2); + clone_aliases(dev_data->pdev); device_flush_dte(dev_data); } @@ -1966,17 +1948,14 @@ static void do_detach(struct iommu_dev_data *dev_data) { struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu; - u16 alias; iommu = amd_iommu_rlookup_table[dev_data->devid]; - alias = dev_data->alias; /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); clear_dte_entry(dev_data->devid); - if (alias != dev_data->devid) - clear_dte_entry(alias); + clone_aliases(dev_data->pdev); /* Flush the DTE entry */ device_flush_dte(dev_data); @@ -1992,36 +1971,6 @@ static void do_detach(struct iommu_dev_data *dev_data) domain->dev_cnt -= 1; } -/* - * If a device is not yet associated with a domain, this function makes the - * device visible in the domain - */ -static int __attach_device(struct iommu_dev_data *dev_data, - struct protection_domain *domain) -{ - int ret; - - /* lock domain */ - spin_lock(&domain->lock); - - ret = -EBUSY; - if (dev_data->domain != NULL) - goto out_unlock; - - /* Attach alias group root */ - do_attach(dev_data, domain); - - ret = 0; - -out_unlock: - - /* ready */ - spin_unlock(&domain->lock); - - return ret; -} - - static void pdev_iommuv2_disable(struct pci_dev *pdev) { pci_disable_ats(pdev); @@ -2103,19 +2052,28 @@ static int attach_device(struct device *dev, unsigned long flags; int ret; + spin_lock_irqsave(&domain->lock, flags); + dev_data = get_dev_data(dev); + spin_lock(&dev_data->lock); + + ret = -EBUSY; + if (dev_data->domain != NULL) + goto out; + if (!dev_is_pci(dev)) goto skip_ats_check; pdev = to_pci_dev(dev); if (domain->flags & PD_IOMMUV2_MASK) { + ret = -EINVAL; if (!dev_data->passthrough) - return -EINVAL; + goto out; if (dev_data->iommu_v2) { if (pdev_iommuv2_enable(pdev) != 0) - return -EINVAL; + goto out; dev_data->ats.enabled = true; dev_data->ats.qdep = pci_ats_queue_depth(pdev); @@ -2128,9 +2086,9 @@ static int attach_device(struct device *dev, } skip_ats_check: - spin_lock_irqsave(&amd_iommu_devtable_lock, flags); - ret = __attach_device(dev_data, domain); - spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags); + ret = 0; + + do_attach(dev_data, domain); /* * We might boot into a crash-kernel here. The crashed kernel @@ -2139,23 +2097,14 @@ skip_ats_check: */ domain_flush_tlb_pde(domain); - return ret; -} - -/* - * Removes a device from a protection domain (unlocked) - */ -static void __detach_device(struct iommu_dev_data *dev_data) -{ - struct protection_domain *domain; - - domain = dev_data->domain; + domain_flush_complete(domain); - spin_lock(&domain->lock); +out: + spin_unlock(&dev_data->lock); - do_detach(dev_data); + spin_unlock_irqrestore(&domain->lock, flags); - spin_unlock(&domain->lock); + return ret; } /* @@ -2170,6 +2119,10 @@ static void detach_device(struct device *dev) dev_data = get_dev_data(dev); domain = dev_data->domain; + spin_lock_irqsave(&domain->lock, flags); + + spin_lock(&dev_data->lock); + /* * First check if the device is still attached. 
It might already * be detached from its domain because the generic @@ -2177,15 +2130,12 @@ static void detach_device(struct device *dev) * our alias handling. */ if (WARN_ON(!dev_data->domain)) - return; + goto out; - /* lock device table */ - spin_lock_irqsave(&amd_iommu_devtable_lock, flags); - __detach_device(dev_data); - spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags); + do_detach(dev_data); if (!dev_is_pci(dev)) - return; + goto out; if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) pdev_iommuv2_disable(to_pci_dev(dev)); @@ -2193,6 +2143,11 @@ static void detach_device(struct device *dev) pci_disable_ats(to_pci_dev(dev)); dev_data->ats.enabled = false; + +out: + spin_unlock(&dev_data->lock); + + spin_unlock_irqrestore(&domain->lock, flags); } static int amd_iommu_add_device(struct device *dev) @@ -2226,15 +2181,15 @@ static int amd_iommu_add_device(struct device *dev) BUG_ON(!dev_data); - if (iommu_pass_through || dev_data->iommu_v2) + if (dev_data->iommu_v2) iommu_request_dm_for_dev(dev); /* Domains are initialized for this device - have a look what we ended up with */ domain = iommu_get_domain_for_dev(dev); if (domain->type == IOMMU_DOMAIN_IDENTITY) dev_data->passthrough = true; - else - dev->dma_ops = &amd_iommu_dma_ops; + else if (domain->type == IOMMU_DOMAIN_DMA) + iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0); out: iommu_completion_wait(iommu); @@ -2268,43 +2223,32 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) return acpihid_device_group(dev); } +static int amd_iommu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + return -ENODEV; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = !amd_iommu_unmap_flush; + return 0; + default: + return -ENODEV; + } + break; + default: + return -EINVAL; + } +} + /***************************************************************************** * * The next functions belong to the dma_ops mapping/unmapping code. * *****************************************************************************/ -/* - * In the dma_ops path we only have the struct device. This function - * finds the corresponding IOMMU, the protection domain and the - * requestor id for a given device. - * If the device is not yet associated with a domain this is also done - * in this function. 
- */ -static struct protection_domain *get_domain(struct device *dev) -{ - struct protection_domain *domain; - struct iommu_domain *io_domain; - - if (!check_device(dev)) - return ERR_PTR(-EINVAL); - - domain = get_dev_data(dev)->domain; - if (domain == NULL && get_dev_data(dev)->defer_attach) { - get_dev_data(dev)->defer_attach = false; - io_domain = iommu_get_domain_for_dev(dev); - domain = to_pdomain(io_domain); - attach_device(dev, domain); - } - if (domain == NULL) - return ERR_PTR(-EBUSY); - - if (!dma_ops_domain(domain)) - return ERR_PTR(-EBUSY); - - return domain; -} - static void update_device_table(struct protection_domain *domain) { struct iommu_dev_data *dev_data; @@ -2312,468 +2256,16 @@ static void update_device_table(struct protection_domain *domain) list_for_each_entry(dev_data, &domain->dev_list, list) { set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, dev_data->iommu_v2); - - if (dev_data->devid == dev_data->alias) - continue; - - /* There is an alias, update device table entry for it */ - set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled, - dev_data->iommu_v2); + clone_aliases(dev_data->pdev); } } static void update_domain(struct protection_domain *domain) { - if (!domain->updated) - return; - update_device_table(domain); domain_flush_devices(domain); domain_flush_tlb_pde(domain); - - domain->updated = false; -} - -static int dir2prot(enum dma_data_direction direction) -{ - if (direction == DMA_TO_DEVICE) - return IOMMU_PROT_IR; - else if (direction == DMA_FROM_DEVICE) - return IOMMU_PROT_IW; - else if (direction == DMA_BIDIRECTIONAL) - return IOMMU_PROT_IW | IOMMU_PROT_IR; - else - return 0; -} - -/* - * This function contains common code for mapping of a physically - * contiguous memory region into DMA address space. It is used by all - * mapping functions provided with this IOMMU driver. - * Must be called with the domain lock held. - */ -static dma_addr_t __map_single(struct device *dev, - struct dma_ops_domain *dma_dom, - phys_addr_t paddr, - size_t size, - enum dma_data_direction direction, - u64 dma_mask) -{ - dma_addr_t offset = paddr & ~PAGE_MASK; - dma_addr_t address, start, ret; - unsigned int pages; - int prot = 0; - int i; - - pages = iommu_num_pages(paddr, size, PAGE_SIZE); - paddr &= PAGE_MASK; - - address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); - if (!address) - goto out; - - prot = dir2prot(direction); - - start = address; - for (i = 0; i < pages; ++i) { - ret = iommu_map_page(&dma_dom->domain, start, paddr, - PAGE_SIZE, prot, GFP_ATOMIC); - if (ret) - goto out_unmap; - - paddr += PAGE_SIZE; - start += PAGE_SIZE; - } - address += offset; - - domain_flush_np_cache(&dma_dom->domain, address, size); - -out: - return address; - -out_unmap: - - for (--i; i >= 0; --i) { - start -= PAGE_SIZE; - iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); - } - - domain_flush_tlb(&dma_dom->domain); - domain_flush_complete(&dma_dom->domain); - - dma_ops_free_iova(dma_dom, address, pages); - - return DMA_MAPPING_ERROR; -} - -/* - * Does the reverse of the __map_single function. 
Must be called with - * the domain lock held too - */ -static void __unmap_single(struct dma_ops_domain *dma_dom, - dma_addr_t dma_addr, - size_t size, - int dir) -{ - dma_addr_t i, start; - unsigned int pages; - - pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); - dma_addr &= PAGE_MASK; - start = dma_addr; - - for (i = 0; i < pages; ++i) { - iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); - start += PAGE_SIZE; - } - - if (amd_iommu_unmap_flush) { - domain_flush_tlb(&dma_dom->domain); - domain_flush_complete(&dma_dom->domain); - dma_ops_free_iova(dma_dom, dma_addr, pages); - } else { - pages = __roundup_pow_of_two(pages); - queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0); - } -} - -/* - * The exported map_single function for dma_ops. - */ -static dma_addr_t map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - phys_addr_t paddr = page_to_phys(page) + offset; - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - u64 dma_mask; - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return (dma_addr_t)paddr; - else if (IS_ERR(domain)) - return DMA_MAPPING_ERROR; - - dma_mask = *dev->dma_mask; - dma_dom = to_dma_ops_domain(domain); - - return __map_single(dev, dma_dom, paddr, size, dir, dma_mask); -} - -/* - * The exported unmap_single function for dma_ops. - */ -static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - dma_dom = to_dma_ops_domain(domain); - - __unmap_single(dma_dom, dma_addr, size, dir); -} - -static int sg_num_pages(struct device *dev, - struct scatterlist *sglist, - int nelems) -{ - unsigned long mask, boundary_size; - struct scatterlist *s; - int i, npages = 0; - - mask = dma_get_seg_boundary(dev); - boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT : - 1UL << (BITS_PER_LONG - PAGE_SHIFT); - - for_each_sg(sglist, s, nelems, i) { - int p, n; - - s->dma_address = npages << PAGE_SHIFT; - p = npages % boundary_size; - n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - if (p + n > boundary_size) - npages += boundary_size - p; - npages += n; - } - - return npages; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). 
- */ -static int map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction, - unsigned long attrs) -{ - int mapped_pages = 0, npages = 0, prot = 0, i; - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - struct scatterlist *s; - unsigned long address; - u64 dma_mask; - int ret; - - domain = get_domain(dev); - if (IS_ERR(domain)) - return 0; - - dma_dom = to_dma_ops_domain(domain); - dma_mask = *dev->dma_mask; - - npages = sg_num_pages(dev, sglist, nelems); - - address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); - if (!address) - goto out_err; - - prot = dir2prot(direction); - - /* Map all sg entries */ - for_each_sg(sglist, s, nelems, i) { - int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - - for (j = 0; j < pages; ++j) { - unsigned long bus_addr, phys_addr; - - bus_addr = address + s->dma_address + (j << PAGE_SHIFT); - phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); - if (ret) - goto out_unmap; - - mapped_pages += 1; - } - } - - /* Everything is mapped - write the right values into s->dma_address */ - for_each_sg(sglist, s, nelems, i) { - /* - * Add in the remaining piece of the scatter-gather offset that - * was masked out when we were determining the physical address - * via (sg_phys(s) & PAGE_MASK) earlier. - */ - s->dma_address += address + (s->offset & ~PAGE_MASK); - s->dma_length = s->length; - } - - if (s) - domain_flush_np_cache(domain, s->dma_address, s->dma_length); - - return nelems; - -out_unmap: - dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n", - npages, ret); - - for_each_sg(sglist, s, nelems, i) { - int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); - - for (j = 0; j < pages; ++j) { - unsigned long bus_addr; - - bus_addr = address + s->dma_address + (j << PAGE_SHIFT); - iommu_unmap_page(domain, bus_addr, PAGE_SIZE); - - if (--mapped_pages == 0) - goto out_free_iova; - } - } - -out_free_iova: - free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); - -out_err: - return 0; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). - */ -static void unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - unsigned long startaddr; - int npages; - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - startaddr = sg_dma_address(sglist) & PAGE_MASK; - dma_dom = to_dma_ops_domain(domain); - npages = sg_num_pages(dev, sglist, nelems); - - __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir); -} - -/* - * The exported alloc_coherent function for dma_ops. 
- */ -static void *alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag, - unsigned long attrs) -{ - u64 dma_mask = dev->coherent_dma_mask; - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - struct page *page; - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) { - page = alloc_pages(flag, get_order(size)); - *dma_addr = page_to_phys(page); - return page_address(page); - } else if (IS_ERR(domain)) - return NULL; - - dma_dom = to_dma_ops_domain(domain); - size = PAGE_ALIGN(size); - dma_mask = dev->coherent_dma_mask; - flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - flag |= __GFP_ZERO; - - page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); - if (!page) { - if (!gfpflags_allow_blocking(flag)) - return NULL; - - page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, - get_order(size), flag & __GFP_NOWARN); - if (!page) - return NULL; - } - - if (!dma_mask) - dma_mask = *dev->dma_mask; - - *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), - size, DMA_BIDIRECTIONAL, dma_mask); - - if (*dma_addr == DMA_MAPPING_ERROR) - goto out_free; - - return page_address(page); - -out_free: - - if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) - __free_pages(page, get_order(size)); - - return NULL; -} - -/* - * The exported free_coherent function for dma_ops. - */ -static void free_coherent(struct device *dev, size_t size, - void *virt_addr, dma_addr_t dma_addr, - unsigned long attrs) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - struct page *page; - - page = virt_to_page(virt_addr); - size = PAGE_ALIGN(size); - - domain = get_domain(dev); - if (IS_ERR(domain)) - goto free_mem; - - dma_dom = to_dma_ops_domain(domain); - - __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL); - -free_mem: - if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) - __free_pages(page, get_order(size)); -} - -/* - * This function is called by the DMA layer to find out if we can handle a - * particular device. It is part of the dma_ops. 
- */ -static int amd_iommu_dma_supported(struct device *dev, u64 mask) -{ - if (!dma_direct_supported(dev, mask)) - return 0; - return check_device(dev); -} - -static const struct dma_map_ops amd_iommu_dma_ops = { - .alloc = alloc_coherent, - .free = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, - .map_sg = map_sg, - .unmap_sg = unmap_sg, - .dma_supported = amd_iommu_dma_supported, -}; - -static int init_reserved_iova_ranges(void) -{ - struct pci_dev *pdev = NULL; - struct iova *val; - - init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN); - - lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock, - &reserved_rbtree_key); - - /* MSI memory range */ - val = reserve_iova(&reserved_iova_ranges, - IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END)); - if (!val) { - pr_err("Reserving MSI range failed\n"); - return -ENOMEM; - } - - /* HT memory range */ - val = reserve_iova(&reserved_iova_ranges, - IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END)); - if (!val) { - pr_err("Reserving HT range failed\n"); - return -ENOMEM; - } - - /* - * Memory used for PCI resources - * FIXME: Check whether we can reserve the PCI-hole completly - */ - for_each_pci_dev(pdev) { - int i; - - for (i = 0; i < PCI_NUM_RESOURCES; ++i) { - struct resource *r = &pdev->resource[i]; - - if (!(r->flags & IORESOURCE_MEM)) - continue; - - val = reserve_iova(&reserved_iova_ranges, - IOVA_PFN(r->start), - IOVA_PFN(r->end)); - if (!val) { - pci_err(pdev, "Reserve pci-resource range %pR failed\n", r); - return -ENOMEM; - } - } - } - - return 0; } int __init amd_iommu_init_api(void) @@ -2784,10 +2276,6 @@ int __init amd_iommu_init_api(void) if (ret) return ret; - ret = init_reserved_iova_ranges(); - if (ret) - return ret; - err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops); if (err) return err; @@ -2805,8 +2293,7 @@ int __init amd_iommu_init_api(void) int __init amd_iommu_init_dma_ops(void) { - swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; - iommu_detected = 1; + swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 
1 : 0; if (amd_iommu_unmap_flush) pr_info("IO/TLB flush on unmap enabled\n"); @@ -2832,16 +2319,16 @@ static void cleanup_domain(struct protection_domain *domain) struct iommu_dev_data *entry; unsigned long flags; - spin_lock_irqsave(&amd_iommu_devtable_lock, flags); + spin_lock_irqsave(&domain->lock, flags); while (!list_empty(&domain->dev_list)) { entry = list_first_entry(&domain->dev_list, struct iommu_dev_data, list); BUG_ON(!entry->domain); - __detach_device(entry); + do_detach(entry); } - spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags); + spin_unlock_irqrestore(&domain->lock, flags); } static void protection_domain_free(struct protection_domain *domain) @@ -2858,7 +2345,6 @@ static void protection_domain_free(struct protection_domain *domain) static int protection_domain_init(struct protection_domain *domain) { spin_lock_init(&domain->lock); - mutex_init(&domain->api_lock); domain->id = domain_id_alloc(); if (!domain->id) return -ENOMEM; @@ -2889,7 +2375,6 @@ out_err: static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *pdomain; - struct dma_ops_domain *dma_domain; switch (type) { case IOMMU_DOMAIN_UNMANAGED: @@ -2910,12 +2395,11 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) break; case IOMMU_DOMAIN_DMA: - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) { + pdomain = dma_ops_domain_alloc(); + if (!pdomain) { pr_err("Failed to allocate\n"); return NULL; } - pdomain = &dma_domain->domain; break; case IOMMU_DOMAIN_IDENTITY: pdomain = protection_domain_alloc(); @@ -2934,7 +2418,6 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; - struct dma_ops_domain *dma_dom; domain = to_pdomain(dom); @@ -2949,8 +2432,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) switch (dom->type) { case IOMMU_DOMAIN_DMA: /* Now release the domain */ - dma_dom = to_dma_ops_domain(domain); - dma_ops_domain_free(dma_dom); + dma_ops_domain_free(domain); break; default: if (domain->mode != PAGE_MODE_NONE) @@ -3006,6 +2488,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, return -EINVAL; dev_data = dev->archdata.iommu; + dev_data->defer_attach = false; iommu = amd_iommu_rlookup_table[dev_data->devid]; if (!iommu) @@ -3031,7 +2514,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, - phys_addr_t paddr, size_t page_size, int iommu_prot) + phys_addr_t paddr, size_t page_size, int iommu_prot, + gfp_t gfp) { struct protection_domain *domain = to_pdomain(dom); int prot = 0; @@ -3045,9 +2529,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, if (iommu_prot & IOMMU_WRITE) prot |= IOMMU_PROT_IW; - mutex_lock(&domain->api_lock); - ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL); - mutex_unlock(&domain->api_lock); + ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp); domain_flush_np_cache(domain, iova, page_size); @@ -3055,19 +2537,15 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, } static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, - size_t page_size) + size_t page_size, + struct iommu_iotlb_gather *gather) { struct protection_domain *domain = to_pdomain(dom); - size_t unmap_size; if (domain->mode == PAGE_MODE_NONE) return 0; - mutex_lock(&domain->api_lock); - unmap_size = iommu_unmap_page(domain, iova, 
page_size); - mutex_unlock(&domain->api_lock); - - return unmap_size; + return iommu_unmap_page(domain, iova, page_size); } static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, @@ -3159,28 +2637,6 @@ static void amd_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, head); } -static void amd_iommu_put_resv_regions(struct device *dev, - struct list_head *head) -{ - struct iommu_resv_region *entry, *next; - - list_for_each_entry_safe(entry, next, head, list) - kfree(entry); -} - -static void amd_iommu_apply_resv_region(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region) -{ - struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); - unsigned long start, end; - - start = IOVA_PFN(region->start); - end = IOVA_PFN(region->start + region->length - 1); - - WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); -} - static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { @@ -3191,14 +2647,18 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) { struct protection_domain *dom = to_pdomain(domain); + unsigned long flags; + spin_lock_irqsave(&dom->lock, flags); domain_flush_tlb_pde(dom); domain_flush_complete(dom); + spin_unlock_irqrestore(&dom->lock, flags); } -static void amd_iommu_iotlb_range_add(struct iommu_domain *domain, - unsigned long iova, size_t size) +static void amd_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { + amd_iommu_flush_iotlb_all(domain); } const struct iommu_ops amd_iommu_ops = { @@ -3213,14 +2673,13 @@ const struct iommu_ops amd_iommu_ops = { .add_device = amd_iommu_add_device, .remove_device = amd_iommu_remove_device, .device_group = amd_iommu_device_group, + .domain_get_attr = amd_iommu_domain_get_attr, .get_resv_regions = amd_iommu_get_resv_regions, - .put_resv_regions = amd_iommu_put_resv_regions, - .apply_resv_region = amd_iommu_apply_resv_region, + .put_resv_regions = generic_iommu_put_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, .flush_iotlb_all = amd_iommu_flush_iotlb_all, - .iotlb_range_add = amd_iommu_iotlb_range_add, - .iotlb_sync = amd_iommu_flush_iotlb_all, + .iotlb_sync = amd_iommu_iotlb_sync, }; /***************************************************************************** @@ -3255,7 +2714,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom) /* Update data structure */ domain->mode = PAGE_MODE_NONE; - domain->updated = true; /* Make changes visible to IOMMUs */ update_domain(domain); @@ -3301,7 +2759,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) domain->glx = levels; domain->flags |= PD_IOMMUV2_MASK; - domain->updated = true; update_domain(domain); @@ -3530,9 +2987,23 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr); struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) { struct protection_domain *pdomain; + struct iommu_domain *io_domain; + struct device *dev = &pdev->dev; + + if (!check_device(dev)) + return NULL; + + pdomain = get_dev_data(dev)->domain; + if (pdomain == NULL && get_dev_data(dev)->defer_attach) { + get_dev_data(dev)->defer_attach = false; + io_domain = iommu_get_domain_for_dev(dev); + pdomain = to_pdomain(io_domain); + attach_device(dev, pdomain); + } + if (pdomain == NULL) + return NULL; - pdomain = get_domain(&pdev->dev); - if (IS_ERR(pdomain)) + if (!dma_ops_domain(pdomain)) return NULL; /* 
Only return IOMMUv2 domains */ @@ -3672,7 +3143,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, iommu_flush_dte(iommu, devid); } -static struct irq_remap_table *alloc_irq_table(u16 devid) +static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias, + void *data) +{ + struct irq_remap_table *table = data; + + irq_lookup_table[alias] = table; + set_dte_irq_entry(alias, table); + + iommu_flush_dte(amd_iommu_rlookup_table[alias], alias); + + return 0; +} + +static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev) { struct irq_remap_table *table = NULL; struct irq_remap_table *new_table = NULL; @@ -3718,7 +3202,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid) table = new_table; new_table = NULL; - set_remap_table_entry(iommu, devid, table); + if (pdev) + pci_for_each_dma_alias(pdev, set_remap_table_entry_alias, + table); + else + set_remap_table_entry(iommu, devid, table); + if (devid != alias) set_remap_table_entry(iommu, alias, table); @@ -3735,7 +3224,8 @@ out_unlock: return table; } -static int alloc_irq_index(u16 devid, int count, bool align) +static int alloc_irq_index(u16 devid, int count, bool align, + struct pci_dev *pdev) { struct irq_remap_table *table; int index, c, alignment = 1; @@ -3745,7 +3235,7 @@ static int alloc_irq_index(u16 devid, int count, bool align) if (!iommu) return -ENODEV; - table = alloc_irq_table(devid); + table = alloc_irq_table(devid, pdev); if (!table) return -ENODEV; @@ -4178,7 +3668,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, struct irq_remap_table *table; struct amd_iommu *iommu; - table = alloc_irq_table(devid); + table = alloc_irq_table(devid, NULL); if (table) { if (!table->min_index) { /* @@ -4195,11 +3685,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, } else { index = -ENOMEM; } - } else { + } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI || + info->type == X86_IRQ_ALLOC_TYPE_MSIX) { bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI); - index = alloc_irq_index(devid, nr_irqs, align); + index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev); + } else { + index = alloc_irq_index(devid, nr_irqs, false, NULL); } + if (index < 0) { pr_warn("Failed to allocate IRTE\n"); ret = index; @@ -4313,13 +3807,62 @@ static const struct irq_domain_ops amd_ir_domain_ops = { .deactivate = irq_remapping_deactivate, }; +int amd_iommu_activate_guest_mode(void *data) +{ + struct amd_ir_data *ir_data = (struct amd_ir_data *)data; + struct irte_ga *entry = (struct irte_ga *) ir_data->entry; + + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || + !entry || entry->lo.fields_vapic.guest_mode) + return 0; + + entry->lo.val = 0; + entry->hi.val = 0; + + entry->lo.fields_vapic.guest_mode = 1; + entry->lo.fields_vapic.ga_log_intr = 1; + entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; + entry->hi.fields.vector = ir_data->ga_vector; + entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; + + return modify_irte_ga(ir_data->irq_2_irte.devid, + ir_data->irq_2_irte.index, entry, NULL); +} +EXPORT_SYMBOL(amd_iommu_activate_guest_mode); + +int amd_iommu_deactivate_guest_mode(void *data) +{ + struct amd_ir_data *ir_data = (struct amd_ir_data *)data; + struct irte_ga *entry = (struct irte_ga *) ir_data->entry; + struct irq_cfg *cfg = ir_data->cfg; + + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || + !entry || !entry->lo.fields_vapic.guest_mode) + return 0; + + entry->lo.val = 0; + entry->hi.val = 0; + + entry->lo.fields_remap.dm 
= apic->irq_dest_mode; + entry->lo.fields_remap.int_type = apic->irq_delivery_mode; + entry->hi.fields.vector = cfg->vector; + entry->lo.fields_remap.destination = + APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); + entry->hi.fields.destination = + APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); + + return modify_irte_ga(ir_data->irq_2_irte.devid, + ir_data->irq_2_irte.index, entry, NULL); +} +EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); + static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) { + int ret; struct amd_iommu *iommu; struct amd_iommu_pi_data *pi_data = vcpu_info; struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; struct amd_ir_data *ir_data = data->chip_data; - struct irte_ga *irte = (struct irte_ga *) ir_data->entry; struct irq_2_irte *irte_info = &ir_data->irq_2_irte; struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); @@ -4330,6 +3873,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) if (!dev_data || !dev_data->use_vapic) return 0; + ir_data->cfg = irqd_cfg(data); pi_data->ir_data = ir_data; /* Note: @@ -4348,37 +3892,24 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) pi_data->prev_ga_tag = ir_data->cached_ga_tag; if (pi_data->is_guest_mode) { - /* Setting */ - irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); - irte->hi.fields.vector = vcpu_pi_info->vector; - irte->lo.fields_vapic.ga_log_intr = 1; - irte->lo.fields_vapic.guest_mode = 1; - irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; - - ir_data->cached_ga_tag = pi_data->ga_tag; + ir_data->ga_root_ptr = (pi_data->base >> 12); + ir_data->ga_vector = vcpu_pi_info->vector; + ir_data->ga_tag = pi_data->ga_tag; + ret = amd_iommu_activate_guest_mode(ir_data); + if (!ret) + ir_data->cached_ga_tag = pi_data->ga_tag; } else { - /* Un-Setting */ - struct irq_cfg *cfg = irqd_cfg(data); - - irte->hi.val = 0; - irte->lo.val = 0; - irte->hi.fields.vector = cfg->vector; - irte->lo.fields_remap.guest_mode = 0; - irte->lo.fields_remap.destination = - APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); - irte->hi.fields.destination = - APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); - irte->lo.fields_remap.int_type = apic->irq_delivery_mode; - irte->lo.fields_remap.dm = apic->irq_dest_mode; + ret = amd_iommu_deactivate_guest_mode(ir_data); /* * This communicates the ga_tag back to the caller * so that it can do all the necessary clean up. 
*/ - ir_data->cached_ga_tag = 0; + if (!ret) + ir_data->cached_ga_tag = 0; } - return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data); + return ret; } diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h new file mode 100644 index 000000000000..12d540d9b59b --- /dev/null +++ b/drivers/iommu/amd_iommu.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef AMD_IOMMU_H +#define AMD_IOMMU_H + +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); + +#ifdef CONFIG_DMI +void amd_iommu_apply_ivrs_quirks(void); +#else +static void amd_iommu_apply_ivrs_quirks(void) { } +#endif + +#endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 4413aa67000e..2759a8d57b7f 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -32,6 +32,7 @@ #include <asm/irq_remapping.h> #include <linux/crash_dump.h> +#include "amd_iommu.h" #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" @@ -70,6 +71,8 @@ #define IVHD_FLAG_ISOC_EN_MASK 0x08 #define IVMD_FLAG_EXCL_RANGE 0x08 +#define IVMD_FLAG_IW 0x04 +#define IVMD_FLAG_IR 0x02 #define IVMD_FLAG_UNITY_MAP 0x01 #define ACPI_DEVFLAG_INITPASS 0x01 @@ -146,7 +149,7 @@ bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; -static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; +static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; static bool amd_iommu_detected; static bool __initdata amd_iommu_disabled; @@ -439,7 +442,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) return NULL; } - return (u8 __iomem *)ioremap_nocache(address, end); + return (u8 __iomem *)ioremap(address, end); } static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) @@ -713,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu) writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); - iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); + iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); iommu_feature_enable(iommu, CONTROL_PPR_EN); } @@ -1002,7 +1005,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, set_iommu_for_device(iommu, devid); } -static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; @@ -1115,21 +1118,17 @@ static int __init add_early_maps(void) */ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) { - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) return; - if (iommu) { - /* - * We only can configure exclusion ranges per IOMMU, not - * per device. But we can enable the exclusion range per - * device. This is done here - */ - set_dev_entry_bit(devid, DEV_ENTRY_EX); - iommu->exclusion_start = m->range_start; - iommu->exclusion_length = m->range_length; - } + /* + * Treat per-device exclusion ranges as r/w unity-mapped regions + * since some buggy BIOSes might lead to the overwritten exclusion + * range (exclusion_start and exclusion_length members). This + * happens when there are multiple exclusion ranges (IVMD entries) + * defined in ACPI table. 
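The comment above spells out the quirk being worked around: exclusion_start/exclusion_length are per-IOMMU fields, so a second IVMD exclusion entry silently clobbers the first, and the assignment just below therefore downgrades such entries to read/write unity-mapped regions, which are handled per entry. A standalone sketch of both halves of that reasoning, using the IVMD_FLAG_* values added earlier in this patch and a made-up toy_iommu struct (illustrative only, not kernel code):

#include <assert.h>
#include <stdint.h>

#define IVMD_FLAG_EXCL_RANGE    0x08
#define IVMD_FLAG_IW            0x04
#define IVMD_FLAG_IR            0x02
#define IVMD_FLAG_UNITY_MAP     0x01

struct toy_iommu {
        uint64_t excl_start;    /* only one exclusion range per IOMMU... */
        uint64_t excl_length;
};

int main(void)
{
        struct toy_iommu iommu = { 0, 0 };
        uint8_t flags = IVMD_FLAG_EXCL_RANGE;

        /* two IVMD exclusion entries aimed at the same IOMMU: the second
         * write silently overwrites the first range */
        iommu.excl_start = 0x100000; iommu.excl_length = 0x10000;
        iommu.excl_start = 0x800000; iommu.excl_length = 0x8000;
        assert(iommu.excl_start != 0x100000);

        /* the patch sidesteps this by rewriting such entries as r/w
         * unity-mapped regions, handled independently per IVMD entry */
        if (flags & IVMD_FLAG_EXCL_RANGE)
                flags = IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP;
        assert(flags == 0x07);
        return 0;
}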
+ */ + m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP); } /* @@ -1153,6 +1152,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, if (ret) return ret; + amd_iommu_apply_ivrs_quirks(); + /* * First save the recommended feature enable bits from ACPI */ @@ -1520,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; - if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0)) - amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; break; case 0x11: case 0x40: @@ -1531,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; - if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0)) - amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; + /* + * Note: Since iommu_update_intcapxt() leverages + * the IOMMU MMIO access to MSI capability block registers + * for MSI address lo/hi/data, we need to check both + * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support. + */ + if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) && + (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT))) + amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; break; default: return -EINVAL; @@ -1652,27 +1658,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, static void init_iommu_perf_ctr(struct amd_iommu *iommu) { struct pci_dev *pdev = iommu->dev; - u64 val = 0xabcd, val2 = 0; + u64 val = 0xabcd, val2 = 0, save_reg = 0; if (!iommu_feature(iommu, FEATURE_PC)) return; amd_iommu_pc_present = true; + /* save the value to restore, if writable */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) + goto pc_false; + /* Check if the performance counters can be written to */ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || - (val != val2)) { - pci_err(pdev, "Unable to write to IOMMU perf counter.\n"); - amd_iommu_pc_present = false; - return; - } + (val != val2)) + goto pc_false; + + /* restore */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) + goto pc_false; pci_info(pdev, "IOMMU performance counters supported\n"); val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); iommu->max_banks = (u8) ((val >> 12) & 0x3f); iommu->max_counters = (u8) ((val >> 7) & 0xf); + + return; + +pc_false: + pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; + return; } static ssize_t amd_iommu_show_cap(struct device *dev, @@ -1712,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = { static int __init iommu_init_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; - u32 range, misc, low, high; int ret; iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), @@ -1725,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, - &range); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, - &misc); if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) amd_iommu_iotlb_sup = false; /* read extended feature bits */ - low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); - high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); - 
- iommu->features = ((u64)high << 32) | low; + iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); if (iommu_feature(iommu, FEATURE_GT)) { int glxval; @@ -1981,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu) struct irq_affinity_notify *notify = &iommu->intcapxt_notify; /** - * IntCapXT requires XTSup=1, which can be inferred - * amd_iommu_xt_mode. + * IntCapXT requires XTSup=1 and MsiCapMmioSup=1, + * which can be inferred from amd_iommu_xt_mode. */ if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE) return 0; @@ -2029,7 +2039,7 @@ enable_faults: iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); if (iommu->ppr_log != NULL) - iommu_feature_enable(iommu, CONTROL_PPFINT_EN); + iommu_feature_enable(iommu, CONTROL_PPRINT_EN); iommu_ga_log_enable(iommu); diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c new file mode 100644 index 000000000000..5120ce4fdce3 --- /dev/null +++ b/drivers/iommu/amd_iommu_quirks.c @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Quirks for AMD IOMMU + * + * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com> + */ + +#ifdef CONFIG_DMI +#include <linux/dmi.h> + +#include "amd_iommu.h" + +#define IVHD_SPECIAL_IOAPIC 1 + +struct ivrs_quirk_entry { + u8 id; + u16 devid; +}; + +enum { + DELL_INSPIRON_7375 = 0, + DELL_LATITUDE_5495, + LENOVO_IDEAPAD_330S_15ARR, +}; + +static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = { + /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */ + [DELL_INSPIRON_7375] = { + { .id = 4, .devid = 0xa0 }, + { .id = 5, .devid = 0x2 }, + {} + }, + /* ivrs_ioapic[4]=00:14.0 */ + [DELL_LATITUDE_5495] = { + { .id = 4, .devid = 0xa0 }, + {} + }, + /* ivrs_ioapic[32]=00:14.0 */ + [LENOVO_IDEAPAD_330S_15ARR] = { + { .id = 32, .devid = 0xa0 }, + {} + }, + {} +}; + +static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d) +{ + const struct ivrs_quirk_entry *i; + + for (i = d->driver_data; i->id != 0 && i->devid != 0; i++) + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0); + + return 0; +} + +static const struct dmi_system_id ivrs_quirks[] __initconst = { + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Inspiron 7375", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Latitude 5495", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { + /* + * Acer Aspire A315-41 requires the very same workaround as + * Dell Latitude 5495 + */ + .callback = ivrs_ioapic_quirk_cb, + .ident = "Acer Aspire A315-41", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Lenovo ideapad 330S-15ARR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FB"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR], + }, + {} +}; + +void __init amd_iommu_apply_ivrs_quirks(void) +{ + dmi_check_system(ivrs_quirks); +} +#endif diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 64edd5a9694c..f8d01d6b00da 100644 --- a/drivers/iommu/amd_iommu_types.h +++ 
b/drivers/iommu/amd_iommu_types.h @@ -130,8 +130,8 @@ #define EVENT_TYPE_INV_PPR_REQ 0x9 #define EVENT_DEVID_MASK 0xffff #define EVENT_DEVID_SHIFT 0 -#define EVENT_DOMID_MASK 0xffff -#define EVENT_DOMID_SHIFT 0 +#define EVENT_DOMID_MASK_LO 0xffff +#define EVENT_DOMID_MASK_HI 0xf0000 #define EVENT_FLAGS_MASK 0xfff #define EVENT_FLAGS_SHIFT 0x10 @@ -147,8 +147,8 @@ #define CONTROL_COHERENT_EN 0x0aULL #define CONTROL_ISOC_EN 0x0bULL #define CONTROL_CMDBUF_EN 0x0cULL -#define CONTROL_PPFLOG_EN 0x0dULL -#define CONTROL_PPFINT_EN 0x0eULL +#define CONTROL_PPRLOG_EN 0x0dULL +#define CONTROL_PPRINT_EN 0x0eULL #define CONTROL_PPR_EN 0x0fULL #define CONTROL_GT_EN 0x10ULL #define CONTROL_GA_EN 0x11ULL @@ -377,12 +377,12 @@ #define IOMMU_CAP_EFR 27 /* IOMMU Feature Reporting Field (for IVHD type 10h */ -#define IOMMU_FEAT_XTSUP_SHIFT 0 #define IOMMU_FEAT_GASUP_SHIFT 6 /* IOMMU Extended Feature Register (EFR) */ #define IOMMU_EFR_XTSUP_SHIFT 2 #define IOMMU_EFR_GASUP_SHIFT 7 +#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46 #define MAX_DOMAIN_ID 65536 @@ -463,19 +463,16 @@ struct amd_irte_ops; * independent of their use. */ struct protection_domain { - struct list_head list; /* for list of all protection domains */ struct list_head dev_list; /* List of all devices in this domain */ struct iommu_domain domain; /* generic domain handle used by iommu core code */ spinlock_t lock; /* mostly used to lock the page table*/ - struct mutex api_lock; /* protect page tables in the iommu-api path */ u16 id; /* the domain id written to the device table */ int mode; /* paging mode (0-6 levels) */ u64 *pt_root; /* page table root pointer */ int glx; /* Number of levels for GCR3 table */ u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ - bool updated; /* complete domain flush required */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ }; @@ -634,11 +631,14 @@ struct devid_map { * This struct contains device specific data for the IOMMU */ struct iommu_dev_data { + /*Protect against attach/detach races */ + spinlock_t lock; + struct list_head list; /* For domain->dev_list */ struct llist_node dev_data_list; /* For global dev_data_list */ struct protection_domain *domain; /* Domain the device is bound to */ + struct pci_dev *pdev; u16 devid; /* PCI Device ID */ - u16 alias; /* Alias Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ bool passthrough; /* Device is identity mapped */ struct { @@ -873,6 +873,15 @@ struct amd_ir_data { struct msi_msg msi_entry; void *entry; /* Pointer to union irte or struct irte_ga */ void *ref; /* Pointer to the actual irte */ + + /** + * Store information for activate/de-activate + * Guest virtual APIC mode during runtime. 
+ */ + struct irq_cfg *cfg; + int ga_vector; + int ga_root_ptr; + int ga_tag; }; struct amd_irte_ops { diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c new file mode 100644 index 000000000000..74d97a886e93 --- /dev/null +++ b/drivers/iommu/arm-smmu-impl.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Miscellaneous Arm SMMU implementation and integration quirks +// Copyright (C) 2019 Arm Limited + +#define pr_fmt(fmt) "arm-smmu: " fmt + +#include <linux/bitfield.h> +#include <linux/of.h> + +#include "arm-smmu.h" + + +static int arm_smmu_gr0_ns(int offset) +{ + switch(offset) { + case ARM_SMMU_GR0_sCR0: + case ARM_SMMU_GR0_sACR: + case ARM_SMMU_GR0_sGFSR: + case ARM_SMMU_GR0_sGFSYNR0: + case ARM_SMMU_GR0_sGFSYNR1: + case ARM_SMMU_GR0_sGFSYNR2: + return offset + 0x400; + default: + return offset; + } +} + +static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, + int offset) +{ + if (page == ARM_SMMU_GR0) + offset = arm_smmu_gr0_ns(offset); + return readl_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, + int offset, u32 val) +{ + if (page == ARM_SMMU_GR0) + offset = arm_smmu_gr0_ns(offset); + writel_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +/* Since we don't care for sGFAR, we can do without 64-bit accessors */ +static const struct arm_smmu_impl calxeda_impl = { + .read_reg = arm_smmu_read_ns, + .write_reg = arm_smmu_write_ns, +}; + + +struct cavium_smmu { + struct arm_smmu_device smmu; + u32 id_base; +}; + +static int cavium_cfg_probe(struct arm_smmu_device *smmu) +{ + static atomic_t context_count = ATOMIC_INIT(0); + struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu); + /* + * Cavium CN88xx erratum #27704. + * Ensure ASID and VMID allocation is unique across all SMMUs in + * the system. + */ + cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count); + dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); + + return 0; +} + +static int cavium_init_context(struct arm_smmu_domain *smmu_domain) +{ + struct cavium_smmu *cs = container_of(smmu_domain->smmu, + struct cavium_smmu, smmu); + + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) + smmu_domain->cfg.vmid += cs->id_base; + else + smmu_domain->cfg.asid += cs->id_base; + + return 0; +} + +static const struct arm_smmu_impl cavium_impl = { + .cfg_probe = cavium_cfg_probe, + .init_context = cavium_init_context, +}; + +static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu) +{ + struct cavium_smmu *cs; + + cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL); + if (!cs) + return ERR_PTR(-ENOMEM); + + cs->smmu = *smmu; + cs->smmu.impl = &cavium_impl; + + devm_kfree(smmu->dev, smmu); + + return &cs->smmu; +} + + +#define ARM_MMU500_ACTLR_CPRE (1 << 1) + +#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) +#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10) +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) + +int arm_mmu500_reset(struct arm_smmu_device *smmu) +{ + u32 reg, major; + int i; + /* + * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before + * writes to the context bank ACTLRs will stick. And we just hope that + * Secure has also cleared SACR.CACHE_LOCK for this to take effect... 
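The cavium_cfg_probe()/cavium_init_context() hunks above keep ASIDs and VMIDs unique across every SMMU in the system by handing each instance a private id_base carved out of one global counter. A standalone sketch of that partitioning scheme, with made-up bank counts and plain C11 atomics instead of the kernel's atomic_t:

#include <assert.h>
#include <stdatomic.h>

static atomic_int context_count;        /* one counter shared by every SMMU */

/* each probed SMMU claims a contiguous block of ASID/VMID space */
static int claim_id_base(int num_context_banks)
{
        return atomic_fetch_add(&context_count, num_context_banks);
}

int main(void)
{
        int base0 = claim_id_base(8);   /* hypothetical first SMMU, 8 context banks */
        int base1 = claim_id_base(8);   /* hypothetical second SMMU */

        assert(base0 == 0 && base1 == 8);
        /* ASID 3 inside each domain now maps to a globally unique value */
        assert(base0 + 3 != base1 + 3);
        return 0;
}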
+ */ + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7); + major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR); + if (major >= 2) + reg &= ~ARM_MMU500_ACR_CACHE_LOCK; + /* + * Allow unmatched Stream IDs to allocate bypass + * TLB entries for reduced latency. + */ + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN; + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg); + + /* + * Disable MMU-500's not-particularly-beneficial next-page + * prefetcher for the sake of errata #841119 and #826419. + */ + for (i = 0; i < smmu->num_context_banks; ++i) { + reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); + reg &= ~ARM_MMU500_ACTLR_CPRE; + arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); + } + + return 0; +} + +static const struct arm_smmu_impl arm_mmu500_impl = { + .reset = arm_mmu500_reset, +}; + + +struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) +{ + /* + * We will inevitably have to combine model-specific implementation + * quirks with platform-specific integration quirks, but everything + * we currently support happens to work out as straightforward + * mutually-exclusive assignments. + */ + switch (smmu->model) { + case ARM_MMU500: + smmu->impl = &arm_mmu500_impl; + break; + case CAVIUM_SMMUV2: + return cavium_smmu_impl_init(smmu); + default: + break; + } + + if (of_property_read_bool(smmu->dev->of_node, + "calxeda,smmu-secure-config-access")) + smmu->impl = &calxeda_impl; + + if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500")) + return qcom_smmu_impl_init(smmu); + + return smmu; +} diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c new file mode 100644 index 000000000000..24c071c1d8b0 --- /dev/null +++ b/drivers/iommu/arm-smmu-qcom.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#include <linux/qcom_scm.h> + +#include "arm-smmu.h" + +struct qcom_smmu { + struct arm_smmu_device smmu; +}; + +static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu) +{ + int ret; + + arm_mmu500_reset(smmu); + + /* + * To address performance degradation in non-real time clients, + * such as USB and UFS, turn off wait-for-safe on sdm845 based boards, + * such as MTP and db845, whose firmwares implement secure monitor + * call handlers to turn on/off the wait-for-safe logic. + */ + ret = qcom_scm_qsmmu500_wait_safe_toggle(0); + if (ret) + dev_warn(smmu->dev, "Failed to turn off SAFE logic\n"); + + return ret; +} + +static const struct arm_smmu_impl qcom_smmu_impl = { + .reset = qcom_sdm845_smmu500_reset, +}; + +struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) +{ + struct qcom_smmu *qsmmu; + + qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL); + if (!qsmmu) + return ERR_PTR(-ENOMEM); + + qsmmu->smmu = *smmu; + + qsmmu->smmu.impl = &qcom_smmu_impl; + devm_kfree(smmu->dev, smmu); + + return &qsmmu->smmu; +} diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h deleted file mode 100644 index 1c278f7ae888..000000000000 --- a/drivers/iommu/arm-smmu-regs.h +++ /dev/null @@ -1,210 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * IOMMU API for ARM architected SMMU implementations. 
- * - * Copyright (C) 2013 ARM Limited - * - * Author: Will Deacon <will.deacon@arm.com> - */ - -#ifndef _ARM_SMMU_REGS_H -#define _ARM_SMMU_REGS_H - -/* Configuration registers */ -#define ARM_SMMU_GR0_sCR0 0x0 -#define sCR0_CLIENTPD (1 << 0) -#define sCR0_GFRE (1 << 1) -#define sCR0_GFIE (1 << 2) -#define sCR0_EXIDENABLE (1 << 3) -#define sCR0_GCFGFRE (1 << 4) -#define sCR0_GCFGFIE (1 << 5) -#define sCR0_USFCFG (1 << 10) -#define sCR0_VMIDPNE (1 << 11) -#define sCR0_PTM (1 << 12) -#define sCR0_FB (1 << 13) -#define sCR0_VMID16EN (1 << 31) -#define sCR0_BSU_SHIFT 14 -#define sCR0_BSU_MASK 0x3 - -/* Auxiliary Configuration register */ -#define ARM_SMMU_GR0_sACR 0x10 - -/* Identification registers */ -#define ARM_SMMU_GR0_ID0 0x20 -#define ARM_SMMU_GR0_ID1 0x24 -#define ARM_SMMU_GR0_ID2 0x28 -#define ARM_SMMU_GR0_ID3 0x2c -#define ARM_SMMU_GR0_ID4 0x30 -#define ARM_SMMU_GR0_ID5 0x34 -#define ARM_SMMU_GR0_ID6 0x38 -#define ARM_SMMU_GR0_ID7 0x3c -#define ARM_SMMU_GR0_sGFSR 0x48 -#define ARM_SMMU_GR0_sGFSYNR0 0x50 -#define ARM_SMMU_GR0_sGFSYNR1 0x54 -#define ARM_SMMU_GR0_sGFSYNR2 0x58 - -#define ID0_S1TS (1 << 30) -#define ID0_S2TS (1 << 29) -#define ID0_NTS (1 << 28) -#define ID0_SMS (1 << 27) -#define ID0_ATOSNS (1 << 26) -#define ID0_PTFS_NO_AARCH32 (1 << 25) -#define ID0_PTFS_NO_AARCH32S (1 << 24) -#define ID0_CTTW (1 << 14) -#define ID0_NUMIRPT_SHIFT 16 -#define ID0_NUMIRPT_MASK 0xff -#define ID0_NUMSIDB_SHIFT 9 -#define ID0_NUMSIDB_MASK 0xf -#define ID0_EXIDS (1 << 8) -#define ID0_NUMSMRG_SHIFT 0 -#define ID0_NUMSMRG_MASK 0xff - -#define ID1_PAGESIZE (1 << 31) -#define ID1_NUMPAGENDXB_SHIFT 28 -#define ID1_NUMPAGENDXB_MASK 7 -#define ID1_NUMS2CB_SHIFT 16 -#define ID1_NUMS2CB_MASK 0xff -#define ID1_NUMCB_SHIFT 0 -#define ID1_NUMCB_MASK 0xff - -#define ID2_OAS_SHIFT 4 -#define ID2_OAS_MASK 0xf -#define ID2_IAS_SHIFT 0 -#define ID2_IAS_MASK 0xf -#define ID2_UBS_SHIFT 8 -#define ID2_UBS_MASK 0xf -#define ID2_PTFS_4K (1 << 12) -#define ID2_PTFS_16K (1 << 13) -#define ID2_PTFS_64K (1 << 14) -#define ID2_VMID16 (1 << 15) - -#define ID7_MAJOR_SHIFT 4 -#define ID7_MAJOR_MASK 0xf - -/* Global TLB invalidation */ -#define ARM_SMMU_GR0_TLBIVMID 0x64 -#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 -#define ARM_SMMU_GR0_TLBIALLH 0x6c -#define ARM_SMMU_GR0_sTLBGSYNC 0x70 -#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 -#define sTLBGSTATUS_GSACTIVE (1 << 0) - -/* Stream mapping registers */ -#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) -#define SMR_VALID (1 << 31) -#define SMR_MASK_SHIFT 16 -#define SMR_ID_SHIFT 0 - -#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) -#define S2CR_CBNDX_SHIFT 0 -#define S2CR_CBNDX_MASK 0xff -#define S2CR_EXIDVALID (1 << 10) -#define S2CR_TYPE_SHIFT 16 -#define S2CR_TYPE_MASK 0x3 -enum arm_smmu_s2cr_type { - S2CR_TYPE_TRANS, - S2CR_TYPE_BYPASS, - S2CR_TYPE_FAULT, -}; - -#define S2CR_PRIVCFG_SHIFT 24 -#define S2CR_PRIVCFG_MASK 0x3 -enum arm_smmu_s2cr_privcfg { - S2CR_PRIVCFG_DEFAULT, - S2CR_PRIVCFG_DIPAN, - S2CR_PRIVCFG_UNPRIV, - S2CR_PRIVCFG_PRIV, -}; - -/* Context bank attribute registers */ -#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) -#define CBAR_VMID_SHIFT 0 -#define CBAR_VMID_MASK 0xff -#define CBAR_S1_BPSHCFG_SHIFT 8 -#define CBAR_S1_BPSHCFG_MASK 3 -#define CBAR_S1_BPSHCFG_NSH 3 -#define CBAR_S1_MEMATTR_SHIFT 12 -#define CBAR_S1_MEMATTR_MASK 0xf -#define CBAR_S1_MEMATTR_WB 0xf -#define CBAR_TYPE_SHIFT 16 -#define CBAR_TYPE_MASK 0x3 -#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) -#define 
CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) -#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) -#define CBAR_IRPTNDX_SHIFT 24 -#define CBAR_IRPTNDX_MASK 0xff - -#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) - -#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) -#define CBA2R_RW64_32BIT (0 << 0) -#define CBA2R_RW64_64BIT (1 << 0) -#define CBA2R_VMID_SHIFT 16 -#define CBA2R_VMID_MASK 0xffff - -#define ARM_SMMU_CB_SCTLR 0x0 -#define ARM_SMMU_CB_ACTLR 0x4 -#define ARM_SMMU_CB_RESUME 0x8 -#define ARM_SMMU_CB_TTBCR2 0x10 -#define ARM_SMMU_CB_TTBR0 0x20 -#define ARM_SMMU_CB_TTBR1 0x28 -#define ARM_SMMU_CB_TTBCR 0x30 -#define ARM_SMMU_CB_CONTEXTIDR 0x34 -#define ARM_SMMU_CB_S1_MAIR0 0x38 -#define ARM_SMMU_CB_S1_MAIR1 0x3c -#define ARM_SMMU_CB_PAR 0x50 -#define ARM_SMMU_CB_FSR 0x58 -#define ARM_SMMU_CB_FAR 0x60 -#define ARM_SMMU_CB_FSYNR0 0x68 -#define ARM_SMMU_CB_S1_TLBIVA 0x600 -#define ARM_SMMU_CB_S1_TLBIASID 0x610 -#define ARM_SMMU_CB_S1_TLBIVAL 0x620 -#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 -#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 -#define ARM_SMMU_CB_TLBSYNC 0x7f0 -#define ARM_SMMU_CB_TLBSTATUS 0x7f4 -#define ARM_SMMU_CB_ATS1PR 0x800 -#define ARM_SMMU_CB_ATSR 0x8f0 - -#define SCTLR_S1_ASIDPNE (1 << 12) -#define SCTLR_CFCFG (1 << 7) -#define SCTLR_CFIE (1 << 6) -#define SCTLR_CFRE (1 << 5) -#define SCTLR_E (1 << 4) -#define SCTLR_AFE (1 << 2) -#define SCTLR_TRE (1 << 1) -#define SCTLR_M (1 << 0) - -#define CB_PAR_F (1 << 0) - -#define ATSR_ACTIVE (1 << 0) - -#define RESUME_RETRY (0 << 0) -#define RESUME_TERMINATE (1 << 0) - -#define TTBCR2_SEP_SHIFT 15 -#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) -#define TTBCR2_AS (1 << 4) - -#define TTBRn_ASID_SHIFT 48 - -#define FSR_MULTI (1 << 31) -#define FSR_SS (1 << 30) -#define FSR_UUT (1 << 8) -#define FSR_ASF (1 << 7) -#define FSR_TLBLKF (1 << 6) -#define FSR_TLBMCF (1 << 5) -#define FSR_EF (1 << 4) -#define FSR_PF (1 << 3) -#define FSR_AFF (1 << 2) -#define FSR_TF (1 << 1) - -#define FSR_IGN (FSR_AFF | FSR_ASF | \ - FSR_TLBMCF | FSR_TLBLKF) -#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ - FSR_EF | FSR_PF | FSR_TF | FSR_IGN) - -#define FSYNR0_WNR (1 << 4) - -#endif /* _ARM_SMMU_REGS_H */ diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index c5c93e48b4db..aa3ac2a03807 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -21,8 +21,7 @@ #include <linux/io-pgtable.h> #include <linux/iommu.h> #include <linux/iopoll.h> -#include <linux/init.h> -#include <linux/moduleparam.h> +#include <linux/module.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_address.h> @@ -181,12 +180,13 @@ #define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1 #define ARM_SMMU_MEMATTR_OIWB 0xf -#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) -#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) -#define Q_OVERFLOW_FLAG (1 << 31) -#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG) +#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1)) +#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift)) +#define Q_OVERFLOW_FLAG (1U << 31) +#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ - Q_IDX(q, p) * (q)->ent_dwords) + Q_IDX(&((q)->llq), p) * \ + (q)->ent_dwords) #define Q_BASE_RWA (1UL << 62) #define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5) @@ -223,9 +223,15 @@ #define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4) #define STRTAB_STE_0_S1FMT_LINEAR 0 +#define STRTAB_STE_0_S1FMT_64K_L2 2 #define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6) #define STRTAB_STE_0_S1CDMAX 
GENMASK_ULL(63, 59) +#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0) +#define STRTAB_STE_1_S1DSS_TERMINATE 0x0 +#define STRTAB_STE_1_S1DSS_BYPASS 0x1 +#define STRTAB_STE_1_S1DSS_SSID0 0x2 + #define STRTAB_STE_1_S1C_CACHE_NC 0UL #define STRTAB_STE_1_S1C_CACHE_WBRA 1UL #define STRTAB_STE_1_S1C_CACHE_WT 2UL @@ -250,6 +256,13 @@ #define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0) #define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32) +#define STRTAB_STE_2_VTCR_S2T0SZ GENMASK_ULL(5, 0) +#define STRTAB_STE_2_VTCR_S2SL0 GENMASK_ULL(7, 6) +#define STRTAB_STE_2_VTCR_S2IR0 GENMASK_ULL(9, 8) +#define STRTAB_STE_2_VTCR_S2OR0 GENMASK_ULL(11, 10) +#define STRTAB_STE_2_VTCR_S2SH0 GENMASK_ULL(13, 12) +#define STRTAB_STE_2_VTCR_S2TG GENMASK_ULL(15, 14) +#define STRTAB_STE_2_VTCR_S2PS GENMASK_ULL(18, 16) #define STRTAB_STE_2_S2AA64 (1UL << 51) #define STRTAB_STE_2_S2ENDI (1UL << 52) #define STRTAB_STE_2_S2PTW (1UL << 54) @@ -257,30 +270,34 @@ #define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4) -/* Context descriptor (stage-1 only) */ +/* + * Context descriptors. + * + * Linear: when less than 1024 SSIDs are supported + * 2lvl: at most 1024 L1 entries, + * 1024 lazy entries per table. + */ +#define CTXDESC_SPLIT 10 +#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT) + +#define CTXDESC_L1_DESC_DWORDS 1 +#define CTXDESC_L1_DESC_V (1UL << 0) +#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12) + #define CTXDESC_CD_DWORDS 8 #define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0) -#define ARM64_TCR_T0SZ GENMASK_ULL(5, 0) #define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6) -#define ARM64_TCR_TG0 GENMASK_ULL(15, 14) #define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8) -#define ARM64_TCR_IRGN0 GENMASK_ULL(9, 8) #define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10) -#define ARM64_TCR_ORGN0 GENMASK_ULL(11, 10) #define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12) -#define ARM64_TCR_SH0 GENMASK_ULL(13, 12) #define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14) -#define ARM64_TCR_EPD0 (1ULL << 7) #define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30) -#define ARM64_TCR_EPD1 (1ULL << 23) #define CTXDESC_CD_0_ENDI (1UL << 15) #define CTXDESC_CD_0_V (1UL << 31) #define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32) -#define ARM64_TCR_IPS GENMASK_ULL(34, 32) #define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38) -#define ARM64_TCR_TBI0 (1ULL << 37) #define CTXDESC_CD_0_AA64 (1UL << 41) #define CTXDESC_CD_0_S (1UL << 44) @@ -291,9 +308,11 @@ #define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4) -/* Convert between AArch64 (CPU) TCR format and SMMU CD format */ -#define ARM_SMMU_TCR2CD(tcr, fld) FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \ - FIELD_GET(ARM64_TCR_##fld, tcr)) +/* + * When the SMMU only supports linear context descriptor tables, pick a + * reasonable size limit (64kB). + */ +#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3)) /* Command queue */ #define CMDQ_ENT_SZ_SHIFT 4 @@ -306,6 +325,15 @@ #define CMDQ_ERR_CERROR_ABT_IDX 2 #define CMDQ_ERR_CERROR_ATC_INV_IDX 3 +#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG + +/* + * This is used to size the command queue and therefore must be at least + * BITS_PER_LONG so that the valid_map works correctly (it relies on the + * total number of queue entries being a multiple of BITS_PER_LONG). 
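The reworked Q_IDX()/Q_WRP()/Q_OVF() macros earlier in this file, together with the sizing comment above (and the CMDQ_BATCH_ENTRIES definition just below), all assume that the prod/cons words pack three fields: an index in the low max_n_shift bits, a wrap bit directly above it, and an overflow flag in bit 31, with the queue sized in multiples of BITS_PER_LONG so the valid_map bitmap divides evenly into full words. A standalone check of that arithmetic, assuming a hypothetical max_n_shift of 8 (a 256-entry queue):

#include <assert.h>
#include <stdint.h>

#define MAX_N_SHIFT     8       /* hypothetical 256-entry queue for the example */
#define Q_IDX(p)        ((p) & ((1u << MAX_N_SHIFT) - 1))
#define Q_WRP(p)        ((p) & (1u << MAX_N_SHIFT))
#define Q_OVF(p)        ((p) & (1u << 31))

/* mirrors queue_inc_prod_n() for a single step */
static uint32_t queue_inc(uint32_t p)
{
        uint32_t n = (Q_WRP(p) | Q_IDX(p)) + 1;

        return Q_OVF(p) | Q_WRP(n) | Q_IDX(n);
}

int main(void)
{
        uint32_t prod = 0, cons = 0;
        int i;

        for (i = 0; i < 256; i++)
                prod = queue_inc(prod);

        /* the index is back at 0 but the wrap bit has flipped, which is
         * exactly the queue_full() condition against an untouched cons */
        assert(Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons));

        /* the overflow flag in bit 31 is untouched by normal advancement */
        assert(Q_OVF(prod) == 0);
        return 0;
}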
+ */ +#define CMDQ_BATCH_ENTRIES BITS_PER_LONG + #define CMDQ_0_OP GENMASK_ULL(7, 0) #define CMDQ_0_SSV (1UL << 11) @@ -313,6 +341,7 @@ #define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0) #define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12) +#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12) #define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32) #define CMDQ_CFGI_1_LEAF (1UL << 0) #define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0) @@ -368,17 +397,12 @@ #define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12) /* High-level queue structures */ -#define ARM_SMMU_POLL_TIMEOUT_US 100 -#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */ -#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10 +#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */ +#define ARM_SMMU_POLL_SPIN_COUNT 10 #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 -/* - * not really modular, but the easiest way to keep compat with existing - * bootargs behaviour is to continue using module_param_named here. - */ static bool disable_bypass = 1; module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, @@ -431,8 +455,11 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_CFGI_STE 0x3 #define CMDQ_OP_CFGI_ALL 0x4 + #define CMDQ_OP_CFGI_CD 0x5 + #define CMDQ_OP_CFGI_CD_ALL 0x6 struct { u32 sid; + u32 ssid; union { bool leaf; u8 span; @@ -472,13 +499,29 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_CMD_SYNC 0x46 struct { - u32 msidata; u64 msiaddr; } sync; }; }; +struct arm_smmu_ll_queue { + union { + u64 val; + struct { + u32 prod; + u32 cons; + }; + struct { + atomic_t prod; + atomic_t cons; + } atomic; + u8 __pad[SMP_CACHE_BYTES]; + } ____cacheline_aligned_in_smp; + u32 max_n_shift; +}; + struct arm_smmu_queue { + struct arm_smmu_ll_queue llq; int irq; /* Wired interrupt */ __le64 *base; @@ -486,17 +529,23 @@ struct arm_smmu_queue { u64 q_base; size_t ent_dwords; - u32 max_n_shift; - u32 prod; - u32 cons; u32 __iomem *prod_reg; u32 __iomem *cons_reg; }; +struct arm_smmu_queue_poll { + ktime_t timeout; + unsigned int delay; + unsigned int spin_cnt; + bool wfe; +}; + struct arm_smmu_cmdq { struct arm_smmu_queue q; - spinlock_t lock; + atomic_long_t *valid_map; + atomic_t owner_prod; + atomic_t lock; }; struct arm_smmu_evtq { @@ -516,16 +565,30 @@ struct arm_smmu_strtab_l1_desc { dma_addr_t l2ptr_dma; }; +struct arm_smmu_ctx_desc { + u16 asid; + u64 ttbr; + u64 tcr; + u64 mair; +}; + +struct arm_smmu_l1_ctx_desc { + __le64 *l2ptr; + dma_addr_t l2ptr_dma; +}; + +struct arm_smmu_ctx_desc_cfg { + __le64 *cdtab; + dma_addr_t cdtab_dma; + struct arm_smmu_l1_ctx_desc *l1_desc; + unsigned int num_l1_ents; +}; + struct arm_smmu_s1_cfg { - __le64 *cdptr; - dma_addr_t cdptr_dma; - - struct arm_smmu_ctx_desc { - u16 asid; - u64 ttbr; - u64 tcr; - u64 mair; - } cd; + struct arm_smmu_ctx_desc_cfg cdcfg; + struct arm_smmu_ctx_desc cd; + u8 s1fmt; + u8 s1cdmax; }; struct arm_smmu_s2_cfg { @@ -576,8 +639,6 @@ struct arm_smmu_device { int gerr_irq; int combined_irq; - u32 sync_nr; - u8 prev_cmd_opcode; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -596,12 +657,6 @@ struct arm_smmu_device { struct arm_smmu_strtab_cfg strtab_cfg; - /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */ - union { - u32 sync_count; - u64 padding; - }; - /* IOMMU core code handle */ struct iommu_device iommu; }; @@ -614,7 +669,8 @@ struct arm_smmu_master { struct list_head domain_head; u32 *sids; unsigned int num_sids; - bool ats_enabled :1; + bool ats_enabled; + unsigned int ssid_bits; }; /* SMMU private data for an IOMMU domain */ @@ -631,6 +687,7 
@@ struct arm_smmu_domain { struct io_pgtable_ops *pgtbl_ops; bool non_strict; + atomic_t nr_ats_masters; enum arm_smmu_domain_stage stage; union { @@ -685,85 +742,97 @@ static void parse_driver_options(struct arm_smmu_device *smmu) } /* Low-level queue manipulation functions */ -static bool queue_full(struct arm_smmu_queue *q) +static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n) +{ + u32 space, prod, cons; + + prod = Q_IDX(q, q->prod); + cons = Q_IDX(q, q->cons); + + if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) + space = (1 << q->max_n_shift) - (prod - cons); + else + space = cons - prod; + + return space >= n; +} + +static bool queue_full(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && Q_WRP(q, q->prod) != Q_WRP(q, q->cons); } -static bool queue_empty(struct arm_smmu_queue *q) +static bool queue_empty(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && Q_WRP(q, q->prod) == Q_WRP(q, q->cons); } -static void queue_sync_cons(struct arm_smmu_queue *q) +static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod) { - q->cons = readl_relaxed(q->cons_reg); + return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || + ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); } -static void queue_inc_cons(struct arm_smmu_queue *q) +static void queue_sync_cons_out(struct arm_smmu_queue *q) { - u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; - - q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); - /* * Ensure that all CPU accesses (reads and writes) to the queue * are complete before we update the cons pointer. */ mb(); - writel_relaxed(q->cons, q->cons_reg); + writel_relaxed(q->llq.cons, q->cons_reg); +} + +static void queue_inc_cons(struct arm_smmu_ll_queue *q) +{ + u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; + q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); } -static int queue_sync_prod(struct arm_smmu_queue *q) +static int queue_sync_prod_in(struct arm_smmu_queue *q) { int ret = 0; u32 prod = readl_relaxed(q->prod_reg); - if (Q_OVF(q, prod) != Q_OVF(q, q->prod)) + if (Q_OVF(prod) != Q_OVF(q->llq.prod)) ret = -EOVERFLOW; - q->prod = prod; + q->llq.prod = prod; return ret; } -static void queue_inc_prod(struct arm_smmu_queue *q) +static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n) { - u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; - - q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); - writel(q->prod, q->prod_reg); + u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; + return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } -/* - * Wait for the SMMU to consume items. If sync is true, wait until the queue - * is empty. Otherwise, wait until there is at least one free slot. - */ -static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) +static void queue_poll_init(struct arm_smmu_device *smmu, + struct arm_smmu_queue_poll *qp) { - ktime_t timeout; - unsigned int delay = 1, spin_cnt = 0; - - /* Wait longer if it's a CMD_SYNC */ - timeout = ktime_add_us(ktime_get(), sync ? - ARM_SMMU_CMDQ_SYNC_TIMEOUT_US : - ARM_SMMU_POLL_TIMEOUT_US); + qp->delay = 1; + qp->spin_cnt = 0; + qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); +} - while (queue_sync_cons(q), (sync ? 
!queue_empty(q) : queue_full(q))) { - if (ktime_compare(ktime_get(), timeout) > 0) - return -ETIMEDOUT; +static int queue_poll(struct arm_smmu_queue_poll *qp) +{ + if (ktime_compare(ktime_get(), qp->timeout) > 0) + return -ETIMEDOUT; - if (wfe) { - wfe(); - } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) { - cpu_relax(); - continue; - } else { - udelay(delay); - delay *= 2; - spin_cnt = 0; - } + if (qp->wfe) { + wfe(); + } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { + cpu_relax(); + } else { + udelay(qp->delay); + qp->delay *= 2; + qp->spin_cnt = 0; } return 0; @@ -777,16 +846,6 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords) *dst++ = cpu_to_le64(*src++); } -static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) -{ - if (queue_full(q)) - return -ENOSPC; - - queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords); - queue_inc_prod(q); - return 0; -} - static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) { int i; @@ -797,11 +856,12 @@ static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) { - if (queue_empty(q)) + if (queue_empty(&q->llq)) return -EAGAIN; - queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords); - queue_inc_cons(q); + queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); + queue_inc_cons(&q->llq); + queue_sync_cons_out(q); return 0; } @@ -820,15 +880,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size); cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK; break; + case CMDQ_OP_CFGI_CD: + cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); + /* Fallthrough */ case CMDQ_OP_CFGI_STE: cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); break; + case CMDQ_OP_CFGI_CD_ALL: + cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); + break; case CMDQ_OP_CFGI_ALL: /* Cover the entire SID range */ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31); break; case CMDQ_OP_TLBI_NH_VA: + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; @@ -868,20 +935,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); break; case CMDQ_OP_CMD_SYNC: - if (ent->sync.msiaddr) + if (ent->sync.msiaddr) { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ); - else + cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; + } else { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV); + } cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH); cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB); - /* - * Commands are written little-endian, but we want the SMMU to - * receive MSIData, and thus write it back to memory, in CPU - * byte order, so big-endian needs an extra byteswap here. 
- */ - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, - cpu_to_le32(ent->sync.msidata)); - cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; break; default: return -ENOENT; @@ -890,6 +951,27 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) return 0; } +static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, + u32 prod) +{ + struct arm_smmu_queue *q = &smmu->cmdq.q; + struct arm_smmu_cmdq_ent ent = { + .opcode = CMDQ_OP_CMD_SYNC, + }; + + /* + * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI + * payload, so the write will zero the entire command on that platform. + */ + if (smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) { + ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * + q->ent_dwords * 8; + } + + arm_smmu_cmdq_build_cmd(cmd, &ent); +} + static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) { static const char *cerror_str[] = { @@ -948,156 +1030,691 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); } -static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd) +/* + * Command queue locking. + * This is a form of bastardised rwlock with the following major changes: + * + * - The only LOCK routines are exclusive_trylock() and shared_lock(). + * Neither have barrier semantics, and instead provide only a control + * dependency. + * + * - The UNLOCK routines are supplemented with shared_tryunlock(), which + * fails if the caller appears to be the last lock holder (yes, this is + * racy). All successful UNLOCK routines have RELEASE semantics. + */ +static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) { - struct arm_smmu_queue *q = &smmu->cmdq.q; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + int val; - smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]); + /* + * We can try to avoid the cmpxchg() loop by simply incrementing the + * lock counter. When held in exclusive state, the lock counter is set + * to INT_MIN so these increments won't hurt as the value will remain + * negative. + */ + if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) + return; - while (queue_insert_raw(q, cmd) == -ENOSPC) { - if (queue_poll_cons(q, false, wfe)) - dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + do { + val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); + } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); +} + +static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) +{ + (void)atomic_dec_return_release(&cmdq->lock); +} + +static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) +{ + if (atomic_read(&cmdq->lock) == 1) + return false; + + arm_smmu_cmdq_shared_unlock(cmdq); + return true; +} + +#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ +({ \ + bool __ret; \ + local_irq_save(flags); \ + __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \ + if (!__ret) \ + local_irq_restore(flags); \ + __ret; \ +}) + +#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ +({ \ + atomic_set_release(&cmdq->lock, 0); \ + local_irq_restore(flags); \ +}) + + +/* + * Command queue insertion. + * This is made fiddly by our attempts to achieve some sort of scalability + * since there is one queue shared amongst all of the CPUs in the system. If + * you like mixed-size concurrency, dependency ordering and relaxed atomics, + * then you'll *love* this monstrosity. 
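The arm_smmu_cmdq_shared_lock()/arm_smmu_cmdq_exclusive_trylock_irqsave() helpers above implement the "bastardised rwlock" described in the comment with a single atomic counter: positive values count shared holders, INT_MIN marks the exclusive holder. A minimal single-threaded model of those state transitions in plain C11 atomics, leaving out the relaxed/acquire/release ordering, shared_tryunlock() and the IRQ masking that the real helpers need:

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lock;

static void shared_lock(void)
{
        int val;

        /*
         * Fast path: if nobody holds the lock exclusively the counter is
         * >= 0 and the increment takes it.  If it is held exclusively
         * (INT_MIN) the increment leaves the counter negative and is
         * harmless: exclusive_unlock() simply resets it to 0.
         */
        if (atomic_fetch_add(&lock, 1) >= 0)
                return;

        do {
                while ((val = atomic_load(&lock)) < 0)
                        ;                       /* wait out the exclusive holder */
        } while (!atomic_compare_exchange_weak(&lock, &val, val + 1));
}

static void shared_unlock(void)
{
        atomic_fetch_sub(&lock, 1);
}

static bool exclusive_trylock(void)
{
        int expected = 0;

        /* only succeeds when there are no holders at all */
        return atomic_compare_exchange_strong(&lock, &expected, INT_MIN);
}

static void exclusive_unlock(void)
{
        atomic_store(&lock, 0);         /* also discards any stray shared increments */
}

int main(void)
{
        assert(exclusive_trylock());
        assert(!exclusive_trylock());           /* already held exclusively */
        exclusive_unlock();

        shared_lock();
        assert(!exclusive_trylock());           /* shared holders block exclusive */
        shared_unlock();
        assert(exclusive_trylock());
        return 0;
}

As the comment above notes, the kernel versions deliberately provide only a control dependency rather than barrier semantics; the callers add their own ordering (dma_wmb(), readl()) where it matters.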
+ * + * The basic idea is to split the queue up into ranges of commands that are + * owned by a given CPU; the owner may not have written all of the commands + * itself, but is responsible for advancing the hardware prod pointer when + * the time comes. The algorithm is roughly: + * + * 1. Allocate some space in the queue. At this point we also discover + * whether the head of the queue is currently owned by another CPU, + * or whether we are the owner. + * + * 2. Write our commands into our allocated slots in the queue. + * + * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map. + * + * 4. If we are an owner: + * a. Wait for the previous owner to finish. + * b. Mark the queue head as unowned, which tells us the range + * that we are responsible for publishing. + * c. Wait for all commands in our owned range to become valid. + * d. Advance the hardware prod pointer. + * e. Tell the next owner we've finished. + * + * 5. If we are inserting a CMD_SYNC (we may or may not have been an + * owner), then we need to stick around until it has completed: + * a. If we have MSIs, the SMMU can write back into the CMD_SYNC + * to clear the first 4 bytes. + * b. Otherwise, we spin waiting for the hardware cons pointer to + * advance past our command. + * + * The devil is in the details, particularly the use of locking for handling + * SYNC completion and freeing up space in the queue before we think that it is + * full. + */ +static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod, bool set) +{ + u32 swidx, sbidx, ewidx, ebidx; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = sprod, + }; + + ewidx = BIT_WORD(Q_IDX(&llq, eprod)); + ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG; + + while (llq.prod != eprod) { + unsigned long mask; + atomic_long_t *ptr; + u32 limit = BITS_PER_LONG; + + swidx = BIT_WORD(Q_IDX(&llq, llq.prod)); + sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG; + + ptr = &cmdq->valid_map[swidx]; + + if ((swidx == ewidx) && (sbidx < ebidx)) + limit = ebidx; + + mask = GENMASK(limit - 1, sbidx); + + /* + * The valid bit is the inverse of the wrap bit. This means + * that a zero-initialised queue is invalid and, after marking + * all entries as valid, they become invalid again when we + * wrap. 
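The observation above ("the valid bit is the inverse of the wrap bit") is the whole trick behind the xor and polling branches just below: producers mark their slots by XOR-ing a mask into valid_map, and pollers derive the expected bit pattern from the wrap bit of the prod value they are waiting on. A standalone single-word model of two passes over the same slots (no atomics, purely illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t valid_map = 0;         /* zero-initialised: nothing is valid */
        uint64_t mask = 0xffu;          /* the eight slots some CPU just wrote */
        int wrap;

        /* first lap: the prod wrap bit is clear, so "valid" means bits set */
        valid_map ^= mask;                              /* producer marks its slots */
        wrap = 0;
        assert((valid_map & mask) == (wrap ? 0 : mask));        /* poller satisfied */

        /* second lap: the same slots are reused after the queue wraps; the
         * stale 1s now mean "not yet valid" and marking them valid clears them */
        valid_map ^= mask;                              /* producer on the next lap */
        wrap = 1;
        assert((valid_map & mask) == (wrap ? 0 : mask));        /* poller expects 0 */

        /* this is what the poll branch computes as (ULONG_MAX + !!Q_WRP(...)) & mask:
         * all-ones when the wrap bit is clear, zero (by unsigned overflow) when set */
        return 0;
}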
+ */ + if (set) { + atomic_long_xor(mask, ptr); + } else { /* Poll */ + unsigned long valid; + + valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask; + atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid); + } + + llq.prod = queue_inc_prod_n(&llq, limit - sbidx); } } -static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, - struct arm_smmu_cmdq_ent *ent) +/* Mark all entries in the range [sprod, eprod) as valid */ +static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); +} + +/* Wait for all entries in the range [sprod, eprod) to become valid */ +static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); +} + +/* Wait for the command queue to become non-full */ +static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; unsigned long flags; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + int ret = 0; - if (arm_smmu_cmdq_build_cmd(cmd, ent)) { - dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", - ent->opcode); - return; + /* + * Try to update our copy of cons by grabbing exclusive cmdq access. If + * that fails, spin until somebody else updates it for us. + */ + if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { + WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); + arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); + llq->val = READ_ONCE(cmdq->q.llq.val); + return 0; } - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + queue_poll_init(smmu, &qp); + do { + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + if (!queue_full(llq)) + break; + + ret = queue_poll(&qp); + } while (!ret); + + return ret; } /* - * The difference between val and sync_idx is bounded by the maximum size of - * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic. + * Wait until the SMMU signals a CMD_SYNC completion MSI. + * Must be called with the cmdq lock held in some capacity. */ -static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx) +static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - ktime_t timeout; - u32 val; + int ret = 0; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); - timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US); - val = smp_cond_load_acquire(&smmu->sync_count, - (int)(VAL - sync_idx) >= 0 || - !ktime_before(ktime_get(), timeout)); + queue_poll_init(smmu, &qp); - return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0; + /* + * The MSI won't generate an event, since it's being written back + * into the command queue. + */ + qp.wfe = false; + smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp))); + llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); + return ret; } -static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) +/* + * Wait until the SMMU cons index passes llq->prod. + * Must be called with the cmdq lock held in some capacity. 
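__arm_smmu_cmdq_poll_until_consumed(), which follows, spins until queue_consumed() (added earlier in this file) reports that the hardware cons pointer has moved past our prod snapshot, with both values free to wrap. A worked check of that wrap-aware predicate for a hypothetical 256-entry queue:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_N_SHIFT     8
#define Q_IDX(p)        ((p) & ((1u << MAX_N_SHIFT) - 1))
#define Q_WRP(p)        ((p) & (1u << MAX_N_SHIFT))

/* same predicate as queue_consumed(), applied to raw prod/cons words */
static bool consumed(uint32_t cons, uint32_t prod)
{
        return ((Q_WRP(cons) == Q_WRP(prod)) && (Q_IDX(cons) > Q_IDX(prod))) ||
               ((Q_WRP(cons) != Q_WRP(prod)) && (Q_IDX(cons) <= Q_IDX(prod)));
}

int main(void)
{
        uint32_t prod = 0x103;  /* our command sits at index 3 of the second lap */

        /* cons is still finishing the first lap: not ours yet */
        assert(!consumed(0x0fa, prod));         /* index 250, wrap 0 */

        /* cons has entered our lap but not passed index 3: still not ours */
        assert(!consumed(0x103, prod));         /* index 3, wrap 1 */

        /* cons is past us within the same lap: our command was consumed */
        assert(consumed(0x105, prod));          /* index 5, wrap 1 */
        return 0;
}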
+ */ +static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; - unsigned long flags; - struct arm_smmu_cmdq_ent ent = { - .opcode = CMDQ_OP_CMD_SYNC, - .sync = { - .msiaddr = virt_to_phys(&smmu->sync_count), - }, - }; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 prod = llq->prod; + int ret = 0; - spin_lock_irqsave(&smmu->cmdq.lock, flags); + queue_poll_init(smmu, &qp); + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + do { + if (queue_consumed(llq, prod)) + break; - /* Piggy-back on the previous command if it's a SYNC */ - if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) { - ent.sync.msidata = smmu->sync_nr; - } else { - ent.sync.msidata = ++smmu->sync_nr; - arm_smmu_cmdq_build_cmd(cmd, &ent); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - } + ret = queue_poll(&qp); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + /* + * This needs to be a readl() so that our subsequent call + * to arm_smmu_cmdq_shared_tryunlock() can fail accurately. + * + * Specifically, we need to ensure that we observe all + * shared_lock()s by other CMD_SYNCs that share our owner, + * so that a failing call to tryunlock() means that we're + * the last one out and therefore we can safely advance + * cmdq->q.llq.cons. Roughly speaking: + * + * CPU 0 CPU1 CPU2 (us) + * + * if (sync) + * shared_lock(); + * + * dma_wmb(); + * set_valid_map(); + * + * if (owner) { + * poll_valid_map(); + * <control dependency> + * writel(prod_reg); + * + * readl(cons_reg); + * tryunlock(); + * + * Requires us to see CPU 0's shared_lock() acquisition. + */ + llq->cons = readl(cmdq->q.cons_reg); + } while (!ret); - return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata); + return ret; } -static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; + if (smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) + return __arm_smmu_cmdq_poll_until_msi(smmu, llq); + + return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); +} + +static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, + u32 prod, int n) +{ + int i; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = prod, + }; + + for (i = 0; i < n; ++i) { + u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS]; + + prod = queue_inc_prod_n(&llq, i); + queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); + } +} + +/* + * This is the actual insertion function, and provides the following + * ordering guarantees to callers: + * + * - There is a dma_wmb() before publishing any commands to the queue. + * This can be relied upon to order prior writes to data structures + * in memory (such as a CD or an STE) before the command. + * + * - On completion of a CMD_SYNC, there is a control dependency. + * This can be relied upon to order subsequent writes to memory (e.g. + * freeing an IOVA) after completion of the CMD_SYNC. + * + * - Command insertion is totally ordered, so if two CPUs each race to + * insert their own list of commands then all of the commands from one + * CPU will appear before any of the commands from the other CPU. 
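arm_smmu_cmdq_issue_cmdlist(), which follows, implements step 1 of the algorithm described earlier by snapshotting the combined prod/cons word and publishing a new prod with a single cmpxchg; whoever observed the OWNED flag clear becomes the owner of the whole gathered batch. A heavily simplified standalone model of that claim loop (toy 8-bit index space; no space check, wrap handling, IRQ masking, or the later clearing of the owner flag by the owner):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define OWNED_FLAG      (1u << 31)
#define IDX_MASK        0xffu                   /* toy queue: 256 slots */

struct toy_llq {
        uint32_t prod;
        uint32_t cons;
};

static _Atomic uint64_t llq_val;                /* prod in the high half, cons in the low */

static uint64_t pack(struct toy_llq q)
{
        return (uint64_t)q.prod << 32 | q.cons;
}

static struct toy_llq unpack(uint64_t v)
{
        return (struct toy_llq){ .prod = (uint32_t)(v >> 32), .cons = (uint32_t)v };
}

/* claim n slots: returns the start index and reports whether we became owner */
static uint32_t claim(unsigned int n, bool *owner)
{
        uint64_t old = atomic_load(&llq_val);
        struct toy_llq cur, next;

        do {
                cur = unpack(old);
                next = cur;
                next.prod = ((cur.prod + n) & IDX_MASK) | OWNED_FLAG;
        } while (!atomic_compare_exchange_weak(&llq_val, &old, pack(next)));

        /* whoever saw the flag clear owns the head of the gathered batch */
        *owner = !(cur.prod & OWNED_FLAG);
        return cur.prod & IDX_MASK;
}

int main(void)
{
        bool owner;

        assert(claim(4, &owner) == 0 && owner);         /* first claimant owns the head */
        assert(claim(2, &owner) == 4 && !owner);        /* later claimants only append */
        return 0;
}

In the real function the claimed range also carries the cons snapshot so that queue_has_space() can be rechecked, and the owner clears CMDQ_PROD_OWNED_FLAG again once it takes over the hardware prod pointer.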
+ */ +static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + u64 *cmds, int n, bool sync) +{ + u64 cmd_sync[CMDQ_ENT_DWORDS]; + u32 prod; unsigned long flags; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); - struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC }; - int ret; + bool owner; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + }, head = llq; + int ret = 0; - arm_smmu_cmdq_build_cmd(cmd, &ent); + /* 1. Allocate some space in the queue */ + local_irq_save(flags); + llq.val = READ_ONCE(cmdq->q.llq.val); + do { + u64 old; + + while (!queue_has_space(&llq, n + sync)) { + local_irq_restore(flags); + if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) + dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + local_irq_save(flags); + } + + head.cons = llq.cons; + head.prod = queue_inc_prod_n(&llq, n + sync) | + CMDQ_PROD_OWNED_FLAG; + + old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); + if (old == llq.val) + break; + + llq.val = old; + } while (1); + owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG); + head.prod &= ~CMDQ_PROD_OWNED_FLAG; + llq.prod &= ~CMDQ_PROD_OWNED_FLAG; + + /* + * 2. Write our commands into the queue + * Dependency ordering from the cmpxchg() loop above. + */ + arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); + if (sync) { + prod = queue_inc_prod_n(&llq, n); + arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod); + queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); + + /* + * In order to determine completion of our CMD_SYNC, we must + * ensure that the queue can't wrap twice without us noticing. + * We achieve that by taking the cmdq lock as shared before + * marking our slot as valid. + */ + arm_smmu_cmdq_shared_lock(cmdq); + } + + /* 3. Mark our slots as valid, ensuring commands are visible first */ + dma_wmb(); + arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - ret = queue_poll_cons(&smmu->cmdq.q, true, wfe); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + /* 4. If we are the owner, take control of the SMMU hardware */ + if (owner) { + /* a. Wait for previous owner to finish */ + atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); + + /* b. Stop gathering work by clearing the owned flag */ + prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG, + &cmdq->q.llq.atomic.prod); + prod &= ~CMDQ_PROD_OWNED_FLAG; + + /* + * c. Wait for any gathered work to be written to the queue. + * Note that we read our own entries so that we have the control + * dependency required by (d). + */ + arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); + + /* + * d. Advance the hardware prod pointer + * Control dependency ordering from the entries becoming valid. + */ + writel_relaxed(prod, cmdq->q.prod_reg); + + /* + * e. Tell the next owner we're done + * Make sure we've updated the hardware first, so that we don't + * race to update prod and potentially move it backwards. + */ + atomic_set_release(&cmdq->owner_prod, prod); + } + + /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */ + if (sync) { + llq.prod = queue_inc_prod_n(&llq, n); + ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); + if (ret) { + dev_err_ratelimited(smmu->dev, + "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", + llq.prod, + readl_relaxed(cmdq->q.prod_reg), + readl_relaxed(cmdq->q.cons_reg)); + } + + /* + * Try to unlock the cmq lock. 
This will fail if we're the last + * reader, in which case we can safely update cmdq->q.llq.cons + */ + if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { + WRITE_ONCE(cmdq->q.llq.cons, llq.cons); + arm_smmu_cmdq_shared_unlock(cmdq); + } + } + local_irq_restore(flags); return ret; } -static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_ent *ent) { - int ret; - bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) && - (smmu->features & ARM_SMMU_FEAT_COHERENCY); + u64 cmd[CMDQ_ENT_DWORDS]; - ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu) - : __arm_smmu_cmdq_issue_sync(smmu); - if (ret) - dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); - return ret; + if (arm_smmu_cmdq_build_cmd(cmd, ent)) { + dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", + ent->opcode); + return -EINVAL; + } + + return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false); +} + +static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +{ + return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); } /* Context descriptor manipulation functions */ -static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr) +static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, + int ssid, bool leaf) { - u64 val = 0; + size_t i; + unsigned long flags; + struct arm_smmu_master *master; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cmdq_ent cmd = { + .opcode = CMDQ_OP_CFGI_CD, + .cfgi = { + .ssid = ssid, + .leaf = leaf, + }, + }; + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_for_each_entry(master, &smmu_domain->devices, domain_head) { + for (i = 0; i < master->num_sids; i++) { + cmd.cfgi.sid = master->sids[i]; + arm_smmu_cmdq_issue_cmd(smmu, &cmd); + } + } + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - /* Repack the TCR. 
Just care about TTBR0 for now */ - val |= ARM_SMMU_TCR2CD(tcr, T0SZ); - val |= ARM_SMMU_TCR2CD(tcr, TG0); - val |= ARM_SMMU_TCR2CD(tcr, IRGN0); - val |= ARM_SMMU_TCR2CD(tcr, ORGN0); - val |= ARM_SMMU_TCR2CD(tcr, SH0); - val |= ARM_SMMU_TCR2CD(tcr, EPD0); - val |= ARM_SMMU_TCR2CD(tcr, EPD1); - val |= ARM_SMMU_TCR2CD(tcr, IPS); + arm_smmu_cmdq_issue_sync(smmu); +} - return val; +static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu, + struct arm_smmu_l1_ctx_desc *l1_desc) +{ + size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3); + + l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, + &l1_desc->l2ptr_dma, GFP_KERNEL); + if (!l1_desc->l2ptr) { + dev_warn(smmu->dev, + "failed to allocate context descriptor table\n"); + return -ENOMEM; + } + return 0; } -static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu, - struct arm_smmu_s1_cfg *cfg) +static void arm_smmu_write_cd_l1_desc(__le64 *dst, + struct arm_smmu_l1_ctx_desc *l1_desc) { - u64 val; + u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | + CTXDESC_L1_DESC_V; + WRITE_ONCE(*dst, cpu_to_le64(val)); +} + +static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain, + u32 ssid) +{ + __le64 *l1ptr; + unsigned int idx; + struct arm_smmu_l1_ctx_desc *l1_desc; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; + + if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR) + return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS; + + idx = ssid >> CTXDESC_SPLIT; + l1_desc = &cdcfg->l1_desc[idx]; + if (!l1_desc->l2ptr) { + if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) + return NULL; + + l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS; + arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); + /* An invalid L1CD can be cached */ + arm_smmu_sync_cd(smmu_domain, ssid, false); + } + idx = ssid & (CTXDESC_L2_ENTRIES - 1); + return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS; +} + +static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, + int ssid, struct arm_smmu_ctx_desc *cd) +{ /* - * We don't need to issue any invalidation here, as we'll invalidate - * the STE when installing the new entry anyway. + * This function handles the following cases: + * + * (1) Install primary CD, for normal DMA traffic (SSID = 0). + * (2) Install a secondary CD, for SID+SSID traffic. + * (3) Update ASID of a CD. Atomically write the first 64 bits of the + * CD, then invalidate the old entry and mappings. + * (4) Remove a secondary CD. */ - val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) | + u64 val; + bool cd_live; + __le64 *cdptr; + struct arm_smmu_device *smmu = smmu_domain->smmu; + + if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax))) + return -E2BIG; + + cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid); + if (!cdptr) + return -ENOMEM; + + val = le64_to_cpu(cdptr[0]); + cd_live = !!(val & CTXDESC_CD_0_V); + + if (!cd) { /* (4) */ + val = 0; + } else if (cd_live) { /* (3) */ + val &= ~CTXDESC_CD_0_ASID; + val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid); + /* + * Until CD+TLB invalidation, both ASIDs may be used for tagging + * this substream's traffic + */ + } else { /* (1) and (2) */ + cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK); + cdptr[2] = 0; + cdptr[3] = cpu_to_le64(cd->mair); + + /* + * STE is live, and the SMMU might read dwords of this CD in any + * order. Ensure that it observes valid values before reading + * V=1. 
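As an aside on the two-level table walked by arm_smmu_get_cd_ptr() above: the SSID is simply split into an L1 index and a leaf index. A hypothetical helper showing the arithmetic (the example_* name is not in the patch, and it assumes the leaf table has already been allocated, which the real helper does on demand):

/* Purely illustrative: mirrors the index arithmetic in arm_smmu_get_cd_ptr() */
static __le64 *example_cd_slot(struct arm_smmu_ctx_desc_cfg *cdcfg, u32 ssid)
{
	unsigned int l1_idx = ssid >> CTXDESC_SPLIT;		/* which leaf table */
	unsigned int l2_idx = ssid & (CTXDESC_L2_ENTRIES - 1);	/* slot within it */

	return cdcfg->l1_desc[l1_idx].l2ptr + l2_idx * CTXDESC_CD_DWORDS;
}

Which of the four arm_smmu_write_ctx_desc() cases applies is decided from the arguments and the current entry: SSID 0 with a valid cd installs the primary descriptor (1), a non-zero SSID installs a secondary one (2), rewriting a live entry updates its ASID in place (3), and cd == NULL clears the entry (4).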
+ */ + arm_smmu_sync_cd(smmu_domain, ssid, true); + + val = cd->tcr | #ifdef __BIG_ENDIAN - CTXDESC_CD_0_ENDI | + CTXDESC_CD_0_ENDI | #endif - CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET | - CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) | - CTXDESC_CD_0_V; + CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET | + CTXDESC_CD_0_AA64 | + FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) | + CTXDESC_CD_0_V; + + /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */ + if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE) + val |= CTXDESC_CD_0_S; + } + + /* + * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3 + * "Configuration structures and configuration invalidation completion" + * + * The size of single-copy atomic reads made by the SMMU is + * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single + * field within an aligned 64-bit span of a structure can be altered + * without first making the structure invalid. + */ + WRITE_ONCE(cdptr[0], cpu_to_le64(val)); + arm_smmu_sync_cd(smmu_domain, ssid, true); + return 0; +} + +static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain) +{ + int ret; + size_t l1size; + size_t max_contexts; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; + struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg; + + max_contexts = 1 << cfg->s1cdmax; + + if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || + max_contexts <= CTXDESC_L2_ENTRIES) { + cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; + cdcfg->num_l1_ents = max_contexts; + + l1size = max_contexts * (CTXDESC_CD_DWORDS << 3); + } else { + cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2; + cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts, + CTXDESC_L2_ENTRIES); + + cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents, + sizeof(*cdcfg->l1_desc), + GFP_KERNEL); + if (!cdcfg->l1_desc) + return -ENOMEM; + + l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); + } + + cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma, + GFP_KERNEL); + if (!cdcfg->cdtab) { + dev_warn(smmu->dev, "failed to allocate context descriptor\n"); + ret = -ENOMEM; + goto err_free_l1; + } + + return 0; + +err_free_l1: + if (cdcfg->l1_desc) { + devm_kfree(smmu->dev, cdcfg->l1_desc); + cdcfg->l1_desc = NULL; + } + return ret; +} + +static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain) +{ + int i; + size_t size, l1size; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; - /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */ - if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE) - val |= CTXDESC_CD_0_S; + if (cdcfg->l1_desc) { + size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3); - cfg->cdptr[0] = cpu_to_le64(val); + for (i = 0; i < cdcfg->num_l1_ents; i++) { + if (!cdcfg->l1_desc[i].l2ptr) + continue; - val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK; - cfg->cdptr[1] = cpu_to_le64(val); + dmam_free_coherent(smmu->dev, size, + cdcfg->l1_desc[i].l2ptr, + cdcfg->l1_desc[i].l2ptr_dma); + } + devm_kfree(smmu->dev, cdcfg->l1_desc); + cdcfg->l1_desc = NULL; + + l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); + } else { + l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3); + } - cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair); + dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma); + cdcfg->cdtab_dma = 0; + cdcfg->cdtab = NULL; } /* Stream table manipulation functions */ @@ -1219,6 +1836,7 @@ static void arm_smmu_write_strtab_ent(struct 
arm_smmu_master *master, u32 sid, if (s1_cfg) { BUG_ON(ste_live); dst[1] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | @@ -1228,8 +1846,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); - val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) | - FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS); + val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | + FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) | + FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt); } if (s2_cfg) { @@ -1253,7 +1873,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, STRTAB_STE_1_EATS_TRANS)); arm_smmu_sync_ste_for_sid(smmu, sid); - dst[0] = cpu_to_le64(val); + /* See comment in arm_smmu_write_ctx_desc() */ + WRITE_ONCE(dst[0], cpu_to_le64(val)); arm_smmu_sync_ste_for_sid(smmu, sid); /* It's likely that we'll want to use the new STE soon */ @@ -1286,7 +1907,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) desc->span = STRTAB_SPLIT + 1; desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL); if (!desc->l2ptr) { dev_err(smmu->dev, "failed to allocate l2 stream table for SID %u\n", @@ -1305,6 +1926,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) int i; struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->evtq.q; + struct arm_smmu_ll_queue *llq = &q->llq; u64 evt[EVTQ_ENT_DWORDS]; do { @@ -1322,12 +1944,13 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) * Not much we can do on overflow, so scream and pretend we're * trying harder. 
*/ - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); return IRQ_HANDLED; } @@ -1373,19 +1996,21 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) { struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->priq.q; + struct arm_smmu_ll_queue *llq = &q->llq; u64 evt[PRIQ_ENT_DWORDS]; do { while (!queue_remove_raw(q, evt)) arm_smmu_handle_ppr(smmu, evt); - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); - writel(q->cons, q->cons_reg); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); + queue_sync_cons_out(q); return IRQ_HANDLED; } @@ -1534,6 +2159,23 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) return 0; + /* + * Ensure that we've completed prior invalidation of the main TLBs + * before we read 'nr_ats_masters' in case of a concurrent call to + * arm_smmu_enable_ats(): + * + * // unmap() // arm_smmu_enable_ats() + * TLBI+SYNC atomic_inc(&nr_ats_masters); + * smp_mb(); [...] + * atomic_read(&nr_ats_masters); pci_enable_ats() // writel() + * + * Ensures that we always see the incremented 'nr_ats_masters' count if + * ATS was enabled at the PCI device before completion of the TLBI. + */ + smp_mb(); + if (!atomic_read(&smmu_domain->nr_ats_masters)) + return 0; + arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); spin_lock_irqsave(&smmu_domain->devices_lock, flags); @@ -1545,13 +2187,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, } /* IO_PGTABLE API */ -static void arm_smmu_tlb_sync(void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - - arm_smmu_cmdq_issue_sync(smmu_domain->smmu); -} - static void arm_smmu_tlb_inv_context(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; @@ -1570,25 +2205,32 @@ static void arm_smmu_tlb_inv_context(void *cookie) /* * NOTE: when io-pgtable is in non-strict mode, we may get here with * PTEs previously cleared by unmaps on the current CPU not yet visible - * to the SMMU. We are relying on the DSB implicit in queue_inc_prod() - * to guarantee those are observed before the TLBI. Do be careful, 007. + * to the SMMU. We are relying on the dma_wmb() implicit during cmd + * insertion to guarantee those are observed before the TLBI. Do be + * careful, 007. 
*/ arm_smmu_cmdq_issue_cmd(smmu, &cmd); arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); } -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, + size_t granule, bool leaf, + struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_domain *smmu_domain = cookie; + u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; struct arm_smmu_device *smmu = smmu_domain->smmu; + unsigned long start = iova, end = iova + size; + int i = 0; struct arm_smmu_cmdq_ent cmd = { .tlbi = { .leaf = leaf, - .addr = iova, }, }; + if (!size) + return; + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { cmd.opcode = CMDQ_OP_TLBI_NH_VA; cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; @@ -1597,16 +2239,54 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } - do { - arm_smmu_cmdq_issue_cmd(smmu, &cmd); - cmd.tlbi.addr += granule; - } while (size -= granule); + while (iova < end) { + if (i == CMDQ_BATCH_ENTRIES) { + arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false); + i = 0; + } + + cmd.tlbi.addr = iova; + arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd); + iova += granule; + i++; + } + + arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true); + + /* + * Unfortunately, this can't be leaf-only since we may have + * zapped an entire table. + */ + arm_smmu_atc_inv_domain(smmu_domain, 0, start, size); } -static const struct iommu_gather_ops arm_smmu_gather_ops = { +static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct iommu_domain *domain = &smmu_domain->domain; + + iommu_iotlb_gather_add_page(domain, gather, iova, granule); +} + +static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range(iova, size, granule, false, cookie); +} + +static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range(iova, size, granule, true, cookie); +} + +static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_inv_page_nosync, }; /* IOMMU API */ @@ -1683,12 +2363,8 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; - if (cfg->cdptr) { - dmam_free_coherent(smmu_domain->smmu->dev, - CTXDESC_CD_DWORDS << 3, - cfg->cdptr, - cfg->cdptr_dma); - + if (cfg->cdcfg.cdtab) { + arm_smmu_free_cd_tables(smmu_domain); arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid); } } else { @@ -1701,55 +2377,82 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) } static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master *master, struct io_pgtable_cfg *pgtbl_cfg) { int ret; int asid; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; + typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr; asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); if (asid < 0) return asid; - cfg->cdptr = 
dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, - &cfg->cdptr_dma, - GFP_KERNEL | __GFP_ZERO); - if (!cfg->cdptr) { - dev_warn(smmu->dev, "failed to allocate context descriptor\n"); - ret = -ENOMEM; + cfg->s1cdmax = master->ssid_bits; + + ret = arm_smmu_alloc_cd_tables(smmu_domain); + if (ret) goto out_free_asid; - } cfg->cd.asid = (u16)asid; - cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr; - cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; + cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; + cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | + FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | + FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | + FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | + FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | + FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | + CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64; + cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; + + /* + * Note that this will end up calling arm_smmu_sync_cd() before + * the master has been added to the devices list for this domain. + * This isn't an issue because the STE hasn't been installed yet. + */ + ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd); + if (ret) + goto out_free_cd_tables; + return 0; +out_free_cd_tables: + arm_smmu_free_cd_tables(smmu_domain); out_free_asid: arm_smmu_bitmap_free(smmu->asid_map, asid); return ret; } static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master *master, struct io_pgtable_cfg *pgtbl_cfg) { int vmid; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; + typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr; vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); if (vmid < 0) return vmid; + vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr; cfg->vmid = (u16)vmid; cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; - cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; + cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps); return 0; } -static int arm_smmu_domain_finalise(struct iommu_domain *domain) +static int arm_smmu_domain_finalise(struct iommu_domain *domain, + struct arm_smmu_master *master) { int ret; unsigned long ias, oas; @@ -1757,6 +2460,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) struct io_pgtable_cfg pgtbl_cfg; struct io_pgtable_ops *pgtbl_ops; int (*finalise_stage_fn)(struct arm_smmu_domain *, + struct arm_smmu_master *, struct io_pgtable_cfg *); struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -1796,7 +2500,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, - .tlb = &arm_smmu_gather_ops, + .tlb = &arm_smmu_flush_ops, .iommu_dev = smmu->dev, }; @@ -1811,7 +2515,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; domain->geometry.force_aperture = true; - ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); + ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg); if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); return ret; @@ -1863,44 +2567,65 @@ 
static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) } } -static int arm_smmu_enable_ats(struct arm_smmu_master *master) +#ifdef CONFIG_PCI_ATS +static bool arm_smmu_ats_supported(struct arm_smmu_master *master) { - int ret; - size_t stu; struct pci_dev *pdev; struct arm_smmu_device *smmu = master->smmu; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) || !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled()) - return -ENXIO; + return false; pdev = to_pci_dev(master->dev); - if (pdev->untrusted) - return -EPERM; + return !pdev->untrusted && pdev->ats_cap; +} +#else +static bool arm_smmu_ats_supported(struct arm_smmu_master *master) +{ + return false; +} +#endif + +static void arm_smmu_enable_ats(struct arm_smmu_master *master) +{ + size_t stu; + struct pci_dev *pdev; + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_domain *smmu_domain = master->domain; + + /* Don't enable ATS at the endpoint if it's not enabled in the STE */ + if (!master->ats_enabled) + return; /* Smallest Translation Unit: log2 of the smallest supported granule */ stu = __ffs(smmu->pgsize_bitmap); + pdev = to_pci_dev(master->dev); - ret = pci_enable_ats(pdev, stu); - if (ret) - return ret; - - master->ats_enabled = true; - return 0; + atomic_inc(&smmu_domain->nr_ats_masters); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0); + if (pci_enable_ats(pdev, stu)) + dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); } static void arm_smmu_disable_ats(struct arm_smmu_master *master) { struct arm_smmu_cmdq_ent cmd; + struct arm_smmu_domain *smmu_domain = master->domain; - if (!master->ats_enabled || !dev_is_pci(master->dev)) + if (!master->ats_enabled) return; + pci_disable_ats(to_pci_dev(master->dev)); + /* + * Ensure ATS is disabled at the endpoint before we issue the + * ATC invalidation via the SMMU. 
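The ATS enable/disable paths here maintain a simple invariant: nr_ats_masters is raised before ATS is switched on at the endpoint, and lowered only after ATS is off and the ATC has been flushed, so unmappers may skip ATC invalidation whenever the counter reads zero. A hypothetical helper (not driver code; it merely restates the check open-coded in arm_smmu_atc_inv_domain()):

static bool example_domain_needs_atc_inv(struct arm_smmu_domain *smmu_domain)
{
	/*
	 * Pairs with the atomic_inc() in arm_smmu_enable_ats(); the caller
	 * must have completed its TLBI + CMD_SYNC first, and the smp_mb()
	 * orders that completion before this read.
	 */
	smp_mb();
	return atomic_read(&smmu_domain->nr_ats_masters) != 0;
}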
+ */ + wmb(); arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); arm_smmu_atc_inv_master(master, &cmd); - pci_disable_ats(to_pci_dev(master->dev)); - master->ats_enabled = false; + atomic_dec(&smmu_domain->nr_ats_masters); } static void arm_smmu_detach_dev(struct arm_smmu_master *master) @@ -1911,14 +2636,15 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master) if (!smmu_domain) return; + arm_smmu_disable_ats(master); + spin_lock_irqsave(&smmu_domain->devices_lock, flags); list_del(&master->domain_head); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); master->domain = NULL; + master->ats_enabled = false; arm_smmu_install_ste_for_dev(master); - - arm_smmu_disable_ats(master); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) @@ -1942,7 +2668,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) if (!smmu_domain->smmu) { smmu_domain->smmu = smmu; - ret = arm_smmu_domain_finalise(domain); + ret = arm_smmu_domain_finalise(domain, master); if (ret) { smmu_domain->smmu = NULL; goto out_unlock; @@ -1954,28 +2680,35 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) dev_name(smmu->dev)); ret = -ENXIO; goto out_unlock; + } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && + master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) { + dev_err(dev, + "cannot attach to incompatible domain (%u SSID bits != %u)\n", + smmu_domain->s1_cfg.s1cdmax, master->ssid_bits); + ret = -EINVAL; + goto out_unlock; } master->domain = smmu_domain; + if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) + master->ats_enabled = arm_smmu_ats_supported(master); + + arm_smmu_install_ste_for_dev(master); + spin_lock_irqsave(&smmu_domain->devices_lock, flags); list_add(&master->domain_head, &smmu_domain->devices); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) - arm_smmu_enable_ats(master); + arm_smmu_enable_ats(master); - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) - arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg); - - arm_smmu_install_ste_for_dev(master); out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; } static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; @@ -1985,21 +2718,16 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, return ops->map(ops, iova, paddr, size, prot); } -static size_t -arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) +static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) { - int ret; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; if (!ops) return 0; - ret = ops->unmap(ops, iova, size); - if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size)) - return 0; - - return ret; + return ops->unmap(ops, iova, size, gather); } static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) @@ -2010,12 +2738,13 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) arm_smmu_tlb_inv_context(smmu_domain); } -static void arm_smmu_iotlb_sync(struct iommu_domain *domain) +static void arm_smmu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { - struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; + 
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (smmu) - arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start, + gather->pgsize, true, smmu_domain); } static phys_addr_t @@ -2034,16 +2763,11 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) static struct platform_driver arm_smmu_driver; -static int arm_smmu_match_node(struct device *dev, const void *data) -{ - return dev->fwnode == data; -} - static struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) { - struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, - fwnode, arm_smmu_match_node); + struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver, + fwnode); put_device(dev); return dev ? dev_get_drvdata(dev) : NULL; } @@ -2070,51 +2794,66 @@ static int arm_smmu_add_device(struct device *dev) if (!fwspec || fwspec->ops != &arm_smmu_ops) return -ENODEV; - /* - * We _can_ actually withstand dodgy bus code re-calling add_device() - * without an intervening remove_device()/of_xlate() sequence, but - * we're not going to do so quietly... - */ - if (WARN_ON_ONCE(fwspec->iommu_priv)) { - master = fwspec->iommu_priv; - smmu = master->smmu; - } else { - smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); - if (!smmu) - return -ENODEV; - master = kzalloc(sizeof(*master), GFP_KERNEL); - if (!master) - return -ENOMEM; - master->dev = dev; - master->smmu = smmu; - master->sids = fwspec->ids; - master->num_sids = fwspec->num_ids; - fwspec->iommu_priv = master; - } + if (WARN_ON_ONCE(fwspec->iommu_priv)) + return -EBUSY; + + smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); + if (!smmu) + return -ENODEV; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + master->dev = dev; + master->smmu = smmu; + master->sids = fwspec->ids; + master->num_sids = fwspec->num_ids; + fwspec->iommu_priv = master; /* Check the SIDs are in range of the SMMU and our stream table */ for (i = 0; i < master->num_sids; i++) { u32 sid = master->sids[i]; - if (!arm_smmu_sid_in_range(smmu, sid)) - return -ERANGE; + if (!arm_smmu_sid_in_range(smmu, sid)) { + ret = -ERANGE; + goto err_free_master; + } /* Ensure l2 strtab is initialised */ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { ret = arm_smmu_init_l2_strtab(smmu, sid); if (ret) - return ret; + goto err_free_master; } } + master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits); + + if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) + master->ssid_bits = min_t(u8, master->ssid_bits, + CTXDESC_LINEAR_CDMAX); + + ret = iommu_device_link(&smmu->iommu, dev); + if (ret) + goto err_free_master; + group = iommu_group_get_for_dev(dev); - if (!IS_ERR(group)) { - iommu_group_put(group); - iommu_device_link(&smmu->iommu, dev); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto err_unlink; } - return PTR_ERR_OR_ZERO(group); + iommu_group_put(group); + return 0; + +err_unlink: + iommu_device_unlink(&smmu->iommu, dev); +err_free_master: + kfree(master); + fwspec->iommu_priv = NULL; + return ret; } static void arm_smmu_remove_device(struct device *dev) @@ -2246,15 +2985,6 @@ static void arm_smmu_get_resv_regions(struct device *dev, iommu_dma_get_resv_regions(dev, head); } -static void arm_smmu_put_resv_regions(struct device *dev, - struct list_head *head) -{ - struct iommu_resv_region *entry, *next; - - list_for_each_entry_safe(entry, next, head, list) - kfree(entry); -} - static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, 
.domain_alloc = arm_smmu_domain_alloc, @@ -2272,7 +3002,7 @@ static struct iommu_ops arm_smmu_ops = { .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, - .put_resv_regions = arm_smmu_put_resv_regions, + .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -2286,13 +3016,13 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, size_t qsz; do { - qsz = ((1 << q->max_n_shift) * dwords) << 3; + qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); if (q->base || qsz < PAGE_SIZE) break; - q->max_n_shift--; + q->llq.max_n_shift--; } while (1); if (!q->base) { @@ -2304,7 +3034,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, if (!WARN_ON(q->base_dma & (qsz - 1))) { dev_info(smmu->dev, "allocated %u entries for %s\n", - 1 << q->max_n_shift, name); + 1 << q->llq.max_n_shift, name); } q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); @@ -2313,24 +3043,55 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, q->q_base = Q_BASE_RWA; q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; - q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift); + q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); - q->prod = q->cons = 0; + q->llq.prod = q->llq.cons = 0; return 0; } +static void arm_smmu_cmdq_free_bitmap(void *data) +{ + unsigned long *bitmap = data; + bitmap_free(bitmap); +} + +static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) +{ + int ret = 0; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + unsigned int nents = 1 << cmdq->q.llq.max_n_shift; + atomic_long_t *bitmap; + + atomic_set(&cmdq->owner_prod, 0); + atomic_set(&cmdq->lock, 0); + + bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL); + if (!bitmap) { + dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); + ret = -ENOMEM; + } else { + cmdq->valid_map = bitmap; + devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); + } + + return ret; +} + static int arm_smmu_init_queues(struct arm_smmu_device *smmu) { int ret; /* cmdq */ - spin_lock_init(&smmu->cmdq.lock); ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS, "cmdq"); if (ret) return ret; + ret = arm_smmu_cmdq_init(smmu); + if (ret) + return ret; + /* evtq */ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS, @@ -2388,7 +3149,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL); if (!strtab) { dev_err(smmu->dev, "failed to allocate l1 stream table (%u bytes)\n", @@ -2415,7 +3176,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL); if (!strtab) { dev_err(smmu->dev, "failed to allocate linear stream table (%u bytes)\n", @@ -2708,8 +3469,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Command queue */ writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); - writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD); - writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS); + 
writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); + writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); enables = CR0_CMDQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2736,9 +3497,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Event queue */ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); - writel_relaxed(smmu->evtq.q.prod, + writel_relaxed(smmu->evtq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu)); - writel_relaxed(smmu->evtq.q.cons, + writel_relaxed(smmu->evtq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu)); enables |= CR0_EVTQEN; @@ -2753,9 +3514,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) if (smmu->features & ARM_SMMU_FEAT_PRI) { writeq_relaxed(smmu->priq.q.q_base, smmu->base + ARM_SMMU_PRIQ_BASE); - writel_relaxed(smmu->priq.q.prod, + writel_relaxed(smmu->priq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu)); - writel_relaxed(smmu->priq.q.cons, + writel_relaxed(smmu->priq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu)); enables |= CR0_PRIQEN; @@ -2909,18 +3670,24 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } /* Queue sizes, capped to ensure natural alignment */ - smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_CMDQS, reg)); - if (!smmu->cmdq.q.max_n_shift) { - /* Odd alignment restrictions on the base, so ignore for now */ - dev_err(smmu->dev, "unit-length command queue not supported\n"); + smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_CMDQS, reg)); + if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { + /* + * We don't support splitting up batches, so one batch of + * commands plus an extra sync needs to fit inside the command + * queue. There's also no way we can handle the weird alignment + * restrictions on the base pointer for a unit-length queue. 
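For a concrete reading of this check, assuming CMDQ_BATCH_ENTRIES is defined elsewhere in the patch as BITS_PER_LONG (64 on arm64):

	/*
	 * ilog2(64) == 6, so max_n_shift must be at least 7, i.e. the
	 * command queue must have >= 128 entries.  That guarantees a
	 * worst-case batch of 64 commands plus its trailing CMD_SYNC
	 * (65 slots) can always be reserved in one go, possibly after
	 * waiting for space, but never by splitting the batch.
	 */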
+ */ + dev_err(smmu->dev, "command queue size <= %d entries not supported\n", + CMDQ_BATCH_ENTRIES); return -ENXIO; } - smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_EVTQS, reg)); - smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_PRIQS, reg)); + smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_EVTQS, reg)); + smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_PRIQS, reg)); /* SID/SSID sizes */ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); @@ -3069,6 +3836,43 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) return SZ_128K; } +static int arm_smmu_set_bus_ops(struct iommu_ops *ops) +{ + int err; + +#ifdef CONFIG_PCI + if (pci_bus_type.iommu_ops != ops) { + err = bus_set_iommu(&pci_bus_type, ops); + if (err) + return err; + } +#endif +#ifdef CONFIG_ARM_AMBA + if (amba_bustype.iommu_ops != ops) { + err = bus_set_iommu(&amba_bustype, ops); + if (err) + goto err_reset_pci_ops; + } +#endif + if (platform_bus_type.iommu_ops != ops) { + err = bus_set_iommu(&platform_bus_type, ops); + if (err) + goto err_reset_amba_ops; + } + + return 0; + +err_reset_amba_ops: +#ifdef CONFIG_ARM_AMBA + bus_set_iommu(&amba_bustype, NULL); +#endif +err_reset_pci_ops: __maybe_unused; +#ifdef CONFIG_PCI + bus_set_iommu(&pci_bus_type, NULL); +#endif + return err; +} + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -3098,7 +3902,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) { + if (resource_size(res) < arm_smmu_resource_size(smmu)) { dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; } @@ -3110,19 +3914,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev) /* Interrupt lines */ - irq = platform_get_irq_byname(pdev, "combined"); + irq = platform_get_irq_byname_optional(pdev, "combined"); if (irq > 0) smmu->combined_irq = irq; else { - irq = platform_get_irq_byname(pdev, "eventq"); + irq = platform_get_irq_byname_optional(pdev, "eventq"); if (irq > 0) smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "priq"); + irq = platform_get_irq_byname_optional(pdev, "priq"); if (irq > 0) smmu->priq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "gerror"); + irq = platform_get_irq_byname_optional(pdev, "gerror"); if (irq > 0) smmu->gerr_irq = irq; } @@ -3159,48 +3963,45 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return ret; } -#ifdef CONFIG_PCI - if (pci_bus_type.iommu_ops != &arm_smmu_ops) { - pci_request_acs(); - ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); - if (ret) - return ret; - } -#endif -#ifdef CONFIG_ARM_AMBA - if (amba_bustype.iommu_ops != &arm_smmu_ops) { - ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); - if (ret) - return ret; - } -#endif - if (platform_bus_type.iommu_ops != &arm_smmu_ops) { - ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops); - if (ret) - return ret; - } - return 0; + return arm_smmu_set_bus_ops(&arm_smmu_ops); } -static void arm_smmu_device_shutdown(struct platform_device *pdev) +static int arm_smmu_device_remove(struct platform_device *pdev) { struct arm_smmu_device *smmu = platform_get_drvdata(pdev); + arm_smmu_set_bus_ops(NULL); + iommu_device_unregister(&smmu->iommu); + iommu_device_sysfs_remove(&smmu->iommu); arm_smmu_device_disable(smmu); + + return 0; +} + +static void 
arm_smmu_device_shutdown(struct platform_device *pdev) +{ + arm_smmu_device_remove(pdev); } static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,smmu-v3", }, { }, }; +MODULE_DEVICE_TABLE(of, arm_smmu_of_match); static struct platform_driver arm_smmu_driver = { .driver = { - .name = "arm-smmu-v3", - .of_match_table = of_match_ptr(arm_smmu_of_match), - .suppress_bind_attrs = true, + .name = "arm-smmu-v3", + .of_match_table = arm_smmu_of_match, + .suppress_bind_attrs = true, }, .probe = arm_smmu_device_probe, + .remove = arm_smmu_device_remove, .shutdown = arm_smmu_device_shutdown, }; -builtin_platform_driver(arm_smmu_driver); +module_platform_driver(arm_smmu_driver); + +MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); +MODULE_AUTHOR("Will Deacon <will@kernel.org>"); +MODULE_ALIAS("platform:arm-smmu-v3"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 64977c131ee6..16c4b87af42b 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -19,19 +19,15 @@ #include <linux/acpi.h> #include <linux/acpi_iort.h> -#include <linux/atomic.h> +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/dma-iommu.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/io-64-nonatomic-hi-lo.h> -#include <linux/io-pgtable.h> -#include <linux/iommu.h> #include <linux/iopoll.h> -#include <linux/init.h> -#include <linux/moduleparam.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> @@ -39,13 +35,13 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/ratelimit.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/amba/bus.h> #include <linux/fsl/mc.h> -#include "arm-smmu-regs.h" +#include "arm-smmu.h" /* * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU @@ -56,54 +52,13 @@ */ #define QCOM_DUMMY_VAL -1 -#define ARM_MMU500_ACTLR_CPRE (1 << 1) - -#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) -#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10) -#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8) - #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ #define TLB_SPIN_COUNT 10 -/* Maximum number of context banks per SMMU */ -#define ARM_SMMU_MAX_CBS 128 - -/* SMMU global address space */ -#define ARM_SMMU_GR0(smmu) ((smmu)->base) -#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) - -/* - * SMMU global address space with conditional offset to access secure - * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448, - * nsGFSYNR0: 0x450) - */ -#define ARM_SMMU_GR0_NS(smmu) \ - ((smmu)->base + \ - ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ - ? 0x400 : 0)) - -/* - * Some 64-bit registers only make sense to write atomically, but in such - * cases all the data relevant to AArch32 formats lies within the lower word, - * therefore this actually makes more sense than it might first appear. - */ -#ifdef CONFIG_64BIT -#define smmu_write_atomic_lq writeq_relaxed -#else -#define smmu_write_atomic_lq writel_relaxed -#endif - -/* Translation context bank */ -#define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift)) - #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 static int force_stage; -/* - * not really modular, but the easiest way to keep compat with existing - * bootargs behaviour is to continue using module_param() here. 
- */ module_param(force_stage, int, S_IRUGO); MODULE_PARM_DESC(force_stage, "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation."); @@ -113,19 +68,6 @@ module_param(disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); -enum arm_smmu_arch_version { - ARM_SMMU_V1, - ARM_SMMU_V1_64K, - ARM_SMMU_V2, -}; - -enum arm_smmu_implementation { - GENERIC_SMMU, - ARM_MMU500, - CAVIUM_SMMUV2, - QCOM_SMMUV2, -}; - struct arm_smmu_s2cr { struct iommu_group *group; int count; @@ -163,117 +105,8 @@ struct arm_smmu_master_cfg { #define for_each_cfg_sme(fw, i, idx) \ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i) -struct arm_smmu_device { - struct device *dev; - - void __iomem *base; - void __iomem *cb_base; - unsigned long pgshift; - -#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) -#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) -#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) -#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) -#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) -#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) -#define ARM_SMMU_FEAT_VMID16 (1 << 6) -#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7) -#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8) -#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) -#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) -#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) -#define ARM_SMMU_FEAT_EXIDS (1 << 12) - u32 features; - -#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) - u32 options; - enum arm_smmu_arch_version version; - enum arm_smmu_implementation model; - - u32 num_context_banks; - u32 num_s2_context_banks; - DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); - struct arm_smmu_cb *cbs; - atomic_t irptndx; - - u32 num_mapping_groups; - u16 streamid_mask; - u16 smr_mask_mask; - struct arm_smmu_smr *smrs; - struct arm_smmu_s2cr *s2crs; - struct mutex stream_map_mutex; - - unsigned long va_size; - unsigned long ipa_size; - unsigned long pa_size; - unsigned long pgsize_bitmap; - - u32 num_global_irqs; - u32 num_context_irqs; - unsigned int *irqs; - struct clk_bulk_data *clks; - int num_clks; - - u32 cavium_id_base; /* Specific to Cavium */ - - spinlock_t global_sync_lock; - - /* IOMMU core code handle */ - struct iommu_device iommu; -}; - -enum arm_smmu_context_fmt { - ARM_SMMU_CTX_FMT_NONE, - ARM_SMMU_CTX_FMT_AARCH64, - ARM_SMMU_CTX_FMT_AARCH32_L, - ARM_SMMU_CTX_FMT_AARCH32_S, -}; - -struct arm_smmu_cfg { - u8 cbndx; - u8 irptndx; - union { - u16 asid; - u16 vmid; - }; - u32 cbar; - enum arm_smmu_context_fmt fmt; -}; -#define INVALID_IRPTNDX 0xff - -enum arm_smmu_domain_stage { - ARM_SMMU_DOMAIN_S1 = 0, - ARM_SMMU_DOMAIN_S2, - ARM_SMMU_DOMAIN_NESTED, - ARM_SMMU_DOMAIN_BYPASS, -}; - -struct arm_smmu_domain { - struct arm_smmu_device *smmu; - struct io_pgtable_ops *pgtbl_ops; - const struct iommu_gather_ops *tlb_ops; - struct arm_smmu_cfg cfg; - enum arm_smmu_domain_stage stage; - bool non_strict; - struct mutex init_mutex; /* Protects smmu pointer */ - spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ - struct iommu_domain domain; -}; - -struct arm_smmu_option_prop { - u32 opt; - const char *prop; -}; - -static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); - static 
bool using_legacy_binding, using_generic_binding; -static struct arm_smmu_option_prop arm_smmu_options[] = { - { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, - { 0, NULL}, -}; - static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) { if (pm_runtime_enabled(smmu->dev)) @@ -285,7 +118,7 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu) { if (pm_runtime_enabled(smmu->dev)) - pm_runtime_put(smmu->dev); + pm_runtime_put_autosuspend(smmu->dev); } static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) @@ -293,19 +126,11 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) return container_of(dom, struct arm_smmu_domain, domain); } -static void parse_driver_options(struct arm_smmu_device *smmu) -{ - int i = 0; +static struct platform_driver arm_smmu_driver; +static struct iommu_ops arm_smmu_ops; - do { - if (of_property_read_bool(smmu->dev->of_node, - arm_smmu_options[i].prop)) { - smmu->options |= arm_smmu_options[i].opt; - dev_notice(smmu->dev, "option %s\n", - arm_smmu_options[i].prop); - } - } while (arm_smmu_options[++i].opt); -} +#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS +static int arm_smmu_bus_init(struct iommu_ops *ops); static struct device_node *dev_get_dev_node(struct device *dev) { @@ -333,7 +158,7 @@ static int __find_legacy_master_phandle(struct device *dev, void *data) int err; of_for_each_phandle(it, err, dev->of_node, "mmu-masters", - "#stream-id-cells", 0) + "#stream-id-cells", -1) if (it->node == np) { *(void **)data = dev; return 1; @@ -342,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data) return err == -ENOENT ? 0 : err; } -static struct platform_driver arm_smmu_driver; -static struct iommu_ops arm_smmu_ops; - static int arm_smmu_register_legacy_master(struct device *dev, struct arm_smmu_device **smmu) { @@ -396,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev, return err; } +/* + * With the legacy DT binding in play, we have no guarantees about + * probe order, but then we're also not doing default domains, so we can + * delay setting bus ops until we're sure every possible SMMU is ready, + * and that way ensure that no add_device() calls get missed. 
+ */ +static int arm_smmu_legacy_bus_init(void) +{ + if (using_legacy_binding) + return arm_smmu_bus_init(&arm_smmu_ops); + return 0; +} +device_initcall_sync(arm_smmu_legacy_bus_init); +#else +static int arm_smmu_register_legacy_master(struct device *dev, + struct arm_smmu_device **smmu) +{ + return -ENODEV; +} +#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */ + static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) { int idx; @@ -415,15 +258,20 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) } /* Wait for any pending TLB invalidations to complete */ -static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, - void __iomem *sync, void __iomem *status) +static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, + int sync, int status) { unsigned int spin_cnt, delay; + u32 reg; + + if (smmu->impl && unlikely(smmu->impl->tlb_sync)) + return smmu->impl->tlb_sync(smmu, page, sync, status); - writel_relaxed(QCOM_DUMMY_VAL, sync); + arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL); for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { - if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) + reg = arm_smmu_readl(smmu, page, status); + if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE)) return; cpu_relax(); } @@ -435,134 +283,189 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) { - void __iomem *base = ARM_SMMU_GR0(smmu); unsigned long flags; spin_lock_irqsave(&smmu->global_sync_lock, flags); - __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, - base + ARM_SMMU_GR0_sTLBGSTATUS); + __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC, + ARM_SMMU_GR0_sTLBGSTATUS); spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } -static void arm_smmu_tlb_sync_context(void *cookie) +static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); unsigned long flags; spin_lock_irqsave(&smmu_domain->cb_lock, flags); - __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, - base + ARM_SMMU_CB_TLBSTATUS); + __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), + ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS); spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); } -static void arm_smmu_tlb_sync_vmid(void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - - arm_smmu_tlb_sync_global(smmu_domain->smmu); -} - static void arm_smmu_tlb_inv_context_s1(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); - /* - * NOTE: this is not a relaxed write; it needs to guarantee that PTEs - * cleared by the current CPU are visible to the SMMU before the TLBI. + * The TLBI write may be relaxed, so ensure that PTEs cleared by the + * current CPU are visible beforehand. 
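For context on the barrier that follows (a sketch of equivalent orderings, not additional driver code): the old non-relaxed writel() carried an implicit store barrier, whereas the new arm_smmu_cb_write() accessor may issue a relaxed write as the comment above notes, so the barrier becomes explicit:

	/*
	 *	Before:	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
	 *		(implicit barrier orders prior PTE zeroing)
	 *	After:	wmb();
	 *		arm_smmu_cb_write(smmu_domain->smmu,
	 *				  smmu_domain->cfg.cbndx,
	 *				  ARM_SMMU_CB_S1_TLBIASID,
	 *				  smmu_domain->cfg.asid);
	 */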
*/ - writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); - arm_smmu_tlb_sync_context(cookie); + wmb(); + arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, + ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); + arm_smmu_tlb_sync_context(smmu_domain); } static void arm_smmu_tlb_inv_context_s2(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *base = ARM_SMMU_GR0(smmu); - /* NOTE: see above */ - writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + /* See above */ + wmb(); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); arm_smmu_tlb_sync_global(smmu); } -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, + size_t granule, void *cookie, int reg) { struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); + int idx = cfg->cbndx; - if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - if (stage1) { - reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; - - if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { - iova &= ~12UL; - iova |= cfg->asid; - do { - writel_relaxed(iova, reg); - iova += granule; - } while (size -= granule); - } else { - iova >>= 12; - iova |= (u64)cfg->asid << 48; - do { - writeq_relaxed(iova, reg); - iova += granule >> 12; - } while (size -= granule); - } + if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { + iova = (iova >> 12) << 12; + iova |= cfg->asid; + do { + arm_smmu_cb_write(smmu, idx, reg, iova); + iova += granule; + } while (size -= granule); } else { - reg += leaf ? 
ARM_SMMU_CB_S2_TLBIIPAS2L : - ARM_SMMU_CB_S2_TLBIIPAS2; iova >>= 12; + iova |= (u64)cfg->asid << 48; do { - smmu_write_atomic_lq(iova, reg); + arm_smmu_cb_writeq(smmu, idx, reg, iova); iova += granule >> 12; } while (size -= granule); } } +static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, + size_t granule, void *cookie, int reg) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + int idx = smmu_domain->cfg.cbndx; + + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + + iova >>= 12; + do { + if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) + arm_smmu_cb_writeq(smmu, idx, reg, iova); + else + arm_smmu_cb_write(smmu, idx, reg, iova); + iova += granule >> 12; + } while (size -= granule); +} + +static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, + ARM_SMMU_CB_S1_TLBIVA); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, + ARM_SMMU_CB_S1_TLBIVAL); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, + ARM_SMMU_CB_S1_TLBIVAL); +} + +static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, + ARM_SMMU_CB_S2_TLBIIPAS2); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, + ARM_SMMU_CB_S2_TLBIIPAS2L); + arm_smmu_tlb_sync_context(cookie); +} + +static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, + ARM_SMMU_CB_S2_TLBIIPAS2L); +} + +static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + arm_smmu_tlb_inv_context_s2(cookie); +} /* * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears * almost negligible, but the benefit of getting the first one in as far ahead * of the sync as possible is significant, hence we don't just make this a - * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think. + * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might + * think. 
*/ -static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; - void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); + struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) wmb(); - writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); } -static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { +static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync_context, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1, + .tlb_add_page = arm_smmu_tlb_add_page_s1, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync_context, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2, + .tlb_add_page = arm_smmu_tlb_add_page_s2, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, - .tlb_sync = arm_smmu_tlb_sync_vmid, + .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1, + .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1, + .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -571,26 +474,22 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) unsigned long iova; struct iommu_domain *domain = dev; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *gr1_base = ARM_SMMU_GR1(smmu); - void __iomem *cb_base; - - cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); - fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); + int idx = smmu_domain->cfg.cbndx; - if (!(fsr & FSR_FAULT)) + fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); + if (!(fsr & ARM_SMMU_FSR_FAULT)) return IRQ_NONE; - fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); - iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); - cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx)); + fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); + iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); + cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); dev_err_ratelimited(smmu->dev, "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", - fsr, iova, fsynr, cbfrsynra, cfg->cbndx); + fsr, iova, fsynr, cbfrsynra, idx); - writel(fsr, cb_base + ARM_SMMU_CB_FSR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); return IRQ_HANDLED; } @@ -598,23 +497,32 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) { u32 gfsr, gfsynr0, gfsynr1, gfsynr2; struct arm_smmu_device *smmu = dev; - void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + 
DEFAULT_RATELIMIT_BURST); - gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); - gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); - gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); - gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); + gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); + gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0); + gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1); + gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2); if (!gfsr) return IRQ_NONE; - dev_err_ratelimited(smmu->dev, - "Unexpected global fault, this could be serious\n"); - dev_err_ratelimited(smmu->dev, - "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", - gfsr, gfsynr0, gfsynr1, gfsynr2); + if (__ratelimit(&rs)) { + if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) && + (gfsr & ARM_SMMU_sGFSR_USF)) + dev_err(smmu->dev, + "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n", + (u16)gfsynr1); + else + dev_err(smmu->dev, + "Unexpected global fault, this could be serious\n"); + dev_err(smmu->dev, + "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", + gfsr, gfsynr0, gfsynr1, gfsynr2); + } - writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr); return IRQ_HANDLED; } @@ -627,31 +535,33 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, cb->cfg = cfg; - /* TTBCR */ + /* TCR */ if (stage1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; } else { - cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr; - cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; - cb->tcr[1] |= TTBCR2_SEP_UPSTREAM; + cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg); + cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg); if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) - cb->tcr[1] |= TTBCR2_AS; + cb->tcr[1] |= ARM_SMMU_TCR2_AS; + else + cb->tcr[0] |= ARM_SMMU_TCR_EAE; } } else { - cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; + cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg); } /* TTBRs */ if (stage1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; - cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; + cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr; + cb->ttbr[1] = 0; } else { - cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; - cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; - cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; + cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; + cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, + cfg->asid); + cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, + cfg->asid); } } else { cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; @@ -663,8 +573,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; } else { - cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; - cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; + cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair; + cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32; } } } @@ -675,84 +585,84 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) bool stage1; struct arm_smmu_cb *cb = &smmu->cbs[idx]; struct arm_smmu_cfg *cfg = cb->cfg; - void __iomem *cb_base, *gr1_base; - - cb_base = ARM_SMMU_CB(smmu, idx); /* Unassigned context banks only need disabling */ if (!cfg) { - writel_relaxed(0, cb_base + 
ARM_SMMU_CB_SCTLR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0); return; } - gr1_base = ARM_SMMU_GR1(smmu); stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; /* CBA2R */ if (smmu->version > ARM_SMMU_V1) { if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) - reg = CBA2R_RW64_64BIT; + reg = ARM_SMMU_CBA2R_VA64; else - reg = CBA2R_RW64_32BIT; + reg = 0; /* 16-bit VMIDs live in CBA2R */ if (smmu->features & ARM_SMMU_FEAT_VMID16) - reg |= cfg->vmid << CBA2R_VMID_SHIFT; + reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid); - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx)); + arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg); } /* CBAR */ - reg = cfg->cbar; + reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar); if (smmu->version < ARM_SMMU_V2) - reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; + reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx); /* * Use the weakest shareability/memory types, so they are * overridden by the ttbcr/pte. */ if (stage1) { - reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | - (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); + reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG, + ARM_SMMU_CBAR_S1_BPSHCFG_NSH) | + FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR, + ARM_SMMU_CBAR_S1_MEMATTR_WB); } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { /* 8-bit VMIDs live in CBAR */ - reg |= cfg->vmid << CBAR_VMID_SHIFT; + reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid); } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx)); + arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg); /* - * TTBCR + * TCR * We must write this before the TTBRs, since it determines the * access behaviour of some fields (in particular, ASID[15:8]). */ if (stage1 && smmu->version > ARM_SMMU_V1) - writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2); - writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); /* TTBRs */ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { - writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); - writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); - writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); } else { - writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); + arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); if (stage1) - writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); + arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1, + cb->ttbr[1]); } /* MAIRs (stage-1 only) */ if (stage1) { - writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0); - writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); } /* SCTLR */ - reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M; + reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE | + ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M; if (stage1) - reg |= SCTLR_S1_ASIDPNE; + reg |= ARM_SMMU_SCTLR_S1_ASIDPNE; if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) - reg |= SCTLR_E; + reg |= ARM_SMMU_SCTLR_E; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg); } static int arm_smmu_init_domain_context(struct iommu_domain *domain, @@ -842,7 +752,7 @@ static int 
arm_smmu_init_domain_context(struct iommu_domain *domain, ias = min(ias, 32UL); oas = min(oas, 32UL); } - smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops; + smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; break; case ARM_SMMU_DOMAIN_NESTED: /* @@ -862,9 +772,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, oas = min(oas, 40UL); } if (smmu->version == ARM_SMMU_V2) - smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2; + smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; else - smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1; + smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; break; default: ret = -EINVAL; @@ -884,23 +794,29 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, } if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) - cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base; + cfg->vmid = cfg->cbndx + 1; else - cfg->asid = cfg->cbndx + smmu->cavium_id_base; + cfg->asid = cfg->cbndx; + + smmu_domain->smmu = smmu; + if (smmu->impl && smmu->impl->init_context) { + ret = smmu->impl->init_context(smmu_domain); + if (ret) + goto out_unlock; + } pgtbl_cfg = (struct io_pgtable_cfg) { .pgsize_bitmap = smmu->pgsize_bitmap, .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, - .tlb = smmu_domain->tlb_ops, + .tlb = smmu_domain->flush_ops, .iommu_dev = smmu->dev, }; if (smmu_domain->non_strict) pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; - smmu_domain->smmu = smmu; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { ret = -ENOMEM; @@ -926,7 +842,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (ret < 0) { dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", cfg->irptndx, irq); - cfg->irptndx = INVALID_IRPTNDX; + cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX; } mutex_unlock(&smmu_domain->init_mutex); @@ -936,6 +852,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, return 0; out_clear_smmu: + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); smmu_domain->smmu = NULL; out_unlock: mutex_unlock(&smmu_domain->init_mutex); @@ -963,7 +880,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) smmu->cbs[cfg->cbndx].cfg = NULL; arm_smmu_write_context_bank(smmu, cfg->cbndx); - if (cfg->irptndx != INVALID_IRPTNDX) { + if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) { irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; devm_free_irq(smmu->dev, irq, domain); } @@ -1019,24 +936,25 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_smr *smr = smmu->smrs + idx; - u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; + u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) | + FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask); if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) - reg |= SMR_VALID; - writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); + reg |= ARM_SMMU_SMR_VALID; + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg); } static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; - u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT | - (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | - (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; + u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | + FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | + FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); if (smmu->features & 
ARM_SMMU_FEAT_EXIDS && smmu->smrs && smmu->smrs[idx].valid) - reg |= S2CR_EXIDVALID; - writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); + reg |= ARM_SMMU_S2CR_EXIDVALID; + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg); } static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) @@ -1052,26 +970,38 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) */ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) { - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); u32 smr; + int i; if (!smmu->smrs) return; - + /* + * If we've had to accommodate firmware memory regions, we may + * have live SMRs by now; tread carefully... + * + * Somewhat perversely, not having a free SMR for this test implies we + * can get away without it anyway, as we'll only be able to 'allocate' + * these SMRs for the ID/mask values we're already trusting to be OK. + */ + for (i = 0; i < smmu->num_mapping_groups; i++) + if (!smmu->smrs[i].valid) + goto smr_ok; + return; +smr_ok: /* * SMR.ID bits may not be preserved if the corresponding MASK * bits are set, so check each one separately. We can reject * masters later if they try to claim IDs outside these masks. */ - smr = smmu->streamid_mask << SMR_ID_SHIFT; - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - smmu->streamid_mask = smr >> SMR_ID_SHIFT; + smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr); + smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); + smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr); - smr = smmu->streamid_mask << SMR_MASK_SHIFT; - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; + smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr); + smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); + smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); } static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) @@ -1140,8 +1070,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev) mutex_lock(&smmu->stream_map_mutex); /* Figure out a viable stream map entry allocation */ for_each_cfg_sme(fwspec, i, idx) { - u16 sid = fwspec->ids[i]; - u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; + u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); + u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); if (idx != INVALID_SMENDX) { ret = -EEXIST; @@ -1163,8 +1093,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev) } group = iommu_group_get_for_dev(dev); - if (!group) - group = ERR_PTR(-ENOMEM); if (IS_ERR(group)) { ret = PTR_ERR(group); goto out_err; @@ -1278,13 +1206,27 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) /* Looks ok, so add the device to the domain */ ret = arm_smmu_domain_add_master(smmu_domain, fwspec); + /* + * Setup an autosuspend delay to avoid bouncing runpm state. + * Otherwise, if a driver for a suspended consumer device + * unmaps buffers, it will runpm resume/suspend for each one. + * + * For example, when used by a GPU device, when an application + * or game exits, it can trigger unmapping 100s or 1000s of + * buffers. With a runpm cycle for each buffer, that adds up + * to 5-10sec worth of reprogramming the context bank, while + * the system appears to be locked up to the user. 
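[The runtime-PM pattern this comment is describing, as a stripped-down sketch. This is generic driver code, not taken from the patch; the 20ms figure simply mirrors the autosuspend delay set just below.]

        #include <linux/pm_runtime.h>

        static void example_enable_autosuspend(struct device *dev)
        {
                pm_runtime_set_autosuspend_delay(dev, 20);      /* milliseconds */
                pm_runtime_use_autosuspend(dev);
        }

        static int example_touch_hw(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);             /* resume if suspended */

                if (ret < 0) {
                        pm_runtime_put_noidle(dev);
                        return ret;
                }

                /* ... program the hardware ... */

                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);        /* may suspend ~20ms later, not at once */
                return 0;
        }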
+ */ + pm_runtime_set_autosuspend_delay(smmu->dev, 20); + pm_runtime_use_autosuspend(smmu->dev); + rpm_put: arm_smmu_rpm_put(smmu); return ret; } static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -1301,7 +1243,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -1311,7 +1253,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, return 0; arm_smmu_rpm_get(smmu); - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); arm_smmu_rpm_put(smmu); return ret; @@ -1322,23 +1264,29 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->tlb_ops) { + if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->tlb_ops->tlb_flush_all(smmu_domain); + smmu_domain->flush_ops->tlb_flush_all(smmu_domain); arm_smmu_rpm_put(smmu); } } -static void arm_smmu_iotlb_sync(struct iommu_domain *domain) +static void arm_smmu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->tlb_ops) { - arm_smmu_rpm_get(smmu); - smmu_domain->tlb_ops->tlb_sync(smmu_domain); - arm_smmu_rpm_put(smmu); - } + if (!smmu) + return; + + arm_smmu_rpm_get(smmu); + if (smmu->version == ARM_SMMU_V2 || + smmu_domain->stage == ARM_SMMU_DOMAIN_S1) + arm_smmu_tlb_sync_context(smmu_domain); + else + arm_smmu_tlb_sync_global(smmu); + arm_smmu_rpm_put(smmu); } static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, @@ -1349,28 +1297,26 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, struct arm_smmu_cfg *cfg = &smmu_domain->cfg; struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; struct device *dev = smmu->dev; - void __iomem *cb_base; + void __iomem *reg; u32 tmp; u64 phys; unsigned long va, flags; - int ret; + int ret, idx = cfg->cbndx; ret = arm_smmu_rpm_get(smmu); if (ret < 0) return 0; - cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); - spin_lock_irqsave(&smmu_domain->cb_lock, flags); - /* ATS1 registers can only be written atomically */ va = iova & ~0xfffUL; - if (smmu->version == ARM_SMMU_V2) - smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR); - else /* Register is only 32-bit in v1 */ - writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR); + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) + arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va); + else + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va); - if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, - !(tmp & ATSR_ACTIVE), 5, 50)) { + reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR; + if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE), + 5, 50)) { spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); dev_err(dev, "iova to phys timed out on %pad. 
Falling back to software table walk.\n", @@ -1378,9 +1324,9 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, return ops->iova_to_phys(ops, iova); } - phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR); + phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR); spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); - if (phys & CB_PAR_F) { + if (phys & ARM_SMMU_CB_PAR_F) { dev_err(dev, "translation fault!\n"); dev_err(dev, "PAR = 0x%llx\n", phys); return 0; @@ -1426,16 +1372,11 @@ static bool arm_smmu_capable(enum iommu_cap cap) } } -static int arm_smmu_match_node(struct device *dev, const void *data) -{ - return dev->fwnode == data; -} - static struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) { - struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, - fwnode, arm_smmu_match_node); + struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver, + fwnode); put_device(dev); return dev ? dev_get_drvdata(dev) : NULL; } @@ -1466,8 +1407,8 @@ static int arm_smmu_add_device(struct device *dev) ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { - u16 sid = fwspec->ids[i]; - u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; + u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); + u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); if (sid & ~smmu->streamid_mask) { dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", @@ -1648,12 +1589,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) u32 mask, fwid = 0; if (args->args_count > 0) - fwid |= (u16)args->args[0]; + fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]); if (args->args_count > 1) - fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; + fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]); else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) - fwid |= (u16)mask << SMR_MASK_SHIFT; + fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask); return iommu_fwspec_add_ids(dev, &fwid, 1); } @@ -1674,15 +1615,6 @@ static void arm_smmu_get_resv_regions(struct device *dev, iommu_dma_get_resv_regions(dev, head); } -static void arm_smmu_put_resv_regions(struct device *dev, - struct list_head *head) -{ - struct iommu_resv_region *entry, *next; - - list_for_each_entry_safe(entry, next, head, list) - kfree(entry); -} - static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1700,19 +1632,18 @@ static struct iommu_ops arm_smmu_ops = { .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, - .put_resv_regions = arm_smmu_put_resv_regions, + .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) { - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); int i; - u32 reg, major; + u32 reg; /* clear global FSR */ - reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg); /* * Reset stream mapping groups: Initial values mark all SMRn as @@ -1721,76 +1652,50 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) for (i = 0; i < smmu->num_mapping_groups; ++i) arm_smmu_write_sme(smmu, i); - if (smmu->model == ARM_MMU500) { - /* - * Before clearing ARM_MMU500_ACTLR_CPRE, need to - * clear CACHE_LOCK bit of ACR first. 
And, CACHE_LOCK - * bit is only present in MMU-500r2 onwards. - */ - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7); - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK; - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR); - if (major >= 2) - reg &= ~ARM_MMU500_ACR_CACHE_LOCK; - /* - * Allow unmatched Stream IDs to allocate bypass - * TLB entries for reduced latency. - */ - reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN; - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR); - } - /* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i < smmu->num_context_banks; ++i) { - void __iomem *cb_base = ARM_SMMU_CB(smmu, i); - arm_smmu_write_context_bank(smmu, i); - writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); - /* - * Disable MMU-500's not-particularly-beneficial next-page - * prefetcher for the sake of errata #841119 and #826419. - */ - if (smmu->model == ARM_MMU500) { - reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR); - reg &= ~ARM_MMU500_ACTLR_CPRE; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR); - } + arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); } /* Invalidate the TLB, just in case */ - writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL); - reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0); /* Enable fault reporting */ - reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); + reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE | + ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE); /* Disable TLB broadcasting. */ - reg |= (sCR0_VMIDPNE | sCR0_PTM); + reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM); /* Enable client access, handling unmatched streams as appropriate */ - reg &= ~sCR0_CLIENTPD; + reg &= ~ARM_SMMU_sCR0_CLIENTPD; if (disable_bypass) - reg |= sCR0_USFCFG; + reg |= ARM_SMMU_sCR0_USFCFG; else - reg &= ~sCR0_USFCFG; + reg &= ~ARM_SMMU_sCR0_USFCFG; /* Disable forced broadcasting */ - reg &= ~sCR0_FB; + reg &= ~ARM_SMMU_sCR0_FB; /* Don't upgrade barriers */ - reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); + reg &= ~(ARM_SMMU_sCR0_BSU); if (smmu->features & ARM_SMMU_FEAT_VMID16) - reg |= sCR0_VMID16EN; + reg |= ARM_SMMU_sCR0_VMID16EN; if (smmu->features & ARM_SMMU_FEAT_EXIDS) - reg |= sCR0_EXIDENABLE; + reg |= ARM_SMMU_sCR0_EXIDENABLE; + + if (smmu->impl && smmu->impl->reset) + smmu->impl->reset(smmu); /* Push the button */ arm_smmu_tlb_sync_global(smmu); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg); } static int arm_smmu_id_size_to_bits(int size) @@ -1814,8 +1719,7 @@ static int arm_smmu_id_size_to_bits(int size) static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) { - unsigned long size; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + unsigned int size; u32 id; bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; int i; @@ -1825,25 +1729,25 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->version == ARM_SMMU_V2 ? 
2 : 1); /* ID0 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); + id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0); /* Restrict available stages based on module parameter */ if (force_stage == 1) - id &= ~(ID0_S2TS | ID0_NTS); + id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS); else if (force_stage == 2) - id &= ~(ID0_S1TS | ID0_NTS); + id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS); - if (id & ID0_S1TS) { + if (id & ARM_SMMU_ID0_S1TS) { smmu->features |= ARM_SMMU_FEAT_TRANS_S1; dev_notice(smmu->dev, "\tstage 1 translation\n"); } - if (id & ID0_S2TS) { + if (id & ARM_SMMU_ID0_S2TS) { smmu->features |= ARM_SMMU_FEAT_TRANS_S2; dev_notice(smmu->dev, "\tstage 2 translation\n"); } - if (id & ID0_NTS) { + if (id & ARM_SMMU_ID0_NTS) { smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; dev_notice(smmu->dev, "\tnested translation\n"); } @@ -1854,8 +1758,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENODEV; } - if ((id & ID0_S1TS) && - ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) { + if ((id & ARM_SMMU_ID0_S1TS) && + ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) { smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; dev_notice(smmu->dev, "\taddress translation ops\n"); } @@ -1866,7 +1770,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) * Fortunately, this also opens up a workaround for systems where the * ID register value has ended up configured incorrectly. */ - cttw_reg = !!(id & ID0_CTTW); + cttw_reg = !!(id & ARM_SMMU_ID0_CTTW); if (cttw_fw || cttw_reg) dev_notice(smmu->dev, "\t%scoherent table walk\n", cttw_fw ? "" : "non-"); @@ -1875,16 +1779,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) "\t(IDR0.CTTW overridden by FW configuration)\n"); /* Max. number of entries we have for stream matching/indexing */ - if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) { + if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) { smmu->features |= ARM_SMMU_FEAT_EXIDS; size = 1 << 16; } else { - size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); + size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id); } smmu->streamid_mask = size - 1; - if (id & ID0_SMS) { + if (id & ARM_SMMU_ID0_SMS) { smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; - size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; + size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id); if (size == 0) { dev_err(smmu->dev, "stream-matching supported, but no SMRs present!\n"); @@ -1898,7 +1802,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENOMEM; dev_notice(smmu->dev, - "\tstream matching with %lu register groups", size); + "\tstream matching with %u register groups", size); } /* s2cr->type == 0 means translation, so initialise explicitly */ smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), @@ -1912,59 +1816,49 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) mutex_init(&smmu->stream_map_mutex); spin_lock_init(&smmu->global_sync_lock); - if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { + if (smmu->version < ARM_SMMU_V2 || + !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) { smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; - if (!(id & ID0_PTFS_NO_AARCH32S)) + if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S)) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; } /* ID1 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); - smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; + id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1); + smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 
16 : 12; /* Check for size mismatch of SMMU address space from mapped region */ - size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); - size <<= smmu->pgshift; - if (smmu->cb_base != gr0_base + size) + size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1); + if (smmu->numpage != 2 * size << smmu->pgshift) dev_warn(smmu->dev, - "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n", - size * 2, (smmu->cb_base - gr0_base) * 2); + "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n", + 2 * size << smmu->pgshift, smmu->numpage); + /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */ + smmu->numpage = size; - smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; - smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; + smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id); + smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id); if (smmu->num_s2_context_banks > smmu->num_context_banks) { dev_err(smmu->dev, "impossible number of S2 context banks!\n"); return -ENODEV; } dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", smmu->num_context_banks, smmu->num_s2_context_banks); - /* - * Cavium CN88xx erratum #27704. - * Ensure ASID and VMID allocation is unique across all SMMUs in - * the system. - */ - if (smmu->model == CAVIUM_SMMUV2) { - smmu->cavium_id_base = - atomic_add_return(smmu->num_context_banks, - &cavium_smmu_context_count); - smmu->cavium_id_base -= smmu->num_context_banks; - dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); - } smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, sizeof(*smmu->cbs), GFP_KERNEL); if (!smmu->cbs) return -ENOMEM; /* ID2 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); - size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); + id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2); + size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id)); smmu->ipa_size = size; /* The output mask is also applied for bypass */ - size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); + size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id)); smmu->pa_size = size; - if (id & ID2_VMID16) + if (id & ARM_SMMU_ID2_VMID16) smmu->features |= ARM_SMMU_FEAT_VMID16; /* @@ -1981,13 +1875,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) if (smmu->version == ARM_SMMU_V1_64K) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; } else { - size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; + size = FIELD_GET(ARM_SMMU_ID2_UBS, id); smmu->va_size = arm_smmu_id_size_to_bits(size); - if (id & ID2_PTFS_4K) + if (id & ARM_SMMU_ID2_PTFS_4K) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; - if (id & ID2_PTFS_16K) + if (id & ARM_SMMU_ID2_PTFS_16K) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; - if (id & ID2_PTFS_64K) + if (id & ARM_SMMU_ID2_PTFS_64K) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; } @@ -2018,6 +1912,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", smmu->ipa_size, smmu->pa_size); + if (smmu->impl && smmu->impl->cfg_probe) + return smmu->impl->cfg_probe(smmu); + return 0; } @@ -2046,6 +1943,7 @@ static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 }, { }, }; +MODULE_DEVICE_TABLE(of, arm_smmu_of_match); #ifdef CONFIG_ACPI static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) @@ 
-2130,12 +2028,12 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, smmu->version = data->version; smmu->model = data->model; - parse_driver_options(smmu); - legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); if (legacy_binding && !using_generic_binding) { - if (!using_legacy_binding) - pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n"); + if (!using_legacy_binding) { + pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n", + IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU"); + } using_legacy_binding = true; } else if (!legacy_binding && !using_legacy_binding) { using_generic_binding = true; @@ -2150,25 +2048,50 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, return 0; } -static void arm_smmu_bus_init(void) +static int arm_smmu_bus_init(struct iommu_ops *ops) { + int err; + /* Oh, for a proper bus abstraction */ - if (!iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, &arm_smmu_ops); + if (!iommu_present(&platform_bus_type)) { + err = bus_set_iommu(&platform_bus_type, ops); + if (err) + return err; + } #ifdef CONFIG_ARM_AMBA - if (!iommu_present(&amba_bustype)) - bus_set_iommu(&amba_bustype, &arm_smmu_ops); + if (!iommu_present(&amba_bustype)) { + err = bus_set_iommu(&amba_bustype, ops); + if (err) + goto err_reset_platform_ops; + } #endif #ifdef CONFIG_PCI if (!iommu_present(&pci_bus_type)) { - pci_request_acs(); - bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + err = bus_set_iommu(&pci_bus_type, ops); + if (err) + goto err_reset_amba_ops; } #endif #ifdef CONFIG_FSL_MC_BUS - if (!iommu_present(&fsl_mc_bus_type)) - bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops); + if (!iommu_present(&fsl_mc_bus_type)) { + err = bus_set_iommu(&fsl_mc_bus_type, ops); + if (err) + goto err_reset_pci_ops; + } +#endif + return 0; + +err_reset_pci_ops: __maybe_unused; +#ifdef CONFIG_PCI + bus_set_iommu(&pci_bus_type, NULL); +#endif +err_reset_amba_ops: __maybe_unused; +#ifdef CONFIG_ARM_AMBA + bus_set_iommu(&amba_bustype, NULL); #endif +err_reset_platform_ops: __maybe_unused; + bus_set_iommu(&platform_bus_type, NULL); + return err; } static int arm_smmu_device_probe(struct platform_device *pdev) @@ -2194,12 +2117,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; + smmu = arm_smmu_impl_init(smmu); + if (IS_ERR(smmu)) + return PTR_ERR(smmu); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ioaddr = res->start; smmu->base = devm_ioremap_resource(dev, res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); - smmu->cb_base = smmu->base + resource_size(res) / 2; + /* + * The resource size should effectively match the value of SMMU_TOP; + * stash that temporarily until we know PAGESIZE to validate it with. + */ + smmu->numpage = resource_size(res); num_irqs = 0; while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { @@ -2224,10 +2155,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev) for (i = 0; i < num_irqs; ++i) { int irq = platform_get_irq(pdev, i); - if (irq < 0) { - dev_err(dev, "failed to get irq index %d\n", i); + if (irq < 0) return -ENODEV; - } smmu->irqs[i] = irq; } @@ -2308,38 +2237,28 @@ static int arm_smmu_device_probe(struct platform_device *pdev) * ready to handle default domain setup as soon as any SMMU exists. 
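[The reworked arm_smmu_bus_init() above registers the ops on each bus in turn and unwinds on failure. The same idea with the #ifdef maze and extra buses stripped out for clarity; the function name and the choice of buses here are illustrative only.]

        static int example_bus_init(struct iommu_ops *ops)
        {
                int err;

                err = bus_set_iommu(&platform_bus_type, ops);
                if (err)
                        return err;

                err = bus_set_iommu(&pci_bus_type, ops);
                if (err)
                        goto err_reset_platform;

                return 0;

        err_reset_platform:
                /* NULL detaches the ops again, as the error paths above rely on */
                bus_set_iommu(&platform_bus_type, NULL);
                return err;
        }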
*/ if (!using_legacy_binding) - arm_smmu_bus_init(); + return arm_smmu_bus_init(&arm_smmu_ops); return 0; } -/* - * With the legacy DT binding in play, though, we have no guarantees about - * probe order, but then we're also not doing default domains, so we can - * delay setting bus ops until we're sure every possible SMMU is ready, - * and that way ensure that no add_device() calls get missed. - */ -static int arm_smmu_legacy_bus_init(void) -{ - if (using_legacy_binding) - arm_smmu_bus_init(); - return 0; -} -device_initcall_sync(arm_smmu_legacy_bus_init); - -static void arm_smmu_device_shutdown(struct platform_device *pdev) +static int arm_smmu_device_remove(struct platform_device *pdev) { struct arm_smmu_device *smmu = platform_get_drvdata(pdev); if (!smmu) - return; + return -ENODEV; if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) dev_err(&pdev->dev, "removing device with active domains!\n"); + arm_smmu_bus_init(NULL); + iommu_device_unregister(&smmu->iommu); + iommu_device_sysfs_remove(&smmu->iommu); + arm_smmu_rpm_get(smmu); /* Turn the thing off */ - writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD); arm_smmu_rpm_put(smmu); if (pm_runtime_enabled(smmu->dev)) @@ -2348,6 +2267,12 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev) clk_bulk_disable(smmu->num_clks, smmu->clks); clk_bulk_unprepare(smmu->num_clks, smmu->clks); + return 0; +} + +static void arm_smmu_device_shutdown(struct platform_device *pdev) +{ + arm_smmu_device_remove(pdev); } static int __maybe_unused arm_smmu_runtime_resume(struct device *dev) @@ -2398,11 +2323,17 @@ static const struct dev_pm_ops arm_smmu_pm_ops = { static struct platform_driver arm_smmu_driver = { .driver = { .name = "arm-smmu", - .of_match_table = of_match_ptr(arm_smmu_of_match), + .of_match_table = arm_smmu_of_match, .pm = &arm_smmu_pm_ops, - .suppress_bind_attrs = true, + .suppress_bind_attrs = true, }, .probe = arm_smmu_device_probe, + .remove = arm_smmu_device_remove, .shutdown = arm_smmu_device_shutdown, }; -builtin_platform_driver(arm_smmu_driver); +module_platform_driver(arm_smmu_driver); + +MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); +MODULE_AUTHOR("Will Deacon <will@kernel.org>"); +MODULE_ALIAS("platform:arm-smmu"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h new file mode 100644 index 000000000000..8d1cd54d82a6 --- /dev/null +++ b/drivers/iommu/arm-smmu.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * IOMMU API for ARM architected SMMU implementations. 
+ * + * Copyright (C) 2013 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#ifndef _ARM_SMMU_H +#define _ARM_SMMU_H + +#include <linux/atomic.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/io-pgtable.h> +#include <linux/iommu.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +/* Configuration registers */ +#define ARM_SMMU_GR0_sCR0 0x0 +#define ARM_SMMU_sCR0_VMID16EN BIT(31) +#define ARM_SMMU_sCR0_BSU GENMASK(15, 14) +#define ARM_SMMU_sCR0_FB BIT(13) +#define ARM_SMMU_sCR0_PTM BIT(12) +#define ARM_SMMU_sCR0_VMIDPNE BIT(11) +#define ARM_SMMU_sCR0_USFCFG BIT(10) +#define ARM_SMMU_sCR0_GCFGFIE BIT(5) +#define ARM_SMMU_sCR0_GCFGFRE BIT(4) +#define ARM_SMMU_sCR0_EXIDENABLE BIT(3) +#define ARM_SMMU_sCR0_GFIE BIT(2) +#define ARM_SMMU_sCR0_GFRE BIT(1) +#define ARM_SMMU_sCR0_CLIENTPD BIT(0) + +/* Auxiliary Configuration register */ +#define ARM_SMMU_GR0_sACR 0x10 + +/* Identification registers */ +#define ARM_SMMU_GR0_ID0 0x20 +#define ARM_SMMU_ID0_S1TS BIT(30) +#define ARM_SMMU_ID0_S2TS BIT(29) +#define ARM_SMMU_ID0_NTS BIT(28) +#define ARM_SMMU_ID0_SMS BIT(27) +#define ARM_SMMU_ID0_ATOSNS BIT(26) +#define ARM_SMMU_ID0_PTFS_NO_AARCH32 BIT(25) +#define ARM_SMMU_ID0_PTFS_NO_AARCH32S BIT(24) +#define ARM_SMMU_ID0_NUMIRPT GENMASK(23, 16) +#define ARM_SMMU_ID0_CTTW BIT(14) +#define ARM_SMMU_ID0_NUMSIDB GENMASK(12, 9) +#define ARM_SMMU_ID0_EXIDS BIT(8) +#define ARM_SMMU_ID0_NUMSMRG GENMASK(7, 0) + +#define ARM_SMMU_GR0_ID1 0x24 +#define ARM_SMMU_ID1_PAGESIZE BIT(31) +#define ARM_SMMU_ID1_NUMPAGENDXB GENMASK(30, 28) +#define ARM_SMMU_ID1_NUMS2CB GENMASK(23, 16) +#define ARM_SMMU_ID1_NUMCB GENMASK(7, 0) + +#define ARM_SMMU_GR0_ID2 0x28 +#define ARM_SMMU_ID2_VMID16 BIT(15) +#define ARM_SMMU_ID2_PTFS_64K BIT(14) +#define ARM_SMMU_ID2_PTFS_16K BIT(13) +#define ARM_SMMU_ID2_PTFS_4K BIT(12) +#define ARM_SMMU_ID2_UBS GENMASK(11, 8) +#define ARM_SMMU_ID2_OAS GENMASK(7, 4) +#define ARM_SMMU_ID2_IAS GENMASK(3, 0) + +#define ARM_SMMU_GR0_ID3 0x2c +#define ARM_SMMU_GR0_ID4 0x30 +#define ARM_SMMU_GR0_ID5 0x34 +#define ARM_SMMU_GR0_ID6 0x38 + +#define ARM_SMMU_GR0_ID7 0x3c +#define ARM_SMMU_ID7_MAJOR GENMASK(7, 4) +#define ARM_SMMU_ID7_MINOR GENMASK(3, 0) + +#define ARM_SMMU_GR0_sGFSR 0x48 +#define ARM_SMMU_sGFSR_USF BIT(1) + +#define ARM_SMMU_GR0_sGFSYNR0 0x50 +#define ARM_SMMU_GR0_sGFSYNR1 0x54 +#define ARM_SMMU_GR0_sGFSYNR2 0x58 + +/* Global TLB invalidation */ +#define ARM_SMMU_GR0_TLBIVMID 0x64 +#define ARM_SMMU_GR0_TLBIALLNSNH 0x68 +#define ARM_SMMU_GR0_TLBIALLH 0x6c +#define ARM_SMMU_GR0_sTLBGSYNC 0x70 + +#define ARM_SMMU_GR0_sTLBGSTATUS 0x74 +#define ARM_SMMU_sTLBGSTATUS_GSACTIVE BIT(0) + +/* Stream mapping registers */ +#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) +#define ARM_SMMU_SMR_VALID BIT(31) +#define ARM_SMMU_SMR_MASK GENMASK(31, 16) +#define ARM_SMMU_SMR_ID GENMASK(15, 0) + +#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) +#define ARM_SMMU_S2CR_PRIVCFG GENMASK(25, 24) +enum arm_smmu_s2cr_privcfg { + S2CR_PRIVCFG_DEFAULT, + S2CR_PRIVCFG_DIPAN, + S2CR_PRIVCFG_UNPRIV, + S2CR_PRIVCFG_PRIV, +}; +#define ARM_SMMU_S2CR_TYPE GENMASK(17, 16) +enum arm_smmu_s2cr_type { + S2CR_TYPE_TRANS, + S2CR_TYPE_BYPASS, + S2CR_TYPE_FAULT, +}; +#define ARM_SMMU_S2CR_EXIDVALID BIT(10) +#define ARM_SMMU_S2CR_CBNDX GENMASK(7, 0) + +/* Context bank attribute registers */ +#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) +#define 
ARM_SMMU_CBAR_IRPTNDX GENMASK(31, 24) +#define ARM_SMMU_CBAR_TYPE GENMASK(17, 16) +enum arm_smmu_cbar_type { + CBAR_TYPE_S2_TRANS, + CBAR_TYPE_S1_TRANS_S2_BYPASS, + CBAR_TYPE_S1_TRANS_S2_FAULT, + CBAR_TYPE_S1_TRANS_S2_TRANS, +}; +#define ARM_SMMU_CBAR_S1_MEMATTR GENMASK(15, 12) +#define ARM_SMMU_CBAR_S1_MEMATTR_WB 0xf +#define ARM_SMMU_CBAR_S1_BPSHCFG GENMASK(9, 8) +#define ARM_SMMU_CBAR_S1_BPSHCFG_NSH 3 +#define ARM_SMMU_CBAR_VMID GENMASK(7, 0) + +#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) + +#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) +#define ARM_SMMU_CBA2R_VMID16 GENMASK(31, 16) +#define ARM_SMMU_CBA2R_VA64 BIT(0) + +#define ARM_SMMU_CB_SCTLR 0x0 +#define ARM_SMMU_SCTLR_S1_ASIDPNE BIT(12) +#define ARM_SMMU_SCTLR_CFCFG BIT(7) +#define ARM_SMMU_SCTLR_CFIE BIT(6) +#define ARM_SMMU_SCTLR_CFRE BIT(5) +#define ARM_SMMU_SCTLR_E BIT(4) +#define ARM_SMMU_SCTLR_AFE BIT(2) +#define ARM_SMMU_SCTLR_TRE BIT(1) +#define ARM_SMMU_SCTLR_M BIT(0) + +#define ARM_SMMU_CB_ACTLR 0x4 + +#define ARM_SMMU_CB_RESUME 0x8 +#define ARM_SMMU_RESUME_TERMINATE BIT(0) + +#define ARM_SMMU_CB_TCR2 0x10 +#define ARM_SMMU_TCR2_SEP GENMASK(17, 15) +#define ARM_SMMU_TCR2_SEP_UPSTREAM 0x7 +#define ARM_SMMU_TCR2_AS BIT(4) +#define ARM_SMMU_TCR2_PASIZE GENMASK(3, 0) + +#define ARM_SMMU_CB_TTBR0 0x20 +#define ARM_SMMU_CB_TTBR1 0x28 +#define ARM_SMMU_TTBRn_ASID GENMASK_ULL(63, 48) + +#define ARM_SMMU_CB_TCR 0x30 +#define ARM_SMMU_TCR_EAE BIT(31) +#define ARM_SMMU_TCR_EPD1 BIT(23) +#define ARM_SMMU_TCR_TG0 GENMASK(15, 14) +#define ARM_SMMU_TCR_SH0 GENMASK(13, 12) +#define ARM_SMMU_TCR_ORGN0 GENMASK(11, 10) +#define ARM_SMMU_TCR_IRGN0 GENMASK(9, 8) +#define ARM_SMMU_TCR_T0SZ GENMASK(5, 0) + +#define ARM_SMMU_VTCR_RES1 BIT(31) +#define ARM_SMMU_VTCR_PS GENMASK(18, 16) +#define ARM_SMMU_VTCR_TG0 ARM_SMMU_TCR_TG0 +#define ARM_SMMU_VTCR_SH0 ARM_SMMU_TCR_SH0 +#define ARM_SMMU_VTCR_ORGN0 ARM_SMMU_TCR_ORGN0 +#define ARM_SMMU_VTCR_IRGN0 ARM_SMMU_TCR_IRGN0 +#define ARM_SMMU_VTCR_SL0 GENMASK(7, 6) +#define ARM_SMMU_VTCR_T0SZ ARM_SMMU_TCR_T0SZ + +#define ARM_SMMU_CB_CONTEXTIDR 0x34 +#define ARM_SMMU_CB_S1_MAIR0 0x38 +#define ARM_SMMU_CB_S1_MAIR1 0x3c + +#define ARM_SMMU_CB_PAR 0x50 +#define ARM_SMMU_CB_PAR_F BIT(0) + +#define ARM_SMMU_CB_FSR 0x58 +#define ARM_SMMU_FSR_MULTI BIT(31) +#define ARM_SMMU_FSR_SS BIT(30) +#define ARM_SMMU_FSR_UUT BIT(8) +#define ARM_SMMU_FSR_ASF BIT(7) +#define ARM_SMMU_FSR_TLBLKF BIT(6) +#define ARM_SMMU_FSR_TLBMCF BIT(5) +#define ARM_SMMU_FSR_EF BIT(4) +#define ARM_SMMU_FSR_PF BIT(3) +#define ARM_SMMU_FSR_AFF BIT(2) +#define ARM_SMMU_FSR_TF BIT(1) + +#define ARM_SMMU_FSR_IGN (ARM_SMMU_FSR_AFF | \ + ARM_SMMU_FSR_ASF | \ + ARM_SMMU_FSR_TLBMCF | \ + ARM_SMMU_FSR_TLBLKF) + +#define ARM_SMMU_FSR_FAULT (ARM_SMMU_FSR_MULTI | \ + ARM_SMMU_FSR_SS | \ + ARM_SMMU_FSR_UUT | \ + ARM_SMMU_FSR_EF | \ + ARM_SMMU_FSR_PF | \ + ARM_SMMU_FSR_TF | \ + ARM_SMMU_FSR_IGN) + +#define ARM_SMMU_CB_FAR 0x60 + +#define ARM_SMMU_CB_FSYNR0 0x68 +#define ARM_SMMU_FSYNR0_WNR BIT(4) + +#define ARM_SMMU_CB_S1_TLBIVA 0x600 +#define ARM_SMMU_CB_S1_TLBIASID 0x610 +#define ARM_SMMU_CB_S1_TLBIVAL 0x620 +#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 +#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 +#define ARM_SMMU_CB_TLBSYNC 0x7f0 +#define ARM_SMMU_CB_TLBSTATUS 0x7f4 +#define ARM_SMMU_CB_ATS1PR 0x800 + +#define ARM_SMMU_CB_ATSR 0x8f0 +#define ARM_SMMU_ATSR_ACTIVE BIT(0) + + +/* Maximum number of context banks per SMMU */ +#define ARM_SMMU_MAX_CBS 128 + + +/* Shared driver definitions */ +enum arm_smmu_arch_version { + ARM_SMMU_V1, + 
ARM_SMMU_V1_64K, + ARM_SMMU_V2, +}; + +enum arm_smmu_implementation { + GENERIC_SMMU, + ARM_MMU500, + CAVIUM_SMMUV2, + QCOM_SMMUV2, +}; + +struct arm_smmu_device { + struct device *dev; + + void __iomem *base; + unsigned int numpage; + unsigned int pgshift; + +#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) +#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) +#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) +#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) +#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) +#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) +#define ARM_SMMU_FEAT_VMID16 (1 << 6) +#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7) +#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8) +#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) +#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) +#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) +#define ARM_SMMU_FEAT_EXIDS (1 << 12) + u32 features; + + enum arm_smmu_arch_version version; + enum arm_smmu_implementation model; + const struct arm_smmu_impl *impl; + + u32 num_context_banks; + u32 num_s2_context_banks; + DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); + struct arm_smmu_cb *cbs; + atomic_t irptndx; + + u32 num_mapping_groups; + u16 streamid_mask; + u16 smr_mask_mask; + struct arm_smmu_smr *smrs; + struct arm_smmu_s2cr *s2crs; + struct mutex stream_map_mutex; + + unsigned long va_size; + unsigned long ipa_size; + unsigned long pa_size; + unsigned long pgsize_bitmap; + + u32 num_global_irqs; + u32 num_context_irqs; + unsigned int *irqs; + struct clk_bulk_data *clks; + int num_clks; + + spinlock_t global_sync_lock; + + /* IOMMU core code handle */ + struct iommu_device iommu; +}; + +enum arm_smmu_context_fmt { + ARM_SMMU_CTX_FMT_NONE, + ARM_SMMU_CTX_FMT_AARCH64, + ARM_SMMU_CTX_FMT_AARCH32_L, + ARM_SMMU_CTX_FMT_AARCH32_S, +}; + +struct arm_smmu_cfg { + u8 cbndx; + u8 irptndx; + union { + u16 asid; + u16 vmid; + }; + enum arm_smmu_cbar_type cbar; + enum arm_smmu_context_fmt fmt; +}; +#define ARM_SMMU_INVALID_IRPTNDX 0xff + +enum arm_smmu_domain_stage { + ARM_SMMU_DOMAIN_S1 = 0, + ARM_SMMU_DOMAIN_S2, + ARM_SMMU_DOMAIN_NESTED, + ARM_SMMU_DOMAIN_BYPASS, +}; + +struct arm_smmu_domain { + struct arm_smmu_device *smmu; + struct io_pgtable_ops *pgtbl_ops; + const struct iommu_flush_ops *flush_ops; + struct arm_smmu_cfg cfg; + enum arm_smmu_domain_stage stage; + bool non_strict; + struct mutex init_mutex; /* Protects smmu pointer */ + spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ + struct iommu_domain domain; +}; + +static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg) +{ + return ARM_SMMU_TCR_EPD1 | + FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) | + FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) | + FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) | + FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) | + FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz); +} + +static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg) +{ + return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) | + FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM); +} + +static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg) +{ + return ARM_SMMU_VTCR_RES1 | + FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) | + FIELD_PREP(ARM_SMMU_VTCR_TG0, cfg->arm_lpae_s2_cfg.vtcr.tg) | + FIELD_PREP(ARM_SMMU_VTCR_SH0, cfg->arm_lpae_s2_cfg.vtcr.sh) | + FIELD_PREP(ARM_SMMU_VTCR_ORGN0, cfg->arm_lpae_s2_cfg.vtcr.orgn) | + FIELD_PREP(ARM_SMMU_VTCR_IRGN0, cfg->arm_lpae_s2_cfg.vtcr.irgn) | + FIELD_PREP(ARM_SMMU_VTCR_SL0, 
cfg->arm_lpae_s2_cfg.vtcr.sl) | + FIELD_PREP(ARM_SMMU_VTCR_T0SZ, cfg->arm_lpae_s2_cfg.vtcr.tsz); +} + +/* Implementation details, yay! */ +struct arm_smmu_impl { + u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset); + void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset, + u32 val); + u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset); + void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset, + u64 val); + int (*cfg_probe)(struct arm_smmu_device *smmu); + int (*reset)(struct arm_smmu_device *smmu); + int (*init_context)(struct arm_smmu_domain *smmu_domain); + void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync, + int status); +}; + +static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) +{ + return smmu->base + (n << smmu->pgshift); +} + +static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset) +{ + if (smmu->impl && unlikely(smmu->impl->read_reg)) + return smmu->impl->read_reg(smmu, page, offset); + return readl_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page, + int offset, u32 val) +{ + if (smmu->impl && unlikely(smmu->impl->write_reg)) + smmu->impl->write_reg(smmu, page, offset, val); + else + writel_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset) +{ + if (smmu->impl && unlikely(smmu->impl->read_reg64)) + return smmu->impl->read_reg64(smmu, page, offset); + return readq_relaxed(arm_smmu_page(smmu, page) + offset); +} + +static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, + int offset, u64 val) +{ + if (smmu->impl && unlikely(smmu->impl->write_reg64)) + smmu->impl->write_reg64(smmu, page, offset, val); + else + writeq_relaxed(val, arm_smmu_page(smmu, page) + offset); +} + +#define ARM_SMMU_GR0 0 +#define ARM_SMMU_GR1 1 +#define ARM_SMMU_CB(s, n) ((s)->numpage + (n)) + +#define arm_smmu_gr0_read(s, o) \ + arm_smmu_readl((s), ARM_SMMU_GR0, (o)) +#define arm_smmu_gr0_write(s, o, v) \ + arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v)) + +#define arm_smmu_gr1_read(s, o) \ + arm_smmu_readl((s), ARM_SMMU_GR1, (o)) +#define arm_smmu_gr1_write(s, o, v) \ + arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v)) + +#define arm_smmu_cb_read(s, n, o) \ + arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o)) +#define arm_smmu_cb_write(s, n, o, v) \ + arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v)) +#define arm_smmu_cb_readq(s, n, o) \ + arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o)) +#define arm_smmu_cb_writeq(s, n, o, v) \ + arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) + +struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); +struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu); + +int arm_mmu500_reset(struct arm_smmu_device *smmu); + +#endif /* _ARM_SMMU_H */ diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index d991d40f797f..a2e96a5fd9a7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -19,9 +19,11 @@ #include <linux/iova.h> #include <linux/irq.h> #include <linux/mm.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> +#include <linux/crash_dump.h> struct iommu_dma_msi_page { struct list_head list; @@ -43,7 +45,6 @@ struct iommu_dma_cookie { dma_addr_t msi_iova; }; struct list_head msi_page_list; - spinlock_t msi_lock; /* Domain for flush 
queue callback; NULL if flush queue not in use */ struct iommu_domain *fq_domain; @@ -62,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (cookie) { - spin_lock_init(&cookie->msi_lock); INIT_LIST_HEAD(&cookie->msi_page_list); cookie->type = type; } @@ -303,13 +303,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn; + struct iova_domain *iovad; int attr; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; + iovad = &cookie->iovad; + /* Use the smallest supported page size for IOVA granularity */ order = __ffs(domain->pgsize_bitmap); base_pfn = max_t(unsigned long, 1, base >> order); @@ -351,6 +353,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, return iova_reserve_iommu_regions(dev, domain); } +static int iommu_dma_deferred_attach(struct device *dev, + struct iommu_domain *domain) +{ + const struct iommu_ops *ops = domain->ops; + + if (!is_kdump_kernel()) + return 0; + + if (unlikely(ops->is_attach_deferred && + ops->is_attach_deferred(domain, dev))) + return iommu_attach_device(domain, dev); + + return 0; +} + /** * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API * page flags. @@ -381,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, } static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, - size_t size, dma_addr_t dma_limit, struct device *dev) + size_t size, u64 dma_limit, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; @@ -403,11 +420,10 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) iova_len = roundup_pow_of_two(iova_len); - if (dev->bus_dma_mask) - dma_limit &= dev->bus_dma_mask; + dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); if (domain->geometry.force_aperture) - dma_limit = min(dma_limit, domain->geometry.aperture_end); + dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); /* Try to get PCI devices a SAC address */ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) @@ -444,18 +460,23 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; size_t iova_off = iova_offset(iovad, dma_addr); + struct iommu_iotlb_gather iotlb_gather; + size_t unmapped; dma_addr -= iova_off; size = iova_align(iovad, size + iova_off); + iommu_iotlb_gather_init(&iotlb_gather); + + unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather); + WARN_ON(unmapped != size); - WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size); if (!cookie->fq_domain) - iommu_tlb_sync(domain); + iommu_tlb_sync(domain, &iotlb_gather); iommu_dma_free_iova(cookie, dma_addr, size); } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, - size_t size, int prot) + size_t size, int prot, u64 dma_mask) { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; @@ -463,13 +484,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t iova_off = iova_offset(iovad, phys); dma_addr_t iova; + if 
(unlikely(iommu_dma_deferred_attach(dev, domain))) + return DMA_MAPPING_ERROR; + size = iova_align(iovad, size + iova_off); - iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); + iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev); if (!iova) return DMA_MAPPING_ERROR; - if (iommu_map(domain, iova, phys - iova_off, size, prot)) { + if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { iommu_dma_free_iova(cookie, iova, size); return DMA_MAPPING_ERROR; } @@ -541,15 +565,6 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev, return pages; } -static struct page **__iommu_dma_get_pages(void *cpu_addr) -{ - struct vm_struct *area = find_vm_area(cpu_addr); - - if (!area || !area->pages) - return NULL; - return area->pages; -} - /** * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space * @dev: Device to allocate memory for. Must be a real device @@ -581,6 +596,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, *dma_handle = DMA_MAPPING_ERROR; + if (unlikely(iommu_dma_deferred_attach(dev, domain))) + return NULL; + min_size = alloc_sizes & -alloc_sizes; if (min_size < PAGE_SIZE) { min_size = PAGE_SIZE; @@ -613,11 +631,11 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, arch_dma_prep_coherent(sg_page(sg), sg->length); } - if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) + if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) < size) goto out_free_sg; - vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, + vaddr = dma_common_pages_remap(pages, size, prot, __builtin_return_address(0)); if (!vaddr) goto out_unmap; @@ -661,7 +679,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); - arch_sync_dma_for_cpu(dev, phys, size, dir); + arch_sync_dma_for_cpu(phys, size, dir); } static void iommu_dma_sync_single_for_device(struct device *dev, @@ -673,7 +691,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); - arch_sync_dma_for_device(dev, phys, size, dir); + arch_sync_dma_for_device(phys, size, dir); } static void iommu_dma_sync_sg_for_cpu(struct device *dev, @@ -687,7 +705,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, return; for_each_sg(sgl, sg, nelems, i) - arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); + arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); } static void iommu_dma_sync_sg_for_device(struct device *dev, @@ -701,7 +719,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, return; for_each_sg(sgl, sg, nelems, i) - arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); + arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); } static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, @@ -713,10 +731,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dma_handle; - dma_handle =__iommu_dma_map(dev, phys, size, prot); + dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev)); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && dma_handle != DMA_MAPPING_ERROR) - arch_sync_dma_for_device(dev, phys, size, dir); + arch_sync_dma_for_device(phys, size, dir); return dma_handle; } @@ -823,6 +841,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, unsigned long mask = 
dma_get_seg_boundary(dev); int i; + if (unlikely(iommu_dma_deferred_attach(dev, domain))) + return 0; + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_device(dev, sg, nents, dir); @@ -873,7 +894,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, * We'll leave any physical concatenation to the IOMMU driver's * implementation - it knows better than we do. */ - if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) + if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) goto out_free_iova; return __finalise_sg(dev, sg, nents, iova); @@ -913,7 +934,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs) { return __iommu_dma_map(dev, phys, size, - dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); + dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, + dma_get_mask(dev)); } static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, @@ -938,10 +960,10 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) * If it the address is remapped, then it's either non-coherent * or highmem CMA, or an iommu_dma_alloc_remap() construction. */ - pages = __iommu_dma_get_pages(cpu_addr); + pages = dma_common_find_pages(cpu_addr); if (!pages) page = vmalloc_to_page(cpu_addr); - dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP); + dma_common_free_remap(cpu_addr, alloc_size); } else { /* Lowmem means a coherent atomic or CMA allocation */ page = virt_to_page(cpu_addr); @@ -965,18 +987,21 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, { bool coherent = dev_is_dma_coherent(dev); size_t alloc_size = PAGE_ALIGN(size); + int node = dev_to_node(dev); struct page *page = NULL; void *cpu_addr; page = dma_alloc_contiguous(dev, alloc_size, gfp); if (!page) + page = alloc_pages_node(node, gfp, get_order(alloc_size)); + if (!page) return NULL; if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); cpu_addr = dma_common_contiguous_remap(page, alloc_size, - VM_USERMAP, prot, __builtin_return_address(0)); + prot, __builtin_return_address(0)); if (!cpu_addr) goto out_free_pages; @@ -1016,7 +1041,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, if (!cpu_addr) return NULL; - *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot); + *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, + dev->coherent_dma_mask); if (*handle == DMA_MAPPING_ERROR) { __iommu_dma_free(dev, size, cpu_addr); return NULL; @@ -1042,7 +1068,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, return -ENXIO; if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { - struct page **pages = __iommu_dma_get_pages(cpu_addr); + struct page **pages = dma_common_find_pages(cpu_addr); if (pages) return __iommu_dma_mmap(pages, size, vma); @@ -1064,7 +1090,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, int ret; if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { - struct page **pages = __iommu_dma_get_pages(cpu_addr); + struct page **pages = dma_common_find_pages(cpu_addr); if (pages) { return sg_alloc_table_from_pages(sgt, pages, @@ -1083,6 +1109,13 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, return ret; } +static unsigned long iommu_dma_get_merge_boundary(struct device *dev) +{ + struct iommu_domain *domain = 
iommu_get_dma_domain(dev); + + return (1UL << __ffs(domain->pgsize_bitmap)) - 1; +} + static const struct dma_map_ops iommu_dma_ops = { .alloc = iommu_dma_alloc, .free = iommu_dma_free, @@ -1098,6 +1131,7 @@ static const struct dma_map_ops iommu_dma_ops = { .sync_sg_for_device = iommu_dma_sync_sg_for_device, .map_resource = iommu_dma_map_resource, .unmap_resource = iommu_dma_unmap_resource, + .get_merge_boundary = iommu_dma_get_merge_boundary, }; /* @@ -1141,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (msi_page->phys == msi_addr) return msi_page; - msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); + msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); if (!msi_page) return NULL; @@ -1169,25 +1203,22 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { struct device *dev = msi_desc_to_dev(desc); struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_dma_cookie *cookie; struct iommu_dma_msi_page *msi_page; - unsigned long flags; + static DEFINE_MUTEX(msi_prepare_lock); /* see below */ if (!domain || !domain->iova_cookie) { desc->iommu_cookie = NULL; return 0; } - cookie = domain->iova_cookie; - /* - * We disable IRQs to rule out a possible inversion against - * irq_desc_lock if, say, someone tries to retarget the affinity - * of an MSI from within an IPI handler. + * In fact the whole prepare operation should already be serialised by + * irq_domain_mutex further up the callchain, but that's pretty subtle + * on its own, so consider this locking as failsafe documentation... */ - spin_lock_irqsave(&cookie->msi_lock, flags); + mutex_lock(&msi_prepare_lock); msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); - spin_unlock_irqrestore(&cookie->msi_lock, flags); + mutex_unlock(&msi_prepare_lock); msi_desc_set_iommu_cookie(desc, msi_page); diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 5d0754ed5fa0..071bb42bbbc5 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -244,7 +244,7 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) || (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE && (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL && - info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) { + info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) { pr_warn("Device scope type does not match for %s\n", pci_name(info->dev)); return -EINVAL; @@ -895,8 +895,11 @@ int __init detect_intel_iommu(void) } #ifdef CONFIG_X86 - if (!ret) + if (!ret) { x86_init.iommu.iommu_init = intel_iommu_init; + x86_platform.iommu_shutdown = intel_iommu_shutdown; + } + #endif if (dmar_tbl) { @@ -1351,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, struct qi_desc desc; if (mask) { - WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)); addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else @@ -1368,6 +1370,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, qi_submit_sync(&desc, iommu); } +/* PASID-based IOTLB invalidation */ +void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, + unsigned long npages, bool ih) +{ + struct qi_desc desc = {.qw2 = 0, .qw3 = 0}; + + /* + * npages == -1 means a PASID-selective invalidation, otherwise, + * a positive value for Page-selective-within-PASID invalidation. + * 0 is not a valid input. 
+ */ + if (WARN_ON(!npages)) { + pr_err("Invalid input npages = %ld\n", npages); + return; + } + + if (npages == -1) { + desc.qw0 = QI_EIOTLB_PASID(pasid) | + QI_EIOTLB_DID(did) | + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | + QI_EIOTLB_TYPE; + desc.qw1 = 0; + } else { + int mask = ilog2(__roundup_pow_of_two(npages)); + unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask)); + + if (WARN_ON_ONCE(!ALIGN(addr, align))) + addr &= ~(align - 1); + + desc.qw0 = QI_EIOTLB_PASID(pasid) | + QI_EIOTLB_DID(did) | + QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | + QI_EIOTLB_TYPE; + desc.qw1 = QI_EIOTLB_ADDR(addr) | + QI_EIOTLB_IH(ih) | + QI_EIOTLB_AM(mask); + } + + qi_submit_sync(&desc, iommu); +} + /* * Disable Queued Invalidation interface. */ @@ -1519,6 +1562,64 @@ static const char *dma_remap_fault_reasons[] = "PCE for translation request specifies blocking", }; +static const char * const dma_remap_sm_fault_reasons[] = { + "SM: Invalid Root Table Address", + "SM: TTM 0 for request with PASID", + "SM: TTM 0 for page group request", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */ + "SM: Error attempting to access Root Entry", + "SM: Present bit in Root Entry is clear", + "SM: Non-zero reserved field set in Root Entry", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */ + "SM: Error attempting to access Context Entry", + "SM: Present bit in Context Entry is clear", + "SM: Non-zero reserved field set in the Context Entry", + "SM: Invalid Context Entry", + "SM: DTE field in Context Entry is clear", + "SM: PASID Enable field in Context Entry is clear", + "SM: PASID is larger than the max in Context Entry", + "SM: PRE field in Context-Entry is clear", + "SM: RID_PASID field error in Context-Entry", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */ + "SM: Error attempting to access the PASID Directory Entry", + "SM: Present bit in Directory Entry is clear", + "SM: Non-zero reserved field set in PASID Directory Entry", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */ + "SM: Error attempting to access PASID Table Entry", + "SM: Present bit in PASID Table Entry is clear", + "SM: Non-zero reserved field set in PASID Table Entry", + "SM: Invalid Scalable-Mode PASID Table Entry", + "SM: ERE field is clear in PASID Table Entry", + "SM: SRE field is clear in PASID Table Entry", + "Unknown", "Unknown",/* 0x5E-0x5F */ + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */ + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */ + "SM: Error attempting to access first-level paging entry", + "SM: Present bit in first-level paging entry is clear", + "SM: Non-zero reserved field set in first-level paging entry", + "SM: Error attempting to access FL-PML4 entry", + "SM: First-level entry address beyond MGAW in Nested translation", + "SM: Read permission error in FL-PML4 entry in Nested translation", + "SM: Read permission error in first-level paging entry in Nested translation", + "SM: Write permission error in first-level paging entry in Nested translation", + "SM: Error attempting to access second-level paging entry", + "SM: Read/Write permission error in second-level paging entry", + "SM: Non-zero reserved field set in second-level paging entry", + "SM: Invalid second-level page table pointer", + "SM: A/D bit update needed in second-level entry when set up in no snoop", + "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */ + 
"SM: Address in first-level translation is not canonical", + "SM: U/S set 0 for first-level translation with user privilege", + "SM: No execute permission for request with PASID and ER=1", + "SM: Address beyond the DMA hardware max", + "SM: Second-level entry address beyond the max", + "SM: No write permission for Write/AtomicOp request", + "SM: No read permission for Read/AtomicOp request", + "SM: Invalid address-interrupt address", + "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */ + "SM: A/D bit update needed in first-level entry when set up in no snoop", +}; + static const char *irq_remap_fault_reasons[] = { "Detected reserved fields in the decoded interrupt-remapped request", @@ -1536,6 +1637,10 @@ static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) ARRAY_SIZE(irq_remap_fault_reasons))) { *fault_type = INTR_REMAP; return irq_remap_fault_reasons[fault_reason - 0x20]; + } else if (fault_reason >= 0x30 && (fault_reason - 0x30 < + ARRAY_SIZE(dma_remap_sm_fault_reasons))) { + *fault_type = DMA_REMAP; + return dma_remap_sm_fault_reasons[fault_reason - 0x30]; } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { *fault_type = DMA_REMAP; return dma_remap_fault_reasons[fault_reason]; @@ -1611,7 +1716,8 @@ void dmar_msi_read(int irq, struct msi_msg *msg) } static int dmar_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, unsigned long long addr) + u8 fault_reason, int pasid, u16 source_id, + unsigned long long addr) { const char *reason; int fault_type; @@ -1624,10 +1730,11 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type, PCI_FUNC(source_id & 0xFF), addr >> 48, fault_reason, reason); else - pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n", + pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n", type ? "DMA Read" : "DMA Write", source_id >> 8, PCI_SLOT(source_id & 0xFF), - PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); + PCI_FUNC(source_id & 0xFF), pasid, addr, + fault_reason, reason); return 0; } @@ -1659,8 +1766,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id) u8 fault_reason; u16 source_id; u64 guest_addr; - int type; + int type, pasid; u32 data; + bool pasid_present; /* highest 32 bits */ data = readl(iommu->reg + reg + @@ -1672,10 +1780,12 @@ irqreturn_t dmar_fault(int irq, void *dev_id) fault_reason = dma_frcd_fault_reason(data); type = dma_frcd_type(data); + pasid = dma_frcd_pasid_value(data); data = readl(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN + 8); source_id = dma_frcd_source_id(data); + pasid_present = dma_frcd_pasid_present(data); guest_addr = dmar_readq(iommu->reg + reg + fault_index * PRIMARY_FAULT_REG_LEN); guest_addr = dma_frcd_page_addr(guest_addr); @@ -1688,7 +1798,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); if (!ratelimited) + /* Using pasid -1 if pasid is not present */ dmar_fault_do_one(iommu, type, fault_reason, + pasid_present ? 
pasid : -1, source_id, guest_addr); fault_index++; diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index b0c1e5f9daae..186ff5cc975c 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -566,7 +566,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, static const struct iommu_ops exynos_iommu_ops; -static int __init exynos_sysmmu_probe(struct platform_device *pdev) +static int exynos_sysmmu_probe(struct platform_device *pdev) { int irq, ret; struct device *dev = &pdev->dev; @@ -583,10 +583,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) return PTR_ERR(data->sfrbase); irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev, "Unable to find IRQ resource\n"); + if (irq <= 0) return irq; - } ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, dev_name(dev), data); @@ -1075,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, */ static int exynos_iommu_map(struct iommu_domain *iommu_domain, unsigned long l_iova, phys_addr_t paddr, size_t size, - int prot) + int prot, gfp_t gfp) { struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); sysmmu_pte_t *entry; @@ -1130,7 +1128,8 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain } static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, - unsigned long l_iova, size_t size) + unsigned long l_iova, size_t size, + struct iommu_iotlb_gather *gather) { struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index 471f05d452e0..c1257bef553c 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c @@ -5,6 +5,7 @@ * Authors: Gayatri Kammela <gayatri.kammela@intel.com> * Sohil Mehta <sohil.mehta@intel.com> * Jacob Pan <jacob.jun.pan@linux.intel.com> + * Lu Baolu <baolu.lu@linux.intel.com> */ #include <linux/debugfs.h> @@ -283,6 +284,77 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused) } DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct); +static inline unsigned long level_to_directory_size(int level) +{ + return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1)); +} + +static inline void +dump_page_info(struct seq_file *m, unsigned long iova, u64 *path) +{ + seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n", + iova >> VTD_PAGE_SHIFT, path[5], path[4], + path[3], path[2], path[1]); +} + +static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde, + int level, unsigned long start, + u64 *path) +{ + int i; + + if (level > 5 || level < 1) + return; + + for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT); + i++, pde++, start += level_to_directory_size(level)) { + if (!dma_pte_present(pde)) + continue; + + path[level] = pde->val; + if (dma_pte_superpage(pde) || level == 1) + dump_page_info(m, start, path); + else + pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)), + level - 1, start, path); + path[level] = 0; + } +} + +static int show_device_domain_translation(struct device *dev, void *data) +{ + struct dmar_domain *domain = find_domain(dev); + struct seq_file *m = data; + u64 path[6] = { 0 }; + + if (!domain) + return 0; + + seq_printf(m, "Device %s with pasid %d @0x%llx\n", + dev_name(dev), domain->default_pasid, + (u64)virt_to_phys(domain->pgd)); + seq_puts(m, 
"IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n"); + + pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path); + seq_putc(m, '\n'); + + return 0; +} + +static int domain_translation_struct_show(struct seq_file *m, void *unused) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&device_domain_lock, flags); + ret = bus_for_each_dev(&pci_bus_type, NULL, m, + show_device_domain_translation); + spin_unlock_irqrestore(&device_domain_lock, flags); + + return ret; +} +DEFINE_SHOW_ATTRIBUTE(domain_translation_struct); + #ifdef CONFIG_IRQ_REMAP static void ir_tbl_remap_entry_show(struct seq_file *m, struct intel_iommu *iommu) @@ -396,6 +468,9 @@ void __init intel_iommu_debugfs_init(void) &iommu_regset_fops); debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug, NULL, &dmar_translation_struct_fops); + debugfs_create_file("domain_translation_struct", 0444, + intel_iommu_debug, NULL, + &domain_translation_struct_fops); #ifdef CONFIG_IRQ_REMAP debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug, NULL, &ir_translation_struct_fops); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 12d094d08c0a..9dc37672bf89 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -41,9 +41,11 @@ #include <linux/dma-direct.h> #include <linux/crash_dump.h> #include <linux/numa.h> +#include <linux/swiotlb.h> #include <asm/irq_remapping.h> #include <asm/cacheflush.h> #include <asm/iommu.h> +#include <trace/events/intel_iommu.h> #include "irq_remapping.h" #include "intel-pasid.h" @@ -305,6 +307,20 @@ static int hw_pass_through = 1; */ #define DOMAIN_FLAG_LOSE_CHILDREN BIT(1) +/* + * When VT-d works in the scalable mode, it allows DMA translation to + * happen through either first level or second level page table. This + * bit marks that the DMA translation for the domain goes through the + * first level page table, otherwise, it goes through the second level. + */ +#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(2) + +/* + * Domain represents a virtual machine which demands iommu nested + * translation mode support. 
+ */ +#define DOMAIN_FLAG_NESTING_MODE BIT(3) + #define for_each_domain_iommu(idx, domain) \ for (idx = 0; idx < g_num_of_iommus; idx++) \ if (domain->iommu_refcnt[idx]) @@ -339,19 +355,28 @@ static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct device *dev); static void __dmar_remove_one_dev_info(struct device_domain_info *info); +static void domain_context_clear(struct intel_iommu *iommu, + struct device *dev); static int domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); static bool device_is_rmrr_locked(struct device *dev); static int intel_iommu_attach_device(struct iommu_domain *domain, struct device *dev); +static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova); #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON int dmar_disabled = 0; #else int dmar_disabled = 1; -#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/ +#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */ +#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON +int intel_iommu_sm = 1; +#else int intel_iommu_sm; +#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */ + int intel_iommu_enabled = 0; EXPORT_SYMBOL_GPL(intel_iommu_enabled); @@ -360,8 +385,8 @@ static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; +static int intel_no_bounce; -#define IDENTMAP_ALL 1 #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 @@ -370,9 +395,12 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2)) -static DEFINE_SPINLOCK(device_domain_lock); +DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); +#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \ + to_pci_dev(d)->untrusted) + /* * Iterate over elements in device_domain_list and call the specified * callback @fn against each element. @@ -455,6 +483,9 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); intel_iommu_tboot_noforce = 1; + } else if (!strncmp(str, "nobounce", 8)) { + pr_info("Intel-IOMMU: No bounce buffer. 
This could expose security risks of DMA attacks\n"); + intel_no_bounce = 1; } str += strcspn(str, ","); @@ -539,6 +570,11 @@ static inline int domain_type_is_si(struct dmar_domain *domain) return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY; } +static inline bool domain_use_first_level(struct dmar_domain *domain) +{ + return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL; +} + static inline int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn) { @@ -648,11 +684,12 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip) return ret; } -static int domain_update_iommu_superpage(struct intel_iommu *skip) +static int domain_update_iommu_superpage(struct dmar_domain *domain, + struct intel_iommu *skip) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; - int mask = 0xf; + int mask = 0x3; if (!intel_iommu_superpage) { return 0; @@ -662,7 +699,13 @@ static int domain_update_iommu_superpage(struct intel_iommu *skip) rcu_read_lock(); for_each_active_iommu(iommu, drhd) { if (iommu != skip) { - mask &= cap_super_page_val(iommu->cap); + if (domain && domain_use_first_level(domain)) { + if (!cap_fl1gp_support(iommu->cap)) + mask = 0x1; + } else { + mask &= cap_super_page_val(iommu->cap); + } + if (!mask) break; } @@ -677,7 +720,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain) { domain_update_iommu_coherency(domain); domain->iommu_snooping = domain_update_iommu_snooping(NULL); - domain->iommu_superpage = domain_update_iommu_superpage(NULL); + domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL); } struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, @@ -761,13 +804,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (dev_is_pci(dev)) { struct pci_dev *pf_pdev; - pdev = to_pci_dev(dev); - -#ifdef CONFIG_X86 - /* VMD child devices currently cannot be handled individually */ - if (is_vmd(pdev->bus)) - return NULL; -#endif + pdev = pci_real_dma_dev(to_pci_dev(dev)); /* VFs aren't listed in scope tables; we need to look up * the PF instead to find the IOMMU. */ @@ -900,6 +937,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; + if (domain_use_first_level(domain)) + pteval |= DMA_FL_PTE_XD; if (cmpxchg64(&pte->val, 0ULL, pteval)) /* Someone else set it while we were thinking; use theirs. */ free_pgtable_page(tmp_page); @@ -1470,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, spin_unlock_irqrestore(&device_domain_lock, flags); } +static void domain_flush_piotlb(struct intel_iommu *iommu, + struct dmar_domain *domain, + u64 addr, unsigned long npages, bool ih) +{ + u16 did = domain->iommu_did[iommu->seq_id]; + + if (domain->default_pasid) + qi_flush_piotlb(iommu, did, domain->default_pasid, + addr, npages, ih); + + if (!list_empty(&domain->devices)) + qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); +} + static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, @@ -1483,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, if (ih) ih = 1 << 6; - /* - * Fallback to domain selective flush if no PSI support or the size is - * too big. 
- * PSI requires page size to be 2 ^ x, and the base address is naturally - * aligned to the size - */ - if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) - iommu->flush.flush_iotlb(iommu, did, 0, 0, - DMA_TLB_DSI_FLUSH); - else - iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, - DMA_TLB_PSI_FLUSH); + + if (domain_use_first_level(domain)) { + domain_flush_piotlb(iommu, domain, addr, pages, ih); + } else { + /* + * Fallback to domain selective flush if no PSI support or + * the size is too big. PSI requires page size to be 2 ^ x, + * and the base address is naturally aligned to the size. + */ + if (!cap_pgsel_inv(iommu->cap) || + mask > cap_max_amask_val(iommu->cap)) + iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH); + else + iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, + DMA_TLB_PSI_FLUSH); + } /* * In caching mode, changes of pages from non-present to present require @@ -1509,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages) { - /* It's a non-present to present mapping. Only flush if caching mode */ - if (cap_caching_mode(iommu->cap)) + /* + * It's a non-present to present mapping. Only flush if caching mode + * and second level. + */ + if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); else iommu_flush_write_buffer(iommu); @@ -1527,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad) struct intel_iommu *iommu = g_iommus[idx]; u16 did = domain->iommu_did[iommu->seq_id]; - iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + if (domain_use_first_level(domain)) + domain_flush_piotlb(iommu, domain, 0, -1, 0); + else + iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH); if (!cap_caching_mode(iommu->cap)) iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), @@ -1696,6 +1761,33 @@ static void free_dmar_iommu(struct intel_iommu *iommu) #endif } +/* + * Check and return whether first level is used by default for + * DMA translation. 
+ */ +static bool first_level_by_default(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + static int first_level_support = -1; + + if (likely(first_level_support != -1)) + return first_level_support; + + first_level_support = 1; + + rcu_read_lock(); + for_each_active_iommu(iommu, drhd) { + if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) { + first_level_support = 0; + break; + } + } + rcu_read_unlock(); + + return first_level_support; +} + static struct dmar_domain *alloc_domain(int flags) { struct dmar_domain *domain; @@ -1707,6 +1799,8 @@ static struct dmar_domain *alloc_domain(int flags) memset(domain, 0, sizeof(*domain)); domain->nid = NUMA_NO_NODE; domain->flags = flags; + if (first_level_by_default()) + domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); @@ -1836,14 +1930,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, { int adjust_width, agaw; unsigned long sagaw; - int err; + int ret; init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); - err = init_iova_flush_queue(&domain->iovad, - iommu_flush_iova, iova_entry_free); - if (err) - return err; + if (!intel_iommu_strict) { + ret = init_iova_flush_queue(&domain->iovad, + iommu_flush_iova, iova_entry_free); + if (ret) + pr_info("iova flush queue initialization failed\n"); + } domain_reserve_special_ranges(domain); @@ -2105,9 +2201,26 @@ out_unlock: return ret; } +struct domain_context_mapping_data { + struct dmar_domain *domain; + struct intel_iommu *iommu; + struct pasid_table *table; +}; + +static int domain_context_mapping_cb(struct pci_dev *pdev, + u16 alias, void *opaque) +{ + struct domain_context_mapping_data *data = opaque; + + return domain_context_mapping_one(data->domain, data->iommu, + data->table, PCI_BUS_NUM(alias), + alias & 0xff); +} + static int domain_context_mapping(struct dmar_domain *domain, struct device *dev) { + struct domain_context_mapping_data data; struct pasid_table *table; struct intel_iommu *iommu; u8 bus, devfn; @@ -2117,7 +2230,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) return -ENODEV; table = intel_pasid_get_table(dev); - return domain_context_mapping_one(domain, iommu, table, bus, devfn); + + if (!dev_is_pci(dev)) + return domain_context_mapping_one(domain, iommu, table, + bus, devfn); + + data.domain = domain; + data.iommu = iommu; + data.table = table; + + return pci_for_each_dma_alias(to_pci_dev(dev), + &domain_context_mapping_cb, &data); } static int domain_context_mapped_cb(struct pci_dev *pdev, @@ -2189,17 +2312,20 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; + u64 attr; BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; - prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; + attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); + if (domain_use_first_level(domain)) + attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD; if (!sg) { sg_res = nr_pages; - pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; + pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr; } while (nr_pages > 0) { @@ -2211,7 +2337,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, sg_res = aligned_nrpages(sg->offset, sg->length); sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff; sg->dma_length = 
sg->length; - pteval = (sg_phys(sg) - pgoff) | prot; + pteval = (sg_phys(sg) - pgoff) | attr; phys_pfn = pteval >> VTD_PAGE_SHIFT; } @@ -2380,14 +2506,27 @@ static void domain_remove_dev_info(struct dmar_domain *domain) spin_unlock_irqrestore(&device_domain_lock, flags); } -/* - * find_domain - * Note: we use struct device->archdata.iommu stores the info - */ -static struct dmar_domain *find_domain(struct device *dev) +struct dmar_domain *find_domain(struct device *dev) { struct device_domain_info *info; + if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO || + dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)) + return NULL; + + if (dev_is_pci(dev)) + dev = &pci_real_dma_dev(to_pci_dev(dev))->dev; + + /* No lock here, assumes no domain exit in normal case */ + info = dev->archdata.iommu; + if (likely(info)) + return info->domain; + + return NULL; +} + +static struct dmar_domain *deferred_attach_domain(struct device *dev) +{ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) { struct iommu_domain *domain; @@ -2397,12 +2536,7 @@ static struct dmar_domain *find_domain(struct device *dev) intel_iommu_attach_device(domain, dev); } - /* No lock here, assumes no domain exit in normal case */ - info = dev->archdata.iommu; - - if (likely(info)) - return info->domain; - return NULL; + return find_domain(dev); } static inline struct device_domain_info * @@ -2418,6 +2552,36 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn) return NULL; } +static int domain_setup_first_level(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, + int pasid) +{ + int flags = PASID_FLAG_SUPERVISOR_MODE; + struct dma_pte *pgd = domain->pgd; + int agaw, level; + + /* + * Skip top levels of page tables for iommu which has + * less agaw than default. Unnecessary for PT mode. + */ + for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { + pgd = phys_to_virt(dma_pte_addr(pgd)); + if (!dma_pte_present(pgd)) + return -ENOMEM; + } + + level = agaw_to_level(agaw); + if (level != 4 && level != 5) + return -EINVAL; + + flags |= (level == 5) ? PASID_FLAG_FL5LP : 0; + + return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, + domain->iommu_did[iommu->seq_id], + flags); +} + static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, int bus, int devfn, struct device *dev, @@ -2517,6 +2681,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (hw_pass_through && domain_type_is_si(domain)) ret = intel_pasid_setup_pass_through(iommu, domain, dev, PASID_RID2PASID); + else if (domain_use_first_level(domain)) + ret = domain_setup_first_level(iommu, domain, dev, + PASID_RID2PASID); else ret = intel_pasid_setup_second_level(iommu, domain, dev, PASID_RID2PASID); @@ -2722,10 +2889,8 @@ static int __init si_domain_init(int hw) } /* - * Normally we use DMA domains for devices which have RMRRs. But we - * loose this requirement for graphic and usb devices. Identity map - * the RMRRs for graphic and USB devices so that they could use the - * si_domain. + * Identity map the RMRRs so that devices with RMRRs could also use + * the si_domain. 
*/ for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, @@ -2733,9 +2898,6 @@ static int __init si_domain_init(int hw) unsigned long long start = rmrr->base_address; unsigned long long end = rmrr->end_address; - if (device_is_rmrr_locked(dev)) - continue; - if (WARN_ON(end < start || end >> agaw_to_width(si_domain->agaw))) continue; @@ -2754,7 +2916,7 @@ static int identity_mapping(struct device *dev) struct device_domain_info *info; info = dev->archdata.iommu; - if (info && info != DUMMY_DEVICE_DOMAIN_INFO) + if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO) return (info->domain == si_domain); return 0; @@ -2874,9 +3036,6 @@ static int device_def_domain_type(struct device *dev) if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); - if (device_is_rmrr_locked(dev)) - return IOMMU_DOMAIN_DMA; - /* * Prevent any device marked as untrusted from getting * placed into the statically identity mapping domain. @@ -2914,13 +3073,9 @@ static int device_def_domain_type(struct device *dev) return IOMMU_DOMAIN_DMA; } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) return IOMMU_DOMAIN_DMA; - } else { - if (device_has_rmrr(dev)) - return IOMMU_DOMAIN_DMA; } - return (iommu_identity_mapping & IDENTMAP_ALL) ? - IOMMU_DOMAIN_IDENTITY : 0; + return 0; } static void intel_iommu_init_qi(struct intel_iommu *iommu) @@ -3249,10 +3404,7 @@ static int __init init_dmars(void) if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; -#ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu)) - intel_svm_init(iommu); -#endif + intel_svm_check(iommu); } /* @@ -3267,9 +3419,6 @@ static int __init init_dmars(void) iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); } - if (iommu_pass_through) - iommu_identity_mapping |= IDENTMAP_ALL; - #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA dmar_map_gfx = 0; #endif @@ -3342,8 +3491,21 @@ static unsigned long intel_alloc_iova(struct device *dev, { unsigned long iova_pfn; - /* Restrict dma_mask to the width that the iommu can handle */ - dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + /* + * Restrict dma_mask to the width that the iommu can handle. + * First-level translation restricts the input-address to a + * canonical address (i.e., address bits 63:N have the same + * value as address bit [N-1], where N is 48-bits with 4-level + * paging and 57-bits with 5-level paging). Hence, skip bit + * [N-1]. 
+ */ + if (domain_use_first_level(domain)) + dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1), + dma_mask); + else + dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), + dma_mask); + /* Ensure we reserve the whole size-aligned region */ nrpages = __roundup_pow_of_two(nrpages); @@ -3361,7 +3523,8 @@ static unsigned long intel_alloc_iova(struct device *dev, iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask), true); if (unlikely(!iova_pfn)) { - dev_err(dev, "Allocating %ld-page iova failed", nrpages); + dev_err_once(dev, "Allocating %ld-page iova failed\n", + nrpages); return 0; } @@ -3431,7 +3594,7 @@ static bool iommu_need_mapping(struct device *dev) if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask) dma_mask = dev->coherent_dma_mask; - if (dma_mask >= dma_get_required_mask(dev)) + if (dma_mask >= dma_direct_get_required_mask(dev)) return false; /* @@ -3472,7 +3635,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, BUG_ON(dir == DMA_NONE); - domain = find_domain(dev); + domain = deferred_attach_domain(dev); if (!domain) return DMA_MAPPING_ERROR; @@ -3505,6 +3668,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; start_paddr += paddr & ~PAGE_MASK; + + trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT); + return start_paddr; error: @@ -3560,10 +3726,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) if (dev_is_pci(dev)) pdev = to_pci_dev(dev); - dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn); - freelist = domain_unmap(domain, start_pfn, last_pfn); - if (intel_iommu_strict || (pdev && pdev->untrusted) || !has_iova_flush_queue(&domain->iovad)) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, @@ -3579,6 +3742,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) * cpu used up by the iotlb flush operation... 
*/ } + + trace_unmap_single(dev, dev_addr, size); } static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, @@ -3669,6 +3834,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, } intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); + + trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT); } static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, @@ -3688,7 +3855,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele if (!iommu_need_mapping(dev)) return dma_direct_map_sg(dev, sglist, nelems, dir, attrs); - domain = find_domain(dev); + domain = deferred_attach_domain(dev); if (!domain) return 0; @@ -3725,9 +3892,19 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele return 0; } + for_each_sg(sglist, sg, nelems, i) + trace_map_sg(dev, i + 1, nelems, sg); + return nelems; } +static u64 intel_get_required_mask(struct device *dev) +{ + if (!iommu_need_mapping(dev)) + return dma_direct_get_required_mask(dev); + return DMA_BIT_MASK(32); +} + static const struct dma_map_ops intel_dma_ops = { .alloc = intel_alloc_coherent, .free = intel_free_coherent, @@ -3738,6 +3915,258 @@ static const struct dma_map_ops intel_dma_ops = { .map_resource = intel_map_resource, .unmap_resource = intel_unmap_resource, .dma_supported = dma_direct_supported, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .get_required_mask = intel_get_required_mask, +}; + +static void +bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, enum dma_sync_target target) +{ + struct dmar_domain *domain; + phys_addr_t tlb_addr; + + domain = find_domain(dev); + if (WARN_ON(!domain)) + return; + + tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr); + if (is_swiotlb_buffer(tlb_addr)) + swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target); +} + +static dma_addr_t +bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, + enum dma_data_direction dir, unsigned long attrs, + u64 dma_mask) +{ + size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); + struct dmar_domain *domain; + struct intel_iommu *iommu; + unsigned long iova_pfn; + unsigned long nrpages; + phys_addr_t tlb_addr; + int prot = 0; + int ret; + + domain = deferred_attach_domain(dev); + if (WARN_ON(dir == DMA_NONE || !domain)) + return DMA_MAPPING_ERROR; + + iommu = domain_get_iommu(domain); + if (WARN_ON(!iommu)) + return DMA_MAPPING_ERROR; + + nrpages = aligned_nrpages(0, size); + iova_pfn = intel_alloc_iova(dev, domain, + dma_to_mm_pfn(nrpages), dma_mask); + if (!iova_pfn) + return DMA_MAPPING_ERROR; + + /* + * Check if DMAR supports zero-length reads on write only + * mappings.. + */ + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || + !cap_zlr(iommu->cap)) + prot |= DMA_PTE_READ; + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + prot |= DMA_PTE_WRITE; + + /* + * If both the physical buffer start address and size are + * page aligned, we don't need to use a bounce page. + */ + if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) { + tlb_addr = swiotlb_tbl_map_single(dev, + __phys_to_dma(dev, io_tlb_start), + paddr, size, aligned_size, dir, attrs); + if (tlb_addr == DMA_MAPPING_ERROR) { + goto swiotlb_error; + } else { + /* Cleanup the padding area. 
*/ + void *padding_start = phys_to_virt(tlb_addr); + size_t padding_size = aligned_size; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || + dir == DMA_BIDIRECTIONAL)) { + padding_start += size; + padding_size -= size; + } + + memset(padding_start, 0, padding_size); + } + } else { + tlb_addr = paddr; + } + + ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), + tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot); + if (ret) + goto mapping_error; + + trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size); + + return (phys_addr_t)iova_pfn << PAGE_SHIFT; + +mapping_error: + if (is_swiotlb_buffer(tlb_addr)) + swiotlb_tbl_unmap_single(dev, tlb_addr, size, + aligned_size, dir, attrs); +swiotlb_error: + free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); + dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n", + size, (unsigned long long)paddr, dir); + + return DMA_MAPPING_ERROR; +} + +static void +bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); + struct dmar_domain *domain; + phys_addr_t tlb_addr; + + domain = find_domain(dev); + if (WARN_ON(!domain)) + return; + + tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr); + if (WARN_ON(!tlb_addr)) + return; + + intel_unmap(dev, dev_addr, size); + if (is_swiotlb_buffer(tlb_addr)) + swiotlb_tbl_unmap_single(dev, tlb_addr, size, + aligned_size, dir, attrs); + + trace_bounce_unmap_single(dev, dev_addr, size); +} + +static dma_addr_t +bounce_map_page(struct device *dev, struct page *page, unsigned long offset, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return bounce_map_single(dev, page_to_phys(page) + offset, + size, dir, attrs, *dev->dma_mask); +} + +static dma_addr_t +bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + return bounce_map_single(dev, phys_addr, size, + dir, attrs, *dev->dma_mask); +} + +static void +bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + bounce_unmap_single(dev, dev_addr, size, dir, attrs); +} + +static void +bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + bounce_unmap_single(dev, dev_addr, size, dir, attrs); +} + +static void +bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) + bounce_unmap_page(dev, sg->dma_address, + sg_dma_len(sg), dir, attrs); +} + +static int +bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sglist, sg, nelems, i) { + sg->dma_address = bounce_map_page(dev, sg_page(sg), + sg->offset, sg->length, + dir, attrs); + if (sg->dma_address == DMA_MAPPING_ERROR) + goto out_unmap; + sg_dma_len(sg) = sg->length; + } + + for_each_sg(sglist, sg, nelems, i) + trace_bounce_map_sg(dev, i + 1, nelems, sg); + + return nelems; + +out_unmap: + bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + return 0; +} + +static void +bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + bounce_sync_single(dev, addr, 
size, dir, SYNC_FOR_CPU); +} + +static void +bounce_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE); +} + +static void +bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) + bounce_sync_single(dev, sg_dma_address(sg), + sg_dma_len(sg), dir, SYNC_FOR_CPU); +} + +static void +bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) + bounce_sync_single(dev, sg_dma_address(sg), + sg_dma_len(sg), dir, SYNC_FOR_DEVICE); +} + +static const struct dma_map_ops bounce_dma_ops = { + .alloc = intel_alloc_coherent, + .free = intel_free_coherent, + .map_sg = bounce_map_sg, + .unmap_sg = bounce_unmap_sg, + .map_page = bounce_map_page, + .unmap_page = bounce_unmap_page, + .sync_single_for_cpu = bounce_sync_single_for_cpu, + .sync_single_for_device = bounce_sync_single_for_device, + .sync_sg_for_cpu = bounce_sync_sg_for_cpu, + .sync_sg_for_device = bounce_sync_sg_for_device, + .map_resource = bounce_map_resource, + .unmap_resource = bounce_unmap_resource, + .dma_supported = dma_direct_supported, }; static inline int iommu_domain_cache_init(void) @@ -4007,17 +4436,38 @@ static void __init init_iommu_pm_ops(void) static inline void init_iommu_pm_ops(void) {} #endif /* CONFIG_PM */ +static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) +{ + if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) || + !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) || + rmrr->end_address <= rmrr->base_address || + arch_rmrr_sanity_check(rmrr)) + return -EINVAL; + + return 0; +} + int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; struct dmar_rmrr_unit *rmrru; + rmrr = (struct acpi_dmar_reserved_memory *)header; + if (rmrr_sanity_check(rmrr)) + WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, + "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + rmrr->base_address, rmrr->end_address, + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) goto out; rmrru->hdr = header; - rmrr = (struct acpi_dmar_reserved_memory *)header; + rmrru->base_address = rmrr->base_address; rmrru->end_address = rmrr->end_address; @@ -4156,7 +4606,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) iommu->name); return -ENXIO; } - sp = domain_update_iommu_superpage(iommu) - 1; + sp = domain_update_iommu_superpage(NULL, iommu) - 1; if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { pr_warn("%s: Doesn't support large page.\n", iommu->name); @@ -4176,10 +4626,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) if (ret) goto out; -#ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu)) - intel_svm_init(iommu); -#endif + intel_svm_check(iommu); if (dmaru->ignored) { /* @@ -4456,6 +4903,26 @@ static void intel_disable_iommus(void) iommu_disable_translation(iommu); } +void intel_iommu_shutdown(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu = NULL; + + if (no_iommu || dmar_disabled) + return; + + down_write(&dmar_global_lock); + + /* Disable PMRs explicitly here. 
*/ + for_each_iommu(iommu, drhd) + iommu_disable_protect_mem_regions(iommu); + + /* Make sure the IOMMUs are switched off */ + intel_disable_iommus(); + + up_write(&dmar_global_lock); +} + static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) { struct iommu_device *iommu_dev = dev_to_iommu_device(dev); @@ -4540,22 +5007,20 @@ const struct attribute_group *intel_iommu_groups[] = { NULL, }; -static int __init platform_optin_force_iommu(void) +static inline bool has_untrusted_dev(void) { struct pci_dev *pdev = NULL; - bool has_untrusted_dev = false; - if (!dmar_platform_optin() || no_platform_optin) - return 0; + for_each_pci_dev(pdev) + if (pdev->untrusted) + return true; - for_each_pci_dev(pdev) { - if (pdev->untrusted) { - has_untrusted_dev = true; - break; - } - } + return false; +} - if (!has_untrusted_dev) +static int __init platform_optin_force_iommu(void) +{ + if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev()) return 0; if (no_iommu || dmar_disabled) @@ -4566,12 +5031,9 @@ static int __init platform_optin_force_iommu(void) * map for all devices except those marked as being untrusted. */ if (dmar_disabled) - iommu_identity_mapping |= IDENTMAP_ALL; + iommu_set_default_passthrough(false); dmar_disabled = 0; -#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB) - swiotlb = 0; -#endif no_iommu = 0; return 1; @@ -4711,7 +5173,14 @@ int __init intel_iommu_init(void) up_write(&dmar_global_lock); #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB) - swiotlb = 0; + /* + * If the system has no untrusted device or the user has decided + * to disable the bounce page mechanisms, we don't need swiotlb. + * Mark this and the pre-allocated bounce pages will be released + * later. + */ + if (!has_untrusted_dev() || intel_no_bounce) + swiotlb = 0; #endif dma_ops = &intel_dma_ops; @@ -4759,6 +5228,28 @@ out_free_dmar: return ret; } +static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct intel_iommu *iommu = opaque; + + domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); + return 0; +} + +/* + * NB - intel-iommu lacks any sort of reference counting for the users of + * dependent devices. If multiple endpoints have intersecting dependent + * devices, unbinding the driver from any one of them will possibly leave + * the others unable to operate. 
+ */ +static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) +{ + if (!iommu || !dev || !dev_is_pci(dev)) + return; + + pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); +} + static void __dmar_remove_one_dev_info(struct device_domain_info *info) { struct dmar_domain *domain; @@ -4779,7 +5270,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) PASID_RID2PASID); iommu_disable_dev_iotlb(info); - domain_context_clear_one(iommu, info->bus, info->devfn); + domain_context_clear(iommu, info->dev); intel_pasid_free_table(info->dev); } @@ -4805,7 +5296,8 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; - if (info) + if (info && info != DEFER_DEVICE_DOMAIN_INFO + && info != DUMMY_DEVICE_DOMAIN_INFO) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); } @@ -4839,6 +5331,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; struct iommu_domain *domain; + int ret; switch (type) { case IOMMU_DOMAIN_DMA: @@ -4855,11 +5348,12 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return NULL; } - if (type == IOMMU_DOMAIN_DMA && - init_iova_flush_queue(&dmar_domain->iovad, - iommu_flush_iova, iova_entry_free)) { - pr_warn("iova flush queue initialization failed\n"); - intel_iommu_strict = 1; + if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) { + ret = init_iova_flush_queue(&dmar_domain->iovad, + iommu_flush_iova, + iova_entry_free); + if (ret) + pr_info("iova flush queue initialization failed\n"); } domain_update_iommu_cap(dmar_domain); @@ -4925,7 +5419,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain, domain->auxd_refcnt--; if (!domain->auxd_refcnt && domain->default_pasid > 0) - intel_pasid_free_id(domain->default_pasid); + ioasid_free(domain->default_pasid); } static int aux_domain_add_dev(struct dmar_domain *domain, @@ -4943,10 +5437,11 @@ static int aux_domain_add_dev(struct dmar_domain *domain, if (domain->default_pasid <= 0) { int pasid; - pasid = intel_pasid_alloc_id(domain, PASID_MIN, - pci_max_pasids(to_pci_dev(dev)), - GFP_KERNEL); - if (pasid <= 0) { + /* No private data needed for the default pasid */ + pasid = ioasid_alloc(NULL, PASID_MIN, + pci_max_pasids(to_pci_dev(dev)) - 1, + NULL); + if (pasid == INVALID_IOASID) { pr_err("Can't allocate default pasid\n"); return -ENODEV; } @@ -4964,8 +5459,12 @@ static int aux_domain_add_dev(struct dmar_domain *domain, goto attach_failed; /* Setup the PASID entry for mediated devices: */ - ret = intel_pasid_setup_second_level(iommu, domain, dev, - domain->default_pasid); + if (domain_use_first_level(domain)) + ret = domain_setup_first_level(iommu, domain, dev, + domain->default_pasid); + else + ret = intel_pasid_setup_second_level(iommu, domain, dev, + domain->default_pasid); if (ret) goto table_failed; spin_unlock(&iommu->lock); @@ -4982,7 +5481,7 @@ attach_failed: spin_unlock(&iommu->lock); spin_unlock_irqrestore(&device_domain_lock, flags); if (!domain->auxd_refcnt && domain->default_pasid > 0) - intel_pasid_free_id(domain->default_pasid); + ioasid_free(domain->default_pasid); return ret; } @@ -5113,16 +5612,13 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, - size_t size, int iommu_prot) + size_t size, int iommu_prot, gfp_t gfp) { struct 
dmar_domain *dmar_domain = to_dmar_domain(domain); u64 max_addr; int prot = 0; int ret; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return -EINVAL; - if (iommu_prot & IOMMU_READ) prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) @@ -5153,7 +5649,8 @@ static int intel_iommu_map(struct iommu_domain *domain, } static size_t intel_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct page *freelist = NULL; @@ -5164,8 +5661,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, /* Cope with horrid API which requires us to unmap more than the size argument if it happens to be a large-page mapping. */ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)); - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) size = VTD_PAGE_SIZE << level_to_offset_bits(level); @@ -5197,9 +5692,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, int level = 0; u64 phys = 0; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); if (pte) phys = dma_pte_addr(pte); @@ -5243,6 +5735,24 @@ static inline bool iommu_pasid_support(void) return ret; } +static inline bool nested_mode_support(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + bool ret = true; + + rcu_read_lock(); + for_each_active_iommu(iommu, drhd) { + if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) { + ret = false; + break; + } + } + rcu_read_unlock(); + + return ret; +} + static bool intel_iommu_capable(enum iommu_cap cap) { if (cap == IOMMU_CAP_CACHE_COHERENCY) @@ -5273,8 +5783,10 @@ static int intel_iommu_add_device(struct device *dev) group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto unlink; + } iommu_group_put(group); @@ -5300,7 +5812,8 @@ static int intel_iommu_add_device(struct device *dev) if (!get_private_domain_for_dev(dev)) { dev_warn(dev, "Failed to get a private domain.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unlink; } dev_info(dev, @@ -5309,7 +5822,16 @@ static int intel_iommu_add_device(struct device *dev) } } + if (device_needs_bounce(dev)) { + dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n"); + set_dma_ops(dev, &bounce_dma_ops); + } + return 0; + +unlink: + iommu_device_unlink(&iommu->iommu, dev); + return ret; } static void intel_iommu_remove_device(struct device *dev) @@ -5326,6 +5848,9 @@ static void intel_iommu_remove_device(struct device *dev) iommu_group_remove_device(dev); iommu_device_unlink(&iommu->iommu, dev); + + if (device_needs_bounce(dev)) + set_dma_ops(dev, NULL); } static void intel_iommu_get_resv_regions(struct device *device, @@ -5369,8 +5894,8 @@ static void intel_iommu_get_resv_regions(struct device *device, struct pci_dev *pdev = to_pci_dev(device); if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { - reg = iommu_alloc_resv_region(0, 1UL << 24, 0, - IOMMU_RESV_DIRECT); + reg = iommu_alloc_resv_region(0, 1UL << 24, prot, + IOMMU_RESV_DIRECT_RELAXABLE); if (reg) list_add_tail(®->list, head); } @@ -5385,15 +5910,6 @@ static void intel_iommu_get_resv_regions(struct device *device, list_add_tail(®->list, head); } -static void intel_iommu_put_resv_regions(struct device *dev, - struct list_head *head) -{ - struct iommu_resv_region 
*entry, *next; - - list_for_each_entry_safe(entry, next, head, list) - kfree(entry); -} - int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) { struct device_domain_info *info; @@ -5458,6 +5974,13 @@ static void intel_iommu_apply_resv_region(struct device *dev, WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); } +static struct iommu_group *intel_iommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + return generic_device_group(dev); +} + #ifdef CONFIG_INTEL_IOMMU_SVM struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { @@ -5613,10 +6136,42 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; } +static int +intel_iommu_domain_set_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long flags; + int ret = 0; + + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + + switch (attr) { + case DOMAIN_ATTR_NESTING: + spin_lock_irqsave(&device_domain_lock, flags); + if (nested_mode_support() && + list_empty(&dmar_domain->devices)) { + dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; + dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(&device_domain_lock, flags); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, + .domain_set_attr = intel_iommu_domain_set_attr, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, .aux_attach_dev = intel_iommu_aux_attach_device, @@ -5628,9 +6183,9 @@ const struct iommu_ops intel_iommu_ops = { .add_device = intel_iommu_add_device, .remove_device = intel_iommu_remove_device, .get_resv_regions = intel_iommu_get_resv_regions, - .put_resv_regions = intel_iommu_put_resv_regions, + .put_resv_regions = generic_iommu_put_resv_regions, .apply_resv_region = intel_iommu_apply_resv_region, - .device_group = pci_device_group, + .device_group = intel_iommu_device_group, .dev_has_feat = intel_iommu_dev_has_feat, .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, @@ -5639,20 +6194,46 @@ const struct iommu_ops intel_iommu_ops = { .pgsize_bitmap = INTEL_IOMMU_PGSIZES, }; -static void quirk_iommu_g4x_gfx(struct pci_dev *dev) +static void quirk_iommu_igfx(struct pci_dev *dev) { - /* G4x/GM45 integrated gfx dmar support is totally busted. */ pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); dmar_map_gfx = 0; } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx); +/* G4x/GM45 integrated gfx dmar support is totally busted. 
*/ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx); + +/* Broadwell igfx malfunctions with dmar */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); static void quirk_iommu_rwbf(struct pci_dev *dev) { diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c index 040a445be300..22b30f10b396 100644 --- a/drivers/iommu/intel-pasid.c +++ b/drivers/iommu/intel-pasid.c @@ -26,42 +26,6 @@ */ static DEFINE_SPINLOCK(pasid_lock); u32 intel_pasid_max_id = PASID_MAX; -static DEFINE_IDR(pasid_idr); - -int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp) -{ - int ret, min, max; - - min = max_t(int, start, PASID_MIN); - max = min_t(int, end, intel_pasid_max_id); - - WARN_ON(in_interrupt()); - idr_preload(gfp); - spin_lock(&pasid_lock); - ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC); - spin_unlock(&pasid_lock); - idr_preload_end(); - - return ret; -} - -void intel_pasid_free_id(int pasid) -{ - spin_lock(&pasid_lock); - idr_remove(&pasid_idr, pasid); - spin_unlock(&pasid_lock); -} - -void *intel_pasid_lookup_id(int pasid) -{ - void *p; - - spin_lock(&pasid_lock); - p = idr_find(&pasid_idr, pasid); - spin_unlock(&pasid_lock); - - return p; -} /* * Per device pasid table management: @@ -465,6 +429,21 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, devtlb_invalidation_with_pasid(iommu, dev, pasid); } 
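The hunks that follow fold the flush sequence duplicated across the three intel_pasid_setup_*() variants into a single pasid_flush_caches() helper, and make 5-level first-level paging an explicit, capability-checked request via PASID_FLAG_FL5LP rather than a CONFIG_X86/LA57 side effect. A minimal caller-side sketch of the new flag selection, modelled on the intel_svm_bind_mm() hunks further down in this diff (the locals iommu, dev, mm, pasid and ret are illustrative only):

	int pasid_flags;
	int ret;

	/* Supervisor PASIDs (no mm) keep the supervisor-mode flag; 5-level
	 * first-level tables are now requested explicitly, and only when the
	 * CPU itself runs with LA57. */
	pasid_flags = (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
		      (cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0);

	ret = intel_pasid_setup_first_level(iommu, dev,
					    mm ? mm->pgd : init_mm.pgd,
					    pasid, FLPT_DEFAULT_DID, pasid_flags);
	/* With the intel-pasid.c hunk below, this returns -EINVAL when
	 * PASID_FLAG_FL5LP is requested but cap_5lp_support() is clear,
	 * instead of silently depending on the kernel configuration. */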
+static void pasid_flush_caches(struct intel_iommu *iommu, + struct pasid_entry *pte, + int pasid, u16 did) +{ + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + if (cap_caching_mode(iommu->cap)) { + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + iotlb_invalidation_with_pasid(iommu, did, pasid); + } else { + iommu_flush_write_buffer(iommu); + } +} + /* * Set up the scalable mode pasid table entry for first only * translation type. @@ -498,10 +477,15 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, pasid_set_sre(pte); } -#ifdef CONFIG_X86 - if (cpu_feature_enabled(X86_FEATURE_LA57)) - pasid_set_flpm(pte, 1); -#endif /* CONFIG_X86 */ + if (flags & PASID_FLAG_FL5LP) { + if (cap_5lp_support(iommu->cap)) { + pasid_set_flpm(pte, 1); + } else { + pr_err("No 5-level paging support for first-level\n"); + pasid_clear_entry(pte); + return -EINVAL; + } + } pasid_set_domain_id(pte, did); pasid_set_address_width(pte, iommu->agaw); @@ -510,16 +494,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, /* Setup Present and PASID Granular Transfer Type: */ pasid_set_translation_type(pte, 1); pasid_set_present(pte); - - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(pte, sizeof(*pte)); - - if (cap_caching_mode(iommu->cap)) { - pasid_cache_invalidation_with_pasid(iommu, did, pasid); - iotlb_invalidation_with_pasid(iommu, did, pasid); - } else { - iommu_flush_write_buffer(iommu); - } + pasid_flush_caches(iommu, pte, pasid, did); return 0; } @@ -583,16 +558,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, */ pasid_set_sre(pte); pasid_set_present(pte); - - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(pte, sizeof(*pte)); - - if (cap_caching_mode(iommu->cap)) { - pasid_cache_invalidation_with_pasid(iommu, did, pasid); - iotlb_invalidation_with_pasid(iommu, did, pasid); - } else { - iommu_flush_write_buffer(iommu); - } + pasid_flush_caches(iommu, pte, pasid, did); return 0; } @@ -626,16 +592,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, */ pasid_set_sre(pte); pasid_set_present(pte); - - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(pte, sizeof(*pte)); - - if (cap_caching_mode(iommu->cap)) { - pasid_cache_invalidation_with_pasid(iommu, did, pasid); - iotlb_invalidation_with_pasid(iommu, did, pasid); - } else { - iommu_flush_write_buffer(iommu); - } + pasid_flush_caches(iommu, pte, pasid, did); return 0; } diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h index fc8cd8f17de1..92de6df24ccb 100644 --- a/drivers/iommu/intel-pasid.h +++ b/drivers/iommu/intel-pasid.h @@ -37,6 +37,12 @@ */ #define PASID_FLAG_SUPERVISOR_MODE BIT(0) +/* + * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first- + * level translation, otherwise, 4-level paging will be used. 
+ */ +#define PASID_FLAG_FL5LP BIT(1) + struct pasid_dir_entry { u64 val; }; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 780de0caafe8..d7f2a5358900 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -17,25 +17,13 @@ #include <linux/dmar.h> #include <linux/interrupt.h> #include <linux/mm_types.h> +#include <linux/ioasid.h> #include <asm/page.h> #include "intel-pasid.h" static irqreturn_t prq_event_thread(int irq, void *d); -int intel_svm_init(struct intel_iommu *iommu) -{ - if (cpu_feature_enabled(X86_FEATURE_GBPAGES) && - !cap_fl1gp_support(iommu->cap)) - return -EINVAL; - - if (cpu_feature_enabled(X86_FEATURE_LA57) && - !cap_5lp_support(iommu->cap)) - return -EINVAL; - - return 0; -} - #define PRQ_ORDER 0 int intel_svm_enable_prq(struct intel_iommu *iommu) @@ -99,25 +87,43 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) return 0; } +static inline bool intel_svm_capable(struct intel_iommu *iommu) +{ + return iommu->flags & VTD_FLAG_SVM_CAPABLE; +} + +void intel_svm_check(struct intel_iommu *iommu) +{ + if (!pasid_supported(iommu)) + return; + + if (cpu_feature_enabled(X86_FEATURE_GBPAGES) && + !cap_fl1gp_support(iommu->cap)) { + pr_err("%s SVM disabled, incompatible 1GB page capability\n", + iommu->name); + return; + } + + if (cpu_feature_enabled(X86_FEATURE_LA57) && + !cap_5lp_support(iommu->cap)) { + pr_err("%s SVM disabled, incompatible paging mode\n", + iommu->name); + return; + } + + iommu->flags |= VTD_FLAG_SVM_CAPABLE; +} + static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev, - unsigned long address, unsigned long pages, int ih, int gl) + unsigned long address, unsigned long pages, int ih) { struct qi_desc desc; if (pages == -1) { - /* For global kernel pages we have to flush them in *all* PASIDs - * because that's the only option the hardware gives us. Despite - * the fact that they are actually only accessible through one. 
*/ - if (gl) - desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | - QI_EIOTLB_DID(sdev->did) | - QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | - QI_EIOTLB_TYPE; - else - desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | - QI_EIOTLB_DID(sdev->did) | - QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | - QI_EIOTLB_TYPE; + desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | + QI_EIOTLB_DID(sdev->did) | + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | + QI_EIOTLB_TYPE; desc.qw1 = 0; } else { int mask = ilog2(__roundup_pow_of_two(pages)); @@ -127,7 +133,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE; desc.qw1 = QI_EIOTLB_ADDR(address) | - QI_EIOTLB_GL(gl) | QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask); } @@ -162,13 +167,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d } static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, - unsigned long pages, int ih, int gl) + unsigned long pages, int ih) { struct intel_svm_dev *sdev; rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) - intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); + intel_flush_svm_range_dev(svm, sdev, address, pages, ih); rcu_read_unlock(); } @@ -180,7 +185,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn, struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); intel_flush_svm_range(svm, start, - (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0); + (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); } static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) @@ -203,7 +208,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) { intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid); - intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); } rcu_read_unlock(); @@ -217,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = { static DEFINE_MUTEX(pasid_mutex); static LIST_HEAD(global_svm_list); +#define for_each_svm_dev(sdev, svm, d) \ + list_for_each_entry((sdev), &(svm)->devs, list) \ + if ((d) != (sdev)->dev) {} else + int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops) { struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); @@ -230,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ if (!iommu || dmar_disabled) return -EINVAL; + if (!intel_svm_capable(iommu)) + return -ENOTSUPP; + if (dev_is_pci(dev)) { pasid_max = pci_max_pasids(to_pci_dev(dev)); if (pasid_max < 0) @@ -262,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ goto out; } - list_for_each_entry(sdev, &svm->devs, list) { - if (dev == sdev->dev) { - if (sdev->ops != ops) { - ret = -EBUSY; - goto out; - } - sdev->users++; - goto success; + /* Find the matching device in svm list */ + for_each_svm_dev(sdev, svm, dev) { + if (sdev->ops != ops) { + ret = -EBUSY; + goto out; } + sdev->users++; + goto success; } break; @@ -324,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ if (pasid_max > intel_pasid_max_id) pasid_max = intel_pasid_max_id; - /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ - ret = intel_pasid_alloc_id(svm, - !!cap_caching_mode(iommu->cap), - pasid_max - 1, GFP_KERNEL); - if (ret < 0) { + /* Do not use PASID 0, reserved for RID to PASID */ + svm->pasid = 
ioasid_alloc(NULL, PASID_MIN, + pasid_max - 1, svm); + if (svm->pasid == INVALID_IOASID) { kfree(svm); kfree(sdev); + ret = -ENOSPC; goto out; } - svm->pasid = ret; svm->notifier.ops = &intel_mmuops; svm->mm = mm; svm->flags = flags; @@ -343,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ if (mm) { ret = mmu_notifier_register(&svm->notifier, mm); if (ret) { - intel_pasid_free_id(svm->pasid); + ioasid_free(svm->pasid); kfree(svm); kfree(sdev); goto out; @@ -354,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ ret = intel_pasid_setup_first_level(iommu, dev, mm ? mm->pgd : init_mm.pgd, svm->pasid, FLPT_DEFAULT_DID, - mm ? 0 : PASID_FLAG_SUPERVISOR_MODE); + (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) | + (cpu_feature_enabled(X86_FEATURE_LA57) ? + PASID_FLAG_FL5LP : 0)); spin_unlock(&iommu->lock); if (ret) { if (mm) mmu_notifier_unregister(&svm->notifier, mm); - intel_pasid_free_id(svm->pasid); + ioasid_free(svm->pasid); kfree(svm); kfree(sdev); goto out; @@ -375,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ ret = intel_pasid_setup_first_level(iommu, dev, mm ? mm->pgd : init_mm.pgd, svm->pasid, FLPT_DEFAULT_DID, - mm ? 0 : PASID_FLAG_SUPERVISOR_MODE); + (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) | + (cpu_feature_enabled(X86_FEATURE_LA57) ? + PASID_FLAG_FL5LP : 0)); spin_unlock(&iommu->lock); if (ret) { kfree(sdev); @@ -407,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) if (!iommu) goto out; - svm = intel_pasid_lookup_id(pasid); + svm = ioasid_find(NULL, pasid, NULL); if (!svm) goto out; - list_for_each_entry(sdev, &svm->devs, list) { - if (dev == sdev->dev) { - ret = 0; - sdev->users--; - if (!sdev->users) { - list_del_rcu(&sdev->list); - /* Flush the PASID cache and IOTLB for this device. - * Note that we do depend on the hardware *not* using - * the PASID any more. Just as we depend on other - * devices never using PASIDs that they have no right - * to use. We have a *shared* PASID table, because it's - * large and has to be physically contiguous. So it's - * hard to be as defensive as we might like. */ - intel_pasid_tear_down_entry(iommu, dev, svm->pasid); - intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); - kfree_rcu(sdev, rcu); - - if (list_empty(&svm->devs)) { - intel_pasid_free_id(svm->pasid); - if (svm->mm) - mmu_notifier_unregister(&svm->notifier, svm->mm); - - list_del(&svm->list); - - /* We mandate that no page faults may be outstanding - * for the PASID when intel_svm_unbind_mm() is called. - * If that is not obeyed, subtle errors will happen. - * Let's make them less subtle... */ - memset(svm, 0x6b, sizeof(*svm)); - kfree(svm); - } + if (IS_ERR(svm)) { + ret = PTR_ERR(svm); + goto out; + } + + for_each_svm_dev(sdev, svm, dev) { + ret = 0; + sdev->users--; + if (!sdev->users) { + list_del_rcu(&sdev->list); + /* Flush the PASID cache and IOTLB for this device. + * Note that we do depend on the hardware *not* using + * the PASID any more. Just as we depend on other + * devices never using PASIDs that they have no right + * to use. We have a *shared* PASID table, because it's + * large and has to be physically contiguous. So it's + * hard to be as defensive as we might like. 
*/ + intel_pasid_tear_down_entry(iommu, dev, svm->pasid); + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); + kfree_rcu(sdev, rcu); + + if (list_empty(&svm->devs)) { + ioasid_free(svm->pasid); + if (svm->mm) + mmu_notifier_unregister(&svm->notifier, svm->mm); + list_del(&svm->list); + /* We mandate that no page faults may be outstanding + * for the PASID when intel_svm_unbind_mm() is called. + * If that is not obeyed, subtle errors will happen. + * Let's make them less subtle... */ + memset(svm, 0x6b, sizeof(*svm)); + kfree(svm); } - break; } + break; } out: mutex_unlock(&pasid_mutex); @@ -464,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid) if (!iommu) goto out; - svm = intel_pasid_lookup_id(pasid); + svm = ioasid_find(NULL, pasid, NULL); if (!svm) goto out; + if (IS_ERR(svm)) { + ret = PTR_ERR(svm); + goto out; + } /* init_mm is used in this case */ if (!svm->mm) ret = 1; @@ -574,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (!svm || svm->pasid != req->pasid) { rcu_read_lock(); - svm = intel_pasid_lookup_id(req->pasid); + svm = ioasid_find(NULL, req->pasid, NULL); /* It *can't* go away, because the driver is not permitted * to unbind the mm while any page faults are outstanding. * So we only need RCU to protect the internal idr code. */ rcu_read_unlock(); - - if (!svm) { + if (IS_ERR_OR_NULL(svm)) { pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n", iommu->name, req->pasid, ((unsigned long long *)req)[0], ((unsigned long long *)req)[1]); @@ -664,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (req->priv_data_present) memcpy(&resp.qw2, req->priv_data, sizeof(req->priv_data)); + resp.qw2 = 0; + resp.qw3 = 0; + qi_submit_sync(&resp, iommu); } - resp.qw2 = 0; - resp.qw3 = 0; - qi_submit_sync(&resp, iommu); - head = (head + sizeof(*req)) & PRQ_RING_MASK; } diff --git a/drivers/iommu/intel-trace.c b/drivers/iommu/intel-trace.c new file mode 100644 index 000000000000..bfb6a6e37a88 --- /dev/null +++ b/drivers/iommu/intel-trace.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel IOMMU trace support + * + * Copyright (C) 2019 Intel Corporation + * + * Author: Lu Baolu <baolu.lu@linux.intel.com> + */ + +#include <linux/string.h> +#include <linux/types.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/intel_iommu.h> diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 4786ca061e31..81e43c1df7ec 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque) { struct set_msi_sid_data *data = opaque; + if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias)) + data->busmatch_count++; + data->pdev = pdev; data->alias = alias; data->count++; - if (PCI_BUS_NUM(alias) == pdev->bus->number) - data->busmatch_count++; - return 0; } diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 0fc8dfab2abf..4272fe4e17f4 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -112,7 +112,9 @@ #define ARM_V7S_TEX_MASK 0x7 #define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT) -#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */ +/* MediaTek extend the two bits for PA 32bit/33bit */ +#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9) +#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4) /* *well, except for TEX on level 2 large pages, of course 
:( */ #define ARM_V7S_CONT_PAGE_TEX_SHIFT 6 @@ -147,8 +149,6 @@ #define ARM_V7S_TTBR_IRGN_ATTR(attr) \ ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1)) -#define ARM_V7S_TCR_PD1 BIT(5) - #ifdef CONFIG_ZONE_DMA32 #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32 #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32 @@ -169,18 +169,62 @@ struct arm_v7s_io_pgtable { spinlock_t split_lock; }; +static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl); + static dma_addr_t __arm_v7s_dma_addr(void *pages) { return (dma_addr_t)virt_to_phys(pages); } -static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) +static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg) +{ + return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) && + (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT); +} + +static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl, + struct io_pgtable_cfg *cfg) +{ + arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl); + + if (!arm_v7s_is_mtk_enabled(cfg)) + return pte; + + if (paddr & BIT_ULL(32)) + pte |= ARM_V7S_ATTR_MTK_PA_BIT32; + if (paddr & BIT_ULL(33)) + pte |= ARM_V7S_ATTR_MTK_PA_BIT33; + return pte; +} + +static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl, + struct io_pgtable_cfg *cfg) { + arm_v7s_iopte mask; + phys_addr_t paddr; + if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) - pte &= ARM_V7S_TABLE_MASK; + mask = ARM_V7S_TABLE_MASK; + else if (arm_v7s_pte_is_cont(pte, lvl)) + mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES; else - pte &= ARM_V7S_LVL_MASK(lvl); - return phys_to_virt(pte); + mask = ARM_V7S_LVL_MASK(lvl); + + paddr = pte & mask; + if (!arm_v7s_is_mtk_enabled(cfg)) + return paddr; + + if (pte & ARM_V7S_ATTR_MTK_PA_BIT32) + paddr |= BIT_ULL(32); + if (pte & ARM_V7S_ATTR_MTK_PA_BIT33) + paddr |= BIT_ULL(33); + return paddr; +} + +static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl, + struct arm_v7s_io_pgtable *data) +{ + return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg)); } static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, @@ -295,9 +339,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) pte |= ARM_V7S_ATTR_NS_SECTION; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) - pte |= ARM_V7S_ATTR_MTK_4GB; - return pte; } @@ -362,7 +403,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl) return false; } -static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long, +static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, + struct iommu_iotlb_gather *, unsigned long, size_t, int, arm_v7s_iopte *); static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, @@ -383,7 +425,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, size_t sz = ARM_V7S_BLOCK_SIZE(lvl); tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl); - if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz, + if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz, sz, lvl, tblp) != sz)) return -EINVAL; } else if (ptep[i]) { @@ -396,7 +438,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, if (num_entries > 1) pte = arm_v7s_pte_to_cont(pte, lvl); - pte |= paddr & ARM_V7S_LVL_MASK(lvl); + pte |= paddr_to_iopte(paddr, lvl, cfg); __arm_v7s_set_pte(ptep, pte, num_entries, cfg); return 0; @@ -462,7 +504,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, } if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { - cptep = iopte_deref(pte, lvl); + cptep = iopte_deref(pte, lvl, data); } else if (pte) { /* We require an unmap first */ WARN_ON(!selftest_running); @@ -484,7 +526,8 @@ static int 
arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(prot & (IOMMU_READ | IOMMU_WRITE))) return 0; - if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || + paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); @@ -493,9 +536,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, * a chance for anything to kick off a table walk for the new iova. */ if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) { - io_pgtable_tlb_add_flush(iop, iova, size, - ARM_V7S_BLOCK_SIZE(2), false); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_walk(iop, iova, size, + ARM_V7S_BLOCK_SIZE(2)); } else { wmb(); } @@ -512,7 +554,8 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop) arm_v7s_iopte pte = data->pgd[i]; if (ARM_V7S_PTE_IS_TABLE(pte, 1)) - __arm_v7s_free_table(iopte_deref(pte, 1), 2, data); + __arm_v7s_free_table(iopte_deref(pte, 1, data), + 2, data); } __arm_v7s_free_table(data->pgd, 1, data); kmem_cache_destroy(data->l2_tables); @@ -541,12 +584,12 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); size *= ARM_V7S_CONT_PAGES; - io_pgtable_tlb_add_flush(iop, iova, size, size, true); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_leaf(iop, iova, size, size); return pte; } static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep) @@ -582,16 +625,16 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) return 0; - tablep = iopte_deref(pte, 1); - return __arm_v7s_unmap(data, iova, size, 2, tablep); + tablep = iopte_deref(pte, 1, data); + return __arm_v7s_unmap(data, gather, iova, size, 2, tablep); } - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); - io_pgtable_tlb_sync(&data->iop); + io_pgtable_tlb_add_page(&data->iop, gather, iova, size); return size; } static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, arm_v7s_iopte *ptep) { @@ -638,10 +681,9 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, for (i = 0; i < num_entries; i++) { if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) { /* Also flush any partial walks */ - io_pgtable_tlb_add_flush(iop, iova, blk_size, - ARM_V7S_BLOCK_SIZE(lvl + 1), false); - io_pgtable_tlb_sync(iop); - ptep = iopte_deref(pte[i], lvl); + io_pgtable_tlb_flush_walk(iop, iova, blk_size, + ARM_V7S_BLOCK_SIZE(lvl + 1)); + ptep = iopte_deref(pte[i], lvl, data); __arm_v7s_free_table(ptep, lvl + 1, data); } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { /* @@ -651,8 +693,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_flush(iop, iova, blk_size, - blk_size, true); + io_pgtable_tlb_add_page(iop, gather, iova, blk_size); } iova += blk_size; } @@ -662,23 +703,24 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep); + return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0], + ptep); } /* Keep on walkin' */ - ptep = iopte_deref(pte[0], lvl); - return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep); + ptep = 
iopte_deref(pte[0], lvl, data); + return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep); } static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); if (WARN_ON(upper_32_bits(iova))) return 0; - return __arm_v7s_unmap(data, iova, size, 1, data->pgd); + return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd); } static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, @@ -692,7 +734,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, do { ptep += ARM_V7S_LVL_IDX(iova, ++lvl); pte = READ_ONCE(*ptep); - ptep = iopte_deref(pte, lvl); + ptep = iopte_deref(pte, lvl, data); } while (ARM_V7S_PTE_IS_TABLE(pte, lvl)); if (!ARM_V7S_PTE_IS_VALID(pte)) @@ -701,7 +743,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, mask = ARM_V7S_LVL_MASK(lvl); if (arm_v7s_pte_is_cont(pte, lvl)) mask *= ARM_V7S_CONT_PAGES; - return (pte & mask) | (iova & ~mask); + return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask); } static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, @@ -709,18 +751,21 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, { struct arm_v7s_io_pgtable *data; - if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) + if (cfg->ias > ARM_V7S_ADDR_BITS) + return NULL; + + if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS)) return NULL; if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_TLBI_ON_MAP | - IO_PGTABLE_QUIRK_ARM_MTK_4GB | + IO_PGTABLE_QUIRK_ARM_MTK_EXT | IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */ - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB && + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS)) return NULL; @@ -751,8 +796,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, */ cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M; - /* TCR: T0SZ=0, disable TTBR1 */ - cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1; + /* TCR: T0SZ=0, EAE=0 (if applicable) */ + cfg->arm_v7s_cfg.tcr = 0; /* * TEX remap: the indices used map to the closest equivalent types @@ -775,15 +820,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, /* Ensure the empty pgd is visible before any actual TTBR write */ wmb(); - /* TTBRs */ - cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) | - ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS | - (cfg->coherent_walk ? - (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) | - ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) : - (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) | - ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC))); - cfg->arm_v7s_cfg.ttbr[1] = 0; + /* TTBR */ + cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S | + (cfg->coherent_walk ? 
(ARM_V7S_TTBR_NOS | + ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) | + ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) : + (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) | + ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC))); return &data->iop; out_free_data: @@ -799,29 +842,32 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = { #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST -static struct io_pgtable_cfg *cfg_cookie; +static struct io_pgtable_cfg *cfg_cookie __initdata; -static void dummy_tlb_flush_all(void *cookie) +static void __init dummy_tlb_flush_all(void *cookie) { WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void __init dummy_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_sync(void *cookie) +static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) { - WARN_ON(cookie != cfg_cookie); + dummy_tlb_flush(iova, granule, granule, cookie); } -static const struct iommu_gather_ops dummy_tlb_ops = { +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, - .tlb_add_flush = dummy_tlb_add_flush, - .tlb_sync = dummy_tlb_sync, + .tlb_flush_walk = dummy_tlb_flush, + .tlb_flush_leaf = dummy_tlb_flush, + .tlb_add_page = dummy_tlb_add_page, }; #define __FAIL(ops) ({ \ @@ -896,7 +942,7 @@ static int __init arm_v7s_do_selftests(void) size = 1UL << __ffs(cfg.pgsize_bitmap); while (i < loopnr) { iova_start = i * SZ_16M; - if (ops->unmap(ops, iova_start + size, size) != size) + if (ops->unmap(ops, iova_start + size, size, NULL) != size) return __FAIL(ops); /* Remap of partial unmap */ @@ -914,7 +960,7 @@ static int __init arm_v7s_do_selftests(void) for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { size = 1UL << i; - if (ops->unmap(ops, iova, size) != size) + if (ops->unmap(ops, iova, size, NULL) != size) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 161a7d56264d..983b08477e64 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -12,7 +12,6 @@ #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/io-pgtable.h> -#include <linux/iommu.h> #include <linux/kernel.h> #include <linux/sizes.h> #include <linux/slab.h> @@ -33,39 +32,31 @@ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) /* - * For consistency with the architecture, we always consider - * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 - */ -#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) - -/* * Calculate the right shift amount to get to the portion describing level l * in a virtual address mapped by the pagetable in d. */ #define ARM_LPAE_LVL_SHIFT(l,d) \ - ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ - * (d)->bits_per_level) + (d)->pg_shift) - -#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift) + (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \ + ilog2(sizeof(arm_lpae_iopte))) -#define ARM_LPAE_PAGES_PER_PGD(d) \ - DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d)) +#define ARM_LPAE_GRANULE(d) \ + (sizeof(arm_lpae_iopte) << (d)->bits_per_level) +#define ARM_LPAE_PGD_SIZE(d) \ + (sizeof(arm_lpae_iopte) << (d)->pgd_bits) /* * Calculate the index at level l used to map virtual address a using the * pagetable in d. 
*/ #define ARM_LPAE_PGD_IDX(l,d) \ - ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) + ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0) #define ARM_LPAE_LVL_IDX(a,l,d) \ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) /* Calculate the block/page mapping size at level l for pagetable in d. */ -#define ARM_LPAE_BLOCK_SIZE(l,d) \ - (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \ - ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) +#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d)) /* Page table bits */ #define ARM_LPAE_PTE_TYPE_SHIFT 0 @@ -109,40 +100,29 @@ #define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) /* Register bits */ -#define ARM_32_LPAE_TCR_EAE (1 << 31) -#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) +#define ARM_LPAE_TCR_TG0_4K 0 +#define ARM_LPAE_TCR_TG0_64K 1 +#define ARM_LPAE_TCR_TG0_16K 2 -#define ARM_LPAE_TCR_EPD1 (1 << 23) +#define ARM_LPAE_TCR_TG1_16K 1 +#define ARM_LPAE_TCR_TG1_4K 2 +#define ARM_LPAE_TCR_TG1_64K 3 -#define ARM_LPAE_TCR_TG0_4K (0 << 14) -#define ARM_LPAE_TCR_TG0_64K (1 << 14) -#define ARM_LPAE_TCR_TG0_16K (2 << 14) - -#define ARM_LPAE_TCR_SH0_SHIFT 12 -#define ARM_LPAE_TCR_SH0_MASK 0x3 #define ARM_LPAE_TCR_SH_NS 0 #define ARM_LPAE_TCR_SH_OS 2 #define ARM_LPAE_TCR_SH_IS 3 -#define ARM_LPAE_TCR_ORGN0_SHIFT 10 -#define ARM_LPAE_TCR_IRGN0_SHIFT 8 -#define ARM_LPAE_TCR_RGN_MASK 0x3 #define ARM_LPAE_TCR_RGN_NC 0 #define ARM_LPAE_TCR_RGN_WBWA 1 #define ARM_LPAE_TCR_RGN_WT 2 #define ARM_LPAE_TCR_RGN_WB 3 -#define ARM_LPAE_TCR_SL0_SHIFT 6 -#define ARM_LPAE_TCR_SL0_MASK 0x3 +#define ARM_LPAE_VTCR_SL0_MASK 0x3 #define ARM_LPAE_TCR_T0SZ_SHIFT 0 -#define ARM_LPAE_TCR_SZ_MASK 0xf - -#define ARM_LPAE_TCR_PS_SHIFT 16 -#define ARM_LPAE_TCR_PS_MASK 0x7 -#define ARM_LPAE_TCR_IPS_SHIFT 32 -#define ARM_LPAE_TCR_IPS_MASK 0x7 +#define ARM_LPAE_VTCR_PS_SHIFT 16 +#define ARM_LPAE_VTCR_PS_MASK 0x7 #define ARM_LPAE_TCR_PS_32_BIT 0x0ULL #define ARM_LPAE_TCR_PS_36_BIT 0x1ULL @@ -167,6 +147,9 @@ #define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2) #define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4) +#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL +#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL + /* IOPTE accessors */ #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) @@ -178,10 +161,9 @@ struct arm_lpae_io_pgtable { struct io_pgtable iop; - int levels; - size_t pgd_size; - unsigned long pg_shift; - unsigned long bits_per_level; + int pgd_bits; + int start_level; + int bits_per_level; void *pgd; }; @@ -211,7 +193,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte, { u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK; - if (data->pg_shift < 16) + if (ARM_LPAE_GRANULE(data) < SZ_64K) return paddr; /* Rotate the packed high-order bits back to the top */ @@ -290,6 +272,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); @@ -299,17 +282,11 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte = prot; - if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NS; - if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1) pte |= ARM_LPAE_PTE_TYPE_PAGE; else pte |= ARM_LPAE_PTE_TYPE_BLOCK; - if (data->iop.fmt != ARM_MALI_LPAE) - pte |= ARM_LPAE_PTE_AF; - pte |= ARM_LPAE_PTE_SH_IS; pte |= paddr_to_iopte(paddr, data); 
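	/*
	 * The shareability, NS-quirk and access-flag bits that used to be
	 * OR-ed in right here are now applied once in arm_lpae_prot_to_pte()
	 * (see the hunk below), so the prot value passed in already carries
	 * them; only the descriptor type and output address are added here.
	 */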
__arm_lpae_set_pte(ptep, pte, &data->iop.cfg); @@ -335,8 +312,10 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); - if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) + if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) { + WARN_ON(1); return -EINVAL; + } } __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); @@ -387,7 +366,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); /* If we can install a leaf entry at this level, then do so */ - if (size == block_size && (size & cfg->pgsize_bitmap)) + if (size == block_size) return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); /* We can't allocate tables at the final level */ @@ -459,14 +438,25 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, else if (prot & IOMMU_CACHE) pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); - else if (prot & IOMMU_QCOM_SYS_CACHE) + else if (prot & IOMMU_SYS_CACHE_ONLY) pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); } + if (prot & IOMMU_CACHE) + pte |= ARM_LPAE_PTE_SH_IS; + else + pte |= ARM_LPAE_PTE_SH_OS; + if (prot & IOMMU_NOEXEC) pte |= ARM_LPAE_PTE_XN; + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NS; + + if (data->iop.fmt != ARM_MALI_LPAE) + pte |= ARM_LPAE_PTE_AF; + return pte; } @@ -474,16 +464,22 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int iommu_prot) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; - int ret, lvl = ARM_LPAE_START_LVL(data); + int ret, lvl = data->start_level; arm_lpae_iopte prot; + long iaext = (long)iova >> cfg->ias; /* If no access, then nothing to do */ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) return 0; - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || - paddr >= (1ULL << data->iop.cfg.oas))) + if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) + return -EINVAL; + + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) + iaext = ~iaext; + if (WARN_ON(iaext || paddr >> cfg->oas)) return -ERANGE; prot = arm_lpae_prot_to_pte(data, iommu_prot); @@ -503,8 +499,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, arm_lpae_iopte *start, *end; unsigned long table_size; - if (lvl == ARM_LPAE_START_LVL(data)) - table_size = data->pgd_size; + if (lvl == data->start_level) + table_size = ARM_LPAE_PGD_SIZE(data); else table_size = ARM_LPAE_GRANULE(data); @@ -532,11 +528,12 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) { struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); - __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); + __arm_lpae_free_pgtable(data, data->start_level, data->pgd); kfree(data); } static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, arm_lpae_iopte blk_pte, int lvl, arm_lpae_iopte *ptep) @@ -582,15 +579,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, tablep = iopte_deref(pte, data); } else if (unmap_idx >= 0) { - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); - io_pgtable_tlb_sync(&data->iop); + io_pgtable_tlb_add_page(&data->iop, gather, iova, size); return size; } - return 
__arm_lpae_unmap(data, iova, size, lvl, tablep); + return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, + struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep) { @@ -612,9 +609,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, if (!iopte_leaf(pte, lvl, iop->fmt)) { /* Also flush any partial walks */ - io_pgtable_tlb_add_flush(iop, iova, size, - ARM_LPAE_GRANULE(data), false); - io_pgtable_tlb_sync(iop); + io_pgtable_tlb_flush_walk(iop, iova, size, + ARM_LPAE_GRANULE(data)); ptep = iopte_deref(pte, data); __arm_lpae_free_pgtable(data, lvl + 1, ptep); } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { @@ -625,7 +621,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_flush(iop, iova, size, size, true); + io_pgtable_tlb_add_page(iop, gather, iova, size); } return size; @@ -634,26 +630,32 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_lpae_split_blk_unmap(data, iova, size, pte, + return arm_lpae_split_blk_unmap(data, gather, iova, size, pte, lvl + 1, ptep); } /* Keep on walkin' */ ptep = iopte_deref(pte, data); - return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); + return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep); } static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; - int lvl = ARM_LPAE_START_LVL(data); + long iaext = (long)iova >> cfg->ias; - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) + if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) return 0; - return __arm_lpae_unmap(data, iova, size, lvl, ptep); + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) + iaext = ~iaext; + if (WARN_ON(iaext)) + return 0; + + return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep); } static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, @@ -661,7 +663,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); arm_lpae_iopte pte, *ptep = data->pgd; - int lvl = ARM_LPAE_START_LVL(data); + int lvl = data->start_level; do { /* Valid IOPTE pointer? 
*/ @@ -738,8 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) static struct arm_lpae_io_pgtable * arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) { - unsigned long va_bits, pgd_bits; struct arm_lpae_io_pgtable *data; + int levels, va_bits, pg_shift; arm_lpae_restrict_pgsizes(cfg); @@ -761,15 +763,15 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) if (!data) return NULL; - data->pg_shift = __ffs(cfg->pgsize_bitmap); - data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); + pg_shift = __ffs(cfg->pgsize_bitmap); + data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte)); - va_bits = cfg->ias - data->pg_shift; - data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); + va_bits = cfg->ias - pg_shift; + levels = DIV_ROUND_UP(va_bits, data->bits_per_level); + data->start_level = ARM_LPAE_MAX_LEVELS - levels; /* Calculate the actual size of our pgd (without concatenation) */ - pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); - data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); + data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); data->iop.ops = (struct io_pgtable_ops) { .map = arm_lpae_map, @@ -785,9 +787,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) { u64 reg; struct arm_lpae_io_pgtable *data; + typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr; + bool tg1; if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | - IO_PGTABLE_QUIRK_NON_STRICT)) + IO_PGTABLE_QUIRK_NON_STRICT | + IO_PGTABLE_QUIRK_ARM_TTBR1)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -796,58 +801,55 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) /* TCR */ if (cfg->coherent_walk) { - reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + tcr->sh = ARM_LPAE_TCR_SH_IS; + tcr->irgn = ARM_LPAE_TCR_RGN_WBWA; + tcr->orgn = ARM_LPAE_TCR_RGN_WBWA; } else { - reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT); + tcr->sh = ARM_LPAE_TCR_SH_OS; + tcr->irgn = ARM_LPAE_TCR_RGN_NC; + tcr->orgn = ARM_LPAE_TCR_RGN_NC; } + tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1; switch (ARM_LPAE_GRANULE(data)) { case SZ_4K: - reg |= ARM_LPAE_TCR_TG0_4K; + tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K; break; case SZ_16K: - reg |= ARM_LPAE_TCR_TG0_16K; + tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K; break; case SZ_64K: - reg |= ARM_LPAE_TCR_TG0_64K; + tcr->tg = tg1 ? 
ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K; break; } switch (cfg->oas) { case 32: - reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_32_BIT; break; case 36: - reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_36_BIT; break; case 40: - reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_40_BIT; break; case 42: - reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_42_BIT; break; case 44: - reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_44_BIT; break; case 48: - reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_48_BIT; break; case 52: - reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT); + tcr->ips = ARM_LPAE_TCR_PS_52_BIT; break; default: goto out_free_data; } - reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; - - /* Disable speculative walks through TTBR1 */ - reg |= ARM_LPAE_TCR_EPD1; - cfg->arm_lpae_s1_cfg.tcr = reg; + tcr->tsz = 64ULL - cfg->ias; /* MAIRs */ reg = (ARM_LPAE_MAIR_ATTR_NC @@ -859,20 +861,19 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) (ARM_LPAE_MAIR_ATTR_INC_OWBRWA << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)); - cfg->arm_lpae_s1_cfg.mair[0] = reg; - cfg->arm_lpae_s1_cfg.mair[1] = 0; + cfg->arm_lpae_s1_cfg.mair = reg; /* Looking good; allocate a pgd */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), + GFP_KERNEL, cfg); if (!data->pgd) goto out_free_data; /* Ensure the empty pgd is visible before any actual TTBR write */ wmb(); - /* TTBRs */ - cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); - cfg->arm_lpae_s1_cfg.ttbr[1] = 0; + /* TTBR */ + cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd); return &data->iop; out_free_data: @@ -883,8 +884,9 @@ out_free_data: static struct io_pgtable * arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) { - u64 reg, sl; + u64 sl; struct arm_lpae_io_pgtable *data; + typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr; /* The NS quirk doesn't apply at stage 2 */ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT)) @@ -898,69 +900,74 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) * Concatenate PGDs at level 1 if possible in order to reduce * the depth of the stage-2 walk. 
*/ - if (data->levels == ARM_LPAE_MAX_LEVELS) { + if (data->start_level == 0) { unsigned long pgd_pages; - pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); + pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte); if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { - data->pgd_size = pgd_pages << data->pg_shift; - data->levels--; + data->pgd_bits += data->bits_per_level; + data->start_level++; } } /* VTCR */ - reg = ARM_64_LPAE_S2_TCR_RES1 | - (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + if (cfg->coherent_walk) { + vtcr->sh = ARM_LPAE_TCR_SH_IS; + vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA; + vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA; + } else { + vtcr->sh = ARM_LPAE_TCR_SH_OS; + vtcr->irgn = ARM_LPAE_TCR_RGN_NC; + vtcr->orgn = ARM_LPAE_TCR_RGN_NC; + } - sl = ARM_LPAE_START_LVL(data); + sl = data->start_level; switch (ARM_LPAE_GRANULE(data)) { case SZ_4K: - reg |= ARM_LPAE_TCR_TG0_4K; + vtcr->tg = ARM_LPAE_TCR_TG0_4K; sl++; /* SL0 format is different for 4K granule size */ break; case SZ_16K: - reg |= ARM_LPAE_TCR_TG0_16K; + vtcr->tg = ARM_LPAE_TCR_TG0_16K; break; case SZ_64K: - reg |= ARM_LPAE_TCR_TG0_64K; + vtcr->tg = ARM_LPAE_TCR_TG0_64K; break; } switch (cfg->oas) { case 32: - reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_32_BIT; break; case 36: - reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_36_BIT; break; case 40: - reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_40_BIT; break; case 42: - reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_42_BIT; break; case 44: - reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_44_BIT; break; case 48: - reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_48_BIT; break; case 52: - reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT); + vtcr->ps = ARM_LPAE_TCR_PS_52_BIT; break; default: goto out_free_data; } - reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; - reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; - cfg->arm_lpae_s2_cfg.vtcr = reg; + vtcr->tsz = 64ULL - cfg->ias; + vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK; /* Allocate pgd pages */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), + GFP_KERNEL, cfg); if (!data->pgd) goto out_free_data; @@ -979,61 +986,77 @@ out_free_data: static struct io_pgtable * arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) { - struct io_pgtable *iop; - if (cfg->ias > 32 || cfg->oas > 40) return NULL; cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); - iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); - if (iop) { - cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; - cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; - } - - return iop; + return arm_64_lpae_alloc_pgtable_s1(cfg, cookie); } static struct io_pgtable * arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) { - struct io_pgtable *iop; - if (cfg->ias > 40 || cfg->oas > 40) return NULL; cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); - iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); - if (iop) - cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; - - return iop; + return arm_64_lpae_alloc_pgtable_s2(cfg, cookie); } static struct io_pgtable * arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void 
*cookie) { - struct io_pgtable *iop; + struct arm_lpae_io_pgtable *data; + + /* No quirks for Mali (hopefully) */ + if (cfg->quirks) + return NULL; - if (cfg->ias != 48 || cfg->oas > 40) + if (cfg->ias > 48 || cfg->oas > 40) return NULL; cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); - iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); - if (iop) { - u64 mair, ttbr; - - /* Copy values as union fields overlap */ - mair = cfg->arm_lpae_s1_cfg.mair[0]; - ttbr = cfg->arm_lpae_s1_cfg.ttbr[0]; - - cfg->arm_mali_lpae_cfg.memattr = mair; - cfg->arm_mali_lpae_cfg.transtab = ttbr | - ARM_MALI_LPAE_TTBR_READ_INNER | - ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; + + data = arm_lpae_alloc_pgtable(cfg); + if (!data) + return NULL; + + /* Mali seems to need a full 4-level table regardless of IAS */ + if (data->start_level > 0) { + data->start_level = 0; + data->pgd_bits = 0; } + /* + * MEMATTR: Mali has no actual notion of a non-cacheable type, so the + * best we can do is mimic the out-of-tree driver and hope that the + * "implementation-defined caching policy" is good enough. Similarly, + * we'll use it for the sake of a valid attribute for our 'device' + * index, although callers should never request that in practice. + */ + cfg->arm_mali_lpae_cfg.memattr = + (ARM_MALI_LPAE_MEMATTR_IMP_DEF + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | + (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | + (ARM_MALI_LPAE_MEMATTR_IMP_DEF + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); + + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, + cfg); + if (!data->pgd) + goto out_free_data; + + /* Ensure the empty pgd is visible before TRANSTAB can be written */ + wmb(); + + cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | + ARM_MALI_LPAE_TTBR_READ_INNER | + ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; + return &data->iop; - return iop; +out_free_data: + kfree(data); + return NULL; } struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { @@ -1063,29 +1086,32 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = { #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST -static struct io_pgtable_cfg *cfg_cookie; +static struct io_pgtable_cfg *cfg_cookie __initdata; -static void dummy_tlb_flush_all(void *cookie) +static void __init dummy_tlb_flush_all(void *cookie) { WARN_ON(cookie != cfg_cookie); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void __init dummy_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) { WARN_ON(cookie != cfg_cookie); WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_sync(void *cookie) +static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) { - WARN_ON(cookie != cfg_cookie); + dummy_tlb_flush(iova, granule, granule, cookie); } -static const struct iommu_gather_ops dummy_tlb_ops __initconst = { +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, - .tlb_add_flush = dummy_tlb_add_flush, - .tlb_sync = dummy_tlb_sync, + .tlb_flush_walk = dummy_tlb_flush, + .tlb_flush_leaf = dummy_tlb_flush, + .tlb_add_page = dummy_tlb_add_page, }; static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) @@ -1095,9 +1121,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", cfg->pgsize_bitmap, cfg->ias); - pr_err("data: %d 
levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", - data->levels, data->pgd_size, data->pg_shift, - data->bits_per_level, data->pgd); + pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n", + ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data), + ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd); } #define __FAIL(ops, i) ({ \ @@ -1109,7 +1135,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) { - static const enum io_pgtable_fmt fmts[] = { + static const enum io_pgtable_fmt fmts[] __initconst = { ARM_64_LPAE_S1, ARM_64_LPAE_S2, }; @@ -1168,7 +1194,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) /* Partial unmap */ size = 1UL << __ffs(cfg->pgsize_bitmap); - if (ops->unmap(ops, SZ_1G + size, size) != size) + if (ops->unmap(ops, SZ_1G + size, size, NULL) != size) return __FAIL(ops, i); /* Remap of partial unmap */ @@ -1183,7 +1209,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { size = 1UL << j; - if (ops->unmap(ops, iova, size) != size) + if (ops->unmap(ops, iova, size, NULL) != size) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42)) @@ -1208,13 +1234,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) static int __init arm_lpae_do_selftests(void) { - static const unsigned long pgsize[] = { + static const unsigned long pgsize[] __initconst = { SZ_4K | SZ_2M | SZ_1G, SZ_16K | SZ_32M, SZ_64K | SZ_512M, }; - static const unsigned int ias[] = { + static const unsigned int ias[] __initconst = { 32, 36, 40, 42, 44, 48, }; diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index ced53e5b72b5..94394c81468f 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -63,7 +63,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops) if (!ops) return; - iop = container_of(ops, struct io_pgtable, ops); + iop = io_pgtable_ops_to_pgtable(ops); io_pgtable_tlb_flush_all(iop); io_pgtable_init_table[iop->fmt]->free(iop); } diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c new file mode 100644 index 000000000000..0f8dd377aada --- /dev/null +++ b/drivers/iommu/ioasid.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * I/O Address Space ID allocator. There is one global IOASID space, split into + * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and + * free IOASIDs with ioasid_alloc and ioasid_free. + */ +#include <linux/ioasid.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/xarray.h> + +struct ioasid_data { + ioasid_t id; + struct ioasid_set *set; + void *private; + struct rcu_head rcu; +}; + +/* + * struct ioasid_allocator_data - Internal data structure to hold information + * about an allocator. There are two types of allocators: + * + * - Default allocator always has its own XArray to track the IOASIDs allocated. + * - Custom allocators may share allocation helpers with different private data. + * Custom allocators that share the same helper functions also share the same + * XArray. + * Rules: + * 1. Default allocator is always available, not dynamically registered. This is + * to prevent race conditions with early boot code that want to register + * custom allocators or allocate IOASIDs. + * 2. Custom allocators take precedence over the default allocator. + * 3. 
When all custom allocators sharing the same helper functions are + * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be + * freed. Otherwise, outstanding IOASIDs will be lost and orphaned. + * 4. When switching between custom allocators sharing the same helper + * functions, outstanding IOASIDs are preserved. + * 5. When switching between custom allocator and default allocator, all IOASIDs + * must be freed to ensure unadulterated space for the new allocator. + * + * @ops: allocator helper functions and its data + * @list: registered custom allocators + * @slist: allocators share the same ops but different data + * @flags: attributes of the allocator + * @xa: xarray holds the IOASID space + * @rcu: used for kfree_rcu when unregistering allocator + */ +struct ioasid_allocator_data { + struct ioasid_allocator_ops *ops; + struct list_head list; + struct list_head slist; +#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */ + unsigned long flags; + struct xarray xa; + struct rcu_head rcu; +}; + +static DEFINE_SPINLOCK(ioasid_allocator_lock); +static LIST_HEAD(allocators_list); + +static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque); +static void default_free(ioasid_t ioasid, void *opaque); + +static struct ioasid_allocator_ops default_ops = { + .alloc = default_alloc, + .free = default_free, +}; + +static struct ioasid_allocator_data default_allocator = { + .ops = &default_ops, + .flags = 0, + .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC), +}; + +static struct ioasid_allocator_data *active_allocator = &default_allocator; + +static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque) +{ + ioasid_t id; + + if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) { + pr_err("Failed to alloc ioasid from %d to %d\n", min, max); + return INVALID_IOASID; + } + + return id; +} + +static void default_free(ioasid_t ioasid, void *opaque) +{ + struct ioasid_data *ioasid_data; + + ioasid_data = xa_erase(&default_allocator.xa, ioasid); + kfree_rcu(ioasid_data, rcu); +} + +/* Allocate and initialize a new custom allocator with its helper functions */ +static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops) +{ + struct ioasid_allocator_data *ia_data; + + ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC); + if (!ia_data) + return NULL; + + xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC); + INIT_LIST_HEAD(&ia_data->slist); + ia_data->flags |= IOASID_ALLOCATOR_CUSTOM; + ia_data->ops = ops; + + /* For tracking custom allocators that share the same ops */ + list_add_tail(&ops->list, &ia_data->slist); + + return ia_data; +} + +static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b) +{ + return (a->free == b->free) && (a->alloc == b->alloc); +} + +/** + * ioasid_register_allocator - register a custom allocator + * @ops: the custom allocator ops to be registered + * + * Custom allocators take precedence over the default xarray based allocator. + * Private data associated with the IOASID allocated by the custom allocators + * are managed by IOASID framework similar to data stored in xa by default + * allocator. + * + * There can be multiple allocators registered but only one is active. In case + * of runtime removal of a custom allocator, the next one is activated based + * on the registration ordering. + * + * Multiple allocators can share the same alloc() function, in this case the + * IOASID space is shared. 
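+ *
+ * A minimal registration sketch (illustrative only; my_vcmd_alloc(),
+ * my_vcmd_free() and my_vcmd_pdata are hypothetical stand-ins for a
+ * platform-specific allocation interface):
+ *
+ *	static struct ioasid_allocator_ops my_ops = {
+ *		.alloc	= my_vcmd_alloc,
+ *		.free	= my_vcmd_free,
+ *		.pdata	= &my_vcmd_pdata,
+ *	};
+ *
+ *	if (ioasid_register_allocator(&my_ops))
+ *		pr_warn("Using the default IOASID allocator\n");
+ *	...
+ *	ioasid_unregister_allocator(&my_ops);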
+ */ +int ioasid_register_allocator(struct ioasid_allocator_ops *ops) +{ + struct ioasid_allocator_data *ia_data; + struct ioasid_allocator_data *pallocator; + int ret = 0; + + spin_lock(&ioasid_allocator_lock); + + ia_data = ioasid_alloc_allocator(ops); + if (!ia_data) { + ret = -ENOMEM; + goto out_unlock; + } + + /* + * No particular preference, we activate the first one and keep + * the later registered allocators in a list in case the first one gets + * removed due to hotplug. + */ + if (list_empty(&allocators_list)) { + WARN_ON(active_allocator != &default_allocator); + /* Use this new allocator if default is not active */ + if (xa_empty(&active_allocator->xa)) { + rcu_assign_pointer(active_allocator, ia_data); + list_add_tail(&ia_data->list, &allocators_list); + goto out_unlock; + } + pr_warn("Default allocator active with outstanding IOASID\n"); + ret = -EAGAIN; + goto out_free; + } + + /* Check if the allocator is already registered */ + list_for_each_entry(pallocator, &allocators_list, list) { + if (pallocator->ops == ops) { + pr_err("IOASID allocator already registered\n"); + ret = -EEXIST; + goto out_free; + } else if (use_same_ops(pallocator->ops, ops)) { + /* + * If the new allocator shares the same ops, + * then they will share the same IOASID space. + * We should put them under the same xarray. + */ + list_add_tail(&ops->list, &pallocator->slist); + goto out_free; + } + } + list_add_tail(&ia_data->list, &allocators_list); + + spin_unlock(&ioasid_allocator_lock); + return 0; +out_free: + kfree(ia_data); +out_unlock: + spin_unlock(&ioasid_allocator_lock); + return ret; +} +EXPORT_SYMBOL_GPL(ioasid_register_allocator); + +/** + * ioasid_unregister_allocator - Remove a custom IOASID allocator ops + * @ops: the custom allocator to be removed + * + * Remove an allocator from the list, activate the next allocator in + * the order it was registered. Or revert to default allocator if all + * custom allocators are unregistered without outstanding IOASIDs. + */ +void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops) +{ + struct ioasid_allocator_data *pallocator; + struct ioasid_allocator_ops *sops; + + spin_lock(&ioasid_allocator_lock); + if (list_empty(&allocators_list)) { + pr_warn("No custom IOASID allocators active!\n"); + goto exit_unlock; + } + + list_for_each_entry(pallocator, &allocators_list, list) { + if (!use_same_ops(pallocator->ops, ops)) + continue; + + if (list_is_singular(&pallocator->slist)) { + /* No shared helper functions */ + list_del(&pallocator->list); + /* + * All IOASIDs should have been freed before + * the last allocator that shares the same ops + * is unregistered. 
+ */ + WARN_ON(!xa_empty(&pallocator->xa)); + if (list_empty(&allocators_list)) { + pr_info("No custom IOASID allocators, switch to default.\n"); + rcu_assign_pointer(active_allocator, &default_allocator); + } else if (pallocator == active_allocator) { + rcu_assign_pointer(active_allocator, + list_first_entry(&allocators_list, + struct ioasid_allocator_data, list)); + pr_info("IOASID allocator changed"); + } + kfree_rcu(pallocator, rcu); + break; + } + /* + * Find the matching shared ops to delete, + * but keep outstanding IOASIDs + */ + list_for_each_entry(sops, &pallocator->slist, list) { + if (sops == ops) { + list_del(&ops->list); + break; + } + } + break; + } + +exit_unlock: + spin_unlock(&ioasid_allocator_lock); +} +EXPORT_SYMBOL_GPL(ioasid_unregister_allocator); + +/** + * ioasid_set_data - Set private data for an allocated ioasid + * @ioasid: the ID to set data + * @data: the private data + * + * For IOASID that is already allocated, private data can be set + * via this API. Future lookup can be done via ioasid_find. + */ +int ioasid_set_data(ioasid_t ioasid, void *data) +{ + struct ioasid_data *ioasid_data; + int ret = 0; + + spin_lock(&ioasid_allocator_lock); + ioasid_data = xa_load(&active_allocator->xa, ioasid); + if (ioasid_data) + rcu_assign_pointer(ioasid_data->private, data); + else + ret = -ENOENT; + spin_unlock(&ioasid_allocator_lock); + + /* + * Wait for readers to stop accessing the old private data, so the + * caller can free it. + */ + if (!ret) + synchronize_rcu(); + + return ret; +} +EXPORT_SYMBOL_GPL(ioasid_set_data); + +/** + * ioasid_alloc - Allocate an IOASID + * @set: the IOASID set + * @min: the minimum ID (inclusive) + * @max: the maximum ID (inclusive) + * @private: data private to the caller + * + * Allocate an ID between @min and @max. The @private pointer is stored + * internally and can be retrieved with ioasid_find(). + * + * Return: the allocated ID on success, or %INVALID_IOASID on failure. + */ +ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, + void *private) +{ + struct ioasid_data *data; + void *adata; + ioasid_t id; + + data = kzalloc(sizeof(*data), GFP_ATOMIC); + if (!data) + return INVALID_IOASID; + + data->set = set; + data->private = private; + + /* + * Custom allocator needs allocator data to perform platform specific + * operations. + */ + spin_lock(&ioasid_allocator_lock); + adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? 
active_allocator->ops->pdata : data; + id = active_allocator->ops->alloc(min, max, adata); + if (id == INVALID_IOASID) { + pr_err("Failed ASID allocation %lu\n", active_allocator->flags); + goto exit_free; + } + + if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) && + xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) { + /* Custom allocator needs framework to store and track allocation results */ + pr_err("Failed to alloc ioasid from %d\n", id); + active_allocator->ops->free(id, active_allocator->ops->pdata); + goto exit_free; + } + data->id = id; + + spin_unlock(&ioasid_allocator_lock); + return id; +exit_free: + spin_unlock(&ioasid_allocator_lock); + kfree(data); + return INVALID_IOASID; +} +EXPORT_SYMBOL_GPL(ioasid_alloc); + +/** + * ioasid_free - Free an IOASID + * @ioasid: the ID to remove + */ +void ioasid_free(ioasid_t ioasid) +{ + struct ioasid_data *ioasid_data; + + spin_lock(&ioasid_allocator_lock); + ioasid_data = xa_load(&active_allocator->xa, ioasid); + if (!ioasid_data) { + pr_err("Trying to free unknown IOASID %u\n", ioasid); + goto exit_unlock; + } + + active_allocator->ops->free(ioasid, active_allocator->ops->pdata); + /* Custom allocator needs additional steps to free the xa element */ + if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) { + ioasid_data = xa_erase(&active_allocator->xa, ioasid); + kfree_rcu(ioasid_data, rcu); + } + +exit_unlock: + spin_unlock(&ioasid_allocator_lock); +} +EXPORT_SYMBOL_GPL(ioasid_free); + +/** + * ioasid_find - Find IOASID data + * @set: the IOASID set + * @ioasid: the IOASID to find + * @getter: function to call on the found object + * + * The optional getter function allows to take a reference to the found object + * under the rcu lock. The function can also check if the object is still valid: + * if @getter returns false, then the object is invalid and NULL is returned. + * + * If the IOASID exists, return the private pointer passed to ioasid_alloc. + * Private data can be NULL if not set. Return an error if the IOASID is not + * found, or if @set is not NULL and the IOASID does not belong to the set. 
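+ *
+ * A typical caller-side sketch (illustrative only; my_ioasid_set, my_data,
+ * my_data_get() and the 20-bit range are hypothetical):
+ *
+ *	ioasid_t pasid;
+ *
+ *	pasid = ioasid_alloc(&my_ioasid_set, 1, (1 << 20) - 1, my_data);
+ *	if (pasid == INVALID_IOASID)
+ *		return -ENOSPC;
+ *	...
+ *	my_data = ioasid_find(&my_ioasid_set, pasid, my_data_get);
+ *	if (IS_ERR_OR_NULL(my_data))
+ *		return -EINVAL;
+ *	...
+ *	ioasid_free(pasid);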
+ */ +void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, + bool (*getter)(void *)) +{ + void *priv; + struct ioasid_data *ioasid_data; + struct ioasid_allocator_data *idata; + + rcu_read_lock(); + idata = rcu_dereference(active_allocator); + ioasid_data = xa_load(&idata->xa, ioasid); + if (!ioasid_data) { + priv = ERR_PTR(-ENOENT); + goto unlock; + } + if (set && ioasid_data->set != set) { + /* data found but does not belong to the set */ + priv = ERR_PTR(-EACCES); + goto unlock; + } + /* Now IOASID and its set is verified, we can return the private data */ + priv = rcu_dereference(ioasid_data->private); + if (getter && !getter(priv)) + priv = NULL; +unlock: + rcu_read_unlock(); + + return priv; +} +EXPORT_SYMBOL_GPL(ioasid_find); + +MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>"); +MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>"); +MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index e436ff813e7e..99869217fbec 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -87,6 +87,7 @@ error: put_device(iommu->dev); return ret; } +EXPORT_SYMBOL_GPL(iommu_device_sysfs_add); void iommu_device_sysfs_remove(struct iommu_device *iommu) { @@ -94,6 +95,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu) device_unregister(iommu->dev); iommu->dev = NULL; } +EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove); + /* * IOMMU drivers can indicate a device is managed by a given IOMMU using * this interface. A link to the device will be created in the "devices" @@ -119,6 +122,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) return ret; } +EXPORT_SYMBOL_GPL(iommu_device_link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link) { @@ -128,3 +132,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link) sysfs_remove_link(&link->kobj, "iommu"); sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); } +EXPORT_SYMBOL_GPL(iommu_device_unlink); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0c674d80c37f..3e3528436e0b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -22,16 +22,15 @@ #include <linux/bitops.h> #include <linux/property.h> #include <linux/fsl/mc.h> +#include <linux/module.h> #include <trace/events/iommu.h> static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); -#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH -static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; -#else -static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; -#endif + +static unsigned int iommu_def_domain_type __read_mostly; static bool iommu_dma_strict __read_mostly = true; +static u32 iommu_cmd_line __read_mostly; struct iommu_group { struct kobject kobj; @@ -68,6 +67,18 @@ static const char * const iommu_group_resv_type_string[] = { [IOMMU_RESV_SW_MSI] = "msi", }; +#define IOMMU_CMD_LINE_DMA_API BIT(0) + +static void iommu_set_cmd_line_dma_api(void) +{ + iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; +} + +static bool iommu_cmd_line_dma_api(void) +{ + return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API); +} + #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ __ATTR(_name, _mode, _show, _store) @@ -80,14 +91,58 @@ struct iommu_group_attribute iommu_group_attr_##_name = \ static LIST_HEAD(iommu_device_list); static DEFINE_SPINLOCK(iommu_device_lock); +/* 
+ * Use a function instead of an array here because the domain-type is a + * bit-field, so an array would waste memory. + */ +static const char *iommu_domain_type_str(unsigned int t) +{ + switch (t) { + case IOMMU_DOMAIN_BLOCKED: + return "Blocked"; + case IOMMU_DOMAIN_IDENTITY: + return "Passthrough"; + case IOMMU_DOMAIN_UNMANAGED: + return "Unmanaged"; + case IOMMU_DOMAIN_DMA: + return "Translated"; + default: + return "Unknown"; + } +} + +static int __init iommu_subsys_init(void) +{ + bool cmd_line = iommu_cmd_line_dma_api(); + + if (!cmd_line) { + if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) + iommu_set_default_passthrough(false); + else + iommu_set_default_translated(false); + + if (iommu_default_passthrough() && mem_encrypt_active()) { + pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); + iommu_set_default_translated(false); + } + } + + pr_info("Default domain type: %s %s\n", + iommu_domain_type_str(iommu_def_domain_type), + cmd_line ? "(set via kernel command line)" : ""); + + return 0; +} +subsys_initcall(iommu_subsys_init); + int iommu_device_register(struct iommu_device *iommu) { spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); - return 0; } +EXPORT_SYMBOL_GPL(iommu_device_register); void iommu_device_unregister(struct iommu_device *iommu) { @@ -95,6 +150,7 @@ void iommu_device_unregister(struct iommu_device *iommu) list_del(&iommu->list); spin_unlock(&iommu_device_lock); } +EXPORT_SYMBOL_GPL(iommu_device_unregister); static struct iommu_param *iommu_get_dev_param(struct device *dev) { @@ -130,10 +186,21 @@ int iommu_probe_device(struct device *dev) if (!iommu_get_dev_param(dev)) return -ENOMEM; + if (!try_module_get(ops->owner)) { + ret = -EINVAL; + goto err_free_dev_param; + } + ret = ops->add_device(dev); if (ret) - iommu_free_dev_param(dev); + goto err_module_put; + + return 0; +err_module_put: + module_put(ops->owner); +err_free_dev_param: + iommu_free_dev_param(dev); return ret; } @@ -144,7 +211,10 @@ void iommu_release_device(struct device *dev) if (dev->iommu_group) ops->remove_device(dev); - iommu_free_dev_param(dev); + if (dev->iommu_param) { + module_put(ops->owner); + iommu_free_dev_param(dev); + } } static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, @@ -165,7 +235,11 @@ static int __init iommu_set_def_domain_type(char *str) if (ret) return ret; - iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; + if (pt) + iommu_set_default_passthrough(true); + else + iommu_set_default_translated(true); + return 0; } early_param("iommu.passthrough", iommu_set_def_domain_type); @@ -229,60 +303,58 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) * @new: new region to insert * @regions: list of regions * - * The new element is sorted by address with respect to the other - * regions of the same type. In case it overlaps with another - * region of the same type, regions are merged. In case it - * overlaps with another region of different type, regions are - * not merged. + * Elements are sorted by start address and overlapping segments + * of the same type are merged. 
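+ *
+ * For example (addresses purely illustrative): inserting an
+ * IOMMU_RESV_SW_MSI region [0x8000000 - 0x80fffff] into a list that
+ * already holds an IOMMU_RESV_SW_MSI region [0x8080000 - 0x817ffff]
+ * produces one merged entry [0x8000000 - 0x817ffff], while a region of
+ * a different type over the same range is kept as a separate entry,
+ * sorted by its start address.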
*/ -static int iommu_insert_resv_region(struct iommu_resv_region *new, - struct list_head *regions) +int iommu_insert_resv_region(struct iommu_resv_region *new, + struct list_head *regions) { - struct iommu_resv_region *region; - phys_addr_t start = new->start; - phys_addr_t end = new->start + new->length - 1; - struct list_head *pos = regions->next; - - while (pos != regions) { - struct iommu_resv_region *entry = - list_entry(pos, struct iommu_resv_region, list); - phys_addr_t a = entry->start; - phys_addr_t b = entry->start + entry->length - 1; - int type = entry->type; - - if (end < a) { - goto insert; - } else if (start > b) { - pos = pos->next; - } else if ((start >= a) && (end <= b)) { - if (new->type == type) - return 0; - else - pos = pos->next; + struct iommu_resv_region *iter, *tmp, *nr, *top; + LIST_HEAD(stack); + + nr = iommu_alloc_resv_region(new->start, new->length, + new->prot, new->type); + if (!nr) + return -ENOMEM; + + /* First add the new element based on start address sorting */ + list_for_each_entry(iter, regions, list) { + if (nr->start < iter->start || + (nr->start == iter->start && nr->type <= iter->type)) + break; + } + list_add_tail(&nr->list, &iter->list); + + /* Merge overlapping segments of type nr->type in @regions, if any */ + list_for_each_entry_safe(iter, tmp, regions, list) { + phys_addr_t top_end, iter_end = iter->start + iter->length - 1; + + /* no merge needed on elements of different types than @new */ + if (iter->type != new->type) { + list_move_tail(&iter->list, &stack); + continue; + } + + /* look for the last stack element of same type as @iter */ + list_for_each_entry_reverse(top, &stack, list) + if (top->type == iter->type) + goto check_overlap; + + list_move_tail(&iter->list, &stack); + continue; + +check_overlap: + top_end = top->start + top->length - 1; + + if (iter->start > top_end + 1) { + list_move_tail(&iter->list, &stack); } else { - if (new->type == type) { - phys_addr_t new_start = min(a, start); - phys_addr_t new_end = max(b, end); - int ret; - - list_del(&entry->list); - entry->start = new_start; - entry->length = new_end - new_start + 1; - ret = iommu_insert_resv_region(entry, regions); - kfree(entry); - return ret; - } else { - pos = pos->next; - } + top->length = max(top_end, iter_end) - top->start + 1; + list_del(&iter->list); + kfree(iter); } } -insert: - region = iommu_alloc_resv_region(new->start, new->length, - new->prot, new->type); - if (!region) - return -ENOMEM; - - list_add_tail(®ion->list, pos); + list_splice(&stack, regions); return 0; } @@ -696,6 +768,7 @@ err_put_group: mutex_unlock(&group->mutex); dev->iommu_group = NULL; kobject_put(group->devices_kobj); + sysfs_remove_link(group->devices_kobj, device->name); err_free_name: kfree(device->name); err_remove_link: @@ -831,6 +904,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group) kobject_get(group->devices_kobj); return group; } +EXPORT_SYMBOL_GPL(iommu_group_ref_get); /** * iommu_group_put - Decrement group reference @@ -1204,6 +1278,7 @@ struct iommu_group *generic_device_group(struct device *dev) { return iommu_group_alloc(); } +EXPORT_SYMBOL_GPL(generic_device_group); /* * Use standard PCI bus topology, isolation features, and DMA alias quirks @@ -1271,6 +1346,7 @@ struct iommu_group *pci_device_group(struct device *dev) /* No shared group found, allocate new */ return iommu_group_alloc(); } +EXPORT_SYMBOL_GPL(pci_device_group); /* Get the IOMMU group for device on fsl-mc bus */ struct iommu_group *fsl_mc_device_group(struct device *dev) @@ 
-1283,6 +1359,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev) group = iommu_group_alloc(); return group; } +EXPORT_SYMBOL_GPL(fsl_mc_device_group); /** * iommu_group_get_for_dev - Find or create the IOMMU group for a device @@ -1351,6 +1428,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) return group; } +EXPORT_SYMBOL(iommu_group_get_for_dev); struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) { @@ -1481,6 +1559,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) { int err; + if (ops == NULL) { + bus->iommu_ops = NULL; + return 0; + } + if (bus->iommu_ops != NULL) return -EBUSY; @@ -1610,6 +1693,36 @@ out_unlock: } EXPORT_SYMBOL_GPL(iommu_attach_device); +int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev, + struct iommu_cache_invalidate_info *inv_info) +{ + if (unlikely(!domain->ops->cache_invalidate)) + return -ENODEV; + + return domain->ops->cache_invalidate(domain, dev, inv_info); +} +EXPORT_SYMBOL_GPL(iommu_cache_invalidate); + +int iommu_sva_bind_gpasid(struct iommu_domain *domain, + struct device *dev, struct iommu_gpasid_bind_data *data) +{ + if (unlikely(!domain->ops->sva_bind_gpasid)) + return -ENODEV; + + return domain->ops->sva_bind_gpasid(domain, dev, data); +} +EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid); + +int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, + ioasid_t pasid) +{ + if (unlikely(!domain->ops->sva_unbind_gpasid)) + return -ENODEV; + + return domain->ops->sva_unbind_gpasid(dev, pasid); +} +EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); + static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { @@ -1799,8 +1912,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, return pgsize; } -int iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) +int __iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { const struct iommu_ops *ops = domain->ops; unsigned long orig_iova = iova; @@ -1837,8 +1950,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", iova, &paddr, pgsize); + ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); - ret = ops->map(domain, iova, paddr, pgsize, prot); if (ret) break; @@ -1858,11 +1971,25 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, return ret; } + +int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + might_sleep(); + return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); +} EXPORT_SYMBOL_GPL(iommu_map); +int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(iommu_map_atomic); + static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, - bool sync) + struct iommu_iotlb_gather *iotlb_gather) { const struct iommu_ops *ops = domain->ops; size_t unmapped_page, unmapped = 0; @@ -1899,13 +2026,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain, while (unmapped < size) { size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); - unmapped_page = ops->unmap(domain, iova, pgsize); + unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); if (!unmapped_page) break; - if (sync && ops->iotlb_range_add) - 
ops->iotlb_range_add(domain, iova, pgsize); - pr_debug("unmapped: iova 0x%lx size 0x%zx\n", iova, unmapped_page); @@ -1913,9 +2037,6 @@ static size_t __iommu_unmap(struct iommu_domain *domain, unmapped += unmapped_page; } - if (sync && ops->iotlb_sync) - ops->iotlb_sync(domain); - trace_unmap(orig_iova, size, unmapped); return unmapped; } @@ -1923,19 +2044,28 @@ static size_t __iommu_unmap(struct iommu_domain *domain, size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { - return __iommu_unmap(domain, iova, size, true); + struct iommu_iotlb_gather iotlb_gather; + size_t ret; + + iommu_iotlb_gather_init(&iotlb_gather); + ret = __iommu_unmap(domain, iova, size, &iotlb_gather); + iommu_tlb_sync(domain, &iotlb_gather); + + return ret; } EXPORT_SYMBOL_GPL(iommu_unmap); size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *iotlb_gather) { - return __iommu_unmap(domain, iova, size, false); + return __iommu_unmap(domain, iova, size, iotlb_gather); } EXPORT_SYMBOL_GPL(iommu_unmap_fast); -size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, int prot) +size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot, + gfp_t gfp) { size_t len = 0, mapped = 0; phys_addr_t start; @@ -1946,7 +2076,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, phys_addr_t s_phys = sg_phys(sg); if (len && s_phys != start + len) { - ret = iommu_map(domain, iova + mapped, start, len, prot); + ret = __iommu_map(domain, iova + mapped, start, + len, prot, gfp); + if (ret) goto out_err; @@ -1974,8 +2106,22 @@ out_err: return 0; } + +size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot) +{ + might_sleep(); + return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); +} EXPORT_SYMBOL_GPL(iommu_map_sg); +size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot) +{ + return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(iommu_map_sg_atomic); + int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) { @@ -2111,6 +2257,25 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list) ops->put_resv_regions(dev, list); } +/** + * generic_iommu_put_resv_regions - Reserved region driver helper + * @dev: device for which to free reserved regions + * @list: reserved region list for device + * + * IOMMU drivers can use this to implement their .put_resv_regions() callback + * for simple reservations. Memory allocated for each reserved region will be + * freed. If an IOMMU driver allocates additional resources per region, it is + * going to have to implement a custom callback. 
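+ *
+ * A wiring sketch (illustrative only; my_iommu_ops and
+ * my_get_resv_regions() are hypothetical driver names):
+ *
+ *	static const struct iommu_ops my_iommu_ops = {
+ *		...
+ *		.get_resv_regions	= my_get_resv_regions,
+ *		.put_resv_regions	= generic_iommu_put_resv_regions,
+ *	};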
+ */ +void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, list, list) + kfree(entry); +} +EXPORT_SYMBOL(generic_iommu_put_resv_regions); + struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type) @@ -2128,6 +2293,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, region->type = type; return region; } +EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); static int request_default_domain_for_dev(struct device *dev, unsigned long type) @@ -2143,7 +2309,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) mutex_lock(&group->mutex); - /* Check if the default domain is already direct mapped */ ret = 0; if (group->default_domain && group->default_domain->type == type) goto out; @@ -2153,7 +2318,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) if (iommu_group_device_count(group) != 1) goto out; - /* Allocate a direct mapped domain */ ret = -ENOMEM; domain = __iommu_domain_alloc(dev->bus, type); if (!domain) @@ -2166,13 +2330,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) goto out; } - iommu_group_create_direct_mappings(group, dev); - - /* Make the direct mapped domain the default for this group */ + /* Make the domain the default for this group */ if (group->default_domain) iommu_domain_free(group->default_domain); group->default_domain = domain; + iommu_group_create_direct_mappings(group, dev); + dev_info(dev, "Using iommu %s mapping\n", type == IOMMU_DOMAIN_DMA ? "dma" : "direct"); @@ -2196,6 +2360,28 @@ int iommu_request_dma_domain_for_dev(struct device *dev) return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA); } +void iommu_set_default_passthrough(bool cmd_line) +{ + if (cmd_line) + iommu_set_cmd_line_dma_api(); + + iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; +} + +void iommu_set_default_translated(bool cmd_line) +{ + if (cmd_line) + iommu_set_cmd_line_dma_api(); + + iommu_def_domain_type = IOMMU_DOMAIN_DMA; +} + +bool iommu_default_passthrough(void) +{ + return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; +} +EXPORT_SYMBOL_GPL(iommu_default_passthrough); + const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) { const struct iommu_ops *ops = NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 3e1a8a675572..0e6a9536eca6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex); struct iova *alloc_iova_mem(void) { - return kmem_cache_alloc(iova_cache, GFP_ATOMIC); + return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN); } EXPORT_SYMBOL(alloc_iova_mem); @@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad, spin_unlock_irqrestore(&fq->lock, flags); - if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0) + /* Avoid false sharing as much as possible. 
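+	 * The unlocked atomic_read() lets the common case (the flush timer
+	 * is already pending) skip the atomic_cmpxchg() entirely, so CPUs
+	 * queueing IOVAs do not keep dirtying the cache line that holds
+	 * fq_timer_on.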
*/ + if (!atomic_read(&iovad->fq_timer_on) && + !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1)) mod_timer(&iovad->fq_timer, jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ad0098c0c87c..ecb3f9464dd5 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -49,6 +49,10 @@ struct ipmmu_features { bool setup_imbuscr; bool twobit_imttbcr_sl0; bool reserved_context; + bool cache_snoop; + unsigned int ctx_offset_base; + unsigned int ctx_offset_stride; + unsigned int utlb_offset_base; }; struct ipmmu_vmsa_device { @@ -98,126 +102,49 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IM_NS_ALIAS_OFFSET 0x800 -#define IM_CTX_SIZE 0x40 - -#define IMCTR 0x0000 -#define IMCTR_TRE (1 << 17) -#define IMCTR_AFE (1 << 16) -#define IMCTR_RTSEL_MASK (3 << 4) -#define IMCTR_RTSEL_SHIFT 4 -#define IMCTR_TREN (1 << 3) -#define IMCTR_INTEN (1 << 2) -#define IMCTR_FLUSH (1 << 1) -#define IMCTR_MMUEN (1 << 0) - -#define IMCAAR 0x0004 - -#define IMTTBCR 0x0008 -#define IMTTBCR_EAE (1 << 31) -#define IMTTBCR_PMB (1 << 30) -#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) -#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) -#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) -#define IMTTBCR_SH1_MASK (3 << 28) -#define IMTTBCR_ORGN1_NC (0 << 26) -#define IMTTBCR_ORGN1_WB_WA (1 << 26) -#define IMTTBCR_ORGN1_WT (2 << 26) -#define IMTTBCR_ORGN1_WB (3 << 26) -#define IMTTBCR_ORGN1_MASK (3 << 26) -#define IMTTBCR_IRGN1_NC (0 << 24) -#define IMTTBCR_IRGN1_WB_WA (1 << 24) -#define IMTTBCR_IRGN1_WT (2 << 24) -#define IMTTBCR_IRGN1_WB (3 << 24) -#define IMTTBCR_IRGN1_MASK (3 << 24) -#define IMTTBCR_TSZ1_MASK (7 << 16) -#define IMTTBCR_TSZ1_SHIFT 16 -#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) -#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) -#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) -#define IMTTBCR_SH0_MASK (3 << 12) -#define IMTTBCR_ORGN0_NC (0 << 10) -#define IMTTBCR_ORGN0_WB_WA (1 << 10) -#define IMTTBCR_ORGN0_WT (2 << 10) -#define IMTTBCR_ORGN0_WB (3 << 10) -#define IMTTBCR_ORGN0_MASK (3 << 10) -#define IMTTBCR_IRGN0_NC (0 << 8) -#define IMTTBCR_IRGN0_WB_WA (1 << 8) -#define IMTTBCR_IRGN0_WT (2 << 8) -#define IMTTBCR_IRGN0_WB (3 << 8) -#define IMTTBCR_IRGN0_MASK (3 << 8) -#define IMTTBCR_SL0_LVL_2 (0 << 4) -#define IMTTBCR_SL0_LVL_1 (1 << 4) -#define IMTTBCR_TSZ0_MASK (7 << 0) -#define IMTTBCR_TSZ0_SHIFT O - -#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) -#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) -#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) - -#define IMBUSCR 0x000c -#define IMBUSCR_DVM (1 << 2) -#define IMBUSCR_BUSSEL_SYS (0 << 0) -#define IMBUSCR_BUSSEL_CCI (1 << 0) -#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) -#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) -#define IMBUSCR_BUSSEL_MASK (3 << 0) - -#define IMTTLBR0 0x0010 -#define IMTTUBR0 0x0014 -#define IMTTLBR1 0x0018 -#define IMTTUBR1 0x001c - -#define IMSTR 0x0020 -#define IMSTR_ERRLVL_MASK (3 << 12) -#define IMSTR_ERRLVL_SHIFT 12 -#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) -#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) -#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) -#define IMSTR_ERRCODE_MASK (7 << 8) -#define IMSTR_MHIT (1 << 4) -#define IMSTR_ABORT (1 << 2) -#define IMSTR_PF (1 << 1) -#define IMSTR_TF (1 << 0) - -#define IMMAIR0 0x0028 -#define IMMAIR1 0x002c -#define IMMAIR_ATTR_MASK 0xff -#define IMMAIR_ATTR_DEVICE 0x04 -#define IMMAIR_ATTR_NC 0x44 -#define IMMAIR_ATTR_WBRWA 0xff -#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) -#define IMMAIR_ATTR_IDX_NC 0 -#define IMMAIR_ATTR_IDX_WBRWA 1 
-#define IMMAIR_ATTR_IDX_DEV 2 - -#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */ -#define IMEUAR 0x0034 /* R-Car Gen3 only */ - -#define IMPCTR 0x0200 -#define IMPSTR 0x0208 -#define IMPEAR 0x020c -#define IMPMBA(n) (0x0280 + ((n) * 4)) -#define IMPMBD(n) (0x02c0 + ((n) * 4)) +/* MMU "context" registers */ +#define IMCTR 0x0000 /* R-Car Gen2/3 */ +#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */ +#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ +#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ +#define IMTTBCR 0x0008 /* R-Car Gen2/3 */ +#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */ +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */ +#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ +#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */ + +#define IMBUSCR 0x000c /* R-Car Gen2 only */ +#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */ +#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */ + +#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */ +#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */ + +#define IMSTR 0x0020 /* R-Car Gen2/3 */ +#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */ +#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */ +#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */ +#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */ + +#define IMMAIR0 0x0028 /* R-Car Gen2/3 */ + +#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */ +#define IMEUAR 0x0034 /* R-Car Gen3 only */ + +/* uTLB registers */ #define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) -#define IMUCTR0(n) (0x0300 + ((n) * 16)) -#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) -#define IMUCTR_FIXADDEN (1 << 31) -#define IMUCTR_FIXADD_MASK (0xff << 16) -#define IMUCTR_FIXADD_SHIFT 16 -#define IMUCTR_TTSEL_MMU(n) ((n) << 4) -#define IMUCTR_TTSEL_PMB (8 << 4) -#define IMUCTR_TTSEL_MASK (15 << 4) -#define IMUCTR_FLUSH (1 << 1) -#define IMUCTR_MMUEN (1 << 0) +#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */ +#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */ +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */ +#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ +#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ #define IMUASID(n) ((n) < 32 ? 
IMUASID0(n) : IMUASID32(n)) -#define IMUASID0(n) (0x0308 + ((n) * 16)) -#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) -#define IMUASID_ASID8_MASK (0xff << 8) -#define IMUASID_ASID8_SHIFT 8 -#define IMUASID_ASID0_MASK (0xff << 0) -#define IMUASID_ASID0_SHIFT 0 +#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */ +#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */ /* ----------------------------------------------------------------------------- * Root device handling @@ -264,29 +191,61 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, iowrite32(data, mmu->base + offset); } +static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg) +{ + return mmu->features->ctx_offset_base + + context_id * mmu->features->ctx_offset_stride + reg; +} + +static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg) +{ + return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg)); +} + +static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg, u32 data) +{ + ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data); +} + static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, unsigned int reg) { - return ipmmu_read(domain->mmu->root, - domain->context_id * IM_CTX_SIZE + reg); + return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); } static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, unsigned int reg, u32 data) { - ipmmu_write(domain->mmu->root, - domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); } static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, unsigned int reg, u32 data) { if (domain->mmu != domain->mmu->root) - ipmmu_write(domain->mmu, - domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); + + ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); +} + +static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg) +{ + return mmu->features->utlb_offset_base + reg; +} - ipmmu_write(domain->mmu->root, - domain->context_id * IM_CTX_SIZE + reg, data); +static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu, + unsigned int utlb, u32 data) +{ + ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data); +} + +static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu, + unsigned int utlb, u32 data) +{ + ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data); } /* ----------------------------------------------------------------------------- @@ -334,11 +293,10 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, */ /* TODO: What should we set the ASID to ? */ - ipmmu_write(mmu, IMUASID(utlb), 0); + ipmmu_imuasid_write(mmu, utlb, 0); /* TODO: Do we need to flush the microTLB ? 
*/ - ipmmu_write(mmu, IMUCTR(utlb), - IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | - IMUCTR_MMUEN); + ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | + IMUCTR_FLUSH | IMUCTR_MMUEN); mmu->utlb_ctx[utlb] = domain->context_id; } @@ -350,7 +308,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, { struct ipmmu_vmsa_device *mmu = domain->mmu; - ipmmu_write(mmu, IMUCTR(utlb), 0); + ipmmu_imuctr_write(mmu, utlb, 0); mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; } @@ -361,16 +319,16 @@ static void ipmmu_tlb_flush_all(void *cookie) ipmmu_tlb_invalidate(domain); } -static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void ipmmu_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) { - /* The hardware doesn't support selective TLB flush. */ + ipmmu_tlb_flush_all(cookie); } -static const struct iommu_gather_ops ipmmu_gather_ops = { +static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, - .tlb_add_flush = ipmmu_tlb_add_flush, - .tlb_sync = ipmmu_tlb_flush_all, + .tlb_flush_walk = ipmmu_tlb_flush, + .tlb_flush_leaf = ipmmu_tlb_flush, }; /* ----------------------------------------------------------------------------- @@ -416,27 +374,29 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) u32 tmp; /* TTBR0 */ - ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; + ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr; ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr); ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); /* * TTBCR - * We use long descriptors with inner-shareable WBWA tables and allocate - * the whole 32-bit VA space to TTBR0. + * We use long descriptors and allocate the whole 32-bit VA space to + * TTBR0. 
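+	 * The inner-shareable, write-back/write-allocate attributes are only
+	 * added below when the per-SoC cache_snoop feature flag is set (true
+	 * in ipmmu_features_default, false in ipmmu_features_rcar_gen3);
+	 * otherwise those TTBCR fields are left at zero (non-shareable,
+	 * non-cacheable).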
*/ if (domain->mmu->features->twobit_imttbcr_sl0) tmp = IMTTBCR_SL0_TWOBIT_LVL_1; else tmp = IMTTBCR_SL0_LVL_1; - ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | - IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | - IMTTBCR_IRGN0_WB_WA | tmp); + if (domain->mmu->features->cache_snoop) + tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA; + + ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp); /* MAIR0 */ ipmmu_ctx_write_root(domain, IMMAIR0, - domain->cfg.arm_lpae_s1_cfg.mair[0]); + domain->cfg.arm_lpae_s1_cfg.mair); /* IMBUSCR */ if (domain->mmu->features->setup_imbuscr) @@ -480,7 +440,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.ias = 32; domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; + domain->cfg.tlb = &ipmmu_flush_ops; domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); domain->io_domain.geometry.force_aperture = true; /* @@ -722,7 +682,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, } static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); @@ -733,14 +693,14 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - return domain->iop->unmap(domain->iop, iova, size); + return domain->iop->unmap(domain->iop, iova, size, gather); } -static void ipmmu_iotlb_sync(struct iommu_domain *io_domain) +static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); @@ -748,6 +708,12 @@ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain) ipmmu_tlb_flush_all(domain); } +static void ipmmu_iotlb_sync(struct iommu_domain *io_domain, + struct iommu_iotlb_gather *gather) +{ + ipmmu_flush_iotlb_all(io_domain); +} + static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, dma_addr_t iova) { @@ -775,6 +741,7 @@ static int ipmmu_init_platform_device(struct device *dev, static const struct soc_device_attribute soc_rcar_gen3[] = { { .soc_id = "r8a774a1", }, + { .soc_id = "r8a774b1", }, { .soc_id = "r8a774c0", }, { .soc_id = "r8a7795", }, { .soc_id = "r8a7796", }, @@ -786,6 +753,7 @@ static const struct soc_device_attribute soc_rcar_gen3[] = { }; static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = { + { .soc_id = "r8a774b1", }, { .soc_id = "r8a774c0", }, { .soc_id = "r8a7795", .revision = "ES3.*" }, { .soc_id = "r8a77965", }, @@ -957,7 +925,7 @@ static const struct iommu_ops ipmmu_ops = { .detach_dev = ipmmu_detach_device, .map = ipmmu_map, .unmap = ipmmu_unmap, - .flush_iotlb_all = ipmmu_iotlb_sync, + .flush_iotlb_all = ipmmu_flush_iotlb_all, .iotlb_sync = ipmmu_iotlb_sync, .iova_to_phys = ipmmu_iova_to_phys, .add_device = ipmmu_add_device, @@ -977,7 +945,7 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) /* Disable all contexts. 
*/ for (i = 0; i < mmu->num_ctx; ++i) - ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); + ipmmu_ctx_write(mmu, i, IMCTR, 0); } static const struct ipmmu_features ipmmu_features_default = { @@ -988,6 +956,10 @@ static const struct ipmmu_features ipmmu_features_default = { .setup_imbuscr = true, .twobit_imttbcr_sl0 = false, .reserved_context = false, + .cache_snoop = true, + .ctx_offset_base = 0, + .ctx_offset_stride = 0x40, + .utlb_offset_base = 0, }; static const struct ipmmu_features ipmmu_features_rcar_gen3 = { @@ -998,6 +970,10 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .setup_imbuscr = false, .twobit_imttbcr_sl0 = true, .reserved_context = true, + .cache_snoop = false, + .ctx_offset_base = 0, + .ctx_offset_stride = 0x40, + .utlb_offset_base = 0, }; static const struct of_device_id ipmmu_of_ids[] = { @@ -1008,6 +984,9 @@ static const struct of_device_id ipmmu_of_ids[] = { .compatible = "renesas,ipmmu-r8a774a1", .data = &ipmmu_features_rcar_gen3, }, { + .compatible = "renesas,ipmmu-r8a774b1", + .data = &ipmmu_features_rcar_gen3, + }, { .compatible = "renesas,ipmmu-r8a774c0", .data = &ipmmu_features_rcar_gen3, }, { @@ -1076,8 +1055,6 @@ static int ipmmu_probe(struct platform_device *pdev) mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); - irq = platform_get_irq(pdev, 0); - /* * Determine if this IPMMU instance is a root device by checking for * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property. @@ -1096,10 +1073,9 @@ static int ipmmu_probe(struct platform_device *pdev) /* Root devices have mandatory IRQs */ if (ipmmu_is_root(mmu)) { - if (irq < 0) { - dev_err(&pdev->dev, "no IRQ found\n"); + irq = platform_get_irq(pdev, 0); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, dev_name(&pdev->dev), mmu); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b25e2eb9e038..94a6df1bddd6 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -168,20 +168,29 @@ fail: return; } -static void __flush_iotlb_sync(void *cookie) +static void __flush_iotlb_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) { - /* - * Nothing is needed here, the barrier to guarantee - * completion of the tlb sync operation is implicitly - * taken care when the iommu client does a writel before - * kick starting the other master. 
- */ + __flush_iotlb_range(iova, size, granule, false, cookie); } -static const struct iommu_gather_ops msm_iommu_gather_ops = { +static void __flush_iotlb_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, size, granule, true, cookie); +} + +static void __flush_iotlb_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, granule, granule, true, cookie); +} + +static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, - .tlb_add_flush = __flush_iotlb_range, - .tlb_sync = __flush_iotlb_sync, + .tlb_flush_walk = __flush_iotlb_walk, + .tlb_flush_leaf = __flush_iotlb_leaf, + .tlb_add_page = __flush_iotlb_page, }; static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) @@ -270,8 +279,8 @@ static void __program_context(void __iomem *base, int ctx, SET_V2PCFG(base, ctx, 0x3); SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr); - SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]); - SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]); + SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr); + SET_TTBR1(base, ctx, 0); /* Set prrr and nmrr */ SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr); @@ -345,7 +354,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv) .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &msm_iommu_gather_ops, + .tlb = &msm_iommu_flush_ops, .iommu_dev = priv->dev, }; @@ -495,7 +504,7 @@ fail: } static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t len, int prot) + phys_addr_t pa, size_t len, int prot, gfp_t gfp) { struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; @@ -509,13 +518,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t len) + size_t len, struct iommu_iotlb_gather *gather) { struct msm_priv *priv = to_msm_priv(domain); unsigned long flags; spin_lock_irqsave(&priv->pgtlock, flags); - len = priv->iop->unmap(priv->iop, iova, len); + len = priv->iop->unmap(priv->iop, iova, len, gather); spin_unlock_irqrestore(&priv->pgtlock, flags); return len; @@ -691,6 +700,13 @@ static struct iommu_ops msm_iommu_ops = { .detach_dev = msm_iommu_detach_dev, .map = msm_iommu_map, .unmap = msm_iommu_unmap, + /* + * Nothing is needed here, the barrier to guarantee + * completion of the tlb sync operation is implicitly + * taken care when the iommu client does a writel before + * kick starting the other master. 
+ */ + .iotlb_sync = NULL, .iova_to_phys = msm_iommu_iova_to_phys, .add_device = msm_iommu_add_device, .remove_device = msm_iommu_remove_device, @@ -750,7 +766,6 @@ static int msm_iommu_probe(struct platform_device *pdev) iommu->irq = platform_get_irq(pdev, 0); if (iommu->irq < 0) { - dev_err(iommu->dev, "could not get iommu irq\n"); ret = -ENODEV; goto fail; } diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 82e4be4dfdaf..95945f467c03 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -28,6 +28,7 @@ #include "mtk_iommu.h" #define REG_MMU_PT_BASE_ADDR 0x000 +#define MMU_PT_ADDR_MASK GENMASK(31, 7) #define REG_MMU_INVALIDATE 0x020 #define F_ALL_INVLD 0x2 @@ -44,12 +45,9 @@ #define REG_MMU_DCM_DIS 0x050 #define REG_MMU_CTRL_REG 0x110 +#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4) #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) -#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \ - ((data)->m4u_plat == M4U_MT2712 ? 4 : 5) -/* It's named by F_MMU_TF_PROT_SEL in mt2712. */ -#define F_MMU_TF_PROTECT_SEL(prot, data) \ - (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) +#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5) #define REG_MMU_IVRP_PADDR 0x114 @@ -66,26 +64,32 @@ #define F_INT_CLR_BIT BIT(12) #define REG_MMU_INT_MAIN_CONTROL 0x124 -#define F_INT_TRANSLATION_FAULT BIT(0) -#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1) -#define F_INT_INVALID_PA_FAULT BIT(2) -#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3) -#define F_INT_TLB_MISS_FAULT BIT(4) -#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5) -#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6) + /* mmu0 | mmu1 */ +#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7)) +#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8)) +#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9)) +#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10)) +#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11)) +#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12)) +#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13)) #define REG_MMU_CPE_DONE 0x12C #define REG_MMU_FAULT_ST1 0x134 +#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0) +#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7) -#define REG_MMU_FAULT_VA 0x13c +#define REG_MMU0_FAULT_VA 0x13c #define F_MMU_FAULT_VA_WRITE_BIT BIT(1) #define F_MMU_FAULT_VA_LAYER_BIT BIT(0) -#define REG_MMU_INVLD_PA 0x140 -#define REG_MMU_INT_ID 0x150 -#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) -#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) +#define REG_MMU0_INVLD_PA 0x140 +#define REG_MMU1_FAULT_VA 0x144 +#define REG_MMU1_INVLD_PA 0x148 +#define REG_MMU0_INT_ID 0x150 +#define REG_MMU1_INT_ID 0x154 +#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) +#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) #define MTK_PROTECT_PA_ALIGN 128 @@ -97,8 +101,6 @@ #define MTK_M4U_TO_PORT(id) ((id) & 0x1f) struct mtk_iommu_domain { - spinlock_t pgtlock; /* lock for page table */ - struct io_pgtable_cfg cfg; struct io_pgtable_ops *iop; @@ -107,6 +109,30 @@ struct mtk_iommu_domain { static const struct iommu_ops mtk_iommu_ops; +/* + * In M4U 4GB mode, the physical address is remapped as below: + * + * CPU Physical address: + * ==================== + * + * 0 1G 2G 3G 4G 5G + * |---A---|---B---|---C---|---D---|---E---| + * +--I/O--+------------Memory-------------+ + * + * IOMMU output physical address: + * ============================= + * + * 4G 5G 6G 7G 8G + * |---E---|---B---|---C---|---D---| + * +------------Memory-------------+ + * + * The Region 'A'(I/O) can NOT be mapped by M4U; For Region 'B'/'C'/'D', 
bit 32 of the CPU physical address must always be set, and for + * Region 'E', the CPU physical address is kept as is. + * Additionally, the IOMMU consumers always use the CPU physical address. + */ +#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL + static LIST_HEAD(m4ulist); /* List all the M4U HWs */ #define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list) @@ -145,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie) } } -static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, - void *cookie) +static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, + size_t granule, void *cookie) { struct mtk_iommu_data *data = cookie; + unsigned long flags; + int ret; + u32 tmp; for_each_m4u(data) { + spin_lock_irqsave(&data->tlb_lock, flags); writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); @@ -160,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, data->base + REG_MMU_INVLD_END_A); writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); - data->tlb_flush_active = true; - } -} - -static void mtk_iommu_tlb_sync(void *cookie) -{ - struct mtk_iommu_data *data = cookie; - int ret; - u32 tmp; - - for_each_m4u(data) { - /* Avoid timing out if there's nothing to wait for */ - if (!data->tlb_flush_active) - return; + /* tlb sync */ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, - tmp, tmp != 0, 10, 100000); + tmp, tmp != 0, 10, 1000); if (ret) { dev_warn(data->dev, "Partial TLB flush timed out, falling back to full flush\n"); @@ -184,14 +200,25 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, } /* Clear the CPE status */ writel_relaxed(0, data->base + REG_MMU_CPE_DONE); - data->tlb_flush_active = false; + spin_unlock_irqrestore(&data->tlb_lock, flags); } } -static const struct iommu_gather_ops mtk_iommu_gather_ops = { +static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + struct mtk_iommu_data *data = cookie; + struct iommu_domain *domain = &data->m4u_dom->domain; + + iommu_iotlb_gather_add_page(domain, gather, iova, granule); +} + +static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, - .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, - .tlb_sync = mtk_iommu_tlb_sync, + .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync, + .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync, + .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, }; static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) @@ -204,13 +231,21 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) /* Read error info from registers */ int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1); - fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); + if (int_state & F_REG_MMU0_FAULT_MASK) { + regval = readl_relaxed(data->base + REG_MMU0_INT_ID); + fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA); + fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA); + } else { + regval = readl_relaxed(data->base + REG_MMU1_INT_ID); + fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA); + fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA); + } layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; - fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); - regval = readl_relaxed(data->base + REG_MMU_INT_ID); - fault_larb = F_MMU0_INT_ID_LARB_ID(regval); - fault_port = 
F_MMU0_INT_ID_PORT_ID(regval); + fault_larb = F_MMU_INT_ID_LARB_ID(regval); + fault_port = F_MMU_INT_ID_PORT_ID(regval); + + fault_larb = data->plat_data->larbid_remap[fault_larb]; if (report_iommu_fault(&dom->domain, data->dev, fault_iova, write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { @@ -242,7 +277,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data, for (i = 0; i < fwspec->num_ids; ++i) { larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); portid = MTK_M4U_TO_PORT(fwspec->ids[i]); - larb_mmu = &data->smi_imu.larb_imu[larbid]; + larb_mmu = &data->larb_imu[larbid]; dev_dbg(dev, "%s iommu port: %d\n", enable ? "enable" : "disable", portid); @@ -258,22 +293,18 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) { struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); - spin_lock_init(&dom->pgtlock); - dom->cfg = (struct io_pgtable_cfg) { .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | - IO_PGTABLE_QUIRK_TLBI_ON_MAP, + IO_PGTABLE_QUIRK_TLBI_ON_MAP | + IO_PGTABLE_QUIRK_ARM_MTK_EXT, .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, .ias = 32, - .oas = 32, - .tlb = &mtk_iommu_gather_ops, + .oas = 34, + .tlb = &mtk_iommu_flush_ops, .iommu_dev = data->dev, }; - if (data->enable_4GB) - dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB; - dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); if (!dom->iop) { dev_err(data->dev, "Failed to alloc io pgtable\n"); @@ -336,7 +367,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, /* Update the pgtable base address register of the M4U HW */ if (!data->m4u_dom) { data->m4u_dom = dom; - writel(dom->cfg.arm_v7s_cfg.ttbr[0], + writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, data->base + REG_MMU_PT_BASE_ADDR); } @@ -356,37 +387,44 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, } static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - unsigned long flags; - int ret; + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); - spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), - size, prot); - spin_unlock_irqrestore(&dom->pgtlock, flags); + /* The "4GB mode" M4U physically can not use the lower remap of Dram. 
*/ + if (data->enable_4GB) + paddr |= BIT_ULL(32); - return ret; + /* Synchronize with the tlb_lock */ + return dom->iop->map(dom->iop, iova, paddr, size, prot); } static size_t mtk_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - unsigned long flags; - size_t unmapsz; - spin_lock_irqsave(&dom->pgtlock, flags); - unmapsz = dom->iop->unmap(dom->iop, iova, size); - spin_unlock_irqrestore(&dom->pgtlock, flags); + return dom->iop->unmap(dom->iop, iova, size, gather); +} - return unmapsz; +static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data()); } -static void mtk_iommu_iotlb_sync(struct iommu_domain *domain) +static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); + size_t length = gather->end - gather->start; + + if (gather->start == ULONG_MAX) + return; + + mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize, + data); } static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, @@ -394,15 +432,11 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, { struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); - unsigned long flags; phys_addr_t pa; - spin_lock_irqsave(&dom->pgtlock, flags); pa = dom->iop->iova_to_phys(dom->iop, iova); - spin_unlock_irqrestore(&dom->pgtlock, flags); - - if (data->enable_4GB) - pa |= BIT_ULL(32); + if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) + pa &= ~BIT_ULL(32); return pa; } @@ -490,7 +524,7 @@ static const struct iommu_ops mtk_iommu_ops = { .detach_dev = mtk_iommu_detach_device, .map = mtk_iommu_map, .unmap = mtk_iommu_unmap, - .flush_iotlb_all = mtk_iommu_iotlb_sync, + .flush_iotlb_all = mtk_iommu_flush_iotlb_all, .iotlb_sync = mtk_iommu_iotlb_sync, .iova_to_phys = mtk_iommu_iova_to_phys, .add_device = mtk_iommu_add_device, @@ -511,9 +545,11 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) return ret; } - regval = F_MMU_TF_PROTECT_SEL(2, data); - if (data->m4u_plat == M4U_MT8173) - regval |= F_MMU_PREFETCH_RT_REPLACE_MOD; + if (data->plat_data->m4u_plat == M4U_MT8173) + regval = F_MMU_PREFETCH_RT_REPLACE_MOD | + F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; + else + regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR; writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); regval = F_L2_MULIT_HIT_EN | @@ -533,14 +569,14 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) F_INT_PRETETCH_TRANSATION_FIFO_FAULT; writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); - if (data->m4u_plat == M4U_MT8173) + if (data->plat_data->m4u_plat == M4U_MT8173) regval = (data->protect_base >> 1) | (data->enable_4GB << 31); else regval = lower_32_bits(data->protect_base) | upper_32_bits(data->protect_base); writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); - if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { + if (data->enable_4GB && data->plat_data->has_vld_pa_rng) { /* * If 4GB mode is enabled, the validate PA range is from * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. 
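The mtk_iommu hunks above drop the per-domain page-table spinlock and the old IO_PGTABLE_QUIRK_ARM_MTK_4GB quirk in favour of an explicit bit-32 remap: mtk_iommu_map() sets bit 32 of the physical address when 4GB mode is enabled, and mtk_iommu_iova_to_phys() strips it again for anything at or above MTK_IOMMU_4GB_MODE_REMAP_BASE. A minimal userspace sketch of that address arithmetic, with hypothetical helper names (this is not the driver's code, only the math the comment diagram describes):

/*
 * Hypothetical model of the "4GB mode" address handling: on map, CPU
 * physical addresses in regions B/C/D get bit 32 set before being handed
 * to the M4U; on iova_to_phys, anything at or above the remap base has
 * bit 32 stripped so callers always see CPU physical addresses.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000ULL
#define BIT_ULL(n) (1ULL << (n))

/* CPU PA -> M4U output PA, as mtk_iommu_map() does when enable_4GB is set */
static uint64_t cpu_pa_to_m4u_pa(uint64_t pa, bool enable_4gb)
{
	return enable_4gb ? (pa | BIT_ULL(32)) : pa;
}

/* M4U output PA -> CPU PA, as mtk_iommu_iova_to_phys() does */
static uint64_t m4u_pa_to_cpu_pa(uint64_t pa, bool enable_4gb)
{
	if (enable_4gb && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);
	return pa;
}

int main(void)
{
	/* Region 'B' (1G..2G): bit 32 set on the way in, cleared on the way out. */
	uint64_t cpu_pa = 0x40000000ULL;
	uint64_t m4u_pa = cpu_pa_to_m4u_pa(cpu_pa, true);

	assert(m4u_pa == 0x140000000ULL);
	assert(m4u_pa_to_cpu_pa(m4u_pa, true) == cpu_pa);
	printf("4GB-mode remap round-trip ok\n");
	return 0;
}

Region 'E' already has bit 32 set, so the OR on the map path leaves it unchanged; that is why only the >= MTK_IOMMU_4GB_MODE_REMAP_BASE case needs the mask on the way back.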
@@ -550,8 +586,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) } writel_relaxed(0, data->base + REG_MMU_DCM_DIS); - /* It's MISC control register whose default value is ok except mt8173.*/ - if (data->m4u_plat == M4U_MT8173) + if (data->plat_data->reset_axi) writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, @@ -584,7 +619,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (!data) return -ENOMEM; data->dev = dev; - data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev); + data->plat_data = of_device_get_match_data(dev); /* Protect memory. HW will access here while translation fault.*/ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); @@ -594,6 +629,8 @@ static int mtk_iommu_probe(struct platform_device *pdev) /* Whether the current dram is over 4GB */ data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT)); + if (!data->plat_data->has_4gb_mode) + data->enable_4GB = false; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->base = devm_ioremap_resource(dev, res); @@ -605,15 +642,16 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (data->irq < 0) return data->irq; - data->bclk = devm_clk_get(dev, "bclk"); - if (IS_ERR(data->bclk)) - return PTR_ERR(data->bclk); + if (data->plat_data->has_bclk) { + data->bclk = devm_clk_get(dev, "bclk"); + if (IS_ERR(data->bclk)) + return PTR_ERR(data->bclk); + } larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL); if (larb_nr < 0) return larb_nr; - data->smi_imu.larb_nr = larb_nr; for (i = 0; i < larb_nr; i++) { struct device_node *larbnode; @@ -638,7 +676,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) of_node_put(larbnode); return -EPROBE_DEFER; } - data->smi_imu.larb_imu[id].dev = &plarbdev->dev; + data->larb_imu[id].dev = &plarbdev->dev; component_match_add_release(dev, &match, release_of, compare_of, larbnode); @@ -662,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + spin_lock_init(&data->tlb_lock); list_add_tail(&data->list, &m4ulist); if (!iommu_present(&platform_bus_type)) @@ -699,6 +738,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); + reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); clk_disable_unprepare(data->bclk); return 0; } @@ -707,6 +747,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); struct mtk_iommu_suspend_reg *reg = &data->reg; + struct mtk_iommu_domain *m4u_dom = data->m4u_dom; void __iomem *base = data->base; int ret; @@ -722,8 +763,9 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); - if (data->m4u_dom) - writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], + writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); + if (m4u_dom) + writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR); return 0; } @@ -732,9 +774,32 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) }; +static const struct 
mtk_iommu_plat_data mt2712_data = { + .m4u_plat = M4U_MT2712, + .has_4gb_mode = true, + .has_bclk = true, + .has_vld_pa_rng = true, + .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, +}; + +static const struct mtk_iommu_plat_data mt8173_data = { + .m4u_plat = M4U_MT8173, + .has_4gb_mode = true, + .has_bclk = true, + .reset_axi = true, + .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */ +}; + +static const struct mtk_iommu_plat_data mt8183_data = { + .m4u_plat = M4U_MT8183, + .reset_axi = true, + .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1}, +}; + static const struct of_device_id mtk_iommu_of_ids[] = { - { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712}, - { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173}, + { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, + { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, + { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, {} }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 59337323db58..ea949a324e33 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -24,12 +24,25 @@ struct mtk_iommu_suspend_reg { u32 int_control0; u32 int_main_control; u32 ivrp_paddr; + u32 vld_pa_rng; }; enum mtk_iommu_plat { M4U_MT2701, M4U_MT2712, M4U_MT8173, + M4U_MT8183, +}; + +struct mtk_iommu_plat_data { + enum mtk_iommu_plat m4u_plat; + bool has_4gb_mode; + + /* HW will use the EMI clock if there isn't the "bclk". */ + bool has_bclk; + bool has_vld_pa_rng; + bool reset_axi; + unsigned char larbid_remap[MTK_LARB_NR_MAX]; }; struct mtk_iommu_domain; @@ -43,14 +56,14 @@ struct mtk_iommu_data { struct mtk_iommu_suspend_reg reg; struct mtk_iommu_domain *m4u_dom; struct iommu_group *m4u_group; - struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ bool enable_4GB; - bool tlb_flush_active; + spinlock_t tlb_lock; /* lock for tlb range flush */ struct iommu_device iommu; - enum mtk_iommu_plat m4u_plat; + const struct mtk_iommu_plat_data *plat_data; struct list_head list; + struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; }; static inline int compare_of(struct device *dev, void *data) @@ -67,14 +80,14 @@ static inline int mtk_iommu_bind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); - return component_bind_all(dev, &data->smi_imu); + return component_bind_all(dev, &data->larb_imu); } static inline void mtk_iommu_unbind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); - component_unbind_all(dev, &data->smi_imu); + component_unbind_all(dev, &data->larb_imu); } #endif diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index abeeac488372..e93b94ecac45 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -206,7 +206,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data, for (i = 0; i < fwspec->num_ids; ++i) { larbid = mt2701_m4u_to_larb(fwspec->ids[i]); portid = mt2701_m4u_to_port(fwspec->ids[i]); - larb_mmu = &data->smi_imu.larb_imu[larbid]; + larb_mmu = &data->larb_imu[larbid]; dev_dbg(dev, "%s iommu port: %d\n", enable ? 
"enable" : "disable", portid); @@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, } static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; @@ -324,7 +324,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t mtk_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); unsigned long flags; @@ -426,7 +427,7 @@ static int mtk_iommu_add_device(struct device *dev) int err; of_for_each_phandle(&it, err, dev->of_node, "iommus", - "#iommu-cells", 0) { + "#iommu-cells", -1) { int count = of_phandle_iterator_args(&it, iommu_spec.args, MAX_PHANDLE_ARGS); iommu_spec.np = of_node_get(it.node); @@ -610,14 +611,12 @@ static int mtk_iommu_probe(struct platform_device *pdev) } } - data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev; + data->larb_imu[larb_nr].dev = &plarbdev->dev; component_match_add_release(dev, &match, release_of, compare_of, larb_spec.np); larb_nr++; } - data->smi_imu.larb_nr = larb_nr; - platform_set_drvdata(pdev, data); ret = mtk_iommu_hw_init(data); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 614a93aa5305..20738aacac89 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -8,9 +8,12 @@ #include <linux/export.h> #include <linux/iommu.h> #include <linux/limits.h> +#include <linux/module.h> +#include <linux/msi.h> #include <linux/of.h> #include <linux/of_iommu.h> #include <linux/of_pci.h> +#include <linux/pci.h> #include <linux/slab.h> #include <linux/fsl/mc.h> @@ -89,16 +92,16 @@ static int of_iommu_xlate(struct device *dev, { const struct iommu_ops *ops; struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; - int err; + int ret; ops = iommu_ops_from_fwnode(fwnode); if ((ops && !ops->of_xlate) || !of_device_is_available(iommu_spec->np)) return NO_IOMMU; - err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); - if (err) - return err; + ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); + if (ret) + return ret; /* * The otherwise-empty fwspec handily serves to indicate the specific * IOMMU device we're waiting for, which will be useful if we ever get @@ -107,7 +110,12 @@ static int of_iommu_xlate(struct device *dev, if (!ops) return driver_deferred_probe_check_state(dev); - return ops->of_xlate(dev, iommu_spec); + if (!try_module_get(ops->owner)) + return -ENODEV; + + ret = ops->of_xlate(dev, iommu_spec); + module_put(ops->owner); + return ret; } struct of_pci_iommu_alias_info { @@ -177,6 +185,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, .np = master_np, }; + pci_request_acs(); err = pci_for_each_dma_alias(to_pci_dev(dev), of_pci_iommu_init, &info); } else if (dev_is_fsl_mc(dev)) { @@ -194,8 +203,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, if (err) break; } - } + fwspec = dev_iommu_fwspec_get(dev); + if (!err && fwspec) + of_property_read_u32(master_np, "pasid-num-bits", + &fwspec->num_pasid_bits); + } /* * Two success conditions can be represented by non-negative err here: diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index dfb961d8c21b..be551cc34be4 100644 --- a/drivers/iommu/omap-iommu.c +++ 
b/drivers/iommu/omap-iommu.c @@ -35,6 +35,15 @@ static const struct iommu_ops omap_iommu_ops; +struct orphan_dev { + struct device *dev; + struct list_head node; +}; + +static LIST_HEAD(orphan_dev_list); + +static DEFINE_SPINLOCK(orphan_lock); + #define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev)) /* bitmap of the page sizes currently supported */ @@ -53,6 +62,8 @@ static const struct iommu_ops omap_iommu_ops; static struct platform_driver omap_iommu_driver; static struct kmem_cache *iopte_cachep; +static int _omap_iommu_add_device(struct device *dev); + /** * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain * @dom: generic iommu domain handle @@ -65,6 +76,9 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom) /** * omap_iommu_save_ctx - Save registers for pm off-mode support * @dev: client device + * + * This should be treated as an deprecated API. It is preserved only + * to maintain existing functionality for OMAP3 ISP driver. **/ void omap_iommu_save_ctx(struct device *dev) { @@ -92,6 +106,9 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); /** * omap_iommu_restore_ctx - Restore registers for pm off-mode support * @dev: client device + * + * This should be treated as an deprecated API. It is preserved only + * to maintain existing functionality for OMAP3 ISP driver. **/ void omap_iommu_restore_ctx(struct device *dev) { @@ -186,36 +203,18 @@ static void omap2_iommu_disable(struct omap_iommu *obj) static int iommu_enable(struct omap_iommu *obj) { - int err; - struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); - - if (pdata && pdata->deassert_reset) { - err = pdata->deassert_reset(pdev, pdata->reset_name); - if (err) { - dev_err(obj->dev, "deassert_reset failed: %d\n", err); - return err; - } - } - - pm_runtime_get_sync(obj->dev); + int ret; - err = omap2_iommu_enable(obj); + ret = pm_runtime_get_sync(obj->dev); + if (ret < 0) + pm_runtime_put_noidle(obj->dev); - return err; + return ret < 0 ? 
ret : 0; } static void iommu_disable(struct omap_iommu *obj) { - struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); - - omap2_iommu_disable(obj); - pm_runtime_put_sync(obj->dev); - - if (pdata && pdata->assert_reset) - pdata->assert_reset(pdev, pdata->reset_name); } /* @@ -901,15 +900,219 @@ static void omap_iommu_detach(struct omap_iommu *obj) dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE, DMA_TO_DEVICE); - iommu_disable(obj); obj->pd_dma = 0; obj->iopgd = NULL; + iommu_disable(obj); spin_unlock(&obj->iommu_lock); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); } +static void omap_iommu_save_tlb_entries(struct omap_iommu *obj) +{ + struct iotlb_lock lock; + struct cr_regs cr; + struct cr_regs *tmp; + int i; + + /* check if there are any locked tlbs to save */ + iotlb_lock_get(obj, &lock); + obj->num_cr_ctx = lock.base; + if (!obj->num_cr_ctx) + return; + + tmp = obj->cr_ctx; + for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr) + * tmp++ = cr; +} + +static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj) +{ + struct iotlb_lock l; + struct cr_regs *tmp; + int i; + + /* no locked tlbs to restore */ + if (!obj->num_cr_ctx) + return; + + l.base = 0; + tmp = obj->cr_ctx; + for (i = 0; i < obj->num_cr_ctx; i++, tmp++) { + l.vict = i; + iotlb_lock_set(obj, &l); + iotlb_load_cr(obj, tmp); + } + l.base = obj->num_cr_ctx; + l.vict = i; + iotlb_lock_set(obj, &l); +} + +/** + * omap_iommu_domain_deactivate - deactivate attached iommu devices + * @domain: iommu domain attached to the target iommu device + * + * This API allows the client devices of IOMMU devices to suspend + * the IOMMUs they control at runtime, after they are idled and + * suspended all activity. System Suspend will leverage the PM + * driver late callbacks. + **/ +int omap_iommu_domain_deactivate(struct iommu_domain *domain) +{ + struct omap_iommu_domain *omap_domain = to_omap_domain(domain); + struct omap_iommu_device *iommu; + struct omap_iommu *oiommu; + int i; + + if (!omap_domain->dev) + return 0; + + iommu = omap_domain->iommus; + iommu += (omap_domain->num_iommus - 1); + for (i = 0; i < omap_domain->num_iommus; i++, iommu--) { + oiommu = iommu->iommu_dev; + pm_runtime_put_sync(oiommu->dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate); + +/** + * omap_iommu_domain_activate - activate attached iommu devices + * @domain: iommu domain attached to the target iommu device + * + * This API allows the client devices of IOMMU devices to resume the + * IOMMUs they control at runtime, before they can resume operations. + * System Resume will leverage the PM driver late callbacks. + **/ +int omap_iommu_domain_activate(struct iommu_domain *domain) +{ + struct omap_iommu_domain *omap_domain = to_omap_domain(domain); + struct omap_iommu_device *iommu; + struct omap_iommu *oiommu; + int i; + + if (!omap_domain->dev) + return 0; + + iommu = omap_domain->iommus; + for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { + oiommu = iommu->iommu_dev; + pm_runtime_get_sync(oiommu->dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(omap_iommu_domain_activate); + +/** + * omap_iommu_runtime_suspend - disable an iommu device + * @dev: iommu device + * + * This function performs all that is necessary to disable an + * IOMMU device, either during final detachment from a client + * device, or during system/runtime suspend of the device. 
This + * includes programming all the appropriate IOMMU registers, and + * managing the associated omap_hwmod's state and the device's + * reset line. This function also saves the context of any + * locked TLBs if suspending. + **/ +static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iommu_platform_data *pdata = dev_get_platdata(dev); + struct omap_iommu *obj = to_iommu(dev); + int ret; + + /* save the TLBs only during suspend, and not for power down */ + if (obj->domain && obj->iopgd) + omap_iommu_save_tlb_entries(obj); + + omap2_iommu_disable(obj); + + if (pdata && pdata->device_idle) + pdata->device_idle(pdev); + + if (pdata && pdata->assert_reset) + pdata->assert_reset(pdev, pdata->reset_name); + + if (pdata && pdata->set_pwrdm_constraint) { + ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst); + if (ret) { + dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n", + ret); + } + } + + return 0; +} + +/** + * omap_iommu_runtime_resume - enable an iommu device + * @dev: iommu device + * + * This function performs all that is necessary to enable an + * IOMMU device, either during initial attachment to a client + * device, or during system/runtime resume of the device. This + * includes programming all the appropriate IOMMU registers, and + * managing the associated omap_hwmod's state and the device's + * reset line. The function also restores any locked TLBs if + * resuming after a suspend. + **/ +static __maybe_unused int omap_iommu_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iommu_platform_data *pdata = dev_get_platdata(dev); + struct omap_iommu *obj = to_iommu(dev); + int ret = 0; + + if (pdata && pdata->set_pwrdm_constraint) { + ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst); + if (ret) { + dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n", + ret); + } + } + + if (pdata && pdata->deassert_reset) { + ret = pdata->deassert_reset(pdev, pdata->reset_name); + if (ret) { + dev_err(dev, "deassert_reset failed: %d\n", ret); + return ret; + } + } + + if (pdata && pdata->device_enable) + pdata->device_enable(pdev); + + /* restore the TLBs only during resume, and not for power up */ + if (obj->domain) + omap_iommu_restore_tlb_entries(obj); + + ret = omap2_iommu_enable(obj); + + return ret; +} + +/** + * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation + * @dev: iommu device + * + * This function performs the necessary checks to determine if the IOMMU + * device needs suspending or not. The function checks if the runtime_pm + * status of the device is suspended, and returns 1 in that case. This + * results in the PM core to skip invoking any of the Sleep PM callbacks + * (suspend, suspend_late, resume, resume_early etc). 
+ */ +static int omap_iommu_prepare(struct device *dev) +{ + if (pm_runtime_status_suspended(dev)) + return 1; + return 0; +} + static bool omap_iommu_can_register(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -974,6 +1177,7 @@ static int omap_iommu_probe(struct platform_device *pdev) struct omap_iommu *obj; struct resource *res; struct device_node *of = pdev->dev.of_node; + struct orphan_dev *orphan_dev, *tmp; if (!of) { pr_err("%s: only DT-based devices are supported\n", __func__); @@ -984,6 +1188,15 @@ static int omap_iommu_probe(struct platform_device *pdev) if (!obj) return -ENOMEM; + /* + * self-manage the ordering dependencies between omap_device_enable/idle + * and omap_device_assert/deassert_hardreset API + */ + if (pdev->dev.pm_domain) { + dev_dbg(&pdev->dev, "device pm_domain is being reset\n"); + pdev->dev.pm_domain = NULL; + } + obj->name = dev_name(&pdev->dev); obj->nr_tlb_entries = 32; err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); @@ -996,6 +1209,11 @@ static int omap_iommu_probe(struct platform_device *pdev) obj->dev = &pdev->dev; obj->ctx = (void *)obj + sizeof(*obj); + obj->cr_ctx = devm_kzalloc(&pdev->dev, + sizeof(*obj->cr_ctx) * obj->nr_tlb_entries, + GFP_KERNEL); + if (!obj->cr_ctx) + return -ENOMEM; spin_lock_init(&obj->iommu_lock); spin_lock_init(&obj->page_table_lock); @@ -1036,13 +1254,20 @@ static int omap_iommu_probe(struct platform_device *pdev) goto out_sysfs; } - pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); omap_iommu_debugfs_add(obj); dev_info(&pdev->dev, "%s registered\n", obj->name); + list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) { + err = _omap_iommu_add_device(orphan_dev->dev); + if (!err) { + list_del(&orphan_dev->node); + kfree(orphan_dev); + } + } + return 0; out_sysfs: @@ -1072,6 +1297,14 @@ static int omap_iommu_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops omap_iommu_pm_ops = { + .prepare = omap_iommu_prepare, + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend, + omap_iommu_runtime_resume, NULL) +}; + static const struct of_device_id omap_iommu_of_match[] = { { .compatible = "ti,omap2-iommu" }, { .compatible = "ti,omap4-iommu" }, @@ -1085,6 +1318,7 @@ static struct platform_driver omap_iommu_driver = { .remove = omap_iommu_remove, .driver = { .name = "omap-iommu", + .pm = &omap_iommu_pm_ops, .of_match_table = of_match_ptr(omap_iommu_of_match), }, }; @@ -1105,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) } static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, - phys_addr_t pa, size_t bytes, int prot) + phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) { struct omap_iommu_domain *omap_domain = to_omap_domain(domain); struct device *dev = omap_domain->dev; @@ -1149,7 +1383,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, } static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct omap_iommu_domain *omap_domain = to_omap_domain(domain); struct device *dev = omap_domain->dev; @@ -1423,7 +1657,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, return ret; } -static int omap_iommu_add_device(struct device *dev) +static int _omap_iommu_add_device(struct device *dev) { struct omap_iommu_arch_data *arch_data, *tmp; struct omap_iommu *oiommu; 
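The omap-iommu changes above park client devices whose IOMMU has not probed yet on an orphan_dev_list, return -EPROBE_DEFER from the add path, and let omap_iommu_probe() retry every parked device once the IOMMU registers. A rough userspace model of that park-and-retry pattern (hypothetical names, a plain singly linked list instead of the kernel's spinlock-protected list_head) could look like:

/*
 * Sketch only: clients that arrive before the IOMMU are parked; when the
 * IOMMU "probes", the parked clients are retried in order.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct orphan_dev {
	const char *name;
	struct orphan_dev *next;
};

static struct orphan_dev *orphan_list;
static bool iommu_ready;

/* Returns 0 on success, -1 ("defer") while the IOMMU has not probed yet. */
static int add_device(const char *name)
{
	if (!iommu_ready) {
		struct orphan_dev *o = malloc(sizeof(*o));

		if (!o)
			return -1;
		o->name = name;
		o->next = orphan_list;
		orphan_list = o;
		printf("%s: parked, IOMMU not ready\n", name);
		return -1;
	}
	printf("%s: attached to IOMMU\n", name);
	return 0;
}

/* Called when the IOMMU probes: retry every parked device. */
static void iommu_probed(void)
{
	struct orphan_dev *o, *next;

	iommu_ready = true;
	for (o = orphan_list, orphan_list = NULL; o; o = next) {
		next = o->next;
		add_device(o->name);
		free(o);
	}
}

int main(void)
{
	add_device("omap3isp");     /* too early, gets parked    */
	iommu_probed();             /* retries the parked device */
	add_device("late-client");  /* attaches immediately      */
	return 0;
}

The real driver additionally scans the list first so a device that is already parked is only deferred again rather than queued twice; the sketch omits that check for brevity.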
@@ -1432,6 +1666,8 @@ static int omap_iommu_add_device(struct device *dev) struct platform_device *pdev; int num_iommus, i; int ret; + struct orphan_dev *orphan_dev; + unsigned long flags; /* * Allocate the archdata iommu structure for DT-based devices. @@ -1463,10 +1699,26 @@ static int omap_iommu_add_device(struct device *dev) } pdev = of_find_device_by_node(np); - if (WARN_ON(!pdev)) { + if (!pdev) { of_node_put(np); kfree(arch_data); - return -EINVAL; + spin_lock_irqsave(&orphan_lock, flags); + list_for_each_entry(orphan_dev, &orphan_dev_list, + node) { + if (orphan_dev->dev == dev) + break; + } + spin_unlock_irqrestore(&orphan_lock, flags); + + if (orphan_dev && orphan_dev->dev == dev) + return -EPROBE_DEFER; + + orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL); + orphan_dev->dev = dev; + spin_lock_irqsave(&orphan_lock, flags); + list_add(&orphan_dev->node, &orphan_dev_list); + spin_unlock_irqrestore(&orphan_lock, flags); + return -EPROBE_DEFER; } oiommu = platform_get_drvdata(pdev); @@ -1477,6 +1729,7 @@ static int omap_iommu_add_device(struct device *dev) } tmp->iommu_dev = oiommu; + tmp->dev = &pdev->dev; of_node_put(np); } @@ -1511,6 +1764,17 @@ static int omap_iommu_add_device(struct device *dev) return 0; } +static int omap_iommu_add_device(struct device *dev) +{ + int ret; + + ret = _omap_iommu_add_device(dev); + if (ret == -EPROBE_DEFER) + return 0; + + return ret; +} + static void omap_iommu_remove_device(struct device *dev) { struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; @@ -1554,7 +1818,7 @@ static const struct iommu_ops omap_iommu_ops = { static int __init omap_iommu_init(void) { struct kmem_cache *p; - const unsigned long flags = SLAB_HWCACHE_ALIGN; + const slab_flags_t flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ struct device_node *np; int ret; diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 09968a02d291..18ee713ede78 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -73,16 +73,22 @@ struct omap_iommu { void *ctx; /* iommu context: registres saved area */ + struct cr_regs *cr_ctx; + u32 num_cr_ctx; + int has_bus_err_back; u32 id; struct iommu_device iommu; struct iommu_group *group; + + u8 pwrst; }; /** * struct omap_iommu_arch_data - omap iommu private data - * @iommu_dev: handle of the iommu device + * @iommu_dev: handle of the OMAP iommu device + * @dev: handle of the iommu device * * This is an omap iommu private data object, which binds an iommu user * to its iommu device. 
This object should be placed at the iommu user's @@ -91,6 +97,7 @@ struct omap_iommu { */ struct omap_iommu_arch_data { struct omap_iommu *iommu_dev; + struct device *dev; }; struct cr_regs { diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 34d0b9783b3e..39759db4f003 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -7,6 +7,7 @@ */ #include <linux/atomic.h> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-iommu.h> @@ -32,7 +33,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> -#include "arm-smmu-regs.h" +#include "arm-smmu.h" #define SMMU_INTR_SEL_NS 0x2000 @@ -155,7 +156,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); size_t s = size; - iova &= ~12UL; + iova = (iova >> 12) << 12; iova |= ctx->asid; do { iommu_writel(ctx, reg, iova); @@ -164,10 +165,32 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, } } -static const struct iommu_gather_ops qcom_gather_ops = { +static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); + qcom_iommu_tlb_sync(cookie); +} + +static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie); + qcom_iommu_tlb_sync(cookie); +} + +static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, + void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); +} + +static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, - .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, - .tlb_sync = qcom_iommu_tlb_sync, + .tlb_flush_walk = qcom_iommu_tlb_flush_walk, + .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, + .tlb_add_page = qcom_iommu_tlb_add_page, }; static irqreturn_t qcom_iommu_fault(int irq, void *dev) @@ -178,7 +201,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev) fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR); - if (!(fsr & FSR_FAULT)) + if (!(fsr & ARM_SMMU_FSR_FAULT)) return IRQ_NONE; fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0); @@ -192,7 +215,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev) } iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr); - iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE); + iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE); return IRQ_HANDLED; } @@ -215,7 +238,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 40, - .tlb = &qcom_gather_ops, + .tlb = &qcom_flush_ops, .iommu_dev = qcom_iommu->dev, }; @@ -246,31 +269,30 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, /* TTBRs */ iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, - pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] | - ((u64)ctx->asid << TTBRn_ASID_SHIFT)); - iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, - pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] | - ((u64)ctx->asid << TTBRn_ASID_SHIFT)); - - /* TTBCR */ - iommu_writel(ctx, ARM_SMMU_CB_TTBCR2, - (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) | - TTBCR2_SEP_UPSTREAM); - iommu_writel(ctx, ARM_SMMU_CB_TTBCR, - pgtbl_cfg.arm_lpae_s1_cfg.tcr); + pgtbl_cfg.arm_lpae_s1_cfg.ttbr | + FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid)); + iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0); + + /* TCR */ + 
iommu_writel(ctx, ARM_SMMU_CB_TCR2, + arm_smmu_lpae_tcr2(&pgtbl_cfg)); + iommu_writel(ctx, ARM_SMMU_CB_TCR, + arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE); /* MAIRs (stage-1 only) */ iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0, - pgtbl_cfg.arm_lpae_s1_cfg.mair[0]); + pgtbl_cfg.arm_lpae_s1_cfg.mair); iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1, - pgtbl_cfg.arm_lpae_s1_cfg.mair[1]); + pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32); /* SCTLR */ - reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | - SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG; + reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | + ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE | + ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE | + ARM_SMMU_SCTLR_CFCFG; if (IS_ENABLED(CONFIG_BIG_ENDIAN)) - reg |= SCTLR_E; + reg |= ARM_SMMU_SCTLR_E; iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); @@ -400,7 +422,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de } static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { int ret; unsigned long flags; @@ -417,7 +439,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { size_t ret; unsigned long flags; @@ -434,14 +456,14 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, */ pm_runtime_get_sync(qcom_domain->iommu->dev); spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); pm_runtime_put_sync(qcom_domain->iommu->dev); return ret; } -static void qcom_iommu_iotlb_sync(struct iommu_domain *domain) +static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain) { struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops, @@ -454,6 +476,12 @@ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain) pm_runtime_put_sync(qcom_domain->iommu->dev); } +static void qcom_iommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + qcom_iommu_flush_iotlb_all(domain); +} + static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { @@ -510,8 +538,8 @@ static int qcom_iommu_add_device(struct device *dev) } group = iommu_group_get_for_dev(dev); - if (IS_ERR_OR_NULL(group)) - return PTR_ERR_OR_ZERO(group); + if (IS_ERR(group)) + return PTR_ERR(group); iommu_group_put(group); iommu_device_link(&qcom_iommu->iommu, dev); @@ -581,7 +609,7 @@ static const struct iommu_ops qcom_iommu_ops = { .detach_dev = qcom_iommu_detach_dev, .map = qcom_iommu_map, .unmap = qcom_iommu_unmap, - .flush_iotlb_all = qcom_iommu_iotlb_sync, + .flush_iotlb_all = qcom_iommu_flush_iotlb_all, .iotlb_sync = qcom_iommu_iotlb_sync, .iova_to_phys = qcom_iommu_iova_to_phys, .add_device = qcom_iommu_add_device, @@ -696,10 +724,8 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev) return PTR_ERR(ctx->base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (irq < 0) return -ENODEV; - } /* clear IRQs before registering fault handler, just in case the * boot-loader left us a surprise: @@ -775,7 +801,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) struct qcom_iommu_dev 
*qcom_iommu; struct device *dev = &pdev->dev; struct resource *res; - int ret, sz, max_asid = 0; + int ret, max_asid = 0; /* find the max asid (which is 1:1 to ctx bank idx), so we know how * many child ctx devices we have: @@ -783,9 +809,8 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) for_each_child_of_node(dev->of_node, child) max_asid = max(max_asid, get_asid(child)); - sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0])); - - qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL); + qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), + GFP_KERNEL); if (!qcom_iommu) return -ENOMEM; qcom_iommu->num_ctxs = max_asid; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index dc26d74d79c2..b33cdd5aad81 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -100,6 +100,7 @@ struct rk_iommu { struct device *dev; void __iomem **bases; int num_mmu; + int num_irq; struct clk_bulk_data *clocks; int num_clocks; bool reset_disabled; @@ -526,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) int i, err; err = pm_runtime_get_if_in_use(iommu->dev); - if (WARN_ON_ONCE(err <= 0)) + if (!err || WARN_ON_ONCE(err < 0)) return ret; if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) @@ -757,7 +758,7 @@ unwind: } static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; @@ -794,7 +795,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, } static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; @@ -979,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) if (!dma_dev) return NULL; - rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL); + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); if (!rk_domain) return NULL; if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&rk_domain->domain)) - return NULL; + goto err_free_domain; /* * rk32xx iommus use a 2 level pagetable. 
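One small but recurring cleanup in the hunks above is qcom_iommu_device_probe() switching from the open-coded sizeof(*qcom_iommu) + max_asid * sizeof(ctxs[0]) to struct_size(), which sizes a structure with a trailing flexible array while guarding the multiplication against overflow. A userspace sketch of the same idea, with made-up types and an explicit check standing in for the kernel helper's saturating arithmetic:

/* Illustrative only: size a parent struct plus n trailing elements safely. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx_bank {
	int asid;
};

struct iommu_dev {
	int num_ctxs;
	struct ctx_bank ctxs[];   /* flexible array member */
};

static struct iommu_dev *iommu_alloc(size_t num_ctxs)
{
	size_t bytes;

	/* struct_size()-style guard: refuse a count that would overflow. */
	if (num_ctxs > (SIZE_MAX - sizeof(struct iommu_dev)) / sizeof(struct ctx_bank))
		return NULL;

	bytes = sizeof(struct iommu_dev) + num_ctxs * sizeof(struct ctx_bank);
	return calloc(1, bytes);
}

int main(void)
{
	struct iommu_dev *dev = iommu_alloc(4);

	if (!dev)
		return 1;
	dev->num_ctxs = 4;
	for (int i = 0; i < dev->num_ctxs; i++)
		dev->ctxs[i].asid = i;
	printf("allocated %d context banks\n", dev->num_ctxs);
	free(dev);
	return 0;
}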
@@ -1020,6 +1021,8 @@ err_free_dt: err_put_cookie: if (type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); +err_free_domain: + kfree(rk_domain); return NULL; } @@ -1048,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) if (domain->type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); + kfree(rk_domain); } static int rk_iommu_add_device(struct device *dev) @@ -1136,7 +1140,7 @@ static int rk_iommu_probe(struct platform_device *pdev) struct rk_iommu *iommu; struct resource *res; int num_res = pdev->num_resources; - int err, i, irq; + int err, i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); if (!iommu) @@ -1163,6 +1167,10 @@ static int rk_iommu_probe(struct platform_device *pdev) if (iommu->num_mmu == 0) return PTR_ERR(iommu->bases[0]); + iommu->num_irq = platform_irq_count(pdev); + if (iommu->num_irq < 0) + return iommu->num_irq; + iommu->reset_disabled = device_property_read_bool(dev, "rockchip,disable-mmu-reset"); @@ -1219,8 +1227,9 @@ static int rk_iommu_probe(struct platform_device *pdev) pm_runtime_enable(dev); - i = 0; - while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) { + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); + if (irq < 0) return irq; @@ -1245,10 +1254,13 @@ err_unprepare_clocks: static void rk_iommu_shutdown(struct platform_device *pdev) { struct rk_iommu *iommu = platform_get_drvdata(pdev); - int i = 0, irq; + int i; + + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); - while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) devm_free_irq(iommu->dev, irq, iommu); + } pm_runtime_force_suspend(&pdev->dev); } diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 22d4db302c1c..1137f3ddcb85 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -265,7 +265,7 @@ undo_cpu_trans: } static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct s390_domain *s390_domain = to_s390_domain(domain); int flags = ZPCI_PTE_VALID, rc = 0; @@ -314,7 +314,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, } static size_t s390_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long iova, size_t size, + struct iommu_iotlb_gather *gather) { struct s390_domain *s390_domain = to_s390_domain(domain); int flags = ZPCI_PTE_INVALID; diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 6d40bc1b38bf..3fb7ba72507d 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova, } static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t bytes, int prot) + phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) { struct gart_device *gart = gart_handle; int ret; @@ -207,7 +207,7 @@ static inline int __gart_iommu_unmap(struct gart_device *gart, } static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t bytes) + size_t bytes, struct iommu_iotlb_gather *gather) { struct gart_device *gart = gart_handle; int err; @@ -273,11 +273,17 @@ static int gart_iommu_of_xlate(struct device *dev, return 0; } -static void gart_iommu_sync(struct iommu_domain *domain) +static void gart_iommu_sync_map(struct iommu_domain *domain) { FLUSH_GART_REGS(gart_handle); } +static void 
gart_iommu_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + gart_iommu_sync_map(domain); +} + static const struct iommu_ops gart_iommu_ops = { .capable = gart_iommu_capable, .domain_alloc = gart_iommu_domain_alloc, @@ -292,7 +298,7 @@ static const struct iommu_ops gart_iommu_ops = { .iova_to_phys = gart_iommu_iova_to_phys, .pgsize_bitmap = GART_IOMMU_PGSIZES, .of_xlate = gart_iommu_of_xlate, - .iotlb_sync_map = gart_iommu_sync, + .iotlb_sync_map = gart_iommu_sync_map, .iotlb_sync = gart_iommu_sync, }; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index c4a652b227f8..63a147b623e6 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) return (addr & smmu->pfn_mask) == addr; } -static dma_addr_t smmu_pde_to_dma(u32 pde) +static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde) { - return pde << 12; + return (dma_addr_t)(pde & smmu->pfn_mask) << 12; } static void smmu_flush_ptc_all(struct tegra_smmu *smmu) @@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, static inline void smmu_flush(struct tegra_smmu *smmu) { - smmu_readl(smmu, SMMU_CONFIG); + smmu_readl(smmu, SMMU_PTB_ASID); } static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) @@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, unsigned int i; u32 value; + group = tegra_smmu_find_swgroup(smmu, swgroup); + if (group) { + value = smmu_readl(smmu, group->reg); + value &= ~SMMU_ASID_MASK; + value |= SMMU_ASID_VALUE(asid); + value |= SMMU_ASID_ENABLE; + smmu_writel(smmu, value, group->reg); + } else { + pr_warn("%s group from swgroup %u not found\n", __func__, + swgroup); + /* No point moving ahead if group was not found */ + return; + } + for (i = 0; i < smmu->soc->num_clients; i++) { const struct tegra_mc_client *client = &smmu->soc->clients[i]; @@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, value |= BIT(client->smmu.bit); smmu_writel(smmu, value, client->smmu.reg); } - - group = tegra_smmu_find_swgroup(smmu, swgroup); - if (group) { - value = smmu_readl(smmu, group->reg); - value &= ~SMMU_ASID_MASK; - value |= SMMU_ASID_VALUE(asid); - value |= SMMU_ASID_ENABLE; - smmu_writel(smmu, value, group->reg); - } } static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, @@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, dma_addr_t *dmap) { unsigned int pd_index = iova_pd_index(iova); + struct tegra_smmu *smmu = as->smmu; struct page *pt_page; u32 *pd; @@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, return NULL; pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pd_index]); + *dmap = smmu_pde_to_dma(smmu, pd[pd_index]); return tegra_smmu_pte_offset(pt_page, iova); } @@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, } else { u32 *pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pde]); + *dmap = smmu_pde_to_dma(smmu, pd[pde]); } return tegra_smmu_pte_offset(as->pts[pde], iova); @@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) if (--as->count[pde] == 0) { struct tegra_smmu *smmu = as->smmu; u32 *pd = page_address(as->pd); - dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]); + dma_addr_t pte_dma = 
smmu_pde_to_dma(smmu, pd[pde]); tegra_smmu_set_pde(as, iova, 0); @@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, } static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct tegra_smmu_as *as = to_smmu_as(domain); dma_addr_t pte_dma; @@ -680,7 +686,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, } static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct tegra_smmu_as *as = to_smmu_as(domain); dma_addr_t pte_dma; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 80a740df0737..cce329d71fba 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, */ static int __viommu_sync_req(struct viommu_dev *viommu) { - int ret = 0; unsigned int len; size_t write_len; struct viommu_request *req; @@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu) kfree(req); } - return ret; + return 0; } static int viommu_sync_req(struct viommu_dev *viommu) @@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) } static int viommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { int ret; u32 flags; @@ -751,7 +750,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, } static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { int ret = 0; size_t unmapped; @@ -797,7 +796,8 @@ static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain, return paddr; } -static void viommu_iotlb_sync(struct iommu_domain *domain) +static void viommu_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) { struct viommu_domain *vdomain = to_viommu_domain(domain); @@ -837,14 +837,6 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head) iommu_dma_get_resv_regions(dev, head); } -static void viommu_put_resv_regions(struct device *dev, struct list_head *head) -{ - struct iommu_resv_region *entry, *next; - - list_for_each_entry_safe(entry, next, head, list) - kfree(entry); -} - static struct iommu_ops viommu_ops; static struct virtio_driver virtio_iommu_drv; @@ -914,7 +906,7 @@ static int viommu_add_device(struct device *dev) err_unlink_dev: iommu_device_unlink(&viommu->iommu, dev); err_free_dev: - viommu_put_resv_regions(dev, &vdev->resv_regions); + generic_iommu_put_resv_regions(dev, &vdev->resv_regions); kfree(vdev); return ret; @@ -932,7 +924,7 @@ static void viommu_remove_device(struct device *dev) iommu_group_remove_device(dev); iommu_device_unlink(&vdev->viommu->iommu, dev); - viommu_put_resv_regions(dev, &vdev->resv_regions); + generic_iommu_put_resv_regions(dev, &vdev->resv_regions); kfree(vdev); } @@ -961,7 +953,7 @@ static struct iommu_ops viommu_ops = { .remove_device = viommu_remove_device, .device_group = viommu_device_group, .get_resv_regions = viommu_get_resv_regions, - .put_resv_regions = viommu_put_resv_regions, + .put_resv_regions = generic_iommu_put_resv_regions, .of_xlate = viommu_of_xlate, }; |
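Taken together, the driver conversions in this section follow one core API change: iommu_ops.map() gains a gfp_t, while unmap() and iotlb_sync() gain a struct iommu_iotlb_gather so unmap paths can queue page invalidations and flush them later as a single ranged operation, as mtk_iommu_iotlb_sync() and the new tlb_add_page callbacks do above. A simplified, self-contained model of that gather-then-sync flow (plain C, mirroring the shape of the kernel helpers rather than their exact code) is sketched below:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

struct iotlb_gather {
	unsigned long start;   /* lowest IOVA queued so far       */
	unsigned long end;     /* one past the highest IOVA+size  */
	size_t pgsize;         /* granule of the queued pages     */
};

static void gather_init(struct iotlb_gather *g)
{
	g->start = ULONG_MAX;
	g->end = 0;
	g->pgsize = 0;
}

/* Stand-in for the hardware range invalidation, e.g. mtk_iommu_tlb_flush_range_sync(). */
static void tlb_flush_range(unsigned long iova, size_t size, size_t granule)
{
	printf("flush iova=0x%lx size=0x%zx granule=0x%zx\n", iova, size, granule);
}

/* Roughly what ->tlb_add_page / iommu_iotlb_gather_add_page accumulate. */
static void gather_add_page(struct iotlb_gather *g, unsigned long iova, size_t size)
{
	unsigned long end = iova + size;

	g->pgsize = size;
	if (iova < g->start)
		g->start = iova;
	if (end > g->end)
		g->end = end;
}

/* Roughly what the new ->iotlb_sync(domain, gather) callbacks do. */
static void gather_sync(struct iotlb_gather *g)
{
	if (g->start == ULONG_MAX)     /* nothing was unmapped */
		return;
	tlb_flush_range(g->start, g->end - g->start, g->pgsize);
	gather_init(g);
}

int main(void)
{
	struct iotlb_gather g;

	gather_init(&g);
	gather_add_page(&g, 0x100000, 0x1000);   /* unmap page 1        */
	gather_add_page(&g, 0x103000, 0x1000);   /* unmap page 2        */
	gather_sync(&g);                         /* one combined flush  */
	return 0;
}

The kernel helper also forces an early sync when the page size changes mid-gather; the sketch skips that detail and only shows the range accumulation plus the ULONG_MAX "nothing gathered" check that mtk_iommu_iotlb_sync() relies on.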