/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 8

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_utlbs;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */
/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}
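/*
 * Editorial sketch (not part of the original driver): the *_MASK/*_SHIFT
 * pairs above are meant to be used together to decode register fields. For
 * instance, a hypothetical helper extracting the fault error level from a
 * raw IMSTR value could look like this:
 */
static inline unsigned int ipmmu_imstr_errlvl(u32 status)
{
	/* IMSTR_ERRLVL_MASK is (3 << 12), so the field occupies bits 13:12. */
	return (status & IMSTR_ERRLVL_MASK) >> IMSTR_ERRLVL_SHIFT;
}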
/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
	return ipmmu_read(domain->mmu->root,
			  domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
			    u32 data)
{
	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
				"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

static const struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else {
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}
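/*
 * Editorial sketch (not part of the original driver): context registers are
 * banked at a fixed stride of IM_CTX_SIZE (0x40) bytes, which is what
 * ipmmu_ctx_read() and ipmmu_ctx_write() above rely on. For example, IMTTBCR
 * of context 2 lives at 2 * 0x40 + 0x0008 = 0x0088 from the root register
 * base. A hypothetical helper making the offset computation explicit:
 */
static inline unsigned int ipmmu_ctx_reg_offset(unsigned int context_id,
						unsigned int reg)
{
	return context_id * IM_CTX_SIZE + reg;
}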
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write(domain, IMBUSCR,
			ipmmu_ctx_read(domain, IMBUSCR) &
			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}
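/*
 * Editorial note (worked example, not part of the original driver): the
 * IMTTBCR value programmed by ipmmu_domain_init_context() above composes to
 * a single constant. Spelling out the arithmetic:
 *
 *	IMTTBCR_EAE                 = 1 << 31 = 0x80000000
 *	IMTTBCR_SH0_INNER_SHAREABLE = 3 << 12 = 0x00003000
 *	IMTTBCR_ORGN0_WB_WA         = 1 << 10 = 0x00000400
 *	IMTTBCR_IRGN0_WB_WA         = 1 <<  8 = 0x00000100
 *	IMTTBCR_SL0_LVL_1           = 1 <<  4 = 0x00000010
 *	                                      ------------
 *	                                        0x80003510
 *
 * i.e. long-descriptor (EAE) format with inner-shareable, write-back
 * write-allocate cacheable table walks starting at level 1.
 */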
/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}
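/*
 * Editorial sketch (not part of the original driver): ipmmu_domain_irq()
 * forwards page and translation faults through report_iommu_fault(). A
 * domain owner that wants to observe those faults can register a handler
 * with iommu_set_fault_handler(); returning 0 marks the fault as handled
 * and suppresses the "Unhandled fault" message above. The handler below is
 * a hypothetical example.
 */
static inline int ipmmu_example_fault_handler(struct iommu_domain *io_domain,
					      struct device *dev,
					      unsigned long iova, int flags,
					      void *token)
{
	dev_warn(dev, "IPMMU fault at iova %#lx\n", iova);
	return -ENOSYS;	/* not handled, fall back to the default message */
}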
/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else {
		dev_info(dev, "Reusing IPMMU context %u\n",
			 domain->context_id);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
	return 0;
}

static int ipmmu_of_xlate(struct device *dev, struct of_phandle_args *spec)
{
	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times. */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}
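/*
 * Editorial note (not part of the original driver): ipmmu_of_xlate()
 * consumes one cell per "iommus" entry, the micro-TLB index of the bus
 * master. A hypothetical device tree fragment (all addresses and indices
 * are made-up examples):
 *
 *	ipmmu: mmu@fe951000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		reg = <0xfe951000 0x1000>;
 *		interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	&some_dma_master {
 *		iommus = <&ipmmu 13>;
 *	};
 */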
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!to_ipmmu(dev))
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		return ipmmu_init_arm_mapping(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_iotlb_sync,
	.iotlb_sync = ipmmu_iotlb_sync,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		/* Terminator */
	},
};

MODULE_DEVICE_TABLE(of, ipmmu_of_ids);
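/*
 * Editorial note on the probe logic below (not part of the original driver):
 * with the default feature set every IPMMU instance is its own root, since
 * has_cache_leaf_nodes is false. A multi-instance topology with cache leaf
 * nodes would instead point each leaf at the shared root through a
 * "renesas,ipmmu-main" property, hypothetically:
 *
 *	ipmmu_leaf: mmu@fe6b0000 {
 *		...
 *		renesas,ipmmu-main = <&ipmmu_mm>;
 *	};
 *
 * Leaf devices defer probing (-EPROBE_DEFER) until the root has registered.
 */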
static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
			     mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}

static void __exit ipmmu_exit(void)
{
	return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

#ifdef CONFIG_IOMMU_DMA
static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np)
{
	ipmmu_init();
	return 0;
}

IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa",
		 ipmmu_vmsa_iommu_of_setup);
#endif

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");