Diffstat (limited to 'arch/arm')
-rw-r--r--   arch/arm/Kconfig                             7
-rw-r--r--   arch/arm/include/asm/dma-mapping.h          46
-rw-r--r--   arch/arm/include/asm/io.h                    9
-rw-r--r--   arch/arm/include/asm/xen/hypervisor.h        2
-rw-r--r--   arch/arm/include/asm/xen/page-coherent.h    50
-rw-r--r--   arch/arm/include/asm/xen/page.h             44
-rw-r--r--   arch/arm/xen/Makefile                        2
-rw-r--r--   arch/arm/xen/mm.c                           65
-rw-r--r--   arch/arm/xen/p2m.c                         208
9 files changed, 424 insertions, 9 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 00c1ff45a158..e089e622be79 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1862,6 +1862,12 @@ config CC_STACKPROTECTOR
           neutralized via a kernel panic.
           This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+        def_bool y
+
+config IOMMU_HELPER
+        def_bool SWIOTLB
+
 config XEN_DOM0
         def_bool y
         depends on XEN
@@ -1872,6 +1878,7 @@ config XEN
         depends on CPU_V7 && !CPU_V6
         depends on !GENERIC_ATOMIC64
         select ARM_PSCI
+        select SWIOTLB_XEN
         help
           Say Y if you want to run Linux in a Virtual Machine on
           Xen on ARM.
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 863cd84eb1a2..e701a4d9aa59 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define DMA_ERROR_CODE  (~0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
         if (dev && dev->archdata.dma_ops)
                 return dev->archdata.dma_ops;
         return &arm_dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+        if (xen_initial_domain())
+                return xen_dma_ops;
+        else
+                return __generic_dma_ops(dev);
+}
+
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
         BUG_ON(!dev);
@@ -94,6 +105,39 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+        unsigned int offset = paddr & ~PAGE_MASK;
+        return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+        unsigned int offset = dev_addr & ~PAGE_MASK;
+        return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+        u64 limit, mask;
+
+        if (!dev->dma_mask)
+                return 0;
+
+        mask = *dev->dma_mask;
+
+        limit = (mask + 1) & ~mask;
+        if (limit && size > limit)
+                return 0;
+
+        if ((addr | (addr + size - 1)) & ~mask)
+                return 0;
+
+        return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..3c597c222ef2 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,13 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define BIOVEC_MERGEABLE(vec1, vec2)    \
         ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
 
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+                const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)                               \
+        (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&                         \
+         (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #ifdef CONFIG_MMU
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..1317ee40f4df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
         return PARAVIRT_LAZY_NONE;
 }
 
+extern struct dma_map_ops *xen_dma_ops;
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..1109017499e5
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags,
+                struct dma_attrs *attrs)
+{
+        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        if (__generic_dma_ops(hwdev)->unmap_page)
+                __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+                __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (__generic_dma_ops(hwdev)->sync_single_for_device)
+                __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..75579a9d6f76 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
 
 #include <linux/pfn.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
+#include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 
-#define pfn_to_mfn(pfn)                 (pfn)
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn)                 (mfn)
 #define mfn_to_virt(m)                  (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn     pte_pfn
@@ -32,6 +32,38 @@ typedef struct xpaddr {
 
 #define INVALID_P2M_ENTRY      (~0UL)
 
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+        unsigned long mfn;
+
+        if (phys_to_mach.rb_node != NULL) {
+                mfn = __pfn_to_mfn(pfn);
+                if (mfn != INVALID_P2M_ENTRY)
+                        return mfn;
+        }
+
+        return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+        unsigned long pfn;
+
+        if (phys_to_mach.rb_node != NULL) {
+                pfn = __mfn_to_pfn(mfn);
+                if (pfn != INVALID_P2M_ENTRY)
+                        return pfn;
+        }
+
+        return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
         unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +108,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
         return 0;
 }
 
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-        BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-        return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+                unsigned long nr_pages);
 
 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 43841033afd3..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y           := enlighten.o hypercall.o grant-table.o
+obj-y           := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 000000000000..b0e77de99148
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+                                 unsigned int address_bits,
+                                 dma_addr_t *dma_handle)
+{
+        if (!xen_initial_domain())
+                return -EINVAL;
+
+        /* we assume that dom0 is mapped 1:1 for now */
+        *dma_handle = pstart;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+        return;
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+struct dma_map_ops *xen_dma_ops;
+EXPORT_SYMBOL_GPL(xen_dma_ops);
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+        .mapping_error = xen_swiotlb_dma_mapping_error,
+        .alloc = xen_swiotlb_alloc_coherent,
+        .free = xen_swiotlb_free_coherent,
+        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
+        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+        .map_sg = xen_swiotlb_map_sg_attrs,
+        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+        .map_page = xen_swiotlb_map_page,
+        .unmap_page = xen_swiotlb_unmap_page,
+        .dma_supported = xen_swiotlb_dma_supported,
+        .set_dma_mask = xen_swiotlb_set_dma_mask,
+};
+
+int __init xen_mm_init(void)
+{
+        if (!xen_initial_domain())
+                return 0;
+        xen_swiotlb_init(1, false);
+        xen_dma_ops = &xen_swiotlb_dma_ops;
+        return 0;
+}
+arch_initcall(xen_mm_init);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 000000000000..23732cdff551
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/rwlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+struct xen_p2m_entry {
+        unsigned long pfn;
+        unsigned long mfn;
+        unsigned long nr_pages;
+        struct rb_node rbnode_mach;
+        struct rb_node rbnode_phys;
+};
+
+rwlock_t p2m_lock;
+struct rb_root phys_to_mach = RB_ROOT;
+static struct rb_root mach_to_phys = RB_ROOT;
+
+static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
+{
+        struct rb_node **link = &phys_to_mach.rb_node;
+        struct rb_node *parent = NULL;
+        struct xen_p2m_entry *entry;
+        int rc = 0;
+
+        while (*link) {
+                parent = *link;
+                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
+
+                if (new->mfn == entry->mfn)
+                        goto err_out;
+                if (new->pfn == entry->pfn)
+                        goto err_out;
+
+                if (new->pfn < entry->pfn)
+                        link = &(*link)->rb_left;
+                else
+                        link = &(*link)->rb_right;
+        }
+        rb_link_node(&new->rbnode_phys, parent, link);
+        rb_insert_color(&new->rbnode_phys, &phys_to_mach);
+        goto out;
+
+err_out:
+        rc = -EINVAL;
+        pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+                        __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+        return rc;
+}
+
+unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+        struct rb_node *n = phys_to_mach.rb_node;
+        struct xen_p2m_entry *entry;
+        unsigned long irqflags;
+
+        read_lock_irqsave(&p2m_lock, irqflags);
+        while (n) {
+                entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+                if (entry->pfn <= pfn &&
+                                entry->pfn + entry->nr_pages > pfn) {
+                        read_unlock_irqrestore(&p2m_lock, irqflags);
+                        return entry->mfn + (pfn - entry->pfn);
+                }
+                if (pfn < entry->pfn)
+                        n = n->rb_left;
+                else
+                        n = n->rb_right;
+        }
+        read_unlock_irqrestore(&p2m_lock, irqflags);
+
+        return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__pfn_to_mfn);
+
+static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
+{
+        struct rb_node **link = &mach_to_phys.rb_node;
+        struct rb_node *parent = NULL;
+        struct xen_p2m_entry *entry;
+        int rc = 0;
+
+        while (*link) {
+                parent = *link;
+                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
+
+                if (new->mfn == entry->mfn)
+                        goto err_out;
+                if (new->pfn == entry->pfn)
+                        goto err_out;
+
+                if (new->mfn < entry->mfn)
+                        link = &(*link)->rb_left;
+                else
+                        link = &(*link)->rb_right;
+        }
+        rb_link_node(&new->rbnode_mach, parent, link);
+        rb_insert_color(&new->rbnode_mach, &mach_to_phys);
+        goto out;
+
+err_out:
+        rc = -EINVAL;
+        pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+                        __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+        return rc;
+}
+
+unsigned long __mfn_to_pfn(unsigned long mfn)
+{
+        struct rb_node *n = mach_to_phys.rb_node;
+        struct xen_p2m_entry *entry;
+        unsigned long irqflags;
+
+        read_lock_irqsave(&p2m_lock, irqflags);
+        while (n) {
+                entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
+                if (entry->mfn <= mfn &&
+                                entry->mfn + entry->nr_pages > mfn) {
+                        read_unlock_irqrestore(&p2m_lock, irqflags);
+                        return entry->pfn + (mfn - entry->mfn);
+                }
+                if (mfn < entry->mfn)
+                        n = n->rb_left;
+                else
+                        n = n->rb_right;
+        }
+        read_unlock_irqrestore(&p2m_lock, irqflags);
+
+        return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__mfn_to_pfn);
+
+bool __set_phys_to_machine_multi(unsigned long pfn,
+                unsigned long mfn, unsigned long nr_pages)
+{
+        int rc;
+        unsigned long irqflags;
+        struct xen_p2m_entry *p2m_entry;
+        struct rb_node *n = phys_to_mach.rb_node;
+
+        if (mfn == INVALID_P2M_ENTRY) {
+                write_lock_irqsave(&p2m_lock, irqflags);
+                while (n) {
+                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+                        if (p2m_entry->pfn <= pfn &&
+                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
+                                rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
+                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
+                                write_unlock_irqrestore(&p2m_lock, irqflags);
+                                kfree(p2m_entry);
+                                return true;
+                        }
+                        if (pfn < p2m_entry->pfn)
+                                n = n->rb_left;
+                        else
+                                n = n->rb_right;
+                }
+                write_unlock_irqrestore(&p2m_lock, irqflags);
+                return true;
+        }
+
+        p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
+        if (!p2m_entry) {
+                pr_warn("cannot allocate xen_p2m_entry\n");
+                return false;
+        }
+        p2m_entry->pfn = pfn;
+        p2m_entry->nr_pages = nr_pages;
+        p2m_entry->mfn = mfn;
+
+        write_lock_irqsave(&p2m_lock, irqflags);
+        if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
+                (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+                write_unlock_irqrestore(&p2m_lock, irqflags);
+                return false;
+        }
+        write_unlock_irqrestore(&p2m_lock, irqflags);
+        return true;
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+        return __set_phys_to_machine_multi(pfn, mfn, 1);
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine);
+
+int p2m_init(void)
+{
+        rwlock_init(&p2m_lock);
+        return 0;
+}
+arch_initcall(p2m_init);
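
Note (not part of the patch): the p2m code above keeps pfn-to-mfn translations for foreign pages in an rb-tree and falls back to the identity mapping when no entry covers a pfn, matching the "dom0 is mapped 1:1" assumption in arch/arm/xen/mm.c. The following is a small, hypothetical userspace model of that lookup behaviour, using a plain array instead of the kernel rb-tree; the names mirror the patch but the harness itself is illustrative only.

/*
 * Standalone sketch of the p2m lookup semantics: ranges registered via
 * set_phys_to_machine_multi() translate to mfns; everything else is
 * identity-mapped (pfn == mfn). Build with: gcc -std=c99 p2m_model.c
 */
#include <stdio.h>
#include <stdbool.h>

#define INVALID_P2M_ENTRY (~0UL)

struct p2m_entry { unsigned long pfn, mfn, nr_pages; };

/* Toy backing store standing in for the phys_to_mach rb-tree. */
static struct p2m_entry entries[16];
static unsigned int nr_entries;

static bool set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
                                      unsigned long nr_pages)
{
        if (nr_entries >= 16)
                return false;
        entries[nr_entries++] = (struct p2m_entry){ pfn, mfn, nr_pages };
        return true;
}

/* Equivalent of __pfn_to_mfn(): find the entry whose range covers pfn. */
static unsigned long lookup_pfn(unsigned long pfn)
{
        for (unsigned int i = 0; i < nr_entries; i++)
                if (entries[i].pfn <= pfn &&
                    pfn < entries[i].pfn + entries[i].nr_pages)
                        return entries[i].mfn + (pfn - entries[i].pfn);
        return INVALID_P2M_ENTRY;
}

/* Equivalent of pfn_to_mfn(): identity fallback when no entry exists. */
static unsigned long pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn = lookup_pfn(pfn);

        return (mfn != INVALID_P2M_ENTRY) ? mfn : pfn;
}

int main(void)
{
        /* Pretend pfns 0x1000..0x1003 are foreign pages backed by mfn 0x8000.. */
        set_phys_to_machine_multi(0x1000, 0x8000, 4);

        printf("pfn 0x1002 -> mfn 0x%lx\n", pfn_to_mfn(0x1002)); /* 0x8002 */
        printf("pfn 0x2000 -> mfn 0x%lx\n", pfn_to_mfn(0x2000)); /* 0x2000, identity */
        return 0;
}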