author    Ian Campbell <ian.campbell@citrix.com>  2009-05-21 10:09:46 +0100
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2010-10-20 16:22:34 -0700
commit    de1ef2065c4675ab1062ebc8d1cb6c5f42b61d04 (patch)
tree      8091769ae22659277e43df69a7101c17e19530fa
parent    f020e2905166e12f9a8f109fe968cb5a9db887e9 (diff)
xen/privcmd: move remap_domain_mfn_range() to core xen code and export.
This allows xenfs to be built as a module; previously that required
flush_tlb_all and arbitrary_virt_to_machine to be exported.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-rw-r--r--  arch/x86/xen/mmu.c           | 66
-rw-r--r--  drivers/xen/xenfs/privcmd.c  | 81
-rw-r--r--  include/xen/xen-ops.h        |  5
3 files changed, 79 insertions(+), 73 deletions(-)
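
For context (not part of the patch): a minimal sketch of how modular code built
against this tree could call the newly exported helper through <xen/xen-ops.h>.
The function name example_map_foreign_frames and its arguments are hypothetical
placeholders; only xen_remap_domain_mfn_range, its signature, and the header
come from the patch below.

#include <linux/mm.h>
#include <xen/xen-ops.h>

/* Hypothetical caller (illustration only): map nr_pages machine frames,
 * starting at mfn and owned by domain domid, into the given userspace vma. */
static int example_map_foreign_frames(struct vm_area_struct *vma,
                                      unsigned long mfn, int nr_pages,
                                      unsigned int domid)
{
        /* The exported helper batches the PTE updates and issues the
         * mmu_update hypercalls internally, so a module no longer needs
         * flush_tlb_all or arbitrary_virt_to_machine itself. */
        return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr_pages,
                                          vma->vm_page_prot, domid);
}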
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 1ceb0f2fa0af..f08ea045620f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2265,6 +2265,72 @@ void __init xen_hvm_init_mmu_ops(void)
}
#endif
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+ unsigned long mfn;
+ pgprot_t prot;
+ struct mmu_update *mmu_update;
+};
+
+static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ struct remap_data *rmd = data;
+ pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
+ rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+ rmd->mmu_update->val = pte_val_ma(pte);
+ rmd->mmu_update++;
+
+ return 0;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long mfn, int nr,
+ pgprot_t prot, unsigned domid)
+{
+ struct remap_data rmd;
+ struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+ int batch;
+ unsigned long range;
+ int err = 0;
+
+ prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+
+ rmd.mfn = mfn;
+ rmd.prot = prot;
+
+ while (nr) {
+ batch = min(REMAP_BATCH_SIZE, nr);
+ range = (unsigned long)batch << PAGE_SHIFT;
+
+ rmd.mmu_update = mmu_update;
+ err = apply_to_page_range(vma->vm_mm, addr, range,
+ remap_area_mfn_pte_fn, &rmd);
+ if (err)
+ goto out;
+
+ err = -EFAULT;
+ if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+ goto out;
+
+ nr -= batch;
+ addr += range;
+ }
+
+ err = 0;
+out:
+
+ flush_tlb_all();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
#ifdef CONFIG_XEN_DEBUG_FS
static struct dentry *d_mmu_debug;
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
index 438223ae0fc3..f80be7f6eb95 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/xenfs/privcmd.c
@@ -31,76 +31,12 @@
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
-
-#define REMAP_BATCH_SIZE 16
+#include <xen/xen-ops.h>
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
-struct remap_data {
- unsigned long mfn;
- pgprot_t prot;
- struct mmu_update *mmu_update;
-};
-
-static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
-{
- struct remap_data *rmd = data;
- pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
-
- rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
- rmd->mmu_update->val = pte_val_ma(pte);
- rmd->mmu_update++;
-
- return 0;
-}
-
-static int remap_domain_mfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- unsigned long mfn, int nr,
- pgprot_t prot, unsigned domid)
-{
- struct remap_data rmd;
- struct mmu_update mmu_update[REMAP_BATCH_SIZE];
- int batch;
- unsigned long range;
- int err = 0;
-
- prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
-
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-
- rmd.mfn = mfn;
- rmd.prot = prot;
-
- while (nr) {
- batch = min(REMAP_BATCH_SIZE, nr);
- range = (unsigned long)batch << PAGE_SHIFT;
-
- rmd.mmu_update = mmu_update;
- err = apply_to_page_range(vma->vm_mm, addr, range,
- remap_area_mfn_pte_fn, &rmd);
- if (err)
- goto out;
-
- err = -EFAULT;
- if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
- goto out;
-
- nr -= batch;
- addr += range;
- }
-
- err = 0;
-out:
-
- flush_tlb_all();
-
- return err;
-}
-
static long privcmd_ioctl_hypercall(void __user *udata)
{
struct privcmd_hypercall hypercall;
@@ -233,11 +169,11 @@ static int mmap_mfn_range(void *data, void *state)
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL;
- rc = remap_domain_mfn_range(vma,
- msg->va & PAGE_MASK,
- msg->mfn, msg->npages,
- vma->vm_page_prot,
- st->domain);
+ rc = xen_remap_domain_mfn_range(vma,
+ msg->va & PAGE_MASK,
+ msg->mfn, msg->npages,
+ vma->vm_page_prot,
+ st->domain);
if (rc < 0)
return rc;
@@ -315,9 +251,8 @@ static int mmap_batch_fn(void *data, void *state)
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
- if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
- *mfnp, 1,
- st->vma->vm_page_prot, st->domain) < 0) {
+ if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+ st->vma->vm_page_prot, st->domain) < 0) {
*mfnp |= 0xf0000000U;
st->err++;
}
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 351f4051f6d8..98b92154a264 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -23,4 +23,9 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long mfn, int nr,
+ pgprot_t prot, unsigned domid);
+
#endif /* INCLUDE_XEN_OPS_H */