author    venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>    2008-12-19 13:47:28 -0800
committer H. Peter Anvin <hpa@zytor.com>    2008-12-19 15:40:30 -0800
commit    982d789ab76c8a11426852fec2fdf2f412e21c0c (patch)
tree      41e6932764facecb11bc9ca831ffd67ded384d68 /arch/x86/mm
parent    d87fe6607c31944f7572f965c1507ae77026c133 (diff)
x86: PAT: remove follow_pfnmap_pte in favor of follow_phys
Impact: Cleanup - removes a new function in favor of a recently modified older one.

Replace follow_pfnmap_pte in pat code with follow_phys. follow_phys also returns the protection, eliminating the need for the pte_pgprot call. Using follow_phys also eliminates the need for pte_pa.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
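For context, follow_phys() is the existing helper (reworked by the parent commit above) that the hunks below switch to. A minimal sketch of its prototype, assuming the mm/memory.c definition of this kernel era; the exact signature is not part of this patch:

	/*
	 * Assumed shape of the helper this patch adopts: resolve the PTE
	 * backing 'address' in 'vma' and report the physical address and
	 * protection bits. Returns 0 on success, nonzero on failure, so
	 * callers can bail out (or skip a page) when no mapping exists.
	 */
	int follow_phys(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags, unsigned long *prot,
			resource_size_t *phys);

Because follow_phys() hands back both the physical address and the protection in one call, the separate pte_pa() and pte_pgprot() lookups in the old code become unnecessary, as the diff below shows.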
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/pat.c | 30 +++++++++++-------------------
1 file changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d5254bae84f4..541bcc944a5b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -685,8 +685,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	int retval = 0;
 	unsigned long i, j;
 	u64 paddr;
-	pgprot_t prot;
-	pte_t pte;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -696,26 +695,22 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 
 	if (is_linear_pfn_mapping(vma)) {
 		/*
-		 * reserve the whole chunk starting from vm_pgoff,
-		 * But, we have to get the protection from pte.
+		 * reserve the whole chunk covered by vma. We need the
+		 * starting address and protection from pte.
 		 */
-		if (follow_pfnmap_pte(vma, vma_start, &pte)) {
+		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
-			return -1;
+			return -EINVAL;
 		}
-		prot = pte_pgprot(pte);
-		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
 	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
-		prot = pte_pgprot(pte);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -724,10 +719,9 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 cleanup_ret:
 	/* Reserve error: Cleanup partial reservation and return error */
 	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + j, &pte))
+		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
 		free_pfn_range(paddr, PAGE_SIZE);
 	}
 
@@ -797,6 +791,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 {
 	unsigned long i;
 	u64 paddr;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -821,12 +816,9 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	} else {
 		/* free entire vma, page by page, using the pfn from pte */
 		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			pte_t pte;
-
-			if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 				continue;
 
-			paddr = pte_pa(pte);
 			free_pfn_range(paddr, PAGE_SIZE);
 		}
 	}
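Taken together, the per-page reservation loop in track_pfn_vma_copy() after this patch reads roughly as follows (a sketch reconstructed from the hunks above, not a verbatim copy of the resulting file):

	/*
	 * Reserve the VMA page by page. follow_phys() yields both the
	 * physical address and the protection bits in one call, so the
	 * old pte_pa()/pte_pgprot() lookups are gone; pages without a
	 * mapping are simply skipped.
	 */
	for (i = 0; i < vma_size; i += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
			continue;

		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
		if (retval)
			goto cleanup_ret;
	}

The same pattern, with free_pfn_range() in place of reserve_pfn_range(), handles the cleanup path and the untrack_pfn_vma() loop.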