author		David Gibson <david@gibson.dropbear.id.au>	2005-05-05 16:15:13 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-05-05 16:36:32 -0700
commit		1f8d419e291f7f7f7f3ffd4f0ba00834621690c8 (patch)
tree		833df93032a38bc749458ce8be3a316eae1d5215 /arch
parent		e685752de107201432a055f7c45c396a5b04dc17 (diff)
download	blackbird-op-linux-1f8d419e291f7f7f7f3ffd4f0ba00834621690c8.tar.gz
		blackbird-op-linux-1f8d419e291f7f7f7f3ffd4f0ba00834621690c8.zip
[PATCH] ppc64: pgtable.h and other header cleanups
This patch started as simply removing a few never-used macros from
asm-ppc64/pgtable.h, then kind of grew. It now makes a bunch of cleanups
to the ppc64 low-level header files (with corresponding changes to .c
files where necessary) such as:

- Abolishing never-used macros
- Eliminating multiple #defines with the same purpose
- Removing pointless macros (cases where just expanding the macro
  everywhere turns out clearer and more sensible)
- Removing some cases where macros which could be defined in terms of
  each other weren't
- Moving imalloc() related definitions from pgtable.h to their own
  header file (imalloc.h)
- Re-arranging headers to group things more logically
- Moving all VSID allocation related things to mmu.h, instead of being
  split between mmu.h and mmu_context.h
- Removing some reserved space for flags from the PMD - we're not using it
- Fixing some bugs which broke compile with STRICT_MM_TYPECHECKS

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/ppc64/kernel/pci.c		2
-rw-r--r--	arch/ppc64/mm/hash_native.c	3
-rw-r--r--	arch/ppc64/mm/hash_utils.c	11
-rw-r--r--	arch/ppc64/mm/imalloc.c		5
-rw-r--r--	arch/ppc64/mm/init.c		1
-rw-r--r--	arch/ppc64/mm/stab.c		5
6 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index be3cc387c1ec..d786d4b6af0b 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -438,7 +438,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
int i;
if (page_is_ram(offset >> PAGE_SHIFT))
- return prot;
+ return __pgprot(prot);
prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
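The hunk above is one of the STRICT_MM_TYPECHECKS fixes mentioned in the commit message: with strict typechecks enabled, pgprot_t is a one-member struct rather than a bare unsigned long, so returning the raw flags word does not compile. A minimal sketch of the usual typedef pattern, restated here only for illustration (it is not part of this diff):

#ifdef STRICT_MM_TYPECHECKS
/* strict: pgprot_t is a distinct one-member struct, so handing back a
 * bare unsigned long where a pgprot_t is expected is a compile error */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#else
/* loose: pgprot_t is just an unsigned long and __pgprot() is a no-op */
typedef unsigned long pgprot_t;
#define pgprot_val(x)	(x)
#define __pgprot(x)	(x)
#endif

pci_phys_mem_access_prot() manipulates the protection bits as an unsigned long internally, so wrapping the return value in __pgprot() keeps it correct under both definitions.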
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 144657e0c3d5..52b6b9305341 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -320,8 +320,7 @@ static void native_flush_hash_range(unsigned long context,
j = 0;
for (i = 0; i < number; i++) {
- if ((batch->addr[i] >= USER_START) &&
- (batch->addr[i] <= USER_END))
+ if (batch->addr[i] < KERNELBASE)
vsid = get_vsid(context, batch->addr[i]);
else
vsid = get_kernel_vsid(batch->addr[i]);
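Both this hunk and the flush_hash_page() hunk further down replace a USER_START..USER_END range test with a single comparison against KERNELBASE. A minimal sketch of the shared rule, assuming the existing get_vsid()/get_kernel_vsid() helpers (not defined here):

/* Sketch only: anything below KERNELBASE is treated as a user address and
 * translated through the mm context; kernel linear, vmalloc and ioremap
 * addresses all sit above KERNELBASE and use the kernel VSID ranges. */
static inline unsigned long vsid_for_ea(unsigned long context, unsigned long ea)
{
	if (ea < KERNELBASE)
		return get_vsid(context, ea);
	return get_kernel_vsid(ea);
}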
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index e48be12f518c..0a0f97008d02 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -298,24 +298,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
int local = 0;
cpumask_t tmp;
+ if ((ea & ~REGION_MASK) > EADDR_MASK)
+ return 1;
+
switch (REGION_ID(ea)) {
case USER_REGION_ID:
user_region = 1;
mm = current->mm;
- if ((ea > USER_END) || (! mm))
+ if (! mm)
return 1;
vsid = get_vsid(mm->context.id, ea);
break;
case IO_REGION_ID:
- if (ea > IMALLOC_END)
- return 1;
mm = &ioremap_mm;
vsid = get_kernel_vsid(ea);
break;
case VMALLOC_REGION_ID:
- if (ea > VMALLOC_END)
- return 1;
mm = &init_mm;
vsid = get_kernel_vsid(ea);
break;
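The per-region upper-bound checks (USER_END, IMALLOC_END, VMALLOC_END) are replaced by the single test on (ea & ~REGION_MASK) at the top of hash_page(). A self-contained sketch of the region arithmetic this relies on; the constants are restated from the ppc64 headers of this era purely for illustration:

#include <stdio.h>

#define REGION_SHIFT	60UL
#define REGION_MASK	(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)	((unsigned long)(ea) >> REGION_SHIFT)

int main(void)
{
	unsigned long ea = 0xD000000000001000UL;	/* a vmalloc-region address */

	/* The top nibble names the region; the remaining bits are the offset
	 * within it, which is what the single bounds check above limits. */
	printf("region id   : 0x%lx\n", REGION_ID(ea));		/* 0xd */
	printf("offset bits : 0x%lx\n", ea & ~REGION_MASK);	/* 0x1000 */
	return 0;
}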
@@ -362,7 +361,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
unsigned long vsid, vpn, va, hash, secondary, slot;
unsigned long huge = pte_huge(pte);
- if ((ea >= USER_START) && (ea <= USER_END))
+ if (ea < KERNELBASE)
vsid = get_vsid(context, ea);
else
vsid = get_kernel_vsid(ea);
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index 9d92b0d9cde5..cb8727f3267a 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -14,6 +14,7 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/semaphore.h>
+#include <asm/imalloc.h>
static DECLARE_MUTEX(imlist_sem);
struct vm_struct * imlist = NULL;
@@ -23,11 +24,11 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
unsigned long addr;
struct vm_struct **p, *tmp;
- addr = IMALLOC_START;
+ addr = ioremap_bot;
for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
if (size + addr < (unsigned long) tmp->addr)
break;
- if ((unsigned long)tmp->addr >= IMALLOC_START)
+ if ((unsigned long)tmp->addr >= ioremap_bot)
addr = tmp->size + (unsigned long) tmp->addr;
if (addr > IMALLOC_END-size)
return 1;
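get_free_im_addr() above walks the address-sorted imlist looking for the first gap large enough, and with this change it starts the scan at ioremap_bot (which init.c below initialises to IMALLOC_BASE) instead of a separate IMALLOC_START constant. A self-contained sketch of the same first-fit walk, using illustrative names rather than the kernel's:

struct region {
	unsigned long addr;
	unsigned long size;
	struct region *next;	/* list kept sorted by ascending addr */
};

/* Return the first address in [start, end] with 'size' free bytes before
 * the next reserved region, or 0 if nothing fits.  Sketch only. */
static unsigned long first_fit(struct region *list, unsigned long start,
			       unsigned long end, unsigned long size)
{
	unsigned long addr = start;
	struct region *r;

	for (r = list; r != NULL; r = r->next) {
		if (addr + size < r->addr)	/* enough room before this region */
			break;
		if (r->addr >= start)		/* skip past an existing region */
			addr = r->addr + r->size;
		if (addr > end - size)
			return 0;
	}
	return addr;
}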
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index cf33d7ec2e29..afbf25227cb0 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -64,6 +64,7 @@
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
+#include <asm/imalloc.h>
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index 31491131d5e4..df4bbe14153c 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -19,6 +19,11 @@
#include <asm/paca.h>
#include <asm/cputable.h>
+struct stab_entry {
+ unsigned long esid_data;
+ unsigned long vsid_data;
+};
+
/* Both the segment table and SLB code uses the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
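struct stab_entry is now defined locally in stab.c, the only file that uses it; each entry is two doublewords, one for the ESID side and one for the VSID side of a segment-table entry. A short sketch of how such a definition is typically indexed, assuming a 4 KB segment table of 16-byte entries (an assumption for illustration, not something stated by this diff):

/* Assumption for illustration: the segment table is one 4 KB page, i.e.
 * 256 entries of sizeof(struct stab_entry) == 16 bytes each. */
#define STAB_NR_ENTRIES	(4096 / sizeof(struct stab_entry))

static struct stab_entry *stab_entry_at(void *stab_base, unsigned int i)
{
	struct stab_entry *stab = stab_base;

	return &stab[i % STAB_NR_ENTRIES];
}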