Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/mm/init.c | 2
-rw-r--r--  arch/arm/mach-pxa/mainstone.c | 4
-rw-r--r--  arch/arm/mm/consistent.c | 4
-rw-r--r--  arch/arm/mm/init.c | 2
-rw-r--r--  arch/arm26/mm/init.c | 2
-rw-r--r--  arch/cris/mm/init.c | 2
-rw-r--r--  arch/frv/kernel/frv_ksyms.c | 1
-rw-r--r--  arch/frv/mm/dma-alloc.c | 4
-rw-r--r--  arch/frv/mm/init.c | 6
-rw-r--r--  arch/h8300/kernel/h8300_ksyms.c | 1
-rw-r--r--  arch/h8300/mm/init.c | 4
-rw-r--r--  arch/i386/kernel/efi.c | 2
-rw-r--r--  arch/i386/kernel/smp.c | 28
-rw-r--r--  arch/i386/kernel/sys_i386.c | 25
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c | 2
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c | 2
-rw-r--r--  arch/i386/mm/hugetlbpage.c | 12
-rw-r--r--  arch/i386/mm/init.c | 6
-rw-r--r--  arch/i386/mm/pageattr.c | 20
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 5
-rw-r--r--  arch/ia64/mm/init.c | 6
-rw-r--r--  arch/m32r/mm/init.c | 4
-rw-r--r--  arch/m68k/mm/init.c | 2
-rw-r--r--  arch/m68k/mm/memory.c | 2
-rw-r--r--  arch/m68k/mm/motorola.c | 2
-rw-r--r--  arch/m68knommu/kernel/m68k_ksyms.c | 1
-rw-r--r--  arch/m68knommu/mm/init.c | 4
-rw-r--r--  arch/mips/arc/memory.c | 2
-rw-r--r--  arch/mips/dec/prom/memory.c | 2
-rw-r--r--  arch/mips/mips-boards/generic/memory.c | 2
-rw-r--r--  arch/mips/mips-boards/sim/sim_mem.c | 2
-rw-r--r--  arch/mips/mm/init.c | 11
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 2
-rw-r--r--  arch/parisc/mm/init.c | 4
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 15
-rw-r--r--  arch/powerpc/mm/init_32.c | 4
-rw-r--r--  arch/powerpc/mm/init_64.c | 4
-rw-r--r--  arch/powerpc/mm/mem.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 2
-rw-r--r--  arch/ppc/kernel/dma-mapping.c | 4
-rw-r--r--  arch/ppc/mm/init.c | 6
-rw-r--r--  arch/s390/mm/init.c | 4
-rw-r--r--  arch/sh/mm/consistent.c | 3
-rw-r--r--  arch/sh/mm/hugetlbpage.c | 12
-rw-r--r--  arch/sh/mm/init.c | 4
-rw-r--r--  arch/sh64/mm/hugetlbpage.c | 12
-rw-r--r--  arch/sh64/mm/init.c | 4
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 6
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 6
-rw-r--r--  arch/sparc/mm/generic.c | 1
-rw-r--r--  arch/sparc/mm/init.c | 6
-rw-r--r--  arch/sparc/mm/loadmmu.c | 2
-rw-r--r--  arch/sparc/mm/srmmu.c | 9
-rw-r--r--  arch/sparc/mm/sun4c.c | 15
-rw-r--r--  arch/sparc64/Kconfig | 4
-rw-r--r--  arch/sparc64/kernel/pci.c | 2
-rw-r--r--  arch/sparc64/kernel/sun4v_tlb_miss.S | 39
-rw-r--r--  arch/sparc64/kernel/traps.c | 21
-rw-r--r--  arch/sparc64/kernel/tsb.S | 210
-rw-r--r--  arch/sparc64/mm/fault.c | 15
-rw-r--r--  arch/sparc64/mm/generic.c | 1
-rw-r--r--  arch/sparc64/mm/hugetlbpage.c | 40
-rw-r--r--  arch/sparc64/mm/init.c | 25
-rw-r--r--  arch/sparc64/mm/tsb.c | 234
-rw-r--r--  arch/um/kernel/mem.c | 4
-rw-r--r--  arch/um/kernel/physmem.c | 2
-rw-r--r--  arch/x86_64/kernel/time.c | 2
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c | 1
-rw-r--r--  arch/x86_64/mm/init.c | 6
-rw-r--r--  arch/x86_64/mm/pageattr.c | 63
-rw-r--r--  arch/xtensa/mm/init.c | 2
-rw-r--r--  arch/xtensa/mm/pgtable.c | 24
72 files changed, 582 insertions, 418 deletions
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 486d7945583d..544ac5dc09eb 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -357,7 +357,7 @@ free_reserved_mem(void *start, void *end)
void *__start = start;
for (; __start < end; __start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(__start));
- set_page_count(virt_to_page(__start), 1);
+ init_page_count(virt_to_page(__start));
free_page((long)__start);
totalram_pages++;
}
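
This hunk is the first instance of the pattern repeated throughout this diff: open-coded set_page_count(page, 1) on pages being handed back to the allocator becomes init_page_count(page). The mm-side definition is outside this arch-limited diffstat, but a sketch of what the new helper amounts to:

/* Sketch of the assumed helper (not the authoritative mm/ definition):
 * the one blessed way to set a page's refcount to 1 while nothing else
 * can see the page, e.g. when freeing boot-time reserved memory.
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}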
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d5bda60209ec..98356f810007 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -157,14 +157,14 @@ static struct platform_device smc91x_device = {
.resource = smc91x_resources,
};
-static int mst_audio_startup(snd_pcm_substream_t *substream, void *priv)
+static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv)
{
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
MST_MSCWR2 &= ~MST_MSCWR2_AC97_SPKROFF;
return 0;
}
-static void mst_audio_shutdown(snd_pcm_substream_t *substream, void *priv)
+static void mst_audio_shutdown(struct snd_pcm_substream *substream, void *priv)
{
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
MST_MSCWR2 |= MST_MSCWR2_AC97_SPKROFF;
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index c2ee18d2075e..8a1bfcd50087 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -223,6 +223,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
pte = consistent_pte[idx] + off;
c->vm_pages = page;
+ split_page(page, order);
+
/*
* Set the "dma handle"
*/
@@ -231,7 +233,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
do {
BUG_ON(!pte_none(*pte));
- set_page_count(page, 1);
/*
* x86 does not mark the pages reserved...
*/
@@ -250,7 +251,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
* Free the otherwise unused pages.
*/
while (page < end) {
- set_page_count(page, 1);
__free_page(page);
page++;
}
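
Here a single split_page() call right after the high-order allocation replaces the manual set_page_count() loop over the constituent pages; the frv, mips, ppc and sh hunks below make the same conversion. A sketch of the semantics this assumes:

/* Sketch of the assumed helper: turn one order-N allocation into
 * 1 << N independently freeable order-0 pages.  The first page keeps
 * its existing reference; the tail pages get theirs here.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	BUG_ON(PageCompound(page));
	BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_count(page + i, 1);
}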
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8b276ee38acf..b0321e943b76 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -531,7 +531,7 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
for (; addr < end; addr += PAGE_SIZE) {
struct page *page = virt_to_page(addr);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
free_page(addr);
totalram_pages++;
}
diff --git a/arch/arm26/mm/init.c b/arch/arm26/mm/init.c
index 1f09a9d0fb83..e3ecaa453747 100644
--- a/arch/arm26/mm/init.c
+++ b/arch/arm26/mm/init.c
@@ -324,7 +324,7 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
for (; addr < end; addr += PAGE_SIZE) {
struct page *page = virt_to_page(addr);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
free_page(addr);
totalram_pages++;
}
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index 31a0018b525a..b7842ff213a6 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -216,7 +216,7 @@ free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 0f1c6cbc4f50..aa6b7d0a2109 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strchr);
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index 342823aad758..636b2f8b5d98 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -115,9 +115,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
*/
if (order > 0) {
struct page *rpage = virt_to_page(page);
-
- for (i = 1; i < (1 << order); i++)
- set_page_count(rpage + i, 1);
+ split_page(rpage, order);
}
err = 0;
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 765088ea8a50..8899aa1a4f06 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -169,7 +169,7 @@ void __init mem_init(void)
struct page *page = &mem_map[pfn];
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalram_pages++;
}
@@ -210,7 +210,7 @@ void __init free_initmem(void)
/* next to check that the page we free is not a partial page */
for (addr = start; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -230,7 +230,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 5cc76efaf7aa..69d6ad32d56c 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -25,6 +25,7 @@ extern char h8300_debug_device[];
/* platform dependent support */
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strchr);
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 1e0929ddc8c4..09efc4b1f038 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -196,7 +196,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
@@ -219,7 +219,7 @@ free_initmem()
/* next to check that the page we free is not a partial page */
for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index c9cad7ba0d2d..aeabb4196861 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -115,7 +115,7 @@ static void efi_call_phys_epilog(void)
unsigned long cr4;
struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0);
- cpu_gdt_descr->address = __va(cpu_gdt_descr->address);
+ cpu_gdt_descr->address = (unsigned long)__va(cpu_gdt_descr->address);
load_gdt(cpu_gdt_descr);
cr4 = read_cr4();
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 218d725a5a1e..d134e9643a58 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -504,27 +504,23 @@ void unlock_ipi_call_lock(void)
spin_unlock_irq(&call_lock);
}
-static struct call_data_struct * call_data;
-
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
-/*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
+static struct call_data_struct *call_data;
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute @func or have already executed it.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait)
{
struct call_data_struct data;
int cpus;
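
For reference, a hypothetical caller of the API documented above (all names below are invented for illustration):

/* Run a fast, non-blocking function on every other CPU and wait for
 * completion.  Per the comment above, this must not be called with
 * interrupts disabled or from interrupt/bottom-half context.
 */
static atomic_t hits = ATOMIC_INIT(0);

static void count_cpu(void *unused)
{
	atomic_inc(&hits);		/* fast and non-blocking */
}

static void count_all_cpus(void)
{
	if (smp_call_function(count_cpu, NULL, 0, 1) != 0)
		printk(KERN_WARNING "IPI delivery failed\n");
	count_cpu(NULL);		/* the calling CPU is skipped */
}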
diff --git a/arch/i386/kernel/sys_i386.c b/arch/i386/kernel/sys_i386.c
index a4a61976ecb9..8fdb1fb17a5f 100644
--- a/arch/i386/kernel/sys_i386.c
+++ b/arch/i386/kernel/sys_i386.c
@@ -40,14 +40,13 @@ asmlinkage int sys_pipe(unsigned long __user * fildes)
return error;
}
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
{
int error = -EBADF;
- struct file * file = NULL;
+ struct file *file = NULL;
+ struct mm_struct *mm = current->mm;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
@@ -56,9 +55,9 @@ static inline long do_mmap2(
goto out;
}
- down_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
if (file)
fput(file);
@@ -66,13 +65,6 @@ out:
return error;
}
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/i386 didn't use to be able to handle more than
@@ -101,7 +93,8 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
+ a.fd, a.offset >> PAGE_SHIFT);
out:
return err;
}
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index be242723c339..17a6fe7166e7 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -46,7 +46,7 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static unsigned long cyc2ns_scale;
+static unsigned long cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index a7f5a2aceba2..5e41ee29c8cf 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -74,7 +74,7 @@ late_initcall(start_lost_tick_compensation);
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static unsigned long cyc2ns_scale;
+static unsigned long cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
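
Both timer files make the same one-word change: cyc2ns_scale is written once during calibration and then read on every timestamp conversion, so __read_mostly moves it into the read-mostly data section, away from cache lines that see frequent writes. The read side it protects is essentially:

/* Written at calibration time only, read on every cycles-to-ns
 * conversion; __read_mostly keeps it on a quiet cache line.
 */
static unsigned long cyc2ns_scale __read_mostly;

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}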
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index d524127c9afc..a7d891585411 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -48,18 +48,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmd;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 2700f01994ba..7ba55a6e2dbc 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -270,7 +270,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
static void __meminit free_new_highpage(struct page *page)
{
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
@@ -727,7 +727,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
memset((void *)addr, 0xcc, PAGE_SIZE);
free_page(addr);
totalram_pages++;
@@ -766,7 +766,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index d0cadb33b54c..92c3d9f0e731 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -51,6 +51,13 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
if (!base)
return NULL;
+ /*
+ * page_private is used to track the number of entries in
+ * the page table page that have non standard attributes.
+ */
+ SetPagePrivate(base);
+ page_private(base) = 0;
+
address = __pa(address);
addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base);
@@ -143,11 +150,12 @@ __change_page_attr(struct page *page, pgprot_t prot)
return -ENOMEM;
set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
kpte_page = split;
- }
- get_page(kpte_page);
+ }
+ page_private(kpte_page)++;
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
- __put_page(kpte_page);
+ BUG_ON(page_private(kpte_page) == 0);
+ page_private(kpte_page)--;
} else
BUG();
@@ -157,10 +165,8 @@ __change_page_attr(struct page *page, pgprot_t prot)
* replace it with a largepage.
*/
if (!PageReserved(kpte_page)) {
- /* memleak and potential failed 2M page regeneration */
- BUG_ON(!page_count(kpte_page));
-
- if (cpu_has_pse && (page_count(kpte_page) == 1)) {
+ if (cpu_has_pse && (page_private(kpte_page) == 0)) {
+ ClearPagePrivate(kpte_page);
list_add(&kpte_page->lru, &df_list);
revert_page(kpte_page, address);
}
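
Taken together, the pageattr.c hunks stop abusing the page refcount and instead keep an explicit counter in page_private of each split page-table page: incremented when a PTE in it gains non-standard attributes, decremented when one reverts, with the large page regenerated once it reaches zero. A hypothetical helper (not in the patch) capturing the final condition:

/* Illustrative only: page_private(kpte_page) counts PTEs in this split
 * page-table page that still carry non-standard attributes.  Once it
 * drops back to zero (and the CPU has PSE), the 2M/4M mapping can be
 * restored via revert_page().
 */
static inline int can_revert_large_page(struct page *kpte_page)
{
	return !PageReserved(kpte_page) && cpu_has_pse &&
	       page_private(kpte_page) == 0;
}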
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 2d13889d0a99..9dbc7dadd165 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -68,9 +68,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
/*
- * This function checks for proper alignment of input addr and len parameters.
+ * Don't actually need to do any preparation, but need to make sure
+ * the address is in the right region.
*/
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b38b6d213c15..08d94e6bfa18 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -197,7 +197,7 @@ free_initmem (void)
eaddr = (unsigned long) ia64_imva(__init_end);
while (addr < eaddr) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
++totalram_pages;
addr += PAGE_SIZE;
@@ -252,7 +252,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
continue;
page = virt_to_page(start);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
free_page(start);
++totalram_pages;
}
@@ -640,7 +640,7 @@ mem_init (void)
void online_page(struct page *page)
{
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalram_pages++;
num_physpages++;
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 6facf15b04f3..c9e7dad860b7 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -226,7 +226,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -244,7 +244,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
unsigned long p;
for (p = start; p < end; p += PAGE_SIZE) {
ClearPageReserved(virt_to_page(p));
- set_page_count(virt_to_page(p), 1);
+ init_page_count(virt_to_page(p));
free_page(p);
totalram_pages++;
}
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index c45beb955943..a190e39c907a 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -137,7 +137,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 559942ce0e1e..d6d582a5abb0 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -54,7 +54,7 @@ void __init init_pointer_table(unsigned long ptable)
/* unreserve the page so it's possible to free that page */
PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
- set_page_count(PD_PAGE(dp), 1);
+ init_page_count(PD_PAGE(dp));
return;
}
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index d855fec26317..afb57eeafdcb 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -276,7 +276,7 @@ void free_initmem(void)
addr = (unsigned long)&__init_begin;
for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
virt_to_page(addr)->flags &= ~(1 << PG_reserved);
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index eddb8d3e130a..d844c755945a 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -26,6 +26,7 @@ EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strchr);
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c
index 89f0b554ffb7..d79503fe6e42 100644
--- a/arch/m68knommu/mm/init.c
+++ b/arch/m68knommu/mm/init.c
@@ -195,7 +195,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
@@ -218,7 +218,7 @@ free_initmem()
/* next to check that the page we free is not a partial page */
for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c
index 958d2eb78862..8a9ef58cc399 100644
--- a/arch/mips/arc/memory.c
+++ b/arch/mips/arc/memory.c
@@ -158,7 +158,7 @@ unsigned long __init prom_free_prom_memory(void)
while (addr < boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size) {
ClearPageReserved(virt_to_page(__va(addr)));
- set_page_count(virt_to_page(__va(addr)), 1);
+ init_page_count(virt_to_page(__va(addr)));
free_page((unsigned long)__va(addr));
addr += PAGE_SIZE;
freed += PAGE_SIZE;
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 81cb5a76cfb7..1edaf3074ee9 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -118,7 +118,7 @@ unsigned long __init prom_free_prom_memory(void)
addr = PAGE_SIZE;
while (addr < end) {
ClearPageReserved(virt_to_page(__va(addr)));
- set_page_count(virt_to_page(__va(addr)), 1);
+ init_page_count(virt_to_page(__va(addr)));
free_page((unsigned long)__va(addr));
addr += PAGE_SIZE;
}
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c
index 2c8afd77a20b..ee5e70c95cf3 100644
--- a/arch/mips/mips-boards/generic/memory.c
+++ b/arch/mips/mips-boards/generic/memory.c
@@ -174,7 +174,7 @@ unsigned long __init prom_free_prom_memory(void)
while (addr < boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size) {
ClearPageReserved(virt_to_page(__va(addr)));
- set_page_count(virt_to_page(__va(addr)), 1);
+ init_page_count(virt_to_page(__va(addr)));
free_page((unsigned long)__va(addr));
addr += PAGE_SIZE;
freed += PAGE_SIZE;
diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c
index 0dbd7435bb2a..1ec4e75656bd 100644
--- a/arch/mips/mips-boards/sim/sim_mem.c
+++ b/arch/mips/mips-boards/sim/sim_mem.c
@@ -117,7 +117,7 @@ unsigned long __init prom_free_prom_memory(void)
while (addr < boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size) {
ClearPageReserved(virt_to_page(__va(addr)));
- set_page_count(virt_to_page(__va(addr)), 1);
+ init_page_count(virt_to_page(__va(addr)));
free_page((unsigned long)__va(addr));
addr += PAGE_SIZE;
freed += PAGE_SIZE;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 0ff9a348b843..52f7d59fe612 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -54,7 +54,8 @@ unsigned long empty_zero_page, zero_page_mask;
*/
unsigned long setup_zero_pages(void)
{
- unsigned long order, size;
+ unsigned int order;
+ unsigned long size;
struct page *page;
if (cpu_has_vce)
@@ -67,9 +68,9 @@ unsigned long setup_zero_pages(void)
panic("Oh boy, that early out of memory?");
page = virt_to_page(empty_zero_page);
+ split_page(page, order);
while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
SetPageReserved(page);
- set_page_count(page, 1);
page++;
}
@@ -244,7 +245,7 @@ void __init mem_init(void)
#ifdef CONFIG_LIMITED_DMA
set_page_address(page, lowmem_page_address(page));
#endif
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
@@ -291,7 +292,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
@@ -314,7 +315,7 @@ void free_initmem(void)
page = addr;
#endif
ClearPageReserved(virt_to_page(page));
- set_page_count(virt_to_page(page), 1);
+ init_page_count(virt_to_page(page));
free_page(page);
totalram_pages++;
freed += PAGE_SIZE;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index ed93a9792959..e0d095daa5ed 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -559,7 +559,7 @@ void __init mem_init(void)
/* if (!page_is_ram(pgnr)) continue; */
/* commented out until page_is_ram works */
ClearPageReserved(p);
- set_page_count(p, 1);
+ init_page_count(p);
__free_page(p);
totalram_pages++;
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 7847ca13d6c2..852eda3953dc 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -398,7 +398,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
num_physpages++;
totalram_pages++;
@@ -1018,7 +1018,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
num_physpages++;
totalram_pages++;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b51bb28c054b..7370f9f33e29 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -133,21 +133,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return __pte(old);
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- if (! (within_hugepage_low_range(addr, len)
- || within_hugepage_high_range(addr, len)) )
- return -EINVAL;
- return 0;
-}
-
struct slb_flush_info {
struct mm_struct *mm;
u16 newareas;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 7d0d75c11848..b57fb3a2b7bb 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -216,7 +216,7 @@ static void free_sec(unsigned long start, unsigned long end, const char *name)
while (start < end) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
cnt++;
start += PAGE_SIZE;
@@ -248,7 +248,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 81cfb0c2ec58..bacb71c89811 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -140,7 +140,7 @@ void free_initmem(void)
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
memset((void *)addr, 0xcc, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -155,7 +155,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 550517c2dd42..454cac01d8cc 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -108,8 +108,8 @@ EXPORT_SYMBOL(phys_mem_access_prot);
void online_page(struct page *page)
{
ClearPageReserved(page);
- set_page_count(page, 0);
- free_cold_page(page);
+ init_page_count(page);
+ __free_page(page);
totalram_pages++;
num_physpages++;
}
@@ -376,7 +376,7 @@ void __init mem_init(void)
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index b33a4443f5a9..fec8e65b36ea 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -115,7 +115,7 @@ static void __init cell_spuprop_present(struct device_node *spe,
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_page(pfn);
set_page_links(page, ZONE_DMA, node_id, pfn);
- set_page_count(page, 1);
+ init_page_count(page);
reset_page_mapcount(page);
SetPageReserved(page);
INIT_LIST_HEAD(&page->lru);
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
index 685fd0defe23..61465ec88bc7 100644
--- a/arch/ppc/kernel/dma-mapping.c
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -223,6 +223,8 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
struct page *end = page + (1 << order);
+ split_page(page, order);
+
/*
* Set the "dma handle"
*/
@@ -231,7 +233,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
do {
BUG_ON(!pte_none(*pte));
- set_page_count(page, 1);
SetPageReserved(page);
set_pte_at(&init_mm, vaddr,
pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
@@ -244,7 +245,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
* Free the otherwise unused pages.
*/
while (page < end) {
- set_page_count(page, 1);
__free_page(page);
page++;
}
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 134db5c04203..cb1c294fb932 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -140,7 +140,7 @@ static void free_sec(unsigned long start, unsigned long end, const char *name)
while (start < end) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
cnt++;
start += PAGE_SIZE;
@@ -172,7 +172,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
@@ -441,7 +441,7 @@ void __init mem_init(void)
struct page *page = mem_map + pfn;
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index df953383724d..a055894f3bd8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -292,7 +292,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -307,7 +307,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index df3a9e452cc5..ee73e30263af 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -23,6 +23,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
page = alloc_pages(gfp, order);
if (!page)
return NULL;
+ split_page(page, order);
ret = page_address(page);
*handle = virt_to_phys(ret);
@@ -37,8 +38,6 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
end = page + (1 << order);
while (++page < end) {
- set_page_count(page, 1);
-
/* Free any unused pages */
if (page >= free) {
__free_page(page);
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 6b7a7688c98e..a3568fd51508 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -84,18 +84,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index e342565f75fb..77b4a838fe10 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -273,7 +273,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -286,7 +286,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
unsigned long p;
for (p = start; p < end; p += PAGE_SIZE) {
ClearPageReserved(virt_to_page(p));
- set_page_count(virt_to_page(p), 1);
+ init_page_count(virt_to_page(p));
free_page(p);
totalram_pages++;
}
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index ed6a505b3ee2..3d89f2a6c785 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -84,18 +84,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
diff --git a/arch/sh64/mm/init.c b/arch/sh64/mm/init.c
index a65e8bb2c3cc..1169757fb38b 100644
--- a/arch/sh64/mm/init.c
+++ b/arch/sh64/mm/init.c
@@ -173,7 +173,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -186,7 +186,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
unsigned long p;
for (p = start; p < end; p += PAGE_SIZE) {
ClearPageReserved(virt_to_page(p));
- set_page_count(virt_to_page(p), 1);
+ init_page_count(virt_to_page(p));
free_page(p);
totalram_pages++;
}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 40d426cce824..4219dd2ce3a2 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -266,19 +266,19 @@ void __init smp4d_boot_cpus(void)
/* Free unneeded trap tables */
ClearPageReserved(virt_to_page(trapbase_cpu1));
- set_page_count(virt_to_page(trapbase_cpu1), 1);
+ init_page_count(virt_to_page(trapbase_cpu1));
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
ClearPageReserved(virt_to_page(trapbase_cpu2));
- set_page_count(virt_to_page(trapbase_cpu2), 1);
+ init_page_count(virt_to_page(trapbase_cpu2));
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
ClearPageReserved(virt_to_page(trapbase_cpu3));
- set_page_count(virt_to_page(trapbase_cpu3), 1);
+ init_page_count(virt_to_page(trapbase_cpu3));
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index a21f27d10e55..fbbd8a474c4c 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -233,21 +233,21 @@ void __init smp4m_boot_cpus(void)
/* Free unneeded trap tables */
if (!cpu_isset(i, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu1));
- set_page_count(virt_to_page(trapbase_cpu1), 1);
+ init_page_count(virt_to_page(trapbase_cpu1));
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(2, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu2));
- set_page_count(virt_to_page(trapbase_cpu2), 1);
+ init_page_count(virt_to_page(trapbase_cpu2));
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(3, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu3));
- set_page_count(virt_to_page(trapbase_cpu3), 1);
+ init_page_count(virt_to_page(trapbase_cpu3));
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 2cb0728cee05..1ef7fa03fefe 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -76,7 +76,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
vma->vm_pgoff = (offset >> PAGE_SHIFT) |
((unsigned long)space << 28UL);
- prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index c03babaa0498..898669732466 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -383,7 +383,7 @@ void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
struct page *page = pfn_to_page(tmp);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
@@ -480,7 +480,7 @@ void free_initmem (void)
p = virt_to_page(addr);
ClearPageReserved(p);
- set_page_count(p, 1);
+ init_page_count(p);
__free_page(p);
totalram_pages++;
num_physpages++;
@@ -497,7 +497,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
struct page *p = virt_to_page(start);
ClearPageReserved(p);
- set_page_count(p, 1);
+ init_page_count(p);
__free_page(p);
num_physpages++;
}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index e9f9571601ba..36b4d24988f8 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -22,8 +22,6 @@ struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;
-unsigned int pg_iobits;
-
extern void ld_mmu_sun4c(void);
extern void ld_mmu_srmmu(void);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c664b962987c..27b0e0ba8581 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -2130,6 +2130,13 @@ static unsigned long srmmu_pte_to_pgoff(pte_t pte)
return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}
+static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
+{
+ prot &= ~__pgprot(SRMMU_CACHE);
+
+ return prot;
+}
+
/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
@@ -2150,9 +2157,9 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
- pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
/* Functions */
+ BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
#ifndef CONFIG_SMP
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 731f19603cad..49f28c1bdc6d 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1589,7 +1589,10 @@ static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
{
- unsigned long page_entry;
+ unsigned long page_entry, pg_iobits;
+
+ pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
+ _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
@@ -2134,6 +2137,13 @@ void __init sun4c_paging_init(void)
printk("SUN4C: %d mmu entries for the kernel\n", cnt);
}
+static pgprot_t sun4c_pgprot_noncached(pgprot_t prot)
+{
+ prot |= __pgprot(_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE);
+
+ return prot;
+}
+
/* Load up routines and constants for sun4c mmu */
void __init ld_mmu_sun4c(void)
{
@@ -2156,10 +2166,9 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
- pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
- _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
/* Functions */
+ BTFIXUPSET_CALL(pgprot_noncached, sun4c_pgprot_noncached, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
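
With srmmu and sun4c now each supplying pgprot_noncached() through BTFIXUP (srmmu clears SRMMU_CACHE; sun4c sets the IO and NOCACHE bits), the global pg_iobits variable removed above becomes unnecessary: callers request an uncached mapping explicitly. A hypothetical driver-side sketch of that usage:

/* Hypothetical mmap handler: ask for an uncached mapping via the
 * per-MMU pgprot_noncached() instead of relying on global pg_iobits.
 */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}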
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index c3685b314d71..267afddf63cf 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -175,11 +175,11 @@ config HUGETLB_PAGE_SIZE_4MB
bool "4MB"
config HUGETLB_PAGE_SIZE_512K
- depends on !SPARC64_PAGE_SIZE_4MB
+ depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
bool "512K"
config HUGETLB_PAGE_SIZE_64K
- depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+ depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
bool "64K"
endchoice
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 95ffa9418620..dfccff29e182 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -656,6 +656,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
__pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
@@ -663,7 +664,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (ret)
return ret;
- vma->vm_flags |= VM_IO;
return 0;
}
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index ab23ddb7116e..b731881224e8 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
*
* index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
* tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ * tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
and TSB_PTR, 0x7, TMP1; \
mov 512, TMP2; \
andn TSB_PTR, 0x7, TSB_PTR; \
sllx TMP2, TMP1, TMP2; \
- srlx VADDR, PAGE_SHIFT, TMP1; \
+ srlx VADDR, HASH_SHIFT, TMP1; \
sub TMP2, 1, TMP2; \
and TMP1, TMP2, TMP1; \
sllx TMP1, 4, TMP1; \
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
LOAD_ITLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
- COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+ COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
LOAD_DTLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
- COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+ COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
/* fallthrough */
- /* Create TSB pointer into %g1. This is something like:
- *
- * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
- * tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
- * tsb_ptr = tsb_base + (tsb_index * 16);
- */
sun4v_tsb_miss_common:
- COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+ COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
- /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS
- * still in %g2, so it's quite trivial to get at the PGD PHYS value
- * so we can preload it into %g7.
- */
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+ mov SCRATCHPAD_UTSBREG2, %g5
+ ldxa [%g5] ASI_SCRATCHPAD, %g5
+ cmp %g5, -1
+ be,pt %xcc, 80f
+ nop
+ COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+ /* That clobbered %g2, reload it. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+ sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80: stx %g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
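
COMPUTE_TSB_PTR now takes the hash shift as a parameter so the same macro can index the PAGE_SIZE TSB and, under CONFIG_HUGETLB_PAGE, the HPAGE_SIZE TSB. In C, the computation the assembly performs is roughly:

/* C rendering of COMPUTE_TSB_PTR (a sketch; the assembly above is the
 * real implementation).  tsb_reg carries the TSB base in its upper bits
 * and log2(nentries / 512) in the low 3 bits; entries are 16 bytes.
 */
static unsigned long compute_tsb_ptr(unsigned long tsb_reg,
				     unsigned long vaddr,
				     unsigned long hash_shift)
{
	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	unsigned long tsb_base = tsb_reg & ~0x7UL;
	unsigned long tsb_index = (vaddr >> hash_shift) & index_mask;

	return tsb_base + tsb_index * 16;
}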
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7f7dba0ca96a..df612e4f75f9 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
/* Only invoked on boot processor. */
void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
(TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
(TRAP_PER_CPU_CPU_LIST_PA !=
- offsetof(struct trap_per_cpu, cpu_list_pa)))
+ offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+ (TRAP_PER_CPU_TSB_HUGE !=
+ offsetof(struct trap_per_cpu, tsb_huge)) ||
+ (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+ offsetof(struct trap_per_cpu, tsb_huge_temp)))
trap_per_cpu_offsets_are_bolixed_dave();
+ if ((TSB_CONFIG_TSB !=
+ offsetof(struct tsb_config, tsb)) ||
+ (TSB_CONFIG_RSS_LIMIT !=
+ offsetof(struct tsb_config, tsb_rss_limit)) ||
+ (TSB_CONFIG_NENTRIES !=
+ offsetof(struct tsb_config, tsb_nentries)) ||
+ (TSB_CONFIG_REG_VAL !=
+ offsetof(struct tsb_config, tsb_reg_val)) ||
+ (TSB_CONFIG_MAP_VADDR !=
+ offsetof(struct tsb_config, tsb_map_vaddr)) ||
+ (TSB_CONFIG_MAP_PTE !=
+ offsetof(struct tsb_config, tsb_map_pte)))
+ tsb_config_offsets_are_bolixed_dave();
+
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
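
The new checks reuse the existing link-time assertion idiom: the "bolixed" functions are declared extern but never defined, so if every offsetof() matches its assembly-side constant the compiler discards the dead branch, and otherwise the build fails at link time. A minimal sketch of the pattern:

/* Never defined anywhere; referencing it is a build-time assertion. */
extern void tsb_config_offsets_are_bolixed_dave(void);

static void check_tsb_config_layout(void)
{
	if (TSB_CONFIG_TSB != offsetof(struct tsb_config, tsb))
		tsb_config_offsets_are_bolixed_dave(); /* link error if hit */
}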
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 118baea44f69..a0c8ba58920b 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -3,8 +3,13 @@
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
+#include <linux/config.h>
+
#include <asm/tsb.h>
#include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
.text
.align 32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
ldxa [%g4] ASI_IMMU, %g4
/* At this point we have:
- * %g1 -- TSB entry address
+ * %g1 -- PAGE_SIZE TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
* %g4 -- missing virtual address
* %g6 -- TAG TARGET (vaddr >> 22)
*/
tsb_miss_page_table_walk:
- TRAP_LOAD_PGD_PHYS(%g7, %g5)
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
- /* And now we have the PGD base physical address in %g7. */
-tsb_miss_page_table_walk_sun4v_fastpath:
- USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+ /* Before committing to a full page table walk,
+ * check the huge page TSB.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+ nop
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ mov SCRATCHPAD_UTSBREG2, %g5
+ ldxa [%g5] ASI_SCRATCHPAD, %g5
+ .previous
+
+ cmp %g5, -1
+ be,pt %xcc, 80f
+ nop
+
+ /* We need an aligned pair of registers containing 2 values
+ * which can be easily rematerialized. %g6 and %g7 foot the
+ * bill just nicely. We'll save %g6 away into %g2 for the
+ * huge page TSB TAG comparison.
+ *
+ * Perform a huge page TSB lookup.
+ */
+ mov %g6, %g2
+ and %g5, 0x7, %g6
+ mov 512, %g7
+ andn %g5, 0x7, %g5
+ sllx %g7, %g6, %g7
+ srlx %g4, HPAGE_SHIFT, %g6
+ sub %g7, 1, %g7
+ and %g6, %g7, %g6
+ sllx %g6, 4, %g6
+ add %g5, %g6, %g5
+
+ TSB_LOAD_QUAD(%g5, %g6)
+ cmp %g6, %g2
+ be,a,pt %xcc, tsb_tlb_reload
+ mov %g7, %g5
+
+ /* No match, remember the huge page TSB entry address,
+ * and restore %g6 and %g7.
+ */
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+ srlx %g4, 22, %g6
+80: stx %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+ ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
/* At this point we have:
* %g1 -- TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
- * %g5 -- physical address of PTE in Linux page tables
+ * %g4 -- missing virtual address
* %g6 -- TAG TARGET (vaddr >> 22)
+ * %g7 -- page table physical address
+ *
+ * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+ * TSB both lack a matching entry.
*/
-tsb_reload:
- TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+ USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, tsb_do_fault
- TSB_STORE(%g1, %g7)
+ brgez,pn %g5, tsb_do_fault
+ nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661: sethi %uhi(_PAGE_SZALL_4U), %g7
+ sllx %g7, 32, %g7
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ mov _PAGE_SZALL_4V, %g7
+ nop
+ .previous
+
+ and %g5, %g7, %g2
+
+661: sethi %uhi(_PAGE_SZHUGE_4U), %g7
+ sllx %g7, 32, %g7
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ mov _PAGE_SZHUGE_4V, %g7
+ nop
+ .previous
+
+ cmp %g2, %g7
+ bne,pt %xcc, 60f
+ nop
+
+ /* It is a huge page, use the huge page TSB entry address we
+ * calculated above.
+ */
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+ ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+ cmp %g2, -1
+ movne %xcc, %g2, %g1
+60:
+#endif
+ /* At this point we have:
+ * %g1 -- TSB entry address
+ * %g3 -- FAULT_CODE_{D,I}TLB
+ * %g5 -- valid PTE
+ * %g6 -- TAG TARGET (vaddr >> 22)
+ */
+tsb_reload:
+ TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
/* Finally, load TLB and return from trap. */
@@ -240,10 +335,9 @@ tsb_flush:
* schedule() time.
*
* %o0: page table physical address
- * %o1: TSB register value
- * %o2: TSB virtual address
- * %o3: TSB mapping locked PTE
- * %o4: Hypervisor TSB descriptor physical address
+ * %o1: TSB base config pointer
+ * %o2: TSB huge config pointer, or NULL if none
+ * %o3: Hypervisor TSB descriptor physical address
*
* We have to run this whole thing with interrupts
* disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@ tsb_flush:
.globl __tsb_context_switch
.type __tsb_context_switch,#function
__tsb_context_switch:
- rdpr %pstate, %o5
- wrpr %o5, PSTATE_IE, %pstate
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+
+ TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
- ldub [%g6 + TI_CPU], %g1
- sethi %hi(trap_block), %g2
- sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1
- or %g2, %lo(trap_block), %g2
- add %g2, %g1, %g2
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
- sethi %hi(tlb_type), %g1
- lduw [%g1 + %lo(tlb_type)], %g1
- cmp %g1, 3
- bne,pt %icc, 1f
+ ldx [%o1 + TSB_CONFIG_REG_VAL], %o0
+ brz,pt %o2, 1f
+ mov -1, %g3
+
+ ldx [%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1: stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+ sethi %hi(tlb_type), %g2
+ lduw [%g2 + %lo(tlb_type)], %g2
+ cmp %g2, 3
+ bne,pt %icc, 50f
nop
/* Hypervisor TSB switch. */
- mov SCRATCHPAD_UTSBREG1, %g1
- stxa %o1, [%g1] ASI_SCRATCHPAD
- mov -1, %g2
- mov SCRATCHPAD_UTSBREG2, %g1
- stxa %g2, [%g1] ASI_SCRATCHPAD
-
- /* Save away %o5's %pstate, we have to use %o5 for
- * the hypervisor call.
- */
- mov %o5, %g1
+ mov SCRATCHPAD_UTSBREG1, %o5
+ stxa %o0, [%o5] ASI_SCRATCHPAD
+ mov SCRATCHPAD_UTSBREG2, %o5
+ stxa %g3, [%o5] ASI_SCRATCHPAD
+
+ mov 2, %o0
+ cmp %g3, -1
+ move %xcc, 1, %o0
mov HV_FAST_MMU_TSB_CTXNON0, %o5
- mov 1, %o0
- mov %o4, %o1
+ mov %o3, %o1
ta HV_FAST_TRAP
- /* Finish up and restore %o5. */
+ /* Finish up. */
ba,pt %xcc, 9f
- mov %g1, %o5
+ nop
/* SUN4U TSB switch. */
-1: mov TSB_REG, %g1
- stxa %o1, [%g1] ASI_DMMU
+50: mov TSB_REG, %o5
+ stxa %o0, [%o5] ASI_DMMU
membar #Sync
- stxa %o1, [%g1] ASI_IMMU
+ stxa %o0, [%o5] ASI_IMMU
membar #Sync
-2: brz %o2, 9f
- nop
+2: ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4
+ brz %o4, 9f
+ ldx [%o1 + TSB_CONFIG_MAP_PTE], %o5
sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2
- mov TLB_TAG_ACCESS, %g1
+ mov TLB_TAG_ACCESS, %g3
lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
- stxa %o2, [%g1] ASI_DMMU
+ stxa %o4, [%g3] ASI_DMMU
membar #Sync
sllx %g2, 3, %g2
- stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS
+ stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ brz,pt %o2, 9f
+ nop
+
+ ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4
+ ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5
+ mov TLB_TAG_ACCESS, %g3
+ stxa %o4, [%g3] ASI_DMMU
+ membar #Sync
+ sub %g2, (1 << 3), %g2
+ stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS
membar #Sync
+
9:
- wrpr %o5, %pstate
+ wrpr %g1, %pstate
retl
nop
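
The register comments above describe __tsb_context_switch's new calling convention; seen from C, it presumably corresponds to a prototype along these lines (the declaring header lives outside this arch-limited view):

/* Presumed C-level view of the convention documented in the comment
 * block above; tsb_huge may be NULL when the mm has no huge-page TSB.
 */
struct tsb_config;

extern void __tsb_context_switch(unsigned long pgd_pa,
				 struct tsb_config *tsb_base,
				 struct tsb_config *tsb_huge,
				 unsigned long tsb_descr_pa);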
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 63b6cc0cd5d5..d21ff3230c02 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -410,9 +410,18 @@ good_area:
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
- if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
- tsb_grow(mm, mm_rss);
-
+#ifdef CONFIG_HUGETLB_PAGE
+ mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+ if (unlikely(mm_rss >=
+ mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+ tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+ mm_rss = mm->context.huge_pte_count;
+ if (unlikely(mm_rss >=
+ mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+ tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
return;
/*
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 5fc5c579e35e..8cb06205d265 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -140,7 +140,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
vma->vm_pgoff = phys_base >> PAGE_SHIFT;
- prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index a7a24869d045..074620d413d4 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- if (pgd) {
- pud = pud_offset(pgd, addr);
- if (pud) {
- pmd = pmd_alloc(mm, pud, addr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
- }
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, addr);
}
return pte;
}
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte;
}
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
int i;
+ if (!pte_present(*ptep) && pte_present(entry))
+ mm->context.huge_pte_count++;
+
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
set_pte_at(mm, addr, ptep, entry);
ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
int i;
entry = *ptep;
+ if (pte_present(entry))
+ mm->context.huge_pte_count--;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
pte_clear(mm, addr, ptep);
@@ -263,18 +264,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
@@ -302,6 +291,15 @@ static void context_reload(void *__data)
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
+ struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+ if (likely(tp->tsb != NULL))
+ return;
+
+ tsb_grow(mm, MM_TSB_HUGE, 0);
+ tsb_context_switch(mm);
+ smp_tsb_sync(mm);
+
/* On UltraSPARC-III+ and later, configure the second half of
* the Data-TLB for huge pages.
*/
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index c2b556106fc1..ded63ee9c4fd 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
struct mm_struct *mm;
struct tsb *tsb;
unsigned long tag, flags;
+ unsigned long tsb_index, tsb_hash_shift;
if (tlb_type != hypervisor) {
unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
mm = vma->vm_mm;
+ tsb_index = MM_TSB_BASE;
+ tsb_hash_shift = PAGE_SHIFT;
+
spin_lock_irqsave(&mm->context.lock, flags);
- tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
- (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+ if ((tlb_type == hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+ (tlb_type != hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+ tsb_index = MM_TSB_HUGE;
+ tsb_hash_shift = HPAGE_SHIFT;
+ }
+ }
+#endif
+
+ tsb = mm->context.tsb_block[tsb_index].tsb;
+ tsb += ((address >> tsb_hash_shift) &
+ (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
tag = (address >> 22UL);
tsb_insert(tsb, tag, pte_val(pte));
@@ -1461,7 +1478,7 @@ void free_initmem(void)
p = virt_to_page(page);
ClearPageReserved(p);
- set_page_count(p, 1);
+ init_page_count(p);
__free_page(p);
num_physpages++;
totalram_pages++;
@@ -1477,7 +1494,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
struct page *p = virt_to_page(start);
ClearPageReserved(p);
- set_page_count(p, 1);
+ init_page_count(p);
__free_page(p);
num_physpages++;
totalram_pages++;
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index b2064e2a44d6..beaa02810f0e 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -15,9 +15,9 @@
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
- vaddr >>= PAGE_SHIFT;
+ vaddr >>= hash_shift;
return vaddr & (nentries - 1);
}
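
[To make the added hash_shift parameter concrete, a self-contained check of tsb_hash() with assumed shifts — 13 for 8K base pages, 22 for 4M huge pages — and a 512-entry TSB; the function body is copied from the patch.]

#include <assert.h>

static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
			      unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);	/* nentries is a power of two */
}

int main(void)
{
	assert(tsb_hash(0x40002000UL, 13, 512) == 1);	/* base-page slot */
	assert(tsb_hash(0x40400000UL, 22, 512) == 257);	/* huge-page slot */
	return 0;
}
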
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
unsigned long v;
for (v = start; v < end; v += PAGE_SIZE) {
- unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+ unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+ KERNEL_TSB_NENTRIES);
struct tsb *ent = &swapper_tsb[hash];
if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
}
}
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
- struct mm_struct *mm = mp->mm;
- unsigned long nentries, base, flags;
- struct tsb *tsb;
- int i;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- tsb = mm->context.tsb;
- nentries = mm->context.tsb_nentries;
+ unsigned long i;
- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
- base = __pa(tsb);
- else
- base = (unsigned long) tsb;
-
for (i = 0; i < mp->tlb_nr; i++) {
unsigned long v = mp->vaddrs[i];
unsigned long tag, ent, hash;
v &= ~0x1UL;
- hash = tsb_hash(v, nentries);
- ent = base + (hash * sizeof(struct tsb));
+ hash = tsb_hash(v, hash_shift, nentries);
+ ent = tsb + (hash * sizeof(struct tsb));
tag = (v >> 22UL);
tsb_flush(ent, tag);
}
+}
+
+void flush_tsb_user(struct mmu_gather *mp)
+{
+ struct mm_struct *mm = mp->mm;
+ unsigned long nentries, base, flags;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+ }
+#endif
spin_unlock_irqrestore(&mm->context.lock, flags);
}
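
[A user-space model of the refactor just above: the per-entry flush loop is factored into one helper so flush_tsb_user() can invoke it once per TSB with the appropriate hash shift. tsb_flush() is stood in for by tagging the entry invalid; the shifts and table size are assumed example values.]

#include <stdio.h>

struct tsb_ent { unsigned long tag, pte; };

static void flush_one(struct tsb_ent *tsb, unsigned long hash_shift,
		      unsigned long nentries,
		      const unsigned long *vaddrs, int nr)
{
	for (int i = 0; i < nr; i++) {
		unsigned long v = vaddrs[i] & ~0x1UL;
		struct tsb_ent *ent = &tsb[(v >> hash_shift) & (nentries - 1)];
		if (ent->tag == (v >> 22))
			ent->tag = ~0UL;	/* stand-in for tsb_flush() */
	}
}

int main(void)
{
	struct tsb_ent base[512] = {{0}}, huge[512] = {{0}};
	unsigned long v = 0x40002000UL, vaddrs[] = { v };

	base[(v >> 13) & 511].tag = v >> 22;	/* pretend both TSBs hold v */
	huge[(v >> 22) & 511].tag = v >> 22;

	flush_one(base, 13, 512, vaddrs, 1);	/* base TSB, 8K pages */
	flush_one(huge, 22, 512, vaddrs, 1);	/* huge TSB, 4M pages */

	printf("base tag=%#lx huge tag=%#lx\n",
	       base[(v >> 13) & 511].tag, huge[(v >> 22) & 511].tag);
	return 0;
}
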
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
unsigned long tsb_reg, base, tsb_paddr;
unsigned long page_sz, tte;
- mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+ mm->context.tsb_block[tsb_idx].tsb_nentries =
+ tsb_bytes / sizeof(struct tsb);
base = TSBMAP_BASE;
tte = pgprot_val(PAGE_KERNEL_LOCKED);
- tsb_paddr = __pa(mm->context.tsb);
+ tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
/* Physical mapping, no locked TLB entry for TSB. */
tsb_reg |= tsb_paddr;
- mm->context.tsb_reg_val = tsb_reg;
- mm->context.tsb_map_vaddr = 0;
- mm->context.tsb_map_pte = 0;
+ mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+ mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+ mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
} else {
tsb_reg |= base;
tsb_reg |= (tsb_paddr & (page_sz - 1UL));
tte |= (tsb_paddr & ~(page_sz - 1UL));
- mm->context.tsb_reg_val = tsb_reg;
- mm->context.tsb_map_vaddr = base;
- mm->context.tsb_map_pte = tte;
+ mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+ mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+ mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
}
/* Setup the Hypervisor TSB descriptor. */
if (tlb_type == hypervisor) {
- struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+ struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
- switch (PAGE_SIZE) {
- case 8192:
- default:
- hp->pgsz_idx = HV_PGSZ_IDX_8K;
+ switch (tsb_idx) {
+ case MM_TSB_BASE:
+ hp->pgsz_idx = HV_PGSZ_IDX_BASE;
break;
-
- case 64 * 1024:
- hp->pgsz_idx = HV_PGSZ_IDX_64K;
- break;
-
- case 512 * 1024:
- hp->pgsz_idx = HV_PGSZ_IDX_512K;
- break;
-
- case 4 * 1024 * 1024:
- hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+ case MM_TSB_HUGE:
+ hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
break;
+#endif
+ default:
+ BUG();
};
hp->assoc = 1;
hp->num_ttes = tsb_bytes / 16;
hp->ctx_idx = 0;
- switch (PAGE_SIZE) {
- case 8192:
- default:
- hp->pgsz_mask = HV_PGSZ_MASK_8K;
- break;
-
- case 64 * 1024:
- hp->pgsz_mask = HV_PGSZ_MASK_64K;
- break;
-
- case 512 * 1024:
- hp->pgsz_mask = HV_PGSZ_MASK_512K;
+ switch (tsb_idx) {
+ case MM_TSB_BASE:
+ hp->pgsz_mask = HV_PGSZ_MASK_BASE;
break;
-
- case 4 * 1024 * 1024:
- hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+ case MM_TSB_HUGE:
+ hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
break;
+#endif
+ default:
+ BUG();
};
hp->tsb_base = tsb_paddr;
hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
}
}
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
*
* When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
* will not trigger any longer.
*
* The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
* the number of entries that the current TSB can hold at once. Currently,
* we trigger when the RSS hits 3/4 of the TSB capacity.
*/
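
[The sizing loop itself is outside this hunk's context, but a sketch consistent with the 3/4 rule the comment describes might look like the following; the 8K..1MB range and 16-byte entry size follow the comment and the hv descriptor setup above, while the loop structure is an assumption.]

#include <stdio.h>

#define TSB_ENTRY_SIZE 16UL		/* tag + pte, per hp->num_ttes above */
#define MAX_TSB_SIZE   (1024UL * 1024)

/* Pick the smallest power-of-two TSB whose 3/4 mark exceeds the RSS. */
static unsigned long pick_tsb_size(unsigned long rss)
{
	unsigned long size;

	for (size = 8192; size < MAX_TSB_SIZE; size <<= 1)
		if (rss < (size / TSB_ENTRY_SIZE) * 3 / 4)
			break;
	return size;
}

int main(void)
{
	printf("rss=100  -> %lu-byte TSB\n", pick_tsb_size(100));
	printf("rss=4000 -> %lu-byte TSB\n", pick_tsb_size(4000));
	return 0;
}
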
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
unsigned long max_tsb_size = 1 * 1024 * 1024;
unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@ retry_tsb_alloc:
* down to a 0-order allocation and force no TSB
* growing for this address space.
*/
- if (mm->context.tsb == NULL && new_cache_index > 0) {
+ if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+ new_cache_index > 0) {
new_cache_index = 0;
new_size = 8192;
new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ retry_tsb_alloc:
/* If we failed on a TSB grow, we are under serious
* memory pressure so don't try to grow any more.
*/
- if (mm->context.tsb != NULL)
- mm->context.tsb_rss_limit = ~0UL;
+ if (mm->context.tsb_block[tsb_index].tsb != NULL)
+ mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
return;
}
@@ -339,23 +371,26 @@ retry_tsb_alloc:
*/
spin_lock_irqsave(&mm->context.lock, flags);
- old_tsb = mm->context.tsb;
- old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
- old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+ old_tsb = mm->context.tsb_block[tsb_index].tsb;
+ old_cache_index =
+ (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+ old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+ sizeof(struct tsb));
/* Handle multiple threads trying to grow the TSB at the same time.
* One will get in here first, and bump the size and the RSS limit.
* The others will get in here next and hit this check.
*/
- if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+ if (unlikely(old_tsb &&
+ (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
spin_unlock_irqrestore(&mm->context.lock, flags);
kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
return;
}
- mm->context.tsb_rss_limit = new_rss_limit;
+ mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
if (old_tsb) {
extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ retry_tsb_alloc:
copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
}
- mm->context.tsb = new_tsb;
- setup_tsb_params(mm, new_size);
+ mm->context.tsb_block[tsb_index].tsb = new_tsb;
+ setup_tsb_params(mm, tsb_index, new_size);
spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -394,40 +429,65 @@ retry_tsb_alloc:
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+#ifdef CONFIG_HUGETLB_PAGE
+ unsigned long huge_pte_count;
+#endif
+ unsigned int i;
+
spin_lock_init(&mm->context.lock);
mm->context.sparc64_ctx_val = 0UL;
+#ifdef CONFIG_HUGETLB_PAGE
+ /* We reset it to zero because the fork() page copying
+ * will re-increment the counters as the parent PTEs are
+ * copied into the child address space.
+ */
+ huge_pte_count = mm->context.huge_pte_count;
+ mm->context.huge_pte_count = 0;
+#endif
+
/* copy_mm() copies over the parent's mm_struct before calling
* us, so we need to zero out the TSB pointer or else tsb_grow()
* will be confused and think there is an older TSB to free up.
*/
- mm->context.tsb = NULL;
+ for (i = 0; i < MM_NUM_TSBS; i++)
+ mm->context.tsb_block[i].tsb = NULL;
/* If this is fork, inherit the parent's TSB size. We would
* grow it to that size on the first page fault anyways.
*/
- tsb_grow(mm, get_mm_rss(mm));
+ tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
- if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+ if (unlikely(huge_pte_count))
+ tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+ if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
return -ENOMEM;
return 0;
}
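
[A model of the fork-time handling in init_new_context() above: the inherited huge_pte_count is saved and zeroed (the fork page copying re-increments it through set_huge_pte_at()), and the saved value seeds the initial huge-TSB size. The struct and function names here are hypothetical stand-ins.]

#include <stdio.h>

struct ctx_model { unsigned long huge_pte_count; };

static void init_new_context_model(struct ctx_model *child)
{
	/* Value copied over from the parent by the mm_struct copy. */
	unsigned long inherited = child->huge_pte_count;

	child->huge_pte_count = 0;	/* re-counted during PTE copying */
	if (inherited)
		printf("pre-size huge TSB for %lu huge PTEs\n", inherited);
}

int main(void)
{
	struct ctx_model child = { .huge_pte_count = 3 };
	init_new_context_model(&child);
	return 0;
}
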
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
{
- unsigned long flags, cache_index;
+ unsigned long cache_index;
- cache_index = (mm->context.tsb_reg_val & 0x7UL);
- kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+ if (!tp->tsb)
+ return;
+ cache_index = tp->tsb_reg_val & 0x7UL;
+ kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+ tp->tsb = NULL;
+ tp->tsb_reg_val = 0UL;
+}
- /* We can remove these later, but for now it's useful
- * to catch any bogus post-destroy_context() references
- * to the TSB.
- */
- mm->context.tsb = NULL;
- mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags, i;
+
+ for (i = 0; i < MM_NUM_TSBS; i++)
+ tsb_destroy_one(&mm->context.tsb_block[i]);
spin_lock_irqsave(&ctx_alloc_lock, flags);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index fa4f915be5c5..92cce96b5e24 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -57,7 +57,7 @@ static void setup_highmem(unsigned long highmem_start,
for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
page = &mem_map[highmem_pfn + i];
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
}
}
@@ -296,7 +296,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
(end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 544665e04513..0e65340eee33 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -279,7 +279,7 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
for(i = 0; i < total_pages; i++){
p = &map[i];
- set_page_count(p, 0);
+ memset(p, 0, sizeof(struct page));
SetPageReserved(p);
INIT_LIST_HEAD(&p->lru);
}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 3080f84bf7b7..ee5ce3d3cbc3 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -477,7 +477,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
-static unsigned int cyc2ns_scale;
+static unsigned int cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 3496abc8d372..c9dc7e46731e 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -124,6 +124,7 @@ extern void * __memcpy(void *,const void *,__kernel_size_t);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 7af1742aa958..40ed13d263cd 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -486,7 +486,7 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
void online_page(struct page *page)
{
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalram_pages++;
num_physpages++;
@@ -592,7 +592,7 @@ void free_initmem(void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
free_page(addr);
totalram_pages++;
@@ -632,7 +632,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 35f1f1aab063..531ad21447b1 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -45,6 +45,13 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
pte_t *pbase;
if (!base)
return NULL;
+ /*
+ * page_private is used to track the number of entries in
+ * the page table page that have non-standard attributes.
+ */
+ SetPagePrivate(base);
+ page_private(base) = 0;
+
address = __pa(address);
addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base);
@@ -77,26 +84,12 @@ static inline void flush_map(unsigned long address)
on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
-struct deferred_page {
- struct deferred_page *next;
- struct page *fpage;
- unsigned long address;
-};
-static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
+static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
-static inline void save_page(unsigned long address, struct page *fpage)
+static inline void save_page(struct page *fpage)
{
- struct deferred_page *df;
- df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
- if (!df) {
- flush_map(address);
- __free_page(fpage);
- } else {
- df->next = df_list;
- df->fpage = fpage;
- df->address = address;
- df_list = df;
- }
+ fpage->lru.next = (struct list_head *)deferred_pages;
+ deferred_pages = fpage;
}
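
[The replacement save_page() threads deferred pages through the page structure itself (via lru.next), so the old kmalloc() failure path disappears entirely. A minimal user-space model of that intrusive list:]

#include <stdio.h>

struct page_model { struct page_model *next; };	/* stands in for lru.next */

static struct page_model *deferred_pages;

static void save_page_model(struct page_model *fpage)
{
	fpage->next = deferred_pages;	/* push; no allocation can fail */
	deferred_pages = fpage;
}

int main(void)
{
	struct page_model a = {0}, b = {0};

	save_page_model(&a);
	save_page_model(&b);
	for (struct page_model *p = deferred_pages; p; p = p->next)
		printf("deferred page at %p\n", (void *)p);
	return 0;
}
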
/*
@@ -138,8 +131,8 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
set_pte(kpte, pfn_pte(pfn, prot));
} else {
/*
- * split_large_page will take the reference for this change_page_attr
- * on the split page.
+ * split_large_page will take the reference for this
+ * change_page_attr on the split page.
*/
struct page *split;
@@ -151,23 +144,20 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
set_pte(kpte,mk_pte(split, ref_prot2));
kpte_page = split;
}
- get_page(kpte_page);
+ page_private(kpte_page)++;
} else if ((kpte_flags & _PAGE_PSE) == 0) {
set_pte(kpte, pfn_pte(pfn, ref_prot));
- __put_page(kpte_page);
+ BUG_ON(page_private(kpte_page) == 0);
+ page_private(kpte_page)--;
} else
BUG();
/* on x86-64 the direct mapping set at boot is not using 4k pages */
BUG_ON(PageReserved(kpte_page));
- switch (page_count(kpte_page)) {
- case 1:
- save_page(address, kpte_page);
+ if (page_private(kpte_page) == 0) {
+ save_page(kpte_page);
revert_page(address, ref_prot);
- break;
- case 0:
- BUG(); /* memleak and failed 2M page regeneration */
}
return 0;
}
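
[A compact model of the page_private bookkeeping this hunk switches to: each PTE given non-standard protection bumps the split page's counter, each reversion drops it, and reaching zero is the signal to regenerate the 2M mapping (the save_page() plus revert_page() path above).]

#include <assert.h>
#include <stdio.h>

struct pt_page_model { unsigned long nonstd; };

static void set_nonstd(struct pt_page_model *p)
{
	p->nonstd++;			/* page_private(kpte_page)++ */
}

static void clear_nonstd(struct pt_page_model *p)
{
	assert(p->nonstd > 0);		/* mirrors the BUG_ON() */
	if (--p->nonstd == 0)
		printf("all PTEs standard again: coalesce to 2M\n");
}

int main(void)
{
	struct pt_page_model pt = {0};

	set_nonstd(&pt);
	set_nonstd(&pt);
	clear_nonstd(&pt);
	clear_nonstd(&pt);		/* prints the coalesce message */
	return 0;
}
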
@@ -220,17 +210,18 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
void global_flush_tlb(void)
{
- struct deferred_page *df, *next_df;
+ struct page *dpage;
down_read(&init_mm.mmap_sem);
- df = xchg(&df_list, NULL);
+ dpage = xchg(&deferred_pages, NULL);
up_read(&init_mm.mmap_sem);
- flush_map((df && !df->next) ? df->address : 0);
- for (; df; df = next_df) {
- next_df = df->next;
- if (df->fpage)
- __free_page(df->fpage);
- kfree(df);
+
+ flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
+ while (dpage) {
+ struct page *tmp = dpage;
+ dpage = (struct page *)dpage->lru.next;
+ ClearPagePrivate(tmp);
+ __free_page(tmp);
}
}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 5a91d6c9e66d..e1be4235f367 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -272,7 +272,7 @@ free_reserved_mem(void *start, void *end)
{
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page((unsigned long)start);
totalram_pages++;
}
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
index e5e119c820e4..7d28914d11cb 100644
--- a/arch/xtensa/mm/pgtable.c
+++ b/arch/xtensa/mm/pgtable.c
@@ -14,25 +14,21 @@
pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte, p;
+ pte_t *pte = NULL, *p;
int color = ADDR_COLOR(address);
int i;
p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
if (likely(p)) {
- struct page *page;
-
- for (i = 0; i < COLOR_SIZE; i++, p++) {
- page = virt_to_page(pte);
-
- set_page_count(page, 1);
- ClearPageCompound(page);
+ split_page(virt_to_page(p), COLOR_ORDER);
+ for (i = 0; i < COLOR_SIZE; i++) {
if (ADDR_COLOR(p) == color)
pte = p;
else
free_page(p);
+ p += PTRS_PER_PTE;
}
clear_page(pte);
}
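
[The coloring logic in this function can be modeled in user space: allocate 2^COLOR_ORDER naturally aligned candidates, keep the one whose cache color matches the faulting address, and discard the rest — split_page() in the patch is what makes each candidate individually freeable. COLOR_ORDER and the 8K page size are assumed values.]

#include <stdio.h>

#define COLOR_ORDER 1
#define COLOR_SIZE  (1 << COLOR_ORDER)
#define ADDR_COLOR(a) (((unsigned long)(a) >> 13) & (COLOR_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x2000;	/* hypothetical faulting address */
	static char block[COLOR_SIZE][8192]
		__attribute__((aligned(COLOR_SIZE * 8192)));
	char *keep = NULL;

	for (int i = 0; i < COLOR_SIZE; i++) {
		if (ADDR_COLOR(block[i]) == ADDR_COLOR(addr))
			keep = block[i];	/* matching color: keep it */
		/* else: the kernel would free_page() this candidate */
	}
	printf("kept page %p for color %lu\n",
	       (void *)keep, ADDR_COLOR(addr));
	return 0;
}
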
@@ -49,20 +45,20 @@ int flush;
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page, p;
+ struct page *page = NULL, *p;
int color = ADDR_COLOR(address);
p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
if (likely(p)) {
- for (i = 0; i < PAGE_ORDER; i++) {
- set_page_count(p, 1);
- ClearPageCompound(p);
+ split_page(p, COLOR_ORDER);
- if (PADDR_COLOR(page_address(pg)) == color)
+ for (i = 0; i < PAGE_ORDER; i++) {
+ if (PADDR_COLOR(page_address(p)) == color)
page = p;
else
- free_page(p);
+ __free_page(p);
+ p++;
}
clear_highpage(page);
}