Diffstat (limited to 'arch/score')
-rw-r--r--   arch/score/include/asm/cacheflush.h |  4
-rw-r--r--   arch/score/include/asm/delay.h      |  2
-rw-r--r--   arch/score/include/asm/page.h       |  2
-rw-r--r--   arch/score/kernel/setup.c           |  1
-rw-r--r--   arch/score/mm/cache.c               | 26
-rw-r--r--   arch/score/mm/init.c                |  5
6 files changed, 31 insertions, 9 deletions
diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h
index caaba24036e3..1d545d0ce206 100644
--- a/arch/score/include/asm/cacheflush.h
+++ b/arch/score/include/asm/cacheflush.h
@@ -14,10 +14,12 @@ extern void flush_cache_sigtramp(unsigned long addr);
 extern void flush_icache_all(void);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_dcache_range(unsigned long start, unsigned long end);
+extern void flush_dcache_page(struct page *page);
+
+#define PG_dcache_dirty			PG_arch_1
 
 #define flush_cache_dup_mm(mm)			do {} while (0)
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do {} while (0)
 #define flush_dcache_mmap_lock(mapping)		do {} while (0)
 #define flush_dcache_mmap_unlock(mapping)	do {} while (0)
 #define flush_cache_vmap(start, end)		do {} while (0)
diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h
index 6726ec199dc0..529e494712a5 100644
--- a/arch/score/include/asm/delay.h
+++ b/arch/score/include/asm/delay.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_SCORE_DELAY_H
 #define _ASM_SCORE_DELAY_H
 
+#include <asm-generic/param.h>
+
 static inline void __delay(unsigned long loops)
 {
 	/* 3 cycles per loop. */
diff --git a/arch/score/include/asm/page.h b/arch/score/include/asm/page.h
index d92a5a2d36d4..1e9ade8e77e6 100644
--- a/arch/score/include/asm/page.h
+++ b/arch/score/include/asm/page.h
@@ -74,7 +74,7 @@ extern unsigned long max_pfn;
 #define page_to_bus(page)	(page_to_phys(page))
 #define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))
 
-#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_mapnr)
+#define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn))
 
 #define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
 
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c
index 6a2503c75c4e..6f898c057878 100644
--- a/arch/score/kernel/setup.c
+++ b/arch/score/kernel/setup.c
@@ -49,6 +49,7 @@ static void __init bootmem_init(void)
 
 	min_low_pfn = PFN_UP(MEMORY_START);
 	max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);
+	max_mapnr = max_low_pfn - min_low_pfn;
 
 	/* Initialize the boot-time allocator with low memory only. */
 	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
index dbac9d9dfddd..b25e95743600 100644
--- a/arch/score/mm/cache.c
+++ b/arch/score/mm/cache.c
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/fs.h>
 
 #include <asm/mmu_context.h>
 
@@ -51,6 +52,27 @@ static void flush_data_cache_page(unsigned long addr)
 	}
 }
 
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	unsigned long addr;
+
+	if (PageHighMem(page))
+		return;
+	if (mapping && !mapping_mapped(mapping)) {
+		set_bit(PG_dcache_dirty, &(page)->flags);
+		return;
+	}
+
+	/*
+	 * We could delay the flush for the !page_mapping case too.  But that
+	 * case is for exec env/arg pages and those are %99 certainly going to
+	 * get faulted into the tlb (and thus flushed) anyways.
+	 */
+	addr = (unsigned long) page_address(page);
+	flush_data_cache_page(addr);
+}
+
 /* called by update_mmu_cache. */
 void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte)
@@ -63,11 +85,11 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	if (unlikely(!pfn_valid(pfn)))
 		return;
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
 		addr = (unsigned long) page_address(page);
 		if (exec)
 			flush_data_cache_page(addr);
-		clear_bit(PG_arch_1, &page->flags);
+		clear_bit(PG_dcache_dirty, &(page)->flags);
 	}
 }
 
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 4e3dcd0c4716..8c15b2c85d5a 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -83,7 +83,6 @@ void __init mem_init(void)
 	unsigned long codesize, reservedpages, datasize, initsize;
 	unsigned long tmp, ram = 0;
 
-	max_mapnr = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 	totalram_pages += free_all_bootmem();
 	totalram_pages -= setup_zero_page();	/* Setup zeroed pages. */
@@ -101,10 +100,6 @@ void __init mem_init(void)
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
-	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-	kclist_add(&kcore_vmalloc, (void *) VMALLOC_START,
-		VMALLOC_END - VMALLOC_START);
-
 	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
 		"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
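Note on the cache.c hunk: it implements a deferred ("lazy") D-cache flush. flush_dcache_page() only marks a page-cache page with PG_dcache_dirty when the file is not mmapped anywhere, and the real flush is postponed until __update_cache() runs at fault time. The following is a minimal user-space sketch of that state machine, not kernel code: struct page is stubbed, and names such as update_cache and dcache_flush_count are illustrative stand-ins, not part of any kernel API.

/* cc -Wall lazy_dcache_model.c && ./a.out */
#include <stdbool.h>
#include <stdio.h>

#define PG_dcache_dirty 0x1UL		/* stand-in for PG_arch_1 */

struct page {
	unsigned long flags;
	bool highmem;			/* page has no direct kernel mapping */
	bool has_mapping;		/* page belongs to a page-cache mapping */
	bool mapping_mapped;		/* that mapping is mmap()ed by user space */
};

static int dcache_flush_count;		/* counts simulated cache flushes */

static void flush_data_cache_page(struct page *page)
{
	dcache_flush_count++;
	printf("flushed D-cache for page %p\n", (void *)page);
}

/* Mirrors the new flush_dcache_page(): defer when nobody has the file mapped. */
static void flush_dcache_page(struct page *page)
{
	if (page->highmem)
		return;
	if (page->has_mapping && !page->mapping_mapped) {
		page->flags |= PG_dcache_dirty;	/* remember, flush on fault */
		return;
	}
	flush_data_cache_page(page);		/* flush immediately */
}

/* Mirrors __update_cache(): the deferred flush happens when the PTE is set up. */
static void update_cache(struct page *page, bool exec)
{
	if (page->has_mapping && (page->flags & PG_dcache_dirty)) {
		if (exec)
			flush_data_cache_page(page);
		page->flags &= ~PG_dcache_dirty;
	}
}

int main(void)
{
	struct page p = { .has_mapping = true, .mapping_mapped = false };

	flush_dcache_page(&p);	/* only sets PG_dcache_dirty */
	update_cache(&p, true);	/* executable fault -> real flush now */
	printf("flushes performed: %d\n", dcache_flush_count);
	return 0;
}

As in the patch itself, the deferred flush is only issued for executable mappings; a plain data fault just clears the dirty bit.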