author    Heiko Carstens <heiko.carstens@de.ibm.com>	2006-12-08 15:56:10 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-12-08 15:56:10 +0100
commit    39b742f957a287a7514a8a35c9f516cdf30b9ff5 (patch)
tree      4b4e9c0747bb047e906fed6c04919c00ccfa2b35 /arch/s390/kernel
parent    f4eb07c17df2e6cf9bd58bfcd9cc9e05e9489d07 (diff)
download  blackbird-op-linux-39b742f957a287a7514a8a35c9f516cdf30b9ff5.tar.gz
          blackbird-op-linux-39b742f957a287a7514a8a35c9f516cdf30b9ff5.zip
[S390] Use add_active_range() and free_area_init_nodes().
Size zones and holes in an architecture independent manner for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
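The removed zholes_size[]/add_memory_hole() bookkeeping is only half of the conversion: once each usable PFN range is registered with add_active_range(), the core mm computes zone sizes and holes itself from a per-zone limit array handed to free_area_init_nodes(). That counterpart belongs in paging_init(), outside the arch/s390/kernel path this view is limited to, so it does not appear in the diff below. The following is a minimal sketch of the generic 2.6.19-era pattern; the function name sketch_paging_init() and the exact zone boundaries are illustrative assumptions, not the s390 code itself.

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/pfn.h>
#include <asm/dma.h>

/*
 * Illustrative sketch only: zone sizing as done by architectures that
 * register their memory with add_active_range().  The architecture no
 * longer tracks holes; free_area_init_nodes() derives them from the
 * registered active ranges.
 */
static void __init sketch_paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	/* Pages below MAX_DMA_ADDRESS fall into ZONE_DMA ... */
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	/* ... everything else into ZONE_NORMAL. */
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	/* Core mm sizes the zones and accounts holes from the active ranges. */
	free_area_init_nodes(max_zone_pfns);
}

The net effect is that the architecture only reports which PFN ranges actually contain RAM; trimming, hole accounting and zone construction move into common code, which is why add_memory_hole() and the last_rw_end tracking can simply be deleted in the patch below.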
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- arch/s390/kernel/setup.c | 53
1 file changed, 12 insertions, 41 deletions
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b8a1ce215142..49ef206ec880 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -66,7 +66,6 @@ unsigned long machine_flags = 0;
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end;
/*
@@ -354,21 +353,6 @@ void machine_power_off(void)
*/
void (*pm_power_off)(void) = machine_power_off;
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
- unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
- if (end <= dma_pfn)
- zholes_size[ZONE_DMA] += end - start + 1;
- else if (start > dma_pfn)
- zholes_size[ZONE_NORMAL] += end - start + 1;
- else {
- zholes_size[ZONE_DMA] += dma_pfn - start + 1;
- zholes_size[ZONE_NORMAL] += end - dma_pfn;
- }
-}
-
static int __init early_parse_mem(char *p)
{
memory_end = memparse(p, &p);
@@ -521,7 +505,6 @@ setup_memory(void)
{
unsigned long bootmap_size;
unsigned long start_pfn, end_pfn, init_pfn;
- unsigned long last_rw_end;
int i;
/*
@@ -577,39 +560,27 @@ setup_memory(void)
/*
* Register RAM areas with the bootmem allocator.
*/
- last_rw_end = start_pfn;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
- unsigned long start_chunk, end_chunk;
+ unsigned long start_chunk, end_chunk, pfn;
if (memory_chunk[i].type != CHUNK_READ_WRITE)
continue;
- start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
- start_chunk >>= PAGE_SHIFT;
- end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
- end_chunk >>= PAGE_SHIFT;
- if (start_chunk < start_pfn)
- start_chunk = start_pfn;
- if (end_chunk > end_pfn)
- end_chunk = end_pfn;
- if (start_chunk < end_chunk) {
- /* Initialize storage key for RAM pages */
- for (init_pfn = start_chunk ; init_pfn < end_chunk;
- init_pfn++)
- page_set_storage_key(init_pfn << PAGE_SHIFT,
- PAGE_DEFAULT_KEY);
- free_bootmem(start_chunk << PAGE_SHIFT,
- (end_chunk - start_chunk) << PAGE_SHIFT);
- if (last_rw_end < start_chunk)
- add_memory_hole(last_rw_end, start_chunk - 1);
- last_rw_end = end_chunk;
- }
+ start_chunk = PFN_DOWN(memory_chunk[i].addr);
+ end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+ end_chunk = min(end_chunk, end_pfn);
+ if (start_chunk >= end_chunk)
+ continue;
+ add_active_range(0, start_chunk, end_chunk);
+ pfn = max(start_chunk, start_pfn);
+ for (; pfn <= end_chunk; pfn++)
+ page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
}
psw_set_key(PAGE_DEFAULT_KEY);
- if (last_rw_end < end_pfn - 1)
- add_memory_hole(last_rw_end, end_pfn - 1);
+ free_bootmem_with_active_regions(0, max_pfn);
+ reserve_bootmem(0, PFN_PHYS(start_pfn));
/*
* Reserve the bootmem bitmap itself as well. We do this in two