Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  84
1 file changed, 25 insertions(+), 59 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 9821241fdede..e9844086b236 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -270,33 +270,6 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
pcpu_unit_page_offset(cpu, page_idx);
}
-static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
-{
- *rs = find_next_zero_bit(bitmap, end, *rs);
- *re = find_next_bit(bitmap, end, *rs + 1);
-}
-
-static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
-{
- *rs = find_next_bit(bitmap, end, *rs);
- *re = find_next_zero_bit(bitmap, end, *rs + 1);
-}
-
-/*
- * Bitmap region iterators. Iterates over the bitmap between
- * [@start, @end) in @chunk. @rs and @re should be integer variables
- * and will be set to start and end index of the current free region.
- */
-#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
-
-#define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
-
/*
* The following are helper functions to help access bitmaps and convert
* between bitmap offsets to address offsets.
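For context: the percpu-local iterators removed above are replaced throughout
this patch by generic bitmap region iterators from include/linux/bitmap.h. A
minimal sketch of the clear-region pair, assuming it mirrors the removed
pcpu_next_unpop()/pcpu_for_each_unpop_region() apart from taking unsigned int
indices (which is why several locals below change from int to unsigned int):

	static inline void bitmap_next_clear_region(unsigned long *bitmap,
						    unsigned int *rs, unsigned int *re,
						    unsigned int end)
	{
		/* start of the next clear run, then the set bit that ends it */
		*rs = find_next_zero_bit(bitmap, end, *rs);
		*re = find_next_bit(bitmap, end, *rs + 1);
	}

	/* iterate over clear (free/unpopulated) regions in [@start, @end) */
	#define bitmap_for_each_clear_region(bitmap, rs, re, start, end)	\
		for ((rs) = (start),						\
		     bitmap_next_clear_region((bitmap), &(rs), &(re), (end));	\
		     (rs) < (re);						\
		     (rs) = (re) + 1,						\
		     bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))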
@@ -732,9 +705,8 @@ static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
}
bits = 0;
- pcpu_for_each_md_free_region(chunk, bit_off, bits) {
+ pcpu_for_each_md_free_region(chunk, bit_off, bits)
pcpu_block_update(chunk_md, bit_off, bit_off + bits);
- }
}
/**
@@ -749,7 +721,7 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
struct pcpu_block_md *block = chunk->md_blocks + index;
unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
- int rs, re, start; /* region start, region end */
+ unsigned int rs, re, start; /* region start, region end */
/* promote scan_hint to contig_hint */
if (block->scan_hint) {
@@ -765,10 +737,9 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
block->right_free = 0;
/* iterate over free areas and update the contig hints */
- pcpu_for_each_unpop_region(alloc_map, rs, re, start,
- PCPU_BITMAP_BLOCK_BITS) {
+ bitmap_for_each_clear_region(alloc_map, rs, re, start,
+ PCPU_BITMAP_BLOCK_BITS)
pcpu_block_update(block, rs, re);
- }
}
/**
@@ -1041,13 +1012,13 @@ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
int *next_off)
{
- int page_start, page_end, rs, re;
+ unsigned int page_start, page_end, rs, re;
page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
rs = page_start;
- pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
+ bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
if (rs >= page_end)
return true;
@@ -1702,13 +1673,13 @@ area_found:
/* populate if not all pages are already there */
if (!is_atomic) {
- int page_start, page_end, rs, re;
+ unsigned int page_start, page_end, rs, re;
page_start = PFN_DOWN(off);
page_end = PFN_UP(off + size);
- pcpu_for_each_unpop_region(chunk->populated, rs, re,
- page_start, page_end) {
+ bitmap_for_each_clear_region(chunk->populated, rs, re,
+ page_start, page_end) {
WARN_ON(chunk->immutable);
ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
@@ -1858,10 +1829,10 @@ static void pcpu_balance_workfn(struct work_struct *work)
spin_unlock_irq(&pcpu_lock);
list_for_each_entry_safe(chunk, next, &to_free, list) {
- int rs, re;
+ unsigned int rs, re;
- pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
- chunk->nr_pages) {
+ bitmap_for_each_set_region(chunk->populated, rs, re, 0,
+ chunk->nr_pages) {
pcpu_depopulate_chunk(chunk, rs, re);
spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, rs, re);
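The set-region counterpart used here to walk populated page runs for
depopulation is the mirror image (again a sketch under the same assumption):

	static inline void bitmap_next_set_region(unsigned long *bitmap,
						  unsigned int *rs, unsigned int *re,
						  unsigned int end)
	{
		*rs = find_next_bit(bitmap, end, *rs);		/* run start */
		*re = find_next_zero_bit(bitmap, end, *rs + 1);	/* run end */
	}

	/* iterate over set (populated) regions in [@start, @end) */
	#define bitmap_for_each_set_region(bitmap, rs, re, start, end)		\
		for ((rs) = (start),						\
		     bitmap_next_set_region((bitmap), &(rs), &(re), (end));	\
		     (rs) < (re);						\
		     (rs) = (re) + 1,						\
		     bitmap_next_set_region((bitmap), &(rs), &(re), (end)))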
@@ -1893,7 +1864,7 @@ retry_pop:
}
for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
- int nr_unpop = 0, rs, re;
+ unsigned int nr_unpop = 0, rs, re;
if (!nr_to_pop)
break;
@@ -1910,9 +1881,9 @@ retry_pop:
continue;
/* @chunk can't go away while pcpu_alloc_mutex is held */
- pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
- chunk->nr_pages) {
- int nr = min(re - rs, nr_to_pop);
+ bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
+ chunk->nr_pages) {
+ int nr = min_t(int, re - rs, nr_to_pop);
ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
if (!ret) {
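A note on the min() to min_t() switch above: rs and re are now unsigned int,
so re - rs is unsigned while nr_to_pop remains int, and the kernel's min()
rejects mixed-signedness operands at build time. min_t() casts both sides to
the named type first; simplified (the in-tree macro adds unique temporaries
and stricter checking):

	#define min_t(type, x, y) ({		\
		type __x = (x);			\
		type __y = (y);			\
		__x < __y ? __x : __y; })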
@@ -2125,7 +2096,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
void *ptr;
int unit;
- base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
+ base_size = ALIGN(struct_size(ai, groups, nr_groups),
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
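The struct_size() change above replaces an open-coded trailing-array size with
the overflow-checked helper from include/linux/overflow.h. Ignoring the
overflow handling (the real macro saturates to SIZE_MAX rather than wrapping),
it computes the same value as before:

	/* struct_size(p, member, n) ~ sizeof(*(p)) + (n) * sizeof((p)->member[0]) */
	base_size = ALIGN(struct_size(ai, groups, nr_groups),
			  __alignof__(ai->groups[0].cpu_map[0]));

so the result is unchanged; the gain is the checked multiplication.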
@@ -2220,7 +2191,7 @@ static void pcpu_dump_alloc_info(const char *lvl,
* @base_addr: mapped address
*
* Initialize the first percpu chunk which contains the kernel static
- * perpcu area. This function is to be called from arch percpu area
+ * percpu area. This function is to be called from arch percpu area
* setup path.
*
* @ai contains all information necessary to initialize the first
@@ -2267,12 +2238,9 @@ static void pcpu_dump_alloc_info(const char *lvl,
* share the same vm, but use offset regions in the area allocation map.
* The chunk serving the dynamic region is circulated in the chunk slots
* and available for dynamic allocation like any other chunk.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
*/
-int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
- void *base_addr)
+void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ void *base_addr)
{
size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
size_t static_size, dyn_size;
@@ -2457,7 +2425,6 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
/* we're done */
pcpu_base_addr = base_addr;
- return 0;
}
#ifdef CONFIG_SMP
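With pcpu_setup_first_chunk() now returning void (its internal sanity checks,
the PCPU_SETUP_BUG_ON() macros, already panic on failure, so the int return
was always 0 in practice), the callers below drop their dead error handling,
e.g.:

	pcpu_setup_first_chunk(ai, base);	/* cannot fail; panics internally */

This is also why rc in pcpu_embed_first_chunk() and pcpu_page_first_chunk()
gains an explicit = 0 initializer: it is no longer assigned from this call on
the success path.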
@@ -2710,7 +2677,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
struct pcpu_alloc_info *ai;
size_t size_sum, areas_size;
unsigned long max_distance;
- int group, i, highest_group, rc;
+ int group, i, highest_group, rc = 0;
ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
cpu_distance_fn);
@@ -2795,7 +2762,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
ai->dyn_size, ai->unit_size);
- rc = pcpu_setup_first_chunk(ai, base);
+ pcpu_setup_first_chunk(ai, base);
goto out_free;
out_free_areas:
@@ -2839,7 +2806,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
int unit_pages;
size_t pages_size;
struct page **pages;
- int unit, i, j, rc;
+ int unit, i, j, rc = 0;
int upa;
int nr_g0_units;
@@ -2920,7 +2887,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
unit_pages, psize_str, ai->static_size,
ai->reserved_size, ai->dyn_size);
- rc = pcpu_setup_first_chunk(ai, vm.addr);
+ pcpu_setup_first_chunk(ai, vm.addr);
goto out_free_ar;
enomem:
@@ -3014,8 +2981,7 @@ void __init setup_per_cpu_areas(void)
ai->groups[0].nr_units = 1;
ai->groups[0].cpu_map[0] = 0;
- if (pcpu_setup_first_chunk(ai, fc) < 0)
- panic("Failed to initialize percpu areas.");
+ pcpu_setup_first_chunk(ai, fc);
pcpu_free_alloc_info(ai);
}