Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c | 20 ++++++++++----------
1 file changed, 10 insertions, 10 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 78e7eded4063..070880fe1ff7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -64,7 +64,7 @@ DEFINE_SPINLOCK(hugetlb_lock);
  * prevent spurious OOMs when the hugepage pool is fully utilized.
  */
 static int num_fault_mutexes;
-static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
+struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -2482,7 +2482,7 @@ static void __exit hugetlb_exit(void)
 	}
 
 	kobject_put(hugepages_kobj);
-	kfree(htlb_fault_mutex_table);
+	kfree(hugetlb_fault_mutex_table);
 }
 module_exit(hugetlb_exit);
 
@@ -2515,12 +2515,12 @@ static int __init hugetlb_init(void)
 #else
 	num_fault_mutexes = 1;
 #endif
-	htlb_fault_mutex_table =
+	hugetlb_fault_mutex_table =
 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
-	BUG_ON(!htlb_fault_mutex_table);
+	BUG_ON(!hugetlb_fault_mutex_table);
 
 	for (i = 0; i < num_fault_mutexes; i++)
-		mutex_init(&htlb_fault_mutex_table[i]);
+		mutex_init(&hugetlb_fault_mutex_table[i]);
 	return 0;
 }
 module_init(hugetlb_init);
@@ -3454,7 +3454,7 @@ backout_unlocked:
 }
 
 #ifdef CONFIG_SMP
-static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 			    struct vm_area_struct *vma,
 			    struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
@@ -3479,7 +3479,7 @@ static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  * For uniprocesor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 			    struct vm_area_struct *vma,
 			    struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
@@ -3527,8 +3527,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
-	mutex_lock(&htlb_fault_mutex_table[hash]);
+	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
@@ -3613,7 +3613,7 @@ out_ptl:
 		put_page(pagecache_page);
 	}
 out_mutex:
-	mutex_unlock(&htlb_fault_mutex_table[hash]);
+	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
 	 * here we just wait to defer the next page fault to avoid busy loop and
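Every hunk in this patch makes the same change: the fault mutex table and its hash function lose their static qualifiers and gain the hugetlb_ prefix, so that code outside mm/hugetlb.c can take the same per-page mutex the fault handler takes. Below is a minimal sketch of what an external caller would look like; it assumes the matching declarations are exported through include/linux/hugetlb.h (a file this view, filtered to mm/hugetlb.c, does not show), and demo_serialize_with_faults() is a hypothetical illustration, not code from this commit.

/*
 * Assumed to be declared in include/linux/hugetlb.h after this change:
 */
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     struct address_space *mapping,
			     pgoff_t idx, unsigned long address);

/*
 * Hypothetical out-of-file caller (e.g. the hugetlbfs fallocate path
 * this rename prepares for).  It computes the same hash the fault path
 * computes, so any fault racing on the same page contends on the same
 * mutex and cannot instantiate the page concurrently.
 */
static void demo_serialize_with_faults(struct hstate *h,
				       struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       struct address_space *mapping,
				       pgoff_t idx, unsigned long address)
{
	u32 hash;

	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... instantiate or remove the huge page at 'idx' in 'mapping' ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}

The table-plus-hash design spreads unrelated faults across num_fault_mutexes locks (one per CPU on SMP, a single lock on UP, per the #ifdef CONFIG_SMP hunks above), while guaranteeing that two operations on the same page always hash to the same mutex; hugetlb_fault() relies on this to prevent two CPUs racing to instantiate the same page in the page cache.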