 mm/hugetlb.c        | 2 +-
 mm/memory.c         | 2 +-
 mm/memory_hotplug.c | 2 +-
 mm/mempool.c        | 2 +-
 mm/page-writeback.c | 2 +-
 mm/page_alloc.c     | 8 ++++----
 mm/prio_tree.c      | 2 +-
 mm/slab.c           | 6 +++---
 mm/swap.c           | 2 +-
 mm/vmalloc.c        | 6 +++---
 mm/vmscan.c         | 2 +-
 11 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 034617f8cdb2..8b809ecefa39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1017,7 +1017,7 @@ static long region_chg(struct list_head *head, long f, long t)
/* If we are below the current region then a new region is required.
* Subtle, allocate a new region at the position but make it zero
- * size such that we can guarentee to record the reservation. */
+ * size such that we can guarantee to record the reservation. */
if (&rg->link == head || t < rg->from) {
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
if (!nrg)
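As context for the fixed comment: when the requested range [f, t) lies below every existing region, region_chg() inserts a placeholder with from == to, so the later region_add() pass is guaranteed to find a slot and cannot fail to record the reservation. A minimal sketch of that pattern, reusing the file_region field names from mm/hugetlb.c (illustrative, not the full function):

    #include <linux/list.h>

    struct file_region {
            struct list_head link;
            long from;      /* first page offset covered */
            long to;        /* one past the last page offset covered */
    };

    /* insert a zero-size placeholder before rg at position f */
    static void record_placeholder(struct file_region *nrg,
                                   struct file_region *rg, long f)
    {
            nrg->from = f;
            nrg->to = f;    /* from == to: records the slot without
                             * covering any pages yet */
            list_add(&nrg->link, rg->link.prev);
    }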
diff --git a/mm/memory.c b/mm/memory.c
index 142683df8755..eefd5b68bc42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2713,7 +2713,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
return 0;
down_read(&mm->mmap_sem);
- /* ignore errors, just check how much was sucessfully transfered */
+ /* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, ret, offset;
void *maddr;
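The fixed comment captures the calling convention: access_process_vm() has no error return, only a count of bytes actually moved. A hedged caller sketch, using the signature from the hunk header above (peek_remote() is a hypothetical helper):

    /* read len bytes from another task's address space; write = 0 */
    static int peek_remote(struct task_struct *child, unsigned long addr,
                           void *buf, int len)
    {
            int copied = access_process_vm(child, addr, buf, len, 0);

            /* no errno to inspect: a short count is the only failure signal */
            return copied == len ? 0 : -EIO;
    }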
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 091b9c6c2529..1833879f8438 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -121,7 +121,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
err = __add_section(zone, i << PFN_SECTION_SHIFT);
/*
- * EEXIST is finally dealed with by ioresource collision
+ * EEXIST is finally dealt with by ioresource collision
* check. see add_memory() => register_memory_resource()
* Warning will be printed if there is collision.
*/
diff --git a/mm/mempool.c b/mm/mempool.c
index 02d5ec3feabc..a46eb1b4bb66 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -299,7 +299,7 @@ EXPORT_SYMBOL(mempool_free_slab);
/*
* A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
- * specfied by pool_data
+ * specified by pool_data
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
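These two helpers are meant to be handed to mempool_create(), with the allocation size smuggled through pool_data; the kernel wraps this exact pattern as mempool_create_kmalloc_pool(). A short sketch:

    /* pool guaranteeing at least 16 buffers of 256 bytes each */
    mempool_t *pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
                                     (void *)(unsigned long)256);

    void *buf = mempool_alloc(pool, GFP_KERNEL); /* falls back to reserve */
    /* ... use buf ... */
    mempool_free(buf, pool);
    mempool_destroy(pool);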
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7845462064f4..838a5e31394c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -989,7 +989,7 @@ int __set_page_dirty_no_writeback(struct page *page)
* mapping is pinned by the vma's ->vm_file reference.
*
* We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() insode tree_lock.
+ * mapping by re-checking page_mapping() inside tree_lock.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 43f757fcf30f..da69d833e067 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -123,7 +123,7 @@ static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
- * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
+ * MAX_ACTIVE_REGIONS determines the maximum number of distinct
* ranges of memory (RAM) that may be registered with add_active_range().
* Ranges passed to add_active_range() will be merged if possible
* so the number of times add_active_range() can be called is
@@ -1260,7 +1260,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
* skip over zones that are not allowed by the cpuset, or that have
* been recently (in last second) found to be nearly full. See further
* comments in mmzone.h. Reduces cache footprint of zonelist scans
- * that have to skip over alot of full or unallowed zones.
+ * that have to skip over a lot of full or unallowed zones.
*
* If the zonelist cache is present in the passed in zonelist, then
* returns a pointer to the allowed node mask (either the current
@@ -2358,7 +2358,7 @@ void build_all_zonelists(void)
__build_all_zonelists(NULL);
cpuset_init_current_mems_allowed();
} else {
- /* we have to stop all cpus to guaranntee there is no user
+ /* we have to stop all cpus to guarantee there is no user
of zonelist */
stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
/* cpuset refresh routine should be here */
@@ -2864,7 +2864,7 @@ static int __meminit first_active_region_index_in_nid(int nid)
/*
* Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardles of node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
*/
static int __meminit next_active_region_index_in_nid(int index, int nid)
{
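For context, add_active_range() was how CONFIG_ARCH_POPULATES_NODE_MAP-era arch code described its RAM to the core. A hedged boot-time sketch (nid, the pfn variables, and dma_limit_pfn are placeholders for arch-specific values):

    /* one call per RAM span; overlapping/adjacent spans are merged,
     * keeping the total under MAX_ACTIVE_REGIONS */
    add_active_range(nid, start_pfn, end_pfn);

    /* once every span is registered, hand the zone limits to the core */
    unsigned long max_zone_pfns[MAX_NR_ZONES] = {
            [ZONE_DMA]    = dma_limit_pfn,      /* placeholder limits */
            [ZONE_NORMAL] = max_low_pfn,
    };
    free_area_init_nodes(max_zone_pfns);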
diff --git a/mm/prio_tree.c b/mm/prio_tree.c
index b4e76c25f953..603ae98d9694 100644
--- a/mm/prio_tree.c
+++ b/mm/prio_tree.c
@@ -34,7 +34,7 @@
* Radix priority search tree for address_space->i_mmap
*
* For each vma that map a unique set of file pages i.e., unique [radix_index,
- * heap_index] value, we have a corresponing priority search tree node. If
+ * heap_index] value, we have a corresponding priority search tree node. If
* multiple vmas have identical [radix_index, heap_index] value, then one of
* them is used as a tree node and others are stored in a vm_set list. The tree
* node points to the first vma (head) of the list using vm_set.head.
diff --git a/mm/slab.c b/mm/slab.c
index 54eb555c4ef8..cfa6be4e378e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -26,7 +26,7 @@
* initialized objects.
*
* This means, that your constructor is used only for newly allocated
- * slabs and you must pass objects with the same intializations to
+ * slabs and you must pass objects with the same initializations to
* kmem_cache_free.
*
* Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
@@ -1369,7 +1369,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
* structure is usually allocated from kmem_cache_create() and
* gets destroyed at kmem_cache_destroy().
*/
- /* fall thru */
+ /* fall through */
#endif
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
@@ -3806,7 +3806,7 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
- * This initializes kmem_list3 or resizes varioius caches for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep)
{
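The constructor rule fixed in the first slab.c hunk is the subtle part of this API: the ctor runs only when a slab page is first populated, so objects must go back to kmem_cache_free() in their constructed state. A sketch of the contract (modern one-argument ctor shown; kernels of this vintage passed the cache pointer and flags as well):

    struct my_obj {                 /* hypothetical object type */
            spinlock_t lock;
    };

    static void my_obj_ctor(void *obj)
    {
            struct my_obj *o = obj;

            spin_lock_init(&o->lock);
    }

    struct kmem_cache *cache =
            kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
                              SLAB_HWCACHE_ALIGN, my_obj_ctor);
    struct my_obj *o = kmem_cache_alloc(cache, GFP_KERNEL);

    /* restore the constructed state (lock unlocked) before freeing, so a
     * reused object is indistinguishable from a freshly constructed one */
    kmem_cache_free(cache, o);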
diff --git a/mm/swap.c b/mm/swap.c
index a65eff8a517a..9ac88323d237 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -5,7 +5,7 @@
*/
/*
- * This file contains the default values for the opereation of the
+ * This file contains the default values for the operation of the
* Linux VM subsystem. Fine-tuning documentation can be found in
* Documentation/sysctl/vm.txt.
* Started 18.12.91
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2e01af365848..af77e171e339 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -247,7 +247,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
EXPORT_SYMBOL_GPL(__get_vm_area);
/**
- * get_vm_area - reserve a contingous kernel virtual area
+ * get_vm_area - reserve a contiguous kernel virtual area
* @size: size of the area
* @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
*
@@ -303,7 +303,7 @@ found:
}
/**
- * remove_vm_area - find and remove a contingous kernel virtual area
+ * remove_vm_area - find and remove a continuous kernel virtual area
* @addr: base address
*
* Search for the kernel VM area starting at @addr, and remove it.
@@ -364,7 +364,7 @@ static void __vunmap(void *addr, int deallocate_pages)
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
*
- * Free the virtually contiguous memory area starting at @addr, as
+ * Free the virtually continuous memory area starting at @addr, as
* obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
* NULL, no operation is performed.
*
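Taken together, the three kerneldoc fixes in this file describe two levels of the same API: get_vm_area() hands back only a reserved stretch of kernel virtual address space, while vmalloc()/vfree() also allocate and release the backing pages. A minimal sketch:

    /* address space only: callers such as ioremap() map pages in later */
    struct vm_struct *area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);
    if (!area)
            return -ENOMEM;

    /* full-service: pages plus address space in one call */
    void *p = vmalloc(64 * 1024);
    /* ... use p ... */
    vfree(p);
    vfree(NULL);    /* documented no-op, per the comment above */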
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cb474cc99645..e5a9597e3bbc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(unregister_shrinker);
* percentages of the lru and ageable caches. This should balance the seeks
* generated by these structures.
*
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increase the pressure on
* slab to avoid swapping.
*
* We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
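The seeks balancing described here is driven by each shrinker's .seeks field. A sketch against the 2.6.23-era API that this file exports (the callback signature changed in later kernels; prune_my_cache() and my_cache_count() are hypothetical helpers):

    static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
            if (nr_to_scan)
                    prune_my_cache(nr_to_scan);
            return my_cache_count();        /* entries still cached */
    }

    static struct shrinker my_shrinker = {
            .shrink = my_cache_shrink,
            .seeks  = DEFAULT_SEEKS,        /* ~cost to recreate one entry */
    };

    register_shrinker(&my_shrinker);  /* paired with unregister_shrinker() */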