author     Shailendra Verma <shailendra.capricorn@gmail.com>  2015-05-23 10:38:14 +0530
committer  Tejun Heo <tj@kernel.org>                          2015-05-29 09:20:01 -0400
commit     402dd89d6cdbeeaab42b810542b487017725c628
tree       aabc445f7213d0f2e7eb8144e95f1c129bea1103 /kernel
parent     37b1ef31a568fc02e53587620226e5f3c66454c8
workqueue: fix typos in comments
tj: dropped iff -> if; iff means "if and only if", not a typo.  Spotted
by Randy Dunlap.
Signed-off-by: Shailendra Verma <shailendra.capricorn@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c9eaa4e5c867..5243d4b03087 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -988,7 +988,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
  * move_linked_works - move linked works to a list
  * @work: start of series of works to be scheduled
  * @head: target list to append @work to
- * @nextp: out paramter for nested worklist walking
+ * @nextp: out parameter for nested worklist walking
  *
  * Schedule linked works starting from @work to @head. Work series to
  * be scheduled starts at @work and includes any consecutive work with
@@ -3063,7 +3063,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  * init_worker_pool - initialize a newly zalloc'd worker_pool
  * @pool: worker_pool to initialize
  *
- * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs.
+ * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
  *
  * Return: 0 on success, -errno on failure. Even on failure, all fields
  * inside @pool proper are initialized and put_unbound_pool() can be called
@@ -4414,7 +4414,7 @@ static void rebind_workers(struct worker_pool *pool)
 	/*
 	 * Restore CPU affinity of all workers. As all idle workers should
 	 * be on the run-queue of the associated CPU before any local
-	 * wake-ups for concurrency management happen, restore CPU affinty
+	 * wake-ups for concurrency management happen, restore CPU affinity
 	 * of all workers first and then clear UNBOUND. As we're called
 	 * from CPU_ONLINE, the following shouldn't fail.
 	 */
@@ -5105,7 +5105,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 	int ret;
 
 	/*
-	 * Adjusting max_active or creating new pwqs by applyting
+	 * Adjusting max_active or creating new pwqs by applying
 	 * attributes breaks ordering guarantee. Disallow exposing ordered
 	 * workqueues.
 	 */
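For context on the last hunk's comment: ordered workqueues execute at most
one work item at a time, in queueing order, so letting userspace retune
max_active through sysfs would break that guarantee -- hence
workqueue_sysfs_register() refuses ordered workqueues. Below is a minimal
module-style sketch of the guarantee itself; demo_wq, demo_fn, demo_work_a
and demo_work_b are hypothetical names invented for illustration, not part
of this commit.

/* Hypothetical demo module, not from this patch: shows the strict
 * one-at-a-time, in-order execution that an ordered workqueue provides. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	/* Runs strictly after any work queued before it on demo_wq. */
	pr_info("demo: work %p ran\n", work);
}

static DECLARE_WORK(demo_work_a, demo_fn);
static DECLARE_WORK(demo_work_b, demo_fn);

static int __init demo_init(void)
{
	/* Ordered workqueue: max_active is implicitly 1 and must stay 1,
	 * which is exactly what the sysfs check above protects. */
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_work_a);	/* runs first */
	queue_work(demo_wq, &demo_work_b);	/* runs only after a finishes */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains pending work, then frees */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Even if demo_work_a sleeps, demo_work_b waits; a plain alloc_workqueue()
with max_active > 1 would let the two run concurrently, which is the
reordering the rejected sysfs knob could otherwise introduce.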