From 693ba15c9202fe0283404abe4066e1b986e284eb Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Tue, 12 Jun 2018 12:05:45 -0700
Subject: scsi: Remove percpu_ida

With its one user gone, remove the library code.

Signed-off-by: Matthew Wilcox
Reviewed-by: Jens Axboe
Signed-off-by: Martin K. Petersen
---
 lib/Makefile     |   2 +-
 lib/percpu_ida.c | 370 --------------------------------------------------------
 2 files changed, 1 insertion(+), 371 deletions(-)
 delete mode 100644 lib/percpu_ida.c

diff --git a/lib/Makefile b/lib/Makefile
index 956b320292fe..055420101965 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -40,7 +40,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+	 percpu-refcount.o rhashtable.o reciprocal_div.o \
	 once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index 9bbd9c5d375a..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
-	/*
-	 * Even though this is percpu, we need a lock for tag stealing by remote
-	 * CPUs:
-	 */
-	spinlock_t		lock;
-
-	/* nr_free/freelist form a stack of free IDs */
-	unsigned		nr_free;
-	unsigned		freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
-			     unsigned *src, unsigned *src_nr,
-			     unsigned nr)
-{
-	*src_nr -= nr;
-	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
-	*dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags.
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
-			      struct percpu_ida_cpu *tags)
-{
-	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
-	struct percpu_ida_cpu *remote;
-
-	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags; cpus_have_tags--) {
-		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
-		if (cpu >= nr_cpu_ids) {
-			cpu = cpumask_first(&pool->cpus_have_tags);
-			if (cpu >= nr_cpu_ids)
-				BUG();
-		}
-
-		pool->cpu_last_stolen = cpu;
-		remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
-		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
-		if (remote == tags)
-			continue;
-
-		spin_lock(&remote->lock);
-
-		if (remote->nr_free) {
-			memcpy(tags->freelist,
-			       remote->freelist,
-			       sizeof(unsigned) * remote->nr_free);
-
-			tags->nr_free = remote->nr_free;
-			remote->nr_free = 0;
-		}
-
-		spin_unlock(&remote->lock);
-
-		if (tags->nr_free)
-			break;
-	}
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
-				     struct percpu_ida_cpu *tags)
-{
-	move_tags(tags->freelist, &tags->nr_free,
-		  pool->freelist, &pool->nr_free,
-		  min(pool->nr_free, pool->percpu_batch_size));
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * percpu_ida_init()) - or -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE, of course).
- *
- * @state indicates whether or not to wait until a free id is available (it is
- * not used for internal memory allocations); thus if passed
- * TASK_UNINTERRUPTIBLE we may sleep however long it takes until another
- * thread frees an id (same semantics as a mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE; if passed TASK_INTERRUPTIBLE,
- * it may return -ERESTARTSYS when a signal is pending.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
-	DEFINE_WAIT(wait);
-	struct percpu_ida_cpu *tags;
-	unsigned long flags;
-	int tag = -ENOSPC;
-
-	tags = raw_cpu_ptr(pool->tag_cpu);
-	spin_lock_irqsave(&tags->lock, flags);
-
-	/* Fastpath */
-	if (likely(tags->nr_free)) {
-		tag = tags->freelist[--tags->nr_free];
-		spin_unlock_irqrestore(&tags->lock, flags);
-		return tag;
-	}
-	spin_unlock_irqrestore(&tags->lock, flags);
-
-	while (1) {
-		spin_lock_irqsave(&pool->lock, flags);
-		tags = this_cpu_ptr(pool->tag_cpu);
-
-		/*
-		 * prepare_to_wait() must come before steal_tags(), in case
-		 * percpu_ida_free() on another cpu flips a bit in
-		 * cpus_have_tags
-		 *
-		 * global lock held and irqs disabled, don't need percpu lock
-		 */
-		if (state != TASK_RUNNING)
-			prepare_to_wait(&pool->wait, &wait, state);
-
-		if (!tags->nr_free)
-			alloc_global_tags(pool, tags);
-		if (!tags->nr_free)
-			steal_tags(pool, tags);
-
-		if (tags->nr_free) {
-			tag = tags->freelist[--tags->nr_free];
-			if (tags->nr_free)
-				cpumask_set_cpu(smp_processor_id(),
-						&pool->cpus_have_tags);
-		}
-
-		spin_unlock_irqrestore(&pool->lock, flags);
-
-		if (tag >= 0 || state == TASK_RUNNING)
-			break;
-
-		if (signal_pending_state(state, current)) {
-			tag = -ERESTARTSYS;
-			break;
-		}
-
-		schedule();
-	}
-	if (state != TASK_RUNNING)
-		finish_wait(&pool->wait, &wait);
-
-	return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
-	struct percpu_ida_cpu *tags;
-	unsigned long flags;
-	unsigned nr_free;
-
-	BUG_ON(tag >= pool->nr_tags);
-
-	tags = raw_cpu_ptr(pool->tag_cpu);
-
-	spin_lock_irqsave(&tags->lock, flags);
-	tags->freelist[tags->nr_free++] = tag;
-
-	nr_free = tags->nr_free;
-
-	if (nr_free == 1) {
-		cpumask_set_cpu(smp_processor_id(),
-				&pool->cpus_have_tags);
-		wake_up(&pool->wait);
-	}
-	spin_unlock_irqrestore(&tags->lock, flags);
-
-	if (nr_free == pool->percpu_max_size) {
-		spin_lock_irqsave(&pool->lock, flags);
-		spin_lock(&tags->lock);
-
-		if (tags->nr_free == pool->percpu_max_size) {
-			move_tags(pool->freelist, &pool->nr_free,
-				  tags->freelist, &tags->nr_free,
-				  pool->percpu_batch_size);
-
-			wake_up(&pool->wait);
-		}
-		spin_unlock(&tags->lock);
-		spin_unlock_irqrestore(&pool->lock, flags);
-	}
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
-	free_percpu(pool->tag_cpu);
-	free_pages((unsigned long) pool->freelist,
-		   get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
-		      unsigned long max_size, unsigned long batch_size)
-{
-	unsigned i, cpu, order;
-
-	memset(pool, 0, sizeof(*pool));
-
-	init_waitqueue_head(&pool->wait);
-	spin_lock_init(&pool->lock);
-	pool->nr_tags = nr_tags;
-	pool->percpu_max_size = max_size;
-	pool->percpu_batch_size = batch_size;
-
-	/* Guard against overflow */
-	if (nr_tags > (unsigned) INT_MAX + 1) {
-		pr_err("percpu_ida_init(): nr_tags too large\n");
-		return -EINVAL;
-	}
-
-	order = get_order(nr_tags * sizeof(unsigned));
-	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
-	if (!pool->freelist)
-		return -ENOMEM;
-
-	for (i = 0; i < nr_tags; i++)
-		pool->freelist[i] = i;
-
-	pool->nr_free = nr_tags;
-
-	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
-				       pool->percpu_max_size * sizeof(unsigned),
-				       sizeof(unsigned));
-	if (!pool->tag_cpu)
-		goto err;
-
-	for_each_possible_cpu(cpu)
-		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
-	return 0;
-err:
-	percpu_ida_destroy(pool);
-	return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: iterate callback function
- * @data: parameter for @fn
- *
- * Note: this does not guarantee a strict iteration of all free ids. Some free
- * ids may be missed, some may be visited twice, and some may no longer be
- * free by the time they are visited.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
-			     void *data)
-{
-	unsigned long flags;
-	struct percpu_ida_cpu *remote;
-	unsigned cpu, i;
-	int err = 0;
-
-	for_each_possible_cpu(cpu) {
-		remote = per_cpu_ptr(pool->tag_cpu, cpu);
-		spin_lock_irqsave(&remote->lock, flags);
-		for (i = 0; i < remote->nr_free; i++) {
-			err = fn(remote->freelist[i], data);
-			if (err)
-				break;
-		}
-		spin_unlock_irqrestore(&remote->lock, flags);
-		if (err)
-			goto out;
-	}
-
-	spin_lock_irqsave(&pool->lock, flags);
-	for (i = 0; i < pool->nr_free; i++) {
-		err = fn(pool->freelist[i], data);
-		if (err)
-			break;
-	}
-	spin_unlock_irqrestore(&pool->lock, flags);
-out:
-	return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return the number of free tags on a cpu or in the global pool
- * @pool: pool to query
- * @cpu: specific cpu, or the global pool if @cpu == nr_cpu_ids
- *
- * Note: this returns only a snapshot of the number of free tags.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
-	struct percpu_ida_cpu *remote;
-	if (cpu == nr_cpu_ids)
-		return pool->nr_free;
-	remote = per_cpu_ptr(pool->tag_cpu, cpu);
-	return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
--
cgit v1.2.1
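
For context, the interface removed above was used roughly as follows. This is a minimal sketch reconstructed from the kernel-doc in the deleted file; the pool name, tag count, and surrounding functions are illustrative, and error handling is elided:

	#include <linux/sched.h>
	#include <linux/percpu_ida.h>

	static struct percpu_ida tag_pool;

	static int example_init(void)
	{
		/* Pool of 128 tags: integers in [0, 128), typically used to
		 * index a preallocated array of per-command structures. */
		return percpu_ida_init(&tag_pool, 128);
	}

	static void example_io(void)
	{
		/* With TASK_UNINTERRUPTIBLE this sleeps until a tag is free;
		 * with TASK_RUNNING it would return -ENOSPC instead. */
		int tag = percpu_ida_alloc(&tag_pool, TASK_UNINTERRUPTIBLE);

		/* ... issue the command tracked by this tag ... */

		percpu_ida_free(&tag_pool, tag);
	}

The "one user gone" in the commit message refers to the SCSI target code, which had already been converted to sbitmap for the same percpu tag-allocation job.
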
From 624fa7790f80575a4ec28fbdb2034097dc18d051 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Fri, 22 Jun 2018 14:54:49 -0700
Subject: scsi: klist: Make it safe to use klists in atomic context

When using the legacy block layer instead of blk-mq, the
scsi_transport_srp implementation cannot avoid iterating over a klist
from atomic context. Hence this patch, which makes it safe to use
klists in atomic context.

This patch avoids the following lockdep report:

WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected

Possible interrupt unsafe locking scenario:

      CPU0                    CPU1
      ----                    ----
 lock(&(&k->k_lock)->rlock);
                              local_irq_disable();
                              lock(&(&q->__queue_lock)->rlock);
                              lock(&(&k->k_lock)->rlock);
<Interrupt>
 lock(&(&q->__queue_lock)->rlock);

stack backtrace:
Workqueue: kblockd blk_timeout_work
Call Trace:
 dump_stack+0xa4/0xf5
 check_usage+0x6e6/0x700
 __lock_acquire+0x185d/0x1b50
 lock_acquire+0xd2/0x260
 _raw_spin_lock+0x32/0x50
 klist_next+0x47/0x190
 device_for_each_child+0x8e/0x100
 srp_timed_out+0xaf/0x1d0 [scsi_transport_srp]
 scsi_times_out+0xd4/0x410 [scsi_mod]
 blk_rq_timed_out+0x36/0x70
 blk_timeout_work+0x1b5/0x220
 process_one_work+0x4fe/0xad0
 worker_thread+0x63/0x5a0
 kthread+0x1c1/0x1e0
 ret_from_fork+0x24/0x30

See also commit c9ddf73476ff ("scsi: scsi_transport_srp: Fix shost to
rport translation").

Signed-off-by: Bart Van Assche
Cc: Martin K. Petersen
Cc: James Bottomley
Acked-by: Greg Kroah-Hartman
Signed-off-by: Martin K. Petersen
---
 lib/klist.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *prev;
+	unsigned long flags;

-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@
		prev = to_klist_node(prev->n_node.prev);
	}

-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;
+	unsigned long flags;

-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@
		next = to_klist_node(next->n_node.next);
	}

-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
--
cgit v1.2.1
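
The conversion above follows the standard pattern for a lock that is also taken from the block-layer timeout path: keep interrupts disabled for as long as k_lock is held, so that path can never interrupt a holder of the same lock on that CPU. A minimal standalone sketch of the before/after (the lock and function names here are invented for illustration; only the spin_lock_irqsave()/spin_unlock_irqrestore() calls correspond to the actual change):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(k_lock);

	/* Before: SOFTIRQ-unsafe. If a block-layer timeout fires on this CPU
	 * while k_lock is held, and the timeout handler also takes k_lock
	 * (klist_next() via device_for_each_child()), the CPU deadlocks. */
	static void walk_unsafe(void)
	{
		spin_lock(&k_lock);
		/* ... iterate the klist ... */
		spin_unlock(&k_lock);
	}

	/* After: interrupts stay off while k_lock is held, so the timeout
	 * path cannot run on this CPU until the lock is released. */
	static void walk_safe(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&k_lock, flags);
		/* ... iterate the klist ... */
		spin_unlock_irqrestore(&k_lock, flags);
	}

The cost is a slightly longer interrupt-off section inside klist_prev() and klist_next(); both loops are short, so this is a reasonable price for making klist iteration safe in atomic context.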