From 4736346bb47966254ee6d1fc50267a2609791cba Mon Sep 17 00:00:00 2001
From: Geliang Tang
Date: Mon, 16 Nov 2015 23:30:00 +0800
Subject: elevator: use list_{first,prev,next}_entry

To make the intention clearer, use list_{first,prev,next}_entry
instead of list_entry.

Signed-off-by: Geliang Tang
Signed-off-by: Jens Axboe
---
 block/noop-iosched.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 3de89d4690f3..a163c487cf38 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
 static int noop_dispatch(struct request_queue *q, int force)
 {
     struct noop_data *nd = q->elevator->elevator_data;
+    struct request *rq;
 
-    if (!list_empty(&nd->queue)) {
-        struct request *rq;
-        rq = list_entry(nd->queue.next, struct request, queuelist);
+    rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
+    if (rq) {
         list_del_init(&rq->queuelist);
         elv_dispatch_sort(q, rq);
         return 1;
@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)
     if (rq->queuelist.prev == &nd->queue)
         return NULL;
 
-    return list_entry(rq->queuelist.prev, struct request, queuelist);
+    return list_prev_entry(rq, queuelist);
 }
 
 static struct request *
@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)
     if (rq->queuelist.next == &nd->queue)
         return NULL;
 
-    return list_entry(rq->queuelist.next, struct request, queuelist);
+    return list_next_entry(rq, queuelist);
 }
 
 static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
-- 
cgit v1.2.1

From 1b2ff19e6a957b1ef0f365ad331b608af80e932e Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Thu, 12 Nov 2015 14:25:52 +0100
Subject: blk-flush: Queue through IO scheduler when flush not required

Currently blk_insert_flush() just adds the flush request to
q->queue_head when no flushing is actually required. That completely
bypasses the IO scheduler, so e.g. CFQ can be idling, waiting for a new
request to arrive, and will idle through the whole window
unnecessarily. Luckily this only happens in rare cases, as the checks
in generic_make_request_checks() usually clear the FLUSH and FUA flags
early when they are not needed.

When no flushing is actually required, we can easily fix the problem by
properly queueing the request through the IO scheduler. Ideally the IO
scheduler should also be made aware of requests queued via
blk_flush_queue_rq(). However, inserting a flush request through the IO
scheduler can have unwanted side effects: because of flush batching,
delaying the flush request in the IO scheduler would delay all flush
requests, possibly coming from other processes. So we keep adding those
requests directly to q->queue_head.
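For reference, the branch being changed sits in blk_insert_flush() and,
condensed from the 4.3-era block/blk-flush.c, looks roughly as follows.
This is an illustration-only fragment, not a standalone build: REQ_FSEQ_*
are the flush-policy bits defined in that file, and the actual one-line
change is in the hunk below.

    /*
     * Condensed sketch of blk_insert_flush() (4.3-era, simplified):
     * a request that carries data but needs neither a pre-flush nor
     * a post-flush is an ordinary request, so queue it through the
     * IO scheduler instead of bypassing it via q->queue_head.
     */
    if ((policy & REQ_FSEQ_DATA) &&
        !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
        if (q->mq_ops) {
            blk_mq_insert_request(rq, false, false, true);
        } else
            /* was: list_add_tail(&rq->queuelist, &q->queue_head); */
            q->elevator->type->ops.elevator_add_req_fn(q, rq);
        return;
    }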
Signed-off-by: Jan Kara
Reviewed-by: Jeff Moyer
Signed-off-by: Jens Axboe
---
 block/blk-flush.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..c81d56ec308f 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -422,7 +422,7 @@ void blk_insert_flush(struct request *rq)
         if (q->mq_ops) {
             blk_mq_insert_request(rq, false, false, true);
         } else
-            list_add_tail(&rq->queuelist, &q->queue_head);
+            q->elevator->type->ops.elevator_add_req_fn(q, rq);
         return;
     }
 
-- 
cgit v1.2.1

From 02e2a5bfebe99edcf9d694575a75032d53fe1b73 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Thu, 19 Nov 2015 17:18:54 -0800
Subject: mac: validate mac_partition is within sector

If md->signature == MAC_DRIVER_MAGIC and md->block_size == 1023, a
single 512-byte sector would be read (secsize / 512). However, the
partition structure would be located past the end of the buffer
(secsize % 512).

Signed-off-by: Kees Cook
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe
---
 block/partitions/mac.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index c2c48ec64b27..621317ac4d59 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
     Sector sect;
     unsigned char *data;
     int slot, blocks_in_map;
-    unsigned secsize;
+    unsigned secsize, datasize, partoffset;
 #ifdef CONFIG_PPC_PMAC
     int found_root = 0;
     int found_root_goodness = 0;
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
     }
     secsize = be16_to_cpu(md->block_size);
     put_dev_sector(sect);
-    data = read_part_sector(state, secsize/512, &sect);
+    datasize = round_down(secsize, 512);
+    data = read_part_sector(state, datasize / 512, &sect);
     if (!data)
         return -1;
-    part = (struct mac_partition *) (data + secsize%512);
+    partoffset = secsize % 512;
+    if (partoffset + sizeof(*part) > datasize)
+        return -1;
+    part = (struct mac_partition *) (data + partoffset);
     if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
         put_dev_sector(sect);
         return 0;	/* not a MacOS disk */
-- 
cgit v1.2.1

From b094f89ca42fbb8ce40174d5f85ca8430e499da6 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 20 Nov 2015 20:29:45 -0700
Subject: blk-mq: fix calling unplug callbacks with preempt disabled

Liu reported that running certain parts of xfstests threw the
following error:

BUG: sleeping function called from invalid context at mm/page_alloc.c:3190
in_atomic(): 1, irqs_disabled(): 0, pid: 6, name: kworker/u16:0
3 locks held by kworker/u16:0/6:
 #0: ("writeback"){++++.+}, at: [] process_one_work+0x173/0x730
 #1: ((&(&wb->dwork)->work)){+.+.+.}, at: [] process_one_work+0x173/0x730
 #2: (&type->s_umount_key#44){+++++.}, at: [] trylock_super+0x25/0x60
CPU: 5 PID: 6 Comm: kworker/u16:0 Tainted: G OE 4.3.0+ #3
Hardware name: Red Hat KVM, BIOS Bochs 01/01/2011
Workqueue: writeback wb_workfn (flush-btrfs-108)
 ffffffff81a3abab ffff88042e282ba8 ffffffff8130191b ffffffff81a3abab
 0000000000000c76 ffff88042e282ba8 ffff88042e27c180 ffff88042e282bd8
 ffffffff8108ed95 ffff880400000004 0000000000000000 0000000000000c76
Call Trace:
 [] dump_stack+0x4f/0x74
 [] ___might_sleep+0x185/0x240
 [] __might_sleep+0x52/0x90
 [] __alloc_pages_nodemask+0x268/0x410
 [] ? sched_clock_local+0x1c/0x90
 [] ? local_clock+0x21/0x40
 [] ? __lock_release+0x420/0x510
 [] ? __lock_acquired+0x16c/0x3c0
 [] alloc_pages_current+0xc5/0x210
 [] ? rbio_is_full+0x55/0x70 [btrfs]
 [] ? mark_held_locks+0x78/0xa0
 [] ? _raw_spin_unlock_irqrestore+0x40/0x60
 [] full_stripe_write+0x5a/0xc0 [btrfs]
 [] __raid56_parity_write+0x39/0x60 [btrfs]
 [] run_plug+0x11b/0x140 [btrfs]
 [] btrfs_raid_unplug+0x23/0x70 [btrfs]
 [] blk_flush_plug_list+0x82/0x1f0
 [] blk_sq_make_request+0x1f9/0x740
 [] ? generic_make_request_checks+0x222/0x7c0
 [] ? blk_queue_enter+0x124/0x310
 [] ? blk_queue_enter+0x92/0x310
 [] generic_make_request+0x172/0x2c0
 [] ? generic_make_request+0x164/0x2c0
 [] submit_bio+0x70/0x140
 [] ? rbio_add_io_page+0x99/0x150 [btrfs]
 [] finish_rmw+0x4d9/0x600 [btrfs]
 [] full_stripe_write+0x9c/0xc0 [btrfs]
 [] raid56_parity_write+0xef/0x160 [btrfs]
 [] btrfs_map_bio+0xe3/0x2d0 [btrfs]
 [] btrfs_submit_bio_hook+0x8d/0x1d0 [btrfs]
 [] submit_one_bio+0x74/0xb0 [btrfs]
 [] submit_extent_page+0xe5/0x1c0 [btrfs]
 [] __extent_writepage_io+0x408/0x4c0 [btrfs]
 [] ? alloc_dummy_extent_buffer+0x140/0x140 [btrfs]
 [] __extent_writepage+0x218/0x3a0 [btrfs]
 [] ? mark_held_locks+0x78/0xa0
 [] extent_write_cache_pages.clone.0+0x2f9/0x400 [btrfs]
 [] extent_writepages+0x52/0x70 [btrfs]
 [] ? btrfs_set_inode_index+0x70/0x70 [btrfs]
 [] btrfs_writepages+0x27/0x30 [btrfs]
 [] do_writepages+0x23/0x40
 [] __writeback_single_inode+0x89/0x4d0
 [] ? writeback_sb_inodes+0x260/0x480
 [] ? writeback_sb_inodes+0x260/0x480
 [] ? writeback_sb_inodes+0x15f/0x480
 [] writeback_sb_inodes+0x2d2/0x480
 [] ? down_read_trylock+0x57/0x60
 [] ? trylock_super+0x25/0x60
 [] ? rcu_read_lock_sched_held+0x4f/0x90
 [] __writeback_inodes_wb+0x8c/0xc0
 [] wb_writeback+0x2b5/0x500
 [] ? mark_held_locks+0x78/0xa0
 [] ? __local_bh_enable_ip+0x68/0xc0
 [] ? wb_do_writeback+0x62/0x310
 [] wb_do_writeback+0xc1/0x310
 [] ? set_worker_desc+0x79/0x90
 [] wb_workfn+0x92/0x330
 [] process_one_work+0x223/0x730
 [] ? process_one_work+0x173/0x730
 [] ? worker_thread+0x18f/0x430
 [] worker_thread+0x11d/0x430
 [] ? maybe_create_worker+0xf0/0xf0
 [] ? maybe_create_worker+0xf0/0xf0
 [] kthread+0xef/0x110
 [] ? schedule_tail+0x1e/0xd0
 [] ? __init_kthread_worker+0x70/0x70
 [] ret_from_fork+0x3f/0x70
 [] ? __init_kthread_worker+0x70/0x70

The issue is that we've got the software context pinned while calling
blk_flush_plug_list(), which flushes callbacks that are allowed to
sleep. btrfs and raid have such callbacks.

Flip the checks around a bit, so that we can re-enable preemption a bit
earlier and flush plugs without preemption disabled.

This only affects blk-mq driven devices, and only those that register a
single queue.

Reported-by: Liu Bo
Tested-by: Liu Bo
Cc: stable@kernel.org
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ae09de62f19..6d6f8feb48c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
         blk_mq_bio_to_request(rq, bio);
 
         /*
-         * we do limited pluging. If bio can be merged, do merge.
+         * We do limited pluging. If the bio can be merged, do that.
          * Otherwise the existing request in the plug list will be
          * issued. So the plug list will have one request at most
          */
         if (plug) {
             /*
              * The plug list might get flushed before this. If that
-             * happens, same_queue_rq is invalid and plug list is empty
-             **/
+             * happens, same_queue_rq is invalid and plug list is
+             * empty
+             */
             if (same_queue_rq && !list_empty(&plug->mq_list)) {
                 old_rq = same_queue_rq;
                 list_del_init(&old_rq->queuelist);
@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
         blk_mq_bio_to_request(rq, bio);
         if (!request_count)
             trace_block_plug(q);
-        else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+
+        blk_mq_put_ctx(data.ctx);
+
+        if (request_count >= BLK_MAX_REQUEST_COUNT) {
             blk_flush_plug_list(plug, false);
             trace_block_plug(q);
         }
+
         list_add_tail(&rq->queuelist, &plug->mq_list);
-        blk_mq_put_ctx(data.ctx);
         return cookie;
     }
 
-- 
cgit v1.2.1

From 578270bfbd2803dc7b0b03fbc2ac119efbc73195 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 24 Nov 2015 10:35:29 +0800
Subject: block: fix segment split

Inside blk_bio_segment_split(), the previous bvec pointer (bvprvp)
always points to the iterator's local variable, which is obviously
wrong, so fix it by pointing it to the local variable 'bvprv'.

Fixes: 5014c311baa2b ("block: fix bogus compiler warnings in blk-merge.c")
Cc: stable@kernel.org #4.3
Reported-by: Michael Ellerman
Reported-by: Mark Salter
Tested-by: Laurent Dufour
Tested-by: Mark Salter
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-merge.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index de5716d8e525..f2efe8ae75bb 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -98,7 +98,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
             seg_size += bv.bv_len;
             bvprv = bv;
-            bvprvp = &bv;
+            bvprvp = &bvprv;
             sectors += bv.bv_len >> 9;
             continue;
         }
@@ -108,7 +108,7 @@ new_segment:
 
         nsegs++;
         bvprv = bv;
-        bvprvp = &bv;
+        bvprvp = &bvprv;
         seg_size = bv.bv_len;
         sectors += bv.bv_len >> 9;
     }
-- 
cgit v1.2.1

From 02e707424c2eadbcda68cd38876c9f4434ca8e1a Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 24 Nov 2015 10:35:30 +0800
Subject: blk-merge: fix blk_bio_segment_split

Commit bdced438acd83a ("block: setup bi_phys_segments after splitting")
introduced the computation of bio->bi_phys_segments during bio
splitting. Unfortunately, neither bio->bi_seg_front_size nor
bio->bi_seg_back_size is computed there, so too many physical segments
may be obtained for one request, since both fields are used to check
whether one segment can span two bios. This patch fixes the issue by
computing the two variables in blk_bio_segment_split().
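For context, these two fields feed the merge-time check that decides
whether the trailing segment of one bio and the leading segment of the
next may be fused into a single physical segment. Condensed from the
4.3-era blk_phys_contig_segment() in block/blk-merge.c (illustration
only, not the full function):

    /*
     * If the back segment of 'bio' and the front segment of 'nxt'
     * were fused, the combined segment must still respect the
     * queue's segment size limit. With bi_seg_front_size and
     * bi_seg_back_size left stale by the split path, this check
     * operates on wrong values and the segment accounting goes off.
     */
    if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
        queue_max_segment_size(q))
        return 0;	/* keep them as two physical segments */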
Fixes: bdced438acd83a ("block: setup bi_phys_segments after splitting")
Reported-by: Michael Ellerman
Reported-by: Mark Salter
Tested-by: Laurent Dufour
Tested-by: Mark Salter
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-merge.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index f2efe8ae75bb..50793cdc5331 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
     struct bio_vec bv, bvprv, *bvprvp = NULL;
     struct bvec_iter iter;
     unsigned seg_size = 0, nsegs = 0, sectors = 0;
+    unsigned front_seg_size = bio->bi_seg_front_size;
+    bool do_split = true;
+    struct bio *new = NULL;
 
     bio_for_each_segment(bv, bio, iter) {
         if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
@@ -111,13 +114,26 @@ new_segment:
         bvprvp = &bvprv;
         seg_size = bv.bv_len;
         sectors += bv.bv_len >> 9;
+
+        if (nsegs == 1 && seg_size > front_seg_size)
+            front_seg_size = seg_size;
     }
 
-    *segs = nsegs;
-    return NULL;
+    do_split = false;
 split:
     *segs = nsegs;
-    return bio_split(bio, sectors, GFP_NOIO, bs);
+
+    if (do_split) {
+        new = bio_split(bio, sectors, GFP_NOIO, bs);
+        if (new)
+            bio = new;
+    }
+
+    bio->bi_seg_front_size = front_seg_size;
+    if (seg_size > bio->bi_seg_back_size)
+        bio->bi_seg_back_size = seg_size;
+
+    return do_split ? new : NULL;
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
-- 
cgit v1.2.1

From 12e57f59ca3344a588531f68eeede45666e8a6e0 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 24 Nov 2015 10:35:31 +0800
Subject: blk-merge: warn if figured out segment number is bigger than nr_phys_segments

We have seen lots of reports of this kind of issue, so add a warning in
blk-merge so that it can be triggered easily, instead of depending on
warnings/bugs from drivers.

Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-merge.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 50793cdc5331..41a55ba0d78e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -428,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
     if (sg)
         sg_mark_end(sg);
 
+    /*
+     * Something must have been wrong if the figured number of
+     * segment is bigger than number of req's physical segments
+     */
+    WARN_ON(nsegs > rq->nr_phys_segments);
+
     return nsegs;
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
-- 
cgit v1.2.1
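As a footnote to the "block: fix segment split" patch above: the bvprvp
bug is a classic iterator-aliasing mistake. A minimal userspace toy
(plain C with hypothetical names, not kernel code) shows why pointing
at the loop-local copy goes wrong:

    #include <stdio.h>

    struct vec { int len; };

    int main(void)
    {
        struct vec data[] = { {10}, {20}, {30} };
        struct vec cur, prev, *prevp = NULL;
        int i;

        for (i = 0; i < 3; i++) {
            cur = data[i];	/* like 'bv': overwritten on every pass */
            if (prevp)
                printf("prev=%d cur=%d\n", prevp->len, cur.len);
            prev = cur;
            /* buggy form: prevp = &cur; since 'cur' is rewritten
             * before the next dereference, prevp would always show
             * the current element, never the previous one. */
            prevp = &prev;	/* the fix: point at the saved copy */
        }
        return 0;	/* prints prev=10 cur=20, then prev=20 cur=30 */
    }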