Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 28
-rw-r--r-- | block/elevator.c    | 37
-rw-r--r-- | block/ll_rw_blk.c   | 68
-rw-r--r-- | block/scsi_ioctl.c  |  8
4 files changed, 92 insertions, 49 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 533a2938ffd6..07b706243772 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -568,6 +568,33 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * Disallow merge of a sync bio into an async request.
+	 */
+	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Lookup the cfqq that this bio will be queued with. Allow
+	 * merge only if rq is queued there.
+	 */
+	key = cfq_queue_pid(current, rw, bio_sync(bio));
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+
+	if (cfqq == RQ_CFQQ(rq))
+		return 1;
+
+	return 0;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2152,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn = 		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
diff --git a/block/elevator.c b/block/elevator.c
index c0063f345c5d..f6dafa8c7c4d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6;
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 /*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
@@ -572,6 +590,12 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 */
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 
+		/*
+		 * Most requeues happen because of a busy condition,
+		 * don't force unplug of the queue for that case.
+		 */
+		unplug_it = 0;
+
 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
 			break;
@@ -586,11 +610,6 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		}
 
 		list_add_tail(&rq->queuelist, pos);
-		/*
-		 * most requeues happen because of a busy condition, don't
-		 * force unplug of the queue for that case.
-		 */
-		unplug_it = 0;
 		break;
 
 	default:
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 79807dbc306e..fb6789725e1b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
 
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2464,6 +2453,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @rq:		request to map data to
  * @iov:	pointer to the iovec
  * @iov_count:	number of elements in the iovec
+ * @len:	I/O byte count
  *
  * Description:
  *    Data will be mapped directly for zero copy io, if possible. Otherwise
@@ -2509,27 +2499,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
@@ -2822,7 +2818,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2935,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2952,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f322b6a441d8..65c6a3cba6d6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -223,7 +223,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
 static int sg_io(struct file *file, request_queue_t *q,
 		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
-	unsigned long start_time;
+	unsigned long start_time, timeout;
 	int writing = 0, ret = 0;
 	struct request *rq;
 	char sense[SCSI_SENSE_BUFFERSIZE];
@@ -271,7 +271,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
-	rq->timeout = jiffies_to_msecs(hdr->timeout);
+	timeout = msecs_to_jiffies(hdr->timeout);
+	rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
 	if (!rq->timeout)
 		rq->timeout = q->sg_timeout;
 	if (!rq->timeout)
@@ -333,8 +334,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
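
Usage note (not part of the commit): with blk_rq_unmap_user() now taking the head of the bio list instead of the request, a caller has to remember the original rq->bio returned by blk_rq_map_user() before the request is executed, exactly as sg_io() does above. The sketch below is a minimal illustration of that pattern against the block API of this kernel generation; the issue_user_read() helper is hypothetical and real command setup is omitted.

#include <linux/blkdev.h>

static int issue_user_read(request_queue_t *q, struct gendisk *disk,
			   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* save the head of the mapped bio list; completion may advance rq->bio */
	bio = rq->bio;

	/* command setup (cmd[], cmd_len, sense, timeout) omitted in this sketch */
	ret = blk_execute_rq(q, disk, rq, 0);

	/* unmap using the saved bio, not the request */
	if (blk_rq_unmap_user(bio) && !ret)
		ret = -EFAULT;

	blk_put_request(rq);
	return ret;
}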