author     Martin K. Petersen <martin.petersen@oracle.com>    2009-05-22 17:17:50 -0400
committer  Jens Axboe <jens.axboe@oracle.com>                 2009-05-22 23:22:54 +0200
commit     ae03bf639a5027d27270123f5f6e3ee6a412781d
tree       d705f41a188ad656b1f47f7952626a9f992e3b8f /block
parent     e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
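The wrapper functions themselves are added to include/linux/blkdev.h by this
same commit, which falls outside the 'block'-limited diffstat below. At this
point in the series they presumably just read the request_queue fields
directly; a sketch, with field widths matching the 2.6.30-era struct
request_queue:

	/* Sketch of the accessors this commit converts callers to use; the
	 * real definitions live in include/linux/blkdev.h (not shown here). */
	static inline unsigned int queue_max_sectors(struct request_queue *q)
	{
		return q->max_sectors;
	}

	static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
	{
		return q->max_hw_sectors;
	}

	static inline unsigned short queue_max_hw_segments(struct request_queue *q)
	{
		return q->max_hw_segments;
	}

	static inline unsigned short queue_max_phys_segments(struct request_queue *q)
	{
		return q->max_phys_segments;
	}

Funneling every reader through one accessor per limit means that when a later
patch in the series relocates these fields, only the accessor bodies have to
change, not the dozens of call sites converted below.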
Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c     8
-rw-r--r--  block/blk-core.c       16
-rw-r--r--  block/blk-map.c         4
-rw-r--r--  block/blk-merge.c      27
-rw-r--r--  block/blk-settings.c   15
-rw-r--r--  block/blk-sysfs.c       8
-rw-r--r--  block/compat_ioctl.c    2
-rw-r--r--  block/ioctl.c          10
-rw-r--r--  block/scsi_ioctl.c      8
9 files changed, 54 insertions, 44 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d98054cdbd7..30022b4e2f63 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
bio->bi_sector = sector;
- if (nr_sects > q->max_hw_sectors) {
- bio->bi_size = q->max_hw_sectors << 9;
- nr_sects -= q->max_hw_sectors;
- sector += q->max_hw_sectors;
+ if (nr_sects > queue_max_hw_sectors(q)) {
+ bio->bi_size = queue_max_hw_sectors(q) << 9;
+ nr_sects -= queue_max_hw_sectors(q);
+ sector += queue_max_hw_sectors(q);
} else {
bio->bi_size = nr_sects << 9;
nr_sects = 0;
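An aside on the << 9 that recurs throughout this patch: queue limits are
counted in 512-byte sectors, so shifting left by 9 converts sectors to bytes.
A minimal standalone check of the arithmetic, using a hypothetical limit value:

	#include <assert.h>

	int main(void)
	{
		/* Hypothetical limit: 255 sectors of 512 bytes each. */
		unsigned int max_hw_sectors = 255;

		/* << 9 multiplies by 2^9 == 512, i.e. sectors -> bytes. */
		assert((max_hw_sectors << 9) == 255u * 512u);
		return 0;
	}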
diff --git a/block/blk-core.c b/block/blk-core.c
index 59c4af523112..7a4c40184a64 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}
- if (unlikely(nr_sectors > q->max_hw_sectors)) {
+ if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
- bdevname(bio->bi_bdev, b),
- bio_sectors(bio),
- q->max_hw_sectors);
+ bdevname(bio->bi_bdev, b),
+ bio_sectors(bio),
+ queue_max_hw_sectors(q));
goto end_io;
}
@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (blk_rq_sectors(rq) > q->max_sectors ||
- blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+ if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+ blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
* limitation.
*/
blk_recalc_rq_segments(rq);
- if (rq->nr_phys_segments > q->max_phys_segments ||
- rq->nr_phys_segments > q->max_hw_segments) {
+ if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+ rq->nr_phys_segments > queue_max_hw_segments(q)) {
printk(KERN_ERR "%s: over max segments limit.\n", __func__);
return -EIO;
}
diff --git a/block/blk-map.c b/block/blk-map.c
index ef2492adca7e..9083cf0180cc 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct bio *bio = NULL;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
struct bio *bio;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4974dd5767e5..39ce64432ba6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
* never considered part of another segment, since that
* might change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
if (high || highprv)
goto new_segment;
if (cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
+ if (seg_size + bv->bv_len
+ > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
- q->max_segment_size)
+ queue_max_segment_size(q))
return 0;
if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
- if (sg->length + nbytes > q->max_segment_size)
+ if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
- || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+ req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/*
* Will it become too large?
*/
- if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
total_phys_segments--;
}
- if (total_phys_segments > q->max_phys_segments)
+ if (total_phys_segments > queue_max_phys_segments(q))
return 0;
- if (total_phys_segments > q->max_hw_segments)
+ if (total_phys_segments > queue_max_hw_segments(q))
return 0;
/* Merge is OK... */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 15c3164537b8..0b32f984eed2 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
}
EXPORT_SYMBOL(blk_queue_max_sectors);
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+ if (BLK_DEF_MAX_SECTORS > max_sectors)
+ q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+ else
+ q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
/**
* blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
- if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+ if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
- --q->max_hw_segments;
- --q->max_phys_segments;
+ blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+ blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;
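Note that the new blk_queue_max_hw_sectors() above floors the stored value at
BLK_DEF_MAX_SECTORS rather than capping it: a caller passing something smaller
is raised to the default. A minimal sketch of the resulting behavior; the
constant's value (1024 in kernels of this vintage) is quoted here as an
assumption:

	#include <assert.h>

	#define BLK_DEF_MAX_SECTORS 1024	/* assumed value at this commit */

	/* Mirrors the branch in the new blk_queue_max_hw_sectors():
	 * values below the default are raised to it. */
	static unsigned int resulting_max_hw_sectors(unsigned int max_sectors)
	{
		return (BLK_DEF_MAX_SECTORS > max_sectors) ? BLK_DEF_MAX_SECTORS
							   : max_sectors;
	}

	int main(void)
	{
		assert(resulting_max_hw_sectors(256)  == 1024);	/* floored up */
		assert(resulting_max_hw_sectors(2048) == 2048);	/* kept as-is */
		return 0;
	}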
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 13d38b7e4d0f..142a4acddd43 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
- int max_sectors_kb = q->max_sectors >> 1;
+ int max_sectors_kb = queue_max_sectors(q) >> 1;
return queue_var_show(max_sectors_kb, (page));
}
@@ -109,7 +109,7 @@ static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
- max_hw_sectors_kb = q->max_hw_sectors >> 1,
+ max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
@@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
return -EINVAL;
spin_lock_irq(q->queue_lock);
- q->max_sectors = max_sectors_kb << 1;
+ blk_queue_max_sectors(q, max_sectors_kb << 1);
spin_unlock_irq(q->queue_lock);
return ret;
@@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
- int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+ int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
return queue_var_show(max_hw_sectors_kb, (page));
}
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 9eaa1940273a..df18a156d011 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return compat_put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return compat_put_ushort(arg,
- bdev_get_queue(bdev)->max_sectors);
+ queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET: /* compatible, but no compat_ptr (!) */
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
diff --git a/block/ioctl.c b/block/ioctl.c
index 7aa97f65da82..500e4c73cc52 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
bio->bi_private = &wait;
bio->bi_sector = start;
- if (len > q->max_hw_sectors) {
- bio->bi_size = q->max_hw_sectors << 9;
- len -= q->max_hw_sectors;
- start += q->max_hw_sectors;
+ if (len > queue_max_hw_sectors(q)) {
+ bio->bi_size = queue_max_hw_sectors(q) << 9;
+ len -= queue_max_hw_sectors(q);
+ start += queue_max_hw_sectors(q);
} else {
bio->bi_size = len << 9;
len = 0;
@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKSSZGET: /* get block device hardware sector size */
return put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
- return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+ return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET:
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a9670dd4b5de..5f8e798ede4e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
- unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+ unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
return put_user(val, p);
}
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
if (size < 0)
return -EINVAL;
- if (size > (q->max_sectors << 9))
- size = q->max_sectors << 9;
+ if (size > (queue_max_sectors(q) << 9))
+ size = queue_max_sectors(q) << 9;
q->sg_reserved_size = size;
return 0;
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;
- if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+ if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO;
if (hdr->dxfer_len)