author    | Jens Axboe <jaxboe@fusionio.com> | 2010-11-16 10:09:42 +0100
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-11-16 10:09:42 +0100
commit    | a02056349cdea2252cd2b21643ebf025e83a29f2 (patch)
tree      | b7c889d6cbe8e7188d07d99a5c9da858c53a5b6c /block/cfq-iosched.c
parent    | 34db1d595ef6f183fbc1e42cda45a3dfa0035258 (diff)
parent    | e53beacd23d9cb47590da6a7a7f6d417b941a994 (diff)
Merge branch 'v2.6.37-rc2' into for-2.6.38/core
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 39
1 file changed, 20 insertions, 19 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f90519430be6..73a58628f54a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ enum wl_prio_t {
         BE_WORKLOAD = 0,
         RT_WORKLOAD = 1,
         IDLE_WORKLOAD = 2,
+        CFQ_PRIO_NR,
 };

 /*
@@ -184,10 +185,19 @@ struct cfq_group {
         /* number of cfqq currently on this group */
         int nr_cfqq;

-        /* Per group busy queus average. Useful for workload slice calc. */
-        unsigned int busy_queues_avg[2];
         /*
-         * rr lists of queues with requests, onle rr for each priority class.
+         * Per group busy queus average. Useful for workload slice calc. We
+         * create the array for each prio class but at run time it is used
+         * only for RT and BE class and slot for IDLE class remains unused.
+         * This is primarily done to avoid confusion and a gcc warning.
+         */
+        unsigned int busy_queues_avg[CFQ_PRIO_NR];
+        /*
+         * rr lists of queues with requests. We maintain service trees for
+         * RT and BE classes. These trees are subdivided in subclasses
+         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
+         * class there is no subclassification and all the cfq queues go on
+         * a single tree service_tree_idle.
          * Counts are embedded in the cfq_rb_root
          */
         struct cfq_rb_root service_trees[2][3];
@@ -221,7 +231,6 @@ struct cfq_data {
         enum wl_type_t serving_type;
         unsigned long workload_expires;
         struct cfq_group *serving_group;
-        bool noidle_tree_requires_idle;

         /*
          * Each priority tree is sorted by next_request position. These
@@ -977,8 +986,8 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
         return NULL;
 }

-void
-cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
+void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
+                                        unsigned int weight)
 {
         cfqg_of_blkg(blkg)->weight = weight;
 }
@@ -2180,7 +2189,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
         slice = max_t(unsigned, slice, CFQ_MIN_TT);
         cfq_log(cfqd, "workload slice:%d", slice);
         cfqd->workload_expires = jiffies + slice;
-        cfqd->noidle_tree_requires_idle = false;
 }

 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
@@ -3188,7 +3196,9 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
                 cfq_mark_cfqq_deep(cfqq);

-        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+                enable_idle = 0;
+        else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
             (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
                 enable_idle = 0;
         else if (sample_valid(cic->ttime_samples)) {
@@ -3509,17 +3519,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                         cfq_slice_expired(cfqd, 1);
                 else if (sync && cfqq_empty &&
                          !cfq_close_cooperator(cfqd, cfqq)) {
-                        cfqd->noidle_tree_requires_idle |=
-                                !(rq->cmd_flags & REQ_NOIDLE);
-                        /*
-                         * Idling is enabled for SYNC_WORKLOAD.
-                         * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-                         * only if we processed at least one !REQ_NOIDLE request
-                         */
-                        if (cfqd->serving_type == SYNC_WORKLOAD
-                            || cfqd->noidle_tree_requires_idle
-                            || cfqq->cfqg->nr_cfqq == 1)
-                                cfq_arm_slice_timer(cfqd);
+                        cfq_arm_slice_timer(cfqd);
                 }
         }

@@ -4105,6 +4105,7 @@ static struct blkio_policy_type blkio_policy_cfq = {
                 .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
                 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
         },
+        .plid = BLKIO_POLICY_PROP,
 };
 #else
 static struct blkio_policy_type blkio_policy_cfq;
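Context for the first two hunks, for readers skimming the diff: the patch adds a CFQ_PRIO_NR sentinel to enum wl_prio_t and uses it to size the per-group busy_queues_avg[] array, so the array gets one slot per priority class (the IDLE slot is simply left unused, as the new comment explains). The user-space sketch below illustrates only that sentinel-enum pattern; the enum values and array name mirror the patch, but the struct name, the helper, and the main() harness are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Mirrors the patched enum: the trailing *_NR sentinel equals the
 * number of real entries, so it can size per-class arrays. */
enum wl_prio_t {
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1,
        IDLE_WORKLOAD = 2,
        CFQ_PRIO_NR,            /* sentinel: number of priority classes */
};

struct cfq_group_sketch {
        /* One slot per priority class; only BE and RT are used at run
         * time, the IDLE slot stays zero (as the patch comment notes). */
        unsigned int busy_queues_avg[CFQ_PRIO_NR];
};

/* Illustrative helper (not from the patch): indexing by enum value
 * cannot run past the array because the sentinel sizes it. */
static void note_busy_queues(struct cfq_group_sketch *g,
                             enum wl_prio_t prio, unsigned int avg)
{
        if (prio < CFQ_PRIO_NR)
                g->busy_queues_avg[prio] = avg;
}

int main(void)
{
        struct cfq_group_sketch g = { { 0 } };

        note_busy_queues(&g, BE_WORKLOAD, 4);
        note_busy_queues(&g, RT_WORKLOAD, 2);

        for (int p = 0; p < CFQ_PRIO_NR; p++)
                printf("class %d: busy_queues_avg=%u\n", p, g.busy_queues_avg[p]);

        return 0;
}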