author | NeilBrown <neilb@suse.com> | 2017-03-15 14:05:13 +1100
committer | Shaohua Li <shli@fb.com> | 2017-03-22 19:16:56 -0700
commit | 0472a42ba1f89ec85f070c731f4440d7cc38c44c (patch)
tree | abdcc017ea2f4fc66e80b2d5f99921894702b687 /drivers/md/raid5.h
parent | 016c76ac76e4c678b01a75a602dc6be0282f5b29 (diff)
md/raid5: remove over-loading of ->bi_phys_segments.
When a read request that bypassed the cache fails, we need to retry
it through the cache.
This involves attaching it to a sequence of stripe_heads, and it may not
be possible to get all the stripe_heads we need at once.
We do what we can, and record how far we got in ->bi_phys_segments so
we can pick up again later.
There is only ever one bio which may have a non-zero offset stored in
->bi_phys_segments: the one that is either active in the single thread
that calls retry_aligned_read(), or is waiting in
conf->retry_read_aligned for retry_aligned_read() to be called again.
So we only need to store one offset value. This can be in a local
variable passed between remove_bio_from_retry() and
retry_aligned_read(), or in the r5conf structure next to the
->retry_read_aligned pointer.
Storing it there allows the last usage of ->bi_phys_segments to be
removed from md/raid5.c.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
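
The diff below is limited to drivers/md/raid5.h, so the matching raid5.c changes are not shown here. As a rough sketch of what the commit message describes, remove_bio_from_retry() could hand the stored offset back to its caller along the following lines; the exact signatures and the scnt variable are assumptions based on the message, not the verbatim kernel code:

static struct bio *remove_bio_from_retry(struct r5conf *conf,
					 unsigned int *offset)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		/* This bio ran out of stripe_heads on a previous pass;
		 * resume from the recorded offset instead of the start.
		 */
		*offset = conf->retry_read_offset;
		conf->retry_read_aligned = NULL;
		return bi;
	}

	bi = conf->retry_read_aligned_list;
	if (bi) {
		/* A freshly queued bio always starts from offset 0. */
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		*offset = 0;
	}
	return bi;
}

Correspondingly, when retry_aligned_read() cannot get all the stripe_heads it needs, it would park the bio and record its progress in the new field rather than in ->bi_phys_segments:

	/* Out of stripe_heads: park the bio and remember how far we got. */
	conf->retry_read_aligned = raid_bio;
	conf->retry_read_offset = scnt;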
Diffstat (limited to 'drivers/md/raid5.h')
-rw-r--r-- | drivers/md/raid5.h | 30
1 file changed, 1 insertion(+), 29 deletions(-)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 7d74fb3f2ec6..cdc7f92e1806 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -487,35 +487,6 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 	return NULL;
 }
 
-/*
- * We maintain a count of processed stripes in the upper 16 bits
- */
-static inline int raid5_bi_processed_stripes(struct bio *bio)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-
-	return (atomic_read(segments) >> 16) & 0xffff;
-}
-
-static inline void raid5_set_bi_processed_stripes(struct bio *bio,
-						  unsigned int cnt)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	int old, new;
-
-	do {
-		old = atomic_read(segments);
-		new = (old & 0xffff) | (cnt << 16);
-	} while (atomic_cmpxchg(segments, old, new) != old);
-}
-
-static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-
-	atomic_set(segments, cnt);
-}
-
 /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
  * This is because we sometimes take all the spinlocks
  * and creating that much locking depth can cause
@@ -613,6 +584,7 @@ struct r5conf {
 	struct list_head	delayed_list; /* stripes that have plugged requests */
 	struct list_head	bitmap_list; /* stripes delaying awaiting bitmap update */
 	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
+	unsigned int		retry_read_offset; /* sector offset into retry_read_aligned */
 	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
 	atomic_t		preread_active_stripes; /* stripes with scheduled io */
 	atomic_t		active_aligned_reads;
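
For context on what was removed: the deleted helpers treated ->bi_phys_segments as an atomic and packed two 16-bit counters into it, the upper half holding the count of stripes already processed, updated with a cmpxchg loop so the lower half was never disturbed. A minimal standalone illustration of that packing scheme, written in userspace C11 with assumed names (not kernel code):

#include <stdatomic.h>

/* Two 16-bit counters packed into one 32-bit word:
 * low half  = active stripe count,
 * high half = stripes already processed (the retry offset).
 */
static unsigned int get_processed(atomic_uint *word)
{
	return (atomic_load(word) >> 16) & 0xffff;
}

static void set_processed(atomic_uint *word, unsigned int cnt)
{
	unsigned int old = atomic_load(word);
	unsigned int new;

	do {
		/* Replace the high half, keep the low half intact. */
		new = (old & 0xffff) | ((cnt & 0xffff) << 16);
	} while (!atomic_compare_exchange_weak(word, &old, new));
}

With the offset moved into r5conf, none of this machinery is needed: the invariant described in the commit message (at most one bio ever carries a non-zero offset, and it is owned by a single thread) means a plain unsigned int field is enough.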