From f679623f50545bc0577caf2d0f8675b61162f059 Mon Sep 17 00:00:00 2001
From: "Raz Ben-Jehuda(caro)"
Date: Sun, 10 Dec 2006 02:20:46 -0800
Subject: [PATCH] md: handle bypassing the read cache (assuming nothing fails)

Signed-off-by: Neil Brown
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/md/raid5.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b86ceba04f..269b7771a3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2633,6 +2633,84 @@ static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
 	return max;
 }
 
+
+static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
+{
+	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	unsigned int chunk_sectors = mddev->chunk_size >> 9;
+	unsigned int bio_sectors = bio->bi_size >> 9;
+
+	return chunk_sectors >=
+		((sector & (chunk_sectors - 1)) + bio_sectors);
+}
+
+/*
+ * raid5_align_endio() checks whether the read succeeded and, if it
+ * did, calls bio_endio on the original bio (after bio_put on the
+ * clone).  Read failures are not handled yet: this patch assumes
+ * nothing fails.
+ */
+static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+{
+	struct bio *raid_bi = bi->bi_private;
+	if (bi->bi_size)
+		return 1;
+	bio_put(bi);
+	bio_endio(raid_bi, bytes, error);
+	return 0;
+}
+
+static int chunk_aligned_read(request_queue_t *q, struct bio *raid_bio)
+{
+	mddev_t *mddev = q->queuedata;
+	raid5_conf_t *conf = mddev_to_conf(mddev);
+	const unsigned int raid_disks = conf->raid_disks;
+	const unsigned int data_disks = raid_disks - 1;
+	unsigned int dd_idx, pd_idx;
+	struct bio *align_bi;
+	mdk_rdev_t *rdev;
+
+	if (!in_chunk_boundary(mddev, raid_bio)) {
+		printk(KERN_DEBUG "chunk_aligned_read : non aligned\n");
+		return 0;
+	}
+	/*
+	 * use bio_clone to make a copy of the bio
+	 */
+	align_bi = bio_clone(raid_bio, GFP_NOIO);
+	if (!align_bi)
+		return 0;
+	/*
+	 * set bi_end_io to a new function, and set bi_private to the
+	 * original bio.
+	 */
+	align_bi->bi_end_io  = raid5_align_endio;
+	align_bi->bi_private = raid_bio;
+	/*
+	 * compute position
+	 */
+	align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector,
+					raid_disks,
+					data_disks,
+					&dd_idx,
+					&pd_idx,
+					conf);
+
+	rcu_read_lock();
+	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+	if (rdev && test_bit(In_sync, &rdev->flags)) {
+		align_bi->bi_bdev = rdev->bdev;
+		atomic_inc(&rdev->nr_pending);
+		rcu_read_unlock();
+		generic_make_request(align_bi);
+		return 1;
+	} else {
+		rcu_read_unlock();
+		return 0;
+	}
+}
+
+
 static int make_request(request_queue_t *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
-- 
2.39.5
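
The in_chunk_boundary() test relies on md chunk sizes being powers of
two: "sector & (chunk_sectors - 1)" is then the offset of the first
sector within its chunk, and the read fits in one chunk exactly when
that offset plus the read length does not pass chunk_sectors.  (The
kernel version first adds the partition start via get_start_sect(), so
the offset is computed relative to the whole device.)  Below is a
minimal userspace sketch of the same arithmetic; fits_in_one_chunk is
a made-up name that mirrors in_chunk_boundary without the mddev/bio
plumbing:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* Return 1 if a request of bio_sectors starting at sector fits
	 * entirely inside one chunk of chunk_sectors (a power of two). */
	static int fits_in_one_chunk(sector_t sector, unsigned int bio_sectors,
				     unsigned int chunk_sectors)
	{
		/* offset of the first sector within its chunk */
		unsigned int offset = sector & (chunk_sectors - 1);

		return offset + bio_sectors <= chunk_sectors;
	}

	int main(void)
	{
		/* 64KiB chunks = 128 sectors of 512 bytes */
		unsigned int chunk_sectors = 128;

		/* 16 sectors starting at 120 cross into the next chunk */
		printf("%d\n", fits_in_one_chunk(120, 16, chunk_sectors)); /* 0 */
		/* 8 sectors starting at 120 end exactly on the boundary */
		printf("%d\n", fits_in_one_chunk(120, 8, chunk_sectors));  /* 1 */
		return 0;
	}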
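
The clone set up by chunk_aligned_read() carries the original bio in
bi_private, so completing the clone can be turned into completing the
original.  The following userspace analogue shows that completion
chain, assuming nothing fails, as the patch title says; struct request,
req_endio_t and the function names here are illustrative stand-ins,
not kernel API:

	#include <stdio.h>
	#include <stdlib.h>

	struct request;
	typedef void (*req_endio_t)(struct request *, int error);

	struct request {
		req_endio_t end_io;	/* plays the role of bio->bi_end_io  */
		void *private;		/* plays the role of bio->bi_private */
	};

	/* completion handler for the clone: release it, then finish the
	 * original request with the clone's status (cf. raid5_align_endio) */
	static void align_endio(struct request *clone, int error)
	{
		struct request *orig = clone->private;

		free(clone);			/* bio_put(bi)             */
		orig->end_io(orig, error);	/* bio_endio(raid_bi, ...) */
	}

	static void orig_endio(struct request *req, int error)
	{
		printf("original request completed, error=%d\n", error);
	}

	int main(void)
	{
		struct request *orig = malloc(sizeof(*orig));
		struct request *clone = malloc(sizeof(*clone));

		orig->end_io = orig_endio;

		clone->end_io = align_endio;	/* redirect completion      */
		clone->private = orig;		/* remember who to complete */

		/* pretend the lower-level device finished the clone */
		clone->end_io(clone, 0);

		free(orig);
		return 0;
	}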
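
raid5_compute_sector() translates an array-relative sector into a data
disk index (dd_idx) plus a sector address on that disk, and reports
which disk holds parity for the stripe (pd_idx).  Here is a simplified
sketch of that mapping for the left-asymmetric layout only; the
in-kernel function handles several layouts and takes its geometry from
the conf, and compute_sector below is a hypothetical standalone
version:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* Left-asymmetric RAID-5: parity rotates backwards one disk per
	 * stripe, and data chunks skip over the parity disk. */
	static sector_t compute_sector(sector_t logical, unsigned int raid_disks,
				       unsigned int sectors_per_chunk,
				       unsigned int *dd_idx, unsigned int *pd_idx)
	{
		unsigned int data_disks = raid_disks - 1;
		sector_t chunk_number = logical / sectors_per_chunk;
		unsigned int chunk_offset = logical % sectors_per_chunk;
		sector_t stripe = chunk_number / data_disks;

		*dd_idx = chunk_number % data_disks;
		*pd_idx = data_disks - stripe % raid_disks;
		if (*dd_idx >= *pd_idx)	/* data chunks skip the parity disk */
			(*dd_idx)++;

		/* sector on the target disk */
		return stripe * sectors_per_chunk + chunk_offset;
	}

	int main(void)
	{
		unsigned int dd_idx, pd_idx;
		/* 4 disks, 64KiB chunks (128 sectors), read at array sector 300 */
		sector_t s = compute_sector(300, 4, 128, &dd_idx, &pd_idx);

		printf("disk %u, sector %llu (parity on disk %u)\n",
		       dd_idx, s, pd_idx);	/* disk 2, sector 44, parity on 3 */
		return 0;
	}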
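
Note that nothing in this hunk actually calls chunk_aligned_read():
make_request() is left untouched.  Hooking the bypass up would take a
short test near the top of make_request(), along the lines of the
sketch below (illustrative only, not part of this patch;
bio_data_dir() is the existing kernel macro, the placement is an
assumption):

	/* sketch only: short-circuit reads that fit in one chunk */
	if (bio_data_dir(bi) == READ &&
	    chunk_aligned_read(q, bi))
		return 0;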