}
}
-static inline void free_r10bio(r10bio_t *r10_bio)
+static void free_r10bio(r10bio_t *r10_bio)
{
conf_t *conf = mddev_to_conf(r10_bio->mddev);
mempool_free(r10_bio, conf->r10bio_pool);
}
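For context: free_r10bio() is the release half of a mempool pair, and dropping `inline` simply lets the compiler decide (a static function is still inlined where profitable). A minimal sketch of the allocation side, assuming the usual make_request-style caller and the standard mempool API:

	/* Allocation half of the pair (sketch): pull an r10bio_t from the
	 * per-array pool; GFP_NOIO because we may already be inside the
	 * block layer when this runs. */
	r10bio_t *r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
	r10_bio->mddev = mddev;	/* lets free_r10bio() find the pool again */
	/* ... issue IO; the completion path ends in free_r10bio(r10_bio) */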
-static inline void put_buf(r10bio_t *r10_bio)
+static void put_buf(r10bio_t *r10_bio)
{
conf_t *conf = mddev_to_conf(r10_bio->mddev);
if (disk >= 0 && (rdev = rcu_dereference(conf->mirrors[disk].rdev)) != NULL)
atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
+ else
+ disk = -1;
rcu_read_unlock();
return disk;
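The added else branch matters because a hot-removed mirror can leave conf->mirrors[disk].rdev NULL between choosing `disk` and pinning it with nr_pending; returning the stale index would let the caller dereference a dead slot. A sketch of the caller-side contract, assuming raid10's make_request shape:

	/* Caller side (sketch): a negative return now means "no usable
	 * mirror", so the read is failed rather than dispatched. */
	int disk = read_balance(conf, r10_bio);
	if (disk < 0) {
		raid_end_bio_io(r10_bio);	/* complete the original bio with an error */
		return 0;
	}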
static void freeze_array(conf_t *conf)
{
/* stop syncio and normal IO and wait for everything to
- * go quite.
+ * go quiet.
* We increment barrier and nr_waiting, and then
- * wait until barrier+nr_pending match nr_queued+2
+ * wait until barrier+nr_pending matches nr_queued+2
*/
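For reference, the invariant quoted above is enforced with a wait loop under resync_lock; a sketch in the style of this era's barrier code (helper names such as wait_event_lock_irq and raid10_unplug are assumptions about the surrounding tree):

	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;	/* holds off new normal IO */
	conf->nr_waiting++;	/* we count as one of the waiters */
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier + conf->nr_pending == conf->nr_queued + 2,
			    conf->resync_lock,
			    raid10_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);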
for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
- if (i == conf->copies)
- BUG();
+ BUG_ON(i == conf->copies);
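BUG_ON() reads better and adds a branch hint; the generic definition is roughly the open-coded form being removed (sketch of asm-generic/bug.h, modulo era):

	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)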
update_head_pos(i, r10_bio);
d = r10_bio->devs[i].devnum;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
- else if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
- md_error(r10_bio->mddev,
- conf->mirrors[d].rdev);
+ else {
+ atomic_add(r10_bio->sectors,
+ &conf->mirrors[d].rdev->corrected_errors);
+ if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
+ md_error(r10_bio->mddev,
+ conf->mirrors[d].rdev);
+ }
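The new accounting sits on the sync-read failure path because resync rewrites those sectors from a good copy, so they end up corrected rather than lost. This counter is what md exposes per member device; a sketch of the read side, assuming md.c's rdev sysfs attribute of this era:

	/* Sysfs "errors" attribute (sketch): userspace sees the running
	 * total of sectors corrected on this member device. */
	static ssize_t errors_show(mdk_rdev_t *rdev, char *page)
	{
		return sprintf(page, "%d\n",
			       atomic_read(&rdev->corrected_errors));
	}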
/* for reconstruct, we always reschedule after a read.
* for resync, only after all reads
*/
sl--;
d = r10_bio->devs[sl].devnum;
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
+ atomic_add(s, &rdev->corrected_errors);
if (sync_page_io(rdev->bdev,
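The rewrite loop leans on sync_page_io(); its 2.6-era prototype, for orientation (treat the exact signature as an assumption):

	/* Synchronous one-page transfer against a member device; returns
	 * nonzero on success. Callers WRITE the good data back over the bad
	 * sectors, then READ to confirm the fix took. */
	int sync_page_io(struct block_device *bdev, sector_t sector,
			 int size, struct page *page, int rw);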
int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
- if (conf->r10buf_pool)
- BUG();
+ BUG_ON(conf->r10buf_pool);
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
if (!conf->r10buf_pool)
return -ENOMEM;
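mempool_create() pre-fills the pool with `buffs` elements so resync can always make forward progress under memory pressure; the callback prototypes it expects look roughly like this (era-specific sketch, gfp_t spelling assumed):

	/* Constructor/destructor pair passed to mempool_create(); the last
	 * mempool_create() argument (conf here) comes back as `data`. */
	void *r10buf_pool_alloc(gfp_t gfp_flags, void *data);
	void r10buf_pool_free(void *obj, void *data);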
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
+MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");