X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-table.c;h=2bcde5798b5a6c27dc9b07b6ea9325a88dfdf5f8;hb=35cc0b975057389548bfe5703d438fe0deb4807e;hp=75fe9493e6af47059dbe79819b16683455304be8;hpb=61a46dc9d1c10d07a2ed6b7d346b868803b52506;p=linux-2.6

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 75fe9493e6..2bcde5798b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,13 +425,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
 }
 
 /*
- * If possible (ie. blk_size[major] is set), this checks an area
- * of a destination device is valid.
+ * If possible, this checks an area of a destination device is valid.
  */
 static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
 {
-	sector_t dev_size;
-	dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+
+	if (!dev_size)
+		return 1;
+
 	return ((start < dev_size) && (len <= (dev_size - start)));
 }
 
@@ -522,56 +524,61 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	return 0;
 }
 
-
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
-		  sector_t len, int mode, struct dm_dev **result)
+void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-	int r = __table_get_device(ti->table, ti, path,
-				   start, len, mode, result);
-	if (!r) {
-		request_queue_t *q = bdev_get_queue((*result)->bdev);
-		struct io_restrictions *rs = &ti->limits;
-
-		/*
-		 * Combine the device limits low.
-		 *
-		 * FIXME: if we move an io_restriction struct
-		 *        into q this would just be a call to
-		 *        combine_restrictions_low()
-		 */
+	struct request_queue *q = bdev_get_queue(bdev);
+	struct io_restrictions *rs = &ti->limits;
+
+	/*
+	 * Combine the device limits low.
+	 *
+	 * FIXME: if we move an io_restriction struct
+	 *        into q this would just be a call to
+	 *        combine_restrictions_low()
+	 */
+	rs->max_sectors =
+		min_not_zero(rs->max_sectors, q->max_sectors);
+
+	/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
+	 * currently doesn't honor MD's merge_bvec_fn routine.
+	 * In this case, we'll force DM to use PAGE_SIZE or
+	 * smaller I/O, just to be safe. A better fix is in the
+	 * works, but add this for the time being so it will at
+	 * least operate correctly.
+	 */
+	if (q->merge_bvec_fn)
 		rs->max_sectors =
-			min_not_zero(rs->max_sectors, q->max_sectors);
+			min_not_zero(rs->max_sectors,
+				     (unsigned int) (PAGE_SIZE >> 9));
 
-		/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
-		 * currently doesn't honor MD's merge_bvec_fn routine.
-		 * In this case, we'll force DM to use PAGE_SIZE or
-		 * smaller I/O, just to be safe. A better fix is in the
-		 * works, but add this for the time being so it will at
-		 * least operate correctly.
-		 */
-		if (q->merge_bvec_fn)
-			rs->max_sectors =
-				min_not_zero(rs->max_sectors,
-					     (unsigned int) (PAGE_SIZE >> 9));
+	rs->max_phys_segments =
+		min_not_zero(rs->max_phys_segments,
+			     q->max_phys_segments);
 
-		rs->max_phys_segments =
-			min_not_zero(rs->max_phys_segments,
-				     q->max_phys_segments);
+	rs->max_hw_segments =
+		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
 
-		rs->max_hw_segments =
-			min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
 
-		rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->max_segment_size =
+		min_not_zero(rs->max_segment_size, q->max_segment_size);
 
-		rs->max_segment_size =
-			min_not_zero(rs->max_segment_size, q->max_segment_size);
+	rs->seg_boundary_mask =
+		min_not_zero(rs->seg_boundary_mask,
+			     q->seg_boundary_mask);
 
-		rs->seg_boundary_mask =
-			min_not_zero(rs->seg_boundary_mask,
-				     q->seg_boundary_mask);
+	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+}
+EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
-		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-	}
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+		  sector_t len, int mode, struct dm_dev **result)
+{
+	int r = __table_get_device(ti->table, ti, path,
+				   start, len, mode, result);
+
+	if (!r)
+		dm_set_device_limits(ti, (*result)->bdev);
 
 	return r;
 }
@@ -939,9 +946,20 @@ void dm_table_postsuspend_targets(struct dm_table *t)
 	return suspend_targets(t, 1);
 }
 
-void dm_table_resume_targets(struct dm_table *t)
+int dm_table_resume_targets(struct dm_table *t)
 {
-	int i;
+	int i, r = 0;
+
+	for (i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = t->targets + i;
+
+		if (!ti->type->preresume)
+			continue;
+
+		r = ti->type->preresume(ti);
+		if (r)
+			return r;
+	}
 
 	for (i = 0; i < t->num_targets; i++) {
 		struct dm_target *ti = t->targets + i;
@@ -949,6 +967,8 @@ void dm_table_resume_targets(struct dm_table *t)
 		if (ti->type->resume)
 			ti->type->resume(ti);
 	}
+
+	return 0;
 }
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
@@ -959,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 	devices = dm_table_get_devices(t);
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}
@@ -972,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
 
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 
 		if (q->unplug_fn)
 			q->unplug_fn(q);
@@ -983,10 +1003,15 @@ int dm_table_flush_all(struct dm_table *t)
 {
 	struct list_head *d, *devices = dm_table_get_devices(t);
 	int ret = 0;
+	unsigned i;
+
+	for (i = 0; i < t->num_targets; i++)
+		if (t->targets[i].type->flush)
+			t->targets[i].type->flush(&t->targets[i]);
 
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		int err;
 
 		if (!q->issue_flush_fn)
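For context, the sketch below is not part of the patch. It shows roughly how a device-mapper target constructor of this era calls dm_get_device(), which after this change routes a successful lookup through the newly exported dm_set_device_limits() so the underlying queue's restrictions are merged into ti->limits. It is a minimal, hypothetical example modelled loosely on dm-linear; the example_c structure, example_ctr name and error strings are invented for illustration.

/*
 * Hypothetical target constructor, illustrating the dm_get_device()
 * call path touched by this patch.  Not taken from the patched file.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

struct example_c {			/* hypothetical per-target context */
	struct dm_dev *dev;
	sector_t start;
};

/* Construct an example mapping: <dev_path> <offset> */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_c *ec;
	unsigned long long tmp;

	if (argc != 2) {
		ti->error = "example: invalid argument count";
		return -EINVAL;
	}

	ec = kmalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "example: cannot allocate context";
		return -ENOMEM;
	}

	if (sscanf(argv[1], "%llu", &tmp) != 1) {
		ti->error = "example: invalid device offset";
		goto bad;
	}
	ec->start = tmp;

	/*
	 * dm_get_device() opens the device, validates the requested area
	 * and, via dm_set_device_limits(), folds the underlying queue's
	 * limits into ti->limits.
	 */
	if (dm_get_device(ti, argv[0], ec->start, ti->len,
			  dm_table_get_mode(ti->table), &ec->dev)) {
		ti->error = "example: device lookup failed";
		goto bad;
	}

	ti->private = ec;
	return 0;

 bad:
	kfree(ec);
	return -EINVAL;
}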