bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
if (bio->bi_size == 0)
- bio_endio(bio, bio->bi_size, error);
+ bio_endio(bio, error);
} else {
/*
return bio->bi_size;
/* if it was bounced we must call the end io function */
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
return PTR_ERR(bio);
if (bio->bi_size != len) {
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
bio_unmap_user(bio);
return -EINVAL;
}
return 0;
end_io:
- bio_endio(bio, nr_sectors << 9, err);
+ bio_endio(bio, err);
return 0;
}
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
end_io:
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
break;
}
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
- bio_endio(bio, bio->bi_size, -ENOMEM);
+ bio_endio(bio, -ENOMEM);
return 0;
}
memset(buf, 0, sizeof(*buf));
d->aoemajor, d->aoeminor);
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
- bio_endio(bio, bio->bi_size, -ENXIO);
+ bio_endio(bio, -ENXIO);
return 0;
}
disk_stat_add(disk, sectors[rw], n_sect);
disk_stat_add(disk, io_ticks, duration);
n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
- bio_endio(buf->bio, buf->bio->bi_size, n);
+ bio_endio(buf->bio, n);
mempool_free(buf, d->bufpool);
}
}
bio = buf->bio;
if (--buf->nframesout == 0) {
mempool_free(buf, d->bufpool);
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
}
skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
}
list_del(d->bufq.next);
bio = buf->bio;
mempool_free(buf, d->bufpool);
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
}
if (d->gd)
int nr_sectors = bio_sectors(bio);
bio->bi_next = NULL;
- bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
+ bio_endio(bio, status ? 0 : -EIO);
bio = xbh;
}
}
xbh = bio->bi_next;
bio->bi_next = NULL;
- bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
+ bio_endio(bio, ok ? 0 : -EIO);
bio = xbh;
}
* a disk in the drive, and whether that disk is writable.
*/
-static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
+static void floppy_rb0_complete(struct bio *bio,
int err)
{
- if (bio->bi_size)
- return 1;
-
complete((struct completion *)bio->bi_private);
- return 0;
}
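For illustration, a minimal sketch of the submit side of this pattern (the helper below is hypothetical, not part of the patch): the caller parks a completion in bi_private and sleeps until the callback above fires, which under the new contract happens exactly once.

	static int read_rb0_sync(struct bio *bio)
	{
		DECLARE_COMPLETION_ONSTACK(done);

		bio->bi_private = &done;
		bio->bi_end_io = floppy_rb0_complete;

		submit_bio(READ, bio);
		/* floppy_rb0_complete() runs once, for the whole bio */
		wait_for_completion(&done);

		return test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
	}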
static int __floppy_read_block_0(struct block_device *bdev)
out:
spin_unlock_irq(&lo->lo_lock);
- bio_io_error(old_bio, old_bio->bi_size);
+ bio_io_error(old_bio);
return 0;
}
bio_put(bio);
} else {
int ret = do_bio_filebacked(lo, bio);
- bio_endio(bio, bio->bi_size, ret);
+ bio_endio(bio, ret);
}
}
}
}
-static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+static void pkt_end_io_read(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- if (bio->bi_size)
- return 1;
-
VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
wake_up(&pd->wqueue);
}
pkt_bio_finished(pd);
-
- return 0;
}
-static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
+static void pkt_end_io_packet_write(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- if (bio->bi_size)
- return 1;
-
VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
atomic_dec(&pkt->io_wait);
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
- return 0;
}
/*
while (bio) {
next = bio->bi_next;
bio->bi_next = NULL;
- bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
+ bio_endio(bio, uptodate ? 0 : -EIO);
bio = next;
}
pkt->orig_bios = pkt->orig_bios_tail = NULL;
}
-static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
+static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
- if (bio->bi_size)
- return 1;
-
bio_put(bio);
- bio_endio(psd->bio, psd->bio->bi_size, err);
+ bio_endio(psd->bio, err);
mempool_free(psd, psd_pool);
pkt_bio_finished(pd);
- return 0;
}
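A sketch of the submit side of this stacked pattern (the helper itself is illustrative; the field names follow pktcdvd): the clone carries the tracking struct in bi_private, and the original bio is ended only when the clone completes in pkt_end_io_read_cloned().

	static void pkt_forward_read(struct pktcdvd_device *pd, struct bio *bio)
	{
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
		struct bio *clone = bio_clone(bio, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		clone->bi_bdev = pd->bdev;
		clone->bi_private = psd;
		clone->bi_end_io = pkt_end_io_read_cloned;
		generic_make_request(clone);
	}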
static int pkt_make_request(struct request_queue *q, struct bio *bio)
}
return 0;
end_io:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
if (ret)
goto fail;
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
return_bio = bio->bi_next;
bio->bi_next = NULL;
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
}
}
if (!atomic_dec_and_test(&io->pending))
return;
- bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
+ bio_endio(io->base_bio, io->error);
mempool_free(io, cc->io_pool);
}
queue_work(_kcryptd_workqueue, &io->work);
}
-static int crypt_endio(struct bio *clone, unsigned int done, int error)
+static void crypt_endio(struct bio *clone, int error)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
/*
- * free the processed pages, even if
- * it's only a partially completed write
+ * free the processed pages
*/
- if (!read_io)
- crypt_free_buffer_pages(cc, clone, done);
-
- /* keep going - not finished yet */
- if (unlikely(clone->bi_size))
- return 1;
-
- if (!read_io)
+ if (!read_io) {
+ crypt_free_buffer_pages(cc, clone, clone->bi_size);
goto out;
+ }
if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
error = -EIO;
bio_put(clone);
io->post_process = 1;
kcryptd_queue_io(io);
- return 0;
+ return;
out:
bio_put(clone);
dec_pending(io, error);
- return error;
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
bio_put(bio);
}
-static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
+static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
/* We also need to look at the sense keys here to decide whether or not
* to switch to the next PG etc.
*
}
}
-static int endio(struct bio *bio, unsigned int done, int error)
+static void endio(struct bio *bio, int error)
{
struct io *io;
unsigned region;
- /* keep going until we've finished */
- if (bio->bi_size)
- return 1;
-
if (error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
bio_put(bio);
dec_count(io, region, error);
-
- return 0;
}
/*-----------------------------------------------------------------
r = map_io(m, bio, mpio, 1);
if (r < 0)
- bio_endio(bio, bio->bi_size, r);
+ bio_endio(bio, r);
else if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
bio = next;
}
break;
}
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
*/
if (unlikely(ms->log_failure))
while ((bio = bio_list_pop(&sync)))
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
bio = n;
}
}
break;
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;
blk_add_trace_bio(io->md->queue, io->bio,
BLK_TA_COMPLETE);
- bio_endio(io->bio, io->bio->bi_size, io->error);
+ bio_endio(io->bio, io->error);
}
free_io(io->md, io);
}
}
-static int clone_endio(struct bio *bio, unsigned int done, int error)
+static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (bio->bi_size)
- return 1;
-
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
- return 1;
+ return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
bio_put(bio);
free_tio(md, tio);
- return r;
}
static sector_t max_io_len(struct mapped_device *md,
ci.map = dm_get_table(md);
if (!ci.map) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return;
}
* guarantee it is (or can be) handled by the targets correctly.
*/
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
up_read(&md->io_lock);
if (bio_rw(bio) == READA) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
r = queue_io(md, bio);
if (r < 0) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
} else if (r == 0)
#include <linux/raid/md.h>
-static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error)
+static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
b->bi_size = bio->bi_size;
b->bi_sector = bio->bi_sector;
- if (bio->bi_size == 0)
- bio_put(bio);
+ bio_put(bio);
- clear_bit(BIO_UPTODATE, &b->bi_flags);
- return (b->bi_end_io)(b, bytes_done, -EIO);
+ bio_io_error(b);
}
typedef struct faulty_conf {
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
return 0;
}
sector_t block;
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
bdevname(tmp_dev->rdev->bdev, b),
(unsigned long long)tmp_dev->size,
(unsigned long long)tmp_dev->offset);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
static int md_fail_request (struct request_queue *q, struct bio *bio)
{
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
}
-static int super_written(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written(struct bio *bio, int error)
{
mdk_rdev_t *rdev = bio->bi_private;
mddev_t *mddev = rdev->mddev;
- if (bio->bi_size)
- return 1;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
- return 0;
}
-static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written_barrier(struct bio *bio, int error)
{
struct bio *bio2 = bio->bi_private;
mdk_rdev_t *rdev = bio2->bi_private;
mddev_t *mddev = rdev->mddev;
- if (bio->bi_size)
- return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
error == -EOPNOTSUPP) {
spin_unlock_irqrestore(&mddev->write_lock, flags);
wake_up(&mddev->sb_wait);
bio_put(bio);
- return 0;
+ } else {
+ bio_put(bio2);
+ bio->bi_private = rdev;
+ super_written(bio, error);
}
- bio_put(bio2);
- bio->bi_private = rdev;
- return super_written(bio, bytes_done, error);
}
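For context, roughly how the two callbacks are chained on the submit side (an abridged sketch of the md_super_write() arrangement, assuming the caller has already prepared bi_bdev, bi_sector and the page): the barrier clone keeps the plain bio in bi_private, so on success super_written_barrier() can unwrap it and fall through to super_written().

	static void write_sb_with_barrier(mdk_rdev_t *rdev, struct bio *bio)
	{
		struct bio *rbio = bio_clone(bio, GFP_NOIO);

		bio->bi_private = rdev;
		bio->bi_end_io = super_written;

		rbio->bi_private = bio;	/* unwrapped by super_written_barrier() */
		rbio->bi_end_io = super_written_barrier;
		submit_bio(WRITE_BARRIER, rbio);
	}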
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
finish_wait(&mddev->sb_wait, &wq);
}
-static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
+static void bi_complete(struct bio *bio, int error)
{
- if (bio->bi_size)
- return 1;
-
complete((struct completion*)bio->bi_private);
- return 0;
}
int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct bio *bio = mp_bh->master_bio;
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
- bio_endio(bio, bio->bi_size, err);
+ bio_endio(bio, err);
mempool_free(mp_bh, conf->pool);
}
-static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
- int error)
+static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
- if (bio->bi_size)
- return 1;
-
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
else if (!bio_rw_ahead(bio)) {
} else
multipath_end_bh_io(mp_bh, error);
rdev_dec_pending(rdev, conf->mddev);
- return 0;
}
static void unplug_slaves(mddev_t *mddev)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
mempool_free(mp_bh, conf->pool);
return 0;
}
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
" or bigger than %dk %llu %d\n", chunk_size,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
(unsigned long long) bio->bi_sector +
(bio->bi_size >> 9) - 1);
- bio_endio(bio, bio->bi_size,
+ bio_endio(bio,
test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
}
free_r1bio(r1_bio);
r1_bio->sector + (r1_bio->sectors);
}
-static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror;
conf_t *conf = mddev_to_conf(r1_bio->mddev);
- if (bio->bi_size)
- return 1;
-
mirror = r1_bio->read_disk;
/*
* this branch is our 'one mirror IO has finished' event handler:
}
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
- return 0;
}
-static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r1_bio->mddev);
struct bio *to_put = NULL;
- if (bio->bi_size)
- return 1;
for (mirror = 0; mirror < conf->raid_disks; mirror++)
if (r1_bio->bios[mirror] == bio)
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
(mbio->bi_size >> 9) - 1);
- bio_endio(mbio, mbio->bi_size, 0);
+ bio_endio(mbio, 0);
}
}
}
if (to_put)
bio_put(to_put);
-
- return 0;
}
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
if (rw == WRITE)
md_write_end(mddev);
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
}
-static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_read(struct bio *bio, int error)
{
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int i;
- if (bio->bi_size)
- return 1;
-
for (i=r1_bio->mddev->raid_disks; i--; )
if (r1_bio->bios[i] == bio)
break;
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
- return 0;
}
-static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int i;
int mirror=0;
- if (bio->bi_size)
- return 1;
-
for (i = 0; i < conf->raid_disks; i++)
if (r1_bio->bios[i] == bio) {
mirror = i;
md_done_sync(mddev, r1_bio->sectors, uptodate);
put_buf(r1_bio);
}
- return 0;
}
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
struct bio *bio = r10_bio->master_bio;
- bio_endio(bio, bio->bi_size,
+ bio_endio(bio,
test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
free_r10bio(r10_bio);
}
r10_bio->devs[slot].addr + (r10_bio->sectors);
}
-static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- if (bio->bi_size)
- return 1;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
- return 0;
}
-static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- if (bio->bi_size)
- return 1;
-
for (slot = 0; slot < conf->copies; slot++)
if (r10_bio->devs[slot].bio == bio)
break;
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
- return 0;
}
unsigned long flags;
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
" or bigger than %dk %llu %d\n", chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
}
-static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_read(struct bio *bio, int error)
{
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r10_bio->mddev);
int i,d;
- if (bio->bi_size)
- return 1;
-
for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
reschedule_retry(r10_bio);
}
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
- return 0;
}
-static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(mddev);
int i,d;
- if (bio->bi_size)
- return 1;
-
for (i = 0; i < conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
}
}
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
- return 0;
}
/*
if (test_bit(R10BIO_Uptodate, &r10_bio->state))
generic_make_request(wbio);
else
- bio_endio(wbio, wbio->bi_size, -EIO);
+ bio_endio(wbio, -EIO);
}
{
struct bio *bi = return_bi;
while (bi) {
- int bytes = bi->bi_size;
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
bi = return_bi;
return pending;
}
-static int
-raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error);
-static int
-raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error);
+static void
+raid5_end_read_request(struct bio *bi, int error);
+static void
+raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh)
{
conf->slab_cache = NULL;
}
-static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
- int error)
+static void raid5_end_read_request(struct bio * bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (uptodate) {
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
-static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
- int error)
+static void raid5_end_write_request(struct bio *bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
- if (bi->bi_size)
- return 1;
-
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
break;
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (!uptodate)
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
* first).
* If the read failed..
*/
-static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+static void raid5_align_endio(struct bio *bi, int error)
{
struct bio* raid_bi = bi->bi_private;
mddev_t *mddev;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- bio_endio(raid_bi, bytes, 0);
+ bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
- return 0;
+ return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
- return 0;
}
static int bio_fits_rdev(struct bio *bi)
int remaining;
if (unlikely(bio_barrier(bi))) {
- bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
+ bio_endio(bi, -EOPNOTSUPP);
return 0;
}
remaining = --bi->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = bi->bi_size;
if ( rw == WRITE )
md_write_end(mddev);
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
}
remaining = --raid_bio->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = raid_bio->bi_size;
- raid_bio->bi_size = 0;
- raid_bio->bi_end_io(raid_bio, bytes,
+ raid_bio->bi_end_io(raid_bio,
test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
? 0 : -EIO);
}
}
bytes_done += bvec->bv_len;
}
- bio_endio(bio, bytes_done, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
}
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
- bytes = bio->bi_size;
- bio->bi_size = 0;
- bio->bi_end_io(bio, bytes, 0);
+ bio_endio(bio, 0);
return 0;
fail:
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
return blk_rq_append_bio(q, rq, bio);
}
-static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
+static void scsi_bi_endio(struct bio *bio, int error)
{
- if (bio->bi_size)
- return 1;
-
bio_put(bio);
- return 0;
}
/**
if (bio->bi_vcnt >= nr_vecs) {
err = scsi_merge_bio(rq, bio);
if (err) {
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
goto free_bios;
}
bio = NULL;
/*
* call endio instead of bio_put in case it was bounced
*/
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
}
return err;
bio_put(bio);
}
-static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+static void bio_map_kern_endio(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
bio_put(bio);
- return 0;
}
/**
* bio_endio - end I/O on a bio
* @bio: bio
- * @bytes_done: number of bytes completed
* @error: error, if any
*
* Description:
- * bio_endio() will end I/O on @bytes_done number of bytes. This
- * must always be the whole (remaining) bio. bio_endio() is the
+ * bio_endio() will end I/O on the whole bio. bio_endio() is the
* preferred way to end I/O on a bio, it takes care of clearing
* BIO_UPTODATE on error. @error is 0 on success, and one of the
* established -Exxxx (-EIO, for instance) error values in case
* bio unless they own it and thus know that it has an end_io
* function.
**/
-void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
- if (unlikely(bytes_done != bio->bi_size)) {
- printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
- bytes_done, bio->bi_size);
- bytes_done = bio->bi_size;
- }
-
- bio->bi_size = 0; /* expected by some callees - will be removed */
if (bio->bi_end_io)
- bio->bi_end_io(bio, bytes_done, error);
+ bio->bi_end_io(bio, error);
}
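Under the new contract a completion callback runs exactly once, receives no byte count, and returns nothing. A minimal conforming callback might look like this (hypothetical driver, for illustration only; the completion is assumed to be initialised with init_completion() by the submitter):

	struct mydrv_io {
		struct completion done;
		int error;
	};

	static void mydrv_end_io(struct bio *bio, int error)
	{
		struct mydrv_io *io = bio->bi_private;

		/* the whole bio is finished; error is 0 or a negative errno */
		io->error = error;
		complete(&io->done);
		bio_put(bio);
	}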
void bio_pair_release(struct bio_pair *bp)
if (atomic_dec_and_test(&bp->cnt)) {
struct bio *master = bp->bio1.bi_private;
- bio_endio(master, master->bi_size, bp->error);
+ bio_endio(master, bp->error);
mempool_free(bp, bp->bio2.bi_private);
}
}
-static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_1(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
-static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_2(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
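The pair callbacks above are wired up by bio_split(); typical use looks like the following sketch (illustrative): each half is submitted independently, and whichever finishes last drops the final reference, ending the master bio via bio_pair_release().

	static void split_and_submit(struct bio *bio, int first_sectors)
	{
		struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);

		/* bio1/bio2 end via bio_pair_end_1/2 */
		generic_make_request(&bp->bio1);
		generic_make_request(&bp->bio2);
	}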
/*
}
#if 0
-static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+static void blk_end_aio(struct bio *bio, int error)
{
struct kiocb *iocb = bio->bi_private;
atomic_t *bio_count = &iocb->ki_bio_count;
return tmp.b_blocknr;
}
-static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
+static void end_bio_bh_io_sync(struct bio *bio, int err)
{
struct buffer_head *bh = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
set_bit(BH_Eopnotsupp, &bh->b_state);
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
- return 0;
}
int submit_bh(int rw, struct buffer_head * bh)
/*
* Asynchronous IO callback.
*/
-static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+static void dio_bio_end_aio(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long remaining;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
/* cleanup the bio */
dio_bio_complete(dio, bio);
aio_complete(dio->iocb, ret, 0);
kfree(dio);
}
-
- return 0;
}
/*
* During I/O bi_private points at the dio. After I/O, bi_private is used to
* implement a singly-linked list of completed BIOs, at dio->bio_list.
*/
-static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
+static void dio_bio_end_io(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
spin_lock_irqsave(&dio->bio_lock, flags);
bio->bi_private = dio->bio_list;
dio->bio_list = bio;
if (--dio->refcount == 1 && dio->waiter)
wake_up_process(dio->waiter);
spin_unlock_irqrestore(&dio->bio_lock, flags);
- return 0;
}
static int
}
-static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
+static void end_bio_io_page(struct bio *bio, int error)
{
struct page *page = bio->bi_private;
- if (bio->bi_size)
- return 1;
if (!error)
SetPageUptodate(page);
*
* executed at INTIODONE level
*/
-static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
+static void lbmIODone(struct bio *bio, int error)
{
struct lbuf *bp = bio->bi_private;
struct lbuf *nextbp, *tail;
struct jfs_log *log;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
/*
* get back jfs buffer bound to the i/o buffer
*/
unlock_page(page);
}
-static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done,
- int err)
+static void metapage_read_end_io(struct bio *bio, int err)
{
struct page *page = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_ERR "metapage_read_end_io: I/O error\n");
SetPageError(page);
end_page_writeback(page);
}
-static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done,
- int err)
+static void metapage_write_end_io(struct bio *bio, int err)
{
struct page *page = bio->bi_private;
BUG_ON(!PagePrivate(page));
- if (bio->bi_size)
- return 1;
-
if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_ERR "metapage_write_end_io: I/O error\n");
SetPageError(page);
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
-static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+static void mpage_end_io_read(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- if (bio->bi_size)
- return 1;
-
do {
struct page *page = bvec->bv_page;
unlock_page(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
- return 0;
}
-static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
+static void mpage_end_io_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- if (bio->bi_size)
- return 1;
-
do {
struct page *page = bvec->bv_page;
end_page_writeback(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
- return 0;
}
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
}
-static int o2hb_bio_end_io(struct bio *bio,
- unsigned int bytes_done,
+static void o2hb_bio_end_io(struct bio *bio,
int error)
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
wc->wc_error = error;
}
- if (bio->bi_size)
- return 1;
-
o2hb_bio_wait_dec(wc, 1);
bio_put(bio);
- return 0;
-STATIC int
+STATIC void
xfs_end_bio(
struct bio *bio,
- unsigned int bytes_done,
int error)
{
xfs_ioend_t *ioend = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
ASSERT(atomic_read(&bio->bi_cnt) >= 1);
ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
-STATIC int
+STATIC void
xfs_buf_bio_end_io(
struct bio *bio,
- unsigned int bytes_done,
int error)
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- if (bio->bi_size)
- return 1;
-
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
bp->b_error = EIO;
struct bio_set;
struct bio;
-typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
+typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
/*
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
-#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
+#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
* drivers should not use the __ version unless they _really_ want to
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
-extern void bio_endio(struct bio *, unsigned int, int);
+extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);
/* linux/mm/page_io.c */
extern int swap_readpage(struct file *, struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
+extern void end_swap_bio_read(struct bio *bio, int err);
/* linux/mm/swap_state.c */
extern struct address_space swapper_space;
mempool_free(bvec->bv_page, pool);
}
- bio_endio(bio_orig, bio_orig->bi_size, err);
+ bio_endio(bio_orig, err);
bio_put(bio);
}
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
+static void bounce_end_io_write(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
bounce_end_io(bio, page_pool, err);
- return 0;
}
-static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
+static void bounce_end_io_write_isa(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
bounce_end_io(bio, isa_page_pool, err);
- return 0;
}
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
bounce_end_io(bio, pool, err);
}
-static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+static void bounce_end_io_read(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
__bounce_end_io_read(bio, page_pool, err);
- return 0;
}
-static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
+static void bounce_end_io_read_isa(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
__bounce_end_io_read(bio, isa_page_pool, err);
- return 0;
}
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
return bio;
}
-static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err)
+static void end_swap_bio_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct page *page = bio->bi_io_vec[0].bv_page;
- if (bio->bi_size)
- return 1;
-
if (!uptodate) {
SetPageError(page);
/*
}
end_page_writeback(page);
bio_put(bio);
- return 0;
}
-int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err)
+void end_swap_bio_read(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct page *page = bio->bi_io_vec[0].bv_page;
- if (bio->bi_size)
- return 1;
-
if (!uptodate) {
SetPageError(page);
ClearPageUptodate(page);
}
unlock_page(page);
bio_put(bio);
- return 0;
}
/*