Commit 6712ecf8 authored by NeilBrown, committed by Jens Axboe

Drop 'size' argument from bio_endio and bi_end_io


As bi_end_io is only called once when the request is complete,
the 'size' argument is now redundant.  Remove it.

Now there is no need for bio_endio to subtract the size completed
from bi_size.  So don't do that either.

While we are at it, change bi_end_io to return void.
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 5bb23a68
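
To make the interface change concrete, here is a minimal before/after sketch of a bi_end_io handler, modelled on the floppy_rb0_complete hunk below. The handler names, the completion stored in bi_private, and the side-by-side layout are illustrative only and not part of the patch:

#include <linux/bio.h>
#include <linux/completion.h>

/* Before: the callback could fire for partial completions, so every
 * handler had to check bi_size and return 1 to say "not finished yet". */
static int my_end_io_old(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;	/* more of this bio is still outstanding */
	complete((struct completion *)bio->bi_private);
	return 0;
}

/* After: called exactly once, when the whole bio has completed, so the
 * bi_size check disappears and the handler returns void. */
static void my_end_io_new(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

Call sites change the same way, from bio_endio(bio, bio->bi_size, err) to bio_endio(bio, err). The first hunk below shows why this is safe: req_bio_endio only invokes bio_endio once bi_size has dropped to zero, so the handler is guaranteed to run exactly once per bio.
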
......@@ -547,7 +547,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
if (bio->bi_size == 0)
bio_endio(bio, bio->bi_size, error);
bio_endio(bio, error);
} else {
/*
......@@ -2401,7 +2401,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
return bio->bi_size;
/* if it was bounced we must call the end io function */
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
......@@ -2510,7 +2510,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return PTR_ERR(bio);
if (bio->bi_size != len) {
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
bio_unmap_user(bio);
return -EINVAL;
}
......@@ -3040,7 +3040,7 @@ out:
return 0;
end_io:
bio_endio(bio, nr_sectors << 9, err);
bio_endio(bio, err);
return 0;
}
......@@ -3187,7 +3187,7 @@ static inline void __generic_make_request(struct bio *bio)
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
end_io:
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
break;
}
......
......@@ -138,7 +138,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
bio_endio(bio, bio->bi_size, -ENOMEM);
bio_endio(bio, -ENOMEM);
return 0;
}
memset(buf, 0, sizeof(*buf));
......@@ -159,7 +159,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
d->aoemajor, d->aoeminor);
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -ENXIO);
bio_endio(bio, -ENXIO);
return 0;
}
......
......@@ -652,7 +652,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
disk_stat_add(disk, sectors[rw], n_sect);
disk_stat_add(disk, io_ticks, duration);
n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
bio_endio(buf->bio, buf->bio->bi_size, n);
bio_endio(buf->bio, n);
mempool_free(buf, d->bufpool);
}
}
......
......@@ -119,7 +119,7 @@ aoedev_downdev(struct aoedev *d)
bio = buf->bio;
if (--buf->nframesout == 0) {
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
}
skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
}
......@@ -130,7 +130,7 @@ aoedev_downdev(struct aoedev *d)
list_del(d->bufq.next);
bio = buf->bio;
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
}
if (d->gd)
......
......@@ -1194,7 +1194,7 @@ static inline void complete_buffers(struct bio *bio, int status)
int nr_sectors = bio_sectors(bio);
bio->bi_next = NULL;
bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
bio_endio(bio, status ? 0 : -EIO);
bio = xbh;
}
}
......
......@@ -987,7 +987,7 @@ static inline void complete_buffers(struct bio *bio, int ok)
xbh = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
bio_endio(bio, ok ? 0 : -EIO);
bio = xbh;
}
......
......@@ -3810,14 +3810,10 @@ static int check_floppy_change(struct gendisk *disk)
* a disk in the drive, and whether that disk is writable.
*/
static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
static void floppy_rb0_complete(struct bio *bio,
int err)
{
if (bio->bi_size)
return 1;
complete((struct completion *)bio->bi_private);
return 0;
}
static int __floppy_read_block_0(struct block_device *bdev)
......
......@@ -551,7 +551,7 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
out:
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio, old_bio->bi_size);
bio_io_error(old_bio);
return 0;
}
......@@ -580,7 +580,7 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
bio_put(bio);
} else {
int ret = do_bio_filebacked(lo, bio);
bio_endio(bio, bio->bi_size, ret);
bio_endio(bio, ret);
}
}
......
......@@ -1058,15 +1058,12 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
}
}
static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_read(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
if (bio->bi_size)
return 1;
VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
......@@ -1077,19 +1074,14 @@ static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
wake_up(&pd->wqueue);
}
pkt_bio_finished(pd);
return 0;
}
static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
if (bio->bi_size)
return 1;
VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
......@@ -1098,7 +1090,6 @@ static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int
atomic_dec(&pkt->io_wait);
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
return 0;
}
/*
......@@ -1470,7 +1461,7 @@ static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
while (bio) {
next = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
bio_endio(bio, uptodate ? 0 : -EIO);
bio = next;
}
pkt->orig_bios = pkt->orig_bios_tail = NULL;
......@@ -2462,19 +2453,15 @@ static int pkt_close(struct inode *inode, struct file *file)
}
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
if (bio->bi_size)
return 1;
bio_put(bio);
bio_endio(psd->bio, psd->bio->bi_size, err);
bio_endio(psd->bio, err);
mempool_free(psd, psd_pool);
pkt_bio_finished(pd);
return 0;
}
static int pkt_make_request(struct request_queue *q, struct bio *bio)
......@@ -2620,7 +2607,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
}
return 0;
end_io:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
......
......@@ -287,10 +287,10 @@ static int rd_make_request(struct request_queue *q, struct bio *bio)
if (ret)
goto fail;
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
return 0;
fail:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
......
......@@ -545,7 +545,7 @@ static void process_page(unsigned long data)
return_bio = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
}
}
......
......@@ -489,7 +489,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
if (!atomic_dec_and_test(&io->pending))
return;
bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
bio_endio(io->base_bio, io->error);
mempool_free(io, cc->io_pool);
}
......@@ -509,25 +509,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
queue_work(_kcryptd_workqueue, &io->work);
}
static int crypt_endio(struct bio *clone, unsigned int done, int error)
static void crypt_endio(struct bio *clone, int error)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
/*
* free the processed pages, even if
* it's only a partially completed write
* free the processed pages
*/
if (!read_io)
crypt_free_buffer_pages(cc, clone, done);
/* keep going - not finished yet */
if (unlikely(clone->bi_size))
return 1;
if (!read_io)
if (!read_io) {
crypt_free_buffer_pages(cc, clone, clone->bi_size);
goto out;
}
if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
error = -EIO;
......@@ -537,12 +531,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
bio_put(clone);
io->post_process = 1;
kcryptd_queue_io(io);
return 0;
return;
out:
bio_put(clone);
dec_pending(io, error);
return error;
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
......
......@@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
bio_put(bio);
}
static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
if (bio->bi_size)
return 1;
/* We also need to look at the sense keys here whether or not to
* switch to the next PG etc.
*
......
......@@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
}
}
static int endio(struct bio *bio, unsigned int done, int error)
static void endio(struct bio *bio, int error)
{
struct io *io;
unsigned region;
/* keep going until we've finished */
if (bio->bi_size)
return 1;
if (error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
......@@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
dec_count(io, region, error);
return 0;
}
/*-----------------------------------------------------------------
......
......@@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
r = map_io(m, bio, mpio, 1);
if (r < 0)
bio_endio(bio, bio->bi_size, r);
bio_endio(bio, r);
else if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
else if (r == DM_MAPIO_REQUEUE)
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
bio = next;
}
......
......@@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
break;
}
}
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
......@@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
if (unlikely(ms->log_failure))
while ((bio = bio_list_pop(&sync)))
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
......
......@@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
bio = n;
}
}
......
......@@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
break;
}
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;
......
......@@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
blk_add_trace_bio(io->md->queue, io->bio,
BLK_TA_COMPLETE);
bio_endio(io->bio, io->bio->bi_size, io->error);
bio_endio(io->bio, io->error);
}
free_io(io->md, io);
}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
if (bio->bi_size)
return 1;
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
......@@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
return 1;
return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
......@@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
free_tio(md, tio);
return r;
}
static sector_t max_io_len(struct mapped_device *md,
......@@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_table(md);
if (!ci.map) {
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return;
}
......@@ -803,7 +799,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
* guarantee it is (or can be) handled by the targets correctly.
*/
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
......@@ -820,13 +816,13 @@ static int dm_request(struct request_queue *q, struct bio *bio)
up_read(&md->io_lock);
if (bio_rw(bio) == READA) {
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
r = queue_io(md, bio);
if (r < 0) {
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
} else if (r == 0)
......
......@@ -65,18 +65,16 @@
#include <linux/raid/md.h>
static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error)
static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
b->bi_size = bio->bi_size;
b->bi_sector = bio->bi_sector;
if (bio->bi_size == 0)
bio_put(bio);
bio_put(bio);
clear_bit(BIO_UPTODATE, &b->bi_flags);
return (b->bi_end_io)(b, bytes_done, -EIO);
bio_io_error(b);
}
typedef struct faulty_conf {
......@@ -179,7 +177,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
return 0;
}
......