Commit 1ce48904 authored by Linus Torvalds

Merge branch 'block-2.6.24' of git://git.kernel.dk/data/git/linux-2.6-block

* 'block-2.6.24' of git://git.kernel.dk/data/git/linux-2.6-block: (37 commits)
  [BLOCK] Fix failing compile with BLK_DEV_IO_TRACE=n
  compat_ioctl: move floppy handlers to block/compat_ioctl.c
  compat_ioctl: move cdrom handlers to block/compat_ioctl.c
  compat_ioctl: move BLKPG handling to block/compat_ioctl.c
  compat_ioctl: move hdio calls to block/compat_ioctl.c
  compat_ioctl: handle blk_trace ioctls
  compat_ioctl: add compat_blkdev_driver_ioctl()
  compat_ioctl: move common block ioctls to compat_blkdev_ioctl
  Sysace: Don't enable IRQ until after interrupt handler is registered
  Sysace: sparse fixes
  Sysace: Minor coding convention fixup
  drivers/block/umem: use DRIVER_NAME where appropriate
  drivers/block/umem: trim trailing whitespace
  drivers/block/umem: minor cleanups
  drivers/block/umem: use dev_printk()
  drivers/block/umem: move private include away from include/linux
  Sysace: Labels in C code should not be indented.
  Sysace: Add of_platform_bus binding
  Sysace: Move IRQ handler registration to occur after FSM is initialized
  Sysace: minor rework and cleanup changes
  ...
parents 55982fd1 780513ec
......@@ -477,9 +477,9 @@ With this multipage bio design:
the same bi_io_vec array, but with the index and size accordingly modified)
- A linked list of bios is used as before for unrelated merges (*) - this
avoids reallocs and makes independent completions easier to handle.
- Code that traverses the req list needs to make a distinction between
segments of a request (bio_for_each_segment) and the distinct completion
units/bios (rq_for_each_bio).
- Code that traverses the req list can find all the segments of a bio
by using rq_for_each_segment. This handles the fact that a request
has multiple bios, each of which can have multiple segments.
- Drivers which can't process a large bio in one shot can use the bi_idx
field to keep track of the next bio_vec entry to process.
(e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
......@@ -664,14 +664,14 @@ in lvm or md.
3.2.1 Traversing segments and completion units in a request
The macros bio_for_each_segment() and rq_for_each_bio() should be used for
traversing the bios in the request list (drivers should avoid directly
trying to do it themselves). Using these helpers should also make it easier
to cope with block changes in the future.
The macro rq_for_each_segment() should be used for traversing the bios
in the request list (drivers should avoid directly trying to do it
themselves). Using these helpers should also make it easier to cope
with block changes in the future.
rq_for_each_bio(bio, rq)
bio_for_each_segment(bio_vec, bio, i)
/* bio_vec is now current segment */
struct req_iterator iter;
rq_for_each_segment(bio_vec, rq, iter)
/* bio_vec is now current segment */
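
As a minimal (hypothetical) sketch, a driver that only needs the total
payload of a request can sum the segment lengths with rq_for_each_segment,
with no explicit inner loop over the bios:

struct req_iterator iter;
struct bio_vec *bvec;
unsigned int bytes = 0;

rq_for_each_segment(bvec, rq, iter)
bytes += bvec->bv_len;
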
I/O completion callbacks are per-bio rather than per-segment, so drivers
that traverse bio chains on completion need to keep that in mind. Drivers
......
......@@ -86,8 +86,15 @@ extern int sys_ioprio_get(int, int);
#error "Unsupported arch"
#endif
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio);
_syscall2(int, ioprio_get, int, which, int, who);
static inline int ioprio_set(int which, int who, int ioprio)
{
return syscall(__NR_ioprio_set, which, who, ioprio);
}
static inline int ioprio_get(int which, int who)
{
return syscall(__NR_ioprio_get, which, who);
}
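/*
 * Illustrative sketch only (the helper name is made up): with the wrappers
 * above, a process can move itself into the best-effort class at priority 7.
 * The constants follow the kernel ioprio ABI: "who" value 1 is
 * IOPRIO_WHO_PROCESS, pid 0 means the calling process, and the class
 * (2 = best-effort) is shifted by IOPRIO_CLASS_SHIFT (13) and or'ed with
 * the priority level.
 */
static inline int ioprio_demote_self(void)
{
return ioprio_set(1 /* IOPRIO_WHO_PROCESS */, 0,
(2 /* IOPRIO_CLASS_BE */ << 13) | 7);
}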
enum {
IOPRIO_CLASS_NONE,
......
......@@ -4156,6 +4156,13 @@ W: http://oss.sgi.com/projects/xfs
T: git git://oss.sgi.com:8090/xfs/xfs-2.6.git
S: Supported
XILINX SYSTEMACE DRIVER
P: Grant Likely
M: grant.likely@secretlab.ca
W: http://www.secretlab.ca/
L: linux-kernel@vger.kernel.org
S: Maintained
XILINX UARTLITE SERIAL DRIVER
P: Peter Korsgaard
M: jacmet@sunsite.dk
......
......@@ -11,3 +11,4 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_COMPAT) += compat_ioctl.o
......@@ -312,33 +312,26 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
struct blk_user_trace_setup *buts)
{
struct blk_user_trace_setup buts;
struct blk_trace *old_bt, *bt = NULL;
struct dentry *dir = NULL;
char b[BDEVNAME_SIZE];
int ret, i;
if (copy_from_user(&buts, arg, sizeof(buts)))
return -EFAULT;
if (!buts.buf_size || !buts.buf_nr)
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
strcpy(buts.name, bdevname(bdev, b));
strcpy(buts->name, bdevname(bdev, b));
/*
* some device names have larger paths - convert the slashes
* to underscores for this to work as expected
*/
for (i = 0; i < strlen(buts.name); i++)
if (buts.name[i] == '/')
buts.name[i] = '_';
if (copy_to_user(arg, &buts, sizeof(buts)))
return -EFAULT;
for (i = 0; i < strlen(buts->name); i++)
if (buts->name[i] == '/')
buts->name[i] = '_';
ret = -ENOMEM;
bt = kzalloc(sizeof(*bt), GFP_KERNEL);
......@@ -350,7 +343,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
goto err;
ret = -ENOENT;
dir = blk_create_tree(buts.name);
dir = blk_create_tree(buts->name);
if (!dir)
goto err;
......@@ -363,20 +356,21 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
if (!bt->dropped_file)
goto err;
bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks, bt);
bt->rchan = relay_open("trace", dir, buts->buf_size,
buts->buf_nr, &blk_relay_callbacks, bt);
if (!bt->rchan)
goto err;
bt->act_mask = buts.act_mask;
bt->act_mask = buts->act_mask;
if (!bt->act_mask)
bt->act_mask = (u16) -1;
bt->start_lba = buts.start_lba;
bt->end_lba = buts.end_lba;
bt->start_lba = buts->start_lba;
bt->end_lba = buts->end_lba;
if (!bt->end_lba)
bt->end_lba = -1ULL;
bt->pid = buts.pid;
bt->pid = buts->pid;
bt->trace_state = Blktrace_setup;
ret = -EBUSY;
......@@ -401,6 +395,26 @@ err:
return ret;
}
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
int ret;
ret = copy_from_user(&buts, arg, sizeof(buts));
if (ret)
return -EFAULT;
ret = do_blk_trace_setup(q, bdev, &buts);
if (ret)
return ret;
if (copy_to_user(arg, &buts, sizeof(buts)))
return -EFAULT;
return 0;
}
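/*
 * Illustrative sketch only (function name and values are made up): with the
 * user-copy steps now confined to blk_trace_setup() above, an in-kernel
 * caller that already holds a native struct blk_user_trace_setup -- for
 * instance a 32-bit compat handler after converting its argument -- can
 * start tracing by calling do_blk_trace_setup() directly.
 */
static int example_blk_trace_setup(struct request_queue *q,
struct block_device *bdev)
{
struct blk_user_trace_setup buts = {
.buf_size = 512 * 1024, /* relay sub-buffer size */
.buf_nr = 4, /* number of sub-buffers */
.act_mask = 0, /* 0 is widened to "trace everything" */
};

return do_blk_trace_setup(q, bdev, &buts);
}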
static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
......
......@@ -217,6 +217,10 @@ int blkdev_driver_ioctl(struct inode *inode, struct file *file,
}
EXPORT_SYMBOL_GPL(blkdev_driver_ioctl);
/*
* always keep this in sync with compat_blkdev_ioctl() and
* compat_blkdev_locked_ioctl()
*/
int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
unsigned long arg)
{
......@@ -284,21 +288,4 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
}
/* Most of the generic ioctls are handled in the normal fallback path.
This assumes the blkdev's low level compat_ioctl always returns
ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
int ret = -ENOIOCTLCMD;
if (disk->fops->compat_ioctl) {
lock_kernel();
ret = disk->fops->compat_ioctl(file, cmd, arg);
unlock_kernel();
}
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);
......@@ -42,6 +42,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
static void blk_recalc_rq_segments(struct request *rq);
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
/*
* For the allocated request tables
......@@ -428,7 +431,6 @@ static void queue_flush(struct request_queue *q, unsigned which)
static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;
......@@ -525,56 +527,36 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
return 1;
}
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
* this request again with the original bi_end_io after an
* error occurs or post flush is complete.
*/
q->bi_size += bytes;
if (bio->bi_size)
return 1;
/* Reset bio */
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_size = q->bi_size;
bio->bi_sector -= (q->bi_size >> 9);
q->bi_size = 0;
return 0;
}
static int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
if (&q->bar_rq != rq)
return 0;
/*
* Okay, this is the barrier request in progress, dry finish it.
*/
if (error && !q->orderr)
q->orderr = error;
if (&q->bar_rq != rq) {
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
endio = bio->bi_end_io;
private = bio->bi_private;
bio->bi_end_io = flush_dry_bio_endio;
bio->bi_private = q;
bio_endio(bio, nbytes, error);
if (unlikely(nbytes > bio->bi_size)) {
printk("%s: want %u bytes done, only %u left\n",
__FUNCTION__, nbytes, bio->bi_size);
nbytes = bio->bi_size;
}
bio->bi_end_io = endio;
bio->bi_private = private;
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
if (bio->bi_size == 0)
bio_endio(bio, error);
} else {
return 1;
/*
* Okay, this is the barrier request in progress, just
* record the error;
*/
if (error && !q->orderr)
q->orderr = error;
}
}
/**
......@@ -1220,16 +1202,40 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
struct request rq;
struct bio *nxt = bio->bi_next;
rq.q = q;
rq.bio = rq.biotail = bio;
bio->bi_next = NULL;
blk_recalc_rq_segments(&rq);
bio->bi_next = nxt;
bio->bi_phys_segments = rq.nr_phys_segments;
bio->bi_hw_segments = rq.nr_hw_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
static void blk_recalc_rq_segments(struct request *rq)
{
int nr_phys_segs;
int nr_hw_segs;
unsigned int phys_size;
unsigned int hw_size;
struct bio_vec *bv, *bvprv = NULL;
int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
int seg_size;
int hw_seg_size;
int cluster;
struct req_iterator iter;
int high, highprv = 1;
struct request_queue *q = rq->q;
if (unlikely(!bio->bi_io_vec))
if (!rq->bio)
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
bio_for_each_segment(bv, bio, i) {
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
......@@ -1255,12 +1261,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
}
new_segment:
if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
hw_seg_size += bv->bv_len;
} else {
else {
new_hw_segment:
if (hw_seg_size > bio->bi_hw_front_size)
bio->bi_hw_front_size = hw_seg_size;
if (nr_hw_segs == 1 &&
hw_seg_size > rq->bio->bi_hw_front_size)
rq->bio->bi_hw_front_size = hw_seg_size;
hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
nr_hw_segs++;
}
......@@ -1270,15 +1277,15 @@ new_hw_segment:
seg_size = bv->bv_len;
highprv = high;
}
if (hw_seg_size > bio->bi_hw_back_size)
bio->bi_hw_back_size = hw_seg_size;
if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
bio->bi_hw_front_size = hw_seg_size;
bio->bi_phys_segments = nr_phys_segs;
bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
if (nr_hw_segs == 1 &&
hw_seg_size > rq->bio->bi_hw_front_size)
rq->bio->bi_hw_front_size = hw_seg_size;
if (hw_seg_size > rq->biotail->bi_hw_back_size)
rq->biotail->bi_hw_back_size = hw_seg_size;
rq->nr_phys_segments = nr_phys_segs;
rq->nr_hw_segments = nr_hw_segs;
}
EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
......@@ -1325,8 +1332,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
int nsegs, i, cluster;
struct req_iterator iter;
int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
......@@ -1335,35 +1342,30 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* for each bio in rq
*/
bvprv = NULL;
rq_for_each_bio(bio, rq) {
/*
* for each segment in bio
*/
bio_for_each_segment(bvec, bio, i) {
int nbytes = bvec->bv_len;
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
if (bvprv && cluster) {
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
sg[nsegs - 1].length += nbytes;
} else {
sg[nsegs - 1].length += nbytes;
} else {
new_segment:
memset(&sg[nsegs],0,sizeof(struct scatterlist));
sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes;
sg[nsegs].offset = bvec->bv_offset;
memset(&sg[nsegs],0,sizeof(struct scatterlist));
sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes;
sg[nsegs].offset = bvec->bv_offset;
nsegs++;
}
bvprv = bvec;
} /* segments in bio */
} /* bios in rq */
nsegs++;
}
bvprv = bvec;
} /* segments in rq */
return nsegs;
}
......@@ -1420,7 +1422,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
return 1;
}
int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
static int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
int len;
......@@ -1456,7 +1459,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *b
return ll_new_hw_segment(q, req, bio);
}
EXPORT_SYMBOL(ll_back_merge_fn);
static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
......@@ -2346,6 +2348,23 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
}
return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
......@@ -2377,23 +2396,12 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
*/
bio_get(bio);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio)) {
ret = -EINVAL;
goto unmap_bio;
} else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
}
return bio->bi_size;
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
return bio->bi_size;
unmap_bio:
/* if it was bounced we must call the end io function */
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
......@@ -2502,7 +2510,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return PTR_ERR(bio);
if (bio->bi_size != len) {
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
bio_unmap_user(bio);
return -EINVAL;
}
......@@ -2912,15 +2920,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
req->nr_phys_segments = bio_phys_segments(req->q, bio);
req->nr_hw_segments = bio_hw_segments(req->q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->bio = req->biotail = bio;
req->ioprio = bio_prio(bio);
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
blk_rq_bio_prep(req->q, req, bio);
}
static int __make_request(struct request_queue *q, struct bio *bio)
......@@ -3038,7 +3040,7 @@ out:
return 0;
end_io:
bio_endio(bio, nr_sectors << 9, err);
bio_endio(bio, err);
return 0;
}
......@@ -3185,7 +3187,7 @@ static inline void __generic_make_request(struct bio *bio)
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
end_io:
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
break;
}
......@@ -3329,48 +3331,6 @@ void submit_bio(int rw, struct bio *bio)
EXPORT_SYMBOL(submit_bio);
static void blk_recalc_rq_segments(struct request *rq)
{
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
unsigned int phys_size, hw_size;
struct request_queue *q = rq->q;
if (!rq->bio)
return;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_bio(bio, rq) {
/* Force bio hw/phys segs to be recalculated. */
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
nr_phys_segs += bio_phys_segments(q, bio);
nr_hw_segs += bio_hw_segments(q, bio);
if (prevbio) {
int pseg = phys_size + prevbio->bi_size + bio->bi_size;
int hseg = hw_size + prevbio->bi_size + bio->bi_size;
if (blk_phys_contig_segment(q, prevbio, bio) &&
pseg <= q->max_segment_size) {
nr_phys_segs--;
phys_size += prevbio->bi_size + bio->bi_size;
} else
phys_size = 0;
if (blk_hw_contig_segment(q, prevbio, bio) &&
hseg <= q->max_segment_size) {
nr_hw_segs--;
hw_size += prevbio->bi_size + bio->bi_size;
} else
hw_size = 0;
}
prevbio = bio;
}
rq->nr_phys_segments = nr_phys_segs;
rq->nr_hw_segments = nr_hw_segs;
}
static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq)) {
......@@ -3442,8 +3402,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
if (!ordered_bio_endio(req, bio, nbytes, error))
bio_endio(bio, nbytes, error);
req_bio_endio(req, bio, nbytes, error);
next_idx = 0;
bio_nbytes = 0;
} else {
......@@ -3498,8 +3457,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
* if the request wasn't completed, update state
*/
if (bio_nbytes) {
if (!ordered_bio_endio(req, bio, bio_nbytes, error))
bio_endio(bio, bio_nbytes, error);
req_bio_endio(req, bio, bio_nbytes, error);
bio->bi_idx += next_idx;
bio_iovec(bio)->bv_offset += nr_bytes;
bio_iovec(bio)->bv_len -= nr_bytes;
......@@ -3574,7 +3532,7 @@ static void blk_done_softirq(struct softirq_action *h)
}
}
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
/*
......@@ -3595,7 +3553,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
}
static struct notifier_block __devinitdata blk_cpu_notifier = {
static struct notifier_block blk_cpu_notifier __cpuinitdata = {
.notifier_call = blk_cpu_notify,
};
......@@ -3680,8 +3638,8 @@ void end_request(struct request *req, int uptodate)
EXPORT_SYMBOL(end_request);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= (bio->bi_rw & 3);
......@@ -3695,9 +3653,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
}
EXPORT_SYMBOL(blk_rq_bio_prep);
if (bio->bi_bdev)
rq->rq_disk = bio->bi_bdev->bd_disk;
}
int kblockd_schedule_work(struct work_struct *work)
{
......
......@@ -138,7 +138,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
bio_endio(bio, bio->bi_size, -ENOMEM);
bio_endio(bio, -ENOMEM);
return 0;
}
memset(buf, 0, sizeof(*buf));
......@@ -159,7 +159,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
d->aoemajor, d->aoeminor);
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -ENXIO);
bio_endio(bio, -ENXIO);
return 0;
}
......
......@@ -652,7 +652,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
disk_stat_add(disk, sectors[rw], n_sect);
disk_stat_add(disk, io_ticks, duration);
n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
bio_endio(buf->bio, buf->bio->bi_size, n);
bio_endio(buf->bio, n);
mempool_free(buf, d->bufpool);
}
}
......
......@@ -119,7 +119,7 @@ aoedev_downdev(struct aoedev *d)
bio = buf->bio;
if (--buf->nframesout == 0) {
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
}
skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
}
......@@ -130,7 +130,7 @@ aoedev_downdev(struct aoedev *d)
list_del(d->bufq.next);
bio = buf->bio;
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
}
if (d->gd)
......
......@@ -1194,7 +1194,7 @@ static inline void complete_buffers(struct bio *bio, int status)
int nr_sectors = bio_sectors(bio);
bio->bi_next = NULL;
bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
bio_endio(bio, status ? 0 : -EIO);
bio = xbh;
}
}
......
......@@ -987,7 +987,7 @@ static inline void complete_buffers(struct bio *bio, int ok)
xbh = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
bio_endio(bio, ok ? 0 : -EIO);
bio = xbh;
}
......
......@@ -2437,22 +2437,19 @@ static void rw_interrupt(void)
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
struct bio *bio;
struct bio_vec *bv;
int size, i;
int size;
struct req_iterator iter;
char *base;
base = bio_data(current_req->bio);
size = 0;
rq_for_each_bio(bio, current_req) {
bio_for_each_segment(bv, bio, i) {
if (page_address(bv->bv_page) + bv->bv_offset !=
base + size)
break;
size += bv->bv_len;
}
}
rq_for_each_segment(bv, current_req, iter) {
if (page_address(bv->bv_page) + bv->bv_offset != base + size)
break;
size += bv->bv_len;
}
return size >> 9;
......@@ -2479,9 +2476,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec *bv;
struct bio *bio;
char *buffer, *dma_buffer;
int size, i;
int size;
struct req_iterator iter;
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
......@@ -2514,43 +2511,41 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
size = current_req->current_nr_sectors << 9;
rq_for_each_bio(bio, current_req) {
bio_for_each_segment(bv, bio, i) {
if (!remaining)
break;
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
break;
size = bv->bv_len;
SUPBOUND(size, remaining);
size = bv->bv_len;
SUPBOUND(size, remaining);
buffer = page_address(bv->bv_page) + bv->bv_offset;
buffer = page_address(bv->bv_page) + bv->bv_offset;
#ifdef FLOPPY_SANITY_CHECK
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
DPRINT("buffer overrun in copy buffer %d\n",
(int)((floppy_track_buffer -
dma_buffer) >> 9));
printk("fsector_t=%d buffer_min=%d\n",
fsector_t, buffer_min);
printk("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(COMMAND) == FD_READ)
printk("read\n");
if (CT(COMMAND) == FD_WRITE)
printk("write\n");
break;
}
if (((unsigned long)buffer) % 512)
DPRINT("%p buffer not aligned\n", buffer);
#endif
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
DPRINT("buffer overrun in copy buffer %d\n",
(int)((floppy_track_buffer -
dma_buffer) >> 9));
printk("fsector_t=%d buffer_min=%d\n",
fsector_t, buffer_min);
printk("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(COMMAND) == FD_READ)
printk("read\n");
if (CT(COMMAND) == FD_WRITE)
printk("write\n");
break;
}
if (((unsigned long)buffer) % 512)
DPRINT("%p buffer not aligned\n", buffer);
#endif
if (CT(COMMAND) == FD_READ)
memcpy(buffer, dma_buffer, size);
else
memcpy(dma_buffer, buffer, size);
remaining -= size;
dma_buffer += size;
}
#ifdef FLOPPY_SANITY_CHECK
if (remaining) {
......@@ -3815,14 +3810,10 @@ static int check_floppy_change(struct gendisk *disk)
* a disk in the drive, and whether that disk is writable.
*/
static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
static void floppy_rb0_complete(struct bio *bio,
int err)
{
if (bio->bi_size)
return 1;
complete((struct completion *)bio->bi_private);
return 0;
}
static int __floppy_read_block_0(struct block_device *bdev)
......
......@@ -142,25 +142,23 @@ static irqreturn_t lgb_irq(int irq, void *_bd)
* return the total length. */
static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
{
unsigned int i = 0, idx, len = 0;
struct bio *bio;
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, idx) {
/* We told the block layer not to give us too many. */
BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
/* If we had a zero-length segment, it would look like
* the end of the data referred to by the "struct
* lguest_dma", so make sure that doesn't happen. */
BUG_ON(!bvec->bv_len);
/* Convert page & offset to a physical address */
dma->addr[i] = page_to_phys(bvec->bv_page)
+ bvec->bv_offset;
dma->len[i] = bvec->bv_len;
len += bvec->bv_len;
i++;
}
unsigned int i = 0, len = 0;
struct req_iterator iter;
struct bio_vec *bvec;
rq_for_each_segment(bvec, req, iter) {
/* We told the block layer not to give us too many. */
BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
/* If we had a zero-length segment, it would look like
* the end of the data referred to by the "struct
* lguest_dma", so make sure that doesn't happen. */
BUG_ON(!bvec->bv_len);
/* Convert page & offset to a physical address */
dma->addr[i] = page_to_phys(bvec->bv_page)
+ bvec->bv_offset;
dma->len[i] = bvec->bv_len;
len += bvec->bv_len;
i++;
}
/* If the array isn't full, we mark the end with a 0 length */
if (i < LGUEST_MAX_DMA_SECTIONS)
......
......@@ -551,7 +551,7 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
out:
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio, old_bio->bi_size);
bio_io_error(old_bio);
return 0;
}
......@@ -580,7 +580,7 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
bio_put(bio);
} else {
int ret = do_bio_filebacked(lo, bio);
bio_endio(bio, bio->bi_size, ret);
bio_endio(bio, ret);
}
}
......
......@@ -180,7 +180,7 @@ static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, i, flags;
int result, flags;
struct nbd_request request;
unsigned long size = req->nr_sectors << 9;
struct socket *sock = lo->sock;
......@@ -205,27 +205,23 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
}
if (nbd_cmd(req) == NBD_CMD_WRITE) {
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, i) {
flags = 0;
if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req,
bvec->bv_len);
result = sock_send_bvec(sock, bvec, flags);
if (result <= 0) {
printk(KERN_ERR "%s: Send data failed (result %d)\n",
lo->disk->disk_name,
result);
goto error_out;
}
rq_for_each_segment(bvec, req, iter) {
flags = 0;
if (!rq_iter_last(req, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
result = sock_send_bvec(sock, bvec, flags);
if (result <= 0) {
printk(KERN_ERR "%s: Send data failed (result %d)\n",
lo->disk->disk_name, result);
goto error_out;
}
}
}
......@@ -321,22 +317,19 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
dprintk(DBG_RX, "%s: request %p: got reply\n",
lo->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
int i;
struct bio *bio;
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, i) {
result = sock_recv_bvec(sock, bvec);
if (result <= 0) {
printk(KERN_ERR "%s: Receive data failed (result %d)\n",
lo->disk->disk_name,
result);
req->errors++;
return req;
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
struct req_iterator iter;
struct bio_vec *bvec;
rq_for_each_segment(bvec, req, iter) {
result = sock_recv_bvec(sock, bvec);
if (result <= 0) {
printk(KERN_ERR "%s: Receive data failed (result %d)\n",
lo->disk->disk_name, result);
req->errors++;
return req;
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
}
}
return req;
......
......@@ -1058,15 +1058,12 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
}
}
static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_read(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
if (bio->bi_size)
return 1;
VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
......@@ -1077,19 +1074,14 @@ static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
wake_up(&pd->wqueue);
}
pkt_bio_finished(pd);
return 0;
}
static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
if (bio->bi_size)
return 1;
VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
......@@ -1098,7 +1090,6 @@ static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int
atomic_dec(&pkt->io_wait);
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
return 0;
}
/*
......@@ -1470,7 +1461,7 @@ static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
while (bio) {
next = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
bio_endio(bio, uptodate ? 0 : -EIO);
bio = next;
}
pkt->orig_bios = pkt->orig_bios_tail = NULL;
......@@ -2462,19 +2453,15 @@ static int pkt_close(struct inode *inode, struct file *file)
}
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
if (bio->bi_size)
return 1;
bio_put(bio);
bio_endio(psd->bio, psd->bio->bi_size, err);
bio_endio(psd->bio, err);
mempool_free(psd, psd_pool);
pkt_bio_finished(pd);
return 0;
}
static int pkt_make_request(struct request_queue *q, struct bio *bio)
......@@ -2620,7 +2607,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
}
return 0;
end_io:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
......
......@@ -91,30 +91,29 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct request *req, int gather)
{
unsigned int offset = 0;
struct bio *bio;
sector_t sector;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned int i = 0, j;
unsigned int i = 0;
size_t size;
void *buf;
rq_for_each_bio(bio, req) {
sector = bio->bi_sector;
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
__func__, __LINE__, i, bio_segments(bio),
bio_sectors(bio), sector);
bio_for_each_segment(bvec, bio, j) {
size = bvec->bv_len;
buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
__bio_kunmap_atomic(bio, KM_IRQ0);
}
__func__, __LINE__, i, bio_segments(iter.bio),
bio_sectors(iter.bio),
(unsigned long)iter.bio->bi_sector);
size = bvec->bv_len;
buf = bvec_kmap_irq(bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
flush_kernel_dcache_page(bvec->bv_page);
bvec_kunmap_irq(bvec, &flags);
i++;
}
}
......@@ -130,12 +129,13 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
#ifdef DEBUG
unsigned int n = 0;
struct bio *bio;
struct bio_vec *bv;
struct req_iterator iter;
rq_for_each_bio(bio, req)
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
__func__, __LINE__, op, n, req->nr_sectors,
req->hard_nr_sectors);
#endif
......
......@@ -287,10 +287,10 @@ static int rd_make_request(struct request_queue *q, struct bio *bio)
if (ret)
goto fail;
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
return 0;
fail:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
......