static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
/*
* For the allocated request tables
* set defaults
*/
q->nr_requests = BLKDEV_MAX_RQ;
- q->max_phys_segments = MAX_PHYS_SEGMENTS;
- q->max_hw_segments = MAX_HW_SEGMENTS;
+ blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+ blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
q->make_request_fn = mfn;
q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
+ rq->ioprio = 0;
rq->buffer = NULL;
rq->ref_count = 1;
rq->q = q;
rq->special = NULL;
rq->data_len = 0;
rq->data = NULL;
+ rq->nr_phys_segments = 0;
rq->sense = NULL;
rq->end_io = NULL;
rq->end_io_data = NULL;
{
struct blk_queue_tag *bqt = q->queue_tags;
- if (unlikely(bqt == NULL || tag >= bqt->max_depth))
+ if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
return NULL;
return bqt->tag_index[tag];
memset(tag_index, 0, depth * sizeof(struct request *));
memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
+ tags->real_max_depth = depth;
tags->max_depth = depth;
tags->tag_index = tag_index;
tags->tag_map = tag_map;
if (!bqt)
return -ENXIO;
+ /*
+ * if we already have a large enough real_max_depth, just
+ * adjust max_depth. *NOTE* as requests with tag values
+ * between new_depth and real_max_depth can be in-flight, the
+ * tag map cannot be shrunk blindly here.
+ */
+ if (new_depth <= bqt->real_max_depth) {
+ bqt->max_depth = new_depth;
+ return 0;
+ }
+
/*
* save the old state info, so we can copy it back
*/
tag_index = bqt->tag_index;
tag_map = bqt->tag_map;
- max_depth = bqt->max_depth;
+ max_depth = bqt->real_max_depth;
if (init_tag_map(q, bqt, new_depth))
return -ENOMEM;
BUG_ON(tag == -1);
- if (unlikely(tag >= bqt->max_depth))
+ if (unlikely(tag >= bqt->real_max_depth))
/*
* This can happen after tag depth has been reduced.
* FIXME: how about a warning or info message here?
}
-int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
return 0;
}
-EXPORT_SYMBOL(blk_phys_contig_segment);
-
-int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
return 1;
}
-EXPORT_SYMBOL(blk_hw_contig_segment);
-
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
struct request *next)
{
- int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments;
- int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+ int total_phys_segments;
+ int total_hw_segments;
/*
 * First check if either of the requests is re-queued
return 0;
/*
- * Will it become to large?
+ * Will it become too large?
*/
if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
return 0;
if (!blk_remove_plug(q))
return;
- /*
- * was plugged, fire request_fn if queue has stuff to do
- */
- if (elv_next_request(q))
- q->request_fn(q);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
mempool_free(rq, q->rq.rq_pool);
}
-static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
- int gfp_mask)
+static inline struct request *
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
*/
rq->flags = rw;
- if (!elv_set_request(q, rq, gfp_mask))
+ if (!elv_set_request(q, rq, bio, gfp_mask))
return rq;
mempool_free(rq, q->rq.rq_pool);
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
-void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
{
if (!ioc || ioc_batching(q, ioc))
return;
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
+ int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
- struct io_context *ioc = get_io_context(gfp_mask);
+ struct io_context *ioc = current_io_context(GFP_ATOMIC);
if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
goto out;
- spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
}
}
- switch (elv_may_queue(q, rw)) {
+ switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO:
goto rq_starved;
case ELV_MQUEUE_MAY:
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
*/
- spin_unlock_irq(q->queue_lock);
goto out;
}
get_rq:
+ /*
+ * Only allow batching queuers to allocate up to 50% over the defined
+ * limit of requests, otherwise we could have thousands of requests
+ * allocated with any setting of ->nr_requests
+ */
+ if (rl->count[rw] >= (3 * q->nr_requests / 2))
+ goto out;
+
rl->count[rw]++;
rl->starved[rw] = 0;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, gfp_mask);
+ rq = blk_alloc_request(q, rw, bio, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
if (unlikely(rl->count[rw] == 0))
rl->starved[rw] = 1;
- spin_unlock_irq(q->queue_lock);
goto out;
}
rq_init(q, rq);
rq->rl = rl;
out:
- put_io_context(ioc);
return rq;
}
/*
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw)
+static struct request *get_request_wait(request_queue_t *q, int rw,
+ struct bio *bio)
{
- DEFINE_WAIT(wait);
struct request *rq;
- generic_unplug_device(q);
- do {
+ rq = get_request(q, rw, bio, GFP_NOIO);
+ while (!rq) {
+ DEFINE_WAIT(wait);
struct request_list *rl = &q->rq;
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, GFP_NOIO);
+ rq = get_request(q, rw, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
+ __generic_unplug_device(q);
+ spin_unlock_irq(q->queue_lock);
io_schedule();
/*
 * up to a big batch of them for a small period of time.
* See ioc_batching, ioc_set_batching
*/
- ioc = get_io_context(GFP_NOIO);
+ ioc = current_io_context(GFP_NOIO);
ioc_set_batching(q, ioc);
- put_io_context(ioc);
+
+ spin_lock_irq(q->queue_lock);
}
finish_wait(&rl->wait[rw], &wait);
- } while (!rq);
+ }
return rq;
}
BUG_ON(rw != READ && rw != WRITE);
- if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw);
- else
- rq = get_request(q, rw, gfp_mask);
+ spin_lock_irq(q->queue_lock);
+ if (gfp_mask & __GFP_WAIT) {
+ rq = get_request_wait(q, rw, NULL);
+ } else {
+ rq = get_request(q, rw, NULL, gfp_mask);
+ if (!rq)
+ spin_unlock_irq(q->queue_lock);
+ }
+ /* q->queue_lock is unlocked at this point */
return rq;
}
-
EXPORT_SYMBOL(blk_get_request);
/**
/**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
- * @rw: READ or WRITE data
+ * @rq: request structure to fill
* @ubuf: the user buffer
* @len: length of user data
*
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
- unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+ unsigned int len)
{
unsigned long uaddr;
- struct request *rq;
struct bio *bio;
+ int reading;
if (len > (q->max_sectors << 9))
- return ERR_PTR(-EINVAL);
- if ((!len && ubuf) || (len && !ubuf))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
+ if (!len || !ubuf)
+ return -EINVAL;
- rq = blk_get_request(q, rw, __GFP_WAIT);
- if (!rq)
- return ERR_PTR(-ENOMEM);
+ reading = rq_data_dir(rq) == READ;
/*
* if alignment requirement is satisfied, map in user pages for
*/
uaddr = (unsigned long) ubuf;
if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+ bio = bio_map_user(q, NULL, uaddr, len, reading);
else
- bio = bio_copy_user(q, uaddr, len, rw == READ);
+ bio = bio_copy_user(q, uaddr, len, reading);
if (!IS_ERR(bio)) {
rq->bio = rq->biotail = bio;
rq->buffer = rq->data = NULL;
rq->data_len = len;
- return rq;
+ return 0;
}
/*
* bio is the err-ptr
*/
- blk_put_request(rq);
- return (struct request *) bio;
+ return PTR_ERR(bio);
}
EXPORT_SYMBOL(blk_rq_map_user);
+/**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ * Data will be mapped directly for zero copy io, if possible. Otherwise
+ * a kernel bounce buffer is used.
+ *
+ * A matching blk_rq_unmap_user() must be issued at the end of io, while
+ * still in process context.
+ *
+ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ * before being submitted to the device, as pages mapped may be out of
+ * reach. It's the caller's responsibility to make sure this happens. The
+ * original bio must be passed back in to blk_rq_unmap_user() for proper
+ * unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+ struct sg_iovec *iov, int iov_count)
+{
+ struct bio *bio;
+
+ if (!iov || iov_count <= 0)
+ return -EINVAL;
+
+ /* we don't allow misaligned data like bio_map_user() does. If the
+ * user is using sg, they're expected to know the alignment constraints
+ * and respect them accordingly */
+ bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ rq->bio = rq->biotail = bio;
+ blk_rq_bio_prep(q, rq, bio);
+ rq->buffer = rq->data = NULL;
+ rq->data_len = bio->bi_size;
+ return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
/**
* blk_rq_unmap_user - unmap a request with user data
- * @rq: request to be unmapped
- * @bio: bio for the request
+ * @bio: bio to be unmapped
* @ulen: length of user buffer
*
* Description:
- * Unmap a request previously mapped by blk_rq_map_user().
+ * Unmap a bio previously mapped by blk_rq_map_user().
*/
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
{
int ret = 0;
ret = bio_uncopy_user(bio);
}
- blk_put_request(rq);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
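
For illustration only (not part of this patch): with the new interfaces the caller allocates the request itself, and blk_rq_map_user()/blk_rq_unmap_user() deal only with the data buffer. A hedged sketch of the new calling sequence for a passthrough command; the helper name, buffers and command setup are placeholders supplied by the caller.

	/*
	 * Sketch only: issue a passthrough command with a user buffer
	 * using the post-patch API.  q, disk, ubuf and len come from the
	 * caller; filling in the command bytes is elided.
	 */
	static int pc_io_sketch(request_queue_t *q, struct gendisk *disk,
				void __user *ubuf, unsigned int len)
	{
		struct request *rq;
		struct bio *bio;
		int err;

		/* the request is no longer allocated inside blk_rq_map_user() */
		rq = blk_get_request(q, READ, __GFP_WAIT);
		if (!rq)
			return -ENOMEM;
		rq->flags |= REQ_BLOCK_PC;
		/* ... set up rq->cmd[] / rq->cmd_len for the device here ... */

		err = blk_rq_map_user(q, rq, ubuf, len);	/* now returns 0 or -Exxx */
		if (err) {
			blk_put_request(rq);
			return err;
		}

		bio = rq->bio;
		err = blk_execute_rq(q, disk, rq, 0);		/* waits for completion */

		blk_rq_unmap_user(bio, len);	/* unmapping no longer frees the request */
		blk_put_request(rq);
		return err;
	}
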
+/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of kernel data
+ * @gfp_mask: memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+ unsigned int len, unsigned int gfp_mask)
+{
+ struct bio *bio;
+
+ if (len > (q->max_sectors << 9))
+ return -EINVAL;
+ if (!len || !kbuf)
+ return -EINVAL;
+
+ bio = bio_map_kern(q, kbuf, len, gfp_mask);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (rq_data_dir(rq) == WRITE)
+ bio->bi_rw |= (1 << BIO_RW);
+
+ rq->bio = rq->biotail = bio;
+ blk_rq_bio_prep(q, rq, bio);
+
+ rq->buffer = rq->data = NULL;
+ rq->data_len = len;
+ return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
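
For illustration only (not part of this patch): blk_rq_map_kern() follows the same pattern for kernel buffers. A short sketch under the same assumptions as above (helper name, buffer and command setup are placeholders).

	/* Sketch only: map a kernel buffer into a caller-allocated passthrough request. */
	static int kern_io_sketch(request_queue_t *q, struct gendisk *disk,
				  void *kbuf, unsigned int len)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, READ, __GFP_WAIT);
		if (!rq)
			return -ENOMEM;
		rq->flags |= REQ_BLOCK_PC;
		/* ... set up rq->cmd[] / rq->cmd_len here ... */

		err = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
		if (!err)
			err = blk_execute_rq(q, disk, rq, 0);

		blk_put_request(rq);
		return err;
	}
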
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the io scheduler queue
+ * for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+ struct request *rq, int at_head,
+ void (*done)(struct request *))
+{
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+ rq->rq_disk = bd_disk;
+ rq->flags |= REQ_NOMERGE;
+ rq->end_io = done;
+ elv_add_request(q, rq, where, 1);
+ generic_unplug_device(q);
+}
+
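
For illustration only (not part of this patch): blk_execute_rq_nowait() lets a caller fire a prepared request and handle completion from the done callback, while blk_execute_rq() below becomes the synchronous wrapper around it. A minimal sketch of asynchronous use; the handler and helper names are illustrative.

	/* Sketch only: asynchronous execution with a private completion handler. */
	static void my_rq_done(struct request *rq)
	{
		/* runs when the request finishes; rq->errors holds the result */
		complete((struct completion *) rq->end_io_data);
	}

	static void submit_async_sketch(request_queue_t *q, struct gendisk *disk,
					struct request *rq, struct completion *c)
	{
		rq->end_io_data = c;
		blk_execute_rq_nowait(q, disk, rq, 0, my_rq_done);
		/* returns without waiting; the caller may wait_for_completion(c) later */
	}
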
/**
* blk_execute_rq - insert a request into queue for execution
* @q: queue to insert the request in
* @bd_disk: matching gendisk
* @rq: request to insert
+ * @at_head: insert request at head or tail of queue
*
* Description:
* Insert a fully prepared request at the back of the io scheduler queue
- * for execution.
+ * for execution and wait for completion.
*/
int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
- struct request *rq)
+ struct request *rq, int at_head)
{
DECLARE_COMPLETION(wait);
char sense[SCSI_SENSE_BUFFERSIZE];
int err = 0;
- rq->rq_disk = bd_disk;
-
/*
* we need an extra reference to the request, so we can look at
* it after io completion
rq->sense_len = 0;
}
- rq->flags |= REQ_NOMERGE;
rq->waiting = &wait;
- rq->end_io = blk_end_sync_rq;
- elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
- generic_unplug_device(q);
+ blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
wait_for_completion(&wait);
rq->waiting = NULL;
rq->data_len = 0;
rq->timeout = 60 * HZ;
- ret = blk_execute_rq(q, disk, rq);
+ ret = blk_execute_rq(q, disk, rq, 0);
if (ret && error_sector)
*error_sector = rq->sector;
EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
int rw = rq_data_dir(rq);
return;
req->rq_status = RQ_INACTIVE;
- req->q = NULL;
req->rl = NULL;
/*
req->rq_disk->in_flight--;
}
+ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+
__blk_put_request(q, next);
return 1;
}
EXPORT_SYMBOL(blk_attempt_remerge);
-/*
- * Non-locking blk_attempt_remerge variant.
- */
-void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
- attempt_back_merge(q, rq);
-}
-
-EXPORT_SYMBOL(__blk_attempt_remerge);
-
static int __make_request(request_queue_t *q, struct bio *bio)
{
- struct request *req, *freereq = NULL;
+ struct request *req;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
+ unsigned short prio;
sector_t sector;
sector = bio->bi_sector;
nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio);
+ prio = bio_prio(bio);
rw = bio_data_dir(bio);
sync = bio_sync(bio);
goto end_io;
}
-again:
spin_lock_irq(q->queue_lock);
- if (elv_queue_empty(q)) {
- blk_plug_device(q);
- goto get_rq;
- }
- if (barrier)
+ if (unlikely(barrier) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req);
req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req);
goto out;
- /*
- * elevator says don't/can't merge. get new request
- */
- case ELEVATOR_NO_MERGE:
- break;
-
+ /* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
default:
- printk("elevator returned crap (%d)\n", el_ret);
- BUG();
+ ;
}
+get_rq:
/*
- * Grab a free request from the freelist - if that is empty, check
- * if we are doing read ahead and abort instead of blocking for
- * a free slot.
+ * Grab a free request. This might sleep but cannot fail.
+ * Returns with the queue unlocked.
+ */
+ req = get_request_wait(q, rw, bio);
+
+ /*
+ * After dropping the lock and possibly sleeping here, our request
+ * may now be mergeable after it had proven unmergeable (above).
+ * We don't worry about that case for efficiency. It won't happen
+ * often, and the elevators are able to handle it.
*/
-get_rq:
- if (freereq) {
- req = freereq;
- freereq = NULL;
- } else {
- spin_unlock_irq(q->queue_lock);
- if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
- /*
- * READA bit set
- */
- err = -EWOULDBLOCK;
- if (bio_rw_ahead(bio))
- goto end_io;
-
- freereq = get_request_wait(q, rw);
- }
- goto again;
- }
req->flags |= REQ_CMD;
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
+ req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
+ spin_lock_irq(q->queue_lock);
+ if (elv_queue_empty(q))
+ blk_plug_device(q);
add_request(q, req);
out:
- if (freereq)
- __blk_put_request(q, freereq);
if (sync)
__generic_unplug_device(q);
if (bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- switch (bio->bi_rw) {
+ switch (bio_data_dir(bio)) {
case READ:
p->read_sectors += bio_sectors(bio);
p->reads++;
{
struct request_list *rl = &q->rq;
struct request *rq;
+ int requeued = 0;
spin_lock_irq(q->queue_lock);
clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
rq = list_entry_rq(q->drain_list.next);
list_del_init(&rq->queuelist);
- __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+ elv_requeue_request(q, rq);
+ requeued++;
}
+ if (requeued)
+ q->request_fn(q);
+
spin_unlock_irq(q->queue_lock);
wake_up(&rl->wait[0]);
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
- bio->bi_rw = rw;
+ bio->bi_rw |= rw;
if (rw & WRITE)
mod_page_state(pgpgout, count);
else
EXPORT_SYMBOL(submit_bio);
-void blk_recalc_rq_segments(struct request *rq)
+static void blk_recalc_rq_segments(struct request *rq)
{
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
rq->nr_hw_segments = nr_hw_segs;
}
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq)) {
rq->hard_sector += nsect;
struct io_context *ioc;
local_irq_save(flags);
+ task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
+ ioc->task = NULL;
+ task_unlock(current);
local_irq_restore(flags);
if (ioc->aic && ioc->aic->exit)
/*
* If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
*
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
*/
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
{
struct task_struct *tsk = current;
- unsigned long flags;
struct io_context *ret;
- local_irq_save(flags);
ret = tsk->io_context;
- if (ret)
- goto out;
-
- local_irq_restore(flags);
+ if (likely(ret))
+ return ret;
ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
if (ret) {
atomic_set(&ret->refcount, 1);
- ret->pid = tsk->pid;
+ ret->task = current;
+ ret->set_ioprio = NULL;
ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic = NULL;
- spin_lock_init(&ret->lock);
-
- local_irq_save(flags);
+ tsk->io_context = ret;
+ }
- /*
- * very unlikely, someone raced with us in setting up the task
- * io context. free new context and just grab a reference.
- */
- if (!tsk->io_context)
- tsk->io_context = ret;
- else {
- kmem_cache_free(iocontext_cachep, ret);
- ret = tsk->io_context;
- }
+ return ret;
+}
+EXPORT_SYMBOL(current_io_context);
-out:
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+ struct io_context *ret;
+ ret = current_io_context(gfp_flags);
+ if (likely(ret))
atomic_inc(&ret->refcount);
- local_irq_restore(flags);
- }
-
return ret;
}
EXPORT_SYMBOL(get_io_context);
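
For illustration only (not part of this patch): after the split, callers that only touch the context from the submitting task use current_io_context() with no reference counting, while anything that keeps the pointer beyond `current` still pairs get_io_context() with put_io_context(). A hedged sketch of the two patterns; the helper name is illustrative.

	/* Sketch only: the two io_context usage patterns after the split. */
	static void ioc_usage_sketch(void)
	{
		struct io_context *ioc;

		/* short-lived use from the submitting task: no extra reference,
		 * valid only while we stay within 'current' context */
		ioc = current_io_context(GFP_ATOMIC);
		if (ioc)
			ioc->last_waited = jiffies;	/* example field access */

		/* keeping the pointer beyond 'current': take a reference and
		 * release it with put_io_context() when done */
		ioc = get_io_context(GFP_NOIO);
		if (ioc) {
			/* ... store ioc in longer-lived state here ... */
			put_io_context(ioc);
		}
	}
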
.store = queue_attr_store,
};
-struct kobj_type queue_ktype = {
+static struct kobj_type queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
.default_attrs = default_attrs,
};