arq->state = AS_RQ_NEW;
if (rq_data_dir(arq->request) == READ
- || current->flags&PF_SYNCWRITE)
+ || (arq->request->flags & REQ_RW_SYNC))
arq->is_sync = 1;
else
arq->is_sync = 0;
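With this hunk the anticipatory scheduler decides whether a request is sync from a flag carried on the request itself (REQ_RW_SYNC, defined further down in this patch) instead of peeking at the submitting task's PF_SYNCWRITE. A minimal sketch of the resulting predicate, written as a hypothetical helper rather than anything the patch itself adds:

#include <linux/blkdev.h>

/* illustration only; this helper is not part of the patch */
static inline int arq_request_is_sync(struct request *rq)
{
	/* reads are always sync; writes only if the request was tagged */
	return rq_data_dir(rq) == READ || (rq->flags & REQ_RW_SYNC);
}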
static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
-#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
-
/*
* lots of deadline iosched dupes, can be abstracted later...
*/
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
- if (rw == READ || process_sync(task))
+ if (rw == READ || rw == WRITE_SYNC)
return task->pid;
return CFQ_KEY_ASYNC;
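With process_sync() gone, cfq_queue_pid() keys purely off the rw value passed in: reads and explicitly sync writes get a per-process key, while plain writes collapse onto the shared CFQ_KEY_ASYNC key. A sketch of the three cases, assuming READ, WRITE and the new WRITE_SYNC values come from fs.h (WRITE_SYNC presumably being WRITE plus the BIO_RW_SYNC bit); the wrapper function is hypothetical:

static void cfq_key_example(struct task_struct *tsk)
{
	pid_t key;

	key = cfq_queue_pid(tsk, READ);		/* tsk->pid, per-process queue */
	key = cfq_queue_pid(tsk, WRITE_SYNC);	/* tsk->pid, per-process queue */
	key = cfq_queue_pid(tsk, WRITE);	/* CFQ_KEY_ASYNC, shared queue */
	(void)key;
}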
if (unlikely(bio_barrier(bio)))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+ if (bio_sync(bio))
+ req->flags |= REQ_RW_SYNC;
+
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
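__make_request() now copies the sync hint from the bio onto the request: bio_sync() tests the BIO_RW_SYNC bit in bio->bi_rw, which submit_bio() ORs in from its rw argument. A hedged sketch of how a submitter ends up with a REQ_RW_SYNC request (the wrapper is hypothetical; bio allocation and setup are elided):

/* illustration only; not part of the patch */
static void submit_tagged_write(struct bio *bio)
{
	/*
	 * submit_bio() ORs WRITE_SYNC into bio->bi_rw, so bio_sync() is
	 * true when __make_request() sees the bio; the resulting request
	 * carries REQ_RW_SYNC for the io scheduler to key off.
	 */
	submit_bio(WRITE_SYNC, bio);
}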
inode = filp->f_dentry->d_inode;
mutex_lock(&inode->i_mutex);
- current->flags |= PF_SYNCWRITE;
rc = filemap_fdatawrite(inode->i_mapping);
err = filp->f_op->fsync(filp, filp->f_dentry, 1);
if (!rc)
rc = err;
err = filemap_fdatawait(inode->i_mapping);
if (!rc)
rc = err;
- current->flags &= ~PF_SYNCWRITE;
mutex_unlock(&inode->i_mutex);
VLDBG(curlun, "fdatasync -> %d\n", rc);
return rc;
goto out;
}
- current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
/*
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
- current->flags &= ~PF_SYNCWRITE;
out:
return ret;
}
NULL); /* vmas */
up_read(&current->mm->mmap_sem);
- if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) {
+ if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
struct page *page = ZERO_PAGE(dio->curr_user_address);
/*
* A memory fault, but the filesystem has some outstanding
map_bh->b_state = 0;
map_bh->b_size = fs_count << dio->inode->i_blkbits;
- create = dio->rw == WRITE;
+ create = dio->rw & WRITE;
if (dio->lock_type == DIO_LOCKING) {
if (dio->block_in_file < (i_size_read(dio->inode) >>
dio->blkbits))
loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */
- if (dio->rw == WRITE) {
+ if (dio->rw & WRITE) {
page_cache_release(page);
return -ENOTBLK;
}
}
} /* end iovec loop */
- if (ret == -ENOTBLK && rw == WRITE) {
+ if (ret == -ENOTBLK && (rw & WRITE)) {
/*
* The remaining part of the request will be
* be handled by buffered I/O when we return
if (dio->is_async) {
int should_wait = 0;
- if (dio->result < dio->size && rw == WRITE) {
+ if (dio->result < dio->size && (rw & WRITE)) {
dio->waiter = current;
should_wait = 1;
}
ret = transferred;
/* We could have also come here on an AIO file extend */
- if (!is_sync_kiocb(iocb) && rw == WRITE &&
+ if (!is_sync_kiocb(iocb) && (rw & WRITE) &&
ret >= 0 && dio->result == dio->size)
/*
* For AIO writes where we have completed the
int acquire_i_mutex = 0;
if (rw & WRITE)
- current->flags |= PF_SYNCWRITE;
+ rw = WRITE_SYNC;
if (bdev)
bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
* even for AIO, we need to wait for i/o to complete before
* returning in this case.
*/
- dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) &&
+ dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
(end > i_size_read(inode)));
retval = direct_io_worker(rw, iocb, inode, iov, offset,
mutex_unlock(&inode->i_mutex);
else if (acquire_i_mutex)
mutex_lock(&inode->i_mutex);
- if (rw & WRITE)
- current->flags &= ~PF_SYNCWRITE;
return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);
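The recurring change from "rw == WRITE" to "rw & WRITE" in the direct-io paths follows directly from the "rw = WRITE_SYNC" promotion above: once __blockdev_direct_IO rewrites rw for writes, equality tests against WRITE no longer match, but the WRITE bit is still set, so the bitwise tests do. A user-space illustration of the arithmetic, with the constants stood in locally (BIO_RW_SYNC == 4 is an assumption about bio.h, not something this patch shows):

#include <assert.h>

/* local stand-ins for the kernel constants, illustration only */
#define READ		0
#define WRITE		1
#define BIO_RW_SYNC	4
#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))

int main(void)
{
	int rw = WRITE;

	rw = WRITE_SYNC;	/* what __blockdev_direct_IO now does for writes */
	assert(rw != WRITE);	/* equality checks would miss it...              */
	assert(rw & WRITE);	/* ...but the bitwise tests still match          */
	return 0;
}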
int need_write_inode_now = 0;
int err2;
- current->flags |= PF_SYNCWRITE;
if (what & OSYNC_DATA)
err = filemap_fdatawrite(mapping);
if (what & (OSYNC_METADATA|OSYNC_DATA)) {
if (!err)
err = err2;
}
- current->flags &= ~PF_SYNCWRITE;
spin_lock(&inode_lock);
if ((inode->i_state & I_DIRTY) &&
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
__REQ_ORDERED_COLOR, /* is before or after barrier */
+ __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
__REQ_NR_BITS, /* stops here */
};
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
+#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
#define PF_KSWAPD 0x00040000 /* I am kswapd */
#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
-#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
-#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
-#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
-#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x04000000 /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x08000000 /* Spread some slab caches over cpuset */
+#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */
+#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
+#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
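Removing PF_SYNCWRITE also renumbers the remaining PF_* bits to close the hole, e.g. PF_SWAPWRITE moves from 0x01000000 to 0x00800000. Out-of-tree code that tests raw values instead of the symbolic names would silently test the wrong bit after this patch; a hedged fragment (the callee is hypothetical):

if (current->flags & PF_SWAPWRITE)	/* correct before and after the renumbering */
	do_swap_writeback();		/* hypothetical */

if (current->flags & 0x01000000)	/* wrong: this bit now means PF_SPREAD_PAGE */
	do_swap_writeback();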
/*
* just ignore them, but return -ENOMEM at the end.
*/
down_read(&current->mm->mmap_sem);
- if (flags & MS_SYNC)
- current->flags |= PF_SYNCWRITE;
vma = find_vma(current->mm, start);
if (!vma) {
error = -ENOMEM;
}
} while (vma && !done);
out_unlock:
- current->flags &= ~PF_SYNCWRITE;
up_read(&current->mm->mmap_sem);
out:
return error;