/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
25 #include "xfs_trans.h"
26 #include "xfs_dmapi.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_alloc.h"
32 #include "xfs_btree.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_error.h"
39 #include "xfs_ioctl32.h"
40 #include "xfs_vnodeops.h"
42 #include <linux/dcache.h>
43 #include <linux/smp_lock.h>
45 static struct vm_operations_struct xfs_file_vm_ops;
46 #ifdef CONFIG_XFS_DMAPI
47 static struct vm_operations_struct xfs_dmapi_file_vm_ops;
53 const struct iovec *iov,
54 unsigned long nr_segs,
58 struct file *file = iocb->ki_filp;
60 BUG_ON(iocb->ki_pos != pos);
61 if (unlikely(file->f_flags & O_DIRECT))
62 ioflags |= IO_ISDIRECT;
63 return xfs_read(XFS_I(file->f_path.dentry->d_inode), iocb, iov,
64 nr_segs, &iocb->ki_pos, ioflags);
70 const struct iovec *iov,
71 unsigned long nr_segs,
74 return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
78 xfs_file_aio_read_invis(
80 const struct iovec *iov,
81 unsigned long nr_segs,
84 return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
90 const struct iovec *iov,
91 unsigned long nr_segs,
95 struct file *file = iocb->ki_filp;
97 BUG_ON(iocb->ki_pos != pos);
98 if (unlikely(file->f_flags & O_DIRECT))
99 ioflags |= IO_ISDIRECT;
100 return xfs_write(XFS_I(file->f_mapping->host), iocb, iov, nr_segs,
101 &iocb->ki_pos, ioflags);
107 const struct iovec *iov,
108 unsigned long nr_segs,
111 return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
115 xfs_file_aio_write_invis(
117 const struct iovec *iov,
118 unsigned long nr_segs,
121 return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
125 xfs_file_splice_read(
128 struct pipe_inode_info *pipe,
132 return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
133 infilp, ppos, pipe, len, flags, 0);
137 xfs_file_splice_read_invis(
140 struct pipe_inode_info *pipe,
144 return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
145 infilp, ppos, pipe, len, flags, IO_INVIS);
149 xfs_file_splice_write(
150 struct pipe_inode_info *pipe,
151 struct file *outfilp,
156 return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
157 pipe, outfilp, ppos, len, flags, 0);
161 xfs_file_splice_write_invis(
162 struct pipe_inode_info *pipe,
163 struct file *outfilp,
168 return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
169 pipe, outfilp, ppos, len, flags, IO_INVIS);
177 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
179 return -xfs_open(XFS_I(inode));
187 return -xfs_release(XFS_I(inode));
193 struct dentry *dentry,
196 int flags = FSYNC_WAIT;
200 xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED);
201 return -xfs_fsync(XFS_I(dentry->d_inode), flags,
202 (xfs_off_t)0, (xfs_off_t)-1);
205 #ifdef CONFIG_XFS_DMAPI
208 struct vm_area_struct *vma,
209 struct vm_fault *vmf)
211 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
212 bhv_vnode_t *vp = vn_from_inode(inode);
214 ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
215 if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
216 return VM_FAULT_SIGBUS;
217 return filemap_fault(vma, vmf);
219 #endif /* CONFIG_XFS_DMAPI */
222 * Unfortunately we can't just use the clean and simple readdir implementation
223 * below, because nfs might call back into ->lookup from the filldir callback
224 * and that will deadlock the low-level btree code.
226 * Hopefully we'll find a better workaround that allows to use the optimal
227 * version at least for local readdirs for 2.6.25.
236 struct inode *inode = filp->f_path.dentry->d_inode;
237 xfs_inode_t *ip = XFS_I(inode);
242 * The Linux API doesn't pass down the total size of the buffer
243 * we read into down to the filesystem. With the filldir concept
244 * it's not needed for correct information, but the XFS dir2 leaf
245 * code wants an estimate of the buffer size to calculate it's
246 * readahead window and size the buffers used for mapping to
249 * Try to give it an estimate that's good enough, maybe at some
250 * point we can change the ->readdir prototype to include the
253 bufsize = (size_t)min_t(loff_t, PAGE_SIZE, inode->i_size);
255 error = xfs_readdir(ip, dirent, bufsize,
256 (xfs_off_t *)&filp->f_pos, filldir);
271 struct hack_callback {
286 struct hack_callback *buf = __buf;
287 struct hack_dirent *de = (struct hack_dirent *)(buf->dirent + buf->used);
290 reclen = ALIGN(sizeof(struct hack_dirent) + namlen, sizeof(u64));
291 if (buf->used + reclen > buf->len)
298 memcpy(de->name, name, namlen);
309 struct inode *inode = filp->f_path.dentry->d_inode;
310 xfs_inode_t *ip = XFS_I(inode);
311 struct hack_callback buf;
312 struct hack_dirent *de;
316 xfs_off_t start_offset, curr_offset, offset;
319 * Try fairly hard to get memory
321 buf.len = PAGE_CACHE_SIZE;
323 buf.dirent = kmalloc(buf.len, GFP_KERNEL);
327 } while (buf.len >= 1024);
332 curr_offset = filp->f_pos;
333 if (curr_offset == 0x7fffffff)
336 offset = filp->f_pos;
341 start_offset = offset;
344 error = -xfs_readdir(ip, &buf, buf.len, &offset,
346 if (error || offset == start_offset) {
352 de = (struct hack_dirent *)buf.dirent;
354 curr_offset = de->offset /* & 0x7fffffff */;
355 if (filldir(dirent, de->name, de->namlen,
356 curr_offset & 0x7fffffff,
357 de->ino, de->d_type)) {
361 reclen = ALIGN(sizeof(struct hack_dirent) + de->namlen,
364 de = (struct hack_dirent *)((char *)de + reclen);
371 filp->f_pos = offset & 0x7fffffff;
373 filp->f_pos = curr_offset;
384 struct vm_area_struct *vma)
386 vma->vm_ops = &xfs_file_vm_ops;
387 vma->vm_flags |= VM_CAN_NONLINEAR;
389 #ifdef CONFIG_XFS_DMAPI
390 if (XFS_M(filp->f_path.dentry->d_inode->i_sb)->m_flags & XFS_MOUNT_DMAPI)
391 vma->vm_ops = &xfs_dmapi_file_vm_ops;
392 #endif /* CONFIG_XFS_DMAPI */
405 struct inode *inode = filp->f_path.dentry->d_inode;
407 error = xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p);
408 xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
410 /* NOTE: some of the ioctl's return positive #'s as a
411 * byte count indicating success, such as
412 * readlink_by_handle. So we don't "sign flip"
413 * like most other routines. This means true
414 * errors need to be returned as a negative value.
420 xfs_file_ioctl_invis(
426 struct inode *inode = filp->f_path.dentry->d_inode;
428 error = xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p);
429 xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
431 /* NOTE: some of the ioctl's return positive #'s as a
432 * byte count indicating success, such as
433 * readlink_by_handle. So we don't "sign flip"
434 * like most other routines. This means true
435 * errors need to be returned as a negative value.
440 #ifdef CONFIG_XFS_DMAPI
441 #ifdef HAVE_VMOP_MPROTECT
444 struct vm_area_struct *vma,
445 unsigned int newflags)
447 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
448 struct xfs_mount *mp = XFS_M(inode->i_sb);
451 if (mp->m_flags & XFS_MOUNT_DMAPI) {
452 if ((vma->vm_flags & VM_MAYSHARE) &&
453 (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE))
454 error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
458 #endif /* HAVE_VMOP_MPROTECT */
459 #endif /* CONFIG_XFS_DMAPI */
461 #ifdef HAVE_FOP_OPEN_EXEC
462 /* If the user is attempting to execute a file that is offline then
463 * we have to trigger a DMAPI READ event before the file is marked as busy
464 * otherwise the invisible I/O will not be able to write to the file to bring
471 struct xfs_mount *mp = XFS_M(inode->i_sb);
472 struct xfs_inode *ip = XFS_I(inode);
474 if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) &&
475 DM_EVENT_ENABLED(ip, DM_EVENT_READ))
476 return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
479 #endif /* HAVE_FOP_OPEN_EXEC */
482 * mmap()d file has taken write protection fault and is being made
483 * writable. We can set the page state up correctly for a writable
484 * page, which means we can do correct delalloc accounting (ENOSPC
485 * checking!) and unwritten extent mapping.
489 struct vm_area_struct *vma,
492 return block_page_mkwrite(vma, page, xfs_get_blocks);
495 const struct file_operations xfs_file_operations = {
496 .llseek = generic_file_llseek,
497 .read = do_sync_read,
498 .write = do_sync_write,
499 .aio_read = xfs_file_aio_read,
500 .aio_write = xfs_file_aio_write,
501 .splice_read = xfs_file_splice_read,
502 .splice_write = xfs_file_splice_write,
503 .unlocked_ioctl = xfs_file_ioctl,
505 .compat_ioctl = xfs_file_compat_ioctl,
507 .mmap = xfs_file_mmap,
508 .open = xfs_file_open,
509 .release = xfs_file_release,
510 .fsync = xfs_file_fsync,
511 #ifdef HAVE_FOP_OPEN_EXEC
512 .open_exec = xfs_file_open_exec,
516 const struct file_operations xfs_invis_file_operations = {
517 .llseek = generic_file_llseek,
518 .read = do_sync_read,
519 .write = do_sync_write,
520 .aio_read = xfs_file_aio_read_invis,
521 .aio_write = xfs_file_aio_write_invis,
522 .splice_read = xfs_file_splice_read_invis,
523 .splice_write = xfs_file_splice_write_invis,
524 .unlocked_ioctl = xfs_file_ioctl_invis,
526 .compat_ioctl = xfs_file_compat_invis_ioctl,
528 .mmap = xfs_file_mmap,
529 .open = xfs_file_open,
530 .release = xfs_file_release,
531 .fsync = xfs_file_fsync,
535 const struct file_operations xfs_dir_file_operations = {
536 .read = generic_read_dir,
537 .readdir = xfs_file_readdir,
538 .unlocked_ioctl = xfs_file_ioctl,
540 .compat_ioctl = xfs_file_compat_ioctl,
542 .fsync = xfs_file_fsync,
545 static struct vm_operations_struct xfs_file_vm_ops = {
546 .fault = filemap_fault,
547 .page_mkwrite = xfs_vm_page_mkwrite,
550 #ifdef CONFIG_XFS_DMAPI
551 static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
552 .fault = xfs_vm_fault,
553 .page_mkwrite = xfs_vm_page_mkwrite,
554 #ifdef HAVE_VMOP_MPROTECT
555 .mprotect = xfs_vm_mprotect,
558 #endif /* CONFIG_XFS_DMAPI */