2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "xfs_trans.h"
27 #include "xfs_dmapi.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_alloc.h"
33 #include "xfs_btree.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dir_sf.h"
36 #include "xfs_dir2_sf.h"
37 #include "xfs_dinode.h"
38 #include "xfs_inode.h"
39 #include "xfs_error.h"
41 #include "xfs_ioctl32.h"
43 #include <linux/dcache.h>
44 #include <linux/smp_lock.h>
/*
 * Forward declarations of the vm_operations tables defined at the bottom
 * of this file; the DMAPI variant exists only when event support is
 * compiled in.
 * NOTE(review): the matching #endif for this #ifdef is not visible in
 * this extract — confirm against the complete file.
 */
46 static struct vm_operations_struct xfs_file_vm_ops;
47 #ifdef CONFIG_XFS_DMAPI
48 static struct vm_operations_struct xfs_dmapi_file_vm_ops;
/*
 * Body of __xfs_file_read (the signature is missing from this extract —
 * presumably (iocb, buf, ioflags, count, pos); confirm against the full
 * file).  Wraps the single user buffer in an iovec and hands the read to
 * the behaviour-layer vnode op, tagging O_DIRECT opens with IO_ISDIRECT.
 */
59 struct iovec iov = {buf, count};
60 struct file *file = iocb->ki_filp;
61 bhv_vnode_t *vp = vn_from_inode(file->f_dentry->d_inode);
/* Callers must have primed ki_pos to the requested file offset. */
63 BUG_ON(iocb->ki_pos != pos);
64 if (unlikely(file->f_flags & O_DIRECT))
65 ioflags |= IO_ISDIRECT;
66 return bhv_vop_read(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL);
76 return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos);
80 xfs_file_aio_read_invis(
86 return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
92 const char __user *buf,
/*
 * Body of __xfs_file_write (signature missing from this extract —
 * presumably the mirror of __xfs_file_read; confirm against the full
 * file).  The const is cast away only to satisfy struct iovec's
 * non-const iov_base; the buffer is not written through.
 */
97 struct iovec iov = {(void __user *)buf, count};
98 struct file *file = iocb->ki_filp;
99 struct inode *inode = file->f_mapping->host;
100 bhv_vnode_t *vp = vn_from_inode(inode);
/* Callers must have primed ki_pos to the requested file offset. */
102 BUG_ON(iocb->ki_pos != pos);
103 if (unlikely(file->f_flags & O_DIRECT))
104 ioflags |= IO_ISDIRECT;
105 return bhv_vop_write(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL);
111 const char __user *buf,
115 return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos);
119 xfs_file_aio_write_invis(
121 const char __user *buf,
125 return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
/*
 * __xfs_file_readv: synchronous vectored read helper shared by
 * xfs_file_readv and xfs_file_readv_invis.  Builds a synchronous kiocb
 * so the aio-style vnode read op can serve the blocking readv path,
 * then propagates the final position back to *ppos.
 * NOTE(review): several declaration lines (parameters, kiocb/rval
 * locals, the final return) are missing from this extract — confirm
 * against the complete file.
 */
128 STATIC inline ssize_t
131 const struct iovec *iov,
133 unsigned long nr_segs,
136 struct inode *inode = file->f_mapping->host;
137 bhv_vnode_t *vp = vn_from_inode(inode);
141 init_sync_kiocb(&kiocb, file);
142 kiocb.ki_pos = *ppos;
144 if (unlikely(file->f_flags & O_DIRECT))
145 ioflags |= IO_ISDIRECT;
146 rval = bhv_vop_read(vp, &kiocb, iov, nr_segs,
147 &kiocb.ki_pos, ioflags, NULL);
/* Report the position the vnode op advanced to back to the caller. */
149 *ppos = kiocb.ki_pos;
156 const struct iovec *iov,
157 unsigned long nr_segs,
160 return __xfs_file_readv(file, iov, 0, nr_segs, ppos);
164 xfs_file_readv_invis(
166 const struct iovec *iov,
167 unsigned long nr_segs,
170 return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos);
/*
 * __xfs_file_writev: synchronous vectored write helper shared by
 * xfs_file_writev and xfs_file_writev_invis — the write-side mirror of
 * __xfs_file_readv above.
 * NOTE(review): parameter lines, the kiocb/rval declarations and the
 * final return are missing from this extract — confirm against the
 * complete file.
 */
173 STATIC inline ssize_t
176 const struct iovec *iov,
178 unsigned long nr_segs,
181 struct inode *inode = file->f_mapping->host;
182 bhv_vnode_t *vp = vn_from_inode(inode);
186 init_sync_kiocb(&kiocb, file);
187 kiocb.ki_pos = *ppos;
188 if (unlikely(file->f_flags & O_DIRECT))
189 ioflags |= IO_ISDIRECT;
191 rval = bhv_vop_write(vp, &kiocb, iov, nr_segs,
192 &kiocb.ki_pos, ioflags, NULL);
/* Report the position the vnode op advanced to back to the caller. */
194 *ppos = kiocb.ki_pos;
201 const struct iovec *iov,
202 unsigned long nr_segs,
205 return __xfs_file_writev(file, iov, 0, nr_segs, ppos);
209 xfs_file_writev_invis(
211 const struct iovec *iov,
212 unsigned long nr_segs,
215 return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos);
226 return bhv_vop_sendfile(vn_from_inode(filp->f_dentry->d_inode),
227 filp, pos, 0, count, actor, target, NULL);
231 xfs_file_sendfile_invis(
238 return bhv_vop_sendfile(vn_from_inode(filp->f_dentry->d_inode),
239 filp, pos, IO_INVIS, count, actor, target, NULL);
243 xfs_file_splice_read(
246 struct pipe_inode_info *pipe,
250 return bhv_vop_splice_read(vn_from_inode(infilp->f_dentry->d_inode),
251 infilp, ppos, pipe, len, flags, 0, NULL);
255 xfs_file_splice_read_invis(
258 struct pipe_inode_info *pipe,
262 return bhv_vop_splice_read(vn_from_inode(infilp->f_dentry->d_inode),
263 infilp, ppos, pipe, len, flags, IO_INVIS,
268 xfs_file_splice_write(
269 struct pipe_inode_info *pipe,
270 struct file *outfilp,
275 return bhv_vop_splice_write(vn_from_inode(outfilp->f_dentry->d_inode),
276 pipe, outfilp, ppos, len, flags, 0, NULL);
280 xfs_file_splice_write_invis(
281 struct pipe_inode_info *pipe,
282 struct file *outfilp,
287 return bhv_vop_splice_write(vn_from_inode(outfilp->f_dentry->d_inode),
288 pipe, outfilp, ppos, len, flags, IO_INVIS,
297 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
299 return -bhv_vop_open(vn_from_inode(inode), NULL);
306 return -bhv_vop_close(vn_from_inode(filp->f_dentry->d_inode), 0,
307 file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
315 bhv_vnode_t *vp = vn_from_inode(inode);
318 return -bhv_vop_release(vp);
325 struct dentry *dentry,
328 bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
329 int flags = FSYNC_WAIT;
335 return -bhv_vop_fsync(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1);
338 #ifdef CONFIG_XFS_DMAPI
/*
 * DMAPI page-fault handler (the function header is missing from this
 * extract — presumably xfs_vm_nopage; confirm against the full file).
 * Delivers an mmap event to the DMAPI application before falling
 * through to the generic fault path; this vm_ops table is only
 * installed on VFS_DMI mounts, hence the assert.
 */
341 struct vm_area_struct *area,
342 unsigned long address,
345 struct inode *inode = area->vm_file->f_dentry->d_inode;
346 bhv_vnode_t *vp = vn_from_inode(inode);
348 ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
/* If the DMAPI event fails, the return for that path is not visible
 * in this extract (a line is missing between these two). */
349 if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
351 return filemap_nopage(area, address, type);
353 #endif /* CONFIG_XFS_DMAPI */
/*
 * Body of xfs_file_readdir (signature and several loop/branch headers
 * are missing from this extract — confirm against the complete file).
 * Emulates the Linux readdir contract on top of the IRIX-style
 * uio/xfs_dirent_t vnode readdir op: fill a kernel buffer via
 * bhv_vop_readdir, then replay each entry through filldir().
 * Offsets handed to filldir are masked to 31 bits — presumably to fit
 * userspace off_t limits; 0x7fffffff acts as the EOF sentinel.
 */
362 bhv_vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
367 int namelen, size = 0;
368 size_t rlen = PAGE_CACHE_SIZE;
369 xfs_off_t start_offset, curr_offset;
370 xfs_dirent_t *dbp = NULL;
372 /* Try fairly hard to get memory */
/* Halve the request on failure; give up below 1K (loop header for
 * this do/while is missing from the extract). */
374 if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
377 } while (rlen >= 1024);
379 if (read_buf == NULL)
383 uio.uio_segflg = UIO_SYSSPACE;
384 curr_offset = filp->f_pos;
/* 0x7fffffff marks end-of-directory; map it to the vnode-layer EOF
 * cookie 0xffffffff, otherwise resume where we left off. */
385 if (filp->f_pos != 0x7fffffff)
386 uio.uio_offset = filp->f_pos;
388 uio.uio_offset = 0xffffffff;
391 uio.uio_resid = iov.iov_len = rlen;
392 iov.iov_base = read_buf;
395 start_offset = uio.uio_offset;
397 error = bhv_vop_readdir(vp, &uio, NULL, &eof);
/* No forward progress or an error terminates the outer fill loop. */
398 if ((uio.uio_offset == start_offset) || error) {
403 size = rlen - uio.uio_resid;
404 dbp = (xfs_dirent_t *)read_buf;
406 namelen = strlen(dbp->d_name);
408 if (filldir(dirent, dbp->d_name, namelen,
409 (loff_t) curr_offset & 0x7fffffff,
/* Advance to the next variable-length dirent in the buffer. */
414 size -= dbp->d_reclen;
415 curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
416 dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);
/* On EOF record the masked vnode offset, otherwise the offset of the
 * first entry not yet given to filldir. */
422 filp->f_pos = uio.uio_offset & 0x7fffffff;
424 filp->f_pos = curr_offset;
/*
 * Tail of xfs_file_mmap (the signature's first line is missing from
 * this extract).  Installs the XFS vm_operations on the vma,
 * substituting the DMAPI-aware table on VFS_DMI mounts so faults can
 * raise mmap events.
 */
434 struct vm_area_struct *vma)
436 vma->vm_ops = &xfs_file_vm_ops;
438 #ifdef CONFIG_XFS_DMAPI
439 if (vn_from_inode(filp->f_dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
440 vma->vm_ops = &xfs_dmapi_file_vm_ops;
441 #endif /* CONFIG_XFS_DMAPI */
454 struct inode *inode = filp->f_dentry->d_inode;
455 bhv_vnode_t *vp = vn_from_inode(inode);
457 error = bhv_vop_ioctl(vp, inode, filp, 0, cmd, (void __user *)p);
460 /* NOTE: some of the ioctl's return positive #'s as a
461 * byte count indicating success, such as
462 * readlink_by_handle. So we don't "sign flip"
463 * like most other routines. This means true
464 * errors need to be returned as a negative value.
470 xfs_file_ioctl_invis(
476 struct inode *inode = filp->f_dentry->d_inode;
477 bhv_vnode_t *vp = vn_from_inode(inode);
479 error = bhv_vop_ioctl(vp, inode, filp, IO_INVIS, cmd, (void __user *)p);
482 /* NOTE: some of the ioctl's return positive #'s as a
483 * byte count indicating success, such as
484 * readlink_by_handle. So we don't "sign flip"
485 * like most other routines. This means true
486 * errors need to be returned as a negative value.
491 #ifdef CONFIG_XFS_DMAPI
492 #ifdef HAVE_VMOP_MPROTECT
495 struct vm_area_struct *vma,
496 unsigned int newflags)
498 bhv_vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);
501 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
502 if ((vma->vm_flags & VM_MAYSHARE) &&
503 (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
504 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
506 error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
511 #endif /* HAVE_VMOP_MPROTECT */
512 #endif /* CONFIG_XFS_DMAPI */
514 #ifdef HAVE_FOP_OPEN_EXEC
515 /* If the user is attempting to execute a file that is offline then
516 * we have to trigger a DMAPI READ event before the file is marked as busy
517 * otherwise the invisible I/O will not be able to write to the file to bring
524 bhv_vnode_t *vp = vn_from_inode(inode);
526 if (unlikely(vp->v_vfsp->vfs_flag & VFS_DMI)) {
527 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
528 xfs_inode_t *ip = xfs_vtoi(vp);
532 if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ))
533 return -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
538 #endif /* HAVE_FOP_OPEN_EXEC */
/*
 * file_operations for regular XFS files.  Synchronous read/write go
 * through do_sync_read/do_sync_write, which call back into the
 * xfs_file_aio_* methods; everything ultimately routes through the
 * behaviour-layer vnode ops.
 * NOTE(review): the closing "};" and what is presumably a CONFIG_COMPAT
 * guard around .compat_ioctl are not visible in this extract — confirm
 * against the complete file.
 */
540 const struct file_operations xfs_file_operations = {
541 .llseek = generic_file_llseek,
542 .read = do_sync_read,
543 .write = do_sync_write,
544 .readv = xfs_file_readv,
545 .writev = xfs_file_writev,
546 .aio_read = xfs_file_aio_read,
547 .aio_write = xfs_file_aio_write,
548 .sendfile = xfs_file_sendfile,
549 .splice_read = xfs_file_splice_read,
550 .splice_write = xfs_file_splice_write,
551 .unlocked_ioctl = xfs_file_ioctl,
553 .compat_ioctl = xfs_file_compat_ioctl,
555 .mmap = xfs_file_mmap,
556 .open = xfs_file_open,
557 .flush = xfs_file_close,
558 .release = xfs_file_release,
559 .fsync = xfs_file_fsync,
560 #ifdef HAVE_FOP_OPEN_EXEC
561 .open_exec = xfs_file_open_exec,
/*
 * file_operations for "invisible" I/O: identical layout to
 * xfs_file_operations but every method uses the *_invis variant, which
 * tags requests with IO_INVIS so they bypass DMAPI event generation
 * and (per the vnode layer) timestamp updates.
 * NOTE(review): the closing "};" and the guard around .compat_ioctl
 * are not visible in this extract — confirm against the complete file.
 */
565 const struct file_operations xfs_invis_file_operations = {
566 .llseek = generic_file_llseek,
567 .read = do_sync_read,
568 .write = do_sync_write,
569 .readv = xfs_file_readv_invis,
570 .writev = xfs_file_writev_invis,
571 .aio_read = xfs_file_aio_read_invis,
572 .aio_write = xfs_file_aio_write_invis,
573 .sendfile = xfs_file_sendfile_invis,
574 .splice_read = xfs_file_splice_read_invis,
575 .splice_write = xfs_file_splice_write_invis,
576 .unlocked_ioctl = xfs_file_ioctl_invis,
578 .compat_ioctl = xfs_file_compat_invis_ioctl,
580 .mmap = xfs_file_mmap,
581 .open = xfs_file_open,
582 .flush = xfs_file_close,
583 .release = xfs_file_release,
584 .fsync = xfs_file_fsync,
/*
 * file_operations for XFS directories: readdir plus the shared ioctl
 * and fsync paths.
 * NOTE(review): the closing "};" and the guard around .compat_ioctl
 * are not visible in this extract — confirm against the complete file.
 */
588 const struct file_operations xfs_dir_file_operations = {
589 .read = generic_read_dir,
590 .readdir = xfs_file_readdir,
591 .unlocked_ioctl = xfs_file_ioctl,
593 .compat_ioctl = xfs_file_compat_ioctl,
595 .fsync = xfs_file_fsync,
/*
 * Default mmap operations: plain pagecache fault handling.
 */
598 static struct vm_operations_struct xfs_file_vm_ops = {
599 .nopage = filemap_nopage,
600 .populate = filemap_populate,
603 #ifdef CONFIG_XFS_DMAPI
/*
 * DMAPI-aware mmap operations, installed by xfs_file_mmap on VFS_DMI
 * mounts: faults go through xfs_vm_nopage so DMAPI events fire first.
 * NOTE(review): the closing "};" lines of both tables and the #endif
 * for HAVE_VMOP_MPROTECT are not visible in this extract — confirm
 * against the complete file.
 */
604 static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
605 .nopage = xfs_vm_nopage,
606 .populate = filemap_populate,
607 #ifdef HAVE_VMOP_MPROTECT
608 .mprotect = xfs_vm_mprotect,
611 #endif /* CONFIG_XFS_DMAPI */