/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_dfrag.h"
#include "xfs_fsops.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
/*
 * xfs_find_handle maps a userspace xfs_fsop_handlereq structure to
 * a file or filesystem handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns an fs handle for a mount point or a path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns a full handle for a file descriptor opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns a full handle for a path
 */
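/*
 * Minimal userspace sketch of the path-to-handle case (illustrative only,
 * not part of this file; the mount point, pathname and buffer size are
 * assumptions).  Only the hreq fields consumed by xfs_find_handle matter:
 * path (or fd) names the object, ohandle/ohandlen receive the opaque handle.
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	char			hbuf[64];
 *	__u32			hlen = sizeof(hbuf);
 *	xfs_fsop_handlereq_t	hreq = {
 *		.path		= (void *)"/mnt/xfs/some/file",
 *		.ohandle	= hbuf,
 *		.ohandlen	= &hlen,
 *	};
 *	int			fd = open("/mnt/xfs", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, XFS_IOC_PATH_TO_HANDLE, &hreq) == 0)
 *		printf("got a %u byte handle\n", hlen);
 */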
	xfs_fsop_handlereq_t	hreq;

	if (copy_from_user(&hreq, arg, sizeof(hreq)))
		return -XFS_ERROR(EFAULT);

	memset((char *)&handle, 0, sizeof(handle));

	case XFS_IOC_PATH_TO_FSHANDLE:
	case XFS_IOC_PATH_TO_HANDLE: {
		error = user_path_walk_link((const char __user *)hreq.path, &nd);

		ASSERT(nd.path.dentry);
		ASSERT(nd.path.dentry->d_inode);
		inode = igrab(nd.path.dentry->d_inode);

	case XFS_IOC_FD_TO_HANDLE: {
		file = fget(hreq.fd);

		ASSERT(file->f_path.dentry);
		ASSERT(file->f_path.dentry->d_inode);
		inode = igrab(file->f_path.dentry->d_inode);

		return -XFS_ERROR(EINVAL);

	if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
		/* we're not in XFS anymore, Toto */
		return -XFS_ERROR(EINVAL);

	switch (inode->i_mode & S_IFMT) {
		return -XFS_ERROR(EBADF);
	/* now we can grab the fsid */
	memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
			sizeof(xfs_fsid_t));
	hsize = sizeof(xfs_fsid_t);

	if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
		xfs_inode_t	*ip = XFS_I(inode);

		/* need to get access to the xfs_inode to read the generation */
		lock_mode = xfs_ilock_map_shared(ip);

		/* fill in fid section of handle from inode */
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = ip->i_d.di_gen;
		handle.ha_fid.fid_ino = ip->i_ino;

		xfs_iunlock_map_shared(ip, lock_mode);

		hsize = XFS_HSIZE(handle);

	/* now copy our handle into the user buffer & write out the size */
	if (copy_to_user(hreq.ohandle, &handle, hsize) ||
	    copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
		return -XFS_ERROR(EFAULT);
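/*
 * For reference, the handle assembled above has the following layout
 * (field widths as declared in xfs_fs.h of this vintage).  An "fs handle"
 * is just the fsid; a full handle appends the fid, whose fid_len counts
 * the bytes that follow the fid_len field itself:
 *
 *	ha_fsid			8 bytes		copied from m_fixedfsid
 *	ha_fid.fid_len		__u16		sizeof(xfs_fid_t) - sizeof(fid_len)
 *	ha_fid.fid_pad		__u16		zero
 *	ha_fid.fid_gen		__u32		inode generation number
 *	ha_fid.fid_ino		__u64		inode number
 */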
/*
 * Convert userspace handle data into an inode.
 *
 * We use the fact that all the fsop_handlereq ioctl calls have a data
 * structure argument whose first component is always an xfs_fsop_handlereq_t,
 * so we can pass that sub structure into this handy, shared routine
 * (a layout sketch follows the routine below).
 *
 * If no error, caller must always iput the returned inode.
 */
xfs_vget_fsop_handlereq(
	struct inode		*parinode,	/* parent inode pointer */
	xfs_fsop_handlereq_t	*hreq,
	struct inode		**inode)
	xfs_handle_t		*handlep;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	hanp = hreq->ihandle;
	hlen = hreq->ihandlen;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.fid_len !=
		    (hlen - sizeof(handlep->ha_fsid) -
		     sizeof(handlep->ha_fid.fid_len)) ||
		    handlep->ha_fid.fid_pad)
			return XFS_ERROR(EINVAL);

	/*
	 * Crack the handle, obtain the inode # & generation #
	 */
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
		igen = xfid->fid_gen;
		return XFS_ERROR(EINVAL);

	/*
	 * Get the XFS inode, building a Linux inode to go with it.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
		return XFS_ERROR(EIO);
	if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	*inode = XFS_ITOV(ip);
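/*
 * Layout convention relied on above: every *_by_handle ioctl argument
 * structure begins with an xfs_fsop_handlereq_t, so &foo_hreq.hreq can be
 * handed to xfs_vget_fsop_handlereq() unchanged.  Sketch of one such
 * structure (fields paraphrased from xfs_fs.h, shown here only as an
 * illustration of the embedding):
 *
 *	typedef struct xfs_fsop_attrlist_handlereq {
 *		struct xfs_fsop_handlereq	hreq;
 *		struct xfs_attrlist_cursor	pos;
 *		__u32				flags;
 *		__u32				buflen;
 *		void				*buffer;
 *	} xfs_fsop_attrlist_handlereq_t;
 */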
	struct file		*parfilp,
	struct inode		*parinode)
	struct dentry		*dentry;
	xfs_fsop_handlereq_t	hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		return -XFS_ERROR(EINVAL);

#if BITS_PER_LONG != 32
	hreq.oflags |= O_LARGEFILE;
#endif

	/* Put open permission in namei format. */
	permflag = hreq.oflags;
	if ((permflag+1) & O_ACCMODE)
	if (permflag & O_TRUNC)

	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		return -XFS_ERROR(EPERM);

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		return -XFS_ERROR(EACCES);

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		return -XFS_ERROR(EISDIR);

	if ((new_fd = get_unused_fd()) < 0) {

	dentry = d_alloc_anon(inode);
	if (dentry == NULL) {
		put_unused_fd(new_fd);
		return -XFS_ERROR(ENOMEM);

	/* Ensure umount returns EBUSY on umounts while this file is open. */
	mntget(parfilp->f_path.mnt);

	/* Create file pointer. */
	filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
		put_unused_fd(new_fd);
		return -XFS_ERROR(-PTR_ERR(filp));

	if (inode->i_mode & S_IFREG) {
		/* invisible operation should not change atime */
		filp->f_flags |= O_NOATIME;
		filp->f_op = &xfs_invis_file_operations;

	fd_install(new_fd, filp);
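/*
 * Userspace counterpart sketch (same assumptions as the earlier example;
 * hbuf/hlen are taken to be the handle returned by XFS_IOC_PATH_TO_HANDLE,
 * and the caller must have CAP_SYS_ADMIN).  The handle goes back in via
 * ihandle/ihandlen and the ioctl return value is a newly opened fd:
 *
 *	xfs_fsop_handlereq_t	hreq = {
 *		.ihandle	= hbuf,
 *		.ihandlen	= hlen,
 *		.oflags		= O_RDONLY,
 *	};
 *
 *	int handle_fd = ioctl(fd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 *
 * On success, handle_fd behaves like any descriptor returned by open(2).
 */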
/*
 * This is a copy from fs/namei.c:vfs_readlink(), except for removing
 * its unused first argument.
 */
	if (len > (unsigned) buflen)
	if (copy_to_user(buffer, link, len))
xfs_readlink_by_handle(
	struct inode		*parinode)
	xfs_fsop_handlereq_t	hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);

	/* Restrict this handle operation to symlinks only. */
	if (!S_ISLNK(inode->i_mode)) {
		error = -XFS_ERROR(EINVAL);

	if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
		error = -XFS_ERROR(EFAULT);

	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);

	error = -xfs_readlink(XFS_I(inode), link);
	error = do_readlink(hreq.ohandle, olen, link);
xfs_fssetdm_by_handle(
	struct inode		*parinode)
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode);

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
		error = -XFS_ERROR(EPERM);

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);

	error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
xfs_attrlist_by_handle(
	struct inode		*parinode)
	attrlist_cursor_kern_t	*cursor;
	xfs_fsop_attrlist_handlereq_t al_hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode);

	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
				al_hreq.flags, cursor);

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
xfs_attrmulti_attr_get(
	if (*len > XATTR_SIZE_MAX)
	kbuf = kmalloc(*len, GFP_KERNEL);

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL);

	if (copy_to_user(ubuf, kbuf, *len))

xfs_attrmulti_attr_set(
	const char __user	*ubuf,
	if (IS_RDONLY(inode))
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
	if (len > XATTR_SIZE_MAX)

	kbuf = kmalloc(len, GFP_KERNEL);

	if (copy_from_user(kbuf, ubuf, len))

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);

xfs_attrmulti_attr_remove(
	if (IS_RDONLY(inode))
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
	return xfs_attr_remove(XFS_I(inode), name, flags);
xfs_attrmulti_by_handle(
	struct inode		*parinode)
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode);

	size = am_hreq.opcount * sizeof(attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)

	ops = kmalloc(size, GFP_KERNEL);

	if (copy_from_user(ops, am_hreq.ops, size))

	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);

	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user(attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
		if (ops[i].am_error < 0)

		switch (ops[i].am_opcode) {
			ops[i].am_error = xfs_attrmulti_attr_get(inode,
					attr_name, ops[i].am_attrvalue,
					&ops[i].am_length, ops[i].am_flags);
			ops[i].am_error = xfs_attrmulti_attr_set(inode,
					attr_name, ops[i].am_attrvalue,
					ops[i].am_length, ops[i].am_flags);
			ops[i].am_error = xfs_attrmulti_attr_remove(inode,
					attr_name, ops[i].am_flags);
			ops[i].am_error = EINVAL;

	if (copy_to_user(am_hreq.ops, ops, size))
		error = XFS_ERROR(EFAULT);
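/*
 * Userspace sketch of a single-op XFS_IOC_ATTRMULTI_BY_HANDLE call
 * (illustrative; the attribute name and buffer size are assumptions,
 * hreq is assumed to be filled in as in the earlier handle sketches,
 * and the per-op result comes back in am_error):
 *
 *	char				value[256];
 *	xfs_attr_multiop_t		op = {
 *		.am_opcode	= ATTR_OP_GET,
 *		.am_attrname	= (void *)"comment",
 *		.am_attrvalue	= value,
 *		.am_length	= sizeof(value),
 *	};
 *	xfs_fsop_attrmulti_handlereq_t	am = {
 *		.hreq		= hreq,
 *		.opcount	= 1,
 *		.ops		= &op,
 *	};
 *
 *	ioctl(fd, XFS_IOC_ATTRMULTI_BY_HANDLE, &am);
 */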
/* prototypes for a few of the stack-hungry cases that have
 * their own functions.  Functions are defined after their use
 * so gcc doesn't get fancy and inline them with -O3 */
	struct xfs_inode	*ip,

xfs_ioc_fsgeometry_v1(

	struct xfs_inode	*ip,

	struct xfs_inode	*ip,

	struct inode		*inode = filp->f_path.dentry->d_inode;
	xfs_mount_t		*mp = ip->i_mount;

	xfs_itrace_entry(XFS_I(inode));
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
		/*
		 * Only allow the sys admin to reserve space unless
		 * unwritten extents are enabled.
		 */
		if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
		    !capable(CAP_SYS_ADMIN))

		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);

	case XFS_IOC_DIOINFO: {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -XFS_ERROR(EFAULT);
	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_GETXFLAGS:
	case XFS_IOC_SETXFLAGS:
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_xattr(ip, filp, cmd, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -XFS_ERROR(EFAULT);

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);

	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmapx(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE:
		return xfs_find_handle(cmd, arg);

	case XFS_IOC_OPEN_BY_HANDLE:
		return xfs_open_by_handle(mp, arg, filp, inode);

	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(mp, arg, inode);

	case XFS_IOC_READLINK_BY_HANDLE:
		return xfs_readlink_by_handle(mp, arg, inode);

	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(mp, arg, inode);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(mp, arg, inode);
	case XFS_IOC_SWAPEXT: {
		error = xfs_swapext((struct xfs_swapext __user *)arg);

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		error = xfs_fs_counts(mp, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -XFS_ERROR(EFAULT);

		/* input parameter is passed in resblks field of structure */
		error = xfs_reserve_blocks(mp, &in, &inout);

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))

		error = xfs_reserve_blocks(mp, NULL, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -XFS_ERROR(EFAULT);
	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t in;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_data(mp, &in);

	case XFS_IOC_FSGROWFSLOG: {

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_log(mp, &in);

	case XFS_IOC_FSGROWFSRT: {

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_rt(mp, &in);

		if (!capable(CAP_SYS_ADMIN))

		if (inode->i_sb->s_frozen == SB_UNFROZEN)
			freeze_bdev(inode->i_sb->s_bdev);

		if (!capable(CAP_SYS_ADMIN))

		if (inode->i_sb->s_frozen != SB_UNFROZEN)
			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);

	case XFS_IOC_GOINGDOWN: {

		if (!capable(CAP_SYS_ADMIN))

		if (get_user(in, (__uint32_t __user *)arg))
			return -XFS_ERROR(EFAULT);

		error = xfs_fs_goingdown(mp, in);

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_errortag_add(in.errtag, mp);

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))

		error = xfs_errortag_clearall(mp, 1);
	struct xfs_inode	*ip,

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -XFS_ERROR(EPERM);

	if (!(filp->f_mode & FMODE_WRITE))
		return -XFS_ERROR(EBADF);

	if (!S_ISREG(inode->i_mode))
		return -XFS_ERROR(EINVAL);

	if (copy_from_user(&bf, arg, sizeof(bf)))
		return -XFS_ERROR(EFAULT);

	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		attr_flags |= ATTR_NONBLOCK;
	if (ioflags & IO_INVIS)
		attr_flags |= ATTR_DMI;

	error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos,
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -XFS_ERROR(EFAULT);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_single(mp, &inlast,
					bulkreq.ubuffer, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count,
					(bulkstat_one_pf)xfs_bulkstat_one, NULL,
					sizeof(xfs_bstat_t), bulkreq.ubuffer,
					BULKSTAT_FG_QUICK, &done);

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
xfs_ioc_fsgeometry_v1(
	xfs_fsop_geom_v1_t	fsgeo;

	error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -XFS_ERROR(EFAULT);

	xfs_fsop_geom_t		fsgeo;

	error = xfs_fs_geometry(mp, &fsgeo, 4);

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -XFS_ERROR(EFAULT);
/*
 * Linux extended inode flags interface.
 */

xfs_merge_ioc_xflags(
	unsigned int		xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= XFS_XFLAG_IMMUTABLE;
	else
		xflags &= ~XFS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= XFS_XFLAG_APPEND;
	else
		xflags &= ~XFS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= XFS_XFLAG_SYNC;
	else
		xflags &= ~XFS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= XFS_XFLAG_NOATIME;
	else
		xflags &= ~XFS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= XFS_XFLAG_NODUMP;
	else
		xflags &= ~XFS_XFLAG_NODUMP;

	__uint16_t		di_flags)
	unsigned int		flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = ip->i_d.di_projid;

		if (ip->i_afp->if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = ip->i_afp->if_bytes /
						sizeof(xfs_bmbt_rec_t);
		else
			fa.fsx_nextents = ip->i_d.di_anextents;
			fa.fsx_nextents = 0;
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = ip->i_df.if_bytes /
						sizeof(xfs_bmbt_rec_t);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
	struct bhv_vattr	*vattr;

	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
	if (unlikely(!vattr))

	case XFS_IOC_FSSETXATTR: {
		if (copy_from_user(&fa, arg, sizeof(fa))) {

		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
		vattr->va_xflags = fa.fsx_xflags;
		vattr->va_extsize = fa.fsx_extsize;
		vattr->va_projid = fa.fsx_projid;

		error = xfs_setattr(ip, vattr, attr_flags, NULL);
			vn_revalidate(XFS_ITOV(ip));	/* update flags */

	case XFS_IOC_GETXFLAGS: {
		flags = xfs_di2lxflags(ip->i_d.di_flags);
		if (copy_to_user(arg, &flags, sizeof(flags)))

	case XFS_IOC_SETXFLAGS: {
		if (copy_from_user(&flags, arg, sizeof(flags))) {

		if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
			      FS_NOATIME_FL | FS_NODUMP_FL |
			error = -EOPNOTSUPP;

		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		vattr->va_mask = XFS_AT_XFLAGS;
		vattr->va_xflags = xfs_merge_ioc_xflags(flags,

		error = xfs_setattr(ip, vattr, attr_flags, NULL);
			vn_revalidate(XFS_ITOV(ip));	/* update flags */
	struct xfs_inode	*ip,

	if (copy_from_user(&bm, arg, sizeof(bm)))
		return -XFS_ERROR(EFAULT);

	if (bm.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (ioflags & IO_INVIS)
		iflags |= BMV_IF_NO_DMAPI_READ;

	error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags);

	if (copy_to_user(arg, &bm, sizeof(bm)))
		return -XFS_ERROR(EFAULT);
	struct xfs_inode	*ip,
	struct getbmapx		bmx;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -XFS_ERROR(EFAULT);

	if (bmx.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	/*
	 * Map input getbmapx structure to a getbmap
	 * structure for xfs_getbmap.
	 */
	GETBMAP_CONVERT(bmx, bm);

	iflags = bmx.bmv_iflags;

	if (iflags & (~BMV_IF_VALID))
		return -XFS_ERROR(EINVAL);

	iflags |= BMV_IF_EXTENDED;

	error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags);

	GETBMAP_CONVERT(bm, bmx);

	if (copy_to_user(arg, &bmx, sizeof(bmx)))
		return -XFS_ERROR(EFAULT);
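/*
 * Userspace sketch of an extent dump via XFS_IOC_GETBMAPX (illustrative;
 * the array size is an assumption and fd is an open descriptor for the
 * file itself).  Element 0 is the header read and updated above, with a
 * bmv_length of -1 requesting the whole file; returned extents follow it
 * in the same array:
 *
 *	struct getbmapx		map[33];
 *	int			i;
 *
 *	memset(map, 0, sizeof(map));
 *	map[0].bmv_length = -1;
 *	map[0].bmv_count  = 33;
 *
 *	if (ioctl(fd, XFS_IOC_GETBMAPX, map) == 0)
 *		for (i = 1; i <= map[0].bmv_entries; i++)
 *			printf("%lld %lld\n",
 *				(long long)map[i].bmv_offset,
 *				(long long)map[i].bmv_length);
 */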