2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "xfs_trans.h"
27 #include "xfs_alloc.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_btree.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_rtalloc.h"
40 #include "xfs_itable.h"
41 #include "xfs_error.h"
46 #include "xfs_buf_item.h"
47 #include "xfs_utils.h"
48 #include "xfs_dfrag.h"
49 #include "xfs_fsops.h"
50 #include "xfs_vnodeops.h"
52 #include <linux/capability.h>
53 #include <linux/dcache.h>
54 #include <linux/mount.h>
55 #include <linux/namei.h>
56 #include <linux/pagemap.h>
59 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
60 * a file or fs handle.
62 * XFS_IOC_PATH_TO_FSHANDLE
63 * returns fs handle for a mount point or path within that mount point
64 * XFS_IOC_FD_TO_HANDLE
65 * returns full handle for a FD opened in user space
66 * XFS_IOC_PATH_TO_HANDLE
67 * returns full handle for a path
/*
 * NOTE(review): fragmentary extract of xfs_find_handle() — the STATIC int
 * signature, several declarations, braces and error paths are missing from
 * this capture (original line numbers are embedded and non-contiguous).
 * Do not compile as-is; restore from the upstream file before building.
 *
 * Visible flow: copy the xfs_fsop_handlereq from userspace, obtain an inode
 * reference from a path or fd depending on cmd, verify it belongs to XFS,
 * build a handle (fsid, optionally fid with ino/gen), and copy it back out.
 */
76 xfs_fsop_handlereq_t hreq;
79 if (copy_from_user(&hreq, arg, sizeof(hreq)))
80 return -XFS_ERROR(EFAULT);
82 memset((char *)&handle, 0, sizeof(handle));
/* switch (cmd) — the switch head itself is one of the missing lines */
85 case XFS_IOC_PATH_TO_FSHANDLE:
86 case XFS_IOC_PATH_TO_HANDLE: {
88 int error = user_lpath((const char __user *)hreq.path, &path);
/* error check for user_lpath presumably lived in missing lines 89-92 */
93 ASSERT(path.dentry->d_inode);
94 inode = igrab(path.dentry->d_inode);
99 case XFS_IOC_FD_TO_HANDLE: {
102 file = fget(hreq.fd);
/* NULL check for fget() presumably in missing lines 103-105 — confirm */
106 ASSERT(file->f_path.dentry);
107 ASSERT(file->f_path.dentry->d_inode);
108 inode = igrab(file->f_path.dentry->d_inode);
115 return -XFS_ERROR(EINVAL);
/* reject inodes from foreign filesystems */
118 if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
119 /* we're not in XFS anymore, Toto */
121 return -XFS_ERROR(EINVAL);
124 switch (inode->i_mode & S_IFMT) {
131 return -XFS_ERROR(EBADF);
134 /* now we can grab the fsid */
135 memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
137 hsize = sizeof(xfs_fsid_t);
/* full handles (non-FSHANDLE) additionally carry the fid section */
139 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
140 xfs_inode_t *ip = XFS_I(inode);
143 /* need to get access to the xfs_inode to read the generation */
144 lock_mode = xfs_ilock_map_shared(ip);
146 /* fill in fid section of handle from inode */
147 handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
148 sizeof(handle.ha_fid.fid_len);
149 handle.ha_fid.fid_pad = 0;
150 handle.ha_fid.fid_gen = ip->i_d.di_gen;
151 handle.ha_fid.fid_ino = ip->i_ino;
153 xfs_iunlock_map_shared(ip, lock_mode);
155 hsize = XFS_HSIZE(handle);
158 /* now copy our handle into the user buffer & write out the size */
159 if (copy_to_user(hreq.ohandle, &handle, hsize) ||
160 copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
162 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragmentary extract of xfs_vget_fsop_handlereq() — the
 * return type, several local declarations and some error-handling lines are
 * missing from this capture. Restore from upstream before building.
 */
171 * Convert userspace handle data into inode.
173 * We use the fact that all the fsop_handlereq ioctl calls have a data
174 * structure argument whose first component is always a xfs_fsop_handlereq_t,
175 * so we can pass that sub structure into this handy, shared routine.
177 * If no error, caller must always iput the returned inode.
180 xfs_vget_fsop_handlereq(
182 struct inode *parinode, /* parent inode pointer */
183 xfs_fsop_handlereq_t *hreq,
184 struct inode **inode)
189 xfs_handle_t *handlep;
197 * Only allow handle opens under a directory.
199 if (!S_ISDIR(parinode->i_mode))
200 return XFS_ERROR(ENOTDIR);
202 hanp = hreq->ihandle;
203 hlen = hreq->ihandlen;
/* handle must be at least an fsid and at most a full xfs_handle_t */
206 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
207 return XFS_ERROR(EINVAL);
208 if (copy_from_user(handlep, hanp, hlen))
209 return XFS_ERROR(EFAULT);
/* zero the tail so short handles don't leave stale bytes in handlep */
210 if (hlen < sizeof(*handlep))
211 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
/* if a fid section is present, its self-described length must match */
212 if (hlen > sizeof(handlep->ha_fsid)) {
213 if (handlep->ha_fid.fid_len !=
214 (hlen - sizeof(handlep->ha_fsid) -
215 sizeof(handlep->ha_fid.fid_len)) ||
216 handlep->ha_fid.fid_pad)
217 return XFS_ERROR(EINVAL);
221 * Crack the handle, obtain the inode # & generation #
223 xfid = (struct xfs_fid *)&handlep->ha_fid;
224 if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
226 igen = xfid->fid_gen;
228 return XFS_ERROR(EINVAL);
232 * Get the XFS inode, building a Linux inode to go with it.
234 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
238 return XFS_ERROR(EIO);
/* stale handle: generation mismatch means the inode was recycled */
239 if (ip->i_d.di_gen != igen) {
240 xfs_iput_new(ip, XFS_ILOCK_SHARED);
241 return XFS_ERROR(ENOENT);
244 xfs_iunlock(ip, XFS_ILOCK_SHARED);
246 *inode = XFS_ITOV(ip);
/*
 * NOTE(review): fragmentary extract of xfs_open_by_handle() — the function
 * name, leading parameters, local declarations, iput() calls on the error
 * paths and the success return are missing from this capture. Restore from
 * upstream before building; in particular verify the inode reference is
 * dropped on every visible early-return path (the iput lines are absent).
 */
254 struct file *parfilp,
255 struct inode *parinode)
262 struct dentry *dentry;
263 xfs_fsop_handlereq_t hreq;
265 if (!capable(CAP_SYS_ADMIN))
266 return -XFS_ERROR(EPERM);
267 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
268 return -XFS_ERROR(EFAULT);
270 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
274 /* Restrict xfs_open_by_handle to directories & regular files. */
275 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
277 return -XFS_ERROR(EINVAL);
280 #if BITS_PER_LONG != 32
281 hreq.oflags |= O_LARGEFILE;
283 /* Put open permission in namei format. */
284 permflag = hreq.oflags;
285 if ((permflag+1) & O_ACCMODE)
287 if (permflag & O_TRUNC)
/* append-only inodes: only O_APPEND writes without O_TRUNC are allowed */
290 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
291 (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
293 return -XFS_ERROR(EPERM);
296 if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
298 return -XFS_ERROR(EACCES);
301 /* Can't write directories. */
302 if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
304 return -XFS_ERROR(EISDIR);
307 if ((new_fd = get_unused_fd()) < 0) {
312 dentry = d_alloc_anon(inode);
313 if (dentry == NULL) {
315 put_unused_fd(new_fd);
316 return -XFS_ERROR(ENOMEM);
319 /* Ensure umount returns EBUSY on umounts while this file is open. */
320 mntget(parfilp->f_path.mnt);
322 /* Create file pointer. */
323 filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
/* IS_ERR(filp) check presumably in missing line 324 — confirm upstream */
325 put_unused_fd(new_fd);
326 return -XFS_ERROR(-PTR_ERR(filp));
328 if (inode->i_mode & S_IFREG) {
329 /* invisible operation should not change atime */
330 filp->f_flags |= O_NOATIME;
331 filp->f_op = &xfs_invis_file_operations;
334 fd_install(new_fd, filp);
/*
 * NOTE(review): fragmentary extract — do_readlink() (a local vfs_readlink
 * copy) and xfs_readlink_by_handle() are both missing their signatures,
 * several statements and all cleanup/out labels in this capture.
 */
339 * This is a copy from fs/namei.c:vfs_readlink(), except for removing it's
340 * unused first argument.
/* do_readlink(): bounded copy of the resolved link text to userspace */
355 if (len > (unsigned) buflen)
357 if (copy_to_user(buffer, link, len))
365 xfs_readlink_by_handle(
368 struct inode *parinode)
371 xfs_fsop_handlereq_t hreq;
376 if (!capable(CAP_SYS_ADMIN))
377 return -XFS_ERROR(EPERM);
378 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
379 return -XFS_ERROR(EFAULT);
381 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
385 /* Restrict this handle operation to symlinks only. */
386 if (!S_ISLNK(inode->i_mode)) {
387 error = -XFS_ERROR(EINVAL);
391 if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
392 error = -XFS_ERROR(EFAULT);
/* MAXPATHLEN+1 scratch buffer for the link target — freed in missing tail */
396 link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
400 error = -xfs_readlink(XFS_I(inode), link);
403 error = do_readlink(hreq.ohandle, olen, link);
/*
 * NOTE(review): fragmentary extract of xfs_fssetdm_by_handle() — return
 * type, leading parameters, locals, iput cleanup and final return are
 * missing from this capture. Sets DMAPI attributes on a handle-referenced
 * inode; requires CAP_MKNOD.
 */
415 xfs_fssetdm_by_handle(
418 struct inode *parinode)
421 struct fsdmidata fsd;
422 xfs_fsop_setdm_handlereq_t dmhreq;
425 if (!capable(CAP_MKNOD))
426 return -XFS_ERROR(EPERM);
427 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
428 return -XFS_ERROR(EFAULT);
430 error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode);
/* refuse to touch immutable/append-only inodes */
434 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
435 error = -XFS_ERROR(EPERM);
439 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
440 error = -XFS_ERROR(EFAULT);
444 error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
/*
 * NOTE(review): fragmentary extract of xfs_attrlist_by_handle() — return
 * type, parameters, some locals, kfree/iput cleanup and final return are
 * missing from this capture. Lists extended attributes of a
 * handle-referenced inode into a user buffer.
 */
453 xfs_attrlist_by_handle(
456 struct inode *parinode)
459 attrlist_cursor_kern_t *cursor;
460 xfs_fsop_attrlist_handlereq_t al_hreq;
464 if (!capable(CAP_SYS_ADMIN))
465 return -XFS_ERROR(EPERM);
466 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
467 return -XFS_ERROR(EFAULT);
/* bound the user-supplied buffer length before kmalloc'ing it below */
468 if (al_hreq.buflen > XATTR_LIST_MAX)
469 return -XFS_ERROR(EINVAL);
472 * Reject flags, only allow namespaces.
474 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
475 return -XFS_ERROR(EINVAL);
477 error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode);
481 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
/* the iteration cursor lives inside the user request structure */
485 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
486 error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
487 al_hreq.flags, cursor);
491 if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
/*
 * NOTE(review): fragmentary extracts of the three attrmulti helpers
 * (get/set/remove) — signatures, early returns and kfree cleanup are
 * missing from this capture.
 */
/* get: copy one attribute's value out through a bounded kernel buffer */
503 xfs_attrmulti_attr_get(
513 if (*len > XATTR_SIZE_MAX)
515 kbuf = kmalloc(*len, GFP_KERNEL)
519 error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
523 if (copy_to_user(ubuf, kbuf, *len))
/* set: copy the new value in from userspace, then store it */
532 xfs_attrmulti_attr_set(
535 const char __user *ubuf,
542 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
544 if (len > XATTR_SIZE_MAX)
547 kbuf = kmalloc(len, GFP_KERNEL);
551 if (copy_from_user(kbuf, ubuf, len))
554 error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
/* remove: no buffers involved, delegate directly */
562 xfs_attrmulti_attr_remove(
567 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
569 return xfs_attr_remove(XFS_I(inode), name, flags);
/*
 * NOTE(review): fragmentary extract of xfs_attrmulti_by_handle() — return
 * type, some parameters/locals, loop braces, kfree/iput cleanup and final
 * return are missing from this capture. Executes a batch of attribute
 * get/set/remove operations against one handle-referenced inode.
 */
573 xfs_attrmulti_by_handle(
576 struct file *parfilp,
577 struct inode *parinode)
580 xfs_attr_multiop_t *ops;
581 xfs_fsop_attrmulti_handlereq_t am_hreq;
583 unsigned int i, size;
586 if (!capable(CAP_SYS_ADMIN))
587 return -XFS_ERROR(EPERM);
588 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
589 return -XFS_ERROR(EFAULT);
591 error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode);
/* bound the ops array before allocating it from user-controlled opcount */
596 size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
597 if (!size || size > 16 * PAGE_SIZE)
601 ops = kmalloc(size, GFP_KERNEL);
606 if (copy_from_user(ops, am_hreq.ops, size))
609 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
615 for (i = 0; i < am_hreq.opcount; i++) {
/* per-op status is reported back through ops[i].am_error */
616 ops[i].am_error = strncpy_from_user(attr_name,
617 ops[i].am_attrname, MAXNAMELEN);
618 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
620 if (ops[i].am_error < 0)
623 switch (ops[i].am_opcode) {
625 ops[i].am_error = xfs_attrmulti_attr_get(inode,
626 attr_name, ops[i].am_attrvalue,
627 &ops[i].am_length, ops[i].am_flags);
/* mutating ops take mnt_want_write/mnt_drop_write around the call */
630 ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
633 ops[i].am_error = xfs_attrmulti_attr_set(inode,
634 attr_name, ops[i].am_attrvalue,
635 ops[i].am_length, ops[i].am_flags);
636 mnt_drop_write(parfilp->f_path.mnt);
639 ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
642 ops[i].am_error = xfs_attrmulti_attr_remove(inode,
643 attr_name, ops[i].am_flags);
644 mnt_drop_write(parfilp->f_path.mnt);
647 ops[i].am_error = EINVAL;
651 if (copy_to_user(am_hreq.ops, ops, size))
652 error = XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragmentary extract of xfs_ioc_space() — the function name,
 * leading parameters, locals and final return are missing from this
 * capture. Validates the file/inode, copies in an xfs_flock64, and calls
 * xfs_change_file_space() for the ALLOCSP/FREESP/RESVSP family.
 */
665 struct xfs_inode *ip,
676 if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
677 return -XFS_ERROR(EPERM);
678 if (!(filp->f_mode & FMODE_WRITE))
680 return -XFS_ERROR(EBADF);
682 if (!S_ISREG(inode->i_mode))
683 return -XFS_ERROR(EINVAL);
685 if (copy_from_user(&bf, arg, sizeof(bf)))
686 return -XFS_ERROR(EFAULT);
/* propagate non-blocking and invisible-I/O modes into attr_flags */
688 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
689 attr_flags |= ATTR_NONBLOCK;
690 if (ioflags & IO_INVIS)
691 attr_flags |= ATTR_DMI;
693 error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos,
/*
 * NOTE(review): fragmentary extract of xfs_ioc_bulkstat() — signature,
 * some locals, error propagation after the stat calls and final return are
 * missing from this capture. Dispatches FSINUMBERS / FSBULKSTAT_SINGLE /
 * FSBULKSTAT and writes the updated cursor and count back to userspace.
 */
704 xfs_fsop_bulkreq_t bulkreq;
705 int count; /* # of records returned */
706 xfs_ino_t inlast; /* last inode number */
710 /* done = 1 if there are more stats to get and if bulkstat */
711 /* should be called again (unused here, but used in dmapi) */
713 if (!capable(CAP_SYS_ADMIN))
716 if (XFS_FORCED_SHUTDOWN(mp))
717 return -XFS_ERROR(EIO);
719 if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
720 return -XFS_ERROR(EFAULT);
722 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
723 return -XFS_ERROR(EFAULT);
725 if ((count = bulkreq.icount) <= 0)
726 return -XFS_ERROR(EINVAL);
728 if (bulkreq.ubuffer == NULL)
729 return -XFS_ERROR(EINVAL);
731 if (cmd == XFS_IOC_FSINUMBERS)
732 error = xfs_inumbers(mp, &inlast, &count,
733 bulkreq.ubuffer, xfs_inumbers_fmt);
734 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
735 error = xfs_bulkstat_single(mp, &inlast,
736 bulkreq.ubuffer, &done);
737 else /* XFS_IOC_FSBULKSTAT */
738 error = xfs_bulkstat(mp, &inlast, &count,
739 (bulkstat_one_pf)xfs_bulkstat_one, NULL,
740 sizeof(xfs_bstat_t), bulkreq.ubuffer,
741 BULKSTAT_FG_QUICK, &done);
/* write the resume cursor (lastip) and record count back to the caller */
746 if (bulkreq.ocount != NULL) {
747 if (copy_to_user(bulkreq.lastip, &inlast,
749 return -XFS_ERROR(EFAULT);
751 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
752 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragmentary extracts of xfs_ioc_fsgeometry_v1() and
 * xfs_ioc_fsgeometry() — signatures, error checks and returns are partly
 * missing. Both fetch filesystem geometry (v1 uses geometry version 3,
 * the current call uses version 4) and copy it to userspace.
 */
759 xfs_ioc_fsgeometry_v1(
763 xfs_fsop_geom_v1_t fsgeo;
766 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
770 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
771 return -XFS_ERROR(EFAULT);
/* xfs_ioc_fsgeometry() — current (v4) geometry structure */
780 xfs_fsop_geom_t fsgeo;
783 error = xfs_fs_geometry(mp, &fsgeo, 4);
787 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
788 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragmentary extracts of the flag-translation helpers —
 * signatures, some else-branches and returns are missing from this capture.
 */
793 * Linux extended inode flags interface.
/*
 * xfs_merge_ioc_xflags: overlay the generic FS_*_FL flags from the
 * GETFLAGS/SETFLAGS interface onto an existing XFS_XFLAG_* mask, setting
 * or clearing each corresponding XFS flag (the clearing else-branches are
 * in the missing lines).
 */
797 xfs_merge_ioc_xflags(
801 unsigned int xflags = start;
803 if (flags & FS_IMMUTABLE_FL)
804 xflags |= XFS_XFLAG_IMMUTABLE;
806 xflags &= ~XFS_XFLAG_IMMUTABLE;
807 if (flags & FS_APPEND_FL)
808 xflags |= XFS_XFLAG_APPEND;
810 xflags &= ~XFS_XFLAG_APPEND;
811 if (flags & FS_SYNC_FL)
812 xflags |= XFS_XFLAG_SYNC;
814 xflags &= ~XFS_XFLAG_SYNC;
815 if (flags & FS_NOATIME_FL)
816 xflags |= XFS_XFLAG_NOATIME;
818 xflags &= ~XFS_XFLAG_NOATIME;
819 if (flags & FS_NODUMP_FL)
820 xflags |= XFS_XFLAG_NODUMP;
822 xflags &= ~XFS_XFLAG_NODUMP;
/* xfs_di2lxflags: map on-disk XFS_DIFLAG_* bits to generic FS_*_FL bits */
831 unsigned int flags = 0;
833 if (di_flags & XFS_DIFLAG_IMMUTABLE)
834 flags |= FS_IMMUTABLE_FL;
835 if (di_flags & XFS_DIFLAG_APPEND)
836 flags |= FS_APPEND_FL;
837 if (di_flags & XFS_DIFLAG_SYNC)
839 if (di_flags & XFS_DIFLAG_NOATIME)
840 flags |= FS_NOATIME_FL;
841 if (di_flags & XFS_DIFLAG_NODUMP)
842 flags |= FS_NODUMP_FL;
/*
 * NOTE(review): fragmentary extracts of xfs_ioc_fsgetxattr(),
 * xfs_ioc_fssetxattr(), xfs_ioc_getxflags() and xfs_ioc_setxflags() —
 * signatures, some branches, kfree of vattr and returns are missing from
 * this capture.
 */
/* fsgetxattr: snapshot xflags/extsize/projid/nextents under ILOCK_SHARED */
854 xfs_ilock(ip, XFS_ILOCK_SHARED);
855 fa.fsx_xflags = xfs_ip2xflags(ip);
856 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
857 fa.fsx_projid = ip->i_d.di_projid;
/* attr-fork extent count: in-core extents if loaded, else on-disk count */
861 if (ip->i_afp->if_flags & XFS_IFEXTENTS)
862 fa.fsx_nextents = ip->i_afp->if_bytes /
863 sizeof(xfs_bmbt_rec_t);
865 fa.fsx_nextents = ip->i_d.di_anextents;
869 if (ip->i_df.if_flags & XFS_IFEXTENTS)
870 fa.fsx_nextents = ip->i_df.if_bytes /
871 sizeof(xfs_bmbt_rec_t);
873 fa.fsx_nextents = ip->i_d.di_nextents;
875 xfs_iunlock(ip, XFS_ILOCK_SHARED);
877 if (copy_to_user(arg, &fa, sizeof(fa)))
/* fssetxattr: apply user-supplied fsxattr via a heap vattr + xfs_setattr */
889 struct bhv_vattr *vattr;
893 if (copy_from_user(&fa, arg, sizeof(fa)))
896 vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
897 if (unlikely(!vattr))
901 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
902 attr_flags |= ATTR_NONBLOCK;
904 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
905 vattr->va_xflags = fa.fsx_xflags;
906 vattr->va_extsize = fa.fsx_extsize;
907 vattr->va_projid = fa.fsx_projid;
909 error = -xfs_setattr(ip, vattr, attr_flags, NULL);
911 vn_revalidate(XFS_ITOV(ip)); /* update flags */
/* getxflags: report generic FS_*_FL flags derived from on-disk di_flags */
923 flags = xfs_di2lxflags(ip->i_d.di_flags);
924 if (copy_to_user(arg, &flags, sizeof(flags)))
/* setxflags: validate flag mask, merge into current xflags, xfs_setattr */
935 struct bhv_vattr *vattr;
940 if (copy_from_user(&flags, arg, sizeof(flags)))
943 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
944 FS_NOATIME_FL | FS_NODUMP_FL | \
948 vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
949 if (unlikely(!vattr))
953 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
954 attr_flags |= ATTR_NONBLOCK;
956 vattr->va_mask = XFS_AT_XFLAGS;
957 vattr->va_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
959 error = -xfs_setattr(ip, vattr, attr_flags, NULL);
961 vn_revalidate(XFS_ITOV(ip)); /* update flags */
/*
 * NOTE(review): fragmentary extracts of xfs_ioc_getbmap() and
 * xfs_ioc_getbmapx() — signatures, some locals, error checks after
 * xfs_getbmap and returns are missing from this capture.
 */
/* getbmap: copy in header, map extents, copy updated header back */
968 struct xfs_inode *ip,
977 if (copy_from_user(&bm, arg, sizeof(bm)))
978 return -XFS_ERROR(EFAULT);
/* caller must provide room for at least header + one extent record */
980 if (bm.bmv_count < 2)
981 return -XFS_ERROR(EINVAL);
983 iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
984 if (ioflags & IO_INVIS)
985 iflags |= BMV_IF_NO_DMAPI_READ;
/* extent records are written directly after the header in the user buf */
987 error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags);
991 if (copy_to_user(arg, &bm, sizeof(bm)))
992 return -XFS_ERROR(EFAULT);
/* getbmapx: extended variant — convert getbmapx <-> getbmap around call */
998 struct xfs_inode *ip,
1001 struct getbmapx bmx;
1006 if (copy_from_user(&bmx, arg, sizeof(bmx)))
1007 return -XFS_ERROR(EFAULT);
1009 if (bmx.bmv_count < 2)
1010 return -XFS_ERROR(EINVAL);
1013 * Map input getbmapx structure to a getbmap
1014 * structure for xfs_getbmap.
1016 GETBMAP_CONVERT(bmx, bm);
1018 iflags = bmx.bmv_iflags;
1020 if (iflags & (~BMV_IF_VALID))
1021 return -XFS_ERROR(EINVAL);
1023 iflags |= BMV_IF_EXTENDED;
1025 error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags);
1029 GETBMAP_CONVERT(bm, bmx);
1031 if (copy_to_user(arg, &bmx, sizeof(bmx)))
1032 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragmentary extract of the main xfs_ioctl() dispatcher —
 * the signature, many case labels, break statements, closing braces and
 * the tail of the function (it continues past this capture) are missing.
 * Visible structure: one large switch on cmd routing each XFS_IOC_* to
 * its helper or handling it inline.
 */
1045 struct inode *inode = filp->f_path.dentry->d_inode;
1046 xfs_mount_t *mp = ip->i_mount;
1049 xfs_itrace_entry(XFS_I(inode));
1052 case XFS_IOC_ALLOCSP:
1053 case XFS_IOC_FREESP:
1054 case XFS_IOC_RESVSP:
1055 case XFS_IOC_UNRESVSP:
1056 case XFS_IOC_ALLOCSP64:
1057 case XFS_IOC_FREESP64:
1058 case XFS_IOC_RESVSP64:
1059 case XFS_IOC_UNRESVSP64:
1061 * Only allow the sys admin to reserve space unless
1062 * unwritten extents are enabled.
1064 if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
1065 !capable(CAP_SYS_ADMIN))
1068 return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
1070 case XFS_IOC_DIOINFO: {
/* direct-I/O geometry comes from the rt or data buftarg as appropriate */
1072 xfs_buftarg_t *target =
1073 XFS_IS_REALTIME_INODE(ip) ?
1074 mp->m_rtdev_targp : mp->m_ddev_targp;
1076 da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
1077 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
1079 if (copy_to_user(arg, &da, sizeof(da)))
1080 return -XFS_ERROR(EFAULT);
1084 case XFS_IOC_FSBULKSTAT_SINGLE:
1085 case XFS_IOC_FSBULKSTAT:
1086 case XFS_IOC_FSINUMBERS:
1087 return xfs_ioc_bulkstat(mp, cmd, arg);
1089 case XFS_IOC_FSGEOMETRY_V1:
1090 return xfs_ioc_fsgeometry_v1(mp, arg);
1092 case XFS_IOC_FSGEOMETRY:
1093 return xfs_ioc_fsgeometry(mp, arg);
1095 case XFS_IOC_GETVERSION:
1096 return put_user(inode->i_generation, (int __user *)arg);
1098 case XFS_IOC_FSGETXATTR:
1099 return xfs_ioc_fsgetxattr(ip, 0, arg);
1100 case XFS_IOC_FSGETXATTRA:
1101 return xfs_ioc_fsgetxattr(ip, 1, arg);
1102 case XFS_IOC_FSSETXATTR:
1103 return xfs_ioc_fssetxattr(ip, filp, arg);
1104 case XFS_IOC_GETXFLAGS:
1105 return xfs_ioc_getxflags(ip, arg);
1106 case XFS_IOC_SETXFLAGS:
1107 return xfs_ioc_setxflags(ip, filp, arg);
1109 case XFS_IOC_FSSETDM: {
1110 struct fsdmidata dmi;
1112 if (copy_from_user(&dmi, arg, sizeof(dmi)))
1113 return -XFS_ERROR(EFAULT);
1115 error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
1120 case XFS_IOC_GETBMAP:
1121 case XFS_IOC_GETBMAPA:
1122 return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
1124 case XFS_IOC_GETBMAPX:
1125 return xfs_ioc_getbmapx(ip, arg);
1127 case XFS_IOC_FD_TO_HANDLE:
1128 case XFS_IOC_PATH_TO_HANDLE:
1129 case XFS_IOC_PATH_TO_FSHANDLE:
1130 return xfs_find_handle(cmd, arg);
1132 case XFS_IOC_OPEN_BY_HANDLE:
1133 return xfs_open_by_handle(mp, arg, filp, inode);
1135 case XFS_IOC_FSSETDM_BY_HANDLE:
1136 return xfs_fssetdm_by_handle(mp, arg, inode);
1138 case XFS_IOC_READLINK_BY_HANDLE:
1139 return xfs_readlink_by_handle(mp, arg, inode);
1141 case XFS_IOC_ATTRLIST_BY_HANDLE:
1142 return xfs_attrlist_by_handle(mp, arg, inode);
1144 case XFS_IOC_ATTRMULTI_BY_HANDLE:
1145 return xfs_attrmulti_by_handle(mp, arg, filp, inode);
1147 case XFS_IOC_SWAPEXT: {
1148 error = xfs_swapext((struct xfs_swapext __user *)arg);
1152 case XFS_IOC_FSCOUNTS: {
1153 xfs_fsop_counts_t out;
1155 error = xfs_fs_counts(mp, &out);
1159 if (copy_to_user(arg, &out, sizeof(out)))
1160 return -XFS_ERROR(EFAULT);
/* the remaining cases are all privileged admin/maintenance operations */
1164 case XFS_IOC_SET_RESBLKS: {
1165 xfs_fsop_resblks_t inout;
1168 if (!capable(CAP_SYS_ADMIN))
1171 if (copy_from_user(&inout, arg, sizeof(inout)))
1172 return -XFS_ERROR(EFAULT);
1174 /* input parameter is passed in resblks field of structure */
1176 error = xfs_reserve_blocks(mp, &in, &inout);
1180 if (copy_to_user(arg, &inout, sizeof(inout)))
1181 return -XFS_ERROR(EFAULT);
1185 case XFS_IOC_GET_RESBLKS: {
1186 xfs_fsop_resblks_t out;
1188 if (!capable(CAP_SYS_ADMIN))
1191 error = xfs_reserve_blocks(mp, NULL, &out);
1195 if (copy_to_user(arg, &out, sizeof(out)))
1196 return -XFS_ERROR(EFAULT);
1201 case XFS_IOC_FSGROWFSDATA: {
1202 xfs_growfs_data_t in;
1204 if (!capable(CAP_SYS_ADMIN))
1207 if (copy_from_user(&in, arg, sizeof(in)))
1208 return -XFS_ERROR(EFAULT);
1210 error = xfs_growfs_data(mp, &in);
1214 case XFS_IOC_FSGROWFSLOG: {
1215 xfs_growfs_log_t in;
1217 if (!capable(CAP_SYS_ADMIN))
1220 if (copy_from_user(&in, arg, sizeof(in)))
1221 return -XFS_ERROR(EFAULT);
1223 error = xfs_growfs_log(mp, &in);
1227 case XFS_IOC_FSGROWFSRT: {
1230 if (!capable(CAP_SYS_ADMIN))
1233 if (copy_from_user(&in, arg, sizeof(in)))
1234 return -XFS_ERROR(EFAULT);
1236 error = xfs_growfs_rt(mp, &in);
1240 case XFS_IOC_FREEZE:
1241 if (!capable(CAP_SYS_ADMIN))
1244 if (inode->i_sb->s_frozen == SB_UNFROZEN)
1245 freeze_bdev(inode->i_sb->s_bdev);
/* XFS_IOC_THAW case label presumably in missing lines 1246-1248 */
1249 if (!capable(CAP_SYS_ADMIN))
1251 if (inode->i_sb->s_frozen != SB_UNFROZEN)
1252 thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
1255 case XFS_IOC_GOINGDOWN: {
1258 if (!capable(CAP_SYS_ADMIN))
1261 if (get_user(in, (__uint32_t __user *)arg))
1262 return -XFS_ERROR(EFAULT);
1264 error = xfs_fs_goingdown(mp, in);
1268 case XFS_IOC_ERROR_INJECTION: {
1269 xfs_error_injection_t in;
1271 if (!capable(CAP_SYS_ADMIN))
1274 if (copy_from_user(&in, arg, sizeof(in)))
1275 return -XFS_ERROR(EFAULT);
1277 error = xfs_errortag_add(in.errtag, mp);
1281 case XFS_IOC_ERROR_CLEARALL:
1282 if (!capable(CAP_SYS_ADMIN))
1285 error = xfs_errortag_clearall(mp, 1);