/* fs/xfs/linux-2.6/xfs_lrw.c */
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int			tag,
	xfs_inode_t		*ip,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	if (ip->i_rwtrace == NULL)
		return;
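	/*
	 * ktrace_enter() records sixteen void * slots, so each 64-bit
	 * value (di_size, offset, i_new_size) is split into high and
	 * low 32-bit halves; unused slots are padded with NULL.
	 */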
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of the buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = ip->i_vnode->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

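		/*
		 * Zero at most one page per iteration via the generic
		 * pagecache write_begin/write_end pair; ->write_begin
		 * reads in any block that is only partially zeroed.
		 */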
		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return -status;
}

ssize_t			/* bytes read, or (-) error */
xfs_read(
	xfs_inode_t		*ip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	xfs_mount_t		*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return -XFS_ERROR(EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
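		/* direct I/O requires sector-aligned offset and length */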
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

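	/* DMAPI hook: let the HSM event daemon stage data in before reading */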
	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

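	/*
	 * For direct I/O, flush and invalidate any cached pages so the
	 * read comes from disk rather than from stale pagecache data.
	 */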
	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages)
			ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
						    -1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, ip,
				(void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_read(
	xfs_inode_t		*ip,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_SHARED;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
					FILP_DELAY_FLAG(infilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_write(
	xfs_inode_t		*ip,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_EXCL;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

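	/*
	 * Publish the expected new size in i_new_size so concurrent
	 * writeback sees the in-flight EOF; it is cleared again once
	 * the splice completes below.
	 */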
	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

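	/*
	 * The splice is done: drop the i_new_size hint, and if a failed
	 * write pushed the on-disk size beyond the in-core size, pull
	 * it back.
	 */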
	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Any holes in the
 * range are left alone as holes.
 */
int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
			ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			goto out_lock;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

ssize_t				/* bytes written, or (-) error */
xfs_write(
	struct xfs_inode	*xip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	int			iolock;
	int			eventsent = 0;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	mp = xip->i_mount;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
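	/*
	 * Buffered writes need i_mutex and the exclusive iolock; direct
	 * writes can run under the shared iolock without i_mutex.
	 */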
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if (DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent) {
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
				      pos, count, dmflags, &iolock);
		if (error)
			goto out_unlock_internal;
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != xip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(xip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			return -XFS_ERROR(EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

	new_size = pos + count;
	if (new_size > xip->i_size)
		xip->i_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS))) {
		file_update_time(file);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */
	if (pos > xip->i_size) {
		error = xfs_zero_eof(xip, pos, xip->i_size);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_path.dentry);
		if (unlikely(error))
			goto out_unlock_internal;
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if (ioflags & IO_ISDIRECT) {
		if (mapping->nrpages) {
			WARN_ON(need_i_mutex == 0);
			xfs_inval_cached_trace(xip, pos, -1,
					(pos & PAGE_CACHE_MASK), -1);
			error = xfs_flushinval_pages(xip,
					(pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (error)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

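	/*
	 * Out of space with DMAPI enabled: give the space-management
	 * daemon a chance to free something up, then retry the write
	 * from the current EOF.
	 */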
	if (ret == -ENOSPC &&
	    DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
				DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* delay flag intentionally unused */
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		if (error)
			goto out_unlock_internal;
		pos = xip->i_size;
		ret = 0;
		goto retry;
	}

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_size)
			xip->i_size = *offset;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		int error2;

		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error2 = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = error2;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		error2 = xfs_write_sync_logforce(mp, xip);
		if (!error)
			error = error2;
	}

 out_unlock_internal:
	if (xip->i_new_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		xip->i_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred.  In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (xip->i_d.di_size > xip->i_size)
			xip->i_d.di_size = xip->i_size;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(xip, iolock);
 out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers get this
 * attached as their b_bdstrat callback function.  This is so that we
 * can catch a buffer after prematurely unpinning it to forcibly
 * shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}
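
/*
 * Hypothetical usage sketch (an assumption, not part of this file):
 * a caller about to modify on-disk state, such as an online grow,
 * would check the devices up front and propagate the positive error:
 *
 *	if (xfs_dev_is_read_only(mp, "grow"))
 *		return XFS_ERROR(EROFS);
 */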