4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/backing-dev.h>
25 #include <linux/stat.h>
26 #include <linux/fcntl.h>
27 #include <linux/mpage.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/smp_lock.h>
31 #include <linux/writeback.h>
32 #include <linux/delay.h>
33 #include <asm/div64.h>
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
/*
 * Zero and populate a caller-allocated cifsFileInfo for a freshly opened
 * SMB handle: records the server file id (netfid), the opening thread
 * group id, and the VFS file/inode pointers, and clears the handle-state
 * flags.  (NOTE(review): this extract elides some lines — the opening
 * brace and final return are not visible here.)
 */
42 static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
/* pid of opener; used later to match handles to lock/reuse requests */
48 private_data->pid = current->tgid;
/* fh_sem serializes invalidation vs. reopen of this handle
   (taken with down()/up() in cifs_reopen_file) */
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track num writers to the inode, since writepages
55 does not tell us which handle the write is for so there can
56 be a close (overlapping with write) of the filehandle that
57 cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
/*
 * Map the POSIX open(2) access mode (O_RDONLY/O_WRONLY/O_RDWR) to the
 * CIFS desired-access bits sent in the SMB open request.
 * NOTE(review): the return statements for the O_RDONLY and O_WRONLY arms
 * are elided in this extract (presumably GENERIC_READ and GENERIC_WRITE
 * respectively — confirm against the full file).
 */
63 static inline int cifs_convert_flags(unsigned int flags)
65 if ((flags & O_ACCMODE) == O_RDONLY)
67 else if ((flags & O_ACCMODE) == O_WRONLY)
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request
71 can cause unnecessary access denied on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
/*
 * Translate POSIX O_CREAT/O_EXCL/O_TRUNC flag combinations into the CIFS
 * create disposition for the SMB open (see the mapping table comment in
 * cifs_open).  Ordering matters: the most specific combination is
 * checked first.  NOTE(review): the returns for the O_CREAT|O_EXCL,
 * plain O_CREAT, and fall-through cases are elided in this extract
 * (presumably FILE_CREATE, FILE_OPEN_IF, and FILE_OPEN — confirm).
 */
79 static inline int cifs_get_disposition(unsigned int flags)
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
87 else if ((flags & O_TRUNC) == O_TRUNC)
88 return FILE_OVERWRITE;
93 /* all arguments to this function must be checked for validity in caller */
/*
 * Post-open bookkeeping shared by the open path: link the new handle
 * into the inode's open-file list, decide whether cached pages are still
 * valid (comparing server mtime/size against the local inode), refresh
 * inode metadata from the server, and record any oplock the server
 * granted in the per-inode caching flags.
 * NOTE(review): the lock acquisition pairing with the
 * write_unlock(&GlobalSMBSeslock) below is elided in this extract.
 */
94 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
95 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
96 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
97 char *full_path, int xid)
102 /* want handles we can use to read with first
103 in the list so we do not have to walk the
104 list to search for one in prepare_write */
105 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
106 list_add_tail(&pCifsFile->flist,
107 &pCifsInode->openFileList);
109 list_add(&pCifsFile->flist,
110 &pCifsInode->openFileList);
112 write_unlock(&GlobalSMBSeslock);
113 if (pCifsInode->clientCanCacheRead) {
114 /* we have the inode open somewhere else
115 no need to discard cache data */
116 goto client_can_cache;
119 /* BB need same check in cifs_create too? */
120 /* if not oplocked, invalidate inode pages if mtime or file
/* compare server's last-write time and EOF against the cached inode;
   if both match, the cache is assumed still valid */
122 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
123 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
124 (file->f_dentry->d_inode->i_size ==
125 (loff_t)le64_to_cpu(buf->EndOfFile))) {
126 cFYI(1, ("inode unchanged on server"));
128 if (file->f_dentry->d_inode->i_mapping) {
129 /* BB no need to lock inode until after invalidate
130 since namei code should already have it locked? */
/* flush dirty pages before discarding the stale cache */
131 filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
133 cFYI(1, ("invalidating remote inode since open detected it "
135 invalidate_remote_inode(file->f_dentry->d_inode);
/* refresh inode attributes via the protocol variant the server supports */
139 if (pTcon->ses->capabilities & CAP_UNIX)
140 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
141 full_path, inode->i_sb, xid);
143 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
144 full_path, buf, inode->i_sb, xid);
/* exclusive oplock => safe to cache both reads and writes locally;
   level II (read) oplock => read caching only */
146 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
147 pCifsInode->clientCanCacheAll = TRUE;
148 pCifsInode->clientCanCacheRead = TRUE;
149 cFYI(1, ("Exclusive Oplock granted on inode %p",
150 file->f_dentry->d_inode));
151 } else if ((*oplock & 0xF) == OPLOCK_READ)
152 pCifsInode->clientCanCacheRead = TRUE;
/*
 * VFS ->open() entry point.  For files being created, first look for a
 * handle already obtained by cifs_create for this process and reuse it;
 * otherwise build the full path, translate the POSIX open flags to a
 * CIFS desired-access/disposition pair, and issue an SMB open (NT-style
 * when the server supports it, legacy OpenX otherwise).  On success the
 * handle is recorded in file->private_data and linked into the tcon and
 * inode open-file lists via cifs_open_inode_helper.
 * NOTE(review): error-path returns and several cleanup lines are elided
 * in this extract.
 */
157 int cifs_open(struct inode *inode, struct file *file)
161 struct cifs_sb_info *cifs_sb;
162 struct cifsTconInfo *pTcon;
163 struct cifsFileInfo *pCifsFile;
164 struct cifsInodeInfo *pCifsInode;
165 struct list_head *tmp;
166 char *full_path = NULL;
170 FILE_ALL_INFO *buf = NULL;
174 cifs_sb = CIFS_SB(inode->i_sb);
175 pTcon = cifs_sb->tcon;
177 if (file->f_flags & O_CREAT) {
178 /* search inode for this file and fill in file->private_data */
179 pCifsInode = CIFS_I(file->f_dentry->d_inode);
180 read_lock(&GlobalSMBSeslock);
181 list_for_each(tmp, &pCifsInode->openFileList) {
182 pCifsFile = list_entry(tmp, struct cifsFileInfo,
/* reuse a handle opened by cifs_create in this same process
   (pfile not yet attached, pid matches) */
184 if ((pCifsFile->pfile == NULL) &&
185 (pCifsFile->pid == current->tgid)) {
186 /* mode set in cifs_create */
188 /* needed for writepage */
189 pCifsFile->pfile = file;
191 file->private_data = pCifsFile;
195 read_unlock(&GlobalSMBSeslock);
196 if (file->private_data != NULL) {
201 if (file->f_flags & O_EXCL)
202 cERROR(1, ("could not find file instance for "
203 "new file %p", file));
207 full_path = build_path_from_dentry(file->f_dentry);
208 if (full_path == NULL) {
213 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
214 inode, file->f_flags, full_path));
215 desiredAccess = cifs_convert_flags(file->f_flags);
217 /*********************************************************************
218 * open flag mapping table:
220 * POSIX Flag CIFS Disposition
221 * ---------- ----------------
222 * O_CREAT FILE_OPEN_IF
223 * O_CREAT | O_EXCL FILE_CREATE
224 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
225 * O_TRUNC FILE_OVERWRITE
226 * none of the above FILE_OPEN
228 * Note that there is not a direct match between disposition
229 * FILE_SUPERSEDE (ie create whether or not file exists although
230 * O_CREAT | O_TRUNC is similar but truncates the existing
231 * file rather than creating a new file as FILE_SUPERSEDE does
232 * (which uses the attributes / metadata passed in on open call)
234 *? O_SYNC is a reasonable match to CIFS writethrough flag
235 *? and the read write flags match reasonably. O_LARGEFILE
236 *? is irrelevant because largefile support is always used
237 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
238 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
239 *********************************************************************/
241 disposition = cifs_get_disposition(file->f_flags);
248 /* BB pass O_SYNC flag through on file attributes .. BB */
250 /* Also refresh inode by passing in file_info buf returned by SMBOpen
251 and calling get_inode_info with returned buf (at least helps
252 non-Unix server case) */
254 /* BB we can not do this if this is the second open of a file
255 and the first handle has writebehind data, we might be
256 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
257 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
/* NT-capable server: use the full NT SMB open */
263 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
264 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
269 rc = -EIO; /* no NT SMB support fall into legacy open below */
272 /* Old server, try legacy style OpenX */
273 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
274 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
275 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
276 & CIFS_MOUNT_MAP_SPECIAL_CHR);
279 cFYI(1, ("cifs_open returned 0x%x", rc));
283 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
284 if (file->private_data == NULL) {
288 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
289 write_lock(&GlobalSMBSeslock);
290 list_add(&pCifsFile->tlist, &pTcon->openFileList);
292 pCifsInode = CIFS_I(file->f_dentry->d_inode);
294 rc = cifs_open_inode_helper(inode, file, pCifsInode,
296 &oplock, buf, full_path, xid);
298 write_unlock(&GlobalSMBSeslock);
/* CIFS_CREATE_ACTION means the server actually created the file; set
   the requested mode now (could not be set earlier for r/o files) */
301 if (oplock & CIFS_CREATE_ACTION) {
302 /* time to set mode which we can not set earlier due to
303 problems creating new read-only files */
304 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
305 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
307 (__u64)-1, (__u64)-1, 0 /* dev */,
309 cifs_sb->mnt_cifs_flags &
310 CIFS_MOUNT_MAP_SPECIAL_CHR);
312 /* BB implement via Windows security descriptors eg
313 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
315 in the meantime could set r/o dos attribute when
316 perms are eg: mode & 0222 == 0 */
327 /* Try to reacquire byte range locks that were released when session */
328 /* to server was lost */
/*
 * Stub: intended to re-establish byte-range locks on a handle after a
 * session reconnect; the lock walk is not implemented yet (see BB note).
 */
329 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
333 /* BB list all locks open on this file and relock */
/*
 * Re-open a handle that was invalidated by session loss/reconnect.
 * Serialized against other users of the handle by fh_sem; on success
 * stores the new netfid, revalidates inode metadata (with local caching
 * temporarily disabled), restores oplock-based cache flags, and attempts
 * to re-acquire byte-range locks.
 * NOTE(review): error-path returns and some intermediate lines are
 * elided in this extract; the flush-vs-no-flush decision appears to be
 * driven by a parameter not visible here.
 */
338 static int cifs_reopen_file(struct inode *inode, struct file *file,
343 struct cifs_sb_info *cifs_sb;
344 struct cifsTconInfo *pTcon;
345 struct cifsFileInfo *pCifsFile;
346 struct cifsInodeInfo *pCifsInode;
347 char *full_path = NULL;
349 int disposition = FILE_OPEN;
354 if (file->private_data) {
355 pCifsFile = (struct cifsFileInfo *)file->private_data;
/* hold fh_sem across the reopen so concurrent users see either the
   old-invalid or the new-valid netfid, never a half-updated one */
360 down(&pCifsFile->fh_sem);
361 if (pCifsFile->invalidHandle == FALSE) {
/* someone else already reopened it; nothing to do */
362 up(&pCifsFile->fh_sem);
367 if (file->f_dentry == NULL) {
368 up(&pCifsFile->fh_sem);
369 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
373 cifs_sb = CIFS_SB(inode->i_sb);
374 pTcon = cifs_sb->tcon;
375 /* can not grab rename sem here because various ops, including
376 those that already have the rename sem can end up causing writepage
377 to get called and if the server was down that means we end up here,
378 and we can never tell if the caller already has the rename_sem */
379 full_path = build_path_from_dentry(file->f_dentry);
380 if (full_path == NULL) {
381 up(&pCifsFile->fh_sem);
386 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
387 inode, file->f_flags,full_path));
388 desiredAccess = cifs_convert_flags(file->f_flags);
395 /* Can not refresh inode by passing in file_info buf to be returned
396 by SMBOpen and then calling get_inode_info with returned buf
397 since file might have write behind data that needs to be flushed
398 and server version of file size can be stale. If we knew for sure
399 that inode was not dirty locally we could do this */
401 /* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
403 up(&pCifsFile->fh_sem);
/* reopen with disposition FILE_OPEN — never create on a reopen */
408 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
409 CREATE_NOT_DIR, &netfid, &oplock, NULL,
410 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
411 CIFS_MOUNT_MAP_SPECIAL_CHR);
413 up(&pCifsFile->fh_sem);
414 cFYI(1, ("cifs_open returned 0x%x", rc));
415 cFYI(1, ("oplock: %d", oplock));
417 pCifsFile->netfid = netfid;
418 pCifsFile->invalidHandle = FALSE;
419 up(&pCifsFile->fh_sem);
420 pCifsInode = CIFS_I(inode);
423 filemap_write_and_wait(inode->i_mapping);
424 /* temporarily disable caching while we
425 go to server to get inode info */
426 pCifsInode->clientCanCacheAll = FALSE;
427 pCifsInode->clientCanCacheRead = FALSE;
428 if (pTcon->ses->capabilities & CAP_UNIX)
429 rc = cifs_get_inode_info_unix(&inode,
430 full_path, inode->i_sb, xid);
432 rc = cifs_get_inode_info(&inode,
433 full_path, NULL, inode->i_sb,
435 } /* else we are writing out data to server already
436 and could deadlock if we tried to flush data, and
437 since we do not know if we have data that would
438 invalidate the current end of file on the server
439 we can not go to the server to get the new inod
/* restore cache-permission flags from the newly granted oplock */
441 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
442 pCifsInode->clientCanCacheAll = TRUE;
443 pCifsInode->clientCanCacheRead = TRUE;
444 cFYI(1, ("Exclusive Oplock granted on inode %p",
445 file->f_dentry->d_inode));
446 } else if ((oplock & 0xF) == OPLOCK_READ) {
447 pCifsInode->clientCanCacheRead = TRUE;
448 pCifsInode->clientCanCacheAll = FALSE;
450 pCifsInode->clientCanCacheRead = FALSE;
451 pCifsInode->clientCanCacheAll = FALSE;
453 cifs_relock_file(pCifsFile);
/*
 * VFS ->release() for regular files.  Marks the handle close-pending,
 * briefly waits out any in-flight writes (wrtPending) so they can reach
 * the server before the SMB close, sends the close unless the tcon is
 * already disconnected, unlinks the handle from the inode/tcon lists,
 * and frees it.  If this was the last open instance, the inode's cache
 * permissions are revoked and any deferred write-behind error is
 * propagated as the return code.
 */
462 int cifs_close(struct inode *inode, struct file *file)
466 struct cifs_sb_info *cifs_sb;
467 struct cifsTconInfo *pTcon;
468 struct cifsFileInfo *pSMBFile =
469 (struct cifsFileInfo *)file->private_data;
473 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon;
476 pSMBFile->closePend = TRUE;
478 /* no sense reconnecting to close a file that is
480 if (pTcon->tidStatus != CifsNeedReconnect) {
/* bounded poll (timeout < 1000 iterations) for pending writers;
   deliberately not a wait queue to keep cifsFileInfo small */
482 while((atomic_read(&pSMBFile->wrtPending) != 0)
483 && (timeout < 1000) ) {
484 /* Give write a better chance to get to
485 server ahead of the close. We do not
486 want to add a wait_q here as it would
487 increase the memory utilization as
488 the struct would be in each open file,
489 but this should give enough time to
491 cERROR(1,("close with pending writes"));
495 rc = CIFSSMBClose(xid, pTcon,
499 write_lock(&GlobalSMBSeslock);
500 list_del(&pSMBFile->flist);
501 list_del(&pSMBFile->tlist);
502 write_unlock(&GlobalSMBSeslock);
503 kfree(pSMBFile->search_resume_name);
504 kfree(file->private_data);
505 file->private_data = NULL;
509 if (list_empty(&(CIFS_I(inode)->openFileList))) {
510 cFYI(1, ("closing last open instance for inode %p", inode));
511 /* if the file is not open we do not know if we can cache info
512 on this inode, much less write behind and read ahead */
513 CIFS_I(inode)->clientCanCacheRead = FALSE;
514 CIFS_I(inode)->clientCanCacheAll = FALSE;
/* report any error deferred from earlier write-behind, once */
516 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
517 rc = CIFS_I(inode)->write_behind_rc;
/*
 * VFS ->release() for directories.  If a readdir search is still in
 * progress on the server, close it with CIFSFindClose (ignoring errors),
 * release the network buffer that held the last search response (small
 * vs. regular buffer pools), free the saved resume name, and free the
 * private handle structure.
 */
522 int cifs_closedir(struct inode *inode, struct file *file)
526 struct cifsFileInfo *pCFileStruct =
527 (struct cifsFileInfo *)file->private_data;
530 cFYI(1, ("Closedir inode = 0x%p", inode));
535 struct cifsTconInfo *pTcon;
536 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
538 pTcon = cifs_sb->tcon;
540 cFYI(1, ("Freeing private data in close dir"));
/* only close on the server if the search was left incomplete and the
   handle is still valid */
541 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
542 (pCFileStruct->invalidHandle == FALSE)) {
543 pCFileStruct->invalidHandle = TRUE;
544 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
545 cFYI(1, ("Closing uncompleted readdir with rc %d",
547 /* not much we can do if it fails anyway, ignore rc */
550 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
552 cFYI(1, ("closedir free smb buf in srch struct"));
553 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
/* buffer came from either the small or the regular SMB buffer pool;
   must be returned to the matching pool */
554 if(pCFileStruct->srch_inf.smallBuf)
555 cifs_small_buf_release(ptmp);
557 cifs_buf_release(ptmp);
559 ptmp = pCFileStruct->search_resume_name;
561 cFYI(1, ("closedir free resume name"));
562 pCFileStruct->search_resume_name = NULL;
565 kfree(file->private_data);
566 file->private_data = NULL;
568 /* BB can we lock the filestruct while this is going on? */
/*
 * VFS ->lock() entry point.  Decodes the fcntl/flock request into an SMB
 * LockingAndX lock type, then either (a) for F_GETLK, probes the range —
 * via CIFSSMBPosixLock on Unix-extension servers, or by attempting a
 * lock and immediately unlocking it on success on other servers — or
 * (b) for F_SETLK(W), issues the POSIX or LockingAndX lock/unlock and
 * finally records the lock locally with posix_lock_file_wait.
 * NOTE(review): the getlk/setlk branch structure and several
 * declarations (numLock/numUnlock, posix_lock_type) are partially elided
 * in this extract.
 */
573 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
579 int wait_flag = FALSE;
580 struct cifs_sb_info *cifs_sb;
581 struct cifsTconInfo *pTcon;
/* default to the large-file (64-bit offset) LockingAndX variant */
583 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
585 length = 1 + pfLock->fl_end - pfLock->fl_start;
589 cFYI(1, ("Lock parm: 0x%x flockflags: "
590 "0x%x flocktype: 0x%x start: %lld end: %lld",
591 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
594 if (pfLock->fl_flags & FL_POSIX)
596 if (pfLock->fl_flags & FL_FLOCK)
598 if (pfLock->fl_flags & FL_SLEEP) {
599 cFYI(1, ("Blocking lock"))
601 if (pfLock->fl_flags & FL_ACCESS)
603 cFYI(1, ("Process suspended by mandatory locking - "
604 "not implemented yet"));
605 if (pfLock->fl_flags & FL_LEASE)
606 cFYI(1, ("Lease on file - not implemented yet"));
607 if (pfLock->fl_flags &
608 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
609 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
/* map the fcntl lock type onto SMB shared/exclusive semantics */
611 if (pfLock->fl_type == F_WRLCK) {
612 cFYI(1, ("F_WRLCK "));
614 } else if (pfLock->fl_type == F_UNLCK) {
615 cFYI(1, ("F_UNLCK"));
617 /* Check if unlock includes more than
619 } else if (pfLock->fl_type == F_RDLCK) {
620 cFYI(1, ("F_RDLCK"));
621 lockType |= LOCKING_ANDX_SHARED_LOCK;
623 } else if (pfLock->fl_type == F_EXLCK) {
624 cFYI(1, ("F_EXLCK"));
626 } else if (pfLock->fl_type == F_SHLCK) {
627 cFYI(1, ("F_SHLCK"));
628 lockType |= LOCKING_ANDX_SHARED_LOCK;
631 cFYI(1, ("Unknown type of lock"));
633 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
634 pTcon = cifs_sb->tcon;
636 if (file->private_data == NULL) {
640 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
643 /* BB add code here to normalize offset and length to
644 account for negative length which we can not accept over the
/* getlk probe: servers with the Unix fcntl capability answer directly */
647 if((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
648 (CIFS_UNIX_FCNTL_CAP &
649 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
651 if(lockType & LOCKING_ANDX_SHARED_LOCK)
652 posix_lock_type = CIFS_RDLCK;
654 posix_lock_type = CIFS_WRLCK;
655 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
657 posix_lock_type, wait_flag);
/* non-POSIX getlk: try to take the lock; if it succeeds the range was
   free, so undo it with an immediate unlock */
662 /* BB we could chain these into one lock request BB */
663 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
664 0, 1, lockType, 0 /* wait flag */ );
666 rc = CIFSSMBLock(xid, pTcon, netfid, length,
667 pfLock->fl_start, 1 /* numUnlock */ ,
668 0 /* numLock */ , lockType,
670 pfLock->fl_type = F_UNLCK;
672 cERROR(1, ("Error unlocking previously locked "
673 "range %d during test of lock", rc));
677 /* if rc == ERR_SHARING_VIOLATION ? */
678 rc = 0; /* do not change lock type to unlock
679 since range in use */
/* setlk path: again prefer the POSIX lock protocol when available */
685 if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
686 (CIFS_UNIX_FCNTL_CAP &
687 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
689 if(lockType & LOCKING_ANDX_SHARED_LOCK)
690 posix_lock_type = CIFS_RDLCK;
692 posix_lock_type = CIFS_WRLCK;
695 posix_lock_type = CIFS_UNLCK;
696 else if(numLock == 0) {
697 /* if no lock or unlock then nothing
698 to do since we do not know what it is */
702 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
704 posix_lock_type, wait_flag);
706 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
707 numUnlock, numLock, lockType, wait_flag);
/* mirror the server-side lock in the local VFS lock table */
708 if (pfLock->fl_flags & FL_POSIX)
709 posix_lock_file_wait(file, pfLock);
/*
 * Write data from a userspace buffer to the file at *poffset, looping
 * until write_size bytes are sent or an error occurs.  Each chunk is at
 * most cifs_sb->wsize bytes.  On -EAGAIN (session reconnect) the handle
 * is reopened and the chunk retried; if the file was closed/freed while
 * blocked, the partial count written so far is returned.  On success
 * *poffset is advanced, ctime/mtime are updated, and i_size is extended
 * if the write went past the cached EOF.
 * NOTE(review): several error-path returns are elided in this extract.
 */
714 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
715 size_t write_size, loff_t *poffset)
718 unsigned int bytes_written = 0;
719 unsigned int total_written;
720 struct cifs_sb_info *cifs_sb;
721 struct cifsTconInfo *pTcon;
723 struct cifsFileInfo *open_file;
725 if (file->f_dentry == NULL)
728 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
732 pTcon = cifs_sb->tcon;
735 (" write %d bytes to offset %lld of %s", write_size,
736 *poffset, file->f_dentry->d_name.name)); */
738 if (file->private_data == NULL)
741 open_file = (struct cifsFileInfo *) file->private_data;
744 if (file->f_dentry->d_inode == NULL) {
/* writes that extend the file can be slow on the server; use the
   long-operation timeout for the first chunk */
749 if (*poffset > file->f_dentry->d_inode->i_size)
750 long_op = 2; /* writes past end of file can take a long time */
754 for (total_written = 0; write_size > total_written;
755 total_written += bytes_written) {
/* -EAGAIN means the session dropped mid-write; revalidate the handle
   and retry this chunk */
757 while (rc == -EAGAIN) {
758 if (file->private_data == NULL) {
759 /* file has been closed on us */
761 /* if we have gotten here we have written some data
762 and blocked, and the file has been freed on us while
763 we blocked so return what we managed to write */
764 return total_written;
766 if (open_file->closePend) {
769 return total_written;
773 if (open_file->invalidHandle) {
774 if ((file->f_dentry == NULL) ||
775 (file->f_dentry->d_inode == NULL)) {
777 return total_written;
779 /* we could deadlock if we called
780 filemap_fdatawait from here so tell
781 reopen_file not to flush data to server
783 rc = cifs_reopen_file(file->f_dentry->d_inode,
789 rc = CIFSSMBWrite(xid, pTcon,
791 min_t(const int, cifs_sb->wsize,
792 write_size - total_written),
793 *poffset, &bytes_written,
794 NULL, write_data + total_written, long_op);
796 if (rc || (bytes_written == 0)) {
804 *poffset += bytes_written;
805 long_op = FALSE; /* subsequent writes fast -
806 15 seconds is plenty */
809 cifs_stats_bytes_written(pTcon, total_written);
811 /* since the write may have blocked check these pointers again */
812 if (file->f_dentry) {
813 if (file->f_dentry->d_inode) {
814 struct inode *inode = file->f_dentry->d_inode;
815 inode->i_ctime = inode->i_mtime =
816 current_fs_time(inode->i_sb);
817 if (total_written > 0) {
818 if (*poffset > file->f_dentry->d_inode->i_size)
819 i_size_write(file->f_dentry->d_inode,
822 mark_inode_dirty_sync(file->f_dentry->d_inode);
826 return total_written;
/*
 * Kernel-buffer variant of the write loop (data is not __user memory).
 * Same retry/reopen structure as cifs_user_write, but when the
 * experimental path is enabled or SMB signing is in use it builds a
 * 2-element kvec (iov[0] reserved for the SMB header) and sends via
 * CIFSSMBWrite2; otherwise it falls back to CIFSSMBWrite.  Updates
 * timestamps and extends i_size past EOF on success.
 * NOTE(review): several error-path returns are elided in this extract.
 */
829 static ssize_t cifs_write(struct file *file, const char *write_data,
830 size_t write_size, loff_t *poffset)
833 unsigned int bytes_written = 0;
834 unsigned int total_written;
835 struct cifs_sb_info *cifs_sb;
836 struct cifsTconInfo *pTcon;
838 struct cifsFileInfo *open_file;
840 if (file->f_dentry == NULL)
843 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
847 pTcon = cifs_sb->tcon;
849 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
850 *poffset, file->f_dentry->d_name.name));
852 if (file->private_data == NULL)
855 open_file = (struct cifsFileInfo *)file->private_data;
858 if (file->f_dentry->d_inode == NULL) {
/* first chunk of an extending write gets the long-operation timeout */
863 if (*poffset > file->f_dentry->d_inode->i_size)
864 long_op = 2; /* writes past end of file can take a long time */
868 for (total_written = 0; write_size > total_written;
869 total_written += bytes_written) {
/* -EAGAIN => session reconnect; revalidate handle and retry chunk */
871 while (rc == -EAGAIN) {
872 if (file->private_data == NULL) {
873 /* file has been closed on us */
875 /* if we have gotten here we have written some data
876 and blocked, and the file has been freed on us
877 while we blocked so return what we managed to
879 return total_written;
881 if (open_file->closePend) {
884 return total_written;
888 if (open_file->invalidHandle) {
889 if ((file->f_dentry == NULL) ||
890 (file->f_dentry->d_inode == NULL)) {
892 return total_written;
894 /* we could deadlock if we called
895 filemap_fdatawait from here so tell
896 reopen_file not to flush data to
898 rc = cifs_reopen_file(file->f_dentry->d_inode,
/* vectored write path: used when experimental mode is on or the
   session requires/enables SMB signing */
903 if(experimEnabled || (pTcon->ses->server &&
904 ((pTcon->ses->server->secMode &
905 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
910 len = min((size_t)cifs_sb->wsize,
911 write_size - total_written);
912 /* iov[0] is reserved for smb header */
913 iov[1].iov_base = (char *)write_data +
915 iov[1].iov_len = len;
916 rc = CIFSSMBWrite2(xid, pTcon,
917 open_file->netfid, len,
918 *poffset, &bytes_written,
921 rc = CIFSSMBWrite(xid, pTcon,
923 min_t(const int, cifs_sb->wsize,
924 write_size - total_written),
925 *poffset, &bytes_written,
926 write_data + total_written,
929 if (rc || (bytes_written == 0)) {
937 *poffset += bytes_written;
938 long_op = FALSE; /* subsequent writes fast -
939 15 seconds is plenty */
942 cifs_stats_bytes_written(pTcon, total_written);
944 /* since the write may have blocked check these pointers again */
945 if (file->f_dentry) {
946 if (file->f_dentry->d_inode) {
947 file->f_dentry->d_inode->i_ctime =
948 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
949 if (total_written > 0) {
950 if (*poffset > file->f_dentry->d_inode->i_size)
951 i_size_write(file->f_dentry->d_inode,
954 mark_inode_dirty_sync(file->f_dentry->d_inode);
958 return total_written;
/*
 * Walk the inode's open-handle list and return one opened for writing
 * (O_RDWR or O_WRONLY) that is not close-pending, bumping its wrtPending
 * count so cifs_close will wait for the caller's write.  If the chosen
 * handle is invalid it is reopened here (dropping GlobalSMBSeslock
 * around the network call); on reopen failure the wrtPending count is
 * dropped and the search presumably continues.  Returns NULL-equivalent
 * when no writable handle exists (elided in this extract).
 */
961 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
963 struct cifsFileInfo *open_file;
966 /* Having a null inode here (because mapping->host was set to zero by
967 the VFS or MM) should not happen but we had reports of on oops (due to
968 it being zero) during stress testcases so we need to check for it */
970 if(cifs_inode == NULL) {
971 cERROR(1,("Null inode passed to cifs_writeable_file"));
976 read_lock(&GlobalSMBSeslock);
977 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
978 if (open_file->closePend)
980 if (open_file->pfile &&
981 ((open_file->pfile->f_flags & O_RDWR) ||
982 (open_file->pfile->f_flags & O_WRONLY))) {
/* claim the handle before dropping the lock so close waits for us */
983 atomic_inc(&open_file->wrtPending);
984 read_unlock(&GlobalSMBSeslock);
985 if((open_file->invalidHandle) &&
986 (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
987 rc = cifs_reopen_file(&cifs_inode->vfs_inode,
988 open_file->pfile, FALSE);
989 /* if it fails, try another handle - might be */
990 /* dangerous to hold up writepages with retry */
992 cFYI(1,("failed on reopen file in wp"));
993 read_lock(&GlobalSMBSeslock);
994 /* can not use this handle, no write
995 pending on this one after all */
997 (&open_file->wrtPending);
1004 read_unlock(&GlobalSMBSeslock);
/*
 * Write the byte range [from, to) of a page cache page back to the
 * server.  Computes the file offset from the page index, clamps the
 * range so it never extends the file (racing truncate returns 0, "don't
 * care"), finds a writable handle via find_writable_file, and writes
 * through cifs_write, dropping the handle's wrtPending claim afterward.
 * NOTE(review): the kunmap and final return are elided in this extract.
 */
1008 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1010 struct address_space *mapping = page->mapping;
1011 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1014 int bytes_written = 0;
1015 struct cifs_sb_info *cifs_sb;
1016 struct cifsTconInfo *pTcon;
1017 struct inode *inode;
1018 struct cifsFileInfo *open_file;
1020 if (!mapping || !mapping->host)
1023 inode = page->mapping->host;
1024 cifs_sb = CIFS_SB(inode->i_sb);
1025 pTcon = cifs_sb->tcon;
1027 offset += (loff_t)from;
1028 write_data = kmap(page);
1031 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1036 /* racing with truncate? */
1037 if (offset > mapping->host->i_size) {
1039 return 0; /* don't care */
1042 /* check to make sure that we are not extending the file */
1043 if (mapping->host->i_size - offset < (loff_t)to)
1044 to = (unsigned)(mapping->host->i_size - offset);
1046 open_file = find_writable_file(CIFS_I(mapping->host));
1048 bytes_written = cifs_write(open_file->pfile, write_data,
/* release the claim taken by find_writable_file */
1050 atomic_dec(&open_file->wrtPending);
1051 /* Does mm or vfs already set times? */
1052 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1053 if ((bytes_written > 0) && (offset)) {
1055 } else if (bytes_written < 0) {
1060 cFYI(1, ("No writeable filehandles for inode"));
/*
 * Address-space ->writepages().  Batches up to 31 dirty, contiguous
 * pages (iov[0] reserved for the SMB header) and sends each batch with
 * a single CIFSSMBWrite2.  Falls back to generic_writepages (one page
 * at a time via cifs_writepage) when wsize is below the page size or
 * when SMB signing is in effect.  A fresh writable handle is looked up
 * per batch since the previous one may have been invalidated.  Follows
 * the standard writeback pattern: pagevec tag lookup, range_cyclic
 * wraparound, nr_to_write accounting, congestion backoff.
 * NOTE(review): batch-flush control flow and a number of statements are
 * elided in this extract.
 */
1068 static int cifs_writepages(struct address_space *mapping,
1069 struct writeback_control *wbc)
1071 struct backing_dev_info *bdi = mapping->backing_dev_info;
1072 unsigned int bytes_to_write;
1073 unsigned int bytes_written;
1074 struct cifs_sb_info *cifs_sb;
1078 int range_whole = 0;
1079 struct kvec iov[32];
1085 struct cifsFileInfo *open_file;
1087 struct pagevec pvec;
1092 cifs_sb = CIFS_SB(mapping->host->i_sb);
1095 * If wsize is smaller that the page cache size, default to writing
1096 * one page at a time via cifs_writepage
1098 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1099 return generic_writepages(mapping, wbc);
/* signing path can not use this batched write; take the generic path */
1101 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1102 if(cifs_sb->tcon->ses->server->secMode &
1103 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1105 return generic_writepages(mapping, wbc);
1108 * BB: Is this meaningful for a non-block-device file system?
1109 * If it is, we should test it again after we do I/O
1111 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1112 wbc->encountered_congestion = 1;
1118 pagevec_init(&pvec, 0);
1119 if (wbc->range_cyclic) {
1120 index = mapping->writeback_index; /* Start from prev offset */
1123 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1124 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1125 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1130 while (!done && (index <= end) &&
1131 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1132 PAGECACHE_TAG_DIRTY,
1133 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1142 for (i = 0; i < nr_pages; i++) {
1143 page = pvec.pages[i];
1145 * At this point we hold neither mapping->tree_lock nor
1146 * lock on the page itself: the page may be truncated or
1147 * invalidated (changing page->mapping to NULL), or even
1148 * swizzled back from swapper_space to tmpfs file
1154 else if (TestSetPageLocked(page))
1157 if (unlikely(page->mapping != mapping)) {
1162 if (!wbc->range_cyclic && page->index > end) {
/* batch must be contiguous on disk; break on a gap */
1168 if (next && (page->index != next)) {
1169 /* Not next consecutive page */
1174 if (wbc->sync_mode != WB_SYNC_NONE)
1175 wait_on_page_writeback(page);
1177 if (PageWriteback(page) ||
1178 !test_clear_page_dirty(page)) {
1183 if (page_offset(page) >= mapping->host->i_size) {
1190 * BB can we get rid of this? pages are held by pvec
1192 page_cache_get(page);
/* last page may be partial; clamp its length to i_size */
1194 len = min(mapping->host->i_size - page_offset(page),
1195 (loff_t)PAGE_CACHE_SIZE);
1197 /* reserve iov[0] for the smb header */
1199 iov[n_iov].iov_base = kmap(page);
1200 iov[n_iov].iov_len = len;
1201 bytes_to_write += len;
1205 offset = page_offset(page);
1207 next = page->index + 1;
1208 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1212 /* Search for a writable handle every time we call
1213 * CIFSSMBWrite2. We can't rely on the last handle
1214 * we used to still be valid
1216 open_file = find_writable_file(CIFS_I(mapping->host));
1218 cERROR(1, ("No writable handles for inode"));
1221 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1223 bytes_to_write, offset,
1224 &bytes_written, iov, n_iov,
1226 atomic_dec(&open_file->wrtPending);
1227 if (rc || bytes_written < bytes_to_write) {
1228 cERROR(1,("Write2 ret %d, written = %d",
1229 rc, bytes_written));
1230 /* BB what if continued retry is
1231 requested via mount flags? */
1232 set_bit(AS_EIO, &mapping->flags);
1234 cifs_stats_bytes_written(cifs_sb->tcon,
1238 for (i = 0; i < n_iov; i++) {
1239 page = pvec.pages[first + i];
1240 /* Should we also set page error on
1241 success rc but too little data written? */
1242 /* BB investigate retry logic on temporary
1243 server crash cases and how recovery works
1244 when page marked as error */
1249 page_cache_release(page);
1251 if ((wbc->nr_to_write -= n_iov) <= 0)
1255 pagevec_release(&pvec);
1257 if (!scanned && !done) {
1259 * We hit the last page and there is more work to be done: wrap
1260 * back to the start of the file
1266 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1267 mapping->writeback_index = index;
/*
 * Address-space ->writepage(): write back a single page by delegating
 * the full page range to cifs_partialpagewrite.  Takes a page reference
 * across the write and marks the page up to date afterward (see BB note
 * about missing error handling before SetPageUptodate).
 */
1274 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1280 /* BB add check for wbc flags */
1281 page_cache_get(page);
1282 if (!PageUptodate(page)) {
1283 cFYI(1, ("ppw - page not up to date"));
1286 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1287 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1289 page_cache_release(page);
/*
 * Address-space ->commit_write(): finalize a prepare_write/commit_write
 * cycle for bytes [offset, to) of a page.  If the write extends past the
 * cached EOF, grow i_size and push the new size to the server with
 * CIFSSMBSetFileSize (reopening the handle on -EAGAIN).  If the page is
 * not fully up to date, write the just-modified range through
 * cifs_write using this file's own handle rather than relying on
 * writepage; otherwise mark the page dirty for later writeback.
 * NOTE(review): parts of the EOF-setting branch appear to be inside a
 * commented-out region in the full file — the extract elides the
 * delimiters, so verify which statements are live.
 */
1294 static int cifs_commit_write(struct file *file, struct page *page,
1295 unsigned offset, unsigned to)
1299 struct inode *inode = page->mapping->host;
1300 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1304 cFYI(1, ("commit write for page %p up to position %lld for %d",
1305 page, position, to));
1306 if (position > inode->i_size) {
1307 i_size_write(inode, position);
1308 /* if (file->private_data == NULL) {
1311 open_file = (struct cifsFileInfo *)file->private_data;
1312 cifs_sb = CIFS_SB(inode->i_sb);
1314 while (rc == -EAGAIN) {
1315 if ((open_file->invalidHandle) &&
1316 (!open_file->closePend)) {
1317 rc = cifs_reopen_file(
1318 file->f_dentry->d_inode, file);
1322 if (!open_file->closePend) {
1323 rc = CIFSSMBSetFileSize(xid,
1324 cifs_sb->tcon, position,
1326 open_file->pid, FALSE);
1332 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1335 if (!PageUptodate(page)) {
1336 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1337 /* can not rely on (or let) writepage write this data */
1339 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1344 /* this is probably better than directly calling
1345 partialpage_write since in this function the file handle is
1346 known which we might as well leverage */
1347 /* BB check if anything else missing out of ppw
1348 such as updating last write time */
1349 page_data = kmap(page);
1350 rc = cifs_write(file, page_data + offset, to-offset,
1354 /* else if (rc < 0) should we set writebehind rc? */
1357 set_page_dirty(page);
1364 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
/* fsync(2) handler: flush all dirty pages for the inode via
 * filemap_fdatawrite(), and clear the stored writebehind error once a
 * clean flush succeeds.
 * NOTE(review): rc/xid declarations and the return are not visible in
 * this excerpt. */
1368 struct inode *inode = file->f_dentry->d_inode;
1372 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1373 dentry->d_name.name, datasync));
1375 rc = filemap_fdatawrite(inode->i_mapping);
/* Flush succeeded: the previously recorded async write error (if any)
 * has now been reported/overridden, so reset it. */
1377 CIFS_I(inode)->write_behind_rc = 0;
1382 /* static void cifs_sync_page(struct page *page)
1384 struct address_space *mapping;
1385 struct inode *inode;
1386 unsigned long index = page->index;
1387 unsigned int rpages = 0;
1390 cFYI(1, ("sync page %p",page));
1391 mapping = page->mapping;
1394 inode = mapping->host;
1398 /* fill in rpages then
1399 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1401 /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1411 * As file closes, flush all cached write data for this inode checking
1412 * for write behind errors.
1414 int cifs_flush(struct file *file, fl_owner_t id)
/* flush handler invoked on close(2): write out dirty pages and clear the
 * recorded writebehind error after a successful flush.
 * NOTE(review): the rc declaration and return statement are not visible
 * in this excerpt. */
1416 struct inode * inode = file->f_dentry->d_inode;
1419 /* Rather than do the steps manually:
1420 lock the inode for writing
1421 loop through pages looking for write behind data (dirty pages)
1422 coalesce into contiguous 16K (or smaller) chunks to write to server
1423 send to server (prefer in parallel)
1424 deal with writebehind errors
1425 unlock inode for writing
1426 filemapfdatawrite appears easier for the time being */
1428 rc = filemap_fdatawrite(inode->i_mapping);
1429 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1430 CIFS_I(inode)->write_behind_rc = 0;
1432 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1437 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1438 size_t read_size, loff_t *poffset)
/* Read from the server directly into a userspace buffer, bypassing the
 * page cache.  Loops issuing SMB reads, copy_to_user()ing each response
 * payload and advancing *poffset by the bytes consumed.
 * NOTE(review): interior lines (rc/xid declarations, netfid argument to
 * CIFSSMBRead, error returns, FreeXid) are missing from this excerpt. */
1441 unsigned int bytes_read = 0;
1442 unsigned int total_read = 0;
1443 unsigned int current_read_size;
1444 struct cifs_sb_info *cifs_sb;
1445 struct cifsTconInfo *pTcon;
1447 struct cifsFileInfo *open_file;
1448 char *smb_read_data;
1449 char __user *current_offset;
1450 struct smb_com_read_rsp *pSMBr;
1453 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1454 pTcon = cifs_sb->tcon;
1456 if (file->private_data == NULL) {
1460 open_file = (struct cifsFileInfo *)file->private_data;
1462 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1463 cFYI(1, ("attempting read on write only file instance"));
/* Advance through the user buffer one SMB read at a time. */
1465 for (total_read = 0, current_offset = read_data;
1466 read_size > total_read;
1467 total_read += bytes_read, current_offset += bytes_read) {
1468 current_read_size = min_t(const int, read_size - total_read,
1471 smb_read_data = NULL;
/* -EAGAIN means the session reconnected: reopen the handle and retry. */
1472 while (rc == -EAGAIN) {
1473 int buf_type = CIFS_NO_BUFFER;
1474 if ((open_file->invalidHandle) &&
1475 (!open_file->closePend)) {
1476 rc = cifs_reopen_file(file->f_dentry->d_inode,
1481 rc = CIFSSMBRead(xid, pTcon,
1483 current_read_size, *poffset,
1484 &bytes_read, &smb_read_data,
1486 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1487 if (smb_read_data) {
/* Payload starts after the RFC1001 length field plus the SMB-level
 * DataOffset from the read response header. */
1488 if (copy_to_user(current_offset,
1490 4 /* RFC1001 length field */ +
1491 le16_to_cpu(pSMBr->DataOffset),
/* Release the response buffer to the matching (small/large) pool. */
1496 if(buf_type == CIFS_SMALL_BUFFER)
1497 cifs_small_buf_release(smb_read_data);
1498 else if(buf_type == CIFS_LARGE_BUFFER)
1499 cifs_buf_release(smb_read_data);
1500 smb_read_data = NULL;
1503 if (rc || (bytes_read == 0)) {
1511 cifs_stats_bytes_read(pTcon, bytes_read);
1512 *poffset += bytes_read;
1520 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
/* Read from the server into a kernel buffer (used by cifs_readpage_worker).
 * Loops issuing SMB reads capped at the negotiated buffer size, retrying
 * with a reopened handle after a reconnect (-EAGAIN).
 * FIX(review): the CIFSSMBRead() buffer argument had been corrupted by an
 * encoding round-trip — "&current_offset" was mangled into the mis-encoded
 * byte sequence "¤t_offset" ("&curren" HTML-entity damage).  Restored.
 * NOTE(review): interior lines (rc/xid declarations, netfid argument,
 * error returns, FreeXid) are missing from this excerpt. */
1524 unsigned int bytes_read = 0;
1525 unsigned int total_read;
1526 unsigned int current_read_size;
1527 struct cifs_sb_info *cifs_sb;
1528 struct cifsTconInfo *pTcon;
1530 char *current_offset;
1531 struct cifsFileInfo *open_file;
1532 int buf_type = CIFS_NO_BUFFER;
1535 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1536 pTcon = cifs_sb->tcon;
1538 if (file->private_data == NULL) {
1542 open_file = (struct cifsFileInfo *)file->private_data;
1544 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1545 cFYI(1, ("attempting read on write only file instance"));
/* Walk the destination buffer one SMB read at a time. */
1547 for (total_read = 0, current_offset = read_data;
1548 read_size > total_read;
1549 total_read += bytes_read, current_offset += bytes_read) {
1550 current_read_size = min_t(const int, read_size - total_read,
1552 /* For windows me and 9x we do not want to request more
1553 than it negotiated since it will refuse the read then */
1555 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1556 current_read_size = min_t(const int, current_read_size,
1557 pTcon->ses->server->maxBuf - 128);
/* -EAGAIN means the session reconnected: reopen the handle and retry. */
1560 while (rc == -EAGAIN) {
1561 if ((open_file->invalidHandle) &&
1562 (!open_file->closePend)) {
1563 rc = cifs_reopen_file(file->f_dentry->d_inode,
1568 rc = CIFSSMBRead(xid, pTcon,
1570 current_read_size, *poffset,
1571 &bytes_read, &current_offset,
1574 if (rc || (bytes_read == 0)) {
1582 cifs_stats_bytes_read(pTcon, total_read);
1583 *poffset += bytes_read;
1590 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
/* mmap handler: revalidate cached inode metadata against the server
 * before handing off to generic_file_mmap().
 * NOTE(review): xid handling and the return are not visible in this
 * excerpt; the revalidation failure path presumably aborts the mmap —
 * confirm against the full source. */
1592 struct dentry *dentry = file->f_dentry;
1596 rc = cifs_revalidate(dentry);
1598 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1602 rc = generic_file_mmap(file, vma);
1608 static void cifs_copy_cache_pages(struct address_space *mapping,
1609 struct list_head *pages, int bytes_read, char *data,
1610 struct pagevec *plru_pvec)
/* Distribute a contiguous server read buffer ("data", "bytes_read" bytes)
 * across the readahead pages on "pages": insert each page into the page
 * cache, copy one page worth of data (zero-padding a trailing partial
 * page), mark it uptodate, and batch it onto the LRU via plru_pvec.
 * NOTE(review): unlock_page and loop-closing lines are missing from this
 * excerpt. */
1615 while (bytes_read > 0) {
1616 if (list_empty(pages))
/* Pages were queued in reverse order; take from the tail. */
1619 page = list_entry(pages->prev, struct page, lru);
1620 list_del(&page->lru);
1622 if (add_to_page_cache(page, mapping, page->index,
/* Insertion failed (e.g. page already cached): skip this page but still
 * consume its slice of the read buffer so alignment is preserved. */
1624 page_cache_release(page);
1625 cFYI(1, ("Add page cache failed"));
1626 data += PAGE_CACHE_SIZE;
1627 bytes_read -= PAGE_CACHE_SIZE;
1631 target = kmap_atomic(page,KM_USER0);
1633 if (PAGE_CACHE_SIZE > bytes_read) {
1634 memcpy(target, data, bytes_read);
1635 /* zero the tail end of this partial page */
1636 memset(target + bytes_read, 0,
1637 PAGE_CACHE_SIZE - bytes_read);
1640 memcpy(target, data, PAGE_CACHE_SIZE);
1641 bytes_read -= PAGE_CACHE_SIZE;
1643 kunmap_atomic(target, KM_USER0);
1645 flush_dcache_page(page);
1646 SetPageUptodate(page);
/* Flush the pagevec to the LRU when it fills up. */
1648 if (!pagevec_add(plru_pvec, page))
1649 __pagevec_lru_add(plru_pvec);
1650 data += PAGE_CACHE_SIZE;
1655 static int cifs_readpages(struct file *file, struct address_space *mapping,
1656 struct list_head *page_list, unsigned num_pages)
/* Readahead handler: coalesce runs of adjacent pages from page_list into
 * single large SMB reads (bounded by rsize), then scatter each response
 * into the page cache via cifs_copy_cache_pages().  On error the unread
 * pages are dropped from the list so the VFS falls back to readpage.
 * NOTE(review): many interior lines (declarations, xid handling, netfid,
 * break/return statements, loop closers) are missing from this excerpt. */
1662 struct cifs_sb_info *cifs_sb;
1663 struct cifsTconInfo *pTcon;
1665 unsigned int read_size,i;
1666 char *smb_read_data = NULL;
1667 struct smb_com_read_rsp *pSMBr;
1668 struct pagevec lru_pvec;
1669 struct cifsFileInfo *open_file;
1670 int buf_type = CIFS_NO_BUFFER;
1673 if (file->private_data == NULL) {
1677 open_file = (struct cifsFileInfo *)file->private_data;
1678 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1679 pTcon = cifs_sb->tcon;
1681 pagevec_init(&lru_pvec, 0);
1683 for (i = 0; i < num_pages; ) {
1684 unsigned contig_pages;
1685 struct page *tmp_page;
1686 unsigned long expected_index;
1688 if (list_empty(page_list))
/* Readahead pages arrive in reverse order; start from the tail. */
1691 page = list_entry(page_list->prev, struct page, lru);
1692 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1694 /* count adjacent pages that we will read into */
1697 list_entry(page_list->prev, struct page, lru)->index;
1698 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1699 if (tmp_page->index == expected_index) {
1705 if (contig_pages + i > num_pages)
1706 contig_pages = num_pages - i;
1708 /* for reads over a certain size could initiate async
1711 read_size = contig_pages * PAGE_CACHE_SIZE;
1712 /* Read size needs to be in multiples of one page */
1713 read_size = min_t(const unsigned int, read_size,
1714 cifs_sb->rsize & PAGE_CACHE_MASK);
/* -EAGAIN: session reconnected; reopen the handle and retry the read. */
1717 while (rc == -EAGAIN) {
1718 if ((open_file->invalidHandle) &&
1719 (!open_file->closePend)) {
1720 rc = cifs_reopen_file(file->f_dentry->d_inode,
1726 rc = CIFSSMBRead(xid, pTcon,
1729 &bytes_read, &smb_read_data,
1731 /* BB more RC checks ? */
/* Stale buffer from a failed attempt: release before retrying. */
1733 if (smb_read_data) {
1734 if(buf_type == CIFS_SMALL_BUFFER)
1735 cifs_small_buf_release(smb_read_data);
1736 else if(buf_type == CIFS_LARGE_BUFFER)
1737 cifs_buf_release(smb_read_data);
1738 smb_read_data = NULL;
1742 if ((rc < 0) || (smb_read_data == NULL)) {
1743 cFYI(1, ("Read error in readpages: %d", rc));
1744 /* clean up remaining pages off list */
1745 while (!list_empty(page_list) && (i < num_pages)) {
1746 page = list_entry(page_list->prev, struct page,
1748 list_del(&page->lru);
1749 page_cache_release(page);
1752 } else if (bytes_read > 0) {
1753 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
/* Payload begins after the RFC1001 length field plus DataOffset. */
1754 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1755 smb_read_data + 4 /* RFC1001 hdr */ +
1756 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1758 i += bytes_read >> PAGE_CACHE_SHIFT;
1759 cifs_stats_bytes_read(pTcon, bytes_read);
1760 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1761 i++; /* account for partial page */
1763 /* server copy of file can have smaller size
1765 /* BB do we need to verify this common case ?
1766 this case is ok - if we are at server EOF
1767 we will hit it on next read */
1769 /* while (!list_empty(page_list) && (i < num_pages)) {
1770 page = list_entry(page_list->prev,
1772 list_del(&page->list);
1773 page_cache_release(page);
1778 cFYI(1, ("No bytes read (%d) at offset %lld . "
1779 "Cleaning remaining pages from readahead list",
1780 bytes_read, offset));
1781 /* BB turn off caching and do new lookup on
1782 file size at server? */
1783 while (!list_empty(page_list) && (i < num_pages)) {
1784 page = list_entry(page_list->prev, struct page,
1786 list_del(&page->lru);
1788 /* BB removeme - replace with zero of page? */
1789 page_cache_release(page);
/* Release any response buffer left over from the final iteration. */
1793 if (smb_read_data) {
1794 if(buf_type == CIFS_SMALL_BUFFER)
1795 cifs_small_buf_release(smb_read_data);
1796 else if(buf_type == CIFS_LARGE_BUFFER)
1797 cifs_buf_release(smb_read_data);
1798 smb_read_data = NULL;
/* Drain any pages still batched in the pagevec onto the LRU. */
1803 pagevec_lru_add(&lru_pvec);
1805 /* need to free smb_read_data buf before exit */
1806 if (smb_read_data) {
1807 if(buf_type == CIFS_SMALL_BUFFER)
1808 cifs_small_buf_release(smb_read_data);
1809 else if(buf_type == CIFS_LARGE_BUFFER)
1810 cifs_buf_release(smb_read_data);
1811 smb_read_data = NULL;
1818 static int cifs_readpage_worker(struct file *file, struct page *page,
/* Fill a single page from the server using cifs_read(), zero-padding the
 * tail if the read came up short, and mark the page uptodate on success.
 * Shared by cifs_readpage() and cifs_prepare_write().
 * NOTE(review): the poffset parameter line, error-path branches and the
 * return are not visible in this excerpt. */
1824 page_cache_get(page);
1825 read_data = kmap(page);
1826 /* for reads over a certain size could initiate async read ahead */
1828 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1833 cFYI(1, ("Bytes read %d",rc));
/* Reading through the cache counts as an access; refresh atime. */
1835 file->f_dentry->d_inode->i_atime =
1836 current_fs_time(file->f_dentry->d_inode->i_sb);
/* Short read (e.g. at EOF): zero the rest of the page. */
1838 if (PAGE_CACHE_SIZE > rc)
1839 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1841 flush_dcache_page(page);
1842 SetPageUptodate(page);
1847 page_cache_release(page);
1851 static int cifs_readpage(struct file *file, struct page *page)
/* readpage handler: compute the file offset for this page and delegate
 * the actual fill to cifs_readpage_worker().
 * NOTE(review): xid handling, the error return for a NULL private_data,
 * unlock_page and the final return are not visible in this excerpt. */
1853 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1859 if (file->private_data == NULL) {
1864 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1865 page, (int)offset, (int)offset));
1867 rc = cifs_readpage_worker(file, page, &offset);
1875 /* We do not want to update the file size from server for inodes
1876 open for write - to avoid races with writepage extending
1877 the file - in the future we could consider allowing
1878 refreshing the inode only on increases in the file size
1879 but this is tricky to do without racing with writebehind
1880 page caching in the current Linux kernel design */
1881 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
/* Return whether the cached inode size may be replaced by the size the
 * server reports.  Unsafe while a writable handle exists (writepage may
 * be extending the file), except on DIRECT_IO mounts where there is no
 * page cache to be corrupted.
 * NOTE(review): the return statements are not visible in this excerpt. */
1883 struct cifsFileInfo *open_file = NULL;
1886 open_file = find_writable_file(cifsInode);
1889 struct cifs_sb_info *cifs_sb;
1891 /* there is not actually a write pending so let
1892 this handle go free and allow it to
1893 be closable if needed */
1894 atomic_dec(&open_file->wrtPending);
1896 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1897 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1898 /* since no page cache to corrupt on directio
1899 we can change size safely */
1908 static int cifs_prepare_write(struct file *file, struct page *page,
1909 unsigned from, unsigned to)
/* prepare_write handler: make the page ready for a [from, to) copy-in.
 * A full-page write needs no server read; otherwise, when the file is
 * readable, fault the page in via cifs_readpage_worker() so the untouched
 * bytes are valid before commit_write flushes the page.
 * NOTE(review): the rc declaration and return are not visible in this
 * excerpt; read errors are deliberately not propagated (see BB comment
 * at the end). */
1912 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1913 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1914 if (!PageUptodate(page)) {
1915 /* if (to - from != PAGE_CACHE_SIZE) {
1916 void *kaddr = kmap_atomic(page, KM_USER0);
1917 memset(kaddr, 0, from);
1918 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1919 flush_dcache_page(page);
1920 kunmap_atomic(kaddr, KM_USER0);
1922 /* If we are writing a full page it will be up to date,
1923 no need to read from the server */
1924 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1925 SetPageUptodate(page);
1927 /* might as well read a page, it is fast enough */
1928 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1929 rc = cifs_readpage_worker(file, page, &offset);
1931 /* should we try using another file handle if there is one -
1932 how would we lock it to prevent close of that handle
1933 racing with this read?
1934 In any case this will be written out by commit_write */
1938 /* BB should we pass any errors back?
1939 e.g. if we do not have read access to the file */
/* Address space operations used when the server's negotiated buffer is
 * large enough for cifs_readpages (SMB header plus one full page). */
1943 const struct address_space_operations cifs_addr_ops = {
1944 .readpage = cifs_readpage,
1945 .readpages = cifs_readpages,
1946 .writepage = cifs_writepage,
1947 .writepages = cifs_writepages,
1948 .prepare_write = cifs_prepare_write,
1949 .commit_write = cifs_commit_write,
1950 .set_page_dirty = __set_page_dirty_nobuffers,
1951 /* .sync_page = cifs_sync_page, */
1956 * cifs_readpages requires the server to support a buffer large enough to
1957 * contain the header plus one complete page of data. Otherwise, we need
1958 * to leave cifs_readpages out of the address space operations.
1960 const struct address_space_operations cifs_addr_ops_smallbuf = {
1961 .readpage = cifs_readpage,
/* .readpages intentionally omitted — see comment above. */
1962 .writepage = cifs_writepage,
1963 .writepages = cifs_writepages,
1964 .prepare_write = cifs_prepare_write,
1965 .commit_write = cifs_commit_write,
1966 .set_page_dirty = __set_page_dirty_nobuffers,
1967 /* .sync_page = cifs_sync_page, */