X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=fs%2Fcifs%2Ffile.c;h=5ade53d7bca89624cd6b9381d4e6ba91543517be;hb=9d8d5a284e11e45953ad1f12a5bf1ebc18eefb75;hp=3766db2bb7f28bb7828efe7c68013a0c28eba4ce;hpb=f9f5c81769f88bccd177423a30a7d30461754c39;p=linux-2.6

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 3766db2bb7..5ade53d7bc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -21,11 +21,15 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
 #include
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -47,6 +51,11 @@ static inline struct cifsFileInfo *cifs_init_private(
 	private_data->pInode = inode;
 	private_data->invalidHandle = FALSE;
 	private_data->closePend = FALSE;
+	/* we have to track num writers to the inode, since writepages
+	does not tell us which handle the write is for so there can
+	be a close (overlapping with write) of the filehandle that
+	cifs_writepages chose to use */
+	atomic_set(&private_data->wrtPending, 0);
 
 	return private_data;
 }
@@ -118,8 +127,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
 		if (file->f_dentry->d_inode->i_mapping) {
 		/* BB no need to lock inode until after invalidate
 		   since namei code should already have it locked? */
-			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
-			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
+			filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
 		}
 		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
@@ -410,8 +418,7 @@ static int cifs_reopen_file(struct inode *inode, struct file *file,
 	pCifsInode = CIFS_I(inode);
 	if (pCifsInode) {
 		if (can_flush) {
-			filemap_fdatawrite(inode->i_mapping);
-			filemap_fdatawait(inode->i_mapping);
+			filemap_write_and_wait(inode->i_mapping);
 		/* temporarily disable caching while we
 		   go to server to get inode info */
 			pCifsInode->clientCanCacheAll = FALSE;
@@ -470,6 +477,22 @@ int cifs_close(struct inode *inode, struct file *file)
 		/* no sense reconnecting to close a file that is
 		   already closed */
 		if (pTcon->tidStatus != CifsNeedReconnect) {
+			int timeout = 2;
+			while ((atomic_read(&pSMBFile->wrtPending) != 0)
+				&& (timeout < 1000)) {
+				/* Give write a better chance to get to
+				server ahead of the close.  We do not
+				want to add a wait_q here as it would
+				increase the memory utilization as
+				the struct would be in each open file,
+				but this should give enough time to
+				clear the socket */
+				write_unlock(&file->f_owner.lock);
+				cERROR(1, ("close with pending writes"));
+				msleep(timeout);
+				write_lock(&file->f_owner.lock);
+				timeout *= 4;
+			}
 			write_unlock(&file->f_owner.lock);
 			rc = CIFSSMBClose(xid, pTcon,
					  pSMBFile->netfid);
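
The cifs_close() hunk above polls the new wrtPending counter with msleep() and an exponentially growing timeout instead of adding a wait queue to every open file. As a rough illustration only, the standalone C sketch below mimics that backoff pattern in userspace; the names (pending_writes, wait_for_pending_writes) are hypothetical, and C11 atomics plus usleep() stand in for the kernel's atomic_read() and msleep().

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the per-file count of writes still in flight. */
static atomic_int pending_writes = 2;

/* Wait briefly for in-flight writes to drain before closing,
 * backing off exponentially and giving up once the step nears 1s. */
static void wait_for_pending_writes(void)
{
	int timeout = 2;	/* milliseconds */

	while (atomic_load(&pending_writes) != 0 && timeout < 1000) {
		fprintf(stderr, "close with pending writes, sleeping %d ms\n",
			timeout);
		usleep(timeout * 1000);
		timeout *= 4;	/* 2, 8, 32, 128, 512 ms */
	}
}

int main(void)
{
	/* In the real code other contexts decrement the counter as their
	 * writes complete; here we simply clear it before waiting. */
	atomic_store(&pending_writes, 0);
	wait_for_pending_writes();
	return 0;
}

The trade-off stated in the patch comment is memory: a wait queue per open file would wake the closer promptly, but polling with a bounded backoff keeps struct cifsFileInfo small while still giving queued writes time to reach the socket.
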
@@ -849,13 +872,19 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
 		/* BB FIXME We can not sign across two buffers yet */
 		if ((experimEnabled) && ((pTcon->ses->server->secMode &
			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0)) {
+			struct kvec iov[2];
+			unsigned int len;
+
+			len = min((size_t)cifs_sb->wsize,
+				  write_size - total_written);
+			/* iov[0] is reserved for smb header */
+			iov[1].iov_base = (char *)write_data +
+					  total_written;
+			iov[1].iov_len = len;
 			rc = CIFSSMBWrite2(xid, pTcon,
-				open_file->netfid,
-				min_t(const int, cifs_sb->wsize,
-				      write_size - total_written),
+					open_file->netfid, len,
					*poffset, &bytes_written,
-					write_data + total_written,
-					long_op);
+					iov, 1, long_op);
 		} else
 		/* BB FIXME fixup indentation of line below */
 #endif
@@ -898,6 +927,43 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
 	return total_written;
 }
 
+struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
+{
+	struct cifsFileInfo *open_file;
+	int rc;
+
+	read_lock(&GlobalSMBSeslock);
+	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+		if (open_file->closePend)
+			continue;
+		if (open_file->pfile &&
+		    ((open_file->pfile->f_flags & O_RDWR) ||
+		     (open_file->pfile->f_flags & O_WRONLY))) {
+			atomic_inc(&open_file->wrtPending);
+			read_unlock(&GlobalSMBSeslock);
+			if ((open_file->invalidHandle) &&
+			    (!open_file->closePend) /* BB fixme - since the second clause can not be true remove it BB */) {
+				rc = cifs_reopen_file(&cifs_inode->vfs_inode,
+						      open_file->pfile, FALSE);
+				/* if it fails, try another handle - might be */
+				/* dangerous to hold up writepages with retry */
+				if (rc) {
+					cFYI(1, ("failed on reopen file in wp"));
+					read_lock(&GlobalSMBSeslock);
+					/* can not use this handle, no write
+					   pending on this one after all */
+					atomic_dec
+						(&open_file->wrtPending);
+					continue;
+				}
+			}
+			return open_file;
+		}
+	}
+	read_unlock(&GlobalSMBSeslock);
+	return NULL;
+}
+
 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 {
 	struct address_space *mapping = page->mapping;
@@ -908,10 +974,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	struct inode *inode;
-	struct cifsInodeInfo *cifsInode;
-	struct cifsFileInfo *open_file = NULL;
-	struct list_head *tmp;
-	struct list_head *tmp1;
+	struct cifsFileInfo *open_file;
 
 	if (!mapping || !mapping->host)
 		return -EFAULT;
@@ -939,49 +1002,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 	if (mapping->host->i_size - offset < (loff_t)to)
 		to = (unsigned)(mapping->host->i_size - offset);
 
-	cifsInode = CIFS_I(mapping->host);
-	read_lock(&GlobalSMBSeslock);
-	/* BB we should start at the end */
-	list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
-		open_file = list_entry(tmp, struct cifsFileInfo, flist);
-		if (open_file->closePend)
-			continue;
-		/* We check if file is open for writing first */
-		if ((open_file->pfile) &&
-		    ((open_file->pfile->f_flags & O_RDWR) ||
-		     (open_file->pfile->f_flags & O_WRONLY))) {
-			read_unlock(&GlobalSMBSeslock);
-			bytes_written = cifs_write(open_file->pfile,
						write_data, to-from,
						&offset);
-			read_lock(&GlobalSMBSeslock);
+	open_file = find_writable_file(CIFS_I(mapping->host));
+	if (open_file) {
+		bytes_written = cifs_write(open_file->pfile, write_data,
+					   to-from, &offset);
+		atomic_dec(&open_file->wrtPending);
 		/* Does mm or vfs already set times? */
-			inode->i_atime =
-			inode->i_mtime = current_fs_time(inode->i_sb);
-			if ((bytes_written > 0) && (offset)) {
-				rc = 0;
-			} else if (bytes_written < 0) {
-				if (rc == -EBADF) {
-				/* have seen a case in which kernel seemed to
-				   have closed/freed a file even with writes
-				   active so we might as well see if there are
-				   other file structs to try for the same
-				   inode before giving up */
-					continue;
-				} else
-					rc = bytes_written;
-			}
-			break;	/* now that we found a valid file handle and
-				   tried to write to it we are done, no sense
-				   continuing to loop looking for another */
-		}
-		if (tmp->next == NULL) {
-			cFYI(1, ("File instance %p removed", tmp));
-			break;
+		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
+		if ((bytes_written > 0) && (offset)) {
+			rc = 0;
+		} else if (bytes_written < 0) {
+			if (rc != -EBADF)
+				rc = bytes_written;
 		}
-	}
-	read_unlock(&GlobalSMBSeslock);
-	if (open_file == NULL) {
+	} else {
 		cFYI(1, ("No writeable filehandles for inode"));
 		rc = -EIO;
 	}
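
find_writable_file() above scans the inode's open-file list under GlobalSMBSeslock, skips handles with closePend set, and pins the chosen handle by incrementing wrtPending before dropping the lock; the caller decrements it once the write is issued, which is what the close-side backoff earlier waits on. The sketch below is a loose userspace approximation of that select-and-pin pattern; struct open_handle, find_writable_handle() and the plain array are invented for illustration, and the locking and invalid-handle reopen path are omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct cifsFileInfo. */
struct open_handle {
	bool close_pending;
	bool writable;		/* opened O_RDWR or O_WRONLY */
	atomic_int wrt_pending;	/* writes in flight through this handle */
};

/* Pick the first usable writable handle and pin it so a concurrent
 * close will wait for the write to finish before tearing it down. */
static struct open_handle *find_writable_handle(struct open_handle *handles,
						size_t count)
{
	for (size_t i = 0; i < count; i++) {
		struct open_handle *h = &handles[i];

		if (h->close_pending || !h->writable)
			continue;
		atomic_fetch_add(&h->wrt_pending, 1);	/* pin */
		return h;
	}
	return NULL;
}

int main(void)
{
	struct open_handle handles[2] = {
		{ .close_pending = true,  .writable = true },
		{ .close_pending = false, .writable = true },
	};
	struct open_handle *h = find_writable_handle(handles, 2);

	if (h) {
		/* ... issue the write through this handle ... */
		atomic_fetch_sub(&h->wrt_pending, 1);	/* unpin when done */
		printf("used handle %td\n", h - handles);
	}
	return 0;
}
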
@@ -990,20 +1024,207 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 	return rc;
 }
 
-#if 0
+#ifdef CONFIG_CIFS_EXPERIMENTAL
 static int cifs_writepages(struct address_space *mapping,
-			   struct writeback_control *wbc)
+			struct writeback_control *wbc)
 {
-	int rc = -EFAULT;
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	unsigned int bytes_to_write;
+	unsigned int bytes_written;
+	struct cifs_sb_info *cifs_sb;
+	int done = 0;
+	pgoff_t end = -1;
+	pgoff_t index;
+	int is_range = 0;
+	struct kvec iov[32];
+	int len;
+	int n_iov = 0;
+	pgoff_t next;
+	int nr_pages;
+	__u64 offset = 0;
+	struct cifsFileInfo *open_file;
+	struct page *page;
+	struct pagevec pvec;
+	int rc = 0;
+	int scanned = 0;
 	int xid;
 
+	cifs_sb = CIFS_SB(mapping->host->i_sb);
+
+	/*
+	 * If wsize is smaller than the page cache size, default to writing
+	 * one page at a time via cifs_writepage
+	 */
+	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
+		return generic_writepages(mapping, wbc);
+
+	/* BB FIXME we do not have code to sign across multiple buffers yet,
+	   so go to older writepage style write which we can sign if needed */
+	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
+		if (cifs_sb->tcon->ses->server->secMode &
+			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+			return generic_writepages(mapping, wbc);
+
+	/*
+	 * BB: Is this meaningful for a non-block-device file system?
+	 * If it is, we should test it again after we do I/O
+	 */
+	if (wbc->nonblocking && bdi_write_congested(bdi)) {
+		wbc->encountered_congestion = 1;
+		return 0;
+	}
+
 	xid = GetXid();
 
-	/* Find contiguous pages then iterate through repeating
-	   call 16K write then Setpageuptodate or if LARGE_WRITE_X
-	   support then send larger writes via kevec so as to eliminate
-	   a memcpy */
+	pagevec_init(&pvec, 0);
+	if (wbc->sync_mode == WB_SYNC_NONE)
+		index = mapping->writeback_index; /* Start from prev offset */
+	else {
+		index = 0;
+		scanned = 1;
+	}
+	if (wbc->start || wbc->end) {
+		index = wbc->start >> PAGE_CACHE_SHIFT;
+		end = wbc->end >> PAGE_CACHE_SHIFT;
+		is_range = 1;
+		scanned = 1;
+	}
+retry:
+	while (!done && (index <= end) &&
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
+		int first;
+		unsigned int i;
+
+		first = -1;
+		next = 0;
+		n_iov = 0;
+		bytes_to_write = 0;
+
+		for (i = 0; i < nr_pages; i++) {
+			page = pvec.pages[i];
+			/*
+			 * At this point we hold neither mapping->tree_lock nor
+			 * lock on the page itself: the page may be truncated or
+			 * invalidated (changing page->mapping to NULL), or even
+			 * swizzled back from swapper_space to tmpfs file
+			 * mapping
+			 */
+
+			if (first < 0)
+				lock_page(page);
+			else if (TestSetPageLocked(page))
+				break;
+
+			if (unlikely(page->mapping != mapping)) {
+				unlock_page(page);
+				break;
+			}
+
+			if (unlikely(is_range) && (page->index > end)) {
+				done = 1;
+				unlock_page(page);
+				break;
+			}
+
+			if (next && (page->index != next)) {
+				/* Not next consecutive page */
+				unlock_page(page);
+				break;
+			}
+
+			if (wbc->sync_mode != WB_SYNC_NONE)
+				wait_on_page_writeback(page);
+
+			if (PageWriteback(page) ||
+			    !test_clear_page_dirty(page)) {
+				unlock_page(page);
+				break;
+			}
+
+			if (page_offset(page) >= mapping->host->i_size) {
+				done = 1;
+				unlock_page(page);
+				break;
+			}
+
+			/*
+			 * BB can we get rid of this? pages are held by pvec
+			 */
+			page_cache_get(page);
+
+			len = min(mapping->host->i_size - page_offset(page),
+				  (loff_t)PAGE_CACHE_SIZE);
+
+			/* reserve iov[0] for the smb header */
+			n_iov++;
+			iov[n_iov].iov_base = kmap(page);
+			iov[n_iov].iov_len = len;
+			bytes_to_write += len;
+
+			if (first < 0) {
+				first = i;
+				offset = page_offset(page);
+			}
+			next = page->index + 1;
+			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
+				break;
+		}
+		if (n_iov) {
+			/* Search for a writable handle every time we call
+			 * CIFSSMBWrite2.  We can't rely on the last handle
+			 * we used to still be valid
+			 */
+			open_file = find_writable_file(CIFS_I(mapping->host));
+			if (!open_file) {
+				cERROR(1, ("No writable handles for inode"));
+				rc = -EBADF;
+			} else {
+				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   1);
+				atomic_dec(&open_file->wrtPending);
+				if (rc || bytes_written < bytes_to_write) {
+					cERROR(1, ("Write2 ret %d, written = %d",
						   rc, bytes_written));
+					/* BB what if continued retry is
+					   requested via mount flags? */
+					set_bit(AS_EIO, &mapping->flags);
+					SetPageError(page);
+				} else {
+					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
+				}
+			}
+			for (i = 0; i < n_iov; i++) {
+				page = pvec.pages[first + i];
+				kunmap(page);
+				unlock_page(page);
+				page_cache_release(page);
+			}
+			if ((wbc->nr_to_write -= n_iov) <= 0)
+				done = 1;
+			index = next;
+		}
+		pagevec_release(&pvec);
+	}
+	if (!scanned && !done) {
+		/*
+		 * We hit the last page and there is more work to be done: wrap
+		 * back to the start of the file
+		 */
+		scanned = 1;
+		index = 0;
+		goto retry;
+	}
+	if (!is_range)
+		mapping->writeback_index = index;
+
 	FreeXid(xid);
+
 	return rc;
 }
 #endif
@@ -1598,40 +1819,21 @@ static int cifs_readpage(struct file *file, struct page *page)
    page caching in the current Linux kernel design */
 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
 {
-	struct list_head *tmp;
-	struct list_head *tmp1;
 	struct cifsFileInfo *open_file = NULL;
-	int rc = TRUE;
 
-	if (cifsInode == NULL)
-		return rc;
-
-	read_lock(&GlobalSMBSeslock);
-	list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
-		open_file = list_entry(tmp, struct cifsFileInfo, flist);
-		if (open_file == NULL)
-			break;
-		if (open_file->closePend)
-			continue;
-	/* We check if file is open for writing,
-	   BB we could supplement this with a check to see if file size
-	   changes have been flushed to server - ie inode metadata dirty */
-		if ((open_file->pfile) &&
-		    ((open_file->pfile->f_flags & O_RDWR) ||
-		     (open_file->pfile->f_flags & O_WRONLY))) {
-			rc = FALSE;
-			break;
-		}
-		if (tmp->next == NULL) {
-			cFYI(1, ("File instance %p removed", tmp));
-			break;
-		}
-	}
-	read_unlock(&GlobalSMBSeslock);
-	return rc;
+	if (cifsInode)
+		open_file = find_writable_file(cifsInode);
+
+	if (open_file) {
+		/* there is not actually a write pending so let
+		   this handle go free and allow it to
+		   be closable if needed */
+		atomic_dec(&open_file->wrtPending);
+		return 0;
+	} else
+		return 1;
 }
 
-
 static int cifs_prepare_write(struct file *file, struct page *page,
	unsigned from, unsigned to)
 {
@@ -1671,6 +1873,9 @@ struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
 	.readpages = cifs_readpages,
 	.writepage = cifs_writepage,
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	.writepages = cifs_writepages,
+#endif
 	.prepare_write = cifs_prepare_write,
 	.commit_write = cifs_commit_write,
 	.set_page_dirty = __set_page_dirty_nobuffers,
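
cifs_writepages() above gathers consecutive dirty pages into a kvec array, reserving iov[0] for the SMB header and capping each batch at wsize before issuing a single CIFSSMBWrite2() through a freshly looked-up writable handle. The following userspace sketch shows only the coalescing idea; the names are invented and writev() on an ordinary file descriptor stands in for the SMB transport, so it is an illustration of the batching, not the CIFS implementation.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define PAGE_SZ 4096
#define MAX_IOV 32			/* mirrors the on-stack iov[32] */

/* Coalesce consecutive pages into one writev() call, capped at max_bytes,
 * leaving iov[0] unused the way the CIFS code reserves it for the header. */
static ssize_t write_pages(int fd, char pages[][PAGE_SZ], int nr_pages,
			   size_t max_bytes)
{
	struct iovec iov[MAX_IOV];
	size_t bytes_to_write = 0;
	int n_iov = 0;			/* index 0 intentionally skipped */

	for (int i = 0; i < nr_pages && n_iov + 1 < MAX_IOV; i++) {
		if (bytes_to_write + PAGE_SZ > max_bytes)
			break;		/* batch full; caller sends the rest */
		n_iov++;
		iov[n_iov].iov_base = pages[i];
		iov[n_iov].iov_len = PAGE_SZ;
		bytes_to_write += PAGE_SZ;
	}
	return writev(fd, &iov[1], n_iov);
}

int main(void)
{
	char pages[4][PAGE_SZ];
	int fd = open("/dev/null", O_WRONLY);

	memset(pages, 'x', sizeof(pages));
	ssize_t written = write_pages(fd, pages, 4, 2 * PAGE_SZ);
	fprintf(stderr, "wrote %zd of %d bytes\n", written, 4 * PAGE_SZ);
	close(fd);
	return 0;
}

Batching like this is why the patch falls back to generic_writepages() when signing is required or wsize is smaller than a page: one large unsigned write per batch avoids a per-page memcpy, but the signing code at this point could only cover a single contiguous buffer.
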