[linux-2.6] fs/nfs/direct.c
NFS: support EIOCBQUEUED return in direct read path
1 /*
2  * linux/fs/nfs/direct.c
3  *
4  * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
5  *
6  * High-performance uncached I/O for the Linux NFS client
7  *
8  * There are important applications whose performance or correctness
9  * depends on uncached access to file data.  Database clusters
10  * (multiple copies of the same instance running on separate hosts) 
11  * implement their own cache coherency protocol that subsumes file
12  * system cache protocols.  Applications that process datasets 
13  * considerably larger than the client's memory do not always benefit 
14  * from a local cache.  A streaming video server, for instance, has no 
15  * need to cache the contents of a file.
16  *
17  * When an application requests uncached I/O, all read and write requests
18  * are made directly to the server; data stored or fetched via these
19  * requests is not cached in the Linux page cache.  The client does not
20  * correct unaligned requests from applications.  All requested bytes are
21  * held on permanent storage before a direct write system call returns to
22  * an application.
23  *
24  * Solaris implements an uncached I/O facility called directio() that
25  * is used for backups and sequential I/O to very large files.  Solaris
26  * also supports uncaching whole NFS partitions with "-o forcedirectio,"
27  * an undocumented mount option.
28  *
29  * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
30  * help from Andrew Morton.
31  *
32  * 18 Dec 2001  Initial implementation for 2.4  --cel
33  * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
34  * 08 Jun 2003  Port to 2.5 APIs  --cel
35  * 31 Mar 2004  Handle direct I/O without VFS support  --cel
36  * 15 Sep 2004  Parallel async reads  --cel
37  *
38  */
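/*
 * Illustrative sketch (user space, not part of this file): an application
 * typically requests uncached NFS I/O by opening the file with O_DIRECT and
 * issuing ordinary read(2)/write(2) calls.  The path, alignment, and sizes
 * below are arbitrary examples.
 *
 *	void *buf;
 *	int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
 *	posix_memalign(&buf, 4096, 65536);
 *	ssize_t nread = read(fd, buf, 65536);
 */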
39
40 #include <linux/config.h>
41 #include <linux/errno.h>
42 #include <linux/sched.h>
43 #include <linux/kernel.h>
44 #include <linux/smp_lock.h>
45 #include <linux/file.h>
46 #include <linux/pagemap.h>
47 #include <linux/kref.h>
48
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/sunrpc/clnt.h>
52
53 #include <asm/system.h>
54 #include <asm/uaccess.h>
55 #include <asm/atomic.h>
56
57 #include "iostat.h"
58
59 #define NFSDBG_FACILITY         NFSDBG_VFS
60 #define MAX_DIRECTIO_SIZE       (4096UL << PAGE_SHIFT)
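/* 4096 pages worth of data: 16 MB with 4 KB pages */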
61
62 static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);
63 static kmem_cache_t *nfs_direct_cachep;
64
65 /*
66  * This represents a set of asynchronous requests that we're waiting on
67  */
68 struct nfs_direct_req {
69         struct kref             kref;           /* release manager */
70         struct list_head        list;           /* nfs_read_data structs */
71         struct file *           filp;           /* file descriptor */
72         struct kiocb *          iocb;           /* controlling i/o request */
73         wait_queue_head_t       wait;           /* wait for i/o completion */
74         struct inode *          inode;          /* target file of I/O */
75         struct page **          pages;          /* pages in our buffer */
76         unsigned int            npages;         /* count of pages */
77         atomic_t                complete,       /* i/os we're waiting for */
78                                 count,          /* bytes actually processed */
79                                 error;          /* any reported error */
80 };
81
82
83 /**
84  * nfs_direct_IO - NFS address space operation for direct I/O
85  * @rw: direction (read or write)
86  * @iocb: target I/O control block
87  * @iov: array of vectors that define I/O buffer
88  * @pos: offset in file to begin the operation
89  * @nr_segs: size of iovec array
90  *
91  * The presence of this routine in the address space ops vector means
92  * the NFS client supports direct I/O.  However, we shunt off direct
93  * read and write requests before the VFS gets them, so this method
94  * should never be called.
95  */
96 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
97 {
98         struct dentry *dentry = iocb->ki_filp->f_dentry;
99
100         dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
101                         dentry->d_name.name, (long long) pos, nr_segs);
102
103         return -EINVAL;
104 }
105
106 static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
107 {
108         int result = -ENOMEM;
109         unsigned long page_count;
110         size_t array_size;
111
112         /* set an arbitrary limit to prevent type overflow */
113         /* XXX: this can probably be as large as INT_MAX */
114         if (size > MAX_DIRECTIO_SIZE) {
115                 *pages = NULL;
116                 return -EFBIG;
117         }
118
119         page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
120         page_count -= user_addr >> PAGE_SHIFT;
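                /*
                 * Example: user_addr = 0x1234, size = 8192, 4 KB pages:
                 * (0x1234 + 8192 + 4095) >> 12 = 4, 0x1234 >> 12 = 1,
                 * so page_count = 3.
                 */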
121
122         array_size = (page_count * sizeof(struct page *));
123         *pages = kmalloc(array_size, GFP_KERNEL);
124         if (*pages) {
125                 down_read(&current->mm->mmap_sem);
126                 result = get_user_pages(current, current->mm, user_addr,
127                                         page_count, (rw == READ), 0,
128                                         *pages, NULL);
129                 up_read(&current->mm->mmap_sem);
130                 /*
131                  * If we got fewer pages than expected from get_user_pages(),
132                  * the user buffer runs off the end of a mapping; return EFAULT.
133                  */
134                 if (result >= 0 && result < page_count) {
135                         nfs_free_user_pages(*pages, result, 0);
136                         *pages = NULL;
137                         result = -EFAULT;
138                 }
139         }
140         return result;
141 }
142
143 static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
144 {
145         int i;
146         for (i = 0; i < npages; i++) {
147                 struct page *page = pages[i];
148                 if (do_dirty && !PageCompound(page))
149                         set_page_dirty_lock(page);
150                 page_cache_release(page);
151         }
152         kfree(pages);
153 }
154
155 static void nfs_direct_req_release(struct kref *kref)
156 {
157         struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
158         kmem_cache_free(nfs_direct_cachep, dreq);
159 }
160
161 /*
162  * Note that we set the number of outstanding requests in the dreq only
163  * after all of them have been allocated.  This prevents races with I/O
164  * completion: we always wait until all requests are dispatched and done.
165  */
166 static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
167 {
168         struct list_head *list;
169         struct nfs_direct_req *dreq;
170         unsigned int reads = 0;
171         unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
172
173         dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
174         if (!dreq)
175                 return NULL;
176
177         kref_init(&dreq->kref);
178         init_waitqueue_head(&dreq->wait);
179         INIT_LIST_HEAD(&dreq->list);
180         dreq->iocb = NULL;
181         atomic_set(&dreq->count, 0);
182         atomic_set(&dreq->error, 0);
183
184         list = &dreq->list;
185         for(;;) {
186                 struct nfs_read_data *data = nfs_readdata_alloc(rpages);
187
188                 if (unlikely(!data)) {
189                         while (!list_empty(list)) {
190                                 data = list_entry(list->next,
191                                                   struct nfs_read_data, pages);
192                                 list_del(&data->pages);
193                                 nfs_readdata_free(data);
194                         }
195                         kref_put(&dreq->kref, nfs_direct_req_release);
196                         return NULL;
197                 }
198
199                 INIT_LIST_HEAD(&data->pages);
200                 list_add(&data->pages, list);
201
202                 data->req = (struct nfs_page *) dreq;
203                 reads++;
204                 if (nbytes <= rsize)
205                         break;
206                 nbytes -= rsize;
207         }
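        /*
         * Two references are held on the dreq: one for the caller, dropped
         * in nfs_direct_read_wait(), and one for the completion path,
         * dropped by nfs_direct_read_result() when the last read finishes.
         */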
208         kref_get(&dreq->kref);
209         atomic_set(&dreq->complete, reads);
210         return dreq;
211 }
212
213 /*
214  * We must hold a reference to all the pages in this direct read request
215  * until the RPCs complete.  This could be long *after* we are woken up in
216  * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
217  *
218  * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
219  * can't trust the iocb is still valid here if this is a synchronous
220  * request.  If the waiter is woken prematurely, the iocb is long gone.
221  */
222 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
223 {
224         struct nfs_read_data *data = calldata;
225         struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
226
227         if (nfs_readpage_result(task, data) != 0)
228                 return;
229         if (likely(task->tk_status >= 0))
230                 atomic_add(data->res.count, &dreq->count);
231         else
232                 atomic_set(&dreq->error, task->tk_status);
233
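        /*
         * The last RPC to complete releases the user pages and reports the
         * result: via aio_complete() for an async iocb, or by waking the
         * synchronous waiter in nfs_direct_read_wait().
         */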
234         if (unlikely(atomic_dec_and_test(&dreq->complete))) {
235                 nfs_free_user_pages(dreq->pages, dreq->npages, 1);
236                 if (dreq->iocb) {
237                         long res = atomic_read(&dreq->error);
238                         if (!res)
239                                 res = atomic_read(&dreq->count);
240                         aio_complete(dreq->iocb, res, 0);
241                 } else
242                         wake_up(&dreq->wait);
243                 kref_put(&dreq->kref, nfs_direct_req_release);
244         }
245 }
246
247 static const struct rpc_call_ops nfs_read_direct_ops = {
248         .rpc_call_done = nfs_direct_read_result,
249         .rpc_release = nfs_readdata_release,
250 };
251
252 /*
253  * For each nfs_read_data struct that was allocated on the list, dispatch
254  * an NFS READ operation
255  */
256 static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t file_offset)
257 {
258         struct file *file = dreq->filp;
259         struct inode *inode = file->f_mapping->host;
260         struct nfs_open_context *ctx = (struct nfs_open_context *)
261                                                         file->private_data;
262         struct list_head *list = &dreq->list;
263         struct page **pages = dreq->pages;
264         size_t rsize = NFS_SERVER(inode)->rsize;
265         unsigned int curpage, pgbase;
266
267         curpage = 0;
268         pgbase = user_addr & ~PAGE_MASK;
269         do {
270                 struct nfs_read_data *data;
271                 size_t bytes;
272
273                 bytes = rsize;
274                 if (count < rsize)
275                         bytes = count;
276
277                 data = list_entry(list->next, struct nfs_read_data, pages);
278                 list_del_init(&data->pages);
279
280                 data->inode = inode;
281                 data->cred = ctx->cred;
282                 data->args.fh = NFS_FH(inode);
283                 data->args.context = ctx;
284                 data->args.offset = file_offset;
285                 data->args.pgbase = pgbase;
286                 data->args.pages = &pages[curpage];
287                 data->args.count = bytes;
288                 data->res.fattr = &data->fattr;
289                 data->res.eof = 0;
290                 data->res.count = bytes;
291
292                 rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
293                                 &nfs_read_direct_ops, data);
294                 NFS_PROTO(inode)->read_setup(data);
295
296                 data->task.tk_cookie = (unsigned long) inode;
297
298                 lock_kernel();
299                 rpc_execute(&data->task);
300                 unlock_kernel();
301
302                 dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
303                                 data->task.tk_pid,
304                                 inode->i_sb->s_id,
305                                 (long long)NFS_FILEID(inode),
306                                 bytes,
307                                 (unsigned long long)data->args.offset);
308
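                /*
                 * Advance the position in the page array: whole pages
                 * accumulated in pgbase are carried into curpage, and pgbase
                 * keeps only the offset within the current page
                 * (~PAGE_MASK == PAGE_SIZE - 1).
                 */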
309                 file_offset += bytes;
310                 pgbase += bytes;
311                 curpage += pgbase >> PAGE_SHIFT;
312                 pgbase &= ~PAGE_MASK;
313
314                 count -= bytes;
315         } while (count != 0);
316 }
317
318 /*
319  * Collects and returns the final error value/byte-count.  Async requests return -EIOCBQUEUED here and complete later via aio_complete().
320  */
321 static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
322 {
323         int result = -EIOCBQUEUED;
324
325         /* Async requests don't wait here */
326         if (dreq->iocb)
327                 goto out;
328
329         result = 0;
330         if (intr) {
331                 result = wait_event_interruptible(dreq->wait,
332                                         (atomic_read(&dreq->complete) == 0));
333         } else {
334                 wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
335         }
336
337         if (!result)
338                 result = atomic_read(&dreq->error);
339         if (!result)
340                 result = atomic_read(&dreq->count);
341
342 out:
343         kref_put(&dreq->kref, nfs_direct_req_release);
344         return (ssize_t) result;
345 }
346
347 static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
348 {
349         ssize_t result;
350         sigset_t oldset;
351         struct inode *inode = iocb->ki_filp->f_mapping->host;
352         struct rpc_clnt *clnt = NFS_CLIENT(inode);
353         struct nfs_direct_req *dreq;
354
355         dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
356         if (!dreq)
357                 return -ENOMEM;
358
359         dreq->pages = pages;
360         dreq->npages = nr_pages;
361         dreq->inode = inode;
362         dreq->filp = iocb->ki_filp;
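        /*
         * For an async request, remember the iocb so the completion path can
         * call aio_complete(); synchronous callers instead sleep in
         * nfs_direct_read_wait() until the last read finishes.
         */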
363         if (!is_sync_kiocb(iocb))
364                 dreq->iocb = iocb;
365
366         nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
367         rpc_clnt_sigmask(clnt, &oldset);
368         nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
369         result = nfs_direct_read_wait(dreq, clnt->cl_intr);
370         rpc_clnt_sigunmask(clnt, &oldset);
371
372         return result;
373 }
374
375 static ssize_t nfs_direct_write_seg(struct inode *inode, struct nfs_open_context *ctx, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, int nr_pages)
376 {
377         const unsigned int wsize = NFS_SERVER(inode)->wsize;
378         size_t request;
379         int curpage, need_commit;
380         ssize_t result, tot_bytes;
381         struct nfs_writeverf first_verf;
382         struct nfs_write_data *wdata;
383
384         wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
385         if (!wdata)
386                 return -ENOMEM;
387
388         wdata->inode = inode;
389         wdata->cred = ctx->cred;
390         wdata->args.fh = NFS_FH(inode);
391         wdata->args.context = ctx;
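        /*
         * Default to UNSTABLE writes followed by a COMMIT below.  Use
         * FILE_SYNC when the server cannot do unstable writes (NFSv2), when
         * the whole request fits in a single WRITE, or when the inode is
         * marked for synchronous I/O.
         */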
392         wdata->args.stable = NFS_UNSTABLE;
393         if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
394                 wdata->args.stable = NFS_FILE_SYNC;
395         wdata->res.fattr = &wdata->fattr;
396         wdata->res.verf = &wdata->verf;
397
398         nfs_begin_data_update(inode);
399 retry:
400         need_commit = 0;
401         tot_bytes = 0;
402         curpage = 0;
403         request = count;
404         wdata->args.pgbase = user_addr & ~PAGE_MASK;
405         wdata->args.offset = file_offset;
406         do {
407                 wdata->args.count = request;
408                 if (wdata->args.count > wsize)
409                         wdata->args.count = wsize;
410                 wdata->args.pages = &pages[curpage];
411
412                 dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
413                         wdata->args.count, (long long) wdata->args.offset,
414                         user_addr + tot_bytes, wdata->args.pgbase, curpage);
415
416                 lock_kernel();
417                 result = NFS_PROTO(inode)->write(wdata);
418                 unlock_kernel();
419
420                 if (result <= 0) {
421                         if (tot_bytes > 0)
422                                 break;
423                         goto out;
424                 }
425
426                 if (tot_bytes == 0)
427                         memcpy(&first_verf.verifier, &wdata->verf.verifier,
428                                                 sizeof(first_verf.verifier));
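                /*
                 * The write verifier changes when the server reboots.  If it
                 * no longer matches the first one we saw, data written so far
                 * may have been lost and cannot be committed, so retry the
                 * whole request with FILE_SYNC writes.
                 */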
429                 if (wdata->verf.committed != NFS_FILE_SYNC) {
430                         need_commit = 1;
431                         if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
432                                         sizeof(first_verf.verifier)))
433                                 goto sync_retry;
434                 }
435
436                 tot_bytes += result;
437
438                 /* in case of a short write: stop now, let the app recover */
439                 if (result < wdata->args.count)
440                         break;
441
442                 wdata->args.offset += result;
443                 wdata->args.pgbase += result;
444                 curpage += wdata->args.pgbase >> PAGE_SHIFT;
445                 wdata->args.pgbase &= ~PAGE_MASK;
446                 request -= result;
447         } while (request != 0);
448
449         /*
450          * Commit data written so far, even in the event of an error
451          */
452         if (need_commit) {
453                 wdata->args.count = tot_bytes;
454                 wdata->args.offset = file_offset;
455
456                 lock_kernel();
457                 result = NFS_PROTO(inode)->commit(wdata);
458                 unlock_kernel();
459
460                 if (result < 0 || memcmp(&first_verf.verifier,
461                                          &wdata->verf.verifier,
462                                          sizeof(first_verf.verifier)) != 0)
463                         goto sync_retry;
464         }
465         result = tot_bytes;
466
467 out:
468         nfs_end_data_update(inode);
469         nfs_writedata_free(wdata);
470         return result;
471
472 sync_retry:
473         wdata->args.stable = NFS_FILE_SYNC;
474         goto retry;
475 }
476
477 /*
478  * Upon return, generic_file_direct_IO invalidates any cached pages
479  * that non-direct readers might access, so they will pick up these
480  * writes immediately.
481  */
482 static ssize_t nfs_direct_write(struct inode *inode, struct nfs_open_context *ctx, const struct iovec *iov, loff_t file_offset, unsigned long nr_segs)
483 {
484         ssize_t tot_bytes = 0;
485         unsigned long seg = 0;
486
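        /*
         * Write out each iovec segment in turn.  An error or short write
         * after some data has already been written ends the loop, and the
         * byte count accumulated so far is returned.
         */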
487         while ((seg < nr_segs) && (tot_bytes >= 0)) {
488                 ssize_t result;
489                 int page_count;
490                 struct page **pages;
491                 const struct iovec *vec = &iov[seg++];
492                 unsigned long user_addr = (unsigned long) vec->iov_base;
493                 size_t size = vec->iov_len;
494
495                 page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
496                 if (page_count < 0) {
497                         nfs_free_user_pages(pages, 0, 0);
498                         if (tot_bytes > 0)
499                                 break;
500                         return page_count;
501                 }
502
503                 nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, size);
504                 result = nfs_direct_write_seg(inode, ctx, user_addr, size,
505                                 file_offset, pages, page_count);
506                 nfs_free_user_pages(pages, page_count, 0);
507
508                 if (result <= 0) {
509                         if (tot_bytes > 0)
510                                 break;
511                         return result;
512                 }
513                 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
514                 tot_bytes += result;
515                 file_offset += result;
516                 if (result < size)
517                         break;
518         }
519         return tot_bytes;
520 }
521
522 /**
523  * nfs_file_direct_read - file direct read operation for NFS files
524  * @iocb: target I/O control block
525  * @buf: user's buffer into which to read data
526  * @count: number of bytes to read
527  * @pos: byte offset in file where reading starts
528  *
529  * We use this function for direct reads instead of calling
530  * generic_file_aio_read() in order to avoid gfar's check to see if
531  * the request starts before the end of the file.  For that check
532  * to work, we must generate a GETATTR before each direct read, and
533  * even then there is a window between the GETATTR and the subsequent
534  * READ where the file size could change.  So our preference is simply
535  * to do all reads the application wants, and the server will take
536  * care of managing the end of file boundary.
537  * 
538  * This function also eliminates unnecessarily updating the file's
539  * atime locally, as the NFS server sets the file's atime, and this
540  * client must read the updated atime from the server back into its
541  * cache.
542  */
543 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
544 {
545         ssize_t retval = -EINVAL;
546         int page_count;
547         struct page **pages;
548         struct file *file = iocb->ki_filp;
549         struct address_space *mapping = file->f_mapping;
550
551         dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
552                 file->f_dentry->d_parent->d_name.name,
553                 file->f_dentry->d_name.name,
554                 (unsigned long) count, (long long) pos);
555
556         if ((ssize_t) count < 0)
557                 goto out;
558         retval = -EFAULT;
559         if (!access_ok(VERIFY_WRITE, buf, count))
560                 goto out;
561         retval = 0;
562         if (!count)
563                 goto out;
564
565         retval = nfs_sync_mapping(mapping);
566         if (retval)
567                 goto out;
568
569         page_count = nfs_get_user_pages(READ, (unsigned long) buf,
570                                                 count, &pages);
571         if (page_count < 0) {
572                 nfs_free_user_pages(pages, 0, 0);
573                 retval = page_count;
574                 goto out;
575         }
576
577         retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
578                                                 pages, page_count);
579         if (retval > 0)
580                 iocb->ki_pos = pos + retval;
581
582 out:
583         return retval;
584 }
585
586 /**
587  * nfs_file_direct_write - file direct write operation for NFS files
588  * @iocb: target I/O control block
589  * @buf: user's buffer from which to write data
590  * @count: number of bytes to write
591  * @pos: byte offset in file where writing starts
592  *
593  * We use this function for direct writes instead of calling
594  * generic_file_aio_write() in order to avoid taking the inode
595  * semaphore and updating the i_size.  The NFS server will set
596  * the new i_size and this client must read the updated size
597  * back into its cache.  We let the server do generic write
598  * parameter checking and report problems.
599  *
600  * We also avoid an unnecessary invocation of generic_osync_inode(),
601  * as it is fairly meaningless to sync the metadata of an NFS file.
602  *
603  * We eliminate local atime updates, see direct read above.
604  *
605  * We avoid unnecessary page cache invalidations for normal cached
606  * readers of this file.
607  *
608  * Note that O_APPEND is not supported for NFS direct writes, as there
609  * is no atomic O_APPEND write facility in the NFS protocol.
610  */
611 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
612 {
613         ssize_t retval;
614         struct file *file = iocb->ki_filp;
615         struct nfs_open_context *ctx =
616                         (struct nfs_open_context *) file->private_data;
617         struct address_space *mapping = file->f_mapping;
618         struct inode *inode = mapping->host;
619         struct iovec iov = {
620                 .iov_base = (char __user *)buf,
621         };
622
623         dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
624                 file->f_dentry->d_parent->d_name.name,
625                 file->f_dentry->d_name.name,
626                 (unsigned long) count, (long long) pos);
627
628         retval = -EINVAL;
629         if (!is_sync_kiocb(iocb))
630                 goto out;
631
632         retval = generic_write_checks(file, &pos, &count, 0);
633         if (retval)
634                 goto out;
635
636         retval = -EINVAL;
637         if ((ssize_t) count < 0)
638                 goto out;
639         retval = 0;
640         if (!count)
641                 goto out;
642         iov.iov_len = count;
643
644         retval = -EFAULT;
645         if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
646                 goto out;
647
648         retval = nfs_sync_mapping(mapping);
649         if (retval)
650                 goto out;
651
652         retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
653         if (mapping->nrpages)
654                 invalidate_inode_pages2(mapping);
655         if (retval > 0)
656                 iocb->ki_pos = pos + retval;
657
658 out:
659         return retval;
660 }
661
662 int nfs_init_directcache(void)
663 {
664         nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
665                                                 sizeof(struct nfs_direct_req),
666                                                 0, SLAB_RECLAIM_ACCOUNT,
667                                                 NULL, NULL);
668         if (nfs_direct_cachep == NULL)
669                 return -ENOMEM;
670
671         return 0;
672 }
673
674 void nfs_destroy_directcache(void)
675 {
676         if (kmem_cache_destroy(nfs_direct_cachep))
677                 printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
678 }