/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

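/*
 * Look up the connection for a device file.  Returns NULL if the
 * file is not connected or the connection is no longer alive.
 */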
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc && !fc->connected)
                fc = NULL;
        spin_unlock(&fuse_lock);
        return fc;
}

static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

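/*
 * Allocate a fresh request from the slab cache.  May return NULL
 * under memory pressure; callers must check.
 */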
struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

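/*
 * Block all signals except SIGKILL around an interruptible sleep, so
 * that only a fatal signal can break out of it; restore_sigs() undoes
 * this.
 */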
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;
        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

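/*
 * Take a request off the preallocated pool.  The caller has already
 * downed fc->outstanding_sem, so the unused list cannot be empty
 * (hence the BUG_ON below).
 */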
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fuse_lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fuse_lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

/* This can return NULL, but only if it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        if (intr) {
                atomic_dec(&fc->num_waiting);
                return NULL;
        }
        return do_get_request(fc);
}

/* Must be called with fuse_lock held */
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req->preallocated) {
                atomic_dec(&fc->num_waiting);
                list_add(&req->list, &fc->unused_list);
        } else
                fuse_request_free(req);

        /* If we are in debt decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                spin_lock(&fuse_lock);
                fuse_putback_request(fc, req);
                spin_unlock(&fuse_lock);
        }
}

static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fuse_lock);
        list_del(&req->bg_entry);
        spin_unlock(&fuse_lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In the case of a background request the references to
 * the stored objects are released.  The requester thread is woken up
 * (if still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.
 * This is because fuse_reset_request() may be called after the request
 * is finished and it must be the sole possessor.  If a request is
 * interrupted and put in the background, it will return with an error
 * and hence never be reset and reused.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        if (!req->background) {
                wake_up(&req->waitq);
                fuse_put_request_locked(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
                req->end = NULL;
                spin_unlock(&fuse_lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
                if (end)
                        end(fc, req);
                else
                        fuse_put_request(fc, req);
        }
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem; it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * store any data.
 */

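/*
 * Put the request on the fc->background list and pin the objects it
 * refers to.  Called with fuse_lock held.
 */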
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fuse_lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fuse_lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;

        if (!req->interrupted) {
                req->out.h.error = -EINTR;
                req->interrupted = 1;
        }
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fuse_lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fuse_lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

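/*
 * Assign a unique ID and the total message length to the request,
 * put it on the pending list, and wake up any waiting readers of the
 * device.  Called with fuse_lock held.
 */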
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If request is not preallocated (either FORGET or
                   RELEASE), then still decrease outstanding_sem, so
                   user can't open infinite number of files while not
                   processing the RELEASE requests.  However for
                   efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fuse_lock);
}

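/*
 * Queue a request that no thread waits on.  If the connection is
 * gone, finish it immediately with -ENOTCONN.
 */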
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        background_request(fc, req);
        spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fuse_lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fuse_lock);
        }
        return err;
}

/*
 * Unlock request.  If it was interrupted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
        if (req) {
                spin_lock(&fuse_lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fuse_lock);
        }
}

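/*
 * State kept while copying between a request and the userspace buffer
 * described by an iovec.  One page of the user buffer is mapped at a
 * time.
 */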
struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct fuse_req *req, const struct iovec *iov,
                           unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fuse_lock);
                schedule();
                spin_lock(&fuse_lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), or the request has been
 * interrupted, or there was an error during the copying, then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list and set the 'sent' state.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_conn *fc;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;

 restart:
        spin_lock(&fuse_lock);
        fc = file->private_data;
        err = -EPERM;
        if (!fc)
                goto err_unlock;
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            list_empty(&fc->pending))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fuse_lock);
        fuse_copy_init(&cs, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fuse_lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fuse_lock);
        return err;
}

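/* read(2) on the device: wrap the buffer in a single-segment iovec */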
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}

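/*
 * Copy the reply arguments from the userspace buffer, checking that
 * the byte count in the header matches the expected argument sizes.
 * A short last argument is accepted only if out->argvar is set.
 */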
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fuse_lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fuse_lock);
                fuse_copy_finish(&cs);
                spin_lock(&fuse_lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fuse_lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fuse_lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}

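/*
 * The device is always writable; it is readable only when a request
 * is waiting on the pending list.
 */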
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fuse_lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fuse_lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fuse_lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fuse_lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->interrupted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fuse_lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        spin_lock(&fuse_lock);
                }
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fuse_lock);
}

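/*
 * Final close of the device file: mark the connection dead, abort
 * all pending and processing requests, and drop the device's
 * reference to the connection.
 */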
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc) {
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
        }
        spin_unlock(&fuse_lock);
        if (fc) {
                fasync_helper(-1, file, 0, &fc->fasync);
                kobject_put(&fc->kobj);
        }

        return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = fuse_dev_read,
        .readv          = fuse_dev_readv,
        .write          = fuse_dev_write,
        .writev         = fuse_dev_writev,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}