/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc && !fc->connected)
                fc = NULL;
        spin_unlock(&fuse_lock);
        return fc;
}
static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}
void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}
void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}
static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fuse_lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fuse_lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}
/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        if (intr) {
                atomic_dec(&fc->num_waiting);
                return NULL;
        }
        return do_get_request(fc);
}
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (req->preallocated) {
                atomic_dec(&fc->num_waiting);
                list_add(&req->list, &fc->unused_list);
        } else
                fuse_request_free(req);

        /* If we are in debt decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
        spin_unlock(&fuse_lock);
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}
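
/*
 * Typical synchronous caller pattern (a sketch, not code from this
 * file; the opcode and argument setup vary per operation):
 *
 *      struct fuse_req *req = fuse_get_request(fc);
 *      if (!req)
 *              return -EINTR;
 *      req->in.h.opcode = FUSE_GETATTR;
 *      ...fill in req->in.args...
 *      request_send(fc, req);
 *      err = req->out.h.error;
 *      fuse_put_request(fc, req);
 */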
void fuse_release_background(struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fuse_lock);
        list_del(&req->bg_entry);
        spin_unlock(&fuse_lock);
}
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        struct fuse_init_out *arg = &req->misc.init_out;

        if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
                fc->conn_error = 1;
        else {
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
        }

        /* After the INIT reply is received other requests can go
           out.  So do (FUSE_MAX_OUTSTANDING - 1) number of up()s on
           outstanding_sem.  The last up() is done in
           fuse_putback_request() */
        for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
                up(&fc->outstanding_sem);

        fuse_put_request(fc, req);
}
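
/*
 * Worked example of the semaphore accounting: assuming
 * outstanding_sem starts at 1 (its initialization lives in inode.c,
 * not shown here), the single initial slot is consumed by the INIT
 * request itself.  On the INIT reply the loop above does
 * FUSE_MAX_OUTSTANDING - 1 up()s, and putting the INIT request back
 * does one more, so the count settles at FUSE_MAX_OUTSTANDING
 * concurrent requests.
 */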
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        spin_unlock(&fuse_lock);
        if (req->background) {
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
        }
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        else
                fuse_put_request(fc, req);
}
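
/*
 * The 'end' callback's contract, illustrated (a sketch; my_end is a
 * hypothetical callback): the callback inherits the reference that
 * would otherwise be dropped here, so it must release the request
 * itself, just as process_init_reply() above does:
 *
 *      static void my_end(struct fuse_conn *fc, struct fuse_req *req)
 *      {
 *              ...use req->out...
 *              fuse_put_request(fc, req);
 *      }
 */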
/*
 * Unfortunately request interruption doesn't just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation have been released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * inode.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}
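
/*
 * Sketch of the unmount-time walk described in the comment above (the
 * real loop lives in inode.c, not in this file; illustrative only):
 *
 *      while (!list_empty(&fc->background)) {
 *              struct fuse_req *req = list_entry(fc->background.next,
 *                                                struct fuse_req, bg_entry);
 *              fuse_release_background(req);
 *      }
 */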
/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fuse_lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fuse_lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;

        if (!req->interrupted) {
                req->out.h.error = -EINTR;
                req->interrupted = 1;
        }
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fuse_lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fuse_lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If the request is not preallocated (either FORGET
                   or RELEASE), then still decrease outstanding_sem,
                   so the user can't open an unbounded number of files
                   while not processing the RELEASE requests.  However
                   for efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
}
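
/*
 * Worked example of the debt mechanism above: suppose all
 * FUSE_MAX_OUTSTANDING semaphore slots are taken when a RELEASE is
 * queued.  down_trylock() fails, so outstanding_debt is bumped
 * instead of blocking the caller.  The next fuse_putback_request()
 * then decrements the debt rather than up()ing the semaphore, so the
 * total number of queued-but-unprocessed requests stays bounded.
 */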
/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fuse_lock);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}
void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        background_request(fc, req);
        spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
}
void fuse_send_init(struct fuse_conn *fc)
{
        /* This is called from fuse_read_super() so there's guaranteed
           to be exactly one request available */
        struct fuse_req *req = fuse_get_request(fc);
        struct fuse_init_in *arg = &req->misc.init_in;
        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
        req->out.numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        req->out.argvar = 1;
        req->out.args[0].size = sizeof(struct fuse_init_out);
        req->out.args[0].value = &req->misc.init_out;
        req->end = process_init_reply;
        request_send_background(fc, req);
}
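
/*
 * Daemon-side sketch of this handshake (illustrative userspace code,
 * not part of this file; fuse_fd, in_header and error handling are
 * assumed): the first request read from the device is FUSE_INIT, and
 * the reply carries the daemon's version in a struct fuse_init_out:
 *
 *      struct fuse_init_out out = {
 *              .major = FUSE_KERNEL_VERSION,
 *              .minor = FUSE_KERNEL_MINOR_VERSION,
 *      };
 *      struct fuse_out_header oh = {
 *              .unique = in_header.unique,
 *              .error = 0,
 *              .len = sizeof(oh) + sizeof(out),
 *      };
 *      struct iovec iov[2] = { { &oh, sizeof(oh) }, { &out, sizeof(out) } };
 *      writev(fuse_fd, iov, 2);
 */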
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fuse_lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fuse_lock);
        }
        return err;
}
/*
 * Unlock request.  If it was interrupted while being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
        if (req) {
                spin_lock(&fuse_lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fuse_lock);
        }
}
struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct fuse_req *req, const struct iovec *iov,
                           unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}
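
/*
 * Wire format illustration (LOOKUP is used as the example; this is a
 * description, not code from this file).  For a request with a single
 * string argument the buffer handed to userspace is the header
 * followed by the argument bytes, and in.h.len covers both:
 *
 *      | struct fuse_in_header | "name\0"     |
 *        sizeof(header)          args[0].size
 *
 * fuse_copy_one() transfers the header, then fuse_copy_args()
 * transfers each argument in turn; when argpages is set the last
 * argument comes from the request's pages (e.g. WRITE data) via
 * fuse_copy_pages().
 */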
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fuse_lock);
                schedule();
                spin_lock(&fuse_lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.
 * If no reply is needed (FORGET) or the request has been interrupted
 * or there was an error during the copying then it's finished by
 * calling request_end().  Otherwise add it to the processing list,
 * and set the 'sent' state.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_conn *fc;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;

 restart:
        spin_lock(&fuse_lock);
        fc = file->private_data;
        err = -EPERM;
        if (!fc)
                goto err_unlock;
        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since its data may be too large */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fuse_lock);
        fuse_copy_init(&cs, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fuse_lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fuse_lock);
        return err;
}
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}
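
/*
 * Matching userspace read loop (daemon-side sketch; BUFSIZE and
 * dispatch() are hypothetical names, and error handling is omitted).
 * Each read() returns exactly one whole request, so the buffer must
 * be large enough for the biggest expected request:
 *
 *      char buf[BUFSIZE];
 *      for (;;) {
 *              ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *              struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *              if (n < (ssize_t) sizeof(*in))
 *                      break;
 *              dispatch(in->opcode, in->unique, buf + sizeof(*in));
 *      }
 */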
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}
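
/*
 * Worked example of the argvar adjustment above: a READ request asks
 * for 4096 bytes, so the reply's last argument has size 4096 and
 * argvar set.  If the file is shorter and userspace sends only 1000
 * data bytes, nbytes is sizeof(struct fuse_out_header) + 1000,
 * diffsize works out to 3096, and lastarg->size is trimmed to 1000
 * before the copy.
 */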
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -ENODEV;

        fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fuse_lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fuse_lock);
                fuse_copy_finish(&cs);
                spin_lock(&fuse_lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fuse_lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fuse_lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}
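
/*
 * Matching userspace reply (daemon-side sketch; request_unique,
 * payload and payload_size are placeholders).  Header and payload
 * must go to the device in a single write, with oh.len covering the
 * total:
 *
 *      struct fuse_out_header oh = {
 *              .unique = request_unique,
 *              .error = 0,
 *              .len = sizeof(oh) + payload_size,
 *      };
 *      struct iovec iov[2] = {
 *              { &oh, sizeof(oh) },
 *              { payload, payload_size },
 *      };
 *      writev(fuse_fd, iov, 2);
 */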
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        unsigned mask = POLLOUT | POLLWRNORM;

        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fuse_lock);
        if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fuse_lock);

        return mask;
}
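
/*
 * This supports a poll()-driven daemon loop (sketch;
 * handle_one_request() is a hypothetical helper).  The device is
 * always writable, so only POLLIN is interesting:
 *
 *      struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *      while (poll(&pfd, 1, -1) > 0)
 *              if (pfd.revents & POLLIN)
 *                      handle_one_request(fuse_fd);
 */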
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fuse_lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fuse_lock);
        }
}
/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->interrupted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fuse_lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        spin_lock(&fuse_lock);
                }
        }
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
        }
        spin_unlock(&fuse_lock);
}
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc) {
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
        }
        spin_unlock(&fuse_lock);
        if (fc)
                kobject_put(&fc->kobj);

        return 0;
}
struct file_operations fuse_dev_operations = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = fuse_dev_read,
        .readv = fuse_dev_readv,
        .write = fuse_dev_write,
        .writev = fuse_dev_writev,
        .poll = fuse_dev_poll,
        .release = fuse_dev_release,
};
static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name = "fuse",
        .fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}
void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}