err.no Git - linux-2.6/commitdiff
IB: Put rlimit accounting struct in struct ib_umem
author    Roland Dreier <rolandd@cisco.com>
          Thu, 19 Apr 2007 03:20:28 +0000 (20:20 -0700)
committer Roland Dreier <rolandd@cisco.com>
          Wed, 9 May 2007 01:00:37 +0000 (18:00 -0700)
When memory pinned with ib_umem_get() is released, ib_umem_release()
needs to subtract the amount of memory being unpinned from
mm->locked_vm.  However, ib_umem_release() may be called with
mm->mmap_sem already held for writing if the memory is being released
as part of an munmap() call, so it is sometimes necessary to defer
this accounting into a workqueue.

However, the work struct used to defer this accounting is dynamically
allocated just before it is queued, so that allocation can fail.  If it
does, ib_umem_release() has no choice but to bail out and leave the
process with a permanently elevated locked_vm.

Fix this by making the structure used to defer the accounting part of
struct ib_umem itself.  Then, once creating the struct ib_umem and
pinning the memory have succeeded, no later allocation is left to fail.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
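
The same idea, reduced to a minimal sketch outside the InfiniBand code
(struct pinned_buf, pinned_buf_account() and pinned_buf_release() are
made-up names for illustration, not anything in the kernel tree):
embedding the work_struct in the object being torn down means the
release path never has to allocate, and container_of() recovers the
object inside the work handler, which then frees it.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct pinned_buf {
        struct work_struct work;        /* embedded, allocated with the object */
        unsigned long      diff;        /* pages to subtract from locked_vm */
};

static void pinned_buf_account(struct work_struct *work)
{
        /* Recover the containing object from the embedded work_struct. */
        struct pinned_buf *buf = container_of(work, struct pinned_buf, work);

        /* ... do the deferred locked_vm accounting using buf->diff ... */

        kfree(buf);                     /* the object is freed by the worker */
}

static void pinned_buf_release(struct pinned_buf *buf)
{
        /*
         * No kmalloc() on this path: the work item already lives inside
         * *buf, so deferring the accounting can never fail.
         */
        INIT_WORK(&buf->work, pinned_buf_account);
        schedule_work(&buf->work);
}

Because the work item lives inside the object, the object has to stay
alive until the handler runs; that is why the patch below moves the
kfree(umem) out of the top of ib_umem_release() and into
ib_umem_account() on the deferred path (and to the end of
ib_umem_release() otherwise).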
drivers/infiniband/core/umem.c
include/rdma/ib_umem.h

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 48e854cf416f4c273a0272d2972dc60fe9d78268..f32ca5fbb26bcc56b02cd8c97bfa39bce1f3c63b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
 
 #include "uverbs.h"
 
-struct ib_umem_account_work {
-       struct work_struct work;
-       struct mm_struct  *mm;
-       unsigned long      diff;
-};
-
-
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
        struct ib_umem_chunk *chunk, *tmp;
@@ -192,16 +185,15 @@ out:
 }
 EXPORT_SYMBOL(ib_umem_get);
 
-static void ib_umem_account(struct work_struct *_work)
+static void ib_umem_account(struct work_struct *work)
 {
-       struct ib_umem_account_work *work =
-               container_of(_work, struct ib_umem_account_work, work);
-
-       down_write(&work->mm->mmap_sem);
-       work->mm->locked_vm -= work->diff;
-       up_write(&work->mm->mmap_sem);
-       mmput(work->mm);
-       kfree(work);
+       struct ib_umem *umem = container_of(work, struct ib_umem, work);
+
+       down_write(&umem->mm->mmap_sem);
+       umem->mm->locked_vm -= umem->diff;
+       up_write(&umem->mm->mmap_sem);
+       mmput(umem->mm);
+       kfree(umem);
 }
 
 /**
@@ -210,7 +202,6 @@ static void ib_umem_account(struct work_struct *_work)
  */
 void ib_umem_release(struct ib_umem *umem)
 {
-       struct ib_umem_account_work *work;
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
        unsigned long diff;
@@ -222,7 +213,6 @@ void ib_umem_release(struct ib_umem *umem)
                return;
 
        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
-       kfree(umem);
 
        /*
         * We may be called with the mm's mmap_sem already held.  This
@@ -233,17 +223,11 @@ void ib_umem_release(struct ib_umem *umem)
         * we defer the vm_locked accounting to the system workqueue.
         */
        if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
-               work = kmalloc(sizeof *work, GFP_KERNEL);
-               if (!work) {
-                       mmput(mm);
-                       return;
-               }
+               INIT_WORK(&umem->work, ib_umem_account);
+               umem->mm   = mm;
+               umem->diff = diff;
 
-               INIT_WORK(&work->work, ib_umem_account);
-               work->mm   = mm;
-               work->diff = diff;
-
-               schedule_work(&work->work);
+               schedule_work(&umem->work);
                return;
        } else
                down_write(&mm->mmap_sem);
@@ -251,6 +235,7 @@ void ib_umem_release(struct ib_umem *umem)
        current->mm->locked_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
+       kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
 
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 06307f7e43e0d0c9f8e5ca2458963279178e5dc4..b3a36f7d79e5ee76d68e3a54b7bb516a092f8bee 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -45,6 +45,9 @@ struct ib_umem {
        int                     page_size;
        int                     writable;
        struct list_head        chunk_list;
+       struct work_struct      work;
+       struct mm_struct       *mm;
+       unsigned long           diff;
 };
 
 struct ib_umem_chunk {