IB/umem: Add hugetlb flag to struct ib_umem
author	Joachim Fenkes <fenkes@de.ibm.com>
Thu, 13 Sep 2007 16:15:28 +0000 (18:15 +0200)
committer	Roland Dreier <rolandd@cisco.com>
Wed, 10 Oct 2007 02:59:13 +0000 (19:59 -0700)
During ib_umem_get(), determine whether all pages from the memory
region are hugetlb pages and report this in the "hugetlb" member.
Low-level drivers can use this information if they need it.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/core/umem.c
include/rdma/ib_umem.h
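
Below is a minimal, hypothetical sketch (not part of this commit) of how a low-level
driver might consult the new flag after pinning a region: the function name and the
larger-page decision are illustrative assumptions; only ib_umem_get(), ib_umem_release()
and the hugetlb member come from the in-tree API shown in the diff.

	#include <linux/err.h>
	#include <rdma/ib_umem.h>

	static int example_reg_mr(struct ib_ucontext *context, unsigned long addr,
				  size_t size, int access)
	{
		struct ib_umem *umem;

		umem = ib_umem_get(context, addr, size, access);
		if (IS_ERR(umem))
			return PTR_ERR(umem);

		if (umem->hugetlb) {
			/* every page of the region is hugetlb-backed; the
			 * driver could program a larger hardware page size */
		} else {
			/* mixed or regular pages; fall back to PAGE_SIZE */
		}

		ib_umem_release(umem);
		return 0;
	}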

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 664d2faa9e744b48aa6ce11e4c195ad18d2d66eb..2f54e29dc7a64b3771e6fec854d7f4699b355e17 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 
 #include "uverbs.h"
 
@@ -75,6 +76,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 {
        struct ib_umem *umem;
        struct page **page_list;
+       struct vm_area_struct **vma_list;
        struct ib_umem_chunk *chunk;
        unsigned long locked;
        unsigned long lock_limit;
@@ -104,6 +106,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);
 
+       /* We assume the memory is from hugetlb until proved otherwise */
+       umem->hugetlb   = 1;
+
        INIT_LIST_HEAD(&umem->chunk_list);
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
@@ -112,6 +117,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * if we can't alloc the vma_list, it's not so bad;
+        * just assume the memory is not hugetlb memory
+        */
+       vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
+       if (!vma_list)
+               umem->hugetlb = 0;
+
        npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
 
        down_write(&current->mm->mmap_sem);
@@ -131,7 +144,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                ret = get_user_pages(current, current->mm, cur_base,
                                     min_t(int, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
-                                    1, !umem->writable, page_list, NULL);
+                                    1, !umem->writable, page_list, vma_list);
 
                if (ret < 0)
                        goto out;
@@ -152,6 +165,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
                        chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
                        for (i = 0; i < chunk->nents; ++i) {
+                               if (vma_list &&
+                                   !is_vm_hugetlb_page(vma_list[i + off]))
+                                       umem->hugetlb = 0;
                                chunk->page_list[i].page   = page_list[i + off];
                                chunk->page_list[i].offset = 0;
                                chunk->page_list[i].length = PAGE_SIZE;
@@ -186,6 +202,8 @@ out:
                current->mm->locked_vm = locked;
 
        up_write(&current->mm->mmap_sem);
+       if (vma_list)
+               free_page((unsigned long) vma_list);
        free_page((unsigned long) page_list);
 
        return ret < 0 ? ERR_PTR(ret) : umem;
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index c533d6c7903f0990ffa489581d0011bc5f094e1f..22298423cf0b3d0e597e347de0ab376b8bbcaf24 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -45,6 +45,7 @@ struct ib_umem {
        int                     offset;
        int                     page_size;
        int                     writable;
+       int                     hugetlb;
        struct list_head        chunk_list;
        struct work_struct      work;
        struct mm_struct       *mm;