X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fvmalloc.c;h=2e01af365848eb3d8485369d2fc4d9c4dad2a05c;hb=b4f48b6363c81ca743ef46943ef23fd72e60f679;hp=ddf87145cc49e0dc3d4e6e74f157b6a5e2e08f5c;hpb=ae6f4a8b986c9d9eff00fcbd23d1c2585b97fe57;p=linux-2.6

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc..2e01af3658 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -164,6 +164,7 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	flush_cache_vmap((unsigned long) area->addr, end);
 	return err;
 }
+EXPORT_SYMBOL_GPL(map_vm_area);
 
 static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
 					    unsigned long start, unsigned long end,
@@ -189,7 +190,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!size))
 		return NULL;
 
-	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
 	if (unlikely(!area))
 		return NULL;
 
@@ -242,6 +244,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 {
 	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(__get_vm_area);
 
 /**
  * get_vm_area - reserve a contingous kernel virtual area
@@ -432,11 +435,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -445,7 +449,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)
@@ -583,9 +586,9 @@ void *vmalloc_exec(unsigned long size)
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-#define GFP_VMALLOC32 GFP_DMA
+#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
 #else
 #define GFP_VMALLOC32 GFP_KERNEL
 #endif
@@ -767,3 +770,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+	/* apply_to_page_range() does all the hard work. */
+	return 0;
+}
+
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, it syncs the pagetable across all
+ * processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (area == NULL)
+		return NULL;
+
+	/*
+	 * This ensures that page tables are constructed for this region
+	 * of kernel virtual address space and mapped into init_mm.
+ */ + if (apply_to_page_range(&init_mm, (unsigned long)area->addr, + area->size, f, NULL)) { + free_vm_area(area); + return NULL; + } + + /* Make sure the pagetables are constructed in process kernel + mappings */ + vmalloc_sync_all(); + + return area; +} +EXPORT_SYMBOL_GPL(alloc_vm_area); + +void free_vm_area(struct vm_struct *area) +{ + struct vm_struct *ret; + ret = remove_vm_area(area->addr); + BUG_ON(ret != area); + kfree(area); +} +EXPORT_SYMBOL_GPL(free_vm_area);
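
For reference, here is a minimal usage sketch of the two new exports,
alloc_vm_area() and free_vm_area(), in the single-argument form added by
this patch. It is a hypothetical module, not part of the patch: the name
"vmarea_demo", the four-page size and the printk text are made up for
illustration.

/*
 * Hypothetical example module (not part of the patch): reserve a few
 * pages of kernel virtual address space with alloc_vm_area() and give
 * them back with free_vm_area().
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

static struct vm_struct *demo_area;

static int __init vmarea_demo_init(void)
{
	/*
	 * Reserve four pages of kernel virtual address space.  The page
	 * tables covering the range are allocated and synced into the
	 * kernel mappings seen by all processes, but no physical pages
	 * are mapped yet; a backend (map_vm_area(), a hypervisor call)
	 * fills in the PTEs later.
	 */
	demo_area = alloc_vm_area(4 * PAGE_SIZE);
	if (!demo_area)
		return -ENOMEM;

	printk(KERN_INFO "vmarea_demo: reserved %lu bytes at %p\n",
	       demo_area->size, demo_area->addr);
	return 0;
}

static void __exit vmarea_demo_exit(void)
{
	/* Unregister the area and free its vm_struct. */
	free_vm_area(demo_area);
}

module_init(vmarea_demo_init);
module_exit(vmarea_demo_exit);
MODULE_LICENSE("GPL");	/* the new helpers are EXPORT_SYMBOL_GPL */

Note that free_vm_area() hits the BUG_ON() above if it is handed an area
that was never registered, so it should only be given a vm_struct obtained
from alloc_vm_area() or get_vm_area(); the error path of alloc_vm_area()
already calls it on the caller's behalf.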