/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

/* Serializes all changes to the list of memory segments. */
static DEFINE_MUTEX(vmem_mutex);

/* One contiguous range of physical memory known to the 1:1 mapping. */
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
static pud_t *vmem_pud_alloc(void)
{
	pud_t *pud;

	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

static pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}
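/*
 * Note on table sizes (for illustration): on 64-bit s390 the pud and pmd
 * levels correspond to region-third and segment tables, each with 2048
 * eight-byte entries, hence the PAGE_SIZE * 4 allocations above. The
 * lowest level page table has PTRS_PER_PTE entries and comes from
 * page_table_alloc() once the slab allocator is available, or from
 * bootmem before that.
 */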
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

		/* Use a large (segment) page if alignment and size allow. */
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmemmap_alloc_block(PAGE_SIZE, 0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}
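/*
 * Descriptive note: vmemmap_populate() is the backend used with
 * CONFIG_SPARSEMEM_VMEMMAP. It walks the virtual struct page array one
 * page at a time and backs any hole with a fresh page obtained via
 * vmemmap_alloc_block(), reusing the same table allocators as the
 * 1:1 mapping above.
 */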
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
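/*
 * Illustrative example with made-up addresses: given an existing segment
 * [0x20000000, +0x10000000), a new segment [0x2f000000, +0x02000000)
 * fails both the "starts after" and the "ends before" checks above, so
 * insert_memory_segment() rejects it with -ENOSPC. A new segment starting
 * at 0x30000000 passes the first check and is linked into mem_segs,
 * provided it stays below VMEM_MAX_PHYS.
 */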
/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
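/*
 * Usage sketch for the exported interface (hypothetical caller shown for
 * illustration): code that attaches a shared memory segment, e.g. the DCSS
 * handling in arch/s390/mm/extmem.c, is expected to pair the two calls:
 *
 *	ret = vmem_add_mapping(seg_start, seg_size);
 *	if (ret)
 *		return ret;
 *	... access the segment through the 1:1 mapping ...
 *	vmem_remove_mapping(seg_start, seg_size);
 */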
/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		/* Map each chunk read-only where it overlaps [ro_start, ro_end). */
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}
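/*
 * Worked example for the splitting above (illustrative numbers only):
 * with ro_start = 0x100000 and ro_end = 0x500000, a single chunk covering
 * [0x0, 0x10000000) takes the final else branch and is mapped as
 * [0x0, 0x100000) read-write, [0x100000, 0x500000) read-only and
 * [0x500000, 0x10000000) read-write again.
 */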
/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);