X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fm68k%2Fmm%2Fmotorola.c;h=7d571a2b44dd0bd2d61e6d3dcc9239146fe5d4c4;hb=0e9871df2389560e94ba01e40959140ee56def4b;hp=d855fec263172c0ad0bb65ffa67a8121066e71fc;hpb=1da177e4c3f41524e886b7f1b8a0c1fc7321cac2;p=linux-2.6

diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index d855fec263..7d571a2b44 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -1,5 +1,5 @@
 /*
- * linux/arch/m68k/motorola.c
+ * linux/arch/m68k/mm/motorola.c
  *
  * Routines specific to the Motorola MMU, originally from:
  * linux/arch/m68k/init.c
@@ -8,7 +8,6 @@
  * Moved 8/20/1999 Sam Creasey
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -44,6 +43,11 @@ unsigned long mm_cachebits;
 EXPORT_SYMBOL(mm_cachebits);
 #endif
 
+/* size of memory already mapped in head.S */
+#define INIT_MAPPED_SIZE	(4UL<<20)
+
+extern unsigned long availmem;
+
 static pte_t * __init kernel_page_table(void)
 {
 	pte_t *ptablep;
@@ -99,19 +103,20 @@ static pmd_t * __init kernel_ptr_table(void)
 	return last_pgtable;
 }
 
-static unsigned long __init
-map_chunk (unsigned long addr, long size)
+static void __init map_node(int node)
 {
 #define PTRTREESIZE (256*1024)
 #define ROOTTREESIZE (32*1024*1024)
-	static unsigned long virtaddr = PAGE_OFFSET;
-	unsigned long physaddr;
+	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
 	pmd_t *pmd_dir;
 	pte_t *pte_dir;
 
-	physaddr = (addr | m68k_supervisor_cachemode |
-		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+	size = m68k_memory[node].size;
+	physaddr = m68k_memory[node].addr;
+	virtaddr = (unsigned long)phys_to_virt(physaddr);
+	physaddr |= m68k_supervisor_cachemode |
+		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
 	if (CPU_IS_040_OR_060)
 		physaddr |= _PAGE_GLOBAL040;
 
@@ -191,8 +196,6 @@ map_chunk (unsigned long addr, long size)
 #ifdef DEBUG
 	printk("\n");
 #endif
-
-	return virtaddr;
 }
 
 /*
@@ -201,15 +204,16 @@ map_chunk (unsigned long addr, long size)
  */
 void __init paging_init(void)
 {
-	int chunk;
-	unsigned long mem_avail = 0;
-	unsigned long zones_size[3] = { 0, };
+	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long min_addr, max_addr;
+	unsigned long addr, size, end;
+	int i;
 
 #ifdef DEBUG
 	{
 		extern unsigned long availmem;
-		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
-			kernel_pg_dir, availmem, start_mem, end_mem);
+		printk ("start of paging_init (%p, %lx)\n",
+			kernel_pg_dir, availmem);
 	}
 #endif
 
@@ -223,24 +227,62 @@ void __init paging_init(void)
 			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
 	}
 
+	min_addr = m68k_memory[0].addr;
+	max_addr = min_addr + m68k_memory[0].size;
+	for (i = 1; i < m68k_num_memory;) {
+		if (m68k_memory[i].addr < min_addr) {
+			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
+				m68k_memory[i].addr, m68k_memory[i].size);
+			printk("Fix your bootloader or use a memfile to make use of this area!\n");
+			m68k_num_memory--;
+			memmove(m68k_memory + i, m68k_memory + i + 1,
+				(m68k_num_memory - i) * sizeof(struct mem_info));
+			continue;
+		}
+		addr = m68k_memory[i].addr + m68k_memory[i].size;
+		if (addr > max_addr)
+			max_addr = addr;
+		i++;
+	}
+	m68k_memoffset = min_addr - PAGE_OFFSET;
+	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+
+	module_fixup(NULL, __start_fixup, __stop_fixup);
+	flush_icache();
+
+	high_memory = phys_to_virt(max_addr);
+
+	min_low_pfn = availmem >> PAGE_SHIFT;
+	max_low_pfn = max_addr >> PAGE_SHIFT;
+
+	for (i = 0; i < m68k_num_memory; i++) {
+		addr = m68k_memory[i].addr;
+		end = addr + m68k_memory[i].size;
+		m68k_setup_node(i);
+		availmem = PAGE_ALIGN(availmem);
+		availmem += init_bootmem_node(NODE_DATA(i),
+					      availmem >> PAGE_SHIFT,
+					      addr >> PAGE_SHIFT,
+					      end >> PAGE_SHIFT);
+	}
+
 	/*
 	 * Map the physical memory available into the kernel virtual
-	 * address space. It may allocate some memory for page
-	 * tables and thus modify availmem.
+	 * address space. First initialize the bootmem allocator with
+	 * the memory we already mapped, so map_node() has something
+	 * to allocate.
 	 */
+	addr = m68k_memory[0].addr;
+	size = m68k_memory[0].size;
+	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
+	map_node(0);
+	if (size > INIT_MAPPED_SIZE)
+		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);
-
-	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
-		mem_avail = map_chunk (m68k_memory[chunk].addr,
-				       m68k_memory[chunk].size);
-
-	}
+	for (i = 1; i < m68k_num_memory; i++)
+		map_node(i);
 
 	flush_tlb_all();
 
-#ifdef DEBUG
-	printk ("memory available is %ldKB\n", mem_avail >> 10);
-	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
-		start_mem, end_mem);
-#endif
 
 	/*
 	 * initialize the bad page table and bad page to point
@@ -257,14 +299,11 @@ void __init paging_init(void)
 #ifdef DEBUG
 	printk ("before free_area_init\n");
 #endif
-	zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
-			 (mach_max_dma_address+1) : (unsigned long)high_memory);
-	zones_size[1] = (unsigned long)high_memory - zones_size[0];
-
-	zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
-	zones_size[1] >>= PAGE_SHIFT;
-
-	free_area_init(zones_size);
+	for (i = 0; i < m68k_num_memory; i++) {
+		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
+		free_area_init_node(i, pg_data_map + i, zones_size,
+				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+	}
 }
 
 extern char __init_begin, __init_end;
@@ -276,7 +315,7 @@ void free_initmem(void)
 	addr = (unsigned long)&__init_begin;
 	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
 		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
-		set_page_count(virt_to_page(addr), 1);
+		init_page_count(virt_to_page(addr));
 		free_page(addr);
 		totalram_pages++;
 	}
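
Note (not part of the patch): the central new mechanism above is the per-chunk node lookup. paging_init() now scans all memory chunks, picks m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6 so that the whole span from the first to the last chunk fits into 2^6 = 64 lookup slots, and calls m68k_setup_node() for each chunk. m68k_setup_node() is not shown in this diff; presumably it points every slot a chunk covers at that chunk's node data. The stand-alone, user-space sketch below only illustrates that idea: the chunk addresses, the fixed 64-entry table, the fls_ul() helper and the indexing by offset from the first chunk are illustrative simplifications, not code from the kernel, which works with pg_data_table and virtual addresses.

/*
 * Stand-alone illustration only -- not kernel code.  Chunk addresses
 * and the 64-entry table are made up; the real kernel sizes its
 * lookup table for the whole virtual address range and fills it in
 * m68k_setup_node().
 */
#include <stdio.h>

#define SLOTS 64

struct mem_info {
	unsigned long addr;	/* physical start of the chunk */
	unsigned long size;	/* length of the chunk in bytes */
};

/* hypothetical layout: two discontiguous RAM chunks */
static struct mem_info chunks[] = {
	{ 0x08000000UL, 0x02000000UL },		/* 32 MB at 128 MB */
	{ 0x0c000000UL, 0x01000000UL },		/* 16 MB at 192 MB */
};
#define NUM_CHUNKS (sizeof(chunks) / sizeof(chunks[0]))

/* same semantics as the kernel's fls(): position of the highest set bit */
static int fls_ul(unsigned long x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long min_addr = chunks[0].addr;
	unsigned long max_addr = chunks[0].addr + chunks[0].size;
	int node_table[SLOTS] = { 0 };
	unsigned int i;
	int shift;

	for (i = 1; i < NUM_CHUNKS; i++) {
		unsigned long end = chunks[i].addr + chunks[i].size;

		if (end > max_addr)
			max_addr = end;
	}

	/* the patch's formula: the whole span must fit into 64 slots */
	shift = fls_ul(max_addr - min_addr - 1) - 6;

	/* roughly what m68k_setup_node() is expected to do: every slot
	 * a chunk overlaps points at that chunk's node number */
	for (i = 0; i < NUM_CHUNKS; i++) {
		unsigned long a;

		for (a = chunks[i].addr; a < chunks[i].addr + chunks[i].size;
		     a += 1UL << shift)
			node_table[(a - min_addr) >> shift] = i;
	}

	printf("shift = %d, slot size = %lu KB\n", shift, (1UL << shift) >> 10);
	for (i = 0; i < NUM_CHUNKS; i++)
		printf("chunk at 0x%08lx -> node %d\n", chunks[i].addr,
		       node_table[(chunks[i].addr - min_addr) >> shift]);
	return 0;
}

Built with a plain cc, the sketch prints the chosen shift (2 MB slots for this made-up layout) and the node each chunk start resolves to, which mirrors how a pointer is mapped back to its node's bootmem/zone data after this patch.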