/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

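/*
 * Worked example (illustrative, not part of the original source): both
 * 64-bit kernel virtual ranges fold back to physical addresses, so
 *
 *	__phys_addr(PAGE_OFFSET + 0x2000)        == 0x2000
 *	__phys_addr(__START_KERNEL_map + 0x2000) == phys_base + 0x2000
 */
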
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/* Not usable memory: */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

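/*
 * Worked example (illustrative, not from the original source): with a
 * typical E820 layout, page_is_ram(0) is 0 (BIOS-owned first page),
 * page_is_ram(0xa0) is 0 (inside the 640k-1Mb BIOS window), and a pfn
 * that lies inside an E820_RAM entry yields 1.
 */
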
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		/*
		 * FIXME: we will use UC MINUS for now, as video fb drivers
		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
		 */
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		remove_vm_area((void *)(vaddr & PAGE_MASK));
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

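/*
 * Worked example for the non-page-aligned case described above
 * (illustrative values): a request for phys 0xfee00234 with size 8 keeps
 * offset = 0x234, remaps the single page at 0xfee00000 and returns
 * vaddr + 0x234, so the caller never sees the alignment fixup.
 */
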
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);

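/*
 * Typical driver usage (a hedged sketch, not taken from this file; the
 * BAR index and the 0x10 register offset are made up for illustration):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	...
 *	iounmap(regs);
 */
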
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

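/*
 * Note (illustrative sketch, not from this file): for the low PCI/ISA
 * range __ioremap() returns a plain kernel-mapping address, so
 *
 *	void __iomem *p = ioremap_nocache(0xb8000, 0x1000);
 *	iounmap(p);
 *
 * never touches the vmlist; the phys_to_virt() early return above
 * handles it.
 */
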
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__attribute__((aligned(PAGE_SIZE)));

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned; last_addr is inclusive, so
	 * round up from last_addr + 1.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

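/*
 * Boot-time usage sketch (hedged, not from this file): code that needs a
 * firmware table before the normal ioremap machinery is available can do
 *
 *	void *map = early_ioremap(table_phys, table_len);
 *	...parse the table...
 *	early_iounmap(map, table_len);
 *
 * where table_phys/table_len are hypothetical values from the firmware;
 * each early_ioremap() must be paired with an early_iounmap() because of
 * the nesting counter above.
 */
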
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	/* nesting is unsigned; check the signed counter for underflow */
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */