/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};
#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif
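/*
 * Worked example for the translation above (illustrative, assuming the
 * default x86_64 layout): an address in the direct mapping translates by
 * plain offset subtraction,
 *
 *	__phys_addr(PAGE_OFFSET + 0x1000) == 0x1000
 *
 * while an address in the kernel text mapping (>= __START_KERNEL_map)
 * additionally accounts for phys_base on relocated kernels.
 */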
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;

		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
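/*
 * Example (illustrative): if a buggy BIOS reports, say, 0xF0000-0xF7FFF
 * (inside the 640k-1MB hole) as E820_RAM, that entry falls entirely
 * within [BIOS_BEGIN, BIOS_END) and is skipped by the sanity check
 * above, so page_is_ram(0xF0) still returns 0.
 */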
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		remove_vm_area((void *)(vaddr & PAGE_MASK));
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
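/*
 * Illustration of the alignment fixup above (hypothetical device
 * address): ioremapping 4 bytes at physical 0xfee000f0 maps the whole
 * page at 0xfee00000 and returns the new virtual address plus the 0xf0
 * in-page offset, so the caller never sees the alignment detail.
 */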
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses, so in particular driver authors should read up on PCI write
 * posting. It's useful when control registers live in such an area and
 * write combining or read caching is not desirable.
 *
 * The mapping must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
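/*
 * Typical usage (a sketch, not taken from this file; "pdev" and the
 * 0x10 register offset are hypothetical):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	iounmap(regs);
 */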
void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);
/**
 * iounmap - free an I/O remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the global
	 * lists until we're done with it. cpa takes care of the direct
	 * mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__attribute__((aligned(PAGE_SIZE)));
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	pgd_t *pgd = &swapper_pg_dir[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	set_pmd(pmd, __pmd(__pa(bm_pte) | _PAGE_TABLE));

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	/* Release the boot-time pte page (bm_pte), not the pmd page */
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *)(offset + fix_to_virt(idx0));
}
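/*
 * Boot-time usage sketch ("table_phys" and "table_len" stand in for a
 * firmware table located earlier in boot):
 *
 *	void *tbl = early_ioremap(table_phys, table_len);
 *	if (tbl) {
 *		... parse the table ...
 *		early_iounmap(tbl, table_len);
 *	}
 */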
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	/* nesting is unsigned; check the signed counter for underflow */
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */