/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
9 #include <linux/bootmem.h>
10 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
16 #include <asm/cacheflush.h>
18 #include <asm/fixmap.h>
19 #include <asm/pgtable.h>
20 #include <asm/tlbflush.h>
24 unsigned long __phys_addr(unsigned long x)
26 if (x >= __START_KERNEL_map)
27 return x - __START_KERNEL_map + phys_base;
28 return x - PAGE_OFFSET;
30 EXPORT_SYMBOL(__phys_addr);
35 * Fix up the linear direct mapping of the kernel to avoid cache attribute
38 static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
41 unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
44 /* No change for pages after the last mapping */
45 if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
48 npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
49 vaddr = (unsigned long) __va(phys_addr);
52 * If there is no identity map for this address,
53 * change_page_attr_addr is unnecessary
55 if (!lookup_address(vaddr, &level))
59 * Must use an address here and not struct page because the
60 * phys addr can be a in hole between nodes and not have a
63 err = change_page_attr_addr(vaddr, npages, prot);
72 * Remap an arbitrary physical address space into the kernel virtual
73 * address space. Needed when the kernel wants to access high addresses
76 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
77 * have to convert them into an offset in a page-aligned mapping, but the
78 * caller shouldn't need to know that small detail.
80 void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
84 struct vm_struct *area;
85 unsigned long offset, last_addr;
88 /* Don't allow wraparound or zero size */
89 last_addr = phys_addr + size - 1;
90 if (!size || last_addr < phys_addr)
94 * Don't remap the low PCI/ISA area, it's always mapped..
96 if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
97 return (__force void __iomem *)phys_to_virt(phys_addr);
101 * Don't allow anybody to remap normal RAM that we're using..
103 if (phys_addr <= virt_to_phys(high_memory - 1)) {
104 char *t_addr, *t_end;
107 t_addr = __va(phys_addr);
108 t_end = t_addr + (size - 1);
110 for (page = virt_to_page(t_addr);
111 page <= virt_to_page(t_end); page++)
112 if (!PageReserved(page))
117 pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
120 * Mappings have to be page-aligned
122 offset = phys_addr & ~PAGE_MASK;
123 phys_addr &= PAGE_MASK;
124 size = PAGE_ALIGN(last_addr+1) - phys_addr;
129 area = get_vm_area(size, VM_IOREMAP);
132 area->phys_addr = phys_addr;
133 addr = (void __iomem *) area->addr;
134 if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
135 phys_addr, pgprot)) {
136 remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
140 if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
145 return (void __iomem *) (offset + (char __iomem *)addr);
147 EXPORT_SYMBOL(__ioremap);
150 * ioremap_nocache - map bus memory into CPU space
151 * @offset: bus address of the memory
152 * @size: size of the resource to map
154 * ioremap_nocache performs a platform specific sequence of operations to
155 * make bus memory CPU accessible via the readb/readw/readl/writeb/
156 * writew/writel functions and the other mmio helpers. The returned
157 * address is not guaranteed to be usable directly as a virtual
160 * This version of ioremap ensures that the memory is marked uncachable
161 * on the CPU as well as honouring existing caching rules from things like
162 * the PCI bus. Note that there are other caches and buffers on many
163 * busses. In particular driver authors should read up on PCI writes
165 * It's useful if some control registers are in such an area and
166 * write combining or read caching is not desirable:
168 * Must be freed with iounmap.
170 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
172 return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
174 EXPORT_SYMBOL(ioremap_nocache);
177 * iounmap - Free a IO remapping
178 * @addr: virtual address from ioremap_*
180 * Caller must ensure there is only one unmapping for the same pointer.
182 void iounmap(volatile void __iomem *addr)
184 struct vm_struct *p, *o;
186 if ((void __force *)addr <= high_memory)
190 * __ioremap special-cases the PCI/ISA range by not instantiating a
191 * vm_area and by simply returning an address into the kernel mapping
192 * of ISA space. So handle that here.
194 if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
195 addr < phys_to_virt(ISA_END_ADDRESS))
198 addr = (volatile void __iomem *)
199 (PAGE_MASK & (unsigned long __force)addr);
201 /* Use the vm area unlocked, assuming the caller
202 ensures there isn't another iounmap for the same address
203 in parallel. Reuse of the virtual address is prevented by
204 leaving it in the global lists until we're done with it.
205 cpa takes care of the direct mappings. */
206 read_lock(&vmlist_lock);
207 for (p = vmlist; p; p = p->next) {
211 read_unlock(&vmlist_lock);
214 printk(KERN_ERR "iounmap: bad address %p\n", addr);
219 /* Reset the direct mapping. Can block */
220 ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
222 /* Finally remove it */
223 o = remove_vm_area((void *)addr);
224 BUG_ON(p != o || o == NULL);
227 EXPORT_SYMBOL(iounmap);
231 int __initdata early_ioremap_debug;
233 static int __init early_ioremap_debug_setup(char *str)
235 early_ioremap_debug = 1;
239 early_param("early_ioremap_debug", early_ioremap_debug_setup);
241 static __initdata int after_paging_init;
242 static __initdata unsigned long bm_pte[1024]
243 __attribute__((aligned(PAGE_SIZE)));
245 static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
247 return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
250 static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
252 return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
255 void __init early_ioremap_init(void)
259 if (early_ioremap_debug)
260 printk(KERN_DEBUG "early_ioremap_init()\n");
262 pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
263 *pgd = __pa(bm_pte) | _PAGE_TABLE;
264 memset(bm_pte, 0, sizeof(bm_pte));
266 * The boot-ioremap range spans multiple pgds, for which
267 * we are not prepared:
269 if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
271 printk(KERN_WARNING "pgd %p != %p\n",
272 pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
273 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
274 fix_to_virt(FIX_BTMAP_BEGIN));
275 printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
276 fix_to_virt(FIX_BTMAP_END));
278 printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
279 printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
284 void __init early_ioremap_clear(void)
288 if (early_ioremap_debug)
289 printk(KERN_DEBUG "early_ioremap_clear()\n");
291 pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
296 void __init early_ioremap_reset(void)
298 enum fixed_addresses idx;
299 unsigned long *pte, phys, addr;
301 after_paging_init = 1;
302 for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
303 addr = fix_to_virt(idx);
304 pte = early_ioremap_pte(addr);
305 if (!*pte & _PAGE_PRESENT) {
306 phys = *pte & PAGE_MASK;
307 set_fixmap(idx, phys);
312 static void __init __early_set_fixmap(enum fixed_addresses idx,
313 unsigned long phys, pgprot_t flags)
315 unsigned long *pte, addr = __fix_to_virt(idx);
317 if (idx >= __end_of_fixed_addresses) {
321 pte = early_ioremap_pte(addr);
322 if (pgprot_val(flags))
323 *pte = (phys & PAGE_MASK) | pgprot_val(flags);
326 __flush_tlb_one(addr);
329 static inline void __init early_set_fixmap(enum fixed_addresses idx,
332 if (after_paging_init)
333 set_fixmap(idx, phys);
335 __early_set_fixmap(idx, phys, PAGE_KERNEL);
338 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
340 if (after_paging_init)
343 __early_set_fixmap(idx, 0, __pgprot(0));
347 int __initdata early_ioremap_nested;
349 static int __init check_early_ioremap_leak(void)
351 if (!early_ioremap_nested)
355 "Debug warning: early ioremap leak of %d areas detected.\n",
356 early_ioremap_nested);
358 "please boot with early_ioremap_debug and report the dmesg.\n");
363 late_initcall(check_early_ioremap_leak);
365 void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
367 unsigned long offset, last_addr;
368 unsigned int nrpages, nesting;
369 enum fixed_addresses idx0, idx;
371 WARN_ON(system_state != SYSTEM_BOOTING);
373 nesting = early_ioremap_nested;
374 if (early_ioremap_debug) {
375 printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
376 phys_addr, size, nesting);
380 /* Don't allow wraparound or zero size */
381 last_addr = phys_addr + size - 1;
382 if (!size || last_addr < phys_addr) {
387 if (nesting >= FIX_BTMAPS_NESTING) {
391 early_ioremap_nested++;
393 * Mappings have to be page-aligned
395 offset = phys_addr & ~PAGE_MASK;
396 phys_addr &= PAGE_MASK;
397 size = PAGE_ALIGN(last_addr) - phys_addr;
400 * Mappings have to fit in the FIX_BTMAP area.
402 nrpages = size >> PAGE_SHIFT;
403 if (nrpages > NR_FIX_BTMAPS) {
411 idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
413 while (nrpages > 0) {
414 early_set_fixmap(idx, phys_addr);
415 phys_addr += PAGE_SIZE;
419 if (early_ioremap_debug)
420 printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
422 return (void *) (offset + fix_to_virt(idx0));
425 void __init early_iounmap(void *addr, unsigned long size)
427 unsigned long virt_addr;
428 unsigned long offset;
429 unsigned int nrpages;
430 enum fixed_addresses idx;
431 unsigned int nesting;
433 nesting = --early_ioremap_nested;
434 WARN_ON(nesting < 0);
436 if (early_ioremap_debug) {
437 printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
442 virt_addr = (unsigned long)addr;
443 if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
447 offset = virt_addr & ~PAGE_MASK;
448 nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
450 idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
451 while (nrpages > 0) {
452 early_clear_fixmap(idx);
/*
 * Referenced by the fixmap accessors when a fixmap index is out of
 * range; should never actually be reached at runtime, so warn loudly
 * if it is.
 */
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}
463 #endif /* CONFIG_X86_32 */