err.no Git - linux-2.6/commitdiff
Merge changes from linux-2.6 by hand
author Paul Mackerras <paulus@samba.org>
Sat, 22 Oct 2005 06:51:34 +0000 (16:51 +1000)
committer Paul Mackerras <paulus@samba.org>
Sat, 22 Oct 2005 06:51:34 +0000 (16:51 +1000)
arch/powerpc/kernel/cputable.c
arch/powerpc/mm/mem.c
arch/powerpc/platforms/iseries/htab.c
arch/powerpc/platforms/powermac/setup.c
include/asm-powerpc/cputable.h

index 214f3b088edfcc62c0ce9709afb8192898629490,6b76cf58d9e068c3ac6687a7f20d1bf6c61bac9c..1fb80baebc8763379f8ee286211bfc8f5125de8e
@@@ -301,11 -88,13 +301,11 @@@ struct cpu_spec  cpu_specs[] = 
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x00010000,
                .cpu_name               = "601",
 -              .cpu_features           = CPU_FTR_COMMON | CPU_FTR_601 |
 -                      CPU_FTR_HPTE_TABLE,
 -              .cpu_user_features      = COMMON_PPC | PPC_FEATURE_601_INSTR |
 +              .cpu_features           = CPU_FTRS_PPC601,
 +              .cpu_user_features      = COMMON_USER | PPC_FEATURE_601_INSTR |
-                       PPC_FEATURE_UNIFIED_CACHE,
+                       PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
 -              .cpu_setup              = __setup_cpu_601
        },
        {       /* 603 */
                .pvr_mask               = 0xffff0000,
                .pvr_mask               = 0xffffff00,
                .pvr_value              = 0x00201400,
                .cpu_name               = "403GCX",
 -              .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
 -                      CPU_FTR_USE_TB,
 +              .cpu_features           = CPU_FTRS_40X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = PPC_FEATURE_32 |
+                       PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
        },
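
This hunk replaces the open-coded CPU_FTR_* masks in cpu_specs[] with the consolidated CPU_FTRS_* values defined in include/asm-powerpc/cputable.h (further down in this diff) and adds PPC_FEATURE_NO_TB to the user features of CPUs without a timebase. As the cputable.h hunk notes, an entry is selected by masking the processor version register: (PVR & pvr_mask) == pvr_value. A minimal sketch of that lookup, using a hypothetical helper rather than the kernel's identify_cpu():

	/* Sketch only: how a cpu_specs[] entry is matched against the PVR.
	 * find_cpu_spec() is a hypothetical helper, not kernel code. */
	static struct cpu_spec *find_cpu_spec(struct cpu_spec *specs, int n,
					      unsigned int pvr)
	{
		int i;

		for (i = 0; i < n; i++)
			if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
				return &specs[i];  /* e.g. PVR 0x00010001 matches the "601" entry */
		return NULL;
	}
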
index 55b5860ed3c98c21c1633f915bdb0b0be94de1c7,0000000000000000000000000000000000000000..5e9206715f09f5963f8bf6f6ba095dc07a22e65b
mode 100644,000000..100644
--- /dev/null
@@@ -1,485 -1,0 +1,484 @@@
-       __hash_page(address, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-                   0x300, local);
 +/*
 + *  PowerPC version
 + *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 + *
 + *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 + *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 + *    Copyright (C) 1996 Paul Mackerras
 + *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 + *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 + *
 + *  Derived from "arch/i386/mm/init.c"
 + *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 + *
 + *  This program is free software; you can redistribute it and/or
 + *  modify it under the terms of the GNU General Public License
 + *  as published by the Free Software Foundation; either version
 + *  2 of the License, or (at your option) any later version.
 + *
 + */
 +
 +#include <linux/config.h>
 +#include <linux/module.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
 +#include <linux/errno.h>
 +#include <linux/string.h>
 +#include <linux/types.h>
 +#include <linux/mm.h>
 +#include <linux/stddef.h>
 +#include <linux/init.h>
 +#include <linux/bootmem.h>
 +#include <linux/highmem.h>
 +#include <linux/initrd.h>
 +#include <linux/pagemap.h>
 +
 +#include <asm/pgalloc.h>
 +#include <asm/prom.h>
 +#include <asm/io.h>
 +#include <asm/mmu_context.h>
 +#include <asm/pgtable.h>
 +#include <asm/mmu.h>
 +#include <asm/smp.h>
 +#include <asm/machdep.h>
 +#include <asm/btext.h>
 +#include <asm/tlb.h>
 +#include <asm/bootinfo.h>
 +#include <asm/prom.h>
 +#include <asm/lmb.h>
 +#include <asm/sections.h>
 +#ifdef CONFIG_PPC64
 +#include <asm/vdso.h>
 +#endif
 +
 +#include "mmu_decl.h"
 +
 +#ifndef CPU_FTR_COHERENT_ICACHE
 +#define CPU_FTR_COHERENT_ICACHE       0       /* XXX for now */
 +#define CPU_FTR_NOEXECUTE     0
 +#endif
 +
 +int init_bootmem_done;
 +int mem_init_done;
 +
 +/*
 + * This is called by /dev/mem to know if a given address has to
 + * be mapped non-cacheable or not
 + */
 +int page_is_ram(unsigned long pfn)
 +{
 +      unsigned long paddr = (pfn << PAGE_SHIFT);
 +
 +#ifndef CONFIG_PPC64  /* XXX for now */
 +      return paddr < __pa(high_memory);
 +#else
 +      int i;
 +      for (i=0; i < lmb.memory.cnt; i++) {
 +              unsigned long base;
 +
 +              base = lmb.memory.region[i].base;
 +
 +              if ((paddr >= base) &&
 +                      (paddr < (base + lmb.memory.region[i].size))) {
 +                      return 1;
 +              }
 +      }
 +
 +      return 0;
 +#endif
 +}
 +EXPORT_SYMBOL(page_is_ram);
 +
 +pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 +                            unsigned long size, pgprot_t vma_prot)
 +{
 +      if (ppc_md.phys_mem_access_prot)
 +              return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
 +
 +      if (!page_is_ram(addr >> PAGE_SHIFT))
 +              vma_prot = __pgprot(pgprot_val(vma_prot)
 +                                  | _PAGE_GUARDED | _PAGE_NO_CACHE);
 +      return vma_prot;
 +}
 +EXPORT_SYMBOL(phys_mem_access_prot);
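
page_is_ram() and phys_mem_access_prot() let /dev/mem-style mappers mark physical ranges that are not RAM as guarded and non-cacheable. A hedged sketch of how a caller could use them; the mmap handler below is illustrative only and is not the actual drivers/char/mem.c code:

	/* Illustrative only: consult phys_mem_access_prot() before remapping
	 * physical pages into userspace. */
	static int example_phys_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		vma->vm_page_prot = phys_mem_access_prot(file,
					vma->vm_pgoff << PAGE_SHIFT,
					size, vma->vm_page_prot);
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       size, vma->vm_page_prot);
	}
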
 +
 +void show_mem(void)
 +{
 +      unsigned long total = 0, reserved = 0;
 +      unsigned long shared = 0, cached = 0;
 +      unsigned long highmem = 0;
 +      struct page *page;
 +      pg_data_t *pgdat;
 +      unsigned long i;
 +
 +      printk("Mem-info:\n");
 +      show_free_areas();
 +      printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 +      for_each_pgdat(pgdat) {
 +              for (i = 0; i < pgdat->node_spanned_pages; i++) {
 +                      page = pgdat_page_nr(pgdat, i);
 +                      total++;
 +                      if (PageHighMem(page))
 +                              highmem++;
 +                      if (PageReserved(page))
 +                              reserved++;
 +                      else if (PageSwapCache(page))
 +                              cached++;
 +                      else if (page_count(page))
 +                              shared += page_count(page) - 1;
 +              }
 +      }
 +      printk("%ld pages of RAM\n", total);
 +#ifdef CONFIG_HIGHMEM
 +      printk("%ld pages of HIGHMEM\n", highmem);
 +#endif
 +      printk("%ld reserved pages\n", reserved);
 +      printk("%ld pages shared\n", shared);
 +      printk("%ld pages swap cached\n", cached);
 +}
 +
 +/*
 + * Initialize the bootmem system and give it all the memory we
 + * have available.  If we are using highmem, we only put the
 + * lowmem into the bootmem system.
 + */
 +#ifndef CONFIG_NEED_MULTIPLE_NODES
 +void __init do_init_bootmem(void)
 +{
 +      unsigned long i;
 +      unsigned long start, bootmap_pages;
 +      unsigned long total_pages;
 +      int boot_mapsize;
 +
 +      max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
 +#ifdef CONFIG_HIGHMEM
 +      total_pages = total_lowmem >> PAGE_SHIFT;
 +#endif
 +
 +      /*
 +       * Find an area to use for the bootmem bitmap.  Calculate the size of
 +       * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
 +       * Add 1 additional page in case the address isn't page-aligned.
 +       */
 +      bootmap_pages = bootmem_bootmap_pages(total_pages);
 +
 +      start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 +      BUG_ON(!start);
 +
 +      boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
 +
 +      /* Add all physical memory to the bootmem map, mark each area
 +       * present.
 +       */
 +      for (i = 0; i < lmb.memory.cnt; i++) {
 +              unsigned long base = lmb.memory.region[i].base;
 +              unsigned long size = lmb_size_bytes(&lmb.memory, i);
 +#ifdef CONFIG_HIGHMEM
 +              if (base >= total_lowmem)
 +                      continue;
 +              if (base + size > total_lowmem)
 +                      size = total_lowmem - base;
 +#endif
 +              free_bootmem(base, size);
 +      }
 +
 +      /* reserve the sections we're already using */
 +      for (i = 0; i < lmb.reserved.cnt; i++)
 +              reserve_bootmem(lmb.reserved.region[i].base,
 +                              lmb_size_bytes(&lmb.reserved, i));
 +
 +      /* XXX need to clip this if using highmem? */
 +      for (i = 0; i < lmb.memory.cnt; i++)
 +              memory_present(0, lmb_start_pfn(&lmb.memory, i),
 +                             lmb_end_pfn(&lmb.memory, i));
 +      init_bootmem_done = 1;
 +}
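
As a worked example of the bitmap sizing described in do_init_bootmem() above: with 512 MB of lowmem and 4 KB pages there are 131072 page frames, so the bitmap needs 131072 / 8 = 16384 bytes; bootmem_bootmap_pages() rounds that up to whole pages, i.e. 4 pages of 4 KB, and lmb_alloc() carves that region out of LMB before init_bootmem() is handed the starting pfn.
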
 +
 +/*
 + * paging_init() sets up the page tables - in fact we've already done this.
 + */
 +void __init paging_init(void)
 +{
 +      unsigned long zones_size[MAX_NR_ZONES];
 +      unsigned long zholes_size[MAX_NR_ZONES];
 +      unsigned long total_ram = lmb_phys_mem_size();
 +      unsigned long top_of_ram = lmb_end_of_DRAM();
 +
 +#ifdef CONFIG_HIGHMEM
 +      map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
 +      pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
 +                      (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
 +      map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
 +      kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
 +                      (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
 +      kmap_prot = PAGE_KERNEL;
 +#endif /* CONFIG_HIGHMEM */
 +
 +      printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 +             top_of_ram, total_ram);
 +      printk(KERN_INFO "Memory hole size: %ldMB\n",
 +             (top_of_ram - total_ram) >> 20);
 +      /*
 +       * All pages are DMA-able so we put them all in the DMA zone.
 +       */
 +      memset(zones_size, 0, sizeof(zones_size));
 +      memset(zholes_size, 0, sizeof(zholes_size));
 +
 +      zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 +      zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 +
 +#ifdef CONFIG_HIGHMEM
 +      zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
 +      zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
 +      zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 +#else
 +      zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 +      zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
 +#endif /* CONFIG_HIGHMEM */
 +
 +      free_area_init_node(0, NODE_DATA(0), zones_size,
 +                          __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 +}
 +#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 +
 +void __init mem_init(void)
 +{
 +#ifdef CONFIG_NEED_MULTIPLE_NODES
 +      int nid;
 +#endif
 +      pg_data_t *pgdat;
 +      unsigned long i;
 +      struct page *page;
 +      unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 +
 +      num_physpages = max_pfn;        /* RAM is assumed contiguous */
 +      high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 +
 +#ifdef CONFIG_NEED_MULTIPLE_NODES
 +        for_each_online_node(nid) {
 +              if (NODE_DATA(nid)->node_spanned_pages != 0) {
 +                      printk("freeing bootmem node %x\n", nid);
 +                      totalram_pages +=
 +                              free_all_bootmem_node(NODE_DATA(nid));
 +              }
 +      }
 +#else
 +      max_mapnr = num_physpages;
 +      totalram_pages += free_all_bootmem();
 +#endif
 +      for_each_pgdat(pgdat) {
 +              for (i = 0; i < pgdat->node_spanned_pages; i++) {
 +                      page = pgdat_page_nr(pgdat, i);
 +                      if (PageReserved(page))
 +                              reservedpages++;
 +              }
 +      }
 +
 +      codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
 +      datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
 +      initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
 +      bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
 +
 +#ifdef CONFIG_HIGHMEM
 +      {
 +              unsigned long pfn, highmem_mapnr;
 +
 +              highmem_mapnr = total_lowmem >> PAGE_SHIFT;
 +              for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 +                      struct page *page = pfn_to_page(pfn);
 +
 +                      ClearPageReserved(page);
 +                      set_page_count(page, 1);
 +                      __free_page(page);
 +                      totalhigh_pages++;
 +              }
 +              totalram_pages += totalhigh_pages;
 +              printk(KERN_INFO "High memory: %luk\n",
 +                     totalhigh_pages << (PAGE_SHIFT-10));
 +      }
 +#endif /* CONFIG_HIGHMEM */
 +
 +      printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
 +             "%luk reserved, %luk data, %luk bss, %luk init)\n",
 +              (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
 +              num_physpages << (PAGE_SHIFT-10),
 +              codesize >> 10,
 +              reservedpages << (PAGE_SHIFT-10),
 +              datasize >> 10,
 +              bsssize >> 10,
 +              initsize >> 10);
 +
 +      mem_init_done = 1;
 +
 +#ifdef CONFIG_PPC64
 +      /* Initialize the vDSO */
 +      vdso_init();
 +#endif
 +}
 +
 +/*
 + * This is called when a page has been modified by the kernel.
 + * It just marks the page as not i-cache clean.  We do the i-cache
 + * flush later when the page is given to a user process, if necessary.
 + */
 +void flush_dcache_page(struct page *page)
 +{
 +      if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 +              return;
 +      /* avoid an atomic op if possible */
 +      if (test_bit(PG_arch_1, &page->flags))
 +              clear_bit(PG_arch_1, &page->flags);
 +}
 +EXPORT_SYMBOL(flush_dcache_page);
 +
 +void flush_dcache_icache_page(struct page *page)
 +{
 +#ifdef CONFIG_BOOKE
 +      void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
 +      __flush_dcache_icache(start);
 +      kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
 +#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 +      /* On 8xx and 64-bit there is no need to kmap since highmem is not supported */
 +      __flush_dcache_icache(page_address(page)); 
 +#else
 +      __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 +#endif
 +
 +}
 +void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 +{
 +      clear_page(page);
 +
 +      if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 +              return;
 +      /*
 +       * We shouldn't have to do this, but some versions of glibc
 +       * require it (ld.so assumes zero filled pages are icache clean)
 +       * - Anton
 +       */
 +
 +      /* avoid an atomic op if possible */
 +      if (test_bit(PG_arch_1, &pg->flags))
 +              clear_bit(PG_arch_1, &pg->flags);
 +}
 +EXPORT_SYMBOL(clear_user_page);
 +
 +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 +                  struct page *pg)
 +{
 +      copy_page(vto, vfrom);
 +
 +      /*
 +       * We should be able to use the following optimisation, however
 +       * there are two problems.
 +       * Firstly a bug in some versions of binutils meant PLT sections
 +       * were not marked executable.
 +       * Secondly the first word in the GOT section is blrl, used
 +       * to establish the GOT address. Until recently the GOT was
 +       * not marked executable.
 +       * - Anton
 +       */
 +#if 0
 +      if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
 +              return;
 +#endif
 +
 +      if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 +              return;
 +
 +      /* avoid an atomic op if possible */
 +      if (test_bit(PG_arch_1, &pg->flags))
 +              clear_bit(PG_arch_1, &pg->flags);
 +}
 +
 +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 +                           unsigned long addr, int len)
 +{
 +      unsigned long maddr;
 +
 +      maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
 +      flush_icache_range(maddr, maddr + len);
 +      kunmap(page);
 +}
 +EXPORT_SYMBOL(flush_icache_user_range);
 +
 +/*
 + * This is called at the end of handling a user page fault, when the
 + * fault has been handled by updating a PTE in the linux page tables.
 + * We use it to preload an HPTE into the hash table corresponding to
 + * the updated linux PTE.
 + * 
 + * This must always be called with the mm->page_table_lock held
 + */
 +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 +                    pte_t pte)
 +{
 +      /* handle i-cache coherency */
 +      unsigned long pfn = pte_pfn(pte);
 +#ifdef CONFIG_PPC32
 +      pmd_t *pmd;
 +#else
 +      unsigned long vsid;
 +      void *pgdir;
 +      pte_t *ptep;
 +      int local = 0;
 +      cpumask_t tmp;
 +      unsigned long flags;
 +#endif
 +
 +      /* handle i-cache coherency */
 +      if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
 +          !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 +          pfn_valid(pfn)) {
 +              struct page *page = pfn_to_page(pfn);
 +              if (!PageReserved(page)
 +                  && !test_bit(PG_arch_1, &page->flags)) {
 +                      if (vma->vm_mm == current->active_mm) {
 +#ifdef CONFIG_8xx
 +                      /* On 8xx, cache control instructions (particularly 
 +                       * "dcbst" from flush_dcache_icache) fault as write 
 +                       * operation if there is an unpopulated TLB entry 
 +                       * for the address in question. To workaround that, 
 +                       * we invalidate the TLB here, thus avoiding dcbst 
 +                       * misbehaviour.
 +                       */
 +                              _tlbie(address);
 +#endif
 +                              __flush_dcache_icache((void *) address);
 +                      } else
 +                              flush_dcache_icache_page(page);
 +                      set_bit(PG_arch_1, &page->flags);
 +              }
 +      }
 +
 +#ifdef CONFIG_PPC_STD_MMU
 +      /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
 +      if (!pte_young(pte) || address >= TASK_SIZE)
 +              return;
 +#ifdef CONFIG_PPC32
 +      if (Hash == 0)
 +              return;
 +      pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
 +      if (!pmd_none(*pmd))
 +              add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
 +#else
 +      pgdir = vma->vm_mm->pgd;
 +      if (pgdir == NULL)
 +              return;
 +
 +      ptep = find_linux_pte(pgdir, address);
 +      if (!ptep)
 +              return;
 +
 +      vsid = get_vsid(vma->vm_mm->context.id, address);
 +
 +      local_irq_save(flags);
 +      tmp = cpumask_of_cpu(smp_processor_id());
 +      if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 +              local = 1;
 +
++      __hash_page(address, 0, vsid, ptep, 0x300, local);
 +      local_irq_restore(flags);
 +#endif
 +#endif
 +}
arch/powerpc/platforms/iseries/htab.c: Simple merge
index da0cb165dfc601c8a7608aee4f12f245b4d5fe04,0000000000000000000000000000000000000000..50f5dd78790095e4184c2e0626c978d68ffdc7ae
mode 100644,000000..100644
--- /dev/null
@@@ -1,787 -1,0 +1,789 @@@
-       struct device_node *np;
 +/*
 + *  Powermac setup and early boot code plus other random bits.
 + *
 + *  PowerPC version
 + *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 + *
 + *  Adapted for Power Macintosh by Paul Mackerras
 + *    Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
 + *
 + *  Derived from "arch/alpha/kernel/setup.c"
 + *    Copyright (C) 1995 Linus Torvalds
 + *
 + *  Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
 + *
 + *  This program is free software; you can redistribute it and/or
 + *  modify it under the terms of the GNU General Public License
 + *  as published by the Free Software Foundation; either version
 + *  2 of the License, or (at your option) any later version.
 + *
 + */
 +
 +/*
 + * bootup setup stuff..
 + */
 +
 +#include <linux/config.h>
 +#include <linux/init.h>
 +#include <linux/errno.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
 +#include <linux/mm.h>
 +#include <linux/stddef.h>
 +#include <linux/unistd.h>
 +#include <linux/ptrace.h>
 +#include <linux/slab.h>
 +#include <linux/user.h>
 +#include <linux/a.out.h>
 +#include <linux/tty.h>
 +#include <linux/string.h>
 +#include <linux/delay.h>
 +#include <linux/ioport.h>
 +#include <linux/major.h>
 +#include <linux/initrd.h>
 +#include <linux/vt_kern.h>
 +#include <linux/console.h>
 +#include <linux/ide.h>
 +#include <linux/pci.h>
 +#include <linux/adb.h>
 +#include <linux/cuda.h>
 +#include <linux/pmu.h>
 +#include <linux/irq.h>
 +#include <linux/seq_file.h>
 +#include <linux/root_dev.h>
 +#include <linux/bitops.h>
 +#include <linux/suspend.h>
 +
 +#include <asm/reg.h>
 +#include <asm/sections.h>
 +#include <asm/prom.h>
 +#include <asm/system.h>
 +#include <asm/pgtable.h>
 +#include <asm/io.h>
 +#include <asm/pci-bridge.h>
 +#include <asm/ohare.h>
 +#include <asm/mediabay.h>
 +#include <asm/machdep.h>
 +#include <asm/dma.h>
 +#include <asm/cputable.h>
 +#include <asm/btext.h>
 +#include <asm/pmac_feature.h>
 +#include <asm/time.h>
 +#include <asm/of_device.h>
 +#include <asm/mmu_context.h>
 +#include <asm/iommu.h>
 +#include <asm/smu.h>
 +#include <asm/pmc.h>
 +#include <asm/mpic.h>
 +
 +#include "pmac.h"
 +
 +#undef SHOW_GATWICK_IRQS
 +
 +unsigned char drive_info;
 +
 +int ppc_override_l2cr = 0;
 +int ppc_override_l2cr_value;
 +int has_l2cache = 0;
 +
 +int pmac_newworld = 1;
 +
 +static int current_root_goodness = -1;
 +
 +extern int pmac_newworld;
 +extern struct machdep_calls pmac_md;
 +
 +#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
 +
 +#ifdef CONFIG_PPC64
 +#include <asm/udbg.h>
 +int sccdbg;
 +#endif
 +
 +extern void zs_kgdb_hook(int tty_num);
 +
 +sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
 +EXPORT_SYMBOL(sys_ctrler);
 +
 +#ifdef CONFIG_PMAC_SMU
 +unsigned long smu_cmdbuf_abs;
 +EXPORT_SYMBOL(smu_cmdbuf_abs);
 +#endif
 +
 +#ifdef CONFIG_SMP
 +extern struct smp_ops_t psurge_smp_ops;
 +extern struct smp_ops_t core99_smp_ops;
 +#endif /* CONFIG_SMP */
 +
 +static void pmac_show_cpuinfo(struct seq_file *m)
 +{
 +      struct device_node *np;
 +      char *pp;
 +      int plen;
 +      int mbmodel;
 +      unsigned int mbflags;
 +      char* mbname;
 +
 +      mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
 +                                  PMAC_MB_INFO_MODEL, 0);
 +      mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
 +                                  PMAC_MB_INFO_FLAGS, 0);
 +      if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
 +                            (long) &mbname) != 0)
 +              mbname = "Unknown";
 +
 +      /* find motherboard type */
 +      seq_printf(m, "machine\t\t: ");
 +      np = of_find_node_by_path("/");
 +      if (np != NULL) {
 +              pp = (char *) get_property(np, "model", NULL);
 +              if (pp != NULL)
 +                      seq_printf(m, "%s\n", pp);
 +              else
 +                      seq_printf(m, "PowerMac\n");
 +              pp = (char *) get_property(np, "compatible", &plen);
 +              if (pp != NULL) {
 +                      seq_printf(m, "motherboard\t:");
 +                      while (plen > 0) {
 +                              int l = strlen(pp) + 1;
 +                              seq_printf(m, " %s", pp);
 +                              plen -= l;
 +                              pp += l;
 +                      }
 +                      seq_printf(m, "\n");
 +              }
 +              of_node_put(np);
 +      } else
 +              seq_printf(m, "PowerMac\n");
 +
 +      /* print parsed model */
 +      seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
 +      seq_printf(m, "pmac flags\t: %08x\n", mbflags);
 +
 +      /* find l2 cache info */
 +      np = of_find_node_by_name(NULL, "l2-cache");
 +      if (np == NULL)
 +              np = of_find_node_by_type(NULL, "cache");
 +      if (np != NULL) {
 +              unsigned int *ic = (unsigned int *)
 +                      get_property(np, "i-cache-size", NULL);
 +              unsigned int *dc = (unsigned int *)
 +                      get_property(np, "d-cache-size", NULL);
 +              seq_printf(m, "L2 cache\t:");
 +              has_l2cache = 1;
 +              if (get_property(np, "cache-unified", NULL) != 0 && dc) {
 +                      seq_printf(m, " %dK unified", *dc / 1024);
 +              } else {
 +                      if (ic)
 +                              seq_printf(m, " %dK instruction", *ic / 1024);
 +                      if (dc)
 +                              seq_printf(m, "%s %dK data",
 +                                         (ic? " +": ""), *dc / 1024);
 +              }
 +              pp = get_property(np, "ram-type", NULL);
 +              if (pp)
 +                      seq_printf(m, " %s", pp);
 +              seq_printf(m, "\n");
 +              of_node_put(np);
 +      }
 +
 +      /* Indicate newworld/oldworld */
 +      seq_printf(m, "pmac-generation\t: %s\n",
 +                 pmac_newworld ? "NewWorld" : "OldWorld");
 +}
 +
 +static void pmac_show_percpuinfo(struct seq_file *m, int i)
 +{
 +#ifdef CONFIG_CPU_FREQ_PMAC
 +      extern unsigned int pmac_get_one_cpufreq(int i);
 +      unsigned int freq = pmac_get_one_cpufreq(i);
 +      if (freq != 0) {
 +              seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
 +              return;
 +      }
 +#endif /* CONFIG_CPU_FREQ_PMAC */
 +#ifdef CONFIG_PPC32
 +      of_show_percpuinfo(m, i);
 +#endif
 +}
 +
 +#ifndef CONFIG_ADB_CUDA
 +int find_via_cuda(void)
 +{
 +      if (!find_devices("via-cuda"))
 +              return 0;
 +      printk("WARNING ! Your machine is CUDA-based but your kernel\n");
 +      printk("          wasn't compiled with CONFIG_ADB_CUDA option !\n");
 +      return 0;
 +}
 +#endif
 +
 +#ifndef CONFIG_ADB_PMU
 +int find_via_pmu(void)
 +{
 +      if (!find_devices("via-pmu"))
 +              return 0;
 +      printk("WARNING ! Your machine is PMU-based but your kernel\n");
 +      printk("          wasn't compiled with CONFIG_ADB_PMU option !\n");
 +      return 0;
 +}
 +#endif
 +
 +#ifndef CONFIG_PMAC_SMU
 +int smu_init(void)
 +{
 +      /* should check and warn if SMU is present */
 +      return 0;
 +}
 +#endif
 +
 +#ifdef CONFIG_PPC32
 +static volatile u32 *sysctrl_regs;
 +
 +static void __init ohare_init(void)
 +{
 +      /* this area has the CPU identification register
 +         and some registers used by smp boards */
 +      sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
 +
 +      /*
 +       * Turn on the L2 cache.
 +       * We assume that we have a PSX memory controller iff
 +       * we have an ohare I/O controller.
 +       */
 +      if (find_devices("ohare") != NULL) {
 +              if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
 +                      if (sysctrl_regs[4] & 0x10)
 +                              sysctrl_regs[4] |= 0x04000020;
 +                      else
 +                              sysctrl_regs[4] |= 0x04000000;
 +                      if(has_l2cache)
 +                              printk(KERN_INFO "Level 2 cache enabled\n");
 +              }
 +      }
 +}
 +
 +static void __init l2cr_init(void)
 +{
 +      /* Checks "l2cr-value" property in the registry */
 +      if (cpu_has_feature(CPU_FTR_L2CR)) {
 +              struct device_node *np = find_devices("cpus");
 +              if (np == 0)
 +                      np = find_type_devices("cpu");
 +              if (np != 0) {
 +                      unsigned int *l2cr = (unsigned int *)
 +                              get_property(np, "l2cr-value", NULL);
 +                      if (l2cr != 0) {
 +                              ppc_override_l2cr = 1;
 +                              ppc_override_l2cr_value = *l2cr;
 +                              _set_L2CR(0);
 +                              _set_L2CR(ppc_override_l2cr_value);
 +                      }
 +              }
 +      }
 +
 +      if (ppc_override_l2cr)
 +              printk(KERN_INFO "L2CR overridden (0x%x), "
 +                     "backside cache is %s\n",
 +                     ppc_override_l2cr_value,
 +                     (ppc_override_l2cr_value & 0x80000000)
 +                              ? "enabled" : "disabled");
 +}
 +#endif
 +
 +void __init pmac_setup_arch(void)
 +{
 +      struct device_node *cpu;
 +      int *fp;
 +      unsigned long pvr;
 +
 +      pvr = PVR_VER(mfspr(SPRN_PVR));
 +
 +      /* Set loops_per_jiffy to a half-way reasonable value,
 +         for use until calibrate_delay gets called. */
 +      loops_per_jiffy = 50000000 / HZ;
 +      cpu = of_find_node_by_type(NULL, "cpu");
 +      if (cpu != NULL) {
 +              fp = (int *) get_property(cpu, "clock-frequency", NULL);
 +              if (fp != NULL) {
 +                      if (pvr >= 0x30 && pvr < 0x80)
 +                              /* PPC970 etc. */
 +                              loops_per_jiffy = *fp / (3 * HZ);
 +                      else if (pvr == 4 || pvr >= 8)
 +                              /* 604, G3, G4 etc. */
 +                              loops_per_jiffy = *fp / HZ;
 +                      else
 +                              /* 601, 603, etc. */
 +                              loops_per_jiffy = *fp / (2 * HZ);
 +              }
 +              of_node_put(cpu);
 +      }
 +
 +      /* Lookup PCI hosts */
 +      pmac_pci_init();
 +
 +#ifdef CONFIG_PPC32
 +      ohare_init();
 +      l2cr_init();
 +#endif /* CONFIG_PPC32 */
 +
 +#ifdef CONFIG_PPC64
 +      /* Probe motherboard chipset */
 +      /* this is done earlier in setup_arch for 32-bit */
 +      pmac_feature_init();
 +
 +      /* We can NAP */
 +      powersave_nap = 1;
 +      printk(KERN_INFO "Using native/NAP idle loop\n");
 +#endif
 +
 +#ifdef CONFIG_KGDB
 +      zs_kgdb_hook(0);
 +#endif
 +
 +      find_via_cuda();
 +      find_via_pmu();
 +      smu_init();
 +
 +#ifdef CONFIG_NVRAM
 +      pmac_nvram_init();
 +#endif
 +
 +#ifdef CONFIG_PPC32
 +#ifdef CONFIG_BLK_DEV_INITRD
 +      if (initrd_start)
 +              ROOT_DEV = Root_RAM0;
 +      else
 +#endif
 +              ROOT_DEV = DEFAULT_ROOT_DEVICE;
 +#endif
 +
 +#ifdef CONFIG_SMP
 +      /* Check for Core99 */
 +      if (find_devices("uni-n") || find_devices("u3"))
 +              smp_ops = &core99_smp_ops;
 +#ifdef CONFIG_PPC32
 +      else
 +              smp_ops = &psurge_smp_ops;
 +#endif
 +#endif /* CONFIG_SMP */
 +}
 +
 +char *bootpath;
 +char *bootdevice;
 +void *boot_host;
 +int boot_target;
 +int boot_part;
 +extern dev_t boot_dev;
 +
 +#ifdef CONFIG_SCSI
 +void __init note_scsi_host(struct device_node *node, void *host)
 +{
 +      int l;
 +      char *p;
 +
 +      l = strlen(node->full_name);
 +      if (bootpath != NULL && bootdevice != NULL
 +          && strncmp(node->full_name, bootdevice, l) == 0
 +          && (bootdevice[l] == '/' || bootdevice[l] == 0)) {
 +              boot_host = host;
 +              /*
 +               * There's a bug in OF 1.0.5.  (Why am I not surprised.)
 +               * If you pass a path like scsi/sd@1:0 to canon, it returns
 +               * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
 +               * That is, the scsi target number doesn't get preserved.
 +               * So we pick the target number out of bootpath and use that.
 +               */
 +              p = strstr(bootpath, "/sd@");
 +              if (p != NULL) {
 +                      p += 4;
 +                      boot_target = simple_strtoul(p, NULL, 10);
 +                      p = strchr(p, ':');
 +                      if (p != NULL)
 +                              boot_part = simple_strtoul(p + 1, NULL, 10);
 +              }
 +      }
 +}
 +EXPORT_SYMBOL(note_scsi_host);
 +#endif
 +
 +#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
 +static dev_t __init find_ide_boot(void)
 +{
 +      char *p;
 +      int n;
 +      dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
 +
 +      if (bootdevice == NULL)
 +              return 0;
 +      p = strrchr(bootdevice, '/');
 +      if (p == NULL)
 +              return 0;
 +      n = p - bootdevice;
 +
 +      return pmac_find_ide_boot(bootdevice, n);
 +}
 +#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
 +
 +static void __init find_boot_device(void)
 +{
 +#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
 +      boot_dev = find_ide_boot();
 +#endif
 +}
 +
 +/* TODO: Merge the suspend-to-ram with the common code !!!
 + * currently, this is a stub implementation for suspend-to-disk
 + * only
 + */
 +
 +#ifdef CONFIG_SOFTWARE_SUSPEND
 +
 +static int pmac_pm_prepare(suspend_state_t state)
 +{
 +      printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
 +
 +      return 0;
 +}
 +
 +static int pmac_pm_enter(suspend_state_t state)
 +{
 +      printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
 +
 +      /* Giveup the lazy FPU & vec so we don't have to back them
 +       * up from the low level code
 +       */
 +      enable_kernel_fp();
 +
 +#ifdef CONFIG_ALTIVEC
 +      if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
 +              enable_kernel_altivec();
 +#endif /* CONFIG_ALTIVEC */
 +
 +      return 0;
 +}
 +
 +static int pmac_pm_finish(suspend_state_t state)
 +{
 +      printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
 +
 +      /* Restore userland MMU context */
 +      set_context(current->active_mm->context, current->active_mm->pgd);
 +
 +      return 0;
 +}
 +
 +static struct pm_ops pmac_pm_ops = {
 +      .pm_disk_mode   = PM_DISK_SHUTDOWN,
 +      .prepare        = pmac_pm_prepare,
 +      .enter          = pmac_pm_enter,
 +      .finish         = pmac_pm_finish,
 +};
 +
 +#endif /* CONFIG_SOFTWARE_SUSPEND */
 +
 +static int initializing = 1;
 +
 +static int pmac_late_init(void)
 +{
 +      initializing = 0;
 +#ifdef CONFIG_SOFTWARE_SUSPEND
 +      pm_set_ops(&pmac_pm_ops);
 +#endif /* CONFIG_SOFTWARE_SUSPEND */
 +      return 0;
 +}
 +
 +late_initcall(pmac_late_init);
 +
 +/* can't be __init - can be called whenever a disk is first accessed */
 +void note_bootable_part(dev_t dev, int part, int goodness)
 +{
 +      static int found_boot = 0;
 +      char *p;
 +
 +      if (!initializing)
 +              return;
 +      if ((goodness <= current_root_goodness) &&
 +          ROOT_DEV != DEFAULT_ROOT_DEVICE)
 +              return;
 +      p = strstr(saved_command_line, "root=");
 +      if (p != NULL && (p == saved_command_line || p[-1] == ' '))
 +              return;
 +
 +      if (!found_boot) {
 +              find_boot_device();
 +              found_boot = 1;
 +      }
 +      if (!boot_dev || dev == boot_dev) {
 +              ROOT_DEV = dev + part;
 +              boot_dev = 0;
 +              current_root_goodness = goodness;
 +      }
 +}
 +
 +#ifdef CONFIG_ADB_CUDA
 +static void cuda_restart(void)
 +{
 +      struct adb_request req;
 +
 +      cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
 +      for (;;)
 +              cuda_poll();
 +}
 +
 +static void cuda_shutdown(void)
 +{
 +      struct adb_request req;
 +
 +      cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
 +      for (;;)
 +              cuda_poll();
 +}
 +
 +#else
 +#define cuda_restart()
 +#define cuda_shutdown()
 +#endif
 +
 +#ifndef CONFIG_ADB_PMU
 +#define pmu_restart()
 +#define pmu_shutdown()
 +#endif
 +
 +#ifndef CONFIG_PMAC_SMU
 +#define smu_restart()
 +#define smu_shutdown()
 +#endif
 +
 +static void pmac_restart(char *cmd)
 +{
 +      switch (sys_ctrler) {
 +      case SYS_CTRLER_CUDA:
 +              cuda_restart();
 +              break;
 +      case SYS_CTRLER_PMU:
 +              pmu_restart();
 +              break;
 +      case SYS_CTRLER_SMU:
 +              smu_restart();
 +              break;
 +      default: ;
 +      }
 +}
 +
 +static void pmac_power_off(void)
 +{
 +      switch (sys_ctrler) {
 +      case SYS_CTRLER_CUDA:
 +              cuda_shutdown();
 +              break;
 +      case SYS_CTRLER_PMU:
 +              pmu_shutdown();
 +              break;
 +      case SYS_CTRLER_SMU:
 +              smu_shutdown();
 +              break;
 +      default: ;
 +      }
 +}
 +
 +static void
 +pmac_halt(void)
 +{
 +      pmac_power_off();
 +}
 +
 +#ifdef CONFIG_PPC32
 +void __init pmac_init(void)
 +{
 +      /* isa_io_base gets set in pmac_pci_init */
 +      isa_mem_base = PMAC_ISA_MEM_BASE;
 +      pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
 +      ISA_DMA_THRESHOLD = ~0L;
 +      DMA_MODE_READ = 1;
 +      DMA_MODE_WRITE = 2;
 +
 +      ppc_md = pmac_md;
 +
 +#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
 +#ifdef CONFIG_BLK_DEV_IDE_PMAC
 +        ppc_ide_md.ide_init_hwif      = pmac_ide_init_hwif_ports;
 +        ppc_ide_md.default_io_base    = pmac_ide_get_base;
 +#endif /* CONFIG_BLK_DEV_IDE_PMAC */
 +#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
 +
 +      if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
 +
 +}
 +#endif
 +
 +/* 
 + * Early initialization.
 + */
 +static void __init pmac_init_early(void)
 +{
 +#ifdef CONFIG_PPC64
 +      /* Initialize hash table, from now on, we can take hash faults
 +       * and call ioremap
 +       */
 +      hpte_init_native();
 +
 +      /* Init SCC */
 +      if (strstr(cmd_line, "sccdbg")) {
 +              sccdbg = 1;
 +              udbg_init_scc(NULL);
 +      }
 +
 +      /* Setup interrupt mapping options */
 +      ppc64_interrupt_controller = IC_OPEN_PIC;
 +
 +      iommu_init_early_u3();
 +#endif
 +}
 +
 +static void __init pmac_progress(char *s, unsigned short hex)
 +{
 +#ifdef CONFIG_PPC64
 +      if (sccdbg) {
 +              udbg_puts(s);
 +              udbg_puts("\n");
 +              return;
 +      }
 +#endif
 +#ifdef CONFIG_BOOTX_TEXT
 +      if (boot_text_mapped) {
 +              btext_drawstring(s);
 +              btext_drawchar('\n');
 +      }
 +#endif /* CONFIG_BOOTX_TEXT */
 +}
 +
 +/*
 + * pmac has no legacy IO, anything calling this function has to
 + * fail or bad things will happen
 + */
 +static int pmac_check_legacy_ioport(unsigned int baseport)
 +{
 +      return -ENODEV;
 +}
 +
 +static int __init pmac_declare_of_platform_devices(void)
 +{
-       np = find_devices("u3");
-       if (np) {
-               for (np = np->child; np != NULL; np = np->sibling)
++      struct device_node *np, *npp;
 +
 +      np = find_devices("uni-n");
 +      if (np) {
 +              for (np = np->child; np != NULL; np = np->sibling)
 +                      if (strncmp(np->name, "i2c", 3) == 0) {
 +                              of_platform_device_create(np, "uni-n-i2c",
 +                                                        NULL);
 +                              break;
 +                      }
 +      }
 +      np = find_devices("valkyrie");
 +      if (np)
 +              of_platform_device_create(np, "valkyrie", NULL);
 +      np = find_devices("platinum");
 +      if (np)
 +              of_platform_device_create(np, "platinum", NULL);
 +
-                               of_platform_device_create(np, "u3-i2c",
-                                                         NULL);
++      npp = of_find_node_by_name(NULL, "u3");
++      if (npp) {
++              for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
 +                      if (strncmp(np->name, "i2c", 3) == 0) {
++                              of_platform_device_create(np, "u3-i2c", NULL);
++                              of_node_put(np);
 +                              break;
 +                      }
++              }
++              of_node_put(npp);
 +      }
 +        np = of_find_node_by_type(NULL, "smu");
 +        if (np) {
 +              of_platform_device_create(np, "smu", NULL);
 +              of_node_put(np);
 +      }
 +
 +      return 0;
 +}
 +
 +device_initcall(pmac_declare_of_platform_devices);
 +
 +/*
 + * Called very early, MMU is off, device-tree isn't unflattened
 + */
 +static int __init pmac_probe(int platform)
 +{
 +#ifdef CONFIG_PPC64
 +      if (platform != PLATFORM_POWERMAC)
 +              return 0;
 +
 +      /*
 +       * On U3, the DART (iommu) must be allocated now since it
 +       * has an impact on htab_initialize (due to the large page it
 +       * occupies having to be broken up so the DART itself is not
 +       * part of the cacheable linear mapping)
 +       */
 +      alloc_u3_dart_table();
 +#endif
 +
 +#ifdef CONFIG_PMAC_SMU
 +      /*
 +       * SMU based G5s need some memory below 2Gb, at least the current
 +       * driver needs that. We have to allocate it now. We allocate 4k
 +       * (1 small page) for now.
 +       */
 +      smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
 +#endif /* CONFIG_PMAC_SMU */
 +
 +      return 1;
 +}
 +
 +#ifdef CONFIG_PPC64
 +static int pmac_probe_mode(struct pci_bus *bus)
 +{
 +      struct device_node *node = bus->sysdata;
 +
 +      /* We need to use normal PCI probing for the AGP bus,
 +         since the device for the AGP bridge isn't in the tree. */
 +      if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
 +              return PCI_PROBE_NORMAL;
 +
 +      return PCI_PROBE_DEVTREE;
 +}
 +#endif
 +
 +struct machdep_calls __initdata pmac_md = {
 +#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
 +      .cpu_die                = generic_mach_cpu_die,
 +#endif
 +      .probe                  = pmac_probe,
 +      .setup_arch             = pmac_setup_arch,
 +      .init_early             = pmac_init_early,
 +      .show_cpuinfo           = pmac_show_cpuinfo,
 +      .show_percpuinfo        = pmac_show_percpuinfo,
 +      .init_IRQ               = pmac_pic_init,
 +      .get_irq                = mpic_get_irq, /* changed later */
 +      .pcibios_fixup          = pmac_pcibios_fixup,
 +      .restart                = pmac_restart,
 +      .power_off              = pmac_power_off,
 +      .halt                   = pmac_halt,
 +      .time_init              = pmac_time_init,
 +      .get_boot_time          = pmac_get_boot_time,
 +      .set_rtc_time           = pmac_set_rtc_time,
 +      .get_rtc_time           = pmac_get_rtc_time,
 +      .calibrate_decr         = pmac_calibrate_decr,
 +      .feature_call           = pmac_do_feature_call,
 +      .check_legacy_ioport    = pmac_check_legacy_ioport,
 +      .progress               = pmac_progress,
 +#ifdef CONFIG_PPC64
 +      .pci_probe_mode         = pmac_probe_mode,
 +      .idle_loop              = native_idle,
 +      .enable_pmcs            = power4_enable_pmcs,
 +#endif
 +#ifdef CONFIG_PPC32
 +      .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
 +      .pcibios_after_init     = pmac_pcibios_after_init,
 +      .phys_mem_access_prot   = pci_phys_mem_access_prot,
 +#endif
 +};
index 373642aff7d8df5f8dce375bbf17d2e0cf244c20,0000000000000000000000000000000000000000..c019501dacebad84b5e54b87f55b3dbf4c9ad1a1
mode 100644,000000..100644
--- /dev/null
@@@ -1,426 -1,0 +1,427 @@@
 +#ifndef __ASM_POWERPC_CPUTABLE_H
 +#define __ASM_POWERPC_CPUTABLE_H
 +
 +#include <linux/config.h>
 +#include <asm/ppc_asm.h> /* for ASM_CONST */
 +
 +#define PPC_FEATURE_32                        0x80000000
 +#define PPC_FEATURE_64                        0x40000000
 +#define PPC_FEATURE_601_INSTR         0x20000000
 +#define PPC_FEATURE_HAS_ALTIVEC               0x10000000
 +#define PPC_FEATURE_HAS_FPU           0x08000000
 +#define PPC_FEATURE_HAS_MMU           0x04000000
 +#define PPC_FEATURE_HAS_4xxMAC                0x02000000
 +#define PPC_FEATURE_UNIFIED_CACHE     0x01000000
 +#define PPC_FEATURE_HAS_SPE           0x00800000
 +#define PPC_FEATURE_HAS_EFP_SINGLE    0x00400000
 +#define PPC_FEATURE_HAS_EFP_DOUBLE    0x00200000
++#define PPC_FEATURE_NO_TB             0x00100000
 +
 +#ifdef __KERNEL__
 +#ifndef __ASSEMBLY__
 +
 +/* This structure can grow; its real size is used by head.S code
 + * via the mkdefs mechanism.
 + */
 +struct cpu_spec;
 +struct op_powerpc_model;
 +
 +typedef       void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
 +
 +struct cpu_spec {
 +      /* CPU is matched via (PVR & pvr_mask) == pvr_value */
 +      unsigned int    pvr_mask;
 +      unsigned int    pvr_value;
 +
 +      char            *cpu_name;
 +      unsigned long   cpu_features;           /* Kernel features */
 +      unsigned int    cpu_user_features;      /* Userland features */
 +
 +      /* cache line sizes */
 +      unsigned int    icache_bsize;
 +      unsigned int    dcache_bsize;
 +
 +      /* number of performance monitor counters */
 +      unsigned int    num_pmcs;
 +
 +      /* this is called to initialize various CPU bits like L1 cache,
 +       * BHT, SPD, etc... from head.S before branching to identify_machine
 +       */
 +      cpu_setup_t     cpu_setup;
 +
 +      /* Used by oprofile userspace to select the right counters */
 +      char            *oprofile_cpu_type;
 +
 +      /* Processor specific oprofile operations */
 +      struct op_powerpc_model *oprofile_model;
 +};
 +
 +extern struct cpu_spec                *cur_cpu_spec;
 +
 +extern void identify_cpu(unsigned long offset, unsigned long cpu);
 +extern void do_cpu_ftr_fixups(unsigned long offset);
 +
 +#endif /* __ASSEMBLY__ */
 +
 +/* CPU kernel features */
 +
 +/* Retain the 32b definitions; they all use the bottom half of the word */
 +#define CPU_FTR_SPLIT_ID_CACHE                ASM_CONST(0x0000000000000001)
 +#define CPU_FTR_L2CR                  ASM_CONST(0x0000000000000002)
 +#define CPU_FTR_SPEC7450              ASM_CONST(0x0000000000000004)
 +#define CPU_FTR_ALTIVEC                       ASM_CONST(0x0000000000000008)
 +#define CPU_FTR_TAU                   ASM_CONST(0x0000000000000010)
 +#define CPU_FTR_CAN_DOZE              ASM_CONST(0x0000000000000020)
 +#define CPU_FTR_USE_TB                        ASM_CONST(0x0000000000000040)
 +#define CPU_FTR_604_PERF_MON          ASM_CONST(0x0000000000000080)
 +#define CPU_FTR_601                   ASM_CONST(0x0000000000000100)
 +#define CPU_FTR_HPTE_TABLE            ASM_CONST(0x0000000000000200)
 +#define CPU_FTR_CAN_NAP                       ASM_CONST(0x0000000000000400)
 +#define CPU_FTR_L3CR                  ASM_CONST(0x0000000000000800)
 +#define CPU_FTR_L3_DISABLE_NAP                ASM_CONST(0x0000000000001000)
 +#define CPU_FTR_NAP_DISABLE_L2_PR     ASM_CONST(0x0000000000002000)
 +#define CPU_FTR_DUAL_PLL_750FX                ASM_CONST(0x0000000000004000)
 +#define CPU_FTR_NO_DPM                        ASM_CONST(0x0000000000008000)
 +#define CPU_FTR_HAS_HIGH_BATS         ASM_CONST(0x0000000000010000)
 +#define CPU_FTR_NEED_COHERENT         ASM_CONST(0x0000000000020000)
 +#define CPU_FTR_NO_BTIC                       ASM_CONST(0x0000000000040000)
 +#define CPU_FTR_BIG_PHYS              ASM_CONST(0x0000000000080000)
 +
 +#ifdef __powerpc64__
 +/* Add the 64b processor unique features in the top half of the word */
 +#define CPU_FTR_SLB                           ASM_CONST(0x0000000100000000)
 +#define CPU_FTR_16M_PAGE                      ASM_CONST(0x0000000200000000)
 +#define CPU_FTR_TLBIEL                        ASM_CONST(0x0000000400000000)
 +#define CPU_FTR_NOEXECUTE                     ASM_CONST(0x0000000800000000)
 +#define CPU_FTR_NODSISRALIGN                  ASM_CONST(0x0000001000000000)
 +#define CPU_FTR_IABR                          ASM_CONST(0x0000002000000000)
 +#define CPU_FTR_MMCRA                         ASM_CONST(0x0000004000000000)
 +#define CPU_FTR_CTRL                  ASM_CONST(0x0000008000000000)
 +#define CPU_FTR_SMT                   ASM_CONST(0x0000010000000000)
 +#define CPU_FTR_COHERENT_ICACHE       ASM_CONST(0x0000020000000000)
 +#define CPU_FTR_LOCKLESS_TLBIE                ASM_CONST(0x0000040000000000)
 +#define CPU_FTR_MMCRA_SIHV            ASM_CONST(0x0000080000000000)
 +#else
 +/* ensure on 32b processors the flags are available for compiling but
 + * don't do anything */
 +#define CPU_FTR_SLB                           ASM_CONST(0x0)
 +#define CPU_FTR_16M_PAGE                      ASM_CONST(0x0)
 +#define CPU_FTR_TLBIEL                        ASM_CONST(0x0)
 +#define CPU_FTR_NOEXECUTE                     ASM_CONST(0x0)
 +#define CPU_FTR_NODSISRALIGN                  ASM_CONST(0x0)
 +#define CPU_FTR_IABR                          ASM_CONST(0x0)
 +#define CPU_FTR_MMCRA                         ASM_CONST(0x0)
 +#define CPU_FTR_CTRL                  ASM_CONST(0x0)
 +#define CPU_FTR_SMT                   ASM_CONST(0x0)
 +#define CPU_FTR_COHERENT_ICACHE       ASM_CONST(0x0)
 +#define CPU_FTR_LOCKLESS_TLBIE                ASM_CONST(0x0)
 +#define CPU_FTR_MMCRA_SIHV            ASM_CONST(0x0)
 +#endif
 +
 +#ifndef __ASSEMBLY__
 +
 +#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
 +                                      CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
 +                                      CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
 +
 +/* iSeries doesn't support large pages */
 +#ifdef CONFIG_PPC_ISERIES
 +#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
 +#else
 +#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
 +#endif /* CONFIG_PPC_ISERIES */
 +
 +/* We only set the altivec features if the kernel was compiled with altivec
 + * support
 + */
 +#ifdef CONFIG_ALTIVEC
 +#define CPU_FTR_ALTIVEC_COMP  CPU_FTR_ALTIVEC
 +#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
 +#else
 +#define CPU_FTR_ALTIVEC_COMP  0
 +#define PPC_FEATURE_HAS_ALTIVEC_COMP    0
 +#endif
 +
 +/* We need to mark all pages as being coherent if we're SMP or we
 + * have a 74[45]x and an MPC107 host bridge.
 + */
 +#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
 +#define CPU_FTR_COMMON                  CPU_FTR_NEED_COHERENT
 +#else
 +#define CPU_FTR_COMMON                  0
 +#endif
 +
 +/* The powersave features NAP & DOZE seem to confuse BDI when
 +   debugging. So if a BDI is used, disable these
 + */
 +#ifndef CONFIG_BDI_SWITCH
 +#define CPU_FTR_MAYBE_CAN_DOZE        CPU_FTR_CAN_DOZE
 +#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
 +#else
 +#define CPU_FTR_MAYBE_CAN_DOZE        0
 +#define CPU_FTR_MAYBE_CAN_NAP 0
 +#endif
 +
 +#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
 +                   !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
 +                   !defined(CONFIG_BOOKE))
 +
 +enum {
 +      CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE,
 +      CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
 +      CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
 +          CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
 +      CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
 +          CPU_FTR_NO_DPM,
 +      CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
 +          CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
 +      CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
 +          CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
 +          CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
 +      CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
 +          CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
 +          CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
 +          CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
 +          CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
 +          CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
 +          CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
 +      CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
 +          CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
 +          CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
 +      CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
 +          CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB |
 +          CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
 +          CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
 +          CPU_FTR_NEED_COHERENT,
 +      CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB,
 +      CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
 +          CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
 +      CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
 +          CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
 +      CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
 +      CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
 +      CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
 +      CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
 +          CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
 +          CPU_FTR_MAYBE_CAN_NAP,
 +      CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
 +      CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
 +      CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
 +      CPU_FTRS_E200 = CPU_FTR_USE_TB,
 +      CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
 +      CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_BIG_PHYS,
 +      CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
 +#ifdef __powerpc64__
 +      CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
 +      CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
 +          CPU_FTR_MMCRA | CPU_FTR_CTRL,
 +      CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
 +      CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 +          CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
 +      CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 +          CPU_FTR_MMCRA | CPU_FTR_SMT |
 +          CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
 +          CPU_FTR_MMCRA_SIHV,
 +      CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 +          CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
 +      CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 +          CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
 +#endif
 +
 +      CPU_FTRS_POSSIBLE =
 +#if CLASSIC_PPC
 +          CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
 +          CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
 +          CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
 +          CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
 +          CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
 +          CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
 +          CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
 +          CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
 +#else
 +          CPU_FTRS_GENERIC_32 |
 +#endif
 +#ifdef CONFIG_PPC64BRIDGE
 +          CPU_FTRS_POWER3_32 |
 +#endif
 +#ifdef CONFIG_POWER4
 +          CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
 +#endif
 +#ifdef CONFIG_8xx
 +          CPU_FTRS_8XX |
 +#endif
 +#ifdef CONFIG_40x
 +          CPU_FTRS_40X |
 +#endif
 +#ifdef CONFIG_44x
 +          CPU_FTRS_44X |
 +#endif
 +#ifdef CONFIG_E200
 +          CPU_FTRS_E200 |
 +#endif
 +#ifdef CONFIG_E500
 +          CPU_FTRS_E500 | CPU_FTRS_E500_2 |
 +#endif
 +#ifdef __powerpc64__
 +          CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
 +          CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
 +#endif
 +          0,
 +
 +      CPU_FTRS_ALWAYS =
 +#if CLASSIC_PPC
 +          CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
 +          CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
 +          CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
 +          CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
 +          CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
 +          CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
 +          CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
 +          CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
 +#else
 +          CPU_FTRS_GENERIC_32 &
 +#endif
 +#ifdef CONFIG_PPC64BRIDGE
 +          CPU_FTRS_POWER3_32 &
 +#endif
 +#ifdef CONFIG_POWER4
 +          CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
 +#endif
 +#ifdef CONFIG_8xx
 +          CPU_FTRS_8XX &
 +#endif
 +#ifdef CONFIG_40x
 +          CPU_FTRS_40X &
 +#endif
 +#ifdef CONFIG_44x
 +          CPU_FTRS_44X &
 +#endif
 +#ifdef CONFIG_E200
 +          CPU_FTRS_E200 &
 +#endif
 +#ifdef CONFIG_E500
 +          CPU_FTRS_E500 & CPU_FTRS_E500_2 &
 +#endif
 +#ifdef __powerpc64__
 +          CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
 +          CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
 +#endif
 +          CPU_FTRS_POSSIBLE,
 +};
 +
 +static inline int cpu_has_feature(unsigned long feature)
 +{
 +      return (CPU_FTRS_ALWAYS & feature) ||
 +             (CPU_FTRS_POSSIBLE
 +              & cur_cpu_spec->cpu_features
 +              & feature);
 +}
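
Because CPU_FTRS_POSSIBLE is the OR of every feature set the current configuration can select and CPU_FTRS_ALWAYS is the AND, cpu_has_feature() frequently folds to a constant at compile time: a bit present in CPU_FTRS_ALWAYS returns true without touching cur_cpu_spec, and a bit absent from CPU_FTRS_POSSIBLE reduces to zero so the guarded code is discarded. A small sketch under an assumed CONFIG_8xx-only, non-SMP build (the helper called in the branch is hypothetical):

	/* Sketch: with only CONFIG_8xx selected, CPU_FTRS_POSSIBLE is
	 * CPU_FTRS_GENERIC_32 | CPU_FTRS_8XX, which never contains
	 * CPU_FTR_ALTIVEC, so the test is constant-false and the whole
	 * block is compiled away. */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_altivec_support();	/* hypothetical helper */
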
 +
 +#endif /* !__ASSEMBLY__ */
 +
 +#ifdef __ASSEMBLY__
 +
 +#define BEGIN_FTR_SECTION             98:
 +
 +#ifndef __powerpc64__
 +#define END_FTR_SECTION(msk, val)             \
 +99:                                           \
 +      .section __ftr_fixup,"a";               \
 +      .align 2;                               \
 +      .long msk;                              \
 +      .long val;                              \
 +      .long 98b;                              \
 +      .long 99b;                              \
 +      .previous
 +#else /* __powerpc64__ */
 +#define END_FTR_SECTION(msk, val)             \
 +99:                                           \
 +      .section __ftr_fixup,"a";               \
 +      .align 3;                               \
 +      .llong msk;                             \
 +      .llong val;                             \
 +      .llong 98b;                             \
 +      .llong 99b;                             \
 +      .previous
 +#endif /* __powerpc64__ */
 +
 +#define END_FTR_SECTION_IFSET(msk)    END_FTR_SECTION((msk), (msk))
 +#define END_FTR_SECTION_IFCLR(msk)    END_FTR_SECTION((msk), 0)
 +#endif /* __ASSEMBLY__ */
 +
 +#endif /* __KERNEL__ */
 +#endif /* __ASM_POWERPC_CPUTABLE_H */