/*
 * From linux-2.6: arch/ppc64/kernel/iSeries_setup.c
 * (patch: "ppc64 iSeries: Move setup of systemcfg->platform into
 *  iSeries device tree")
 */
1 /*
2  *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3  *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4  *
5  *    Module name: iSeries_setup.c
6  *
7  *    Description:
8  *      Architecture- / platform-specific boot-time initialization code for
9  *      the IBM iSeries LPAR.  Adapted from original code by Grant Erickson and
10  *      code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
11  *      <dan@net4x.com>.
12  *
13  *      This program is free software; you can redistribute it and/or
14  *      modify it under the terms of the GNU General Public License
15  *      as published by the Free Software Foundation; either version
16  *      2 of the License, or (at your option) any later version.
17  */
18
19 #undef DEBUG
20
21 #include <linux/config.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/smp.h>
25 #include <linux/param.h>
26 #include <linux/string.h>
27 #include <linux/initrd.h>
28 #include <linux/seq_file.h>
29 #include <linux/kdev_t.h>
30 #include <linux/major.h>
31 #include <linux/root_dev.h>
32
33 #include <asm/processor.h>
34 #include <asm/machdep.h>
35 #include <asm/page.h>
36 #include <asm/mmu.h>
37 #include <asm/pgtable.h>
38 #include <asm/mmu_context.h>
39 #include <asm/cputable.h>
40 #include <asm/sections.h>
41 #include <asm/iommu.h>
42 #include <asm/firmware.h>
43
44 #include <asm/time.h>
45 #include "iSeries_setup.h"
46 #include <asm/naca.h>
47 #include <asm/paca.h>
48 #include <asm/cache.h>
49 #include <asm/sections.h>
50 #include <asm/abs_addr.h>
51 #include <asm/iSeries/HvCallHpt.h>
52 #include <asm/iSeries/HvLpConfig.h>
53 #include <asm/iSeries/HvCallEvent.h>
54 #include <asm/iSeries/HvCallSm.h>
55 #include <asm/iSeries/HvCallXm.h>
56 #include <asm/iSeries/ItLpQueue.h>
57 #include <asm/iSeries/IoHriMainStore.h>
58 #include <asm/iSeries/mf.h>
59 #include <asm/iSeries/HvLpEvent.h>
60 #include <asm/iSeries/iSeries_irq.h>
61 #include <asm/iSeries/IoHriProcessorVpd.h>
62 #include <asm/iSeries/ItVpdAreas.h>
63 #include <asm/iSeries/LparMap.h>
64
65 extern void hvlog(char *fmt, ...);
66
67 #ifdef DEBUG
68 #define DBG(fmt...) hvlog(fmt)
69 #else
70 #define DBG(fmt...)
71 #endif
72
/* Function Prototypes */
extern void ppcdbg_initialize(void);

static void build_iSeries_Memory_Map(void);
static void setup_iSeries_cache_sizes(void);
static int iseries_shared_idle(void);
static int iseries_dedicated_idle(void);
#ifdef CONFIG_PCI
extern void iSeries_pci_final_fixup(void);
#else
/* No PCI configured: provide a no-op stub so callers need no #ifdefs. */
static void iSeries_pci_final_fixup(void) { }
#endif

/* Global Variables */
/* Processor clock, derived from the processor VPD in iSeries_setup_arch(). */
static unsigned long procFreqHz;
static unsigned long procFreqMhz;
static unsigned long procFreqMhzHundreths;

/* Time base clock, likewise derived from the processor VPD. */
static unsigned long tbFreqHz;
static unsigned long tbFreqMhz;
static unsigned long tbFreqMhzHundreths;

/* Non-zero when itLpNaca.xPirEnvironMode == 0 (Piranha simulator;
 * set in iSeries_init_early). */
int piranha_simulator;

extern int rd_size;		/* Defined in drivers/block/rd.c */
extern unsigned long klimit;
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;

extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan;

/* Set once mf_init() completes; gates iSeries_progress's use of the
 * MF progress display. */
static int mf_initialized;
106
/*
 * One contiguous range of partition memory, described both in the
 * hypervisor's "absolute" address space and in the Linux "logical"
 * (access-bitmap) space.  The mainstore VPD processors below fill
 * these in; final values are in 256K-chunk units.
 */
struct MemoryBlock {
	unsigned long absStart;
	unsigned long absEnd;
	unsigned long logicalStart;
	unsigned long logicalEnd;
};
113
/*
 * Process the main store vpd to determine where the holes in memory are
 * and return the number of physical blocks and fill in the array of
 * block data.
 *
 * Condor layout (IoHriMainStoreSegment4): at most one non-interleaved
 * "hole", so the result is either one block spanning 0x100000000 chunks
 * or two blocks around the hole.  mb_array needs room for two entries;
 * max_entries is not consulted here.
 */
static unsigned long iSeries_process_Condor_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	unsigned long holeFirstChunk, holeSizeChunks;
	unsigned long numMemoryBlocks = 1;
	struct IoHriMainStoreSegment4 *msVpd =
		(struct IoHriMainStoreSegment4 *)xMsVpd;
	unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
	unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
	unsigned long holeSize = holeEnd - holeStart;

	printk("Mainstore_VPD: Condor\n");
	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	mb_array[0].logicalStart = 0;
	mb_array[0].logicalEnd = 0x100000000;
	mb_array[0].absStart = 0;
	mb_array[0].absEnd = 0x100000000;

	if (holeSize) {
		numMemoryBlocks = 2;
		/* Mask off the high address-space bits, then convert the
		 * hole start and size to 256K-chunk units. */
		holeStart = holeStart & 0x000fffffffffffff;
		holeStart = addr_to_chunk(holeStart);
		holeFirstChunk = holeStart;
		holeSize = addr_to_chunk(holeSize);
		holeSizeChunks = holeSize;
		printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
				holeFirstChunk, holeSizeChunks );
		/* Block 0 ends at the hole; block 1 resumes past it so the
		 * logical space stays contiguous. */
		mb_array[0].logicalEnd = holeFirstChunk;
		mb_array[0].absEnd = holeFirstChunk;
		mb_array[1].logicalStart = holeFirstChunk;
		mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
		mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
		mb_array[1].absEnd = 0x100000000;
	}
	return numMemoryBlocks;
}
160
#define MaxSegmentAreas			32
#define MaxSegmentAdrRangeBlocks	128
#define MaxAreaRangeBlocks		4

/*
 * Process the Regatta-style main store VPD (IoHriMainStoreSegment5):
 * walk every existing mainstore area, collect its address-range blocks
 * (dropping duplicates), sort them by absolute start address, then
 * assign contiguous "logical" (hypervisor bitmap) addresses and convert
 * everything to 256K-chunk units.
 *
 * Returns the number of entries written to mb_array; panics if more
 * than max_entries distinct blocks are found.
 */
static unsigned long iSeries_process_Regatta_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	struct IoHriMainStoreSegment5 *msVpdP =
		(struct IoHriMainStoreSegment5 *)xMsVpd;
	unsigned long numSegmentBlocks = 0;
	u32 existsBits = msVpdP->msAreaExists;
	unsigned long area_num;

	printk("Mainstore_VPD: Regatta\n");

	for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
		unsigned long numAreaBlocks;
		struct IoHriMainStoreArea4 *currentArea;

		/* existsBits is consumed MSB-first, one bit per area. */
		if (existsBits & 0x80000000) {
			unsigned long block_num;

			currentArea = &msVpdP->msAreaArray[area_num];
			numAreaBlocks = currentArea->numAdrRangeBlocks;
			printk("ms_vpd: processing area %2ld  blocks=%ld",
					area_num, numAreaBlocks);
			for (block_num = 0; block_num < numAreaBlocks;
					++block_num ) {
				/* Process an address range block */
				struct MemoryBlock tempBlock;
				unsigned long i;

				tempBlock.absStart =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
				tempBlock.absEnd =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
				tempBlock.logicalStart = 0;
				tempBlock.logicalEnd   = 0;
				printk("\n          block %ld absStart=%016lx absEnd=%016lx",
						block_num, tempBlock.absStart,
						tempBlock.absEnd);

				/* Linear scan for an already-recorded block
				 * with the same absolute start; duplicates
				 * are skipped. */
				for (i = 0; i < numSegmentBlocks; ++i) {
					if (mb_array[i].absStart ==
							tempBlock.absStart)
						break;
				}
				if (i == numSegmentBlocks) {
					if (numSegmentBlocks == max_entries)
						panic("iSeries_process_mainstore_vpd: too many memory blocks");
					mb_array[numSegmentBlocks] = tempBlock;
					++numSegmentBlocks;
				} else
					printk(" (duplicate)");
			}
			printk("\n");
		}
		existsBits <<= 1;
	}
	/* Now sort the blocks found into ascending sequence */
	/* (simple bubble sort on absStart; block counts are tiny) */
	if (numSegmentBlocks > 1) {
		unsigned long m, n;

		for (m = 0; m < numSegmentBlocks - 1; ++m) {
			for (n = numSegmentBlocks - 1; m < n; --n) {
				if (mb_array[n].absStart <
						mb_array[n-1].absStart) {
					struct MemoryBlock tempBlock;

					tempBlock = mb_array[n];
					mb_array[n] = mb_array[n-1];
					mb_array[n-1] = tempBlock;
				}
			}
		}
	}
	/*
	 * Assign "logical" addresses to each block.  These
	 * addresses correspond to the hypervisor "bitmap" space.
	 * Convert all addresses into units of 256K chunks.
	 */
	{
	unsigned long i, nextBitmapAddress;

	printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
	nextBitmapAddress = 0;
	for (i = 0; i < numSegmentBlocks; ++i) {
		unsigned long length = mb_array[i].absEnd -
			mb_array[i].absStart;

		/* Logical addresses are packed back-to-back. */
		mb_array[i].logicalStart = nextBitmapAddress;
		mb_array[i].logicalEnd = nextBitmapAddress + length;
		nextBitmapAddress += length;
		printk("          Bitmap range: %016lx - %016lx\n"
				"        Absolute range: %016lx - %016lx\n",
				mb_array[i].logicalStart,
				mb_array[i].logicalEnd,
				mb_array[i].absStart, mb_array[i].absEnd);
		/* Strip the high address-space bits and switch to
		 * 256K-chunk units. */
		mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
				0x000fffffffffffff);
		mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
				0x000fffffffffffff);
		mb_array[i].logicalStart =
			addr_to_chunk(mb_array[i].logicalStart);
		mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
	}
	}

	return numSegmentBlocks;
}
271
272 static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
273                 unsigned long max_entries)
274 {
275         unsigned long i;
276         unsigned long mem_blocks = 0;
277
278         if (cpu_has_feature(CPU_FTR_SLB))
279                 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
280                                 max_entries);
281         else
282                 mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
283                                 max_entries);
284
285         printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks);
286         for (i = 0; i < mem_blocks; ++i) {
287                 printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
288                        "                             abs chunks %016lx - %016lx\n",
289                         i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
290                         mb_array[i].absStart, mb_array[i].absEnd);
291         }
292         return mem_blocks;
293 }
294
295 static void __init iSeries_get_cmdline(void)
296 {
297         char *p, *q;
298
299         /* copy the command line parameter from the primary VSP  */
300         HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
301                         HvLpDma_Direction_RemoteToLocal);
302
303         p = cmd_line;
304         q = cmd_line + 255;
305         while(p < q) {
306                 if (!*p || *p == '\n')
307                         break;
308                 ++p;
309         }
310         *p = 0;
311 }
312
/*
 * Very early platform init: announce iSeries firmware, set up the
 * initrd (if any), snapshot time-base/TOD for recalibration, install
 * the hash-table and iommu management hooks, fetch and parse the
 * command line, apply any mem= limit, and bring up SMP/lp-event/MF
 * machinery.  Order matters throughout; see the inline comments.
 */
static void __init iSeries_init_early(void)
{
	extern unsigned long memory_limit;

	DBG(" -> iSeries_init_early()\n");

	ppc64_firmware_features = FW_FEATURE_ISERIES;

	ppcdbg_initialize();

	ppc64_interrupt_controller = IC_ISERIES;

#if defined(CONFIG_BLK_DEV_INITRD)
	/*
	 * If the init RAM disk has been configured and there is
	 * a non-zero starting address for it, set it up
	 */
	if (naca.xRamDisk) {
		initrd_start = (unsigned long)__va(naca.xRamDisk);
		initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
		initrd_below_start_ok = 1;	// ramdisk in kernel space
		ROOT_DEV = Root_RAM0;
		/* Grow the default ramdisk size if the image is larger. */
		if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
			rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
	} else
#endif /* CONFIG_BLK_DEV_INITRD */
	{
	    /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
	}

	/* Snapshot matching time-base / TOD values for later recalibration. */
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();

	/*
	 * Cache sizes must be initialized before hpte_init_iSeries is called
	 * as the later need them for flush_icache_range()
	 */
	setup_iSeries_cache_sizes();

	/*
	 * Initialize the hash table management pointers
	 */
	hpte_init_iSeries();

	/*
	 * Initialize the DMA/TCE management
	 */
	iommu_init_early_iSeries();

	iSeries_get_cmdline();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	/* Parse early parameters, in particular mem=x */
	parse_early_param();

	/* Apply mem=, but reject limits at or above available RAM. */
	if (memory_limit) {
		if (memory_limit < systemcfg->physicalMemorySize)
			systemcfg->physicalMemorySize = memory_limit;
		else {
			printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
			memory_limit = 0;
		}
	}

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif
	/* xPirEnvironMode == 0 indicates the Piranha simulator. */
	if (itLpNaca.xPirEnvironMode == 0)
		piranha_simulator = 1;

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	/* Machine facilities up; iSeries_progress may now drive the display. */
	mf_init();
	mf_initialized = 1;
	mb();

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;
#endif /* CONFIG_BLK_DEV_INITRD */

	DBG(" <- iSeries_init_early()\n");
}
406
/*
 * Translation table between Linux "physical" chunk numbers and
 * hypervisor absolute chunk numbers.  The mapping array is carved out
 * by mschunks_alloc() and filled in by build_iSeries_Memory_Map().
 */
struct mschunks_map mschunks_map = {
	/* XXX We don't use these, but Piranha might need them. */
	.chunk_size  = MSCHUNKS_CHUNK_SIZE,
	.chunk_shift = MSCHUNKS_CHUNK_SHIFT,
	.chunk_mask  = MSCHUNKS_OFFSET_MASK,
};
EXPORT_SYMBOL(mschunks_map);
414
415 void mschunks_alloc(unsigned long num_chunks)
416 {
417         klimit = _ALIGN(klimit, sizeof(u32));
418         mschunks_map.mapping = (u32 *)klimit;
419         klimit += num_chunks * sizeof(u32);
420         mschunks_map.num_chunks = num_chunks;
421 }
422
/*
 * The iSeries may have very large memories ( > 128 GB ) and a partition
 * may get memory in "chunks" that may be anywhere in the 2**52 real
 * address space.  The chunks are 256K in size.  To map this to the
 * memory model Linux expects, the AS/400 specific code builds a
 * translation table to translate what Linux thinks are "physical"
 * addresses to the actual real addresses.  This allows us to make
 * it appear to Linux that we have contiguous memory starting at
 * physical address zero while in fact this could be far from the truth.
 * To avoid confusion, I'll let the words physical and/or real address
 * apply to the Linux addresses while I'll use "absolute address" to
 * refer to the actual hardware real address.
 *
 * build_iSeries_Memory_Map gets information from the Hypervisor and
 * looks at the Main Store VPD to determine the absolute addresses
 * of the memory that has been assigned to our partition and builds
 * a table used to translate Linux's physical addresses to these
 * absolute addresses.  Absolute addresses are needed when
 * communicating with the hypervisor (e.g. to build HPT entries)
 */

static void __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 num_ptegs;
	u32 totalChunks,moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	mschunks_alloc(totalChunks);

	/*
	 * Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks
	 */

	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize =  itLpNaca.xLoadAreaChunks;

	/*
	 * Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ((loadAreaSize * 64) > HvPagesToMap)
		loadAreaSize = HvPagesToMap / 64;

	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/*
	 * TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size
	 */

	printk("Mapping load area - physical addr = 0000000000000000\n"
		"                    absolute addr = %016lx\n",
		chunk_to_addr(loadAreaFirstChunk));
	printk("Load area size %dK\n", loadAreaSize * 256);

	/* Identity-map the load area to the start of physical memory. */
	for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
		mschunks_map.mapping[nextPhysChunk] =
			loadAreaFirstChunk + nextPhysChunk;

	/*
	 * Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)HvCallHpt_getHptPages();
	hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk("HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);

	/* Fill in the hashed page table hash mask */
	num_ptegs = hptSizePages *
		(PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
	htab_hash_mask = num_ptegs - 1;

	/*
	 * The actual hashed page table is in the hypervisor,
	 * we have no direct access
	 */
	htab_address = NULL;

	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);

	/*
	 * Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	while (moreChunks) {
		/* Each doubleword of the access map covers 64 chunks,
		 * one bit per chunk, MSB first. */
		map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
				currDword);
		thisChunk = currChunk;
		while (map) {
			chunkBit = map >> 63;
			map <<= 1;
			if (chunkBit) {
				--moreChunks;
				/* Advance to the memory block containing
				 * this logical chunk. */
				while (thisChunk >= mb[curBlock].logicalEnd) {
					++curBlock;
					if (curBlock >= numMemoryBlocks)
						panic("out of memory blocks");
				}
				if (thisChunk < mb[curBlock].logicalStart)
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					(thisChunk - mb[curBlock].logicalStart);
				/* Skip chunks that belong to the HPT or to
				 * the already-mapped load area. */
				if (((absChunk < hptFirstChunk) ||
				     (absChunk > hptLastChunk)) &&
				    ((absChunk < loadAreaFirstChunk) ||
				     (absChunk > loadAreaLastChunk))) {
					mschunks_map.mapping[nextPhysChunk] =
						absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/*
	 * main store size (in chunks) is
	 *   totalChunks - hptSizeChunks
	 * which should be equal to
	 *   nextPhysChunk
	 */
	systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
}
582
/*
 * Set up the variables that describe the cache line sizes
 * for this machine.  Copies the I/D cache geometry from this cpu's
 * processor VPD entry into both the systemcfg page and ppc64_caches.
 */
static void __init setup_iSeries_cache_sizes(void)
{
	unsigned int i, n;
	/* Index of this cpu's entry in the processor VPD array. */
	unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	/* VPD cache sizes are in KB, hence the * 1024. */
	systemcfg->icache_size =
	ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
	systemcfg->icache_line_size =
	ppc64_caches.iline_size =
		xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
	systemcfg->dcache_size =
	ppc64_caches.dsize =
		xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
	systemcfg->dcache_line_size =
	ppc64_caches.dline_size =
		xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
	ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
	ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;

	/* log_iline_size = floor(log2(iline_size)) */
	i = ppc64_caches.iline_size;
	n = 0;
	while ((i = (i / 2)))
		++n;
	ppc64_caches.log_iline_size = n;

	/* log_dline_size = floor(log2(dline_size)) */
	i = ppc64_caches.dline_size;
	n = 0;
	while ((i = (i / 2)))
		++n;
	ppc64_caches.log_dline_size = n;

	printk("D-cache line size = %d\n",
			(unsigned int)ppc64_caches.dline_size);
	printk("I-cache line size = %d\n",
			(unsigned int)ppc64_caches.iline_size);
}
623
/*
 * Arch setup: pick the idle loop (shared vs dedicated processor),
 * stamp the systemcfg page, set up the lp event queue, and compute
 * the processor and time-base frequencies from the processor VPD.
 */
static void __init iSeries_setup_arch(void)
{
	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	if (get_paca()->lppaca.shared_proc) {
		ppc_md.idle_loop = iseries_shared_idle;
		printk(KERN_INFO "Using shared processor idle loop\n");
	} else {
		ppc_md.idle_loop = iseries_dedicated_idle;
		printk(KERN_INFO "Using dedicated idle loop\n");
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;

	/* Setup the Lp Event Queue */
	setup_hvlpevent_queue();

	/* Compute processor frequency */
	/* NOTE(review): the (1UL << 34) / (1UL << 32) scale factors come
	 * from the VPD frequency encoding -- confirm against
	 * IoHriProcessorVpd.h. */
	procFreqHz = ((1UL << 34) * 1000000) /
			xIoHriProcessorVpd[procIx].xProcFreq;
	procFreqMhz = procFreqHz / 1000000;
	procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
	ppc_proc_freq = procFreqHz;

	/* Compute time base frequency */
	tbFreqHz = ((1UL << 32) * 1000000) /
		xIoHriProcessorVpd[procIx].xTimeBaseFreq;
	tbFreqMhz = tbFreqHz / 1000000;
	tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
	ppc_tb_freq = tbFreqHz;

	printk("Max  logical processors = %d\n",
			itVpdAreas.xSlicMaxLogicalProcs);
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs);
	printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
			procFreqMhzHundreths);
	printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
			tbFreqMhzHundreths);
	systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
	printk("Processor version = %x\n", systemcfg->processor);
}
672
/* Emit the iSeries machine description line for /proc/cpuinfo. */
static void iSeries_get_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
}
677
/*
 * Interrupt-controller hook.  Never reports a conventional irq number;
 * on iSeries, work arrives via the lp event queue instead (see the
 * idle loops below, which poll hvlpevent_is_pending()).
 */
static int iSeries_get_irq(struct pt_regs *regs)
{
	/* -2 means ignore this interrupt */
	return -2;
}
687
/*
 * Restart the partition via the machine-facilities reboot service.
 * The cmd argument is ignored.
 */
static void iSeries_restart(char *cmd)
{
	mf_reboot();
}
695
/*
 * Power the partition off via the machine facilities.
 */
static void iSeries_power_off(void)
{
	mf_power_off();
}
703
/*
 * Halt is implemented as a power-off on iSeries (same MF call as
 * iSeries_power_off).
 */
static void iSeries_halt(void)
{
	mf_power_off();
}
711
712 /*
713  * void __init iSeries_calibrate_decr()
714  *
715  * Description:
716  *   This routine retrieves the internal processor frequency from the VPD,
717  *   and sets up the kernel timer decrementer based on that value.
718  *
719  */
720 static void __init iSeries_calibrate_decr(void)
721 {
722         unsigned long   cyclesPerUsec;
723         struct div_result divres;
724
725         /* Compute decrementer (and TB) frequency in cycles/sec */
726         cyclesPerUsec = ppc_tb_freq / 1000000;
727
728         /*
729          * Set the amount to refresh the decrementer by.  This
730          * is the number of decrementer ticks it takes for
731          * 1/HZ seconds.
732          */
733         tb_ticks_per_jiffy = ppc_tb_freq / HZ;
734
735 #if 0
736         /* TEST CODE FOR ADJTIME */
737         tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
738         /* END OF TEST CODE */
739 #endif
740
741         /*
742          * tb_ticks_per_sec = freq; would give better accuracy
743          * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
744          * that jiffies (and xtime) will match the time returned
745          * by do_gettimeofday.
746          */
747         tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
748         tb_ticks_per_usec = cyclesPerUsec;
749         tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
750         div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
751         tb_to_xs = divres.result_low;
752         setup_default_decr();
753 }
754
755 static void __init iSeries_progress(char * st, unsigned short code)
756 {
757         printk("Progress: [%04x] - %s\n", (unsigned)code, st);
758         if (!piranha_simulator && mf_initialized) {
759                 if (code != 0xffff)
760                         mf_display_progress(code);
761                 else
762                         mf_clear_src();
763         }
764 }
765
/*
 * Raise klimit (the end of the kernel image) past any appended ram
 * disk, or past any embedded System.map, so that early allocations
 * do not overwrite them.
 */
static void __init iSeries_fixup_klimit(void)
{
	/*
	 * Change klimit to take into account any ram disk
	 * that may be included
	 */
	if (naca.xRamDisk)
		klimit = KERNELBASE + (u64)naca.xRamDisk +
			(naca.xRamDiskSize * PAGE_SIZE);
	else {
		/*
		 * No ram disk was included - check and see if there
		 * was an embedded system map.  Change klimit to take
		 * into account any embedded system map
		 */
		if (embedded_sysmap_end)
			/* Round the end up to the next 4K boundary. */
			klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
					0xfffffffffffff000);
	}
}
786
/*
 * Late initcall: blank the service-processor progress line once boot
 * has finished (code 0xffff makes iSeries_progress call mf_clear_src).
 */
static int __init iSeries_src_init(void)
{
	/* clear the progress line */
	ppc_md.progress(" ", 0xffff);
	return 0;
}

late_initcall(iSeries_src_init);
795
/*
 * Trigger processing of pending iSeries lp events: loads 0x5555 into
 * r0 and issues an 'sc' instruction.  NOTE(review): this magic system
 * call is presumably intercepted by the low-level exception code --
 * confirm against the ppc64 head.S / lp event handling.
 */
static inline void process_iSeries_events(void)
{
	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
}
800
/*
 * Give the physical processor back to the hypervisor for up to one
 * jiffy (used by the shared-processor idle loop).  All interrupt
 * classes are masked around the timed yield, and a fake decrementer
 * interrupt is flagged on return because the decrementer does not
 * advance while yielded.
 */
static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_paca()->lppaca.int_dword.fields.decr_int = 1;
	process_iSeries_events();
}
821
/*
 * Idle loop for a virtual processor sharing a physical processor:
 * rather than spinning, yield the CPU back to the hypervisor whenever
 * there is neither work to schedule nor a pending LP event.
 */
static int iseries_shared_idle(void)
{
	while (1) {
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off so we cannot yield after
			 * an interrupt has already set need_resched. */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();

		/* Drain any event that ended the inner loop. */
		if (hvlpevent_is_pending())
			process_iSeries_events();

		schedule();
	}

	/* Not reached: the outer loop never exits. */
	return 0;
}
847
/*
 * Idle loop for a virtual processor with a dedicated physical
 * processor: poll for work at low hardware-thread priority, servicing
 * pending LP events as they arrive.
 */
static int iseries_dedicated_idle(void)
{
	long oldval;

	while (1) {
		/* Atomically consume any pending reschedule request. */
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			/* Advertise that we poll TIF_NEED_RESCHED, so
			 * wakers can skip sending a resched IPI. */
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched()) {
				ppc64_runlatch_off();
				HMT_low();	/* drop SMT priority while idle */

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			/* The flag was set before we cleared it: re-arm
			 * it for the scheduler and go straight there. */
			set_need_resched();
		}

		ppc64_runlatch_on();
		schedule();
	}

	/* Not reached. */
	return 0;
}
881
#ifndef CONFIG_PCI
/* Without PCI there is nothing to initialize, but the machdep table
 * below still needs a valid init_IRQ entry. */
void __init iSeries_init_IRQ(void) { }
#endif
885
886 static int __init iseries_probe(int platform)
887 {
888         return PLATFORM_ISERIES_LPAR == platform;
889 }
890
/*
 * Platform hook table for the iSeries LPAR, selected when
 * iseries_probe() matches the platform number.  The generic ppc64
 * code calls through these entries (e.g. ppc_md.progress above).
 */
struct machdep_calls __initdata iseries_md = {
	.setup_arch	= iSeries_setup_arch,
	.get_cpuinfo	= iSeries_get_cpuinfo,
	.init_IRQ	= iSeries_init_IRQ,
	.get_irq	= iSeries_get_irq,
	.init_early	= iSeries_init_early,
	.pcibios_fixup	= iSeries_pci_final_fixup,
	.restart	= iSeries_restart,
	.power_off	= iSeries_power_off,
	.halt		= iSeries_halt,
	.get_boot_time	= iSeries_get_boot_time,
	.set_rtc_time	= iSeries_set_rtc_time,
	.get_rtc_time	= iSeries_get_rtc_time,
	.calibrate_decr	= iSeries_calibrate_decr,
	.progress	= iSeries_progress,
	.probe		= iseries_probe,
	/* XXX Implement enable_pmcs for iSeries */
};
909
/*
 * A fixed-size byte area plus a cursor.  'next' holds the address of
 * the first free byte; dt_check_blob() BUGs once the cursor reaches
 * the 'next' field itself, i.e. runs off the end of data[].
 */
struct blob {
	unsigned char data[PAGE_SIZE];
	unsigned long next;
};

/*
 * In-memory image of the flattened device tree handed to the early
 * boot code: standard header, a zeroed (terminating) reserve map,
 * the structure blob, then the strings blob.
 */
struct iseries_flat_dt {
	struct boot_param_header header;
	u64 reserve_map[2];
	struct blob dt;
	struct blob strings;
};

/* The single statically-allocated device tree image. */
struct iseries_flat_dt iseries_dt;
923
924 void dt_init(struct iseries_flat_dt *dt)
925 {
926         dt->header.off_mem_rsvmap =
927                 offsetof(struct iseries_flat_dt, reserve_map);
928         dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
929         dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
930         dt->header.totalsize = sizeof(struct iseries_flat_dt);
931         dt->header.dt_strings_size = sizeof(struct blob);
932
933         /* There is no notion of hardware cpu id on iSeries */
934         dt->header.boot_cpuid_phys = smp_processor_id();
935
936         dt->dt.next = (unsigned long)&dt->dt.data;
937         dt->strings.next = (unsigned long)&dt->strings.data;
938
939         dt->header.magic = OF_DT_HEADER;
940         dt->header.version = 0x10;
941         dt->header.last_comp_version = 0x10;
942
943         dt->reserve_map[0] = 0;
944         dt->reserve_map[1] = 0;
945 }
946
947 void dt_check_blob(struct blob *b)
948 {
949         if (b->next >= (unsigned long)&b->next) {
950                 DBG("Ran out of space in flat device tree blob!\n");
951                 BUG();
952         }
953 }
954
955 void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
956 {
957         *((u32*)dt->dt.next) = value;
958         dt->dt.next += sizeof(u32);
959
960         dt_check_blob(&dt->dt);
961 }
962
963 void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
964 {
965         *((u64*)dt->dt.next) = value;
966         dt->dt.next += sizeof(u64);
967
968         dt_check_blob(&dt->dt);
969 }
970
971 unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
972 {
973         unsigned long start = blob->next - (unsigned long)blob->data;
974
975         memcpy((char *)blob->next, data, len);
976         blob->next = _ALIGN(blob->next + len, 4);
977
978         dt_check_blob(blob);
979
980         return start;
981 }
982
983 void dt_start_node(struct iseries_flat_dt *dt, char *name)
984 {
985         dt_push_u32(dt, OF_DT_BEGIN_NODE);
986         dt_push_bytes(&dt->dt, name, strlen(name) + 1);
987 }
988
989 #define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
990
991 void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
992 {
993         unsigned long offset;
994
995         dt_push_u32(dt, OF_DT_PROP);
996
997         /* Length of the data */
998         dt_push_u32(dt, len);
999
1000         /* Put the property name in the string blob. */
1001         offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
1002
1003         /* The offset of the properties name in the string blob. */
1004         dt_push_u32(dt, (u32)offset);
1005
1006         /* The actual data. */
1007         dt_push_bytes(&dt->dt, data, len);
1008 }
1009
/* Emit a string-valued property, terminating NUL included. */
void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
{
	dt_prop(dt, name, data, strlen(data) + 1);
}
1014
1015 void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
1016 {
1017         dt_prop(dt, name, (char *)&data, sizeof(u32));
1018 }
1019
1020 void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
1021 {
1022         dt_prop(dt, name, (char *)&data, sizeof(u64));
1023 }
1024
1025 void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
1026 {
1027         dt_prop(dt, name, (char *)data, sizeof(u64) * n);
1028 }
1029
1030 void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
1031 {
1032         dt_prop(dt, name, NULL, 0);
1033 }
1034
/*
 * Build the minimal flattened device tree for iSeries: a root node
 * with 2-cell addresses and sizes, one memory node spanning all of
 * physical memory, and a /chosen node carrying the platform number.
 */
void build_flat_dt(struct iseries_flat_dt *dt)
{
	u64 tmp[2];

	dt_init(dt);

	/* Root node */
	dt_start_node(dt, "");

	dt_prop_u32(dt, "#address-cells", 2);
	dt_prop_u32(dt, "#size-cells", 2);

	/* /memory */
	dt_start_node(dt, "memory@0");
	dt_prop_str(dt, "name", "memory");
	dt_prop_str(dt, "device_type", "memory");
	tmp[0] = 0;					/* start address */
	tmp[1] = systemcfg->physicalMemorySize;		/* size */
	dt_prop_u64_list(dt, "reg", tmp, 2);
	dt_end_node(dt);

	/* /chosen */
	dt_start_node(dt, "chosen");
	dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
	dt_end_node(dt);

	dt_end_node(dt);	/* close the root node */

	dt_push_u32(dt, OF_DT_END);
}
1064
/*
 * Earliest iSeries-specific setup: adjust klimit, build the physical
 * to AS/400-absolute address translation map, construct the flat
 * device tree, and return the tree's physical address to the caller.
 */
void * __init iSeries_early_setup(void)
{
	iSeries_fixup_klimit();

	/*
	 * Initialize the table which translate Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	build_iSeries_Memory_Map();

	build_flat_dt(&iseries_dt);

	return (void *) __pa(&iseries_dt);
}